body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
7110fdb8b7486ef7c847a0ccd5ae10e60cc163f1513fcc49426c33396dc2deea | def setStartTime(self, startTime):
'\n :param startTime: (Optional) \n '
self.startTime = startTime | :param startTime: (Optional) | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/detection/apis/GetSiteMonitorDataPointsRequest.py | setStartTime | Ureimu/weather-robot | 14 | python | def setStartTime(self, startTime):
'\n \n '
self.startTime = startTime | def setStartTime(self, startTime):
'\n \n '
self.startTime = startTime<|docstring|>:param startTime: (Optional)<|endoftext|> |
379d4d967fdfdc8476b9ab3e9ec0de20338de71dafd0daebd7d4c44d8c440994 | def setEndTime(self, endTime):
'\n :param endTime: (Optional) \n '
self.endTime = endTime | :param endTime: (Optional) | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/detection/apis/GetSiteMonitorDataPointsRequest.py | setEndTime | Ureimu/weather-robot | 14 | python | def setEndTime(self, endTime):
'\n \n '
self.endTime = endTime | def setEndTime(self, endTime):
'\n \n '
self.endTime = endTime<|docstring|>:param endTime: (Optional)<|endoftext|> |
842b6c9967f75e66d44f66836ee96de2702d275ab03efd81c29f1b5b1549d67d | def __init__(self, input_size, bert_input_size, inference_type='zeroshot', num_topics=10, model_type='prodLDA', hidden_sizes=(100, 100), activation='softplus', dropout=0.2, learn_priors=True, batch_size=64, lr=0.002, momentum=0.99, solver='adam', num_epochs=100, num_samples=10, reduce_on_plateau=False, topic_prior_mean=0.0, topic_prior_variance=None, num_data_loader_workers=0):
"\n :param input_size: int, dimension of input\n :param bert_input_size: int, dimension of input that comes from BERT embeddings\n :param inference_type: string, you can choose between the contextual model and the combined model\n :param num_topics: int, number of topic components, (default 10)\n :param model_type: string, 'prodLDA' or 'LDA' (default 'prodLDA')\n :param hidden_sizes: tuple, length = n_layers, (default (100, 100))\n :param activation: string, 'softplus', 'relu', 'sigmoid', 'swish', 'tanh', 'leakyrelu', 'rrelu', 'elu',\n 'selu' (default 'softplus')\n :param dropout: float, dropout to use (default 0.2)\n :param learn_priors: bool, make priors a learnable parameter (default True)\n :param batch_size: int, size of batch to use for training (default 64)\n :param lr: float, learning rate to use for training (default 2e-3)\n :param momentum: float, momentum to use for training (default 0.99)\n :param solver: string, optimizer 'adam' or 'sgd' (default 'adam')\n :param num_samples: int, number of times theta needs to be sampled\n :param num_epochs: int, number of epochs to train for, (default 100)\n :param reduce_on_plateau: bool, reduce learning rate by 10x on plateau of 10 epochs (default False)\n :param num_data_loader_workers: int, number of data loader workers (default cpu_count). set it to 0 if you are using Windows\n "
assert (isinstance(input_size, int) and (input_size > 0)), 'input_size must by type int > 0.'
assert ((isinstance(num_topics, int) or isinstance(num_topics, np.int64)) and (num_topics > 0)), 'num_topics must by type int > 0.'
assert (model_type in ['LDA', 'prodLDA']), "model must be 'LDA' or 'prodLDA'."
assert isinstance(hidden_sizes, tuple), 'hidden_sizes must be type tuple.'
assert (activation in ['softplus', 'relu', 'sigmoid', 'swish', 'tanh', 'leakyrelu', 'rrelu', 'elu', 'selu']), "activation must be 'softplus', 'relu', 'sigmoid', 'swish', 'leakyrelu', 'rrelu', 'elu', 'selu' or 'tanh'."
assert (dropout >= 0), 'dropout must be >= 0.'
assert (isinstance(batch_size, int) and (batch_size > 0)), 'batch_size must be int > 0.'
assert (lr > 0), 'lr must be > 0.'
assert (isinstance(momentum, float) and (momentum > 0) and (momentum <= 1)), 'momentum must be 0 < float <= 1.'
assert (solver in ['adagrad', 'adam', 'sgd', 'adadelta', 'rmsprop']), "solver must be 'adam', 'adadelta', 'sgd', 'rmsprop' or 'adagrad'"
assert isinstance(reduce_on_plateau, bool), 'reduce_on_plateau must be type bool.'
assert isinstance(topic_prior_mean, float), 'topic_prior_mean must be type float'
self.input_size = input_size
self.num_topics = num_topics
self.model_type = model_type
self.hidden_sizes = hidden_sizes
self.activation = activation
self.dropout = dropout
self.learn_priors = learn_priors
self.batch_size = batch_size
self.lr = lr
self.num_samples = num_samples
self.bert_size = bert_input_size
self.momentum = momentum
self.solver = solver
self.num_epochs = num_epochs
self.reduce_on_plateau = reduce_on_plateau
self.num_data_loader_workers = num_data_loader_workers
self.topic_prior_mean = topic_prior_mean
self.topic_prior_variance = topic_prior_variance
self.model = DecoderNetwork(input_size, self.bert_size, inference_type, num_topics, model_type, hidden_sizes, activation, dropout, self.learn_priors, self.topic_prior_mean, self.topic_prior_variance)
self.early_stopping = EarlyStopping(patience=5, verbose=False)
if (self.solver == 'adam'):
self.optimizer = optim.Adam(self.model.parameters(), lr=lr, betas=(self.momentum, 0.99))
elif (self.solver == 'sgd'):
self.optimizer = optim.SGD(self.model.parameters(), lr=lr, momentum=self.momentum)
elif (self.solver == 'adagrad'):
self.optimizer = optim.Adagrad(self.model.parameters(), lr=lr)
elif (self.solver == 'adadelta'):
self.optimizer = optim.Adadelta(self.model.parameters(), lr=lr)
elif (self.solver == 'rmsprop'):
self.optimizer = optim.RMSprop(self.model.parameters(), lr=lr, momentum=self.momentum)
if self.reduce_on_plateau:
self.scheduler = ReduceLROnPlateau(self.optimizer, patience=10)
self.best_loss_train = float('inf')
self.model_dir = None
self.train_data = None
self.nn_epoch = None
self.best_components = None
if torch.cuda.is_available():
self.USE_CUDA = True
else:
self.USE_CUDA = False
if self.USE_CUDA:
self.model = self.model.cuda() | :param input_size: int, dimension of input
:param bert_input_size: int, dimension of input that comes from BERT embeddings
:param inference_type: string, you can choose between the contextual model and the combined model
:param num_topics: int, number of topic components, (default 10)
:param model_type: string, 'prodLDA' or 'LDA' (default 'prodLDA')
:param hidden_sizes: tuple, length = n_layers, (default (100, 100))
:param activation: string, 'softplus', 'relu', 'sigmoid', 'swish', 'tanh', 'leakyrelu', 'rrelu', 'elu',
'selu' (default 'softplus')
:param dropout: float, dropout to use (default 0.2)
:param learn_priors: bool, make priors a learnable parameter (default True)
:param batch_size: int, size of batch to use for training (default 64)
:param lr: float, learning rate to use for training (default 2e-3)
:param momentum: float, momentum to use for training (default 0.99)
:param solver: string, optimizer 'adam' or 'sgd' (default 'adam')
:param num_samples: int, number of times theta needs to be sampled
:param num_epochs: int, number of epochs to train for, (default 100)
:param reduce_on_plateau: bool, reduce learning rate by 10x on plateau of 10 epochs (default False)
:param num_data_loader_workers: int, number of data loader workers (default cpu_count). set it to 0 if you are using Windows | octis/models/contextualized_topic_models/models/ctm.py | __init__ | lei-liu1/OCTIS | 340 | python | def __init__(self, input_size, bert_input_size, inference_type='zeroshot', num_topics=10, model_type='prodLDA', hidden_sizes=(100, 100), activation='softplus', dropout=0.2, learn_priors=True, batch_size=64, lr=0.002, momentum=0.99, solver='adam', num_epochs=100, num_samples=10, reduce_on_plateau=False, topic_prior_mean=0.0, topic_prior_variance=None, num_data_loader_workers=0):
"\n :param input_size: int, dimension of input\n :param bert_input_size: int, dimension of input that comes from BERT embeddings\n :param inference_type: string, you can choose between the contextual model and the combined model\n :param num_topics: int, number of topic components, (default 10)\n :param model_type: string, 'prodLDA' or 'LDA' (default 'prodLDA')\n :param hidden_sizes: tuple, length = n_layers, (default (100, 100))\n :param activation: string, 'softplus', 'relu', 'sigmoid', 'swish', 'tanh', 'leakyrelu', 'rrelu', 'elu',\n 'selu' (default 'softplus')\n :param dropout: float, dropout to use (default 0.2)\n :param learn_priors: bool, make priors a learnable parameter (default True)\n :param batch_size: int, size of batch to use for training (default 64)\n :param lr: float, learning rate to use for training (default 2e-3)\n :param momentum: float, momentum to use for training (default 0.99)\n :param solver: string, optimizer 'adam' or 'sgd' (default 'adam')\n :param num_samples: int, number of times theta needs to be sampled\n :param num_epochs: int, number of epochs to train for, (default 100)\n :param reduce_on_plateau: bool, reduce learning rate by 10x on plateau of 10 epochs (default False)\n :param num_data_loader_workers: int, number of data loader workers (default cpu_count). set it to 0 if you are using Windows\n "
assert (isinstance(input_size, int) and (input_size > 0)), 'input_size must by type int > 0.'
assert ((isinstance(num_topics, int) or isinstance(num_topics, np.int64)) and (num_topics > 0)), 'num_topics must by type int > 0.'
assert (model_type in ['LDA', 'prodLDA']), "model must be 'LDA' or 'prodLDA'."
assert isinstance(hidden_sizes, tuple), 'hidden_sizes must be type tuple.'
assert (activation in ['softplus', 'relu', 'sigmoid', 'swish', 'tanh', 'leakyrelu', 'rrelu', 'elu', 'selu']), "activation must be 'softplus', 'relu', 'sigmoid', 'swish', 'leakyrelu', 'rrelu', 'elu', 'selu' or 'tanh'."
assert (dropout >= 0), 'dropout must be >= 0.'
assert (isinstance(batch_size, int) and (batch_size > 0)), 'batch_size must be int > 0.'
assert (lr > 0), 'lr must be > 0.'
assert (isinstance(momentum, float) and (momentum > 0) and (momentum <= 1)), 'momentum must be 0 < float <= 1.'
assert (solver in ['adagrad', 'adam', 'sgd', 'adadelta', 'rmsprop']), "solver must be 'adam', 'adadelta', 'sgd', 'rmsprop' or 'adagrad'"
assert isinstance(reduce_on_plateau, bool), 'reduce_on_plateau must be type bool.'
assert isinstance(topic_prior_mean, float), 'topic_prior_mean must be type float'
self.input_size = input_size
self.num_topics = num_topics
self.model_type = model_type
self.hidden_sizes = hidden_sizes
self.activation = activation
self.dropout = dropout
self.learn_priors = learn_priors
self.batch_size = batch_size
self.lr = lr
self.num_samples = num_samples
self.bert_size = bert_input_size
self.momentum = momentum
self.solver = solver
self.num_epochs = num_epochs
self.reduce_on_plateau = reduce_on_plateau
self.num_data_loader_workers = num_data_loader_workers
self.topic_prior_mean = topic_prior_mean
self.topic_prior_variance = topic_prior_variance
self.model = DecoderNetwork(input_size, self.bert_size, inference_type, num_topics, model_type, hidden_sizes, activation, dropout, self.learn_priors, self.topic_prior_mean, self.topic_prior_variance)
self.early_stopping = EarlyStopping(patience=5, verbose=False)
if (self.solver == 'adam'):
self.optimizer = optim.Adam(self.model.parameters(), lr=lr, betas=(self.momentum, 0.99))
elif (self.solver == 'sgd'):
self.optimizer = optim.SGD(self.model.parameters(), lr=lr, momentum=self.momentum)
elif (self.solver == 'adagrad'):
self.optimizer = optim.Adagrad(self.model.parameters(), lr=lr)
elif (self.solver == 'adadelta'):
self.optimizer = optim.Adadelta(self.model.parameters(), lr=lr)
elif (self.solver == 'rmsprop'):
self.optimizer = optim.RMSprop(self.model.parameters(), lr=lr, momentum=self.momentum)
if self.reduce_on_plateau:
self.scheduler = ReduceLROnPlateau(self.optimizer, patience=10)
self.best_loss_train = float('inf')
self.model_dir = None
self.train_data = None
self.nn_epoch = None
self.best_components = None
if torch.cuda.is_available():
self.USE_CUDA = True
else:
self.USE_CUDA = False
if self.USE_CUDA:
self.model = self.model.cuda() | def __init__(self, input_size, bert_input_size, inference_type='zeroshot', num_topics=10, model_type='prodLDA', hidden_sizes=(100, 100), activation='softplus', dropout=0.2, learn_priors=True, batch_size=64, lr=0.002, momentum=0.99, solver='adam', num_epochs=100, num_samples=10, reduce_on_plateau=False, topic_prior_mean=0.0, topic_prior_variance=None, num_data_loader_workers=0):
"\n :param input_size: int, dimension of input\n :param bert_input_size: int, dimension of input that comes from BERT embeddings\n :param inference_type: string, you can choose between the contextual model and the combined model\n :param num_topics: int, number of topic components, (default 10)\n :param model_type: string, 'prodLDA' or 'LDA' (default 'prodLDA')\n :param hidden_sizes: tuple, length = n_layers, (default (100, 100))\n :param activation: string, 'softplus', 'relu', 'sigmoid', 'swish', 'tanh', 'leakyrelu', 'rrelu', 'elu',\n 'selu' (default 'softplus')\n :param dropout: float, dropout to use (default 0.2)\n :param learn_priors: bool, make priors a learnable parameter (default True)\n :param batch_size: int, size of batch to use for training (default 64)\n :param lr: float, learning rate to use for training (default 2e-3)\n :param momentum: float, momentum to use for training (default 0.99)\n :param solver: string, optimizer 'adam' or 'sgd' (default 'adam')\n :param num_samples: int, number of times theta needs to be sampled\n :param num_epochs: int, number of epochs to train for, (default 100)\n :param reduce_on_plateau: bool, reduce learning rate by 10x on plateau of 10 epochs (default False)\n :param num_data_loader_workers: int, number of data loader workers (default cpu_count). set it to 0 if you are using Windows\n "
assert (isinstance(input_size, int) and (input_size > 0)), 'input_size must by type int > 0.'
assert ((isinstance(num_topics, int) or isinstance(num_topics, np.int64)) and (num_topics > 0)), 'num_topics must by type int > 0.'
assert (model_type in ['LDA', 'prodLDA']), "model must be 'LDA' or 'prodLDA'."
assert isinstance(hidden_sizes, tuple), 'hidden_sizes must be type tuple.'
assert (activation in ['softplus', 'relu', 'sigmoid', 'swish', 'tanh', 'leakyrelu', 'rrelu', 'elu', 'selu']), "activation must be 'softplus', 'relu', 'sigmoid', 'swish', 'leakyrelu', 'rrelu', 'elu', 'selu' or 'tanh'."
assert (dropout >= 0), 'dropout must be >= 0.'
assert (isinstance(batch_size, int) and (batch_size > 0)), 'batch_size must be int > 0.'
assert (lr > 0), 'lr must be > 0.'
assert (isinstance(momentum, float) and (momentum > 0) and (momentum <= 1)), 'momentum must be 0 < float <= 1.'
assert (solver in ['adagrad', 'adam', 'sgd', 'adadelta', 'rmsprop']), "solver must be 'adam', 'adadelta', 'sgd', 'rmsprop' or 'adagrad'"
assert isinstance(reduce_on_plateau, bool), 'reduce_on_plateau must be type bool.'
assert isinstance(topic_prior_mean, float), 'topic_prior_mean must be type float'
self.input_size = input_size
self.num_topics = num_topics
self.model_type = model_type
self.hidden_sizes = hidden_sizes
self.activation = activation
self.dropout = dropout
self.learn_priors = learn_priors
self.batch_size = batch_size
self.lr = lr
self.num_samples = num_samples
self.bert_size = bert_input_size
self.momentum = momentum
self.solver = solver
self.num_epochs = num_epochs
self.reduce_on_plateau = reduce_on_plateau
self.num_data_loader_workers = num_data_loader_workers
self.topic_prior_mean = topic_prior_mean
self.topic_prior_variance = topic_prior_variance
self.model = DecoderNetwork(input_size, self.bert_size, inference_type, num_topics, model_type, hidden_sizes, activation, dropout, self.learn_priors, self.topic_prior_mean, self.topic_prior_variance)
self.early_stopping = EarlyStopping(patience=5, verbose=False)
if (self.solver == 'adam'):
self.optimizer = optim.Adam(self.model.parameters(), lr=lr, betas=(self.momentum, 0.99))
elif (self.solver == 'sgd'):
self.optimizer = optim.SGD(self.model.parameters(), lr=lr, momentum=self.momentum)
elif (self.solver == 'adagrad'):
self.optimizer = optim.Adagrad(self.model.parameters(), lr=lr)
elif (self.solver == 'adadelta'):
self.optimizer = optim.Adadelta(self.model.parameters(), lr=lr)
elif (self.solver == 'rmsprop'):
self.optimizer = optim.RMSprop(self.model.parameters(), lr=lr, momentum=self.momentum)
if self.reduce_on_plateau:
self.scheduler = ReduceLROnPlateau(self.optimizer, patience=10)
self.best_loss_train = float('inf')
self.model_dir = None
self.train_data = None
self.nn_epoch = None
self.best_components = None
if torch.cuda.is_available():
self.USE_CUDA = True
else:
self.USE_CUDA = False
if self.USE_CUDA:
self.model = self.model.cuda()<|docstring|>:param input_size: int, dimension of input
:param bert_input_size: int, dimension of input that comes from BERT embeddings
:param inference_type: string, you can choose between the contextual model and the combined model
:param num_topics: int, number of topic components, (default 10)
:param model_type: string, 'prodLDA' or 'LDA' (default 'prodLDA')
:param hidden_sizes: tuple, length = n_layers, (default (100, 100))
:param activation: string, 'softplus', 'relu', 'sigmoid', 'swish', 'tanh', 'leakyrelu', 'rrelu', 'elu',
'selu' (default 'softplus')
:param dropout: float, dropout to use (default 0.2)
:param learn_priors: bool, make priors a learnable parameter (default True)
:param batch_size: int, size of batch to use for training (default 64)
:param lr: float, learning rate to use for training (default 2e-3)
:param momentum: float, momentum to use for training (default 0.99)
:param solver: string, optimizer 'adam' or 'sgd' (default 'adam')
:param num_samples: int, number of times theta needs to be sampled
:param num_epochs: int, number of epochs to train for, (default 100)
:param reduce_on_plateau: bool, reduce learning rate by 10x on plateau of 10 epochs (default False)
:param num_data_loader_workers: int, number of data loader workers (default cpu_count). set it to 0 if you are using Windows<|endoftext|> |
2bdaabb5d612cb6624a58bb596b7d693b20c60f8b81b68c4419e0d86db098be0 | def _train_epoch(self, loader):
'Train epoch.'
self.model.train()
train_loss = 0
samples_processed = 0
topic_doc_list = []
for batch_samples in loader:
X = batch_samples['X']
X = X.reshape(X.shape[0], (- 1))
X_bert = batch_samples['X_bert']
if self.USE_CUDA:
X = X.cuda()
X_bert = X_bert.cuda()
self.model.zero_grad()
(prior_mean, prior_variance, posterior_mean, posterior_variance, posterior_log_variance, word_dists, topic_word, topic_document) = self.model(X, X_bert)
topic_doc_list.extend(topic_document)
loss = self._loss(X, word_dists, prior_mean, prior_variance, posterior_mean, posterior_variance, posterior_log_variance)
loss.backward()
self.optimizer.step()
samples_processed += X.size()[0]
train_loss += loss.item()
train_loss /= samples_processed
return (samples_processed, train_loss, topic_word, topic_doc_list) | Train epoch. | octis/models/contextualized_topic_models/models/ctm.py | _train_epoch | lei-liu1/OCTIS | 340 | python | def _train_epoch(self, loader):
self.model.train()
train_loss = 0
samples_processed = 0
topic_doc_list = []
for batch_samples in loader:
X = batch_samples['X']
X = X.reshape(X.shape[0], (- 1))
X_bert = batch_samples['X_bert']
if self.USE_CUDA:
X = X.cuda()
X_bert = X_bert.cuda()
self.model.zero_grad()
(prior_mean, prior_variance, posterior_mean, posterior_variance, posterior_log_variance, word_dists, topic_word, topic_document) = self.model(X, X_bert)
topic_doc_list.extend(topic_document)
loss = self._loss(X, word_dists, prior_mean, prior_variance, posterior_mean, posterior_variance, posterior_log_variance)
loss.backward()
self.optimizer.step()
samples_processed += X.size()[0]
train_loss += loss.item()
train_loss /= samples_processed
return (samples_processed, train_loss, topic_word, topic_doc_list) | def _train_epoch(self, loader):
self.model.train()
train_loss = 0
samples_processed = 0
topic_doc_list = []
for batch_samples in loader:
X = batch_samples['X']
X = X.reshape(X.shape[0], (- 1))
X_bert = batch_samples['X_bert']
if self.USE_CUDA:
X = X.cuda()
X_bert = X_bert.cuda()
self.model.zero_grad()
(prior_mean, prior_variance, posterior_mean, posterior_variance, posterior_log_variance, word_dists, topic_word, topic_document) = self.model(X, X_bert)
topic_doc_list.extend(topic_document)
loss = self._loss(X, word_dists, prior_mean, prior_variance, posterior_mean, posterior_variance, posterior_log_variance)
loss.backward()
self.optimizer.step()
samples_processed += X.size()[0]
train_loss += loss.item()
train_loss /= samples_processed
return (samples_processed, train_loss, topic_word, topic_doc_list)<|docstring|>Train epoch.<|endoftext|> |
9e3eda9276461c22dd2a545e8a583775c721cee2a3b787e90917153f4af5b34a | def _validation(self, loader):
'Train epoch.'
self.model.eval()
val_loss = 0
samples_processed = 0
for batch_samples in loader:
X = batch_samples['X']
X = X.reshape(X.shape[0], (- 1))
X_bert = batch_samples['X_bert']
if self.USE_CUDA:
X = X.cuda()
X_bert = X_bert.cuda()
self.model.zero_grad()
(prior_mean, prior_variance, posterior_mean, posterior_variance, posterior_log_variance, word_dists, topic_word, topic_document) = self.model(X, X_bert)
loss = self._loss(X, word_dists, prior_mean, prior_variance, posterior_mean, posterior_variance, posterior_log_variance)
samples_processed += X.size()[0]
val_loss += loss.item()
val_loss /= samples_processed
return (samples_processed, val_loss) | Train epoch. | octis/models/contextualized_topic_models/models/ctm.py | _validation | lei-liu1/OCTIS | 340 | python | def _validation(self, loader):
self.model.eval()
val_loss = 0
samples_processed = 0
for batch_samples in loader:
X = batch_samples['X']
X = X.reshape(X.shape[0], (- 1))
X_bert = batch_samples['X_bert']
if self.USE_CUDA:
X = X.cuda()
X_bert = X_bert.cuda()
self.model.zero_grad()
(prior_mean, prior_variance, posterior_mean, posterior_variance, posterior_log_variance, word_dists, topic_word, topic_document) = self.model(X, X_bert)
loss = self._loss(X, word_dists, prior_mean, prior_variance, posterior_mean, posterior_variance, posterior_log_variance)
samples_processed += X.size()[0]
val_loss += loss.item()
val_loss /= samples_processed
return (samples_processed, val_loss) | def _validation(self, loader):
self.model.eval()
val_loss = 0
samples_processed = 0
for batch_samples in loader:
X = batch_samples['X']
X = X.reshape(X.shape[0], (- 1))
X_bert = batch_samples['X_bert']
if self.USE_CUDA:
X = X.cuda()
X_bert = X_bert.cuda()
self.model.zero_grad()
(prior_mean, prior_variance, posterior_mean, posterior_variance, posterior_log_variance, word_dists, topic_word, topic_document) = self.model(X, X_bert)
loss = self._loss(X, word_dists, prior_mean, prior_variance, posterior_mean, posterior_variance, posterior_log_variance)
samples_processed += X.size()[0]
val_loss += loss.item()
val_loss /= samples_processed
return (samples_processed, val_loss)<|docstring|>Train epoch.<|endoftext|> |
b8d92e45728c178b46ffd9fb97e0a2821daea1c75587c99c9690b583615af1b5 | def fit(self, train_dataset, validation_dataset=None, save_dir=None, verbose=True):
'\n Train the CTM model.\n\n :param train_dataset: PyTorch Dataset class for training data.\n :param validation_dataset: PyTorch Dataset class for validation data\n :param save_dir: directory to save checkpoint models to.\n :param verbose: verbose\n '
if verbose:
print('Settings: \n N Components: {}\n Topic Prior Mean: {}\n Topic Prior Variance: {}\n Model Type: {}\n Hidden Sizes: {}\n Activation: {}\n Dropout: {}\n Learn Priors: {}\n Learning Rate: {}\n Momentum: {}\n Reduce On Plateau: {}\n Save Dir: {}'.format(self.num_topics, self.topic_prior_mean, self.topic_prior_variance, self.model_type, self.hidden_sizes, self.activation, self.dropout, self.learn_priors, self.lr, self.momentum, self.reduce_on_plateau, save_dir))
self.model_dir = save_dir
self.train_data = train_dataset
self.validation_data = validation_dataset
train_loader = DataLoader(self.train_data, batch_size=self.batch_size, shuffle=True, num_workers=self.num_data_loader_workers)
train_loss = 0
samples_processed = 0
for epoch in range(self.num_epochs):
self.nn_epoch = epoch
s = datetime.datetime.now()
(sp, train_loss, topic_word, topic_document) = self._train_epoch(train_loader)
samples_processed += sp
e = datetime.datetime.now()
if verbose:
print('Epoch: [{}/{}]\tSamples: [{}/{}]\tTrain Loss: {}\tTime: {}'.format((epoch + 1), self.num_epochs, samples_processed, (len(self.train_data) * self.num_epochs), train_loss, (e - s)))
self.best_components = self.model.beta
self.final_topic_word = topic_word
self.final_topic_document = topic_document
self.best_loss_train = train_loss
if (self.validation_data is not None):
validation_loader = DataLoader(self.validation_data, batch_size=self.batch_size, shuffle=True, num_workers=self.num_data_loader_workers)
s = datetime.datetime.now()
(val_samples_processed, val_loss) = self._validation(validation_loader)
e = datetime.datetime.now()
if verbose:
print('Epoch: [{}/{}]\tSamples: [{}/{}]\tValidation Loss: {}\tTime: {}'.format((epoch + 1), self.num_epochs, val_samples_processed, (len(self.validation_data) * self.num_epochs), val_loss, (e - s)))
if (np.isnan(val_loss) or np.isnan(train_loss)):
break
else:
self.early_stopping(val_loss, self.model)
if self.early_stopping.early_stop:
if verbose:
print('Early stopping')
if (save_dir is not None):
self.save(save_dir)
break | Train the CTM model.
:param train_dataset: PyTorch Dataset class for training data.
:param validation_dataset: PyTorch Dataset class for validation data
:param save_dir: directory to save checkpoint models to.
:param verbose: verbose | octis/models/contextualized_topic_models/models/ctm.py | fit | lei-liu1/OCTIS | 340 | python | def fit(self, train_dataset, validation_dataset=None, save_dir=None, verbose=True):
'\n Train the CTM model.\n\n :param train_dataset: PyTorch Dataset class for training data.\n :param validation_dataset: PyTorch Dataset class for validation data\n :param save_dir: directory to save checkpoint models to.\n :param verbose: verbose\n '
if verbose:
print('Settings: \n N Components: {}\n Topic Prior Mean: {}\n Topic Prior Variance: {}\n Model Type: {}\n Hidden Sizes: {}\n Activation: {}\n Dropout: {}\n Learn Priors: {}\n Learning Rate: {}\n Momentum: {}\n Reduce On Plateau: {}\n Save Dir: {}'.format(self.num_topics, self.topic_prior_mean, self.topic_prior_variance, self.model_type, self.hidden_sizes, self.activation, self.dropout, self.learn_priors, self.lr, self.momentum, self.reduce_on_plateau, save_dir))
self.model_dir = save_dir
self.train_data = train_dataset
self.validation_data = validation_dataset
train_loader = DataLoader(self.train_data, batch_size=self.batch_size, shuffle=True, num_workers=self.num_data_loader_workers)
train_loss = 0
samples_processed = 0
for epoch in range(self.num_epochs):
self.nn_epoch = epoch
s = datetime.datetime.now()
(sp, train_loss, topic_word, topic_document) = self._train_epoch(train_loader)
samples_processed += sp
e = datetime.datetime.now()
if verbose:
print('Epoch: [{}/{}]\tSamples: [{}/{}]\tTrain Loss: {}\tTime: {}'.format((epoch + 1), self.num_epochs, samples_processed, (len(self.train_data) * self.num_epochs), train_loss, (e - s)))
self.best_components = self.model.beta
self.final_topic_word = topic_word
self.final_topic_document = topic_document
self.best_loss_train = train_loss
if (self.validation_data is not None):
validation_loader = DataLoader(self.validation_data, batch_size=self.batch_size, shuffle=True, num_workers=self.num_data_loader_workers)
s = datetime.datetime.now()
(val_samples_processed, val_loss) = self._validation(validation_loader)
e = datetime.datetime.now()
if verbose:
print('Epoch: [{}/{}]\tSamples: [{}/{}]\tValidation Loss: {}\tTime: {}'.format((epoch + 1), self.num_epochs, val_samples_processed, (len(self.validation_data) * self.num_epochs), val_loss, (e - s)))
if (np.isnan(val_loss) or np.isnan(train_loss)):
break
else:
self.early_stopping(val_loss, self.model)
if self.early_stopping.early_stop:
if verbose:
print('Early stopping')
if (save_dir is not None):
self.save(save_dir)
break | def fit(self, train_dataset, validation_dataset=None, save_dir=None, verbose=True):
'\n Train the CTM model.\n\n :param train_dataset: PyTorch Dataset class for training data.\n :param validation_dataset: PyTorch Dataset class for validation data\n :param save_dir: directory to save checkpoint models to.\n :param verbose: verbose\n '
if verbose:
print('Settings: \n N Components: {}\n Topic Prior Mean: {}\n Topic Prior Variance: {}\n Model Type: {}\n Hidden Sizes: {}\n Activation: {}\n Dropout: {}\n Learn Priors: {}\n Learning Rate: {}\n Momentum: {}\n Reduce On Plateau: {}\n Save Dir: {}'.format(self.num_topics, self.topic_prior_mean, self.topic_prior_variance, self.model_type, self.hidden_sizes, self.activation, self.dropout, self.learn_priors, self.lr, self.momentum, self.reduce_on_plateau, save_dir))
self.model_dir = save_dir
self.train_data = train_dataset
self.validation_data = validation_dataset
train_loader = DataLoader(self.train_data, batch_size=self.batch_size, shuffle=True, num_workers=self.num_data_loader_workers)
train_loss = 0
samples_processed = 0
for epoch in range(self.num_epochs):
self.nn_epoch = epoch
s = datetime.datetime.now()
(sp, train_loss, topic_word, topic_document) = self._train_epoch(train_loader)
samples_processed += sp
e = datetime.datetime.now()
if verbose:
print('Epoch: [{}/{}]\tSamples: [{}/{}]\tTrain Loss: {}\tTime: {}'.format((epoch + 1), self.num_epochs, samples_processed, (len(self.train_data) * self.num_epochs), train_loss, (e - s)))
self.best_components = self.model.beta
self.final_topic_word = topic_word
self.final_topic_document = topic_document
self.best_loss_train = train_loss
if (self.validation_data is not None):
validation_loader = DataLoader(self.validation_data, batch_size=self.batch_size, shuffle=True, num_workers=self.num_data_loader_workers)
s = datetime.datetime.now()
(val_samples_processed, val_loss) = self._validation(validation_loader)
e = datetime.datetime.now()
if verbose:
print('Epoch: [{}/{}]\tSamples: [{}/{}]\tValidation Loss: {}\tTime: {}'.format((epoch + 1), self.num_epochs, val_samples_processed, (len(self.validation_data) * self.num_epochs), val_loss, (e - s)))
if (np.isnan(val_loss) or np.isnan(train_loss)):
break
else:
self.early_stopping(val_loss, self.model)
if self.early_stopping.early_stop:
if verbose:
print('Early stopping')
if (save_dir is not None):
self.save(save_dir)
break<|docstring|>Train the CTM model.
:param train_dataset: PyTorch Dataset class for training data.
:param validation_dataset: PyTorch Dataset class for validation data
:param save_dir: directory to save checkpoint models to.
:param verbose: verbose<|endoftext|> |
3b7098fe7a6581b03b118a641ff819e1b80e8f031cd008e930f8fa3ba465f527 | def predict(self, dataset):
'Predict input.'
self.model.eval()
loader = DataLoader(dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_data_loader_workers)
topic_document_mat = []
with torch.no_grad():
for batch_samples in loader:
X = batch_samples['X']
X = X.reshape(X.shape[0], (- 1))
X_bert = batch_samples['X_bert']
if self.USE_CUDA:
X = X.cuda()
X_bert = X_bert.cuda()
self.model.zero_grad()
(_, _, _, _, _, _, _, topic_document) = self.model(X, X_bert)
topic_document_mat.append(topic_document)
results = self.get_info()
results['test-topic-document-matrix'] = np.asarray(self.get_thetas(dataset)).T
return results | Predict input. | octis/models/contextualized_topic_models/models/ctm.py | predict | lei-liu1/OCTIS | 340 | python | def predict(self, dataset):
self.model.eval()
loader = DataLoader(dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_data_loader_workers)
topic_document_mat = []
with torch.no_grad():
for batch_samples in loader:
X = batch_samples['X']
X = X.reshape(X.shape[0], (- 1))
X_bert = batch_samples['X_bert']
if self.USE_CUDA:
X = X.cuda()
X_bert = X_bert.cuda()
self.model.zero_grad()
(_, _, _, _, _, _, _, topic_document) = self.model(X, X_bert)
topic_document_mat.append(topic_document)
results = self.get_info()
results['test-topic-document-matrix'] = np.asarray(self.get_thetas(dataset)).T
return results | def predict(self, dataset):
self.model.eval()
loader = DataLoader(dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_data_loader_workers)
topic_document_mat = []
with torch.no_grad():
for batch_samples in loader:
X = batch_samples['X']
X = X.reshape(X.shape[0], (- 1))
X_bert = batch_samples['X_bert']
if self.USE_CUDA:
X = X.cuda()
X_bert = X_bert.cuda()
self.model.zero_grad()
(_, _, _, _, _, _, _, topic_document) = self.model(X, X_bert)
topic_document_mat.append(topic_document)
results = self.get_info()
results['test-topic-document-matrix'] = np.asarray(self.get_thetas(dataset)).T
return results<|docstring|>Predict input.<|endoftext|> |
a2e4405290942803d5ee1f8e5c534ef9d6954dddbbc3e2e6026bde31e9ff0ac2 | def get_topics(self, k=10):
'\n Retrieve topic words.\n\n Args\n k : (int) number of words to return per topic, default 10.\n '
assert (k <= self.input_size), 'k must be <= input size.'
component_dists = self.best_components
topics = defaultdict(list)
topics_list = []
if (self.num_topics is not None):
for i in range(self.num_topics):
(_, idxs) = torch.topk(component_dists[i], k)
component_words = [self.train_data.idx2token[idx] for idx in idxs.cpu().numpy()]
topics[i] = component_words
topics_list.append(component_words)
return topics_list | Retrieve topic words.
Args
k : (int) number of words to return per topic, default 10. | octis/models/contextualized_topic_models/models/ctm.py | get_topics | lei-liu1/OCTIS | 340 | python | def get_topics(self, k=10):
'\n Retrieve topic words.\n\n Args\n k : (int) number of words to return per topic, default 10.\n '
assert (k <= self.input_size), 'k must be <= input size.'
component_dists = self.best_components
topics = defaultdict(list)
topics_list = []
if (self.num_topics is not None):
for i in range(self.num_topics):
(_, idxs) = torch.topk(component_dists[i], k)
component_words = [self.train_data.idx2token[idx] for idx in idxs.cpu().numpy()]
topics[i] = component_words
topics_list.append(component_words)
return topics_list | def get_topics(self, k=10):
'\n Retrieve topic words.\n\n Args\n k : (int) number of words to return per topic, default 10.\n '
assert (k <= self.input_size), 'k must be <= input size.'
component_dists = self.best_components
topics = defaultdict(list)
topics_list = []
if (self.num_topics is not None):
for i in range(self.num_topics):
(_, idxs) = torch.topk(component_dists[i], k)
component_words = [self.train_data.idx2token[idx] for idx in idxs.cpu().numpy()]
topics[i] = component_words
topics_list.append(component_words)
return topics_list<|docstring|>Retrieve topic words.
Args
k : (int) number of words to return per topic, default 10.<|endoftext|> |
8f40e18fec1dc2b65c2657881df648ac3a76612b6420141dff45f8dc11b72fe3 | def save(self, models_dir=None):
'\n Save model.\n\n :param models_dir: path to directory for saving NN models.\n '
if ((self.model is not None) and (models_dir is not None)):
model_dir = self._format_file()
if (not os.path.isdir(os.path.join(models_dir, model_dir))):
os.makedirs(os.path.join(models_dir, model_dir))
filename = ('epoch_{}'.format(self.nn_epoch) + '.pth')
fileloc = os.path.join(models_dir, model_dir, filename)
with open(fileloc, 'wb') as file:
torch.save({'state_dict': self.model.state_dict(), 'dcue_dict': self.__dict__}, file) | Save model.
:param models_dir: path to directory for saving NN models. | octis/models/contextualized_topic_models/models/ctm.py | save | lei-liu1/OCTIS | 340 | python | def save(self, models_dir=None):
'\n Save model.\n\n :param models_dir: path to directory for saving NN models.\n '
if ((self.model is not None) and (models_dir is not None)):
model_dir = self._format_file()
if (not os.path.isdir(os.path.join(models_dir, model_dir))):
os.makedirs(os.path.join(models_dir, model_dir))
filename = ('epoch_{}'.format(self.nn_epoch) + '.pth')
fileloc = os.path.join(models_dir, model_dir, filename)
with open(fileloc, 'wb') as file:
torch.save({'state_dict': self.model.state_dict(), 'dcue_dict': self.__dict__}, file) | def save(self, models_dir=None):
'\n Save model.\n\n :param models_dir: path to directory for saving NN models.\n '
if ((self.model is not None) and (models_dir is not None)):
model_dir = self._format_file()
if (not os.path.isdir(os.path.join(models_dir, model_dir))):
os.makedirs(os.path.join(models_dir, model_dir))
filename = ('epoch_{}'.format(self.nn_epoch) + '.pth')
fileloc = os.path.join(models_dir, model_dir, filename)
with open(fileloc, 'wb') as file:
torch.save({'state_dict': self.model.state_dict(), 'dcue_dict': self.__dict__}, file)<|docstring|>Save model.
:param models_dir: path to directory for saving NN models.<|endoftext|> |
61addc45f18cc48ce060920087c9984090c1d432c5eaafe4a0c467a13d71c967 | def load(self, model_dir, epoch):
'\n Load a previously trained model.\n\n :param model_dir: directory where models are saved.\n :param epoch: epoch of model to load.\n '
epoch_file = (('epoch_' + str(epoch)) + '.pth')
model_file = os.path.join(model_dir, epoch_file)
with open(model_file, 'rb') as model_dict:
checkpoint = torch.load(model_dict)
for (k, v) in checkpoint['dcue_dict'].items():
setattr(self, k, v)
self.model.load_state_dict(checkpoint['state_dict']) | Load a previously trained model.
:param model_dir: directory where models are saved.
:param epoch: epoch of model to load. | octis/models/contextualized_topic_models/models/ctm.py | load | lei-liu1/OCTIS | 340 | python | def load(self, model_dir, epoch):
'\n Load a previously trained model.\n\n :param model_dir: directory where models are saved.\n :param epoch: epoch of model to load.\n '
epoch_file = (('epoch_' + str(epoch)) + '.pth')
model_file = os.path.join(model_dir, epoch_file)
with open(model_file, 'rb') as model_dict:
checkpoint = torch.load(model_dict)
for (k, v) in checkpoint['dcue_dict'].items():
setattr(self, k, v)
self.model.load_state_dict(checkpoint['state_dict']) | def load(self, model_dir, epoch):
'\n Load a previously trained model.\n\n :param model_dir: directory where models are saved.\n :param epoch: epoch of model to load.\n '
epoch_file = (('epoch_' + str(epoch)) + '.pth')
model_file = os.path.join(model_dir, epoch_file)
with open(model_file, 'rb') as model_dict:
checkpoint = torch.load(model_dict)
for (k, v) in checkpoint['dcue_dict'].items():
setattr(self, k, v)
self.model.load_state_dict(checkpoint['state_dict'])<|docstring|>Load a previously trained model.
:param model_dir: directory where models are saved.
:param epoch: epoch of model to load.<|endoftext|> |
5c46593c210865c21c83ad6cd7480a6836c8735b466ea0a88dfdd269430eead7 | def get_thetas(self, dataset):
'\n Get the document-topic distribution for a dataset of topics. Includes multiple sampling to reduce variation via\n the parameter num_samples.\n :param dataset: a PyTorch Dataset containing the documents\n '
self.model.eval()
loader = DataLoader(dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_data_loader_workers)
final_thetas = []
for sample_index in range(self.num_samples):
with torch.no_grad():
collect_theta = []
for batch_samples in loader:
x = batch_samples['X']
x = x.reshape(x.shape[0], (- 1))
x_bert = batch_samples['X_bert']
if self.USE_CUDA:
x = x.cuda()
x_bert = x_bert.cuda()
self.model.zero_grad()
collect_theta.extend(self.model.get_theta(x, x_bert).cpu().numpy().tolist())
final_thetas.append(np.array(collect_theta))
return (np.sum(final_thetas, axis=0) / self.num_samples) | Get the document-topic distribution for a dataset of topics. Includes multiple sampling to reduce variation via
the parameter num_samples.
:param dataset: a PyTorch Dataset containing the documents | octis/models/contextualized_topic_models/models/ctm.py | get_thetas | lei-liu1/OCTIS | 340 | python | def get_thetas(self, dataset):
'\n Get the document-topic distribution for a dataset of topics. Includes multiple sampling to reduce variation via\n the parameter num_samples.\n :param dataset: a PyTorch Dataset containing the documents\n '
self.model.eval()
loader = DataLoader(dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_data_loader_workers)
final_thetas = []
for sample_index in range(self.num_samples):
with torch.no_grad():
collect_theta = []
for batch_samples in loader:
x = batch_samples['X']
x = x.reshape(x.shape[0], (- 1))
x_bert = batch_samples['X_bert']
if self.USE_CUDA:
x = x.cuda()
x_bert = x_bert.cuda()
self.model.zero_grad()
collect_theta.extend(self.model.get_theta(x, x_bert).cpu().numpy().tolist())
final_thetas.append(np.array(collect_theta))
return (np.sum(final_thetas, axis=0) / self.num_samples) | def get_thetas(self, dataset):
'\n Get the document-topic distribution for a dataset of topics. Includes multiple sampling to reduce variation via\n the parameter num_samples.\n :param dataset: a PyTorch Dataset containing the documents\n '
self.model.eval()
loader = DataLoader(dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_data_loader_workers)
final_thetas = []
for sample_index in range(self.num_samples):
with torch.no_grad():
collect_theta = []
for batch_samples in loader:
x = batch_samples['X']
x = x.reshape(x.shape[0], (- 1))
x_bert = batch_samples['X_bert']
if self.USE_CUDA:
x = x.cuda()
x_bert = x_bert.cuda()
self.model.zero_grad()
collect_theta.extend(self.model.get_theta(x, x_bert).cpu().numpy().tolist())
final_thetas.append(np.array(collect_theta))
return (np.sum(final_thetas, axis=0) / self.num_samples)<|docstring|>Get the document-topic distribution for a dataset of topics. Includes multiple sampling to reduce variation via
the parameter num_samples.
:param dataset: a PyTorch Dataset containing the documents<|endoftext|> |
a62f7fb05fa92b33e3d4ce26d16526b5f571ed352fc38d68c6a14b481afbdf7f | def _get_profile_model_predictions_batch(model, coords, num_tasks, input_func, controls=None, fourier_att_prior_freq_limit=200, fourier_att_prior_freq_limit_softness=0.2, att_prior_grad_smooth_sigma=3, return_losses=False, return_gradients=False):
'\n Fetches the necessary data from the given coordinates or bin indices and\n runs it through a profile or binary model. This will perform computation\n in a single batch.\n Arguments:\n `model`: a trained `ProfilePredictorWithMatchedControls`,\n `ProfilePredictorWithSharedControls`, or\n `ProfilePredictorWithoutControls`\n `coords`: a B x 3 array of coordinates to compute outputs for\n `num_tasks`: number of tasks for the model\n `input_func`: a function that takes in `coords` and returns the\n B x I x 4 array of one-hot sequences and the\n B x (T or T + 1 or 2T) x O x S array of profiles (perhaps with\n controls)\n `controls`: the type of control profiles (if any) used in model; can be\n "matched" (each task has a matched control), "shared" (all tasks\n share a control), or None (no controls); must match the model class\n `fourier_att_prior_freq_limit`: limit for frequencies in Fourier prior\n loss\n `fourier_att_prior_freq_limit_softness`: degree of softness for limit\n `att_prior_grad_smooth_sigma`: width of smoothing kernel for gradients\n `return_losses`: if True, compute/return the loss values\n `return_gradients`: if True, compute/return the input gradients and\n sequences\n Returns a dictionary of the following structure:\n true_profs: true profile raw counts (B x T x O x S)\n log_pred_profs: predicted profile log probabilities (B x T x O x S)\n true_counts: true total counts (B x T x S)\n log_pred_counts: predicted log counts (B x T x S)\n prof_losses: profile NLL losses (B-array), if `return_losses` is True\n count_losses: counts MSE losses (B-array) if `return_losses` is True\n att_losses: prior losses (B-array), if `return_losses` is True\n input_seqs: one-hot input sequences (B x I x 4), if `return_gradients`\n is true\n input_grads: "hypothetical" input gradients (B x I x 4), if\n `return_gradients` is true\n '
result = {}
(input_seqs, profiles) = input_func(coords)
if return_gradients:
input_seqs_np = input_seqs
model.zero_grad()
input_seqs = model_util.place_tensor(torch.tensor(input_seqs)).float()
profiles = model_util.place_tensor(torch.tensor(profiles)).float()
if (controls is not None):
tf_profs = profiles[(:, :num_tasks, :, :)]
cont_profs = profiles[(:, num_tasks:, :, :)]
else:
(tf_profs, cont_profs) = (profiles, None)
if (return_losses or return_gradients):
input_seqs.requires_grad = True
(logit_pred_profs, log_pred_counts) = model(input_seqs, cont_profs)
norm_logit_pred_profs = (logit_pred_profs - torch.mean(logit_pred_profs, dim=2, keepdim=True))
pred_prof_probs = profile_models.profile_logits_to_log_probs(logit_pred_profs).detach()
weighted_norm_logits = (norm_logit_pred_profs * pred_prof_probs)
(input_grads,) = torch.autograd.grad(weighted_norm_logits, input_seqs, grad_outputs=model_util.place_tensor(torch.ones(weighted_norm_logits.size())), retain_graph=True, create_graph=True)
input_grads_np = input_grads.detach().cpu().numpy()
input_seqs.requires_grad = False
else:
(logit_pred_profs, log_pred_counts) = model(input_seqs, cont_profs)
result['true_profs'] = tf_profs.detach().cpu().numpy()
result['true_counts'] = np.sum(result['true_profs'], axis=2)
logit_pred_profs_np = logit_pred_profs.detach().cpu().numpy()
result['log_pred_profs'] = profile_models.profile_logits_to_log_probs(logit_pred_profs_np)
result['log_pred_counts'] = log_pred_counts.detach().cpu().numpy()
if return_losses:
log_pred_profs = profile_models.profile_logits_to_log_probs(logit_pred_profs)
num_samples = log_pred_profs.size(0)
result['prof_losses'] = np.empty(num_samples)
result['count_losses'] = np.empty(num_samples)
result['att_losses'] = np.empty(num_samples)
for i in range(num_samples):
(_, prof_loss, count_loss) = model.correctness_loss(tf_profs[i:(i + 1)], log_pred_profs[i:(i + 1)], log_pred_counts[i:(i + 1)], 1, return_separate_losses=True)
att_loss = model.fourier_att_prior_loss(model_util.place_tensor(torch.ones(1)), input_grads[i:(i + 1)], fourier_att_prior_freq_limit, fourier_att_prior_freq_limit_softness, att_prior_grad_smooth_sigma)
result['prof_losses'][i] = prof_loss
result['count_losses'][i] = count_loss
result['att_losses'][i] = att_loss
if return_gradients:
result['input_seqs'] = input_seqs_np
result['input_grads'] = input_grads_np
return result | Fetches the necessary data from the given coordinates or bin indices and
runs it through a profile or binary model. This will perform computation
in a single batch.
Arguments:
`model`: a trained `ProfilePredictorWithMatchedControls`,
`ProfilePredictorWithSharedControls`, or
`ProfilePredictorWithoutControls`
`coords`: a B x 3 array of coordinates to compute outputs for
`num_tasks`: number of tasks for the model
`input_func`: a function that takes in `coords` and returns the
B x I x 4 array of one-hot sequences and the
B x (T or T + 1 or 2T) x O x S array of profiles (perhaps with
controls)
`controls`: the type of control profiles (if any) used in model; can be
"matched" (each task has a matched control), "shared" (all tasks
share a control), or None (no controls); must match the model class
`fourier_att_prior_freq_limit`: limit for frequencies in Fourier prior
loss
`fourier_att_prior_freq_limit_softness`: degree of softness for limit
`att_prior_grad_smooth_sigma`: width of smoothing kernel for gradients
`return_losses`: if True, compute/return the loss values
`return_gradients`: if True, compute/return the input gradients and
sequences
Returns a dictionary of the following structure:
true_profs: true profile raw counts (B x T x O x S)
log_pred_profs: predicted profile log probabilities (B x T x O x S)
true_counts: true total counts (B x T x S)
log_pred_counts: predicted log counts (B x T x S)
prof_losses: profile NLL losses (B-array), if `return_losses` is True
count_losses: counts MSE losses (B-array) if `return_losses` is True
att_losses: prior losses (B-array), if `return_losses` is True
input_seqs: one-hot input sequences (B x I x 4), if `return_gradients`
is true
input_grads: "hypothetical" input gradients (B x I x 4), if
`return_gradients` is true | src/extract/compute_predictions.py | _get_profile_model_predictions_batch | atseng95/fourier_attribution_priors | 8 | python | def _get_profile_model_predictions_batch(model, coords, num_tasks, input_func, controls=None, fourier_att_prior_freq_limit=200, fourier_att_prior_freq_limit_softness=0.2, att_prior_grad_smooth_sigma=3, return_losses=False, return_gradients=False):
'\n Fetches the necessary data from the given coordinates or bin indices and\n runs it through a profile or binary model. This will perform computation\n in a single batch.\n Arguments:\n `model`: a trained `ProfilePredictorWithMatchedControls`,\n `ProfilePredictorWithSharedControls`, or\n `ProfilePredictorWithoutControls`\n `coords`: a B x 3 array of coordinates to compute outputs for\n `num_tasks`: number of tasks for the model\n `input_func`: a function that takes in `coords` and returns the\n B x I x 4 array of one-hot sequences and the\n B x (T or T + 1 or 2T) x O x S array of profiles (perhaps with\n controls)\n `controls`: the type of control profiles (if any) used in model; can be\n "matched" (each task has a matched control), "shared" (all tasks\n share a control), or None (no controls); must match the model class\n `fourier_att_prior_freq_limit`: limit for frequencies in Fourier prior\n loss\n `fourier_att_prior_freq_limit_softness`: degree of softness for limit\n `att_prior_grad_smooth_sigma`: width of smoothing kernel for gradients\n `return_losses`: if True, compute/return the loss values\n `return_gradients`: if True, compute/return the input gradients and\n sequences\n Returns a dictionary of the following structure:\n true_profs: true profile raw counts (B x T x O x S)\n log_pred_profs: predicted profile log probabilities (B x T x O x S)\n true_counts: true total counts (B x T x S)\n log_pred_counts: predicted log counts (B x T x S)\n prof_losses: profile NLL losses (B-array), if `return_losses` is True\n count_losses: counts MSE losses (B-array) if `return_losses` is True\n att_losses: prior losses (B-array), if `return_losses` is True\n input_seqs: one-hot input sequences (B x I x 4), if `return_gradients`\n is true\n input_grads: "hypothetical" input gradients (B x I x 4), if\n `return_gradients` is true\n '
result = {}
(input_seqs, profiles) = input_func(coords)
if return_gradients:
input_seqs_np = input_seqs
model.zero_grad()
input_seqs = model_util.place_tensor(torch.tensor(input_seqs)).float()
profiles = model_util.place_tensor(torch.tensor(profiles)).float()
if (controls is not None):
tf_profs = profiles[(:, :num_tasks, :, :)]
cont_profs = profiles[(:, num_tasks:, :, :)]
else:
(tf_profs, cont_profs) = (profiles, None)
if (return_losses or return_gradients):
input_seqs.requires_grad = True
(logit_pred_profs, log_pred_counts) = model(input_seqs, cont_profs)
norm_logit_pred_profs = (logit_pred_profs - torch.mean(logit_pred_profs, dim=2, keepdim=True))
pred_prof_probs = profile_models.profile_logits_to_log_probs(logit_pred_profs).detach()
weighted_norm_logits = (norm_logit_pred_profs * pred_prof_probs)
(input_grads,) = torch.autograd.grad(weighted_norm_logits, input_seqs, grad_outputs=model_util.place_tensor(torch.ones(weighted_norm_logits.size())), retain_graph=True, create_graph=True)
input_grads_np = input_grads.detach().cpu().numpy()
input_seqs.requires_grad = False
else:
(logit_pred_profs, log_pred_counts) = model(input_seqs, cont_profs)
result['true_profs'] = tf_profs.detach().cpu().numpy()
result['true_counts'] = np.sum(result['true_profs'], axis=2)
logit_pred_profs_np = logit_pred_profs.detach().cpu().numpy()
result['log_pred_profs'] = profile_models.profile_logits_to_log_probs(logit_pred_profs_np)
result['log_pred_counts'] = log_pred_counts.detach().cpu().numpy()
if return_losses:
log_pred_profs = profile_models.profile_logits_to_log_probs(logit_pred_profs)
num_samples = log_pred_profs.size(0)
result['prof_losses'] = np.empty(num_samples)
result['count_losses'] = np.empty(num_samples)
result['att_losses'] = np.empty(num_samples)
for i in range(num_samples):
(_, prof_loss, count_loss) = model.correctness_loss(tf_profs[i:(i + 1)], log_pred_profs[i:(i + 1)], log_pred_counts[i:(i + 1)], 1, return_separate_losses=True)
att_loss = model.fourier_att_prior_loss(model_util.place_tensor(torch.ones(1)), input_grads[i:(i + 1)], fourier_att_prior_freq_limit, fourier_att_prior_freq_limit_softness, att_prior_grad_smooth_sigma)
result['prof_losses'][i] = prof_loss
result['count_losses'][i] = count_loss
result['att_losses'][i] = att_loss
if return_gradients:
result['input_seqs'] = input_seqs_np
result['input_grads'] = input_grads_np
return result | def _get_profile_model_predictions_batch(model, coords, num_tasks, input_func, controls=None, fourier_att_prior_freq_limit=200, fourier_att_prior_freq_limit_softness=0.2, att_prior_grad_smooth_sigma=3, return_losses=False, return_gradients=False):
'\n Fetches the necessary data from the given coordinates or bin indices and\n runs it through a profile or binary model. This will perform computation\n in a single batch.\n Arguments:\n `model`: a trained `ProfilePredictorWithMatchedControls`,\n `ProfilePredictorWithSharedControls`, or\n `ProfilePredictorWithoutControls`\n `coords`: a B x 3 array of coordinates to compute outputs for\n `num_tasks`: number of tasks for the model\n `input_func`: a function that takes in `coords` and returns the\n B x I x 4 array of one-hot sequences and the\n B x (T or T + 1 or 2T) x O x S array of profiles (perhaps with\n controls)\n `controls`: the type of control profiles (if any) used in model; can be\n "matched" (each task has a matched control), "shared" (all tasks\n share a control), or None (no controls); must match the model class\n `fourier_att_prior_freq_limit`: limit for frequencies in Fourier prior\n loss\n `fourier_att_prior_freq_limit_softness`: degree of softness for limit\n `att_prior_grad_smooth_sigma`: width of smoothing kernel for gradients\n `return_losses`: if True, compute/return the loss values\n `return_gradients`: if True, compute/return the input gradients and\n sequences\n Returns a dictionary of the following structure:\n true_profs: true profile raw counts (B x T x O x S)\n log_pred_profs: predicted profile log probabilities (B x T x O x S)\n true_counts: true total counts (B x T x S)\n log_pred_counts: predicted log counts (B x T x S)\n prof_losses: profile NLL losses (B-array), if `return_losses` is True\n count_losses: counts MSE losses (B-array) if `return_losses` is True\n att_losses: prior losses (B-array), if `return_losses` is True\n input_seqs: one-hot input sequences (B x I x 4), if `return_gradients`\n is true\n input_grads: "hypothetical" input gradients (B x I x 4), if\n `return_gradients` is true\n '
result = {}
(input_seqs, profiles) = input_func(coords)
if return_gradients:
input_seqs_np = input_seqs
model.zero_grad()
input_seqs = model_util.place_tensor(torch.tensor(input_seqs)).float()
profiles = model_util.place_tensor(torch.tensor(profiles)).float()
if (controls is not None):
tf_profs = profiles[(:, :num_tasks, :, :)]
cont_profs = profiles[(:, num_tasks:, :, :)]
else:
(tf_profs, cont_profs) = (profiles, None)
if (return_losses or return_gradients):
input_seqs.requires_grad = True
(logit_pred_profs, log_pred_counts) = model(input_seqs, cont_profs)
norm_logit_pred_profs = (logit_pred_profs - torch.mean(logit_pred_profs, dim=2, keepdim=True))
pred_prof_probs = profile_models.profile_logits_to_log_probs(logit_pred_profs).detach()
weighted_norm_logits = (norm_logit_pred_profs * pred_prof_probs)
(input_grads,) = torch.autograd.grad(weighted_norm_logits, input_seqs, grad_outputs=model_util.place_tensor(torch.ones(weighted_norm_logits.size())), retain_graph=True, create_graph=True)
input_grads_np = input_grads.detach().cpu().numpy()
input_seqs.requires_grad = False
else:
(logit_pred_profs, log_pred_counts) = model(input_seqs, cont_profs)
result['true_profs'] = tf_profs.detach().cpu().numpy()
result['true_counts'] = np.sum(result['true_profs'], axis=2)
logit_pred_profs_np = logit_pred_profs.detach().cpu().numpy()
result['log_pred_profs'] = profile_models.profile_logits_to_log_probs(logit_pred_profs_np)
result['log_pred_counts'] = log_pred_counts.detach().cpu().numpy()
if return_losses:
log_pred_profs = profile_models.profile_logits_to_log_probs(logit_pred_profs)
num_samples = log_pred_profs.size(0)
result['prof_losses'] = np.empty(num_samples)
result['count_losses'] = np.empty(num_samples)
result['att_losses'] = np.empty(num_samples)
for i in range(num_samples):
(_, prof_loss, count_loss) = model.correctness_loss(tf_profs[i:(i + 1)], log_pred_profs[i:(i + 1)], log_pred_counts[i:(i + 1)], 1, return_separate_losses=True)
att_loss = model.fourier_att_prior_loss(model_util.place_tensor(torch.ones(1)), input_grads[i:(i + 1)], fourier_att_prior_freq_limit, fourier_att_prior_freq_limit_softness, att_prior_grad_smooth_sigma)
result['prof_losses'][i] = prof_loss
result['count_losses'][i] = count_loss
result['att_losses'][i] = att_loss
if return_gradients:
result['input_seqs'] = input_seqs_np
result['input_grads'] = input_grads_np
return result<|docstring|>Fetches the necessary data from the given coordinates or bin indices and
runs it through a profile or binary model. This will perform computation
in a single batch.
Arguments:
`model`: a trained `ProfilePredictorWithMatchedControls`,
`ProfilePredictorWithSharedControls`, or
`ProfilePredictorWithoutControls`
`coords`: a B x 3 array of coordinates to compute outputs for
`num_tasks`: number of tasks for the model
`input_func`: a function that takes in `coords` and returns the
B x I x 4 array of one-hot sequences and the
B x (T or T + 1 or 2T) x O x S array of profiles (perhaps with
controls)
`controls`: the type of control profiles (if any) used in model; can be
"matched" (each task has a matched control), "shared" (all tasks
share a control), or None (no controls); must match the model class
`fourier_att_prior_freq_limit`: limit for frequencies in Fourier prior
loss
`fourier_att_prior_freq_limit_softness`: degree of softness for limit
`att_prior_grad_smooth_sigma`: width of smoothing kernel for gradients
`return_losses`: if True, compute/return the loss values
`return_gradients`: if True, compute/return the input gradients and
sequences
Returns a dictionary of the following structure:
true_profs: true profile raw counts (B x T x O x S)
log_pred_profs: predicted profile log probabilities (B x T x O x S)
true_counts: true total counts (B x T x S)
log_pred_counts: predicted log counts (B x T x S)
prof_losses: profile NLL losses (B-array), if `return_losses` is True
count_losses: counts MSE losses (B-array) if `return_losses` is True
att_losses: prior losses (B-array), if `return_losses` is True
input_seqs: one-hot input sequences (B x I x 4), if `return_gradients`
is true
input_grads: "hypothetical" input gradients (B x I x 4), if
`return_gradients` is true<|endoftext|> |
ac07599baea2caea4432c6b3a9d1700a0f75290908d3edfa23c9fdbbaa31076b | def _get_binary_model_predictions_batch(model, bins, input_func, fourier_att_prior_freq_limit=150, fourier_att_prior_freq_limit_softness=0.2, att_prior_grad_smooth_sigma=3, return_losses=False, return_gradients=False):
'\n Arguments:\n `model`: a trained `BinaryPredictor`,\n `bins`: an N-array of bin indices to compute outputs for\n `input_func`: a function that takes in `bins` and returns the B x I x 4\n array of one-hot sequences, the B x T array of output values, and\n B x 3 array of underlying coordinates for the input sequence\n `fourier_att_prior_freq_limit`: limit for frequencies in Fourier prior\n loss\n `fourier_att_prior_freq_limit_softness`: degree of softness for limit\n `att_prior_grad_smooth_sigma`: width of smoothing kernel for gradients\n `return_losses`: if True, compute/return the loss values\n `return_gradients`: if True, compute/return the input gradients and\n sequences\n Returns a dictionary of the following structure:\n true_vals: true binary values (B x T)\n pred_vals: predicted probabilities (B x T)\n coords: coordinates used for prediction (B x 3 object array)\n corr_losses: correctness losses (B-array) if `return_losses` is True\n att_losses: prior losses (B-array), if `return_losses` is True\n input_seqs: one-hot input sequences (B x I x 4), if `return_gradients`\n is True\n input_grads: "hypothetical" input gradients (B x I x 4), if\n `return_gradients` is true\n '
result = {}
(input_seqs, output_vals, coords) = input_func(bins)
output_vals_np = output_vals
if return_gradients:
input_seqs_np = input_seqs
model.zero_grad()
input_seqs = model_util.place_tensor(torch.tensor(input_seqs)).float()
output_vals = model_util.place_tensor(torch.tensor(output_vals)).float()
if (return_losses or return_gradients):
input_seqs.requires_grad = True
logit_pred_vals = model(input_seqs)
(input_grads,) = torch.autograd.grad(logit_pred_vals, input_seqs, grad_outputs=model_util.place_tensor(torch.ones(logit_pred_vals.size())), retain_graph=True, create_graph=True)
input_grads_np = input_grads.detach().cpu().numpy()
input_seqs.requires_grad = False
else:
logit_pred_vals = model(input_seqs)
(status, input_grads) = (None, None)
result['true_vals'] = output_vals_np
logit_pred_vals_np = logit_pred_vals.detach().cpu().numpy()
result['pred_vals'] = binary_models.binary_logits_to_probs(logit_pred_vals_np)
result['coords'] = coords
if return_losses:
num_samples = logit_pred_vals.size(0)
result['corr_losses'] = np.empty(num_samples)
result['att_losses'] = np.empty(num_samples)
for i in range(num_samples):
corr_loss = model.correctness_loss(output_vals[i:(i + 1)], logit_pred_vals[i:(i + 1)], True)
att_loss = model.fourier_att_prior_loss(model_util.place_tensor(torch.ones(1)), input_grads[i:(i + 1)], fourier_att_prior_freq_limit, fourier_att_prior_freq_limit_softness, att_prior_grad_smooth_sigma)
result['corr_losses'][i] = corr_loss
result['att_losses'][i] = att_loss
if return_gradients:
result['input_seqs'] = input_seqs_np
result['input_grads'] = input_grads_np
return result | Arguments:
`model`: a trained `BinaryPredictor`,
`bins`: an N-array of bin indices to compute outputs for
`input_func`: a function that takes in `bins` and returns the B x I x 4
array of one-hot sequences, the B x T array of output values, and
B x 3 array of underlying coordinates for the input sequence
`fourier_att_prior_freq_limit`: limit for frequencies in Fourier prior
loss
`fourier_att_prior_freq_limit_softness`: degree of softness for limit
`att_prior_grad_smooth_sigma`: width of smoothing kernel for gradients
`return_losses`: if True, compute/return the loss values
`return_gradients`: if True, compute/return the input gradients and
sequences
Returns a dictionary of the following structure:
true_vals: true binary values (B x T)
pred_vals: predicted probabilities (B x T)
coords: coordinates used for prediction (B x 3 object array)
corr_losses: correctness losses (B-array) if `return_losses` is True
att_losses: prior losses (B-array), if `return_losses` is True
input_seqs: one-hot input sequences (B x I x 4), if `return_gradients`
is True
input_grads: "hypothetical" input gradients (B x I x 4), if
def _get_binary_model_predictions_batch(
        model, bins, input_func, fourier_att_prior_freq_limit=150,
        fourier_att_prior_freq_limit_softness=0.2,
        att_prior_grad_smooth_sigma=3, return_losses=False,
        return_gradients=False):
    """
    Runs a single batch of bins through a binary model.
    Arguments:
        `model`: a trained `BinaryPredictor`
        `bins`: a B-array of bin indices to compute outputs for
        `input_func`: a function that takes in `bins` and returns the B x I x 4
            array of one-hot sequences, the B x T array of output values, and
            the B x 3 array of underlying coordinates for the input sequences
        `fourier_att_prior_freq_limit`: limit for frequencies in the Fourier
            prior loss
        `fourier_att_prior_freq_limit_softness`: degree of softness for limit
        `att_prior_grad_smooth_sigma`: width of smoothing kernel for gradients
        `return_losses`: if True, compute/return the loss values
        `return_gradients`: if True, compute/return the input gradients and
            sequences
    Returns a dictionary of the following structure:
        true_vals: true binary values (B x T)
        pred_vals: predicted probabilities (B x T)
        coords: coordinates used for prediction (B x 3 object array)
        corr_losses: correctness losses (B-array), if `return_losses` is True
        att_losses: prior losses (B-array), if `return_losses` is True
        input_seqs: one-hot input sequences (B x I x 4), if `return_gradients`
            is True
        input_grads: "hypothetical" input gradients (B x I x 4), if
            `return_gradients` is True
    """
    result = {}
    input_seqs, output_vals, coords = input_func(bins)
    output_vals_np = output_vals  # keep the NumPy copy before tensor conversion
    if return_gradients:
        input_seqs_np = input_seqs  # keep the NumPy copy of the one-hot input
    # Clear any gradients left over from previous use of the model
    model.zero_grad()
    input_seqs = model_util.place_tensor(torch.tensor(input_seqs)).float()
    output_vals = model_util.place_tensor(torch.tensor(output_vals)).float()
    if return_losses or return_gradients:
        input_seqs.requires_grad = True  # needed to get input gradients
        logit_pred_vals = model(input_seqs)
        # Differentiate the sum of all output logits with respect to the
        # input; the graph is retained/created so the attribution-prior loss
        # can be computed from these gradients afterward
        input_grads, = torch.autograd.grad(
            logit_pred_vals, input_seqs,
            grad_outputs=model_util.place_tensor(
                torch.ones(logit_pred_vals.size())
            ),
            retain_graph=True, create_graph=True
        )
        input_grads_np = input_grads.detach().cpu().numpy()
        input_seqs.requires_grad = False  # reset gradient tracking
    else:
        # No gradients or losses requested: a plain forward pass suffices.
        # (The previous version also assigned unused `status`/`input_grads`
        # placeholders here; they were dead code and have been removed.)
        logit_pred_vals = model(input_seqs)
    result["true_vals"] = output_vals_np
    logit_pred_vals_np = logit_pred_vals.detach().cpu().numpy()
    # Convert raw logits into probabilities for the returned predictions
    result["pred_vals"] = binary_models.binary_logits_to_probs(
        logit_pred_vals_np
    )
    result["coords"] = coords
    if return_losses:
        num_samples = logit_pred_vals.size(0)
        result["corr_losses"] = np.empty(num_samples)
        result["att_losses"] = np.empty(num_samples)
        # Losses are computed one example at a time so each example's loss
        # can be reported individually
        for i in range(num_samples):
            corr_loss = model.correctness_loss(
                output_vals[i:i + 1], logit_pred_vals[i:i + 1], True
            )
            att_loss = model.fourier_att_prior_loss(
                model_util.place_tensor(torch.ones(1)),
                input_grads[i:i + 1], fourier_att_prior_freq_limit,
                fourier_att_prior_freq_limit_softness,
                att_prior_grad_smooth_sigma
            )
            result["corr_losses"][i] = corr_loss
            result["att_losses"][i] = att_loss
    if return_gradients:
        result["input_seqs"] = input_seqs_np
        result["input_grads"] = input_grads_np
    return result
def _get_binary_model_predictions_batch(
        model, bins, input_func, fourier_att_prior_freq_limit=150,
        fourier_att_prior_freq_limit_softness=0.2,
        att_prior_grad_smooth_sigma=3, return_losses=False,
        return_gradients=False):
    """
    Runs one batch of bin indices through a binary model and collects the
    model outputs (and optionally losses and input gradients).
    Arguments:
        `model`: a trained `BinaryPredictor`
        `bins`: a B-array of bin indices to compute outputs for
        `input_func`: maps `bins` to (B x I x 4 one-hot sequences,
            B x T output values, B x 3 coordinates)
        `fourier_att_prior_freq_limit`: Fourier prior frequency limit
        `fourier_att_prior_freq_limit_softness`: softness of that limit
        `att_prior_grad_smooth_sigma`: smoothing kernel width for gradients
        `return_losses`: if True, also return per-example loss values
        `return_gradients`: if True, also return input sequences/gradients
    Returns a dictionary with keys: true_vals (B x T), pred_vals (B x T),
    coords (B x 3); plus corr_losses/att_losses (B-arrays) when
    `return_losses` is True, and input_seqs/input_grads (B x I x 4) when
    `return_gradients` is True.
    """
    out = {}
    one_hots, labels, positions = input_func(bins)
    labels_np = labels
    if return_gradients:
        one_hots_np = one_hots
    model.zero_grad()
    one_hots = model_util.place_tensor(torch.tensor(one_hots)).float()
    labels = model_util.place_tensor(torch.tensor(labels)).float()
    need_grads = return_losses or return_gradients
    if need_grads:
        one_hots.requires_grad = True
        logits = model(one_hots)
        ones = model_util.place_tensor(torch.ones(logits.size()))
        grads_tuple = torch.autograd.grad(
            logits, one_hots, grad_outputs=ones, retain_graph=True,
            create_graph=True
        )
        grads = grads_tuple[0]
        grads_np = grads.detach().cpu().numpy()
        one_hots.requires_grad = False
    else:
        logits = model(one_hots)
        status = None
        grads = None
    out["true_vals"] = labels_np
    logits_np = logits.detach().cpu().numpy()
    out["pred_vals"] = binary_models.binary_logits_to_probs(logits_np)
    out["coords"] = positions
    if return_losses:
        batch_len = logits.size(0)
        out["corr_losses"] = np.empty(batch_len)
        out["att_losses"] = np.empty(batch_len)
        for idx in range(batch_len):
            sample = slice(idx, idx + 1)
            out["corr_losses"][idx] = model.correctness_loss(
                labels[sample], logits[sample], True
            )
            out["att_losses"][idx] = model.fourier_att_prior_loss(
                model_util.place_tensor(torch.ones(1)), grads[sample],
                fourier_att_prior_freq_limit,
                fourier_att_prior_freq_limit_softness,
                att_prior_grad_smooth_sigma
            )
    if return_gradients:
        out["input_seqs"] = one_hots_np
        out["input_grads"] = grads_np
    return out
`model`: a trained `BinaryPredictor`,
`bins`: an N-array of bin indices to compute outputs for
`input_func`: a function that takes in `bins` and returns the B x I x 4
array of one-hot sequences, the B x T array of output values, and
B x 3 array of underlying coordinates for the input sequence
`fourier_att_prior_freq_limit`: limit for frequencies in Fourier prior
loss
`fourier_att_prior_freq_limit_softness`: degree of softness for limit
`att_prior_grad_smooth_sigma`: width of smoothing kernel for gradients
`return_losses`: if True, compute/return the loss values
`return_gradients`: if True, compute/return the input gradients and
sequences
Returns a dictionary of the following structure:
true_vals: true binary values (B x T)
pred_vals: predicted probabilities (B x T)
coords: coordinates used for prediction (B x 3 object array)
corr_losses: correctness losses (B-array) if `return_losses` is True
att_losses: prior losses (B-array), if `return_losses` is True
input_seqs: one-hot input sequences (B x I x 4), if `return_gradients`
is True
input_grads: "hypothetical" input gradients (B x I x 4), if
`return_gradients` is true<|endoftext|> |
def get_profile_model_predictions(
        model, coords, num_tasks, input_func, controls=None,
        fourier_att_prior_freq_limit=200,
        fourier_att_prior_freq_limit_softness=0.2,
        att_prior_grad_smooth_sigma=3, return_losses=False,
        return_gradients=False, batch_size=128, show_progress=False):
    """
    Fetches the necessary data from the given coordinates and runs it through
    a profile model, batching internally.
    Arguments:
        `model`: a trained `ProfilePredictorWithMatchedControls`,
            `ProfilePredictorWithSharedControls`, or
            `ProfilePredictorWithoutControls`
        `coords`: an N x 3 array of coordinates to compute outputs for
        `num_tasks`: number of tasks for the model
        `input_func`: a function that takes in `coords` and returns the
            N x I x 4 array of one-hot sequences and the
            N x (T or T + 1 or 2T) x O x S array of profiles (perhaps with
            controls)
        `controls`: the type of control profiles (if any) used in the model;
            can be "matched" (each task has a matched control), "shared" (all
            tasks share a control), or None (no controls); must match the
            model class
        `fourier_att_prior_freq_limit`: limit for frequencies in the Fourier
            prior loss
        `fourier_att_prior_freq_limit_softness`: degree of softness for limit
        `att_prior_grad_smooth_sigma`: width of smoothing kernel for gradients
        `return_losses`: if True, compute/return the loss values
        `return_gradients`: if True, compute/return the input gradients and
            sequences
        `batch_size`: batch size to use for prediction
        `show_progress`: whether or not to show a progress bar over batches
    Returns a dictionary of the following structure:
        true_profs: true profile raw counts (N x T x O x S)
        log_pred_profs: predicted profile log probabilities (N x T x O x S)
        true_counts: true total counts (N x T x S)
        log_pred_counts: predicted log counts (N x T x S)
        prof_losses: profile NLL losses (N-array), if `return_losses` is True
        count_losses: counts MSE losses (N-array), if `return_losses` is True
        att_losses: prior losses (N-array), if `return_losses` is True
        input_seqs: one-hot input sequences (N x I x 4), if `return_gradients`
            is True
        input_grads: "hypothetical" input gradients (N x I x 4), if
            `return_gradients` is True
    Note: the previous docstring listed the loss key as `att_loss`; the key
    actually stored is `att_losses`. If `coords` is empty, the returned
    dictionary is empty (arrays are allocated from the first batch).
    """
    result = {}
    num_examples = len(coords)
    num_batches = int(np.ceil(num_examples / batch_size))
    # Optionally wrap the batch loop in a progress bar
    t_iter = tqdm.trange(num_batches) if show_progress else range(num_batches)
    first_batch = True
    for i in t_iter:
        batch_slice = slice(i * batch_size, (i + 1) * batch_size)
        coords_batch = coords[batch_slice]
        batch_result = _get_profile_model_predictions_batch(
            model, coords_batch, num_tasks, input_func, controls=controls,
            fourier_att_prior_freq_limit=fourier_att_prior_freq_limit,
            fourier_att_prior_freq_limit_softness=(
                fourier_att_prior_freq_limit_softness
            ),
            att_prior_grad_smooth_sigma=att_prior_grad_smooth_sigma,
            return_losses=return_losses, return_gradients=return_gradients
        )
        if first_batch:
            # Allocate the aggregate arrays lazily: the per-example shapes
            # are only known once the first batch has been computed
            result["true_profs"] = np.empty(
                (num_examples,) + batch_result["true_profs"].shape[1:]
            )
            result["log_pred_profs"] = np.empty(
                (num_examples,) + batch_result["log_pred_profs"].shape[1:]
            )
            result["true_counts"] = np.empty(
                (num_examples,) + batch_result["true_counts"].shape[1:]
            )
            result["log_pred_counts"] = np.empty(
                (num_examples,) + batch_result["log_pred_counts"].shape[1:]
            )
            if return_losses:
                result["prof_losses"] = np.empty(num_examples)
                result["count_losses"] = np.empty(num_examples)
                result["att_losses"] = np.empty(num_examples)
            if return_gradients:
                result["input_seqs"] = np.empty(
                    (num_examples,) + batch_result["input_seqs"].shape[1:]
                )
                result["input_grads"] = np.empty(
                    (num_examples,) + batch_result["input_grads"].shape[1:]
                )
            first_batch = False
        # Copy this batch's outputs into the aggregate arrays
        result["true_profs"][batch_slice] = batch_result["true_profs"]
        result["log_pred_profs"][batch_slice] = batch_result["log_pred_profs"]
        result["true_counts"][batch_slice] = batch_result["true_counts"]
        result["log_pred_counts"][batch_slice] = \
            batch_result["log_pred_counts"]
        if return_losses:
            result["prof_losses"][batch_slice] = batch_result["prof_losses"]
            result["count_losses"][batch_slice] = batch_result["count_losses"]
            result["att_losses"][batch_slice] = batch_result["att_losses"]
        if return_gradients:
            result["input_seqs"][batch_slice] = batch_result["input_seqs"]
            result["input_grads"][batch_slice] = batch_result["input_grads"]
    return result
profile model.
Arguments:
`model`: a trained `ProfilePredictorWithMatchedControls`,
`ProfilePredictorWithSharedControls`, or
`ProfilePredictorWithoutControls`
`coords`: a N x 3 array of coordinates to compute outputs for
`num_tasks`: number of tasks for the model
`input_func`: a function that takes in `coords` and returns the
N x I x 4 array of one-hot sequences and the
N x (T or T + 1 or 2T) x O x S array of profiles (perhaps with
controls)
`controls`: the type of control profiles (if any) used in model; can be
"matched" (each task has a matched control), "shared" (all tasks
share a control), or None (no controls); must match the model class
`fourier_att_prior_freq_limit`: limit for frequencies in Fourier prior
loss
`fourier_att_prior_freq_limit_softness`: degree of softness for limit
`att_prior_grad_smooth_sigma`: width of smoothing kernel for gradients
`return_losses`: if True, compute/return the loss values
`return_gradients`: if True, compute/return the input gradients and
sequences
`batch_size`: batch size to use for prediction
`show_progress`: whether or not to show progress bar over batches
Returns a dictionary of the following structure:
true_profs: true profile raw counts (N x T x O x S)
log_pred_profs: predicted profile log probabilities (N x T x O x S)
true_counts: true total counts (N x T x S)
log_pred_counts: predicted log counts (N x T x S)
prof_losses: profile NLL losses (N-array), if `return_losses` is True
count_losses: counts MSE losses (N-array) if `return_losses` is True
att_losses: prior losses (N-array), if `return_losses` is True
input_seqs: one-hot input sequences (N x I x 4), if `return_gradients`
is true
input_grads: "hypothetical" input gradients (N x I x 4), if
def get_profile_model_predictions(
        model, coords, num_tasks, input_func, controls=None,
        fourier_att_prior_freq_limit=200,
        fourier_att_prior_freq_limit_softness=0.2,
        att_prior_grad_smooth_sigma=3, return_losses=False,
        return_gradients=False, batch_size=128, show_progress=False):
    """
    Batches the given coordinates through a profile model and stitches the
    per-batch outputs into whole-set arrays.
    Arguments mirror `_get_profile_model_predictions_batch`, plus
    `batch_size` (prediction batch size) and `show_progress` (whether to show
    a progress bar over batches).
    Returns a dictionary with keys: true_profs (N x T x O x S),
    log_pred_profs (N x T x O x S), true_counts (N x T x S), log_pred_counts
    (N x T x S); plus prof_losses/count_losses/att_losses (N-arrays) when
    `return_losses` is True, and input_seqs/input_grads (N x I x 4) when
    `return_gradients` is True.
    """
    shaped_keys = ("true_profs", "log_pred_profs", "true_counts",
                   "log_pred_counts")
    loss_keys = ("prof_losses", "count_losses", "att_losses")
    grad_keys = ("input_seqs", "input_grads")
    result = {}
    total = len(coords)
    batch_count = int(np.ceil(total / batch_size))
    if show_progress:
        batch_iter = tqdm.trange(batch_count)
    else:
        batch_iter = range(batch_count)
    allocated = False
    for batch_idx in batch_iter:
        sl = slice(batch_idx * batch_size, (batch_idx + 1) * batch_size)
        batch_out = _get_profile_model_predictions_batch(
            model, coords[sl], num_tasks, input_func, controls=controls,
            fourier_att_prior_freq_limit=fourier_att_prior_freq_limit,
            fourier_att_prior_freq_limit_softness=(
                fourier_att_prior_freq_limit_softness
            ),
            att_prior_grad_smooth_sigma=att_prior_grad_smooth_sigma,
            return_losses=return_losses, return_gradients=return_gradients
        )
        if not allocated:
            # Shapes beyond the first axis come from the first batch
            for key in shaped_keys:
                result[key] = np.empty((total,) + batch_out[key].shape[1:])
            if return_losses:
                for key in loss_keys:
                    result[key] = np.empty(total)
            if return_gradients:
                for key in grad_keys:
                    result[key] = np.empty((total,) + batch_out[key].shape[1:])
            allocated = True
        for key in shaped_keys:
            result[key][sl] = batch_out[key]
        if return_losses:
            for key in loss_keys:
                result[key][sl] = batch_out[key]
        if return_gradients:
            for key in grad_keys:
                result[key][sl] = batch_out[key]
    return result
def get_profile_model_predictions(
        model, coords, num_tasks, input_func, controls=None,
        fourier_att_prior_freq_limit=200,
        fourier_att_prior_freq_limit_softness=0.2,
        att_prior_grad_smooth_sigma=3, return_losses=False,
        return_gradients=False, batch_size=128, show_progress=False):
    """
    Runs a profile model over all given coordinates, one batch at a time,
    and returns the concatenated outputs.
    See `_get_profile_model_predictions_batch` for the per-batch arguments;
    `batch_size` controls the prediction batch size and `show_progress`
    toggles a progress bar. Returns a dictionary with keys true_profs,
    log_pred_profs, true_counts, log_pred_counts (always), prof_losses,
    count_losses, att_losses (when `return_losses`), and input_seqs,
    input_grads (when `return_gradients`).
    """
    num_examples = len(coords)
    num_batches = int(np.ceil(num_examples / batch_size))
    iterator = tqdm.trange(num_batches) if show_progress \
        else range(num_batches)
    result = {}
    for i in iterator:
        start = i * batch_size
        end = (i + 1) * batch_size
        batch = _get_profile_model_predictions_batch(
            model, coords[start:end], num_tasks, input_func,
            controls=controls,
            fourier_att_prior_freq_limit=fourier_att_prior_freq_limit,
            fourier_att_prior_freq_limit_softness=(
                fourier_att_prior_freq_limit_softness
            ),
            att_prior_grad_smooth_sigma=att_prior_grad_smooth_sigma,
            return_losses=return_losses, return_gradients=return_gradients
        )
        if i == 0:
            # First batch fixes all trailing dimensions
            result["true_profs"] = np.empty(
                (num_examples,) + batch["true_profs"].shape[1:]
            )
            result["log_pred_profs"] = np.empty(
                (num_examples,) + batch["log_pred_profs"].shape[1:]
            )
            result["true_counts"] = np.empty(
                (num_examples,) + batch["true_counts"].shape[1:]
            )
            result["log_pred_counts"] = np.empty(
                (num_examples,) + batch["log_pred_counts"].shape[1:]
            )
            if return_losses:
                result["prof_losses"] = np.empty(num_examples)
                result["count_losses"] = np.empty(num_examples)
                result["att_losses"] = np.empty(num_examples)
            if return_gradients:
                result["input_seqs"] = np.empty(
                    (num_examples,) + batch["input_seqs"].shape[1:]
                )
                result["input_grads"] = np.empty(
                    (num_examples,) + batch["input_grads"].shape[1:]
                )
        result["true_profs"][start:end] = batch["true_profs"]
        result["log_pred_profs"][start:end] = batch["log_pred_profs"]
        result["true_counts"][start:end] = batch["true_counts"]
        result["log_pred_counts"][start:end] = batch["log_pred_counts"]
        if return_losses:
            result["prof_losses"][start:end] = batch["prof_losses"]
            result["count_losses"][start:end] = batch["count_losses"]
            result["att_losses"][start:end] = batch["att_losses"]
        if return_gradients:
            result["input_seqs"][start:end] = batch["input_seqs"]
            result["input_grads"][start:end] = batch["input_grads"]
    return result
profile model.
Arguments:
`model`: a trained `ProfilePredictorWithMatchedControls`,
`ProfilePredictorWithSharedControls`, or
`ProfilePredictorWithoutControls`
`coords`: a N x 3 array of coordinates to compute outputs for
`num_tasks`: number of tasks for the model
`input_func`: a function that takes in `coords` and returns the
N x I x 4 array of one-hot sequences and the
N x (T or T + 1 or 2T) x O x S array of profiles (perhaps with
controls)
`controls`: the type of control profiles (if any) used in model; can be
"matched" (each task has a matched control), "shared" (all tasks
share a control), or None (no controls); must match the model class
`fourier_att_prior_freq_limit`: limit for frequencies in Fourier prior
loss
`fourier_att_prior_freq_limit_softness`: degree of softness for limit
`att_prior_grad_smooth_sigma`: width of smoothing kernel for gradients
`return_losses`: if True, compute/return the loss values
`return_gradients`: if True, compute/return the input gradients and
sequences
`batch_size`: batch size to use for prediction
`show_progress`: whether or not to show progress bar over batches
Returns a dictionary of the following structure:
true_profs: true profile raw counts (N x T x O x S)
log_pred_profs: predicted profile log probabilities (N x T x O x S)
true_counts: true total counts (N x T x S)
log_pred_counts: predicted log counts (N x T x S)
prof_losses: profile NLL losses (N-array), if `return_losses` is True
count_losses: counts MSE losses (N-array) if `return_losses` is True
att_losses: prior losses (N-array), if `return_losses` is True
input_seqs: one-hot input sequences (N x I x 4), if `return_gradients`
is true
input_grads: "hypothetical" input gradients (N x I x 4), if
`return_gradients` is true<|endoftext|> |
def get_binary_model_predictions(
        model, bins, input_func, fourier_att_prior_freq_limit=150,
        fourier_att_prior_freq_limit_softness=0.2,
        att_prior_grad_smooth_sigma=3, return_losses=False,
        return_gradients=False, batch_size=128, show_progress=False):
    """
    Runs a set of bin indices through a binary model in batches, aggregating
    all per-batch outputs into whole-set arrays.
    Arguments mirror `_get_binary_model_predictions_batch`, plus `batch_size`
    (prediction batch size) and `show_progress` (whether to show a progress
    bar over batches).
    Returns a dictionary with keys: true_vals (N x T), pred_vals (N x T),
    coords (N x 3 object array); plus corr_losses/att_losses (N-arrays) when
    `return_losses` is True, and input_seqs/input_grads (N x I x 4) when
    `return_gradients` is True.
    """
    result = {}
    total = len(bins)
    batch_count = int(np.ceil(total / batch_size))
    if show_progress:
        batch_iter = tqdm.trange(batch_count)
    else:
        batch_iter = range(batch_count)
    allocated = False
    for batch_idx in batch_iter:
        sl = slice(batch_idx * batch_size, (batch_idx + 1) * batch_size)
        batch_out = _get_binary_model_predictions_batch(
            model, bins[sl], input_func,
            fourier_att_prior_freq_limit=fourier_att_prior_freq_limit,
            fourier_att_prior_freq_limit_softness=(
                fourier_att_prior_freq_limit_softness
            ),
            att_prior_grad_smooth_sigma=att_prior_grad_smooth_sigma,
            return_losses=return_losses, return_gradients=return_gradients
        )
        if not allocated:
            # Trailing dimensions come from the first batch's results
            for key in ("true_vals", "pred_vals"):
                result[key] = np.empty((total,) + batch_out[key].shape[1:])
            result["coords"] = np.empty((total, 3), dtype=object)
            if return_losses:
                result["corr_losses"] = np.empty(total)
                result["att_losses"] = np.empty(total)
            if return_gradients:
                for key in ("input_seqs", "input_grads"):
                    result[key] = np.empty((total,) + batch_out[key].shape[1:])
            allocated = True
        for key in ("true_vals", "pred_vals", "coords"):
            result[key][sl] = batch_out[key]
        if return_losses:
            for key in ("corr_losses", "att_losses"):
                result[key][sl] = batch_out[key]
        if return_gradients:
            for key in ("input_seqs", "input_grads"):
                result[key][sl] = batch_out[key]
    return result
binary model.
Arguments:
`model`: a trained `BinaryPredictor`,
`bins`: an N-array of bin indices to compute outputs for
`input_func`: a function that takes in `bins` and returns the B x I x 4
array of one-hot sequences, the B x T array of output values, and
B x 3 array of underlying coordinates for the input sequence
`fourier_att_prior_freq_limit`: limit for frequencies in Fourier prior
loss
`fourier_att_prior_freq_limit_softness`: degree of softness for limit
`att_prior_grad_smooth_sigma`: width of smoothing kernel for gradients
`return_losses`: if True, compute/return the loss values
`return_gradients`: if True, compute/return the input gradients and
sequences
`batch_size`: batch size to use for prediction
`show_progress`: whether or not to show progress bar over batches
Returns a dictionary of the following structure:
true_vals: true binary values (N x T)
pred_vals: predicted probabilities (N x T)
coords: coordinates used for prediction (N x 3 object array)
corr_losses: correctness losses (N-array) if `return_losses` is True
att_losses: prior losses (N-array), if `return_losses` is True
input_seqs: one-hot input sequences (N x I x 4), if `return_gradients`
is true
input_grads: "hypothetical" input gradients (N x I x 4), if
def get_binary_model_predictions(
        model, bins, input_func, fourier_att_prior_freq_limit=150,
        fourier_att_prior_freq_limit_softness=0.2,
        att_prior_grad_smooth_sigma=3, return_losses=False,
        return_gradients=False, batch_size=128, show_progress=False):
    """
    Fetches the data for the given bin indices and runs it through a binary
    model, one batch at a time, concatenating the outputs.
    See `_get_binary_model_predictions_batch` for the per-batch arguments;
    `batch_size` controls the prediction batch size and `show_progress`
    toggles a progress bar. Returns a dictionary with keys true_vals,
    pred_vals, coords (always), corr_losses, att_losses (when
    `return_losses`), and input_seqs, input_grads (when `return_gradients`).
    """
    num_examples = len(bins)
    num_batches = int(np.ceil(num_examples / batch_size))
    iterator = tqdm.trange(num_batches) if show_progress \
        else range(num_batches)
    result = {}
    for i in iterator:
        start = i * batch_size
        end = (i + 1) * batch_size
        batch = _get_binary_model_predictions_batch(
            model, bins[start:end], input_func,
            fourier_att_prior_freq_limit=fourier_att_prior_freq_limit,
            fourier_att_prior_freq_limit_softness=(
                fourier_att_prior_freq_limit_softness
            ),
            att_prior_grad_smooth_sigma=att_prior_grad_smooth_sigma,
            return_losses=return_losses, return_gradients=return_gradients
        )
        if i == 0:
            # First batch determines all trailing dimensions
            result["true_vals"] = np.empty(
                (num_examples,) + batch["true_vals"].shape[1:]
            )
            result["pred_vals"] = np.empty(
                (num_examples,) + batch["pred_vals"].shape[1:]
            )
            result["coords"] = np.empty((num_examples, 3), dtype=object)
            if return_losses:
                result["corr_losses"] = np.empty(num_examples)
                result["att_losses"] = np.empty(num_examples)
            if return_gradients:
                result["input_seqs"] = np.empty(
                    (num_examples,) + batch["input_seqs"].shape[1:]
                )
                result["input_grads"] = np.empty(
                    (num_examples,) + batch["input_grads"].shape[1:]
                )
        result["true_vals"][start:end] = batch["true_vals"]
        result["pred_vals"][start:end] = batch["pred_vals"]
        result["coords"][start:end] = batch["coords"]
        if return_losses:
            result["corr_losses"][start:end] = batch["corr_losses"]
            result["att_losses"][start:end] = batch["att_losses"]
        if return_gradients:
            result["input_seqs"][start:end] = batch["input_seqs"]
            result["input_grads"][start:end] = batch["input_grads"]
    return result
def get_binary_model_predictions(
        model, bins, input_func, fourier_att_prior_freq_limit=150,
        fourier_att_prior_freq_limit_softness=0.2,
        att_prior_grad_smooth_sigma=3, return_losses=False,
        return_gradients=False, batch_size=128, show_progress=False):
    """
    Evaluates a binary model over all given bin indices, batching the work
    and merging the per-batch dictionaries into one.
    Per-batch behavior is delegated to `_get_binary_model_predictions_batch`;
    `batch_size` sets the prediction batch size and `show_progress` enables
    a progress bar. The returned dictionary holds true_vals (N x T),
    pred_vals (N x T), coords (N x 3 object array), corr_losses/att_losses
    (N-arrays, when `return_losses`), and input_seqs/input_grads
    (N x I x 4, when `return_gradients`).
    """
    collected = {}
    n = len(bins)
    steps = int(np.ceil(n / batch_size))
    prog = tqdm.trange(steps) if show_progress else range(steps)
    for step in prog:
        piece = slice(step * batch_size, (step + 1) * batch_size)
        part = _get_binary_model_predictions_batch(
            model, bins[piece], input_func,
            fourier_att_prior_freq_limit=fourier_att_prior_freq_limit,
            fourier_att_prior_freq_limit_softness=(
                fourier_att_prior_freq_limit_softness
            ),
            att_prior_grad_smooth_sigma=att_prior_grad_smooth_sigma,
            return_losses=return_losses, return_gradients=return_gradients
        )
        if not collected:
            # Nothing allocated yet: size the output arrays from this batch
            collected["true_vals"] = np.empty(
                (n,) + part["true_vals"].shape[1:]
            )
            collected["pred_vals"] = np.empty(
                (n,) + part["pred_vals"].shape[1:]
            )
            collected["coords"] = np.empty((n, 3), dtype=object)
            if return_losses:
                collected["corr_losses"] = np.empty(n)
                collected["att_losses"] = np.empty(n)
            if return_gradients:
                collected["input_seqs"] = np.empty(
                    (n,) + part["input_seqs"].shape[1:]
                )
                collected["input_grads"] = np.empty(
                    (n,) + part["input_grads"].shape[1:]
                )
        collected["true_vals"][piece] = part["true_vals"]
        collected["pred_vals"][piece] = part["pred_vals"]
        collected["coords"][piece] = part["coords"]
        if return_losses:
            collected["corr_losses"][piece] = part["corr_losses"]
            collected["att_losses"][piece] = part["att_losses"]
        if return_gradients:
            collected["input_seqs"][piece] = part["input_seqs"]
            collected["input_grads"][piece] = part["input_grads"]
    return collected
binary model.
Arguments:
`model`: a trained `BinaryPredictor`,
`bins`: an N-array of bin indices to compute outputs for
`input_func`: a function that takes in `bins` and returns the B x I x 4
array of one-hot sequences, the B x T array of output values, and
B x 3 array of underlying coordinates for the input sequence
`fourier_att_prior_freq_limit`: limit for frequencies in Fourier prior
loss
`fourier_att_prior_freq_limit_softness`: degree of softness for limit
`att_prior_grad_smooth_sigma`: width of smoothing kernel for gradients
`return_losses`: if True, compute/return the loss values
`return_gradients`: if True, compute/return the input gradients and
sequences
`batch_size`: batch size to use for prediction
`show_progress`: whether or not to show progress bar over batches
Returns a dictionary of the following structure:
true_vals: true binary values (N x T)
pred_vals: predicted probabilities (N x T)
coords: coordinates used for prediction (N x 3 object array)
corr_losses: correctness losses (N-array) if `return_losses` is True
att_losses: prior losses (N-array), if `return_losses` is True
input_seqs: one-hot input sequences (N x I x 4), if `return_gradients`
is true
input_grads: "hypothetical" input gradients (N x I x 4), if
`return_gradients` is true<|endoftext|> |
8b473f8477487a95c98f15133b2fd26d5e8c3fdab6b6c5da30476c290cc3608a | def input_files_from_files_list(files_file: str) -> List[str]:
'\n Takes as input a path to a file containing a list of filepaths (one per\n line, empty lines are ignored). Returns the list of filepaths, all\n prefixed by the path to that listing file (so that the paths in that list file\n may be relative to the list file location).\n '
root = os.path.dirname(files_file)
with open(files_file) as handle:
lines = [f'{root}/{file}' for file in list(filter(None, handle.read().splitlines()))]
return lines | Takes as input a path to a file containing a list of filepaths (one per
line, empty lines are ignored). Returns the list of filepaths, all
prefixed by the path to that listing file (so that the paths in that list file
may be relative to the list file location). | ada/run_analysis.py | input_files_from_files_list | tuxji/RACK | 4 | python | def input_files_from_files_list(files_file: str) -> List[str]:
'\n Takes as input a path to a file containing a list of filepaths (one per\n line, empty lines are ignored). Returns the list of filepaths, all\n prefixed by the path to that listing file (so that the paths in that list file\n may be relative to the list file location).\n '
root = os.path.dirname(files_file)
with open(files_file) as handle:
lines = [f'{root}/{file}' for file in list(filter(None, handle.read().splitlines()))]
return lines | def input_files_from_files_list(files_file: str) -> List[str]:
'\n Takes as input a path to a file containing a list of filepaths (one per\n line, empty lines are ignored). Returns the list of filepaths, all\n prefixed by the path to that listing file (so that the paths in that list file\n may be relative to the list file location).\n '
root = os.path.dirname(files_file)
with open(files_file) as handle:
lines = [f'{root}/{file}' for file in list(filter(None, handle.read().splitlines()))]
return lines<|docstring|>Takes as input a path to a file containing a list of filepaths (one per
line, empty lines are ignored). Returns the list of filepaths, all
prefixed by the path to that listing file (so that the paths in that list file
may be relative to the list file location).<|endoftext|> |
611d0a957419d9d71d98e45d0042380e74d106d9983465b9cea3f89b2ce88ec3 | def register_component(analysis_output: AnalysisOutput, component: GraphNode, component_type: ontology.ComponentTypeIdentifier) -> ontology.SoftwareComponent:
'\n Makes sure that the component is already present in the components\n dictionary. Adds it if necessary.\n '
components = analysis_output['components']
component_identifier = ontology.SoftwareComponentIdentifier(get_node_identifier(component))
if (component_identifier not in components):
components[component_identifier] = ontology.SoftwareComponent(identifier=component_identifier, title=component.doc_name, component_type=ontology.SOURCE_FUNCTION, uri=SOFTWARE_COMPONENT_NS[component_identifier])
return components[component_identifier] | Makes sure that the component is already present in the components
dictionary. Adds it if necessary. | ada/run_analysis.py | register_component | tuxji/RACK | 4 | python | def register_component(analysis_output: AnalysisOutput, component: GraphNode, component_type: ontology.ComponentTypeIdentifier) -> ontology.SoftwareComponent:
'\n Makes sure that the component is already present in the components\n dictionary. Adds it if necessary.\n '
components = analysis_output['components']
component_identifier = ontology.SoftwareComponentIdentifier(get_node_identifier(component))
if (component_identifier not in components):
components[component_identifier] = ontology.SoftwareComponent(identifier=component_identifier, title=component.doc_name, component_type=ontology.SOURCE_FUNCTION, uri=SOFTWARE_COMPONENT_NS[component_identifier])
return components[component_identifier] | def register_component(analysis_output: AnalysisOutput, component: GraphNode, component_type: ontology.ComponentTypeIdentifier) -> ontology.SoftwareComponent:
'\n Makes sure that the component is already present in the components\n dictionary. Adds it if necessary.\n '
components = analysis_output['components']
component_identifier = ontology.SoftwareComponentIdentifier(get_node_identifier(component))
if (component_identifier not in components):
components[component_identifier] = ontology.SoftwareComponent(identifier=component_identifier, title=component.doc_name, component_type=ontology.SOURCE_FUNCTION, uri=SOFTWARE_COMPONENT_NS[component_identifier])
return components[component_identifier]<|docstring|>Makes sure that the component is already present in the components
dictionary. Adds it if necessary.<|endoftext|> |
ff061e32bdbe567ff0d6ee466cff50188737e36ff52f3269be097c60958926d6 | def register_ada_file(analysis_output: AnalysisOutput, file_identifier: Optional[ontology.FileIdentifier]) -> Optional[ontology.File]:
'\n Creates an entry corresponding to \'file_key\', unless it already exists,\n and stores it in "files".\n '
if (file_identifier is None):
return None
files = analysis_output['files']
if (file_identifier not in files):
files[file_identifier] = ontology.File(format_=ADA_FORMAT, identifier=file_identifier, name=file_identifier, uri=FILE_NS[file_identifier])
return files[file_identifier] | Creates an entry corresponding to 'file_key', unless it already exists,
and stores it in "files". | ada/run_analysis.py | register_ada_file | tuxji/RACK | 4 | python | def register_ada_file(analysis_output: AnalysisOutput, file_identifier: Optional[ontology.FileIdentifier]) -> Optional[ontology.File]:
'\n Creates an entry corresponding to \'file_key\', unless it already exists,\n and stores it in "files".\n '
if (file_identifier is None):
return None
files = analysis_output['files']
if (file_identifier not in files):
files[file_identifier] = ontology.File(format_=ADA_FORMAT, identifier=file_identifier, name=file_identifier, uri=FILE_NS[file_identifier])
return files[file_identifier] | def register_ada_file(analysis_output: AnalysisOutput, file_identifier: Optional[ontology.FileIdentifier]) -> Optional[ontology.File]:
'\n Creates an entry corresponding to \'file_key\', unless it already exists,\n and stores it in "files".\n '
if (file_identifier is None):
return None
files = analysis_output['files']
if (file_identifier not in files):
files[file_identifier] = ontology.File(format_=ADA_FORMAT, identifier=file_identifier, name=file_identifier, uri=FILE_NS[file_identifier])
return files[file_identifier]<|docstring|>Creates an entry corresponding to 'file_key', unless it already exists,
and stores it in "files".<|endoftext|> |
5344954d9675f5cac019df7f2011f2b75b5541263b7003f68d69c45a7f10de01 | def as_optional_file_identifier(filename: Optional[str]) -> Optional[ontology.FileIdentifier]:
'Applies the FileIdentifier newtype within an Optional.'
if (filename is None):
return None
return ontology.FileIdentifier(filename) | Applies the FileIdentifier newtype within an Optional. | ada/run_analysis.py | as_optional_file_identifier | tuxji/RACK | 4 | python | def as_optional_file_identifier(filename: Optional[str]) -> Optional[ontology.FileIdentifier]:
if (filename is None):
return None
return ontology.FileIdentifier(filename) | def as_optional_file_identifier(filename: Optional[str]) -> Optional[ontology.FileIdentifier]:
if (filename is None):
return None
return ontology.FileIdentifier(filename)<|docstring|>Applies the FileIdentifier newtype within an Optional.<|endoftext|> |
c9b2be13157fa072922f12a53f6dfa1754d9535f03cb0770d39249bd474ce996 | def make_empty_analysis_output() -> AnalysisOutput:
'Creates an empty output, to be populated by mutation.'
return AnalysisOutput({'component_types': set(), 'components': dict(), 'files': dict(), 'formats': set()}) | Creates an empty output, to be populated by mutation. | ada/run_analysis.py | make_empty_analysis_output | tuxji/RACK | 4 | python | def make_empty_analysis_output() -> AnalysisOutput:
return AnalysisOutput({'component_types': set(), 'components': dict(), 'files': dict(), 'formats': set()}) | def make_empty_analysis_output() -> AnalysisOutput:
return AnalysisOutput({'component_types': set(), 'components': dict(), 'files': dict(), 'formats': set()})<|docstring|>Creates an empty output, to be populated by mutation.<|endoftext|> |
58e17a7ccdd712f9fc93f4e2acea8f00cfe7926b0dffc4bf1abaa40890504330 | def output_as_turtle(analysis_output: AnalysisOutput) -> None:
'\n Outputs the analysis results as Turtle, currently to stdout but could be\n made to output in a file.\n '
graph = Graph()
graph.bind('data:file', FILE_NS)
graph.bind('data:format', FORMAT_NS)
graph.bind('data:software-component', SOFTWARE_COMPONENT_NS)
for format_ in analysis_output['formats']:
format_.add_to_graph(graph)
for file_key in analysis_output['files']:
file = analysis_output['files'][file_key]
file.add_to_graph(graph)
for component_key in analysis_output['components']:
component = analysis_output['components'][component_key]
component.add_to_graph(graph)
sys.stdout.buffer.write(graph.serialize(format='turtle')) | Outputs the analysis results as Turtle, currently to stdout but could be
made to output in a file. | ada/run_analysis.py | output_as_turtle | tuxji/RACK | 4 | python | def output_as_turtle(analysis_output: AnalysisOutput) -> None:
'\n Outputs the analysis results as Turtle, currently to stdout but could be\n made to output in a file.\n '
graph = Graph()
graph.bind('data:file', FILE_NS)
graph.bind('data:format', FORMAT_NS)
graph.bind('data:software-component', SOFTWARE_COMPONENT_NS)
for format_ in analysis_output['formats']:
format_.add_to_graph(graph)
for file_key in analysis_output['files']:
file = analysis_output['files'][file_key]
file.add_to_graph(graph)
for component_key in analysis_output['components']:
component = analysis_output['components'][component_key]
component.add_to_graph(graph)
sys.stdout.buffer.write(graph.serialize(format='turtle')) | def output_as_turtle(analysis_output: AnalysisOutput) -> None:
'\n Outputs the analysis results as Turtle, currently to stdout but could be\n made to output in a file.\n '
graph = Graph()
graph.bind('data:file', FILE_NS)
graph.bind('data:format', FORMAT_NS)
graph.bind('data:software-component', SOFTWARE_COMPONENT_NS)
for format_ in analysis_output['formats']:
format_.add_to_graph(graph)
for file_key in analysis_output['files']:
file = analysis_output['files'][file_key]
file.add_to_graph(graph)
for component_key in analysis_output['components']:
component = analysis_output['components'][component_key]
component.add_to_graph(graph)
sys.stdout.buffer.write(graph.serialize(format='turtle'))<|docstring|>Outputs the analysis results as Turtle, currently to stdout but could be
made to output in a file.<|endoftext|> |
f1259466b60262f39133f5f0aa8665d9d7919791db4ff91b98a73a3b35ad551d | def output_using_scrapingtoolkit(analysis_output: AnalysisOutput) -> None:
'Outputs the analysis output using ScrapingToolKit.'
for component_type in analysis_output['component_types']:
Evidence.Add.COMPONENT_TYPE(identifier=component_type)
for format_ in analysis_output['formats']:
Evidence.Add.FORMAT(identifier=format_.identifier)
files = analysis_output['files']
for file_identifier in files:
file: ontology.File = files[file_identifier]
Evidence.Add.FILE(fileFormat_identifier=format_.identifier, filename=file.name, identifier=file_identifier)
components = analysis_output['components']
for component_identifier in components:
component: ontology.SoftwareComponent = components[component_identifier]
Evidence.Add.SWCOMPONENT(identifier=component_identifier, componentType_identifier=component.component_type, title=escape(component.title), definedIn_identifier=(component.defined_in.identifier if component.defined_in else None))
for callee in component.mentions:
Evidence.Add.SWCOMPONENT(identifier=component_identifier, mentions_identifier=callee.identifier) | Outputs the analysis output using ScrapingToolKit. | ada/run_analysis.py | output_using_scrapingtoolkit | tuxji/RACK | 4 | python | def output_using_scrapingtoolkit(analysis_output: AnalysisOutput) -> None:
for component_type in analysis_output['component_types']:
Evidence.Add.COMPONENT_TYPE(identifier=component_type)
for format_ in analysis_output['formats']:
Evidence.Add.FORMAT(identifier=format_.identifier)
files = analysis_output['files']
for file_identifier in files:
file: ontology.File = files[file_identifier]
Evidence.Add.FILE(fileFormat_identifier=format_.identifier, filename=file.name, identifier=file_identifier)
components = analysis_output['components']
for component_identifier in components:
component: ontology.SoftwareComponent = components[component_identifier]
Evidence.Add.SWCOMPONENT(identifier=component_identifier, componentType_identifier=component.component_type, title=escape(component.title), definedIn_identifier=(component.defined_in.identifier if component.defined_in else None))
for callee in component.mentions:
Evidence.Add.SWCOMPONENT(identifier=component_identifier, mentions_identifier=callee.identifier) | def output_using_scrapingtoolkit(analysis_output: AnalysisOutput) -> None:
for component_type in analysis_output['component_types']:
Evidence.Add.COMPONENT_TYPE(identifier=component_type)
for format_ in analysis_output['formats']:
Evidence.Add.FORMAT(identifier=format_.identifier)
files = analysis_output['files']
for file_identifier in files:
file: ontology.File = files[file_identifier]
Evidence.Add.FILE(fileFormat_identifier=format_.identifier, filename=file.name, identifier=file_identifier)
components = analysis_output['components']
for component_identifier in components:
component: ontology.SoftwareComponent = components[component_identifier]
Evidence.Add.SWCOMPONENT(identifier=component_identifier, componentType_identifier=component.component_type, title=escape(component.title), definedIn_identifier=(component.defined_in.identifier if component.defined_in else None))
for callee in component.mentions:
Evidence.Add.SWCOMPONENT(identifier=component_identifier, mentions_identifier=callee.identifier)<|docstring|>Outputs the analysis output using ScrapingToolKit.<|endoftext|> |
524d36626f27497045a621ffcadb463774b9620ad6f44e853c27a40e3164d6fc | def analyze_traceability(unit: lal.AnalysisUnit) -> None:
'Extracts traceability identifiers from subprograms.'
if (not unit.root):
return
visitor = TE.TraceabilityExtraction(context=context)
visitor.visit(unit.root)
analysis_output = visitor.traceability
for (component_id, requirement_ids) in analysis_output.items():
for requirement_id in requirement_ids:
Evidence.Add.REQUIREMENT(identifier=requirement_id)
Evidence.Add.SWCOMPONENT(identifier=component_id, wasImpactedBy_identifier=requirement_id) | Extracts traceability identifiers from subprograms. | ada/run_analysis.py | analyze_traceability | tuxji/RACK | 4 | python | def analyze_traceability(unit: lal.AnalysisUnit) -> None:
if (not unit.root):
return
visitor = TE.TraceabilityExtraction(context=context)
visitor.visit(unit.root)
analysis_output = visitor.traceability
for (component_id, requirement_ids) in analysis_output.items():
for requirement_id in requirement_ids:
Evidence.Add.REQUIREMENT(identifier=requirement_id)
Evidence.Add.SWCOMPONENT(identifier=component_id, wasImpactedBy_identifier=requirement_id) | def analyze_traceability(unit: lal.AnalysisUnit) -> None:
if (not unit.root):
return
visitor = TE.TraceabilityExtraction(context=context)
visitor.visit(unit.root)
analysis_output = visitor.traceability
for (component_id, requirement_ids) in analysis_output.items():
for requirement_id in requirement_ids:
Evidence.Add.REQUIREMENT(identifier=requirement_id)
Evidence.Add.SWCOMPONENT(identifier=component_id, wasImpactedBy_identifier=requirement_id)<|docstring|>Extracts traceability identifiers from subprograms.<|endoftext|> |
8164e960e856e039c9603ff0f77077b2d98b574fc9dcb9ae09a62a3bc25cec27 | def analyze_structure(unit: lal.AnalysisUnit) -> None:
'Extracts traceability identifiers from subprograms.'
if (not unit.root):
return
visitor = PS.StructureExtractor()
visitor.visit(unit.root)
analysis_output = visitor.packages
for (package, components) in analysis_output.items():
Evidence.Add.SWCOMPONENT(identifier=get_node_identifier(package), title=escape(package.doc_name), componentType_identifier=ontology.MODULE)
for component in components:
Evidence.Add.SWCOMPONENT(identifier=get_node_identifier(component), subcomponentOf_identifier=get_node_identifier(package)) | Extracts traceability identifiers from subprograms. | ada/run_analysis.py | analyze_structure | tuxji/RACK | 4 | python | def analyze_structure(unit: lal.AnalysisUnit) -> None:
if (not unit.root):
return
visitor = PS.StructureExtractor()
visitor.visit(unit.root)
analysis_output = visitor.packages
for (package, components) in analysis_output.items():
Evidence.Add.SWCOMPONENT(identifier=get_node_identifier(package), title=escape(package.doc_name), componentType_identifier=ontology.MODULE)
for component in components:
Evidence.Add.SWCOMPONENT(identifier=get_node_identifier(component), subcomponentOf_identifier=get_node_identifier(package)) | def analyze_structure(unit: lal.AnalysisUnit) -> None:
if (not unit.root):
return
visitor = PS.StructureExtractor()
visitor.visit(unit.root)
analysis_output = visitor.packages
for (package, components) in analysis_output.items():
Evidence.Add.SWCOMPONENT(identifier=get_node_identifier(package), title=escape(package.doc_name), componentType_identifier=ontology.MODULE)
for component in components:
Evidence.Add.SWCOMPONENT(identifier=get_node_identifier(component), subcomponentOf_identifier=get_node_identifier(package))<|docstring|>Extracts traceability identifiers from subprograms.<|endoftext|> |
f792f198891eccbd630ac9a654620ff8f2306dfbf61e24d9072213922a4f29df | def analyze_unit(unit: lal.AnalysisUnit) -> None:
'Computes and displays the static call graph of some unit.'
if unit.root:
if DEBUG:
ada_visitor = AdaPrintVisitor(max_depth=20)
ada_visitor.visit(unit.root)
static_call_graph_visitor = SCG.StaticCallGraphVisitor(context=context, caller_being_defined=None, edges=dict(), nodes=dict())
static_call_graph_visitor.visit(unit.root)
analysis_output = make_empty_analysis_output()
component_types = analysis_output['component_types']
component_types.add(ontology.SOURCE_FUNCTION)
component_types.add(ontology.MODULE)
formats = analysis_output['formats']
formats.add(ADA_FORMAT)
components = analysis_output['components']
for component_key in static_call_graph_visitor.nodes:
component_node = static_call_graph_visitor.nodes[component_key]
component = register_component(analysis_output, component_node, ontology.SOURCE_FUNCTION)
file_ = register_ada_file(analysis_output, as_optional_file_identifier(get_node_file(component_node)))
component.defined_in = file_
for caller_key in static_call_graph_visitor.edges:
caller_node = static_call_graph_visitor.nodes[caller_key]
caller_identifier = ontology.SoftwareComponentIdentifier(get_node_identifier(caller_node))
for callee_key in static_call_graph_visitor.edges[caller_key]:
callee_node = static_call_graph_visitor.nodes[callee_key]
callee_identifier = ontology.SoftwareComponentIdentifier(get_node_identifier(callee_node))
callee = components[callee_identifier]
caller = components[caller_identifier]
caller.add_mention(callee)
if DEBUG_TURTLE:
output_as_turtle(analysis_output)
else:
output_using_scrapingtoolkit(analysis_output)
else:
print('No root found, diagnostics:')
print(unit.diagnostics) | Computes and displays the static call graph of some unit. | ada/run_analysis.py | analyze_unit | tuxji/RACK | 4 | python | def analyze_unit(unit: lal.AnalysisUnit) -> None:
if unit.root:
if DEBUG:
ada_visitor = AdaPrintVisitor(max_depth=20)
ada_visitor.visit(unit.root)
static_call_graph_visitor = SCG.StaticCallGraphVisitor(context=context, caller_being_defined=None, edges=dict(), nodes=dict())
static_call_graph_visitor.visit(unit.root)
analysis_output = make_empty_analysis_output()
component_types = analysis_output['component_types']
component_types.add(ontology.SOURCE_FUNCTION)
component_types.add(ontology.MODULE)
formats = analysis_output['formats']
formats.add(ADA_FORMAT)
components = analysis_output['components']
for component_key in static_call_graph_visitor.nodes:
component_node = static_call_graph_visitor.nodes[component_key]
component = register_component(analysis_output, component_node, ontology.SOURCE_FUNCTION)
file_ = register_ada_file(analysis_output, as_optional_file_identifier(get_node_file(component_node)))
component.defined_in = file_
for caller_key in static_call_graph_visitor.edges:
caller_node = static_call_graph_visitor.nodes[caller_key]
caller_identifier = ontology.SoftwareComponentIdentifier(get_node_identifier(caller_node))
for callee_key in static_call_graph_visitor.edges[caller_key]:
callee_node = static_call_graph_visitor.nodes[callee_key]
callee_identifier = ontology.SoftwareComponentIdentifier(get_node_identifier(callee_node))
callee = components[callee_identifier]
caller = components[caller_identifier]
caller.add_mention(callee)
if DEBUG_TURTLE:
output_as_turtle(analysis_output)
else:
output_using_scrapingtoolkit(analysis_output)
else:
print('No root found, diagnostics:')
print(unit.diagnostics) | def analyze_unit(unit: lal.AnalysisUnit) -> None:
if unit.root:
if DEBUG:
ada_visitor = AdaPrintVisitor(max_depth=20)
ada_visitor.visit(unit.root)
static_call_graph_visitor = SCG.StaticCallGraphVisitor(context=context, caller_being_defined=None, edges=dict(), nodes=dict())
static_call_graph_visitor.visit(unit.root)
analysis_output = make_empty_analysis_output()
component_types = analysis_output['component_types']
component_types.add(ontology.SOURCE_FUNCTION)
component_types.add(ontology.MODULE)
formats = analysis_output['formats']
formats.add(ADA_FORMAT)
components = analysis_output['components']
for component_key in static_call_graph_visitor.nodes:
component_node = static_call_graph_visitor.nodes[component_key]
component = register_component(analysis_output, component_node, ontology.SOURCE_FUNCTION)
file_ = register_ada_file(analysis_output, as_optional_file_identifier(get_node_file(component_node)))
component.defined_in = file_
for caller_key in static_call_graph_visitor.edges:
caller_node = static_call_graph_visitor.nodes[caller_key]
caller_identifier = ontology.SoftwareComponentIdentifier(get_node_identifier(caller_node))
for callee_key in static_call_graph_visitor.edges[caller_key]:
callee_node = static_call_graph_visitor.nodes[callee_key]
callee_identifier = ontology.SoftwareComponentIdentifier(get_node_identifier(callee_node))
callee = components[callee_identifier]
caller = components[caller_identifier]
caller.add_mention(callee)
if DEBUG_TURTLE:
output_as_turtle(analysis_output)
else:
output_using_scrapingtoolkit(analysis_output)
else:
print('No root found, diagnostics:')
print(unit.diagnostics)<|docstring|>Computes and displays the static call graph of some unit.<|endoftext|> |
dab5afff1004f9ce907a4978fe24211a6822b66e66b6ffcd0dc2c968b5753424 | @property
def status_display(self):
'Display text for the status in a user-friendly way'
cur_status = self.status.status
if (cur_status == 'Cancelled'):
return 'Cancelled'
elif (cur_status == 'Declined'):
return 'Declined for approval'
elif (cur_status == 'Recommended'):
return 'Recommended for decision'
return cur_status | Display text for the status in a user-friendly way | backend/api/models/CreditTrade.py | status_display | ActionAnalytics/tfrs | 0 | python | @property
def status_display(self):
cur_status = self.status.status
if (cur_status == 'Cancelled'):
return 'Cancelled'
elif (cur_status == 'Declined'):
return 'Declined for approval'
elif (cur_status == 'Recommended'):
return 'Recommended for decision'
return cur_status | @property
def status_display(self):
cur_status = self.status.status
if (cur_status == 'Cancelled'):
return 'Cancelled'
elif (cur_status == 'Declined'):
return 'Declined for approval'
elif (cur_status == 'Recommended'):
return 'Recommended for decision'
return cur_status<|docstring|>Display text for the status in a user-friendly way<|endoftext|> |
ee096ff5c73b7dcb1067521ced95d442e6fdfa8d116190a517c8f38926353720 | def sanitize_unit(unit, wavelength):
'Sanitize a unit token, either an astropy unit or a string.\n\n Parameters\n ----------\n unit : `astropy.Unit` or `str`\n unit or string version of unit\n wavelength : `astropy.Unit`\n a wavelength unit generated by mkwvl or equivalent code\n\n Returns\n -------\n `astropy.Unit`\n an astropy unit\n\n '
if (not isinstance(unit, all_ap_unit_types)):
if (unit.lower() in ('waves', 'wave', 'λ')):
unit = wavelength
else:
unit = getattr(u, unit)
else:
unit = unit
return unit | Sanitize a unit token, either an astropy unit or a string.
Parameters
----------
unit : `astropy.Unit` or `str`
unit or string version of unit
wavelength : `astropy.Unit`
a wavelength unit generated by mkwvl or equivalent code
Returns
-------
`astropy.Unit`
an astropy unit | prysm/conf.py | sanitize_unit | derekjgriffith/prysm | 1 | python | def sanitize_unit(unit, wavelength):
'Sanitize a unit token, either an astropy unit or a string.\n\n Parameters\n ----------\n unit : `astropy.Unit` or `str`\n unit or string version of unit\n wavelength : `astropy.Unit`\n a wavelength unit generated by mkwvl or equivalent code\n\n Returns\n -------\n `astropy.Unit`\n an astropy unit\n\n '
if (not isinstance(unit, all_ap_unit_types)):
if (unit.lower() in ('waves', 'wave', 'λ')):
unit = wavelength
else:
unit = getattr(u, unit)
else:
unit = unit
return unit | def sanitize_unit(unit, wavelength):
'Sanitize a unit token, either an astropy unit or a string.\n\n Parameters\n ----------\n unit : `astropy.Unit` or `str`\n unit or string version of unit\n wavelength : `astropy.Unit`\n a wavelength unit generated by mkwvl or equivalent code\n\n Returns\n -------\n `astropy.Unit`\n an astropy unit\n\n '
if (not isinstance(unit, all_ap_unit_types)):
if (unit.lower() in ('waves', 'wave', 'λ')):
unit = wavelength
else:
unit = getattr(u, unit)
else:
unit = unit
return unit<|docstring|>Sanitize a unit token, either an astropy unit or a string.
Parameters
----------
unit : `astropy.Unit` or `str`
unit or string version of unit
wavelength : `astropy.Unit`
a wavelength unit generated by mkwvl or equivalent code
Returns
-------
`astropy.Unit`
an astropy unit<|endoftext|> |
7330ec2d78d29b907c06a2f4fbbb7a7ab85ebd450162dd91d7fabb19e1b03657 | def format_unit(unit_or_quantity, fmt):
"(string) format a unit or quantity\n\n Parameters\n ----------\n unit_or_quantity : `astropy.units.Unit` or `astropy.units.Quantity`\n a unit or quantity\n fmt : `str`, {'latex', 'unicode'}\n a string format\n\n Returns\n -------\n `str`\n string\n\n "
if isinstance(unit_or_quantity, all_ap_unit_types):
return unit_or_quantity.to_string(fmt)
elif isinstance(unit_or_quantity, u.quantity.Quantity):
return unit_or_quantity.unit.to_string(fmt)
else:
raise ValueError('must be a Unit or Quantity instance.') | (string) format a unit or quantity
Parameters
----------
unit_or_quantity : `astropy.units.Unit` or `astropy.units.Quantity`
a unit or quantity
fmt : `str`, {'latex', 'unicode'}
a string format
Returns
-------
`str`
string | prysm/conf.py | format_unit | derekjgriffith/prysm | 1 | python | def format_unit(unit_or_quantity, fmt):
"(string) format a unit or quantity\n\n Parameters\n ----------\n unit_or_quantity : `astropy.units.Unit` or `astropy.units.Quantity`\n a unit or quantity\n fmt : `str`, {'latex', 'unicode'}\n a string format\n\n Returns\n -------\n `str`\n string\n\n "
if isinstance(unit_or_quantity, all_ap_unit_types):
return unit_or_quantity.to_string(fmt)
elif isinstance(unit_or_quantity, u.quantity.Quantity):
return unit_or_quantity.unit.to_string(fmt)
else:
raise ValueError('must be a Unit or Quantity instance.') | def format_unit(unit_or_quantity, fmt):
"(string) format a unit or quantity\n\n Parameters\n ----------\n unit_or_quantity : `astropy.units.Unit` or `astropy.units.Quantity`\n a unit or quantity\n fmt : `str`, {'latex', 'unicode'}\n a string format\n\n Returns\n -------\n `str`\n string\n\n "
if isinstance(unit_or_quantity, all_ap_unit_types):
return unit_or_quantity.to_string(fmt)
elif isinstance(unit_or_quantity, u.quantity.Quantity):
return unit_or_quantity.unit.to_string(fmt)
else:
raise ValueError('must be a Unit or Quantity instance.')<|docstring|>(string) format a unit or quantity
Parameters
----------
unit_or_quantity : `astropy.units.Unit` or `astropy.units.Quantity`
a unit or quantity
fmt : `str`, {'latex', 'unicode'}
a string format
Returns
-------
`str`
string<|endoftext|> |
511de70eeacfc2963ec33745c12ab78056114b194ea0bb447080014f50bb6171 | def __init__(self, xy_base, z, xy_additions=['X', 'Y'], xy_addition_side='right', addition_joiner=' ', unit_prefix='[', unit_suffix=']', unit_joiner=' '):
"Create a new Labels instance\n\n Parameters\n ----------\n xy_base : `str`\n basic string used to build the X and Y labels\n z : `str`\n z label, stored as self._z to avoid clash with self.z()\n xy_additions : iterable, optional\n text to add to the (x, y) labels\n xy_addition_side : {'left', 'right'. 'l', 'r'}, optional\n side to add the x and y additional text to, left or right\n addition_joiner : `str`, optional\n text used to join the x or y addition\n unit_prefix : `str`, optional\n prefix used to surround the unit text\n unit_suffix : `str`, optional\n suffix used to surround the unit text\n unit_joiner : `str`, optional\n text used to combine the base label and the unit\n "
(self.xy_base, self._z) = (xy_base, z)
(self.xy_additions, self.xy_addition_side) = (xy_additions, xy_addition_side)
self.addition_joiner = addition_joiner
(self.unit_prefix, self.unit_suffix) = (unit_prefix, unit_suffix)
self.unit_joiner = unit_joiner | Create a new Labels instance
Parameters
----------
xy_base : `str`
basic string used to build the X and Y labels
z : `str`
z label, stored as self._z to avoid clash with self.z()
xy_additions : iterable, optional
text to add to the (x, y) labels
xy_addition_side : {'left', 'right'. 'l', 'r'}, optional
side to add the x and y additional text to, left or right
addition_joiner : `str`, optional
text used to join the x or y addition
unit_prefix : `str`, optional
prefix used to surround the unit text
unit_suffix : `str`, optional
suffix used to surround the unit text
unit_joiner : `str`, optional
text used to combine the base label and the unit | prysm/conf.py | __init__ | derekjgriffith/prysm | 1 | python | def __init__(self, xy_base, z, xy_additions=['X', 'Y'], xy_addition_side='right', addition_joiner=' ', unit_prefix='[', unit_suffix=']', unit_joiner=' '):
"Create a new Labels instance\n\n Parameters\n ----------\n xy_base : `str`\n basic string used to build the X and Y labels\n z : `str`\n z label, stored as self._z to avoid clash with self.z()\n xy_additions : iterable, optional\n text to add to the (x, y) labels\n xy_addition_side : {'left', 'right'. 'l', 'r'}, optional\n side to add the x and y additional text to, left or right\n addition_joiner : `str`, optional\n text used to join the x or y addition\n unit_prefix : `str`, optional\n prefix used to surround the unit text\n unit_suffix : `str`, optional\n suffix used to surround the unit text\n unit_joiner : `str`, optional\n text used to combine the base label and the unit\n "
(self.xy_base, self._z) = (xy_base, z)
(self.xy_additions, self.xy_addition_side) = (xy_additions, xy_addition_side)
self.addition_joiner = addition_joiner
(self.unit_prefix, self.unit_suffix) = (unit_prefix, unit_suffix)
self.unit_joiner = unit_joiner | def __init__(self, xy_base, z, xy_additions=['X', 'Y'], xy_addition_side='right', addition_joiner=' ', unit_prefix='[', unit_suffix=']', unit_joiner=' '):
"Create a new Labels instance\n\n Parameters\n ----------\n xy_base : `str`\n basic string used to build the X and Y labels\n z : `str`\n z label, stored as self._z to avoid clash with self.z()\n xy_additions : iterable, optional\n text to add to the (x, y) labels\n xy_addition_side : {'left', 'right'. 'l', 'r'}, optional\n side to add the x and y additional text to, left or right\n addition_joiner : `str`, optional\n text used to join the x or y addition\n unit_prefix : `str`, optional\n prefix used to surround the unit text\n unit_suffix : `str`, optional\n suffix used to surround the unit text\n unit_joiner : `str`, optional\n text used to combine the base label and the unit\n "
(self.xy_base, self._z) = (xy_base, z)
(self.xy_additions, self.xy_addition_side) = (xy_additions, xy_addition_side)
self.addition_joiner = addition_joiner
(self.unit_prefix, self.unit_suffix) = (unit_prefix, unit_suffix)
self.unit_joiner = unit_joiner<|docstring|>Create a new Labels instance
Parameters
----------
xy_base : `str`
basic string used to build the X and Y labels
z : `str`
z label, stored as self._z to avoid clash with self.z()
xy_additions : iterable, optional
text to add to the (x, y) labels
xy_addition_side : {'left', 'right'. 'l', 'r'}, optional
side to add the x and y additional text to, left or right
addition_joiner : `str`, optional
text used to join the x or y addition
unit_prefix : `str`, optional
prefix used to surround the unit text
unit_suffix : `str`, optional
suffix used to surround the unit text
unit_joiner : `str`, optional
text used to combine the base label and the unit<|endoftext|> |
83430b65af5141d25a1b1123c65389a94862849842c601814310d26af569f8c9 | def _label_factory(self, label, xy_unit, z_unit):
"Factory method to produce complex labels.\n\n Parameters\n ----------\n label : `str`, {'x', 'y', 'z'}\n label to produce\n\n Returns\n -------\n `str`\n completed label\n\n "
if (label in ('x', 'y')):
if (label == 'x'):
xy_pos = 0
else:
xy_pos = 1
label_basics = [self.xy_base]
if (self.xy_addition_side.lower() in ('left', 'l')):
label_basics.insert(0, self.xy_additions[xy_pos])
else:
label_basics.append(self.xy_additions[xy_pos])
label_ = self.addition_joiner.join(label_basics)
unit_str = format_unit(xy_unit, config.unit_format)
else:
label_ = self._z
unit_str = format_unit(z_unit, config.unit_format)
unit_text = ''
if config.show_units:
unit_text = unit_text.join([self.unit_prefix, unit_str, self.unit_suffix])
label_ = self.unit_joiner.join([label_, unit_text])
return label_ | Factory method to produce complex labels.
Parameters
----------
label : `str`, {'x', 'y', 'z'}
label to produce
Returns
-------
`str`
completed label | prysm/conf.py | _label_factory | derekjgriffith/prysm | 1 | python | def _label_factory(self, label, xy_unit, z_unit):
"Factory method to produce complex labels.\n\n Parameters\n ----------\n label : `str`, {'x', 'y', 'z'}\n label to produce\n\n Returns\n -------\n `str`\n completed label\n\n "
if (label in ('x', 'y')):
if (label == 'x'):
xy_pos = 0
else:
xy_pos = 1
label_basics = [self.xy_base]
if (self.xy_addition_side.lower() in ('left', 'l')):
label_basics.insert(0, self.xy_additions[xy_pos])
else:
label_basics.append(self.xy_additions[xy_pos])
label_ = self.addition_joiner.join(label_basics)
unit_str = format_unit(xy_unit, config.unit_format)
else:
label_ = self._z
unit_str = format_unit(z_unit, config.unit_format)
unit_text =
if config.show_units:
unit_text = unit_text.join([self.unit_prefix, unit_str, self.unit_suffix])
label_ = self.unit_joiner.join([label_, unit_text])
return label_ | def _label_factory(self, label, xy_unit, z_unit):
"Factory method to produce complex labels.\n\n Parameters\n ----------\n label : `str`, {'x', 'y', 'z'}\n label to produce\n\n Returns\n -------\n `str`\n completed label\n\n "
if (label in ('x', 'y')):
if (label == 'x'):
xy_pos = 0
else:
xy_pos = 1
label_basics = [self.xy_base]
if (self.xy_addition_side.lower() in ('left', 'l')):
label_basics.insert(0, self.xy_additions[xy_pos])
else:
label_basics.append(self.xy_additions[xy_pos])
label_ = self.addition_joiner.join(label_basics)
unit_str = format_unit(xy_unit, config.unit_format)
else:
label_ = self._z
unit_str = format_unit(z_unit, config.unit_format)
unit_text =
if config.show_units:
unit_text = unit_text.join([self.unit_prefix, unit_str, self.unit_suffix])
label_ = self.unit_joiner.join([label_, unit_text])
return label_<|docstring|>Factory method to produce complex labels.
Parameters
----------
label : `str`, {'x', 'y', 'z'}
label to produce
Returns
-------
`str`
completed label<|endoftext|> |
62db53d2caf5b370e7d8a3ff18a00e99590bd726bdc56244084903c79a7c9880 | def x(self, xy_unit, z_unit):
'X label.'
return self._label_factory('x', xy_unit, z_unit) | X label. | prysm/conf.py | x | derekjgriffith/prysm | 1 | python | def x(self, xy_unit, z_unit):
return self._label_factory('x', xy_unit, z_unit) | def x(self, xy_unit, z_unit):
return self._label_factory('x', xy_unit, z_unit)<|docstring|>X label.<|endoftext|> |
7a24f6795271527ea828d1d9b42f9a35cc861fe54791a0b8ddd9487609f0c11e | def y(self, xy_unit, z_unit):
'Y label.'
return self._label_factory('y', xy_unit, z_unit) | Y label. | prysm/conf.py | y | derekjgriffith/prysm | 1 | python | def y(self, xy_unit, z_unit):
return self._label_factory('y', xy_unit, z_unit) | def y(self, xy_unit, z_unit):
return self._label_factory('y', xy_unit, z_unit)<|docstring|>Y label.<|endoftext|> |
03c65331d5207384c0825b51a3c8355a8ba58343bb4f2122de12a3b8a3979d5a | def z(self, xy_unit, z_unit):
'Z label.'
return self._label_factory('z', xy_unit, z_unit) | Z label. | prysm/conf.py | z | derekjgriffith/prysm | 1 | python | def z(self, xy_unit, z_unit):
return self._label_factory('z', xy_unit, z_unit) | def z(self, xy_unit, z_unit):
return self._label_factory('z', xy_unit, z_unit)<|docstring|>Z label.<|endoftext|> |
73d7a1c623783272878e70e16ab24a0342964f5ed6f185f21a879f72e3b11d9d | def generic(self, xy_unit, z_unit):
'Generic label without extra X/Y annotation.'
base = self.xy_base
join = self.unit_joiner
unit = format_unit(xy_unit, config.unit_format)
prefix = self.unit_prefix
suffix = self.unit_suffix
return f'{base}{join}{prefix}{unit}{suffix}' | Generic label without extra X/Y annotation. | prysm/conf.py | generic | derekjgriffith/prysm | 1 | python | def generic(self, xy_unit, z_unit):
base = self.xy_base
join = self.unit_joiner
unit = format_unit(xy_unit, config.unit_format)
prefix = self.unit_prefix
suffix = self.unit_suffix
return f'{base}{join}{prefix}{unit}{suffix}' | def generic(self, xy_unit, z_unit):
base = self.xy_base
join = self.unit_joiner
unit = format_unit(xy_unit, config.unit_format)
prefix = self.unit_prefix
suffix = self.unit_suffix
return f'{base}{join}{prefix}{unit}{suffix}'<|docstring|>Generic label without extra X/Y annotation.<|endoftext|> |
561df7b90561cb76f206c1f576b91992bcb62cef1f49e2be220da727b8f28c4e | def __init__(self, precision=64, backend=np, zernike_base=1, Q=2, wavelength=HeNe, phase_cmap='inferno', image_cmap='Greys_r', lw=3, zorder=3, alpha=1, interpolation='lanczos', unit_format='latex_inline', show_units=True, phase_xy_unit=u.mm, phase_z_unit=u.nm, image_xy_unit=u.um, image_z_unit=u.adu, mtf_xy_unit=(u.mm ** (- 1)), mtf_z_unit=rel, ptf_xy_unit=(u.mm ** (- 1)), ptf_z_unit=u.deg, pupil_labels=default_pupil_labels, interferogram_labels=default_interferogram_labels, convolvable_labels=default_convolvable_labels, mtf_labels=default_mtf_labels, ptf_labels=default_ptf_labels, psd_labels=default_psd_labels):
'Create a new `Config` object.\n\n Parameters\n ----------\n precision : `int`\n 32 or 64, number of bits of precision\n backend : `str`, {\'np\'}\n a supported backend. Current options are only "np" for numpy\n zernike_base : `int`, {0, 1}\n base for zernikes; start at 0 or 1\n Q : `float`\n oversampling parameter for numerical propagations\n phase_cmap : `str`\n colormap used for plotting optical phases\n image_cmap : `str`\n colormap used for plotting greyscale images\n lw : `float`\n linewidth\n zorder : `int`, optional\n zorder used for graphics made with matplotlib\n interpolation : `str`\n interpolation type for 2D plots\n unit_formatter : `str`, optional\n string passed to astropy.units.(unit).to_string\n xylabel_joiner : `str`, optional\n text used to glue together X/Y units and their basic string\n unit_prefix : `str`, optional\n text preceeding the unit\'s representation, after the joiner\n unit_suffix : `str`, optional\n text following the unit\'s representation\n unit_joiner : `str`, optional\n text used to glue basic labels and the units together\n show_units : `bool`, optional\n if True, shows units on graphics\n phase_units : `Units`\n default units used for phase-like types\n image_units : `Units`\n default units used for image-like types\n\n '
self.initialized = False
self.precision = precision
self.backend = backend
self.zernike_base = zernike_base
self.chbackend_observers = []
self.Q = Q
self.wavelength = wavelength
self.phase_cmap = phase_cmap
self.image_cmap = image_cmap
self.lw = lw
self.zorder = zorder
self.alpha = alpha
self.interpolation = interpolation
self.unit_format = unit_format
self.show_units = show_units
self.phase_xy_unit = phase_xy_unit
self.phase_z_unit = phase_z_unit
self.image_xy_unit = image_xy_unit
self.image_z_unit = image_z_unit
self.mtf_xy_unit = mtf_xy_unit
self.mtf_z_unit = mtf_z_unit
self.ptf_xy_unit = ptf_xy_unit
self.ptf_z_unit = ptf_z_unit
self.pupil_labels = pupil_labels
self.interferogram_labels = interferogram_labels
self.convolvable_labels = convolvable_labels
self.mtf_labels = mtf_labels
self.ptf_labels = ptf_labels
self.psd_labels = psd_labels
self.initialized = True | Create a new `Config` object.
Parameters
----------
precision : `int`
32 or 64, number of bits of precision
backend : `str`, {'np'}
a supported backend. Current options are only "np" for numpy
zernike_base : `int`, {0, 1}
base for zernikes; start at 0 or 1
Q : `float`
oversampling parameter for numerical propagations
phase_cmap : `str`
colormap used for plotting optical phases
image_cmap : `str`
colormap used for plotting greyscale images
lw : `float`
linewidth
zorder : `int`, optional
zorder used for graphics made with matplotlib
interpolation : `str`
interpolation type for 2D plots
unit_formatter : `str`, optional
string passed to astropy.units.(unit).to_string
xylabel_joiner : `str`, optional
text used to glue together X/Y units and their basic string
unit_prefix : `str`, optional
text preceeding the unit's representation, after the joiner
unit_suffix : `str`, optional
text following the unit's representation
unit_joiner : `str`, optional
text used to glue basic labels and the units together
show_units : `bool`, optional
if True, shows units on graphics
phase_units : `Units`
default units used for phase-like types
image_units : `Units`
default units used for image-like types | prysm/conf.py | __init__ | derekjgriffith/prysm | 1 | python | def __init__(self, precision=64, backend=np, zernike_base=1, Q=2, wavelength=HeNe, phase_cmap='inferno', image_cmap='Greys_r', lw=3, zorder=3, alpha=1, interpolation='lanczos', unit_format='latex_inline', show_units=True, phase_xy_unit=u.mm, phase_z_unit=u.nm, image_xy_unit=u.um, image_z_unit=u.adu, mtf_xy_unit=(u.mm ** (- 1)), mtf_z_unit=rel, ptf_xy_unit=(u.mm ** (- 1)), ptf_z_unit=u.deg, pupil_labels=default_pupil_labels, interferogram_labels=default_interferogram_labels, convolvable_labels=default_convolvable_labels, mtf_labels=default_mtf_labels, ptf_labels=default_ptf_labels, psd_labels=default_psd_labels):
'Create a new `Config` object.\n\n Parameters\n ----------\n precision : `int`\n 32 or 64, number of bits of precision\n backend : `str`, {\'np\'}\n a supported backend. Current options are only "np" for numpy\n zernike_base : `int`, {0, 1}\n base for zernikes; start at 0 or 1\n Q : `float`\n oversampling parameter for numerical propagations\n phase_cmap : `str`\n colormap used for plotting optical phases\n image_cmap : `str`\n colormap used for plotting greyscale images\n lw : `float`\n linewidth\n zorder : `int`, optional\n zorder used for graphics made with matplotlib\n interpolation : `str`\n interpolation type for 2D plots\n unit_formatter : `str`, optional\n string passed to astropy.units.(unit).to_string\n xylabel_joiner : `str`, optional\n text used to glue together X/Y units and their basic string\n unit_prefix : `str`, optional\n text preceeding the unit\'s representation, after the joiner\n unit_suffix : `str`, optional\n text following the unit\'s representation\n unit_joiner : `str`, optional\n text used to glue basic labels and the units together\n show_units : `bool`, optional\n if True, shows units on graphics\n phase_units : `Units`\n default units used for phase-like types\n image_units : `Units`\n default units used for image-like types\n\n '
self.initialized = False
self.precision = precision
self.backend = backend
self.zernike_base = zernike_base
self.chbackend_observers = []
self.Q = Q
self.wavelength = wavelength
self.phase_cmap = phase_cmap
self.image_cmap = image_cmap
self.lw = lw
self.zorder = zorder
self.alpha = alpha
self.interpolation = interpolation
self.unit_format = unit_format
self.show_units = show_units
self.phase_xy_unit = phase_xy_unit
self.phase_z_unit = phase_z_unit
self.image_xy_unit = image_xy_unit
self.image_z_unit = image_z_unit
self.mtf_xy_unit = mtf_xy_unit
self.mtf_z_unit = mtf_z_unit
self.ptf_xy_unit = ptf_xy_unit
self.ptf_z_unit = ptf_z_unit
self.pupil_labels = pupil_labels
self.interferogram_labels = interferogram_labels
self.convolvable_labels = convolvable_labels
self.mtf_labels = mtf_labels
self.ptf_labels = ptf_labels
self.psd_labels = psd_labels
self.initialized = True | def __init__(self, precision=64, backend=np, zernike_base=1, Q=2, wavelength=HeNe, phase_cmap='inferno', image_cmap='Greys_r', lw=3, zorder=3, alpha=1, interpolation='lanczos', unit_format='latex_inline', show_units=True, phase_xy_unit=u.mm, phase_z_unit=u.nm, image_xy_unit=u.um, image_z_unit=u.adu, mtf_xy_unit=(u.mm ** (- 1)), mtf_z_unit=rel, ptf_xy_unit=(u.mm ** (- 1)), ptf_z_unit=u.deg, pupil_labels=default_pupil_labels, interferogram_labels=default_interferogram_labels, convolvable_labels=default_convolvable_labels, mtf_labels=default_mtf_labels, ptf_labels=default_ptf_labels, psd_labels=default_psd_labels):
'Create a new `Config` object.\n\n Parameters\n ----------\n precision : `int`\n 32 or 64, number of bits of precision\n backend : `str`, {\'np\'}\n a supported backend. Current options are only "np" for numpy\n zernike_base : `int`, {0, 1}\n base for zernikes; start at 0 or 1\n Q : `float`\n oversampling parameter for numerical propagations\n phase_cmap : `str`\n colormap used for plotting optical phases\n image_cmap : `str`\n colormap used for plotting greyscale images\n lw : `float`\n linewidth\n zorder : `int`, optional\n zorder used for graphics made with matplotlib\n interpolation : `str`\n interpolation type for 2D plots\n unit_formatter : `str`, optional\n string passed to astropy.units.(unit).to_string\n xylabel_joiner : `str`, optional\n text used to glue together X/Y units and their basic string\n unit_prefix : `str`, optional\n text preceeding the unit\'s representation, after the joiner\n unit_suffix : `str`, optional\n text following the unit\'s representation\n unit_joiner : `str`, optional\n text used to glue basic labels and the units together\n show_units : `bool`, optional\n if True, shows units on graphics\n phase_units : `Units`\n default units used for phase-like types\n image_units : `Units`\n default units used for image-like types\n\n '
self.initialized = False
self.precision = precision
self.backend = backend
self.zernike_base = zernike_base
self.chbackend_observers = []
self.Q = Q
self.wavelength = wavelength
self.phase_cmap = phase_cmap
self.image_cmap = image_cmap
self.lw = lw
self.zorder = zorder
self.alpha = alpha
self.interpolation = interpolation
self.unit_format = unit_format
self.show_units = show_units
self.phase_xy_unit = phase_xy_unit
self.phase_z_unit = phase_z_unit
self.image_xy_unit = image_xy_unit
self.image_z_unit = image_z_unit
self.mtf_xy_unit = mtf_xy_unit
self.mtf_z_unit = mtf_z_unit
self.ptf_xy_unit = ptf_xy_unit
self.ptf_z_unit = ptf_z_unit
self.pupil_labels = pupil_labels
self.interferogram_labels = interferogram_labels
self.convolvable_labels = convolvable_labels
self.mtf_labels = mtf_labels
self.ptf_labels = ptf_labels
self.psd_labels = psd_labels
self.initialized = True<|docstring|>Create a new `Config` object.
Parameters
----------
precision : `int`
32 or 64, number of bits of precision
backend : `str`, {'np'}
a supported backend. Current options are only "np" for numpy
zernike_base : `int`, {0, 1}
base for zernikes; start at 0 or 1
Q : `float`
oversampling parameter for numerical propagations
phase_cmap : `str`
colormap used for plotting optical phases
image_cmap : `str`
colormap used for plotting greyscale images
lw : `float`
linewidth
zorder : `int`, optional
zorder used for graphics made with matplotlib
interpolation : `str`
interpolation type for 2D plots
unit_formatter : `str`, optional
string passed to astropy.units.(unit).to_string
xylabel_joiner : `str`, optional
text used to glue together X/Y units and their basic string
unit_prefix : `str`, optional
text preceeding the unit's representation, after the joiner
unit_suffix : `str`, optional
text following the unit's representation
unit_joiner : `str`, optional
text used to glue basic labels and the units together
show_units : `bool`, optional
if True, shows units on graphics
phase_units : `Units`
default units used for phase-like types
image_units : `Units`
default units used for image-like types<|endoftext|> |
a8c3f8e4053386a34c7a455d03ffee5412329caf461e03aab447ea47e5f1453a | @property
def precision(self):
'Precision used for computations.\n\n Returns\n -------\n `object` : `numpy.float32` or `numpy.float64`\n precision used\n\n '
return self._precision | Precision used for computations.
Returns
-------
`object` : `numpy.float32` or `numpy.float64`
precision used | prysm/conf.py | precision | derekjgriffith/prysm | 1 | python | @property
def precision(self):
'Precision used for computations.\n\n Returns\n -------\n `object` : `numpy.float32` or `numpy.float64`\n precision used\n\n '
return self._precision | @property
def precision(self):
'Precision used for computations.\n\n Returns\n -------\n `object` : `numpy.float32` or `numpy.float64`\n precision used\n\n '
return self._precision<|docstring|>Precision used for computations.
Returns
-------
`object` : `numpy.float32` or `numpy.float64`
precision used<|endoftext|> |
b91dfdcba25f140dc649452c9f436d38246997e3356add4f8a005283e0a2422a | @property
def precision_complex(self):
'Precision used for complex array computations.\n\n Returns\n -------\n `object` : `numpy.complex64` or `numpy.complex128`\n precision used for complex arrays\n\n '
return self._precision_complex | Precision used for complex array computations.
Returns
-------
`object` : `numpy.complex64` or `numpy.complex128`
precision used for complex arrays | prysm/conf.py | precision_complex | derekjgriffith/prysm | 1 | python | @property
def precision_complex(self):
'Precision used for complex array computations.\n\n Returns\n -------\n `object` : `numpy.complex64` or `numpy.complex128`\n precision used for complex arrays\n\n '
return self._precision_complex | @property
def precision_complex(self):
'Precision used for complex array computations.\n\n Returns\n -------\n `object` : `numpy.complex64` or `numpy.complex128`\n precision used for complex arrays\n\n '
return self._precision_complex<|docstring|>Precision used for complex array computations.
Returns
-------
`object` : `numpy.complex64` or `numpy.complex128`
precision used for complex arrays<|endoftext|> |
3ac2d4cc3471aece6fa946c452f9b3eddf58f7750af245d9ee3676d3f3fe5fe7 | @precision.setter
def precision(self, precision):
'Adjust precision used by prysm.\n\n Parameters\n ----------\n precision : `int`, {32, 64}\n what precision to use; either 32 or 64 bits\n\n Raises\n ------\n ValueError\n if precision is not a valid option\n\n '
if (precision not in (32, 64)):
raise ValueError('invalid precision. Precision should be 32 or 64.')
if (precision == 32):
self._precision = np.float32
self._precision_complex = np.complex64
else:
self._precision = np.float64
self._precision_complex = np.complex128 | Adjust precision used by prysm.
Parameters
----------
precision : `int`, {32, 64}
what precision to use; either 32 or 64 bits
Raises
------
ValueError
if precision is not a valid option | prysm/conf.py | precision | derekjgriffith/prysm | 1 | python | @precision.setter
def precision(self, precision):
'Adjust precision used by prysm.\n\n Parameters\n ----------\n precision : `int`, {32, 64}\n what precision to use; either 32 or 64 bits\n\n Raises\n ------\n ValueError\n if precision is not a valid option\n\n '
if (precision not in (32, 64)):
raise ValueError('invalid precision. Precision should be 32 or 64.')
if (precision == 32):
self._precision = np.float32
self._precision_complex = np.complex64
else:
self._precision = np.float64
self._precision_complex = np.complex128 | @precision.setter
def precision(self, precision):
'Adjust precision used by prysm.\n\n Parameters\n ----------\n precision : `int`, {32, 64}\n what precision to use; either 32 or 64 bits\n\n Raises\n ------\n ValueError\n if precision is not a valid option\n\n '
if (precision not in (32, 64)):
raise ValueError('invalid precision. Precision should be 32 or 64.')
if (precision == 32):
self._precision = np.float32
self._precision_complex = np.complex64
else:
self._precision = np.float64
self._precision_complex = np.complex128<|docstring|>Adjust precision used by prysm.
Parameters
----------
precision : `int`, {32, 64}
what precision to use; either 32 or 64 bits
Raises
------
ValueError
if precision is not a valid option<|endoftext|> |
b65642650e836292475ebc8800a5a065b90f214a20563513b8c5555fce2a38d7 | @property
def backend(self):
"Backend used.\n\n Returns\n -------\n `str`\n {'np'} only\n\n "
return self._backend | Backend used.
Returns
-------
`str`
{'np'} only | prysm/conf.py | backend | derekjgriffith/prysm | 1 | python | @property
def backend(self):
"Backend used.\n\n Returns\n -------\n `str`\n {'np'} only\n\n "
return self._backend | @property
def backend(self):
"Backend used.\n\n Returns\n -------\n `str`\n {'np'} only\n\n "
return self._backend<|docstring|>Backend used.
Returns
-------
`str`
{'np'} only<|endoftext|> |
36e77de06d3b10ac4eab604b6380765d30bfc2dc69c2ee5f40d4dd468cf98e40 | @backend.setter
def backend(self, backend):
"Set the backend used by prysm.\n\n Parameters\n ----------\n backend : `str`, {'np'}\n backend used for computations\n\n Raises\n ------\n ValueError\n invalid backend\n\n "
if isinstance(backend, str):
if (backend.lower() in ('np', 'numpy')):
backend = 'numpy'
elif (backend.lower() in ('cp', 'cu', 'cuda')):
backend = 'cupy'
exec(f'import {backend}')
self._backend = eval(backend)
else:
self._backend = backend
if self.initialized:
for obs in self.chbackend_observers:
obs(self._backend) | Set the backend used by prysm.
Parameters
----------
backend : `str`, {'np'}
backend used for computations
Raises
------
ValueError
invalid backend | prysm/conf.py | backend | derekjgriffith/prysm | 1 | python | @backend.setter
def backend(self, backend):
"Set the backend used by prysm.\n\n Parameters\n ----------\n backend : `str`, {'np'}\n backend used for computations\n\n Raises\n ------\n ValueError\n invalid backend\n\n "
if isinstance(backend, str):
if (backend.lower() in ('np', 'numpy')):
backend = 'numpy'
elif (backend.lower() in ('cp', 'cu', 'cuda')):
backend = 'cupy'
exec(f'import {backend}')
self._backend = eval(backend)
else:
self._backend = backend
if self.initialized:
for obs in self.chbackend_observers:
obs(self._backend) | @backend.setter
def backend(self, backend):
"Set the backend used by prysm.\n\n Parameters\n ----------\n backend : `str`, {'np'}\n backend used for computations\n\n Raises\n ------\n ValueError\n invalid backend\n\n "
if isinstance(backend, str):
if (backend.lower() in ('np', 'numpy')):
backend = 'numpy'
elif (backend.lower() in ('cp', 'cu', 'cuda')):
backend = 'cupy'
exec(f'import {backend}')
self._backend = eval(backend)
else:
self._backend = backend
if self.initialized:
for obs in self.chbackend_observers:
obs(self._backend)<|docstring|>Set the backend used by prysm.
Parameters
----------
backend : `str`, {'np'}
backend used for computations
Raises
------
ValueError
invalid backend<|endoftext|> |
a29f12ec3bef62d2e4ccc011e6ebb4cdd3c322cbe13b66c4603885be76f64556 | @property
def zernike_base(self):
'Zernike base.\n\n Returns\n -------\n `int`\n {0, 1}\n\n '
return self._zernike_base | Zernike base.
Returns
-------
`int`
{0, 1} | prysm/conf.py | zernike_base | derekjgriffith/prysm | 1 | python | @property
def zernike_base(self):
'Zernike base.\n\n Returns\n -------\n `int`\n {0, 1}\n\n '
return self._zernike_base | @property
def zernike_base(self):
'Zernike base.\n\n Returns\n -------\n `int`\n {0, 1}\n\n '
return self._zernike_base<|docstring|>Zernike base.
Returns
-------
`int`
{0, 1}<|endoftext|> |
fffba2f864cc30a57aa3c1068b0399a4c2eb8f6baed0fba38af38771fc64ffec | @zernike_base.setter
def zernike_base(self, base):
'Zernike base; base-0 or base-1.\n\n Parameters\n ----------\n base : `int`, {0, 1}\n first index of zernike polynomials\n\n Raises\n ------\n ValueError\n invalid base given\n\n '
if (base not in (0, 1)):
raise ValueError('By convention zernike base must be 0 or 1.')
self._zernike_base = base | Zernike base; base-0 or base-1.
Parameters
----------
base : `int`, {0, 1}
first index of zernike polynomials
Raises
------
ValueError
invalid base given | prysm/conf.py | zernike_base | derekjgriffith/prysm | 1 | python | @zernike_base.setter
def zernike_base(self, base):
'Zernike base; base-0 or base-1.\n\n Parameters\n ----------\n base : `int`, {0, 1}\n first index of zernike polynomials\n\n Raises\n ------\n ValueError\n invalid base given\n\n '
if (base not in (0, 1)):
raise ValueError('By convention zernike base must be 0 or 1.')
self._zernike_base = base | @zernike_base.setter
def zernike_base(self, base):
'Zernike base; base-0 or base-1.\n\n Parameters\n ----------\n base : `int`, {0, 1}\n first index of zernike polynomials\n\n Raises\n ------\n ValueError\n invalid base given\n\n '
if (base not in (0, 1)):
raise ValueError('By convention zernike base must be 0 or 1.')
self._zernike_base = base<|docstring|>Zernike base; base-0 or base-1.
Parameters
----------
base : `int`, {0, 1}
first index of zernike polynomials
Raises
------
ValueError
invalid base given<|endoftext|> |
48e4cab30527ff530a80749f998cc9e9ee812d1955aea406bc08d566ca4188c7 | def submitSplApp(spl_main_composite, spl_main_project_dir, streaming_service_name, service_credentials_filename, spl_params=None, dep_toolkits_list=None, job_name=None, job_group=None, data_directory=None):
'\n :param spl_main_composite: Must contain the namespace and main composite name i.e. com.ibm.streams::MainApp\n :param spl_main_project_dir: The Streams application project directory\n :param streaming_service_name: Name of the IBM® Cloud Streaming service\n :param service_credentials_filename: File containing the JSON of IBM® Cloud Streaming service credentials\n :param spl_params: SPL parameters dictionary loaded from file with json array as following\n [ { "name": "param1", "type": "rstring", "value": "paramValue1"},...]\n :param dep_toolkits_list: List of toolkits of dependencies\n :param job_name: Job name to appear in Streams console\n :param job_group: Job group, this must exist in the Streams instance to successfully submit\n :param data_directory: Application data directory\n :return: SubmissionResult: Result of the submission. For details of what is contained see the :py:class:`ContextTypes`\n constant passed as `ctxtype`.\n '
topo = streamsx.topology.topology.Topology(spl_main_composite.split('::')[(- 1)])
try:
credentials = json.load(open(service_credentials_filename))
except Exception as err:
print('ERROR : While processing service_credentials_filename : ', service_credentials_filename)
print('Run-time error ', err)
sys.exit(3)
vs = {'streaming-analytics': [{'name': streaming_service_name, 'credentials': credentials}]}
cfg = {}
cfg[streamsx.topology.context.ConfigParams.VCAP_SERVICES] = vs
cfg[streamsx.topology.context.ConfigParams.SERVICE_NAME] = streaming_service_name
job_config = streamsx.topology.context.JobConfig(job_name=job_name, job_group=job_group, data_directory=data_directory)
job_config.add(cfg)
streamsx.spl.toolkit.add_toolkit(topo, spl_main_project_dir)
if (dep_toolkits_list is not None):
for toolkit in dep_toolkits_list:
streamsx.spl.toolkit.add_toolkit(topo, toolkit)
splMain = streamsx.spl.op.Invoke(topo, spl_main_composite, params=spl_params)
ctx = streamsx.topology.context.submit('STREAMING_ANALYTICS_SERVICE', topo, config=cfg)
print('Submitted job to service:', streaming_service_name)
return ctx | :param spl_main_composite: Must contain the namespace and main composite name i.e. com.ibm.streams::MainApp
:param spl_main_project_dir: The Streams application project directory
:param streaming_service_name: Name of the IBM® Cloud Streaming service
:param service_credentials_filename: File containing the JSON of IBM® Cloud Streaming service credentials
:param spl_params: SPL parameters dictionary loaded from file with json array as following
[ { "name": "param1", "type": "rstring", "value": "paramValue1"},...]
:param dep_toolkits_list: List of toolkits of dependencies
:param job_name: Job name to appear in Streams console
:param job_group: Job group, this must exist in the Streams instance to successfully submit
:param data_directory: Application data directory
:return: SubmissionResult: Result of the submission. For details of what is contained see the :py:class:`ContextTypes`
constant passed as `ctxtype`. | scripts/submitSPL.py | submitSplApp | IBMStreams/streamsx.clickstream | 0 | python | def submitSplApp(spl_main_composite, spl_main_project_dir, streaming_service_name, service_credentials_filename, spl_params=None, dep_toolkits_list=None, job_name=None, job_group=None, data_directory=None):
'\n :param spl_main_composite: Must contain the namespace and main composite name i.e. com.ibm.streams::MainApp\n :param spl_main_project_dir: The Streams application project directory\n :param streaming_service_name: Name of the IBM® Cloud Streaming service\n :param service_credentials_filename: File containing the JSON of IBM® Cloud Streaming service credentials\n :param spl_params: SPL parameters dictionary loaded from file with json array as following\n [ { "name": "param1", "type": "rstring", "value": "paramValue1"},...]\n :param dep_toolkits_list: List of toolkits of dependencies\n :param job_name: Job name to appear in Streams console\n :param job_group: Job group, this must exist in the Streams instance to successfully submit\n :param data_directory: Application data directory\n :return: SubmissionResult: Result of the submission. For details of what is contained see the :py:class:`ContextTypes`\n constant passed as `ctxtype`.\n '
topo = streamsx.topology.topology.Topology(spl_main_composite.split('::')[(- 1)])
try:
credentials = json.load(open(service_credentials_filename))
except Exception as err:
print('ERROR : While processing service_credentials_filename : ', service_credentials_filename)
print('Run-time error ', err)
sys.exit(3)
vs = {'streaming-analytics': [{'name': streaming_service_name, 'credentials': credentials}]}
cfg = {}
cfg[streamsx.topology.context.ConfigParams.VCAP_SERVICES] = vs
cfg[streamsx.topology.context.ConfigParams.SERVICE_NAME] = streaming_service_name
job_config = streamsx.topology.context.JobConfig(job_name=job_name, job_group=job_group, data_directory=data_directory)
job_config.add(cfg)
streamsx.spl.toolkit.add_toolkit(topo, spl_main_project_dir)
if (dep_toolkits_list is not None):
for toolkit in dep_toolkits_list:
streamsx.spl.toolkit.add_toolkit(topo, toolkit)
splMain = streamsx.spl.op.Invoke(topo, spl_main_composite, params=spl_params)
ctx = streamsx.topology.context.submit('STREAMING_ANALYTICS_SERVICE', topo, config=cfg)
print('Submitted job to service:', streaming_service_name)
return ctx | def submitSplApp(spl_main_composite, spl_main_project_dir, streaming_service_name, service_credentials_filename, spl_params=None, dep_toolkits_list=None, job_name=None, job_group=None, data_directory=None):
'\n :param spl_main_composite: Must contain the namespace and main composite name i.e. com.ibm.streams::MainApp\n :param spl_main_project_dir: The Streams application project directory\n :param streaming_service_name: Name of the IBM® Cloud Streaming service\n :param service_credentials_filename: File containing the JSON of IBM® Cloud Streaming service credentials\n :param spl_params: SPL parameters dictionary loaded from file with json array as following\n [ { "name": "param1", "type": "rstring", "value": "paramValue1"},...]\n :param dep_toolkits_list: List of toolkits of dependencies\n :param job_name: Job name to appear in Streams console\n :param job_group: Job group, this must exist in the Streams instance to successfully submit\n :param data_directory: Application data directory\n :return: SubmissionResult: Result of the submission. For details of what is contained see the :py:class:`ContextTypes`\n constant passed as `ctxtype`.\n '
topo = streamsx.topology.topology.Topology(spl_main_composite.split('::')[(- 1)])
try:
credentials = json.load(open(service_credentials_filename))
except Exception as err:
print('ERROR : While processing service_credentials_filename : ', service_credentials_filename)
print('Run-time error ', err)
sys.exit(3)
vs = {'streaming-analytics': [{'name': streaming_service_name, 'credentials': credentials}]}
cfg = {}
cfg[streamsx.topology.context.ConfigParams.VCAP_SERVICES] = vs
cfg[streamsx.topology.context.ConfigParams.SERVICE_NAME] = streaming_service_name
job_config = streamsx.topology.context.JobConfig(job_name=job_name, job_group=job_group, data_directory=data_directory)
job_config.add(cfg)
streamsx.spl.toolkit.add_toolkit(topo, spl_main_project_dir)
if (dep_toolkits_list is not None):
for toolkit in dep_toolkits_list:
streamsx.spl.toolkit.add_toolkit(topo, toolkit)
splMain = streamsx.spl.op.Invoke(topo, spl_main_composite, params=spl_params)
ctx = streamsx.topology.context.submit('STREAMING_ANALYTICS_SERVICE', topo, config=cfg)
print('Submitted job to service:', streaming_service_name)
return ctx<|docstring|>:param spl_main_composite: Must contain the namespace and main composite name i.e. com.ibm.streams::MainApp
:param spl_main_project_dir: The Streams application project directory
:param streaming_service_name: Name of the IBM® Cloud Streaming service
:param service_credentials_filename: File containing the JSON of IBM® Cloud Streaming service credentials
:param spl_params: SPL parameters dictionary loaded from file with json array as following
[ { "name": "param1", "type": "rstring", "value": "paramValue1"},...]
:param dep_toolkits_list: List of toolkits of dependencies
:param job_name: Job name to appear in Streams console
:param job_group: Job group, this must exist in the Streams instance to successfully submit
:param data_directory: Application data directory
:return: SubmissionResult: Result of the submission. For details of what is contained see the :py:class:`ContextTypes`
constant passed as `ctxtype`.<|endoftext|> |
72ba5a666ca620dfd0681f64ea16b84251344df30d34031f5a48a8540ecea243 | @list_route(methods=['post'], url_path='search')
@params_valid(serializer=SearchSerializer)
def search(self, request, params):
'\n @api {post} /datamanage/datamap/retrieve/search/ 条件查询接口,获得数据地图树形结构\n @apiVersion 0.1.0\n @apiGroup DataMap\n @apiName search\n\n @apiParam {Integer} bk_biz_id 业务\n @apiParam {Integer} project_id 项目\n @apiParam {List} tag_ids 标签code列表\n @apiParam {String} keyword 搜索关键词\n\n @apiParamExample {json} 参数样例:\n {\n "bk_biz_id":591,\n "project_id":1,\n "tag_ids":["system","login"],\n "keyword":"login"\n }\n\n @apiSuccessExample Success-Response:\n HTTP/1.1 200 OK\n {\n "result": true,\n "errors": null,\n "message": "ok",\n "code": "1500200",\n "data": 3\n }\n '
index_name_list = ['dataset_count']
(tree_list, virtual_data_mart_node, other_node) = dmaction.datamap_search_dgraph(params, connections['bkdata_basic_slave'], index_name_list)
data_mart_root = tree_list[0]
root_sub_list = data_mart_root['sub_list']
show_list = []
if tree_list:
for tree_root in root_sub_list:
sub_list = tree_root['sub_list']
for sub_dict in sub_list:
sub_dict[dmaction.VIRTUAL_DM_POS_FIELD] = tree_root['seq_index']
show_list.extend(sub_list)
dmaction.delete_not_kpath_node(show_list)
dmaction.add_virtual_other_node(show_list)
for show_dict in show_list:
dmaction.add_dm_pos_field(show_dict, show_dict[dmaction.VIRTUAL_DM_POS_FIELD])
for show_dict in show_list:
dmaction.add_loc_field(show_dict, show_dict.get(dmaction.DATAMAP_LOC_FIELD))
show_list.sort(key=(lambda l: (l['loc'], l['datamap_seq_index'])), reverse=False)
other_node[dmaction.VIRTUAL_DM_POS_FIELD] = 2
other_node[dmaction.DATAMAP_LOC_FIELD] = 0
show_list.append(other_node)
virtual_data_mart_node['sub_list'] = show_list
virtual_data_mart_node[dmaction.VIRTUAL_DM_POS_FIELD] = 0
virtual_data_mart_node[dmaction.DATAMAP_LOC_FIELD] = (- 1)
return Response([virtual_data_mart_node]) | @api {post} /datamanage/datamap/retrieve/search/ 条件查询接口,获得数据地图树形结构
@apiVersion 0.1.0
@apiGroup DataMap
@apiName search
@apiParam {Integer} bk_biz_id 业务
@apiParam {Integer} project_id 项目
@apiParam {List} tag_ids 标签code列表
@apiParam {String} keyword 搜索关键词
@apiParamExample {json} 参数样例:
{
"bk_biz_id":591,
"project_id":1,
"tag_ids":["system","login"],
"keyword":"login"
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"errors": null,
"message": "ok",
"code": "1500200",
"data": 3
} | src/api/datamanage/pro/datamap/views.py | search | Chromico/bk-base | 84 | python | @list_route(methods=['post'], url_path='search')
@params_valid(serializer=SearchSerializer)
def search(self, request, params):
'\n @api {post} /datamanage/datamap/retrieve/search/ 条件查询接口,获得数据地图树形结构\n @apiVersion 0.1.0\n @apiGroup DataMap\n @apiName search\n\n @apiParam {Integer} bk_biz_id 业务\n @apiParam {Integer} project_id 项目\n @apiParam {List} tag_ids 标签code列表\n @apiParam {String} keyword 搜索关键词\n\n @apiParamExample {json} 参数样例:\n {\n "bk_biz_id":591,\n "project_id":1,\n "tag_ids":["system","login"],\n "keyword":"login"\n }\n\n @apiSuccessExample Success-Response:\n HTTP/1.1 200 OK\n {\n "result": true,\n "errors": null,\n "message": "ok",\n "code": "1500200",\n "data": 3\n }\n '
index_name_list = ['dataset_count']
(tree_list, virtual_data_mart_node, other_node) = dmaction.datamap_search_dgraph(params, connections['bkdata_basic_slave'], index_name_list)
data_mart_root = tree_list[0]
root_sub_list = data_mart_root['sub_list']
show_list = []
if tree_list:
for tree_root in root_sub_list:
sub_list = tree_root['sub_list']
for sub_dict in sub_list:
sub_dict[dmaction.VIRTUAL_DM_POS_FIELD] = tree_root['seq_index']
show_list.extend(sub_list)
dmaction.delete_not_kpath_node(show_list)
dmaction.add_virtual_other_node(show_list)
for show_dict in show_list:
dmaction.add_dm_pos_field(show_dict, show_dict[dmaction.VIRTUAL_DM_POS_FIELD])
for show_dict in show_list:
dmaction.add_loc_field(show_dict, show_dict.get(dmaction.DATAMAP_LOC_FIELD))
show_list.sort(key=(lambda l: (l['loc'], l['datamap_seq_index'])), reverse=False)
other_node[dmaction.VIRTUAL_DM_POS_FIELD] = 2
other_node[dmaction.DATAMAP_LOC_FIELD] = 0
show_list.append(other_node)
virtual_data_mart_node['sub_list'] = show_list
virtual_data_mart_node[dmaction.VIRTUAL_DM_POS_FIELD] = 0
virtual_data_mart_node[dmaction.DATAMAP_LOC_FIELD] = (- 1)
return Response([virtual_data_mart_node]) | @list_route(methods=['post'], url_path='search')
@params_valid(serializer=SearchSerializer)
def search(self, request, params):
'\n @api {post} /datamanage/datamap/retrieve/search/ 条件查询接口,获得数据地图树形结构\n @apiVersion 0.1.0\n @apiGroup DataMap\n @apiName search\n\n @apiParam {Integer} bk_biz_id 业务\n @apiParam {Integer} project_id 项目\n @apiParam {List} tag_ids 标签code列表\n @apiParam {String} keyword 搜索关键词\n\n @apiParamExample {json} 参数样例:\n {\n "bk_biz_id":591,\n "project_id":1,\n "tag_ids":["system","login"],\n "keyword":"login"\n }\n\n @apiSuccessExample Success-Response:\n HTTP/1.1 200 OK\n {\n "result": true,\n "errors": null,\n "message": "ok",\n "code": "1500200",\n "data": 3\n }\n '
index_name_list = ['dataset_count']
(tree_list, virtual_data_mart_node, other_node) = dmaction.datamap_search_dgraph(params, connections['bkdata_basic_slave'], index_name_list)
data_mart_root = tree_list[0]
root_sub_list = data_mart_root['sub_list']
show_list = []
if tree_list:
for tree_root in root_sub_list:
sub_list = tree_root['sub_list']
for sub_dict in sub_list:
sub_dict[dmaction.VIRTUAL_DM_POS_FIELD] = tree_root['seq_index']
show_list.extend(sub_list)
dmaction.delete_not_kpath_node(show_list)
dmaction.add_virtual_other_node(show_list)
for show_dict in show_list:
dmaction.add_dm_pos_field(show_dict, show_dict[dmaction.VIRTUAL_DM_POS_FIELD])
for show_dict in show_list:
dmaction.add_loc_field(show_dict, show_dict.get(dmaction.DATAMAP_LOC_FIELD))
show_list.sort(key=(lambda l: (l['loc'], l['datamap_seq_index'])), reverse=False)
other_node[dmaction.VIRTUAL_DM_POS_FIELD] = 2
other_node[dmaction.DATAMAP_LOC_FIELD] = 0
show_list.append(other_node)
virtual_data_mart_node['sub_list'] = show_list
virtual_data_mart_node[dmaction.VIRTUAL_DM_POS_FIELD] = 0
virtual_data_mart_node[dmaction.DATAMAP_LOC_FIELD] = (- 1)
return Response([virtual_data_mart_node])<|docstring|>@api {post} /datamanage/datamap/retrieve/search/ 条件查询接口,获得数据地图树形结构
@apiVersion 0.1.0
@apiGroup DataMap
@apiName search
@apiParam {Integer} bk_biz_id 业务
@apiParam {Integer} project_id 项目
@apiParam {List} tag_ids 标签code列表
@apiParam {String} keyword 搜索关键词
@apiParamExample {json} 参数样例:
{
"bk_biz_id":591,
"project_id":1,
"tag_ids":["system","login"],
"keyword":"login"
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"errors": null,
"message": "ok",
"code": "1500200",
"data": 3
}<|endoftext|> |
913a4897648c32f4c5fe804f9534a68048f9bdf7b046296c755f1265110e4213 | @list_route(methods=['post'], url_path='get_basic_info')
@params_valid(serializer=BasicInfoSerializer)
def get_basic_info(self, request, params):
'\n @api {post} /datamanage/datamap/retrieve/get_basic_info/ 获取基础信息\n @apiVersion 0.1.0\n @apiGroup DataMap\n @apiName get_basic_info\n\n @apiParam {String} tag_code 标签名称\n\n @apiParamExample {json} 参数样例:\n {\n "bk_biz_id":null,\n "project_id":null,\n "tag_ids":[],\n "keyword":"",\n "tag_code":"online",\n "me_type":"tag",\n "has_standard":1,\n "cal_type":["standard","only_standard"]\n }\n\n @apiSuccessExample Success-Response:\n HTTP/1.1 200 OK\n {\n "errors": null,\n "message": "ok",\n "code": "1500200",\n "data": {\n "project_count": 1,\n "standard_dataset_count": 274,\n "bk_biz_count": 119,\n "data_source_count": 0,\n "dataset_count": 274,\n "project_list": [\n 4172\n ],\n "bk_biz_list": [\n 100160,\n 730,\n 1123\n ]\n },\n "result": true\n }\n '
result_dict = dmaction.floating_window_query_dgraph(params, connections['bkdata_basic_slave'], dmaction.NEED_FLOATING_DETAIL)
return Response(result_dict) | @api {post} /datamanage/datamap/retrieve/get_basic_info/ 获取基础信息
@apiVersion 0.1.0
@apiGroup DataMap
@apiName get_basic_info
@apiParam {String} tag_code 标签名称
@apiParamExample {json} 参数样例:
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[],
"keyword":"",
"tag_code":"online",
"me_type":"tag",
"has_standard":1,
"cal_type":["standard","only_standard"]
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"project_count": 1,
"standard_dataset_count": 274,
"bk_biz_count": 119,
"data_source_count": 0,
"dataset_count": 274,
"project_list": [
4172
],
"bk_biz_list": [
100160,
730,
1123
]
},
"result": true
} | src/api/datamanage/pro/datamap/views.py | get_basic_info | Chromico/bk-base | 84 | python | @list_route(methods=['post'], url_path='get_basic_info')
@params_valid(serializer=BasicInfoSerializer)
def get_basic_info(self, request, params):
'\n @api {post} /datamanage/datamap/retrieve/get_basic_info/ 获取基础信息\n @apiVersion 0.1.0\n @apiGroup DataMap\n @apiName get_basic_info\n\n @apiParam {String} tag_code 标签名称\n\n @apiParamExample {json} 参数样例:\n {\n "bk_biz_id":null,\n "project_id":null,\n "tag_ids":[],\n "keyword":,\n "tag_code":"online",\n "me_type":"tag",\n "has_standard":1,\n "cal_type":["standard","only_standard"]\n }\n\n @apiSuccessExample Success-Response:\n HTTP/1.1 200 OK\n {\n "errors": null,\n "message": "ok",\n "code": "1500200",\n "data": {\n "project_count": 1,\n "standard_dataset_count": 274,\n "bk_biz_count": 119,\n "data_source_count": 0,\n "dataset_count": 274,\n "project_list": [\n 4172\n ],\n "bk_biz_list": [\n 100160,\n 730,\n 1123\n ]\n },\n "result": true\n }\n '
result_dict = dmaction.floating_window_query_dgraph(params, connections['bkdata_basic_slave'], dmaction.NEED_FLOATING_DETAIL)
return Response(result_dict) | @list_route(methods=['post'], url_path='get_basic_info')
@params_valid(serializer=BasicInfoSerializer)
def get_basic_info(self, request, params):
'\n @api {post} /datamanage/datamap/retrieve/get_basic_info/ 获取基础信息\n @apiVersion 0.1.0\n @apiGroup DataMap\n @apiName get_basic_info\n\n @apiParam {String} tag_code 标签名称\n\n @apiParamExample {json} 参数样例:\n {\n "bk_biz_id":null,\n "project_id":null,\n "tag_ids":[],\n "keyword":,\n "tag_code":"online",\n "me_type":"tag",\n "has_standard":1,\n "cal_type":["standard","only_standard"]\n }\n\n @apiSuccessExample Success-Response:\n HTTP/1.1 200 OK\n {\n "errors": null,\n "message": "ok",\n "code": "1500200",\n "data": {\n "project_count": 1,\n "standard_dataset_count": 274,\n "bk_biz_count": 119,\n "data_source_count": 0,\n "dataset_count": 274,\n "project_list": [\n 4172\n ],\n "bk_biz_list": [\n 100160,\n 730,\n 1123\n ]\n },\n "result": true\n }\n '
result_dict = dmaction.floating_window_query_dgraph(params, connections['bkdata_basic_slave'], dmaction.NEED_FLOATING_DETAIL)
return Response(result_dict)<|docstring|>@api {post} /datamanage/datamap/retrieve/get_basic_info/ 获取基础信息
@apiVersion 0.1.0
@apiGroup DataMap
@apiName get_basic_info
@apiParam {String} tag_code 标签名称
@apiParamExample {json} 参数样例:
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[],
"keyword":"",
"tag_code":"online",
"me_type":"tag",
"has_standard":1,
"cal_type":["standard","only_standard"]
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"project_count": 1,
"standard_dataset_count": 274,
"bk_biz_count": 119,
"data_source_count": 0,
"dataset_count": 274,
"project_list": [
4172
],
"bk_biz_list": [
100160,
730,
1123
]
},
"result": true
}<|endoftext|> |
9bdfbeb8dca8470dc52df661ebb3d45f9798f0b7b158c53b8be4e721f78e1ec2 | @list_route(methods=['post'], url_path='search_summary')
@params_valid(serializer=SearchSerializer)
def search_summary(self, request, params):
'\n @api {post} /datamanage/datamap/retrieve/search_summary/ 查询汇总信息接口\n @apiVersion 0.1.0\n @apiGroup DataMap\n @apiName search_summary\n\n @apiParam {Integer} bk_biz_id 业务\n @apiParam {Integer} project_id 项目\n @apiParam {List} tag_ids 标签code列表\n @apiParam {String} keyword 搜索关键词\n\n @apiParamExample {json} 参数样例:\n {\n "bk_biz_id":null,\n "project_id":null,\n "tag_ids":[],\n "keyword":"",\n "cal_type":["standard","only_standard"]\n }\n\n\n @apiSuccessExample Success-Response:\n HTTP/1.1 200 OK\n {\n "errors": null,\n "message": "ok",\n "code": "1500200",\n "data": {\n "project_count": 1,\n "recent_dataset_count_details": [\n {\n "dataset_count": 0,\n "day": "20190719"\n },\n {\n "dataset_count": 0,\n "day": "20190720"\n },\n {\n "dataset_count": 0,\n "day": "20190721"\n },\n {\n "dataset_count": 0,\n "day": "20190722"\n },\n {\n "dataset_count": 0,\n "day": "20190723"\n },\n {\n "dataset_count": 0,\n "day": "20190724"\n },\n {\n "dataset_count": 0,\n "day": "20190725"\n }\n ],\n "recent_dataset_count_sum": 0,\n "standard_dataset_count": 646,\n "bk_biz_count": 196,\n "recent_standard_dataset_count_details": [\n {\n "standard_dataset_count": 0,\n "day": "20190719"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190720"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190721"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190722"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190723"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190724"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190725"\n }\n ],\n "data_source_count": 0,\n "recent_data_source_count_details": [\n {\n "day": "20190719",\n "data_source_count": 0\n },\n {\n "day": "20190720",\n "data_source_count": 0\n },\n {\n "day": "20190721",\n "data_source_count": 0\n },\n {\n "day": "20190722",\n "data_source_count": 0\n },\n {\n "day": "20190723",\n "data_source_count": 0\n },\n {\n "day": "20190724",\n "data_source_count": 0\n },\n {\n "day": "20190725",\n "data_source_count": 0\n 
}\n ],\n "dataset_count": 646,\n "recent_standard_dataset_count_sum": 0,\n "recent_data_source_count_sum": 0\n },\n "result": true\n }\n '
result_dict = dmaction.search_summary_dgraph(params, connections['bkdata_basic_slave'])
dmaction.clear_bk_biz_id_list(result_dict)
return Response(result_dict) | @api {post} /datamanage/datamap/retrieve/search_summary/ 查询汇总信息接口
@apiVersion 0.1.0
@apiGroup DataMap
@apiName search_summary
@apiParam {Integer} bk_biz_id 业务
@apiParam {Integer} project_id 项目
@apiParam {List} tag_ids 标签code列表
@apiParam {String} keyword 搜索关键词
@apiParamExample {json} 参数样例:
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[],
"keyword":"",
"cal_type":["standard","only_standard"]
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"project_count": 1,
"recent_dataset_count_details": [
{
"dataset_count": 0,
"day": "20190719"
},
{
"dataset_count": 0,
"day": "20190720"
},
{
"dataset_count": 0,
"day": "20190721"
},
{
"dataset_count": 0,
"day": "20190722"
},
{
"dataset_count": 0,
"day": "20190723"
},
{
"dataset_count": 0,
"day": "20190724"
},
{
"dataset_count": 0,
"day": "20190725"
}
],
"recent_dataset_count_sum": 0,
"standard_dataset_count": 646,
"bk_biz_count": 196,
"recent_standard_dataset_count_details": [
{
"standard_dataset_count": 0,
"day": "20190719"
},
{
"standard_dataset_count": 0,
"day": "20190720"
},
{
"standard_dataset_count": 0,
"day": "20190721"
},
{
"standard_dataset_count": 0,
"day": "20190722"
},
{
"standard_dataset_count": 0,
"day": "20190723"
},
{
"standard_dataset_count": 0,
"day": "20190724"
},
{
"standard_dataset_count": 0,
"day": "20190725"
}
],
"data_source_count": 0,
"recent_data_source_count_details": [
{
"day": "20190719",
"data_source_count": 0
},
{
"day": "20190720",
"data_source_count": 0
},
{
"day": "20190721",
"data_source_count": 0
},
{
"day": "20190722",
"data_source_count": 0
},
{
"day": "20190723",
"data_source_count": 0
},
{
"day": "20190724",
"data_source_count": 0
},
{
"day": "20190725",
"data_source_count": 0
}
],
"dataset_count": 646,
"recent_standard_dataset_count_sum": 0,
"recent_data_source_count_sum": 0
},
"result": true
} | src/api/datamanage/pro/datamap/views.py | search_summary | Chromico/bk-base | 84 | python | @list_route(methods=['post'], url_path='search_summary')
@params_valid(serializer=SearchSerializer)
def search_summary(self, request, params):
'\n @api {post} /datamanage/datamap/retrieve/search_summary/ 查询汇总信息接口\n @apiVersion 0.1.0\n @apiGroup DataMap\n @apiName search_summary\n\n @apiParam {Integer} bk_biz_id 业务\n @apiParam {Integer} project_id 项目\n @apiParam {List} tag_ids 标签code列表\n @apiParam {String} keyword 搜索关键词\n\n @apiParamExample {json} 参数样例:\n {\n "bk_biz_id":null,\n "project_id":null,\n "tag_ids":[],\n "keyword":,\n "cal_type":["standard","only_standard"]\n }\n\n\n @apiSuccessExample Success-Response:\n HTTP/1.1 200 OK\n {\n "errors": null,\n "message": "ok",\n "code": "1500200",\n "data": {\n "project_count": 1,\n "recent_dataset_count_details": [\n {\n "dataset_count": 0,\n "day": "20190719"\n },\n {\n "dataset_count": 0,\n "day": "20190720"\n },\n {\n "dataset_count": 0,\n "day": "20190721"\n },\n {\n "dataset_count": 0,\n "day": "20190722"\n },\n {\n "dataset_count": 0,\n "day": "20190723"\n },\n {\n "dataset_count": 0,\n "day": "20190724"\n },\n {\n "dataset_count": 0,\n "day": "20190725"\n }\n ],\n "recent_dataset_count_sum": 0,\n "standard_dataset_count": 646,\n "bk_biz_count": 196,\n "recent_standard_dataset_count_details": [\n {\n "standard_dataset_count": 0,\n "day": "20190719"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190720"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190721"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190722"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190723"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190724"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190725"\n }\n ],\n "data_source_count": 0,\n "recent_data_source_count_details": [\n {\n "day": "20190719",\n "data_source_count": 0\n },\n {\n "day": "20190720",\n "data_source_count": 0\n },\n {\n "day": "20190721",\n "data_source_count": 0\n },\n {\n "day": "20190722",\n "data_source_count": 0\n },\n {\n "day": "20190723",\n "data_source_count": 0\n },\n {\n "day": "20190724",\n "data_source_count": 0\n },\n {\n "day": "20190725",\n "data_source_count": 0\n }\n 
],\n "dataset_count": 646,\n "recent_standard_dataset_count_sum": 0,\n "recent_data_source_count_sum": 0\n },\n "result": true\n }\n '
result_dict = dmaction.search_summary_dgraph(params, connections['bkdata_basic_slave'])
dmaction.clear_bk_biz_id_list(result_dict)
return Response(result_dict) | @list_route(methods=['post'], url_path='search_summary')
@params_valid(serializer=SearchSerializer)
def search_summary(self, request, params):
'\n @api {post} /datamanage/datamap/retrieve/search_summary/ 查询汇总信息接口\n @apiVersion 0.1.0\n @apiGroup DataMap\n @apiName search_summary\n\n @apiParam {Integer} bk_biz_id 业务\n @apiParam {Integer} project_id 项目\n @apiParam {List} tag_ids 标签code列表\n @apiParam {String} keyword 搜索关键词\n\n @apiParamExample {json} 参数样例:\n {\n "bk_biz_id":null,\n "project_id":null,\n "tag_ids":[],\n "keyword":,\n "cal_type":["standard","only_standard"]\n }\n\n\n @apiSuccessExample Success-Response:\n HTTP/1.1 200 OK\n {\n "errors": null,\n "message": "ok",\n "code": "1500200",\n "data": {\n "project_count": 1,\n "recent_dataset_count_details": [\n {\n "dataset_count": 0,\n "day": "20190719"\n },\n {\n "dataset_count": 0,\n "day": "20190720"\n },\n {\n "dataset_count": 0,\n "day": "20190721"\n },\n {\n "dataset_count": 0,\n "day": "20190722"\n },\n {\n "dataset_count": 0,\n "day": "20190723"\n },\n {\n "dataset_count": 0,\n "day": "20190724"\n },\n {\n "dataset_count": 0,\n "day": "20190725"\n }\n ],\n "recent_dataset_count_sum": 0,\n "standard_dataset_count": 646,\n "bk_biz_count": 196,\n "recent_standard_dataset_count_details": [\n {\n "standard_dataset_count": 0,\n "day": "20190719"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190720"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190721"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190722"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190723"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190724"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190725"\n }\n ],\n "data_source_count": 0,\n "recent_data_source_count_details": [\n {\n "day": "20190719",\n "data_source_count": 0\n },\n {\n "day": "20190720",\n "data_source_count": 0\n },\n {\n "day": "20190721",\n "data_source_count": 0\n },\n {\n "day": "20190722",\n "data_source_count": 0\n },\n {\n "day": "20190723",\n "data_source_count": 0\n },\n {\n "day": "20190724",\n "data_source_count": 0\n },\n {\n "day": "20190725",\n "data_source_count": 0\n }\n 
],\n "dataset_count": 646,\n "recent_standard_dataset_count_sum": 0,\n "recent_data_source_count_sum": 0\n },\n "result": true\n }\n '
result_dict = dmaction.search_summary_dgraph(params, connections['bkdata_basic_slave'])
dmaction.clear_bk_biz_id_list(result_dict)
return Response(result_dict)<|docstring|>@api {post} /datamanage/datamap/retrieve/search_summary/ 查询汇总信息接口
@apiVersion 0.1.0
@apiGroup DataMap
@apiName search_summary
@apiParam {Integer} bk_biz_id 业务
@apiParam {Integer} project_id 项目
@apiParam {List} tag_ids 标签code列表
@apiParam {String} keyword 搜索关键词
@apiParamExample {json} 参数样例:
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[],
"keyword":"",
"cal_type":["standard","only_standard"]
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"project_count": 1,
"recent_dataset_count_details": [
{
"dataset_count": 0,
"day": "20190719"
},
{
"dataset_count": 0,
"day": "20190720"
},
{
"dataset_count": 0,
"day": "20190721"
},
{
"dataset_count": 0,
"day": "20190722"
},
{
"dataset_count": 0,
"day": "20190723"
},
{
"dataset_count": 0,
"day": "20190724"
},
{
"dataset_count": 0,
"day": "20190725"
}
],
"recent_dataset_count_sum": 0,
"standard_dataset_count": 646,
"bk_biz_count": 196,
"recent_standard_dataset_count_details": [
{
"standard_dataset_count": 0,
"day": "20190719"
},
{
"standard_dataset_count": 0,
"day": "20190720"
},
{
"standard_dataset_count": 0,
"day": "20190721"
},
{
"standard_dataset_count": 0,
"day": "20190722"
},
{
"standard_dataset_count": 0,
"day": "20190723"
},
{
"standard_dataset_count": 0,
"day": "20190724"
},
{
"standard_dataset_count": 0,
"day": "20190725"
}
],
"data_source_count": 0,
"recent_data_source_count_details": [
{
"day": "20190719",
"data_source_count": 0
},
{
"day": "20190720",
"data_source_count": 0
},
{
"day": "20190721",
"data_source_count": 0
},
{
"day": "20190722",
"data_source_count": 0
},
{
"day": "20190723",
"data_source_count": 0
},
{
"day": "20190724",
"data_source_count": 0
},
{
"day": "20190725",
"data_source_count": 0
}
],
"dataset_count": 646,
"recent_standard_dataset_count_sum": 0,
"recent_data_source_count_sum": 0
},
"result": true
}<|endoftext|> |
f5eb2bda1281dd2234148529b8fef9cda4dd16afc5e259dde01fbb9bcf592ba4 | @list_route(methods=['post'], url_path='datamap_summary')
@params_valid(serializer=BasicListSerializer)
def datamap_summary(self, request, params):
    """Aggregate counters for the data-map landing page.

    POST /datamanage/datamap/retrieve/datamap_summary/

    Request filters: bk_biz_id, project_id, tag_ids, keyword, cal_type,
    platform.  Returns project/biz/dataset/data-source/standard-dataset
    counts.  TDW-platform requests are delegated to DatamanageApi and
    report zero standard datasets; everything else is answered from the
    ``bkdata_basic_slave`` connection.
    """
    # TDW data lives behind a separate service and has no standardized datasets.
    if params.get('platform', 'all') == 'tdw':
        summary = DatamanageApi.get_data_dict_count(params).data
        summary['standard_dataset_count'] = 0
        return Response(summary)
    summary = dmaction.datamap_summary(params, connections['bkdata_basic_slave'])
    # presumably strips the internal per-biz id list from the payload — see dmaction
    dmaction.clear_bk_biz_id_list(summary)
    return Response(summary)
@apiVersion 0.1.0
@apiGroup DataMap
@apiName search_summary
@apiParam {Integer} bk_biz_id 业务
@apiParam {Integer} project_id 项目
@apiParam {List} tag_ids 标签code列表
@apiParam {String} keyword 搜索关键词
@apiParamExample {json} 参数样例:
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[],
"keyword":"",
"cal_type":["standard","only_standard"]
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"project_count": 1,
"recent_dataset_count_sum": 0,
"standard_dataset_count": 646,
"bk_biz_count": 196,
"data_source_count": 0,
"dataset_count": 646,
"recent_standard_dataset_count_sum": 0,
"recent_data_source_count_sum": 0
},
"result": true
} | src/api/datamanage/pro/datamap/views.py | datamap_summary | Chromico/bk-base | 84 | python | @list_route(methods=['post'], url_path='datamap_summary')
@params_valid(serializer=BasicListSerializer)
def datamap_summary(self, request, params):
    """Return aggregate counters (projects, businesses, datasets, data
    sources, standard datasets) for the data-map summary endpoint.

    POST /datamanage/datamap/retrieve/datamap_summary/
    Body filters: bk_biz_id, project_id, tag_ids, keyword, cal_type, platform.
    """
    # 'platform' defaults to 'all'; TDW data is served by a separate API.
    platform = params.get('platform', 'all')
    if (platform == 'tdw'):
        # TDW has no standardized datasets, so the count is pinned to 0.
        result_dict = DatamanageApi.get_data_dict_count(params).data
        result_dict['standard_dataset_count'] = 0
        return Response(result_dict)
    result_dict = dmaction.datamap_summary(params, connections['bkdata_basic_slave'])
    # presumably removes the internal bk_biz_id list before returning — see dmaction
    dmaction.clear_bk_biz_id_list(result_dict)
    return Response(result_dict)
@params_valid(serializer=BasicListSerializer)
def datamap_summary(self, request, params):
'\n @api {post} /datamanage/datamap/retrieve/datamap_summary/ 查询汇总信息接口\n @apiVersion 0.1.0\n @apiGroup DataMap\n @apiName search_summary\n\n @apiParam {Integer} bk_biz_id 业务\n @apiParam {Integer} project_id 项目\n @apiParam {List} tag_ids 标签code列表\n @apiParam {String} keyword 搜索关键词\n\n @apiParamExample {json} 参数样例:\n {\n "bk_biz_id":null,\n "project_id":null,\n "tag_ids":[],\n "keyword":,\n "cal_type":["standard","only_standard"]\n }\n\n @apiSuccessExample Success-Response:\n HTTP/1.1 200 OK\n {\n "errors": null,\n "message": "ok",\n "code": "1500200",\n "data": {\n "project_count": 1,\n "recent_dataset_count_sum": 0,\n "standard_dataset_count": 646,\n "bk_biz_count": 196,\n "data_source_count": 0,\n "dataset_count": 646,\n "recent_standard_dataset_count_sum": 0,\n "recent_data_source_count_sum": 0\n },\n "result": true\n }\n '
platform = params.get('platform', 'all')
if (platform == 'tdw'):
result_dict = DatamanageApi.get_data_dict_count(params).data
result_dict['standard_dataset_count'] = 0
return Response(result_dict)
result_dict = dmaction.datamap_summary(params, connections['bkdata_basic_slave'])
dmaction.clear_bk_biz_id_list(result_dict)
return Response(result_dict)<|docstring|>@api {post} /datamanage/datamap/retrieve/datamap_summary/ 查询汇总信息接口
@apiVersion 0.1.0
@apiGroup DataMap
@apiName search_summary
@apiParam {Integer} bk_biz_id 业务
@apiParam {Integer} project_id 项目
@apiParam {List} tag_ids 标签code列表
@apiParam {String} keyword 搜索关键词
@apiParamExample {json} 参数样例:
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[],
"keyword":"",
"cal_type":["standard","only_standard"]
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"project_count": 1,
"recent_dataset_count_sum": 0,
"standard_dataset_count": 646,
"bk_biz_count": 196,
"data_source_count": 0,
"dataset_count": 646,
"recent_standard_dataset_count_sum": 0,
"recent_data_source_count_sum": 0
},
"result": true
}<|endoftext|> |
69aaf7c61ab766d46864060258abec561419537aa49a5510baea0cf7de2c6cdd | @list_route(methods=['post'], url_path='datamap_recent_summary')
@params_valid(serializer=SearchSerializer)
def datamap_recent_summary(self, request, params):
    """Counts of datasets, data sources and standard datasets added over
    the most recent seven days, bucketed per day, plus their totals.

    POST /datamanage/datamap/retrieve/datamap_recent_summary/
    Body filters: bk_biz_id, project_id, tag_ids, keyword, cal_type.
    """
    recent = dmaction.datamap_recent_summary(params, connections['bkdata_basic_slave'])
    # presumably strips the internal per-biz id list from the payload — see dmaction
    dmaction.clear_bk_biz_id_list(recent)
    return Response(recent)
@apiVersion 0.1.0
@apiGroup DataMap
@apiName search_summary
@apiParam {Integer} bk_biz_id 业务
@apiParam {Integer} project_id 项目
@apiParam {List} tag_ids 标签code列表
@apiParam {String} keyword 搜索关键词
@apiParamExample {json} 参数样例:
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[],
"keyword":"",
"cal_type":["standard","only_standard"]
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"project_count": 1,
"recent_dataset_count_details": [
{
"dataset_count": 0,
"day": "20190719"
},
{
"dataset_count": 0,
"day": "20190720"
},
{
"dataset_count": 0,
"day": "20190721"
},
{
"dataset_count": 0,
"day": "20190722"
},
{
"dataset_count": 0,
"day": "20190723"
},
{
"dataset_count": 0,
"day": "20190724"
},
{
"dataset_count": 0,
"day": "20190725"
}
],
"recent_dataset_count_sum": 0,
"standard_dataset_count": 646,
"bk_biz_count": 196,
"recent_standard_dataset_count_details": [
{
"standard_dataset_count": 0,
"day": "20190719"
},
{
"standard_dataset_count": 0,
"day": "20190720"
},
{
"standard_dataset_count": 0,
"day": "20190721"
},
{
"standard_dataset_count": 0,
"day": "20190722"
},
{
"standard_dataset_count": 0,
"day": "20190723"
},
{
"standard_dataset_count": 0,
"day": "20190724"
},
{
"standard_dataset_count": 0,
"day": "20190725"
}
],
"data_source_count": 0,
"recent_data_source_count_details": [
{
"day": "20190719",
"data_source_count": 0
},
{
"day": "20190720",
"data_source_count": 0
},
{
"day": "20190721",
"data_source_count": 0
},
{
"day": "20190722",
"data_source_count": 0
},
{
"day": "20190723",
"data_source_count": 0
},
{
"day": "20190724",
"data_source_count": 0
},
{
"day": "20190725",
"data_source_count": 0
}
],
"dataset_count": 646,
"recent_standard_dataset_count_sum": 0,
"recent_data_source_count_sum": 0
},
"result": true
} | src/api/datamanage/pro/datamap/views.py | datamap_recent_summary | Chromico/bk-base | 84 | python | @list_route(methods=['post'], url_path='datamap_recent_summary')
@params_valid(serializer=SearchSerializer)
def datamap_recent_summary(self, request, params):
    """Return the last-7-days per-day counts of new datasets, data sources
    and standard datasets, together with their sums.

    POST /datamanage/datamap/retrieve/datamap_recent_summary/
    Body filters: bk_biz_id, project_id, tag_ids, keyword, cal_type.
    """
    result_dict = dmaction.datamap_recent_summary(params, connections['bkdata_basic_slave'])
    # presumably removes the internal bk_biz_id list before returning — see dmaction
    dmaction.clear_bk_biz_id_list(result_dict)
    return Response(result_dict)
@params_valid(serializer=SearchSerializer)
def datamap_recent_summary(self, request, params):
'\n @api {post} /datamanage/datamap/retrieve/datamap_recent_summary/ 查询汇总最近7天新增数据量接口\n @apiVersion 0.1.0\n @apiGroup DataMap\n @apiName search_summary\n\n @apiParam {Integer} bk_biz_id 业务\n @apiParam {Integer} project_id 项目\n @apiParam {List} tag_ids 标签code列表\n @apiParam {String} keyword 搜索关键词\n\n @apiParamExample {json} 参数样例:\n {\n "bk_biz_id":null,\n "project_id":null,\n "tag_ids":[],\n "keyword":,\n "cal_type":["standard","only_standard"]\n }\n\n\n @apiSuccessExample Success-Response:\n HTTP/1.1 200 OK\n {\n "errors": null,\n "message": "ok",\n "code": "1500200",\n "data": {\n "project_count": 1,\n "recent_dataset_count_details": [\n {\n "dataset_count": 0,\n "day": "20190719"\n },\n {\n "dataset_count": 0,\n "day": "20190720"\n },\n {\n "dataset_count": 0,\n "day": "20190721"\n },\n {\n "dataset_count": 0,\n "day": "20190722"\n },\n {\n "dataset_count": 0,\n "day": "20190723"\n },\n {\n "dataset_count": 0,\n "day": "20190724"\n },\n {\n "dataset_count": 0,\n "day": "20190725"\n }\n ],\n "recent_dataset_count_sum": 0,\n "standard_dataset_count": 646,\n "bk_biz_count": 196,\n "recent_standard_dataset_count_details": [\n {\n "standard_dataset_count": 0,\n "day": "20190719"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190720"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190721"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190722"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190723"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190724"\n },\n {\n "standard_dataset_count": 0,\n "day": "20190725"\n }\n ],\n "data_source_count": 0,\n "recent_data_source_count_details": [\n {\n "day": "20190719",\n "data_source_count": 0\n },\n {\n "day": "20190720",\n "data_source_count": 0\n },\n {\n "day": "20190721",\n "data_source_count": 0\n },\n {\n "day": "20190722",\n "data_source_count": 0\n },\n {\n "day": "20190723",\n "data_source_count": 0\n },\n {\n "day": "20190724",\n "data_source_count": 0\n },\n {\n "day": "20190725",\n 
"data_source_count": 0\n }\n ],\n "dataset_count": 646,\n "recent_standard_dataset_count_sum": 0,\n "recent_data_source_count_sum": 0\n },\n "result": true\n }\n '
result_dict = dmaction.datamap_recent_summary(params, connections['bkdata_basic_slave'])
dmaction.clear_bk_biz_id_list(result_dict)
return Response(result_dict)<|docstring|>@api {post} /datamanage/datamap/retrieve/datamap_recent_summary/ 查询汇总最近7天新增数据量接口
@apiVersion 0.1.0
@apiGroup DataMap
@apiName search_summary
@apiParam {Integer} bk_biz_id 业务
@apiParam {Integer} project_id 项目
@apiParam {List} tag_ids 标签code列表
@apiParam {String} keyword 搜索关键词
@apiParamExample {json} 参数样例:
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[],
"keyword":"",
"cal_type":["standard","only_standard"]
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"project_count": 1,
"recent_dataset_count_details": [
{
"dataset_count": 0,
"day": "20190719"
},
{
"dataset_count": 0,
"day": "20190720"
},
{
"dataset_count": 0,
"day": "20190721"
},
{
"dataset_count": 0,
"day": "20190722"
},
{
"dataset_count": 0,
"day": "20190723"
},
{
"dataset_count": 0,
"day": "20190724"
},
{
"dataset_count": 0,
"day": "20190725"
}
],
"recent_dataset_count_sum": 0,
"standard_dataset_count": 646,
"bk_biz_count": 196,
"recent_standard_dataset_count_details": [
{
"standard_dataset_count": 0,
"day": "20190719"
},
{
"standard_dataset_count": 0,
"day": "20190720"
},
{
"standard_dataset_count": 0,
"day": "20190721"
},
{
"standard_dataset_count": 0,
"day": "20190722"
},
{
"standard_dataset_count": 0,
"day": "20190723"
},
{
"standard_dataset_count": 0,
"day": "20190724"
},
{
"standard_dataset_count": 0,
"day": "20190725"
}
],
"data_source_count": 0,
"recent_data_source_count_details": [
{
"day": "20190719",
"data_source_count": 0
},
{
"day": "20190720",
"data_source_count": 0
},
{
"day": "20190721",
"data_source_count": 0
},
{
"day": "20190722",
"data_source_count": 0
},
{
"day": "20190723",
"data_source_count": 0
},
{
"day": "20190724",
"data_source_count": 0
},
{
"day": "20190725",
"data_source_count": 0
}
],
"dataset_count": 646,
"recent_standard_dataset_count_sum": 0,
"recent_data_source_count_sum": 0
},
"result": true
}<|endoftext|> |
858392c0315b51875bd4060e7c0dbd3f4eafa049f300d1a686ecc67877305811 | @list_route(methods=['post'], url_path='get_basic_list')
@params_valid(serializer=BasicListSerializer)
def get_basic_list(self, request, params):
    """Paged basic listing of datasets (result tables / raw data) matching
    the data-map filters, with the project/biz/dataset/data-source counters.

    POST /datamanage/datamap/retrieve/get_basic_list/
    Optional 'extra_retrieve' carries an expression used to attach extra
    detail to each entry (presumably an ERP expression — see
    dmaction.get_detail_via_erp).
    """
    detail_expression = params.get('extra_retrieve')
    # 'has_standard' is dropped before the dgraph query runs.
    params.pop('has_standard', None)
    listing = dmaction.floating_window_query_dgraph(
        params,
        connections['bkdata_basic_slave'],
        dmaction.NEED_DATA_SET_ID_DETAIL,
        need_only_uids=False,
        need_all_data_dict_list=True,
    )
    dmaction.parse_basic_list_dgraph_result(listing)
    if detail_expression:
        dmaction.get_detail_via_erp(listing, detail_expression)
    # presumably strips the internal per-biz id list from the payload — see dmaction
    dmaction.clear_bk_biz_id_list(listing)
    return Response(listing)
@apiVersion 0.1.0
@apiGroup DataMap
@apiName get_basic_list
@apiParamExample {json} 参数样例:
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[],
"keyword":"",
"tag_code":"online",
"me_type":"tag",
"cal_type":["standard","only_standard"],
"page":1,
"page_size":10,
"data_set_type":"all",//result_table、raw_data
"created_by":"xiaoming"
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"project_count": 1,
"bk_biz_count": 119,
"dataset_count": 2,
"data_source_count": 0,
"count":2,
"results": [
{"data_set_type":"result_table",
"data_set_id":"591_durant1115",
"is_standard":0},
{"data_set_type":"raw_data",
"data_set_id":"123",
"is_standard":0}
],
},
"result": true
} | src/api/datamanage/pro/datamap/views.py | get_basic_list | Chromico/bk-base | 84 | python | @list_route(methods=['post'], url_path='get_basic_list')
@params_valid(serializer=BasicListSerializer)
def get_basic_list(self, request, params):
    """Return the paged basic dataset list plus summary counters for the
    data-map filters.

    POST /datamanage/datamap/retrieve/get_basic_list/
    Body filters: bk_biz_id, project_id, tag_ids, keyword, tag_code,
    me_type, cal_type, page, page_size, data_set_type, created_by,
    extra_retrieve.
    """
    extra_retrieve_dict = params.get('extra_retrieve')
    # 'has_standard' is not passed down to the dgraph query.
    params.pop('has_standard', None)
    result_dict = dmaction.floating_window_query_dgraph(params, connections['bkdata_basic_slave'], dmaction.NEED_DATA_SET_ID_DETAIL, need_only_uids=False, need_all_data_dict_list=True)
    dmaction.parse_basic_list_dgraph_result(result_dict)
    if extra_retrieve_dict:
        # Attach per-entry detail via the ERP expression supplied by the caller.
        dmaction.get_detail_via_erp(result_dict, extra_retrieve_dict)
    # presumably removes the internal bk_biz_id list before returning — see dmaction
    dmaction.clear_bk_biz_id_list(result_dict)
    return Response(result_dict)
@params_valid(serializer=BasicListSerializer)
def get_basic_list(self, request, params):
'\n @api {post} /datamanage/datamap/retrieve/get_basic_list/ 获取基础列表信息\n @apiVersion 0.1.0\n @apiGroup DataMap\n @apiName get_basic_list\n\n @apiParamExample {json} 参数样例:\n {\n "bk_biz_id":null,\n "project_id":null,\n "tag_ids":[],\n "keyword":,\n "tag_code":"online",\n "me_type":"tag",\n "cal_type":["standard","only_standard"],\n "page":1,\n "page_size":10,\n "data_set_type":"all",//result_table、raw_data\n "created_by":"xiaoming"\n }\n\n @apiSuccessExample Success-Response:\n HTTP/1.1 200 OK\n {\n "errors": null,\n "message": "ok",\n "code": "1500200",\n "data": {\n "project_count": 1,\n "bk_biz_count": 119,\n "dataset_count": 2,\n "data_source_count": 0,\n "count":2,\n "results": [\n {"data_set_type":"result_table",\n "data_set_id":"591_durant1115",\n "is_standard":0},\n {"data_set_type":"raw_data",\n "data_set_id":"123",\n "is_standard":0}\n ],\n\n },\n "result": true\n }\n '
extra_retrieve_dict = params.get('extra_retrieve')
params.pop('has_standard', None)
result_dict = dmaction.floating_window_query_dgraph(params, connections['bkdata_basic_slave'], dmaction.NEED_DATA_SET_ID_DETAIL, need_only_uids=False, need_all_data_dict_list=True)
dmaction.parse_basic_list_dgraph_result(result_dict)
if extra_retrieve_dict:
dmaction.get_detail_via_erp(result_dict, extra_retrieve_dict)
dmaction.clear_bk_biz_id_list(result_dict)
return Response(result_dict)<|docstring|>@api {post} /datamanage/datamap/retrieve/get_basic_list/ 获取基础列表信息
@apiVersion 0.1.0
@apiGroup DataMap
@apiName get_basic_list
@apiParamExample {json} 参数样例:
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[],
"keyword":"",
"tag_code":"online",
"me_type":"tag",
"cal_type":["standard","only_standard"],
"page":1,
"page_size":10,
"data_set_type":"all",//result_table、raw_data
"created_by":"xiaoming"
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"project_count": 1,
"bk_biz_count": 119,
"dataset_count": 2,
"data_source_count": 0,
"count":2,
"results": [
{"data_set_type":"result_table",
"data_set_id":"591_durant1115",
"is_standard":0},
{"data_set_type":"raw_data",
"data_set_id":"123",
"is_standard":0}
],
},
"result": true
}<|endoftext|> |
b99d23d474ad87fa3057a447d527e978bb5012a88ce76c77a48785788683b70f | @list_route(methods=['post'], url_path='get_data_dict_list')
@params_valid(serializer=BasicListSerializer)
def get_data_dict_list(self, request, params):
    """Detailed data-dictionary listing for the data map.

    POST /datamanage/datamap/retrieve/get_data_dict_list/

    Runs the floating-window dgraph query, marks every dataset bound to an
    online standard version with ``is_standard = 1``, optionally attaches
    extra detail via the expression in ``extra_retrieve``, and returns only
    the ``results`` list.
    """
    extra_retrieve_dict = params.get('extra_retrieve')
    # 'has_standard' is not passed down to the dgraph query.
    params.pop('has_standard', None)
    result_dict = dmaction.floating_window_query_dgraph(
        params, connections['bkdata_basic_slave'], dmaction.NEED_DATA_SET_ID_DETAIL, need_only_uids=True
    )
    data_set_list = result_dict.get('data_set_list', [])
    if data_set_list:
        # Build the IN (...) clause by quoting each id explicitly.  The former
        # str(tuple(...)) rendering emitted invalid SQL for a single-element
        # tuple ("('x',)" — the reason for the old len() != 1 special case)
        # and, under Python 3, rendered .encode('utf-8') values as "b'x'".
        # Quoting each id uniformly fixes both and removes the special case.
        # NOTE(review): ids come from the dgraph query result rather than raw
        # request input, but a parameterized query would still be preferable
        # if the SQL helper supports one.
        in_clause = ', '.join(
            "'{}'".format(each_dataset.get('data_set_id')) for each_dataset in data_set_list
        )
        sql = (
            "select data_set_id from dm_task_detail where active=1 and data_set_id in ({}) "
            "and standard_version_id in(select b.id standard_version_id from dm_standard_config a, "
            "dm_standard_version_config b,tag_target c where b.standard_version_status='online' "
            "and a.id=b.standard_id and a.id=c.target_id and a.active=1 and c.active=1 "
            "and c.tag_code=c.source_tag_code and c.target_type='standard')"
        ).format(in_clause)
        logger.info('standard sql:%s' % sql)
        standard_rows = tagaction.query_direct_sql_to_map_list(connections['bkdata_basic_slave'], sql)
        standard_ids = set(row['data_set_id'] for row in standard_rows)
        # Flag every dataset that is bound to an online standard version.
        for each_dataset in data_set_list:
            if each_dataset['data_set_id'] in standard_ids:
                each_dataset['is_standard'] = 1
    dmaction.parse_basic_list_dgraph_result(result_dict)
    if extra_retrieve_dict:
        # Attach per-entry detail via the ERP expression supplied by the caller.
        dmaction.get_detail_via_erp(result_dict, extra_retrieve_dict)
    # presumably removes the internal bk_biz_id list before returning — see dmaction
    dmaction.clear_bk_biz_id_list(result_dict)
    return Response(result_dict['results'])
@apiVersion 0.1.0
@apiGroup DataMap
@apiName get_basic_list
@apiParamExample {json} 参数样例:
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[],
"keyword":"",
"tag_code":"online",
"me_type":"tag",
"cal_type":["standard","only_standard"],
"page":1,
"page_size":10,
"data_set_type":"all",//result_table、raw_data
"created_by":"xiaoming"
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"results": [
{"data_set_type":"result_table",
"data_set_id":"591_durant1115",
"is_standard":0},
{"data_set_type":"raw_data",
"data_set_id":"123",
"is_standard":0}
]
},
"result": true
} | src/api/datamanage/pro/datamap/views.py | get_data_dict_list | Chromico/bk-base | 84 | python | @list_route(methods=['post'], url_path='get_data_dict_list')
@params_valid(serializer=BasicListSerializer)
def get_data_dict_list(self, request, params):
    """Return the detailed data-dictionary list for the data-map filters.

    POST /datamanage/datamap/retrieve/get_data_dict_list/
    Marks datasets bound to an online standard version with is_standard=1
    and returns only the 'results' list.
    """
    extra_retrieve_dict = params.get('extra_retrieve')
    # 'has_standard' is not passed down to the dgraph query.
    params.pop('has_standard', None)
    result_dict = dmaction.floating_window_query_dgraph(params, connections['bkdata_basic_slave'], dmaction.NEED_DATA_SET_ID_DETAIL, need_only_uids=True)
    if result_dict.get('data_set_list', []):
        if (len(result_dict.get('data_set_list', [])) != 1):
            # Multi-id case: render the ids as a tuple for the SQL IN clause.
            # NOTE(review): under Python 3 the .encode('utf-8') branch yields
            # bytes, which str(tuple) renders as b'...' inside the SQL; the
            # single-element branch below exists because str(tuple) of one
            # element would emit a trailing comma.  Consider joining quoted
            # ids instead of relying on tuple repr.
            data_set_list = [(each_dataset.get('data_set_id').encode('utf-8') if (each_dataset.get('data_set_type') == 'result_table') else str(each_dataset.get('data_set_id'))) for each_dataset in result_dict.get('data_set_list', [])]
            data_set_tuple = tuple(data_set_list)
            sql = "select data_set_id from dm_task_detail where active=1 and data_set_id in {}\n                and standard_version_id in(select b.id standard_version_id from dm_standard_config a,\n                dm_standard_version_config b,tag_target c where b.standard_version_status='online'\n                and a.id=b.standard_id and a.id=c.target_id and a.active=1 and c.active=1\n                and c.tag_code=c.source_tag_code and c.target_type='standard')".format(data_set_tuple)
        else:
            # Single-id case: write the IN ('...') clause by hand.
            sql = "select data_set_id from dm_task_detail where active=1 and data_set_id in ('{}')\n                and standard_version_id in(select b.id standard_version_id from dm_standard_config a,\n                dm_standard_version_config b,tag_target c where b.standard_version_status='online'\n                and a.id=b.standard_id and a.id=c.target_id and a.active=1 and c.active=1\n                and c.tag_code=c.source_tag_code and c.target_type='standard')".format(result_dict.get('data_set_list')[0].get('data_set_id'))
        logger.info(('standard sql:%s' % sql))
        stan_data_set_list_tmp = tagaction.query_direct_sql_to_map_list(connections['bkdata_basic_slave'], sql)
        stan_data_set_list = [each_dataset['data_set_id'] for each_dataset in stan_data_set_list_tmp]
        # Flag every dataset bound to an online standard version.
        for each_dataset in result_dict.get('data_set_list', []):
            if (each_dataset['data_set_id'] in stan_data_set_list):
                each_dataset['is_standard'] = 1
    dmaction.parse_basic_list_dgraph_result(result_dict)
    if extra_retrieve_dict:
        # Attach per-entry detail via the ERP expression supplied by the caller.
        dmaction.get_detail_via_erp(result_dict, extra_retrieve_dict)
    # presumably removes the internal bk_biz_id list before returning — see dmaction
    dmaction.clear_bk_biz_id_list(result_dict)
    return Response(result_dict['results'])
@params_valid(serializer=BasicListSerializer)
def get_data_dict_list(self, request, params):
'\n @api {post} /datamanage/datamap/retrieve/get_data_dict_list/ 获取数据字典列表详情信息\n @apiVersion 0.1.0\n @apiGroup DataMap\n @apiName get_basic_list\n\n @apiParamExample {json} 参数样例:\n {\n "bk_biz_id":null,\n "project_id":null,\n "tag_ids":[],\n "keyword":,\n "tag_code":"online",\n "me_type":"tag",\n "cal_type":["standard","only_standard"],\n "page":1,\n "page_size":10,\n "data_set_type":"all",//result_table、raw_data\n "created_by":"xiaoming"\n }\n\n @apiSuccessExample Success-Response:\n HTTP/1.1 200 OK\n {\n "errors": null,\n "message": "ok",\n "code": "1500200",\n "data": {\n "results": [\n {"data_set_type":"result_table",\n "data_set_id":"591_durant1115",\n "is_standard":0},\n {"data_set_type":"raw_data",\n "data_set_id":"123",\n "is_standard":0}\n ]\n\n },\n "result": true\n }\n '
extra_retrieve_dict = params.get('extra_retrieve')
params.pop('has_standard', None)
result_dict = dmaction.floating_window_query_dgraph(params, connections['bkdata_basic_slave'], dmaction.NEED_DATA_SET_ID_DETAIL, need_only_uids=True)
if result_dict.get('data_set_list', []):
if (len(result_dict.get('data_set_list', [])) != 1):
data_set_list = [(each_dataset.get('data_set_id').encode('utf-8') if (each_dataset.get('data_set_type') == 'result_table') else str(each_dataset.get('data_set_id'))) for each_dataset in result_dict.get('data_set_list', [])]
data_set_tuple = tuple(data_set_list)
sql = "select data_set_id from dm_task_detail where active=1 and data_set_id in {}\n and standard_version_id in(select b.id standard_version_id from dm_standard_config a,\n dm_standard_version_config b,tag_target c where b.standard_version_status='online'\n and a.id=b.standard_id and a.id=c.target_id and a.active=1 and c.active=1\n and c.tag_code=c.source_tag_code and c.target_type='standard')".format(data_set_tuple)
else:
sql = "select data_set_id from dm_task_detail where active=1 and data_set_id in ('{}')\n and standard_version_id in(select b.id standard_version_id from dm_standard_config a,\n dm_standard_version_config b,tag_target c where b.standard_version_status='online'\n and a.id=b.standard_id and a.id=c.target_id and a.active=1 and c.active=1\n and c.tag_code=c.source_tag_code and c.target_type='standard')".format(result_dict.get('data_set_list')[0].get('data_set_id'))
logger.info(('standard sql:%s' % sql))
stan_data_set_list_tmp = tagaction.query_direct_sql_to_map_list(connections['bkdata_basic_slave'], sql)
stan_data_set_list = [each_dataset['data_set_id'] for each_dataset in stan_data_set_list_tmp]
for each_dataset in result_dict.get('data_set_list', []):
if (each_dataset['data_set_id'] in stan_data_set_list):
each_dataset['is_standard'] = 1
dmaction.parse_basic_list_dgraph_result(result_dict)
if extra_retrieve_dict:
dmaction.get_detail_via_erp(result_dict, extra_retrieve_dict)
dmaction.clear_bk_biz_id_list(result_dict)
return Response(result_dict['results'])<|docstring|>@api {post} /datamanage/datamap/retrieve/get_data_dict_list/ 获取数据字典列表详情信息
@apiVersion 0.1.0
@apiGroup DataMap
@apiName get_basic_list
@apiParamExample {json} 参数样例:
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[],
"keyword":"",
"tag_code":"online",
"me_type":"tag",
"cal_type":["standard","only_standard"],
"page":1,
"page_size":10,
"data_set_type":"all",//result_table、raw_data
"created_by":"xiaoming"
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"results": [
{"data_set_type":"result_table",
"data_set_id":"591_durant1115",
"is_standard":0},
{"data_set_type":"raw_data",
"data_set_id":"123",
"is_standard":0}
]
},
"result": true
}<|endoftext|> |
98f6eb32f149441a4ef432ce57bc17aa95e00bc706b812b8998488f214bd862c | @list_route(methods=['post'], url_path='get_data_dict_count')
@params_valid(serializer=BasicListSerializer)
def get_data_dict_count(self, request, params):
'\n @api {post} /datamanage/datamap/retrieve/get_data_dict_count/ 获取数据字典列表统计信息\n @apiVersion 0.1.0\n @apiGroup DataMap\n @apiName get_basic_list\n\n @apiParamExample {json} 参数样例:\n {\n "bk_biz_id":null,\n "project_id":null,\n "tag_ids":[],\n "keyword":"",\n "tag_code":"online",\n "me_type":"tag",\n "cal_type":["standard","only_standard"],\n "page":1,\n "page_size":10,\n "data_set_type":"all",//result_table、raw_data\n "created_by":"xiaoming"\n }\n\n @apiSuccessExample Success-Response:\n HTTP/1.1 200 OK\n {\n "errors": null,\n "message": "ok",\n "code": "1500200",\n "data": {\n "results": [\n {"data_set_type":"result_table",\n "data_set_id":"591_durant1115",\n "is_standard":0},\n {"data_set_type":"raw_data",\n "data_set_id":"123",\n "is_standard":0}\n ]\n\n },\n "result": true\n }\n '
params.pop('has_standard', None)
result_dict = dmaction.floating_window_query_dgraph(params, connections['bkdata_basic_slave'], dmaction.NEED_DATA_SET_ID_DETAIL, need_only_uids=False)
dmaction.parse_basic_list_dgraph_result(result_dict)
dmaction.clear_bk_biz_id_list(result_dict)
result_dict.pop('results', None)
return Response(result_dict) | @api {post} /datamanage/datamap/retrieve/get_data_dict_count/ 获取数据字典列表统计信息
@apiVersion 0.1.0
@apiGroup DataMap
@apiName get_basic_list
@apiParamExample {json} 参数样例:
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[],
"keyword":"",
"tag_code":"online",
"me_type":"tag",
"cal_type":["standard","only_standard"],
"page":1,
"page_size":10,
"data_set_type":"all",//result_table、raw_data
"created_by":"xiaoming"
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"results": [
{"data_set_type":"result_table",
"data_set_id":"591_durant1115",
"is_standard":0},
{"data_set_type":"raw_data",
"data_set_id":"123",
"is_standard":0}
]
},
"result": true
} | src/api/datamanage/pro/datamap/views.py | get_data_dict_count | Chromico/bk-base | 84 | python | @list_route(methods=['post'], url_path='get_data_dict_count')
@params_valid(serializer=BasicListSerializer)
def get_data_dict_count(self, request, params):
'\n @api {post} /datamanage/datamap/retrieve/get_data_dict_count/ 获取数据字典列表统计信息\n @apiVersion 0.1.0\n @apiGroup DataMap\n @apiName get_basic_list\n\n @apiParamExample {json} 参数样例:\n {\n "bk_biz_id":null,\n "project_id":null,\n "tag_ids":[],\n "keyword":,\n "tag_code":"online",\n "me_type":"tag",\n "cal_type":["standard","only_standard"],\n "page":1,\n "page_size":10,\n "data_set_type":"all",//result_table、raw_data\n "created_by":"xiaoming"\n }\n\n @apiSuccessExample Success-Response:\n HTTP/1.1 200 OK\n {\n "errors": null,\n "message": "ok",\n "code": "1500200",\n "data": {\n "results": [\n {"data_set_type":"result_table",\n "data_set_id":"591_durant1115",\n "is_standard":0},\n {"data_set_type":"raw_data",\n "data_set_id":"123",\n "is_standard":0}\n ]\n\n },\n "result": true\n }\n '
params.pop('has_standard', None)
result_dict = dmaction.floating_window_query_dgraph(params, connections['bkdata_basic_slave'], dmaction.NEED_DATA_SET_ID_DETAIL, need_only_uids=False)
dmaction.parse_basic_list_dgraph_result(result_dict)
dmaction.clear_bk_biz_id_list(result_dict)
result_dict.pop('results', None)
return Response(result_dict) | @list_route(methods=['post'], url_path='get_data_dict_count')
@params_valid(serializer=BasicListSerializer)
def get_data_dict_count(self, request, params):
'\n @api {post} /datamanage/datamap/retrieve/get_data_dict_count/ 获取数据字典列表统计信息\n @apiVersion 0.1.0\n @apiGroup DataMap\n @apiName get_basic_list\n\n @apiParamExample {json} 参数样例:\n {\n "bk_biz_id":null,\n "project_id":null,\n "tag_ids":[],\n "keyword":,\n "tag_code":"online",\n "me_type":"tag",\n "cal_type":["standard","only_standard"],\n "page":1,\n "page_size":10,\n "data_set_type":"all",//result_table、raw_data\n "created_by":"xiaoming"\n }\n\n @apiSuccessExample Success-Response:\n HTTP/1.1 200 OK\n {\n "errors": null,\n "message": "ok",\n "code": "1500200",\n "data": {\n "results": [\n {"data_set_type":"result_table",\n "data_set_id":"591_durant1115",\n "is_standard":0},\n {"data_set_type":"raw_data",\n "data_set_id":"123",\n "is_standard":0}\n ]\n\n },\n "result": true\n }\n '
params.pop('has_standard', None)
result_dict = dmaction.floating_window_query_dgraph(params, connections['bkdata_basic_slave'], dmaction.NEED_DATA_SET_ID_DETAIL, need_only_uids=False)
dmaction.parse_basic_list_dgraph_result(result_dict)
dmaction.clear_bk_biz_id_list(result_dict)
result_dict.pop('results', None)
return Response(result_dict)<|docstring|>@api {post} /datamanage/datamap/retrieve/get_data_dict_count/ 获取数据字典列表统计信息
@apiVersion 0.1.0
@apiGroup DataMap
@apiName get_basic_list
@apiParamExample {json} 参数样例:
{
"bk_biz_id":null,
"project_id":null,
"tag_ids":[],
"keyword":"",
"tag_code":"online",
"me_type":"tag",
"cal_type":["standard","only_standard"],
"page":1,
"page_size":10,
"data_set_type":"all",//result_table、raw_data
"created_by":"xiaoming"
}
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{
"errors": null,
"message": "ok",
"code": "1500200",
"data": {
"results": [
{"data_set_type":"result_table",
"data_set_id":"591_durant1115",
"is_standard":0},
{"data_set_type":"raw_data",
"data_set_id":"123",
"is_standard":0}
]
},
"result": true
}<|endoftext|> |
2fb53c560f9dfbb5537ad66413829a52eae914b3348e378aea05fc2519d12944 | def reverse_between(head, m, n):
'\n :type head: ListNode\n :type m: int\n :type n: int\n :rtype: ListNode\n 1 ≤ m ≤ n ≤ length of list.\n '
if (not head):
return None
dummy = ListNode(0)
dummy.next = head
pp = dummy
pos = 1
while (pos < m):
pp = pp.next
pos += 1
p = pp.next
while (pos < n):
pn = p.next
p.next = pn.next
pn.next = pp.next
pp.next = pn
pos += 1
return dummy.next | :type head: ListNode
:type m: int
:type n: int
:rtype: ListNode
1 ≤ m ≤ n ≤ length of list. | crack-data-structures-and-algorithms/leetcode/python-impl/reverse_linked_list_II_q92.py | reverse_between | Watch-Later/Eureka | 20 | python | def reverse_between(head, m, n):
'\n :type head: ListNode\n :type m: int\n :type n: int\n :rtype: ListNode\n 1 ≤ m ≤ n ≤ length of list.\n '
if (not head):
return None
dummy = ListNode(0)
dummy.next = head
pp = dummy
pos = 1
while (pos < m):
pp = pp.next
pos += 1
p = pp.next
while (pos < n):
pn = p.next
p.next = pn.next
pn.next = pp.next
pp.next = pn
pos += 1
return dummy.next | def reverse_between(head, m, n):
'\n :type head: ListNode\n :type m: int\n :type n: int\n :rtype: ListNode\n 1 ≤ m ≤ n ≤ length of list.\n '
if (not head):
return None
dummy = ListNode(0)
dummy.next = head
pp = dummy
pos = 1
while (pos < m):
pp = pp.next
pos += 1
p = pp.next
while (pos < n):
pn = p.next
p.next = pn.next
pn.next = pp.next
pp.next = pn
pos += 1
return dummy.next<|docstring|>:type head: ListNode
:type m: int
:type n: int
:rtype: ListNode
1 ≤ m ≤ n ≤ length of list.<|endoftext|> |
dc7f4f9b3022f5170362993cc3e8e030c8648cbba3b6bc40c9106c87119fd3af | @classmethod
def api_field_from_django_field(cls, f, default=CharField):
'\n Overrides default field handling to support custom GeometryApiField.\n '
if isinstance(f, GeometryField):
return GeometryApiField
return super(ModelResource, cls).api_field_from_django_field(f, default) | Overrides default field handling to support custom GeometryApiField. | tastypie/contrib/gis/resources.py | api_field_from_django_field | pexip/os-django-tastypie | 1,570 | python | @classmethod
def api_field_from_django_field(cls, f, default=CharField):
'\n \n '
if isinstance(f, GeometryField):
return GeometryApiField
return super(ModelResource, cls).api_field_from_django_field(f, default) | @classmethod
def api_field_from_django_field(cls, f, default=CharField):
'\n \n '
if isinstance(f, GeometryField):
return GeometryApiField
return super(ModelResource, cls).api_field_from_django_field(f, default)<|docstring|>Overrides default field handling to support custom GeometryApiField.<|endoftext|> |
fee3f1e2d8e6f1682ad4cdbbe964950c71fab559ce722354b981f82dbb895d32 | @property
def gyro(self):
'\n x, y, z angular momentum tuple floats, rescaled appropriately for\n range selected in rad/s\n '
raw = self.gyro_raw
return tuple((radians((self.scale * v)) for v in raw)) | x, y, z angular momentum tuple floats, rescaled appropriately for
range selected in rad/s | l3gd20.py | gyro | Abrown111/T17_SLI_Payload | 0 | python | @property
def gyro(self):
'\n x, y, z angular momentum tuple floats, rescaled appropriately for\n range selected in rad/s\n '
raw = self.gyro_raw
return tuple((radians((self.scale * v)) for v in raw)) | @property
def gyro(self):
'\n x, y, z angular momentum tuple floats, rescaled appropriately for\n range selected in rad/s\n '
raw = self.gyro_raw
return tuple((radians((self.scale * v)) for v in raw))<|docstring|>x, y, z angular momentum tuple floats, rescaled appropriately for
range selected in rad/s<|endoftext|> |
1f9f65a73b4335136d2aafe6c20696698b224dedbd6f4e552acf7ac901a754e9 | def write_register(self, register, value):
'\n Update a register with a byte value\n\n :param int register: which device register to write\n :param value: a byte to write\n '
self.buffer[0] = register
self.buffer[1] = value
self.i2c.writeto(self.device_address, self.buffer) | Update a register with a byte value
:param int register: which device register to write
:param value: a byte to write | l3gd20.py | write_register | Abrown111/T17_SLI_Payload | 0 | python | def write_register(self, register, value):
'\n Update a register with a byte value\n\n :param int register: which device register to write\n :param value: a byte to write\n '
self.buffer[0] = register
self.buffer[1] = value
self.i2c.writeto(self.device_address, self.buffer) | def write_register(self, register, value):
'\n Update a register with a byte value\n\n :param int register: which device register to write\n :param value: a byte to write\n '
self.buffer[0] = register
self.buffer[1] = value
self.i2c.writeto(self.device_address, self.buffer)<|docstring|>Update a register with a byte value
:param int register: which device register to write
:param value: a byte to write<|endoftext|> |
8404a20daa967e48c75ae3af5174be44b0a6f9fac625fa8a3afe4532b4f8a33f | def read_register(self, register):
'\n Returns a byte value from a register\n\n :param register: the register to read a byte\n '
self.buffer[0] = register
self.i2c.writeto(self.device_address, self.buffer)
self.i2c.readfrom_into(self.device_address, self.buffer)
return self.buffer[1] | Returns a byte value from a register
:param register: the register to read a byte | l3gd20.py | read_register | Abrown111/T17_SLI_Payload | 0 | python | def read_register(self, register):
'\n Returns a byte value from a register\n\n :param register: the register to read a byte\n '
self.buffer[0] = register
self.i2c.writeto(self.device_address, self.buffer)
self.i2c.readfrom_into(self.device_address, self.buffer)
return self.buffer[1] | def read_register(self, register):
'\n Returns a byte value from a register\n\n :param register: the register to read a byte\n '
self.buffer[0] = register
self.i2c.writeto(self.device_address, self.buffer)
self.i2c.readfrom_into(self.device_address, self.buffer)
return self.buffer[1]<|docstring|>Returns a byte value from a register
:param register: the register to read a byte<|endoftext|> |
08819c73094572f39d68019bcade602b4e8c2ef62401806352a41f2ea0d1becd | @click.option('--release', '-r', required=True, help='Version of Arcus CAPI to run')
@click.command(name='pull')
def pull(release):
" Pull Arcus CAPI from Breqwatr's private repository "
image = f'breqwatr/arcus-capi:{release}'
ecr.pull(image) | Pull Arcus CAPI from Breqwatr's private repository | voithos/cli/service/arcus/capi.py | pull | waqasnazir03/voithos | 3 | python | @click.option('--release', '-r', required=True, help='Version of Arcus CAPI to run')
@click.command(name='pull')
def pull(release):
" "
image = f'breqwatr/arcus-capi:{release}'
ecr.pull(image) | @click.option('--release', '-r', required=True, help='Version of Arcus CAPI to run')
@click.command(name='pull')
def pull(release):
" "
image = f'breqwatr/arcus-capi:{release}'
ecr.pull(image)<|docstring|>Pull Arcus CAPI from Breqwatr's private repository<|endoftext|> |
9d1f7bfc432471d85bbd60b81dfd9adf5fbb70721772e1ff84ab624a988b6a48 | @click.option('--release', '-r', required=True, help='Version of Arcus CAPI to run')
@click.option('--kubeconfig', '-k', required=True, help='Path to kubeconfig file')
@click.option('--openrc', '-o', required=True, help='Path to OpenStack openrc file')
@click.option('--cacert', required=False, help='Optional path to CA certificate file')
@click.command(name='start')
def start(release, kubeconfig, openrc, cacert):
' Launch the arcus-capi service '
click.echo('starting arcus capi')
arcus_capi.start(release=release, kubeconfig_path=kubeconfig, openrc_path=openrc, cacert=cacert) | Launch the arcus-capi service | voithos/cli/service/arcus/capi.py | start | waqasnazir03/voithos | 3 | python | @click.option('--release', '-r', required=True, help='Version of Arcus CAPI to run')
@click.option('--kubeconfig', '-k', required=True, help='Path to kubeconfig file')
@click.option('--openrc', '-o', required=True, help='Path to OpenStack openrc file')
@click.option('--cacert', required=False, help='Optional path to CA certificate file')
@click.command(name='start')
def start(release, kubeconfig, openrc, cacert):
' '
click.echo('starting arcus capi')
arcus_capi.start(release=release, kubeconfig_path=kubeconfig, openrc_path=openrc, cacert=cacert) | @click.option('--release', '-r', required=True, help='Version of Arcus CAPI to run')
@click.option('--kubeconfig', '-k', required=True, help='Path to kubeconfig file')
@click.option('--openrc', '-o', required=True, help='Path to OpenStack openrc file')
@click.option('--cacert', required=False, help='Optional path to CA certificate file')
@click.command(name='start')
def start(release, kubeconfig, openrc, cacert):
' '
click.echo('starting arcus capi')
arcus_capi.start(release=release, kubeconfig_path=kubeconfig, openrc_path=openrc, cacert=cacert)<|docstring|>Launch the arcus-capi service<|endoftext|> |
bbb16cb42b2ea0549c673c00bfcb9bd930d81043a1ae1cc89e3ba1a9106b74b3 | def get_capi_group():
' return the arcus group function '
@click.group(name='capi')
def capi_group():
' Arcus Cluster-API service '
capi_group.add_command(pull)
capi_group.add_command(start)
return capi_group | return the arcus group function | voithos/cli/service/arcus/capi.py | get_capi_group | waqasnazir03/voithos | 3 | python | def get_capi_group():
' '
@click.group(name='capi')
def capi_group():
' Arcus Cluster-API service '
capi_group.add_command(pull)
capi_group.add_command(start)
return capi_group | def get_capi_group():
' '
@click.group(name='capi')
def capi_group():
' Arcus Cluster-API service '
capi_group.add_command(pull)
capi_group.add_command(start)
return capi_group<|docstring|>return the arcus group function<|endoftext|> |
b5f81012070f8d0d69acd85c576729683be6a9cb78d206ab8a878bfeb899e062 | @click.group(name='capi')
def capi_group():
' Arcus Cluster-API service ' | Arcus Cluster-API service | voithos/cli/service/arcus/capi.py | capi_group | waqasnazir03/voithos | 3 | python | @click.group(name='capi')
def capi_group():
' ' | @click.group(name='capi')
def capi_group():
' '<|docstring|>Arcus Cluster-API service<|endoftext|> |
63929ab71ec463c742e3fda9efc43a1aadb6bb8d661419d83c5cfb23e88ad697 | def __init__(self, epsilon=0.05, init_dict=None, init_str=None):
'Constructor of the class. Can be use in three ways :\n - __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values\n for pyleecan type, -1 will call the default constructor\n - __init__ (init_dict = d) d must be a dictionnary with property names as keys\n - __init__ (init_str = s) s must be a string\n s is the file path to load\n\n ndarray or list can be given for Vector and Matrix\n object or dict can be given for pyleecan Object'
if (init_str is not None):
init_dict = load_init_dict(init_str)[1]
if (init_dict is not None):
assert (type(init_dict) is dict)
if ('epsilon' in list(init_dict.keys())):
epsilon = init_dict['epsilon']
self.parent = None
self.epsilon = epsilon
self._freeze() | Constructor of the class. Can be use in three ways :
- __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
for pyleecan type, -1 will call the default constructor
- __init__ (init_dict = d) d must be a dictionnary with property names as keys
- __init__ (init_str = s) s must be a string
s is the file path to load
ndarray or list can be given for Vector and Matrix
object or dict can be given for pyleecan Object | pyleecan/Classes/RefCell.py | __init__ | harshasunder-1/pyleecan | 2 | python | def __init__(self, epsilon=0.05, init_dict=None, init_str=None):
'Constructor of the class. Can be use in three ways :\n - __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values\n for pyleecan type, -1 will call the default constructor\n - __init__ (init_dict = d) d must be a dictionnary with property names as keys\n - __init__ (init_str = s) s must be a string\n s is the file path to load\n\n ndarray or list can be given for Vector and Matrix\n object or dict can be given for pyleecan Object'
if (init_str is not None):
init_dict = load_init_dict(init_str)[1]
if (init_dict is not None):
assert (type(init_dict) is dict)
if ('epsilon' in list(init_dict.keys())):
epsilon = init_dict['epsilon']
self.parent = None
self.epsilon = epsilon
self._freeze() | def __init__(self, epsilon=0.05, init_dict=None, init_str=None):
'Constructor of the class. Can be use in three ways :\n - __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values\n for pyleecan type, -1 will call the default constructor\n - __init__ (init_dict = d) d must be a dictionnary with property names as keys\n - __init__ (init_str = s) s must be a string\n s is the file path to load\n\n ndarray or list can be given for Vector and Matrix\n object or dict can be given for pyleecan Object'
if (init_str is not None):
init_dict = load_init_dict(init_str)[1]
if (init_dict is not None):
assert (type(init_dict) is dict)
if ('epsilon' in list(init_dict.keys())):
epsilon = init_dict['epsilon']
self.parent = None
self.epsilon = epsilon
self._freeze()<|docstring|>Constructor of the class. Can be use in three ways :
- __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
for pyleecan type, -1 will call the default constructor
- __init__ (init_dict = d) d must be a dictionnary with property names as keys
- __init__ (init_str = s) s must be a string
s is the file path to load
ndarray or list can be given for Vector and Matrix
object or dict can be given for pyleecan Object<|endoftext|> |
f43a02c304d73b93d4c8670bd9e20e21aab6131aa99371121ab5bb37bf7f5ea3 | def __str__(self):
'Convert this object in a readeable string (for print)'
RefCell_str = ''
if (self.parent is None):
RefCell_str += ('parent = None ' + linesep)
else:
RefCell_str += ((('parent = ' + str(type(self.parent))) + ' object') + linesep)
RefCell_str += (('epsilon = ' + str(self.epsilon)) + linesep)
return RefCell_str | Convert this object in a readeable string (for print) | pyleecan/Classes/RefCell.py | __str__ | harshasunder-1/pyleecan | 2 | python | def __str__(self):
RefCell_str =
if (self.parent is None):
RefCell_str += ('parent = None ' + linesep)
else:
RefCell_str += ((('parent = ' + str(type(self.parent))) + ' object') + linesep)
RefCell_str += (('epsilon = ' + str(self.epsilon)) + linesep)
return RefCell_str | def __str__(self):
RefCell_str =
if (self.parent is None):
RefCell_str += ('parent = None ' + linesep)
else:
RefCell_str += ((('parent = ' + str(type(self.parent))) + ' object') + linesep)
RefCell_str += (('epsilon = ' + str(self.epsilon)) + linesep)
return RefCell_str<|docstring|>Convert this object in a readeable string (for print)<|endoftext|> |
d950403c2f6222b5277623fcb64909912a4fe3741b76a971c58ac27b64a6e50b | def __eq__(self, other):
'Compare two objects (skip parent)'
if (type(other) != type(self)):
return False
if (other.epsilon != self.epsilon):
return False
return True | Compare two objects (skip parent) | pyleecan/Classes/RefCell.py | __eq__ | harshasunder-1/pyleecan | 2 | python | def __eq__(self, other):
if (type(other) != type(self)):
return False
if (other.epsilon != self.epsilon):
return False
return True | def __eq__(self, other):
if (type(other) != type(self)):
return False
if (other.epsilon != self.epsilon):
return False
return True<|docstring|>Compare two objects (skip parent)<|endoftext|> |
44db347202efc8be96ce40a11bf8d05a319c4bf77c52dfb39936ceeb8376ef30 | def as_dict(self):
'Convert this object in a json seriable dict (can be use in __init__)'
RefCell_dict = dict()
RefCell_dict['epsilon'] = self.epsilon
RefCell_dict['__class__'] = 'RefCell'
return RefCell_dict | Convert this object in a json seriable dict (can be use in __init__) | pyleecan/Classes/RefCell.py | as_dict | harshasunder-1/pyleecan | 2 | python | def as_dict(self):
RefCell_dict = dict()
RefCell_dict['epsilon'] = self.epsilon
RefCell_dict['__class__'] = 'RefCell'
return RefCell_dict | def as_dict(self):
RefCell_dict = dict()
RefCell_dict['epsilon'] = self.epsilon
RefCell_dict['__class__'] = 'RefCell'
return RefCell_dict<|docstring|>Convert this object in a json seriable dict (can be use in __init__)<|endoftext|> |
740ab7e86130bb903e7cdc3a0aa8225ef3c58687be2c55226237fd5075921ce0 | def _set_None(self):
'Set all the properties to None (except pyleecan object)'
self.epsilon = None | Set all the properties to None (except pyleecan object) | pyleecan/Classes/RefCell.py | _set_None | harshasunder-1/pyleecan | 2 | python | def _set_None(self):
self.epsilon = None | def _set_None(self):
self.epsilon = None<|docstring|>Set all the properties to None (except pyleecan object)<|endoftext|> |
ffb7ff655135963508bf0ced1865a7d69e0fedaa0a8ea3de7029464dc3e6f9c8 | def _get_epsilon(self):
'getter of epsilon'
return self._epsilon | getter of epsilon | pyleecan/Classes/RefCell.py | _get_epsilon | harshasunder-1/pyleecan | 2 | python | def _get_epsilon(self):
return self._epsilon | def _get_epsilon(self):
return self._epsilon<|docstring|>getter of epsilon<|endoftext|> |
cf82d9123209e4b26fca228ff1e242b41c2ab3c35032325694a2368297b3c81a | def _set_epsilon(self, value):
'setter of epsilon'
check_var('epsilon', value, 'float', Vmin=0.0)
self._epsilon = value | setter of epsilon | pyleecan/Classes/RefCell.py | _set_epsilon | harshasunder-1/pyleecan | 2 | python | def _set_epsilon(self, value):
check_var('epsilon', value, 'float', Vmin=0.0)
self._epsilon = value | def _set_epsilon(self, value):
check_var('epsilon', value, 'float', Vmin=0.0)
self._epsilon = value<|docstring|>setter of epsilon<|endoftext|> |
aa12c524e925a5e9670c5f9d66ae2d50d63d41dceb4644c098548addbaddd60e | def count_atoms(geom_file: str) -> int:
" Counts number of lines in file starting with 'atom' to allow for use of 'atom' or 'atom_frac'.\n NOTE: It is important lines containing atom coordinates have been deleted (not commented out) to create the defect.\n\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n \n Returns: \n Number of atoms identified in file (int)\n "
atom_num = 0
try:
with open(geom_file, 'r') as f:
for line in f:
if re.search('atom', line):
atom_num += 1
except IOError:
logger.info(('Could not open ' + str(geom_file)))
return atom_num | Counts number of lines in file starting with 'atom' to allow for use of 'atom' or 'atom_frac'.
NOTE: It is important lines containing atom coordinates have been deleted (not commented out) to create the defect.
Args:
geom_file: input crystal geometry file in format for FHI-aims (geometry.in)
Returns:
Number of atoms identified in file (int) | DefectSupercellAnalyses.py | count_atoms | skw32/DefectCorrectionsNotebook | 4 | python | def count_atoms(geom_file: str) -> int:
" Counts number of lines in file starting with 'atom' to allow for use of 'atom' or 'atom_frac'.\n NOTE: It is important lines containing atom coordinates have been deleted (not commented out) to create the defect.\n\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n \n Returns: \n Number of atoms identified in file (int)\n "
atom_num = 0
try:
with open(geom_file, 'r') as f:
for line in f:
if re.search('atom', line):
atom_num += 1
except IOError:
logger.info(('Could not open ' + str(geom_file)))
return atom_num | def count_atoms(geom_file: str) -> int:
" Counts number of lines in file starting with 'atom' to allow for use of 'atom' or 'atom_frac'.\n NOTE: It is important lines containing atom coordinates have been deleted (not commented out) to create the defect.\n\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n \n Returns: \n Number of atoms identified in file (int)\n "
atom_num = 0
try:
with open(geom_file, 'r') as f:
for line in f:
if re.search('atom', line):
atom_num += 1
except IOError:
logger.info(('Could not open ' + str(geom_file)))
return atom_num<|docstring|>Counts number of lines in file starting with 'atom' to allow for use of 'atom' or 'atom_frac'.
NOTE: It is important lines containing atom coordinates have been deleted (not commented out) to create the defect.
Args:
geom_file: input crystal geometry file in format for FHI-aims (geometry.in)
Returns:
Number of atoms identified in file (int)<|endoftext|> |
23556f7cd76efa4d316b526e4a216866a2d70969449755c0b1456f88b517efa8 | def read_lattice_vectors(geom_file: str) -> list:
" Function searches for lattice vectors using string 'lattice_vector'\n\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n\n Returns: \n lists for x, y and z components of a1, a2 and a3 lattice vectors\n E.g. x_vecs[1], y_vecs[1], z_vecs[1] would be x, y, z components of a2\n "
x_vecs = []
y_vecs = []
z_vecs = []
try:
with open(geom_file, 'r') as f:
for line in f:
if re.search('lattice_vector', line):
words = line.split()
x_vecs.append(float(words[1]))
y_vecs.append(float(words[2]))
z_vecs.append(float(words[3]))
if (line == None):
logger.info(('Warning! - No lattice vectors found in ' + str(geom_file)))
except IOError:
logger.info(('Could not open ' + str(geom_file)))
return (x_vecs, y_vecs, z_vecs) | Function searches for lattice vectors using string 'lattice_vector'
Args:
geom_file: input crystal geometry file in format for FHI-aims (geometry.in)
Returns:
lists for x, y and z components of a1, a2 and a3 lattice vectors
E.g. x_vecs[1], y_vecs[1], z_vecs[1] would be x, y, z components of a2 | DefectSupercellAnalyses.py | read_lattice_vectors | skw32/DefectCorrectionsNotebook | 4 | python | def read_lattice_vectors(geom_file: str) -> list:
" Function searches for lattice vectors using string 'lattice_vector'\n\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n\n Returns: \n lists for x, y and z components of a1, a2 and a3 lattice vectors\n E.g. x_vecs[1], y_vecs[1], z_vecs[1] would be x, y, z components of a2\n "
x_vecs = []
y_vecs = []
z_vecs = []
try:
with open(geom_file, 'r') as f:
for line in f:
if re.search('lattice_vector', line):
words = line.split()
x_vecs.append(float(words[1]))
y_vecs.append(float(words[2]))
z_vecs.append(float(words[3]))
if (line == None):
logger.info(('Warning! - No lattice vectors found in ' + str(geom_file)))
except IOError:
logger.info(('Could not open ' + str(geom_file)))
return (x_vecs, y_vecs, z_vecs) | def read_lattice_vectors(geom_file: str) -> list:
" Function searches for lattice vectors using string 'lattice_vector'\n\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n\n Returns: \n lists for x, y and z components of a1, a2 and a3 lattice vectors\n E.g. x_vecs[1], y_vecs[1], z_vecs[1] would be x, y, z components of a2\n "
x_vecs = []
y_vecs = []
z_vecs = []
try:
with open(geom_file, 'r') as f:
for line in f:
if re.search('lattice_vector', line):
words = line.split()
x_vecs.append(float(words[1]))
y_vecs.append(float(words[2]))
z_vecs.append(float(words[3]))
if (line == None):
logger.info(('Warning! - No lattice vectors found in ' + str(geom_file)))
except IOError:
logger.info(('Could not open ' + str(geom_file)))
return (x_vecs, y_vecs, z_vecs)<|docstring|>Function searches for lattice vectors using string 'lattice_vector'
Args:
geom_file: input crystal geometry file in format for FHI-aims (geometry.in)
Returns:
lists for x, y and z components of a1, a2 and a3 lattice vectors
E.g. x_vecs[1], y_vecs[1], z_vecs[1] would be x, y, z components of a2<|endoftext|> |
9f56927311d3f18e03d18c91aea590ccad7fba2c787a88d03bd2abc8c8517e79 | def lattice_vectors_array(geom_file: str) -> tuple:
" Function searches for lattice vectors using string 'lattice_vector' and returns them as numpy array.\n\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n\n Returns: \n Each component of the lattice vectors as elements of a 3x3 numpy array\n "
latt_vec_array = np.zeros([3, 3])
try:
with open(geom_file, 'r') as f:
i = 0
for line in f:
if re.search('lattice_vector', line):
words = line.split()
latt_vec_array[i][0] = float(words[1])
latt_vec_array[i][1] = float(words[2])
latt_vec_array[i][2] = float(words[3])
i += 1
if (line == None):
logger.info(('Warning! - No lattice vectors found in ' + str(geom_file)))
except IOError:
logger.info(('Could not open ' + str(geom_file)))
return latt_vec_array | Function searches for lattice vectors using string 'lattice_vector' and returns them as numpy array.
Args:
geom_file: input crystal geometry file in format for FHI-aims (geometry.in)
Returns:
Each component of the lattice vectors as elements of a 3x3 numpy array | DefectSupercellAnalyses.py | lattice_vectors_array | skw32/DefectCorrectionsNotebook | 4 | python | def lattice_vectors_array(geom_file: str) -> tuple:
" Function searches for lattice vectors using string 'lattice_vector' and returns them as numpy array.\n\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n\n Returns: \n Each component of the lattice vectors as elements of a 3x3 numpy array\n "
latt_vec_array = np.zeros([3, 3])
try:
with open(geom_file, 'r') as f:
i = 0
for line in f:
if re.search('lattice_vector', line):
words = line.split()
latt_vec_array[i][0] = float(words[1])
latt_vec_array[i][1] = float(words[2])
latt_vec_array[i][2] = float(words[3])
i += 1
if (line == None):
logger.info(('Warning! - No lattice vectors found in ' + str(geom_file)))
except IOError:
logger.info(('Could not open ' + str(geom_file)))
return latt_vec_array | def lattice_vectors_array(geom_file: str) -> tuple:
" Function searches for lattice vectors using string 'lattice_vector' and returns them as numpy array.\n\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n\n Returns: \n Each component of the lattice vectors as elements of a 3x3 numpy array\n "
latt_vec_array = np.zeros([3, 3])
try:
with open(geom_file, 'r') as f:
i = 0
for line in f:
if re.search('lattice_vector', line):
words = line.split()
latt_vec_array[i][0] = float(words[1])
latt_vec_array[i][1] = float(words[2])
latt_vec_array[i][2] = float(words[3])
i += 1
if (line == None):
logger.info(('Warning! - No lattice vectors found in ' + str(geom_file)))
except IOError:
logger.info(('Could not open ' + str(geom_file)))
return latt_vec_array<|docstring|>Function searches for lattice vectors using string 'lattice_vector' and returns them as numpy array.
Args:
geom_file: input crystal geometry file in format for FHI-aims (geometry.in)
Returns:
Each component of the lattice vectors as elements of a 3x3 numpy array<|endoftext|> |
d039224cd4bfb94f505ab12f07807788d39e96700534d62a8de29ddb30ae80e4 | def get_supercell_dimensions(geom_file: str) -> list:
" Take maximum of each direction to be supercell dimension for orthogonal unit cells\n (allowing for some numerical noise in off-diagonals)\n\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n\n Returns: \n List 'supercell_dims' where x = supercell_dims[0], y = supercell_dims[0], z = supercell_dims[2]\n "
(x_vecs, y_vecs, z_vecs) = read_lattice_vectors(geom_file)
supercell_dims = []
supercell_dims.append(max(x_vecs))
supercell_dims.append(max(y_vecs))
supercell_dims.append(max(z_vecs))
return supercell_dims | Take maximum of each direction to be supercell dimension for orthogonal unit cells
(allowing for some numerical noise in off-diagonals)
Args:
geom_file: input crystal geometry file in format for FHI-aims (geometry.in)
Returns:
List 'supercell_dims' where x = supercell_dims[0], y = supercell_dims[0], z = supercell_dims[2] | DefectSupercellAnalyses.py | get_supercell_dimensions | skw32/DefectCorrectionsNotebook | 4 | python | def get_supercell_dimensions(geom_file: str) -> list:
" Take maximum of each direction to be supercell dimension for orthogonal unit cells\n (allowing for some numerical noise in off-diagonals)\n\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n\n Returns: \n List 'supercell_dims' where x = supercell_dims[0], y = supercell_dims[0], z = supercell_dims[2]\n "
(x_vecs, y_vecs, z_vecs) = read_lattice_vectors(geom_file)
supercell_dims = []
supercell_dims.append(max(x_vecs))
supercell_dims.append(max(y_vecs))
supercell_dims.append(max(z_vecs))
return supercell_dims | def get_supercell_dimensions(geom_file: str) -> list:
" Take maximum of each direction to be supercell dimension for orthogonal unit cells\n (allowing for some numerical noise in off-diagonals)\n\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n\n Returns: \n List 'supercell_dims' where x = supercell_dims[0], y = supercell_dims[0], z = supercell_dims[2]\n "
(x_vecs, y_vecs, z_vecs) = read_lattice_vectors(geom_file)
supercell_dims = []
supercell_dims.append(max(x_vecs))
supercell_dims.append(max(y_vecs))
supercell_dims.append(max(z_vecs))
return supercell_dims<|docstring|>Take maximum of each direction to be supercell dimension for orthogonal unit cells
(allowing for some numerical noise in off-diagonals)
Args:
geom_file: input crystal geometry file in format for FHI-aims (geometry.in)
Returns:
List 'supercell_dims' where x = supercell_dims[0], y = supercell_dims[0], z = supercell_dims[2]<|endoftext|> |
51e6d528f028da69a7098381b5880ec4264f32e39d5b67c8a546590c64062159 | def read_atom_coords(geom_file: str) -> list:
" Function searches for atom using string 'atom' to allow for either 'atom' or 'atom_frac' in the file format\n If coordinates are fractional, these are converted to Cartesian coordinates\n\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n\n Returns: \n List of lists for all atom coordinates where atom_coords[row][col]\n Columns are: x, y, z, species and each row is a different atom\n "
atom_coords = []
latvec = lattice_vectors_array(geom_file)
try:
with open(geom_file, 'r') as f:
for line in f:
if re.search('atom', line):
words = line.split()
if (words[0] == 'atom_frac'):
cart_coords = (((float(words[1]) * latvec[(0, :)]) + (float(words[2]) * latvec[(1, :)])) + (float(words[3]) * latvec[(2, :)]))
atom_coords.append((cart_coords[0], cart_coords[1], cart_coords[2], str(words[4])))
else:
atom_coords.append((float(words[1]), float(words[2]), float(words[3]), str(words[4])))
if (line == None):
logger.info(('Warning! - No atom coordinates found in ' + str(geom_file)))
except IOError:
logger.info(('Could not open ' + str(geom_file)))
return atom_coords | Function searches for atom using string 'atom' to allow for either 'atom' or 'atom_frac' in the file format
If coordinates are fractional, these are converted to Cartesian coordinates
Args:
geom_file: input crystal geometry file in format for FHI-aims (geometry.in)
Returns:
List of lists for all atom coordinates where atom_coords[row][col]
Columns are: x, y, z, species and each row is a different atom | DefectSupercellAnalyses.py | read_atom_coords | skw32/DefectCorrectionsNotebook | 4 | python | def read_atom_coords(geom_file: str) -> list:
" Function searches for atom using string 'atom' to allow for either 'atom' or 'atom_frac' in the file format\n If coordinates are fractional, these are converted to Cartesian coordinates\n\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n\n Returns: \n List of lists for all atom coordinates where atom_coords[row][col]\n Columns are: x, y, z, species and each row is a different atom\n "
atom_coords = []
latvec = lattice_vectors_array(geom_file)
try:
with open(geom_file, 'r') as f:
for line in f:
if re.search('atom', line):
words = line.split()
if (words[0] == 'atom_frac'):
cart_coords = (((float(words[1]) * latvec[(0, :)]) + (float(words[2]) * latvec[(1, :)])) + (float(words[3]) * latvec[(2, :)]))
atom_coords.append((cart_coords[0], cart_coords[1], cart_coords[2], str(words[4])))
else:
atom_coords.append((float(words[1]), float(words[2]), float(words[3]), str(words[4])))
if (line == None):
logger.info(('Warning! - No atom coordinates found in ' + str(geom_file)))
except IOError:
logger.info(('Could not open ' + str(geom_file)))
return atom_coords | def read_atom_coords(geom_file: str) -> list:
" Function searches for atom using string 'atom' to allow for either 'atom' or 'atom_frac' in the file format\n If coordinates are fractional, these are converted to Cartesian coordinates\n\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n\n Returns: \n List of lists for all atom coordinates where atom_coords[row][col]\n Columns are: x, y, z, species and each row is a different atom\n "
atom_coords = []
latvec = lattice_vectors_array(geom_file)
try:
with open(geom_file, 'r') as f:
for line in f:
if re.search('atom', line):
words = line.split()
if (words[0] == 'atom_frac'):
cart_coords = (((float(words[1]) * latvec[(0, :)]) + (float(words[2]) * latvec[(1, :)])) + (float(words[3]) * latvec[(2, :)]))
atom_coords.append((cart_coords[0], cart_coords[1], cart_coords[2], str(words[4])))
else:
atom_coords.append((float(words[1]), float(words[2]), float(words[3]), str(words[4])))
if (line == None):
logger.info(('Warning! - No atom coordinates found in ' + str(geom_file)))
except IOError:
logger.info(('Could not open ' + str(geom_file)))
return atom_coords<|docstring|>Function searches for atom using string 'atom' to allow for either 'atom' or 'atom_frac' in the file format
If coordinates are fractional, these are converted to Cartesian coordinates
Args:
geom_file: input crystal geometry file in format for FHI-aims (geometry.in)
Returns:
List of lists for all atom coordinates where atom_coords[row][col]
Columns are: x, y, z, species and each row is a different atom<|endoftext|> |
f664afb2b6ac396db76a62985431552b66ad168928c01b8164d0c0a5e6e1388a | def coords_to_array(coord_list: list) -> tuple:
"\n Args: \n coords_list: list of atomic coordinates outputted from 'read_atom_coords' function\n\n Returns: \n Only the coordinates (not also species type as in read_atom_coords) as a numpy array\n "
coords_array = np.zeros([len(coord_list), 3])
for i in range(len(coord_list)):
(coords_array[i][0], coords_array[i][1], coords_array[i][2]) = (coord_list[i][0], coord_list[i][1], coord_list[i][2])
return coords_array | Args:
coords_list: list of atomic coordinates outputted from 'read_atom_coords' function
Returns:
Only the coordinates (not also species type as in read_atom_coords) as a numpy array | DefectSupercellAnalyses.py | coords_to_array | skw32/DefectCorrectionsNotebook | 4 | python | def coords_to_array(coord_list: list) -> tuple:
"\n Args: \n coords_list: list of atomic coordinates outputted from 'read_atom_coords' function\n\n Returns: \n Only the coordinates (not also species type as in read_atom_coords) as a numpy array\n "
coords_array = np.zeros([len(coord_list), 3])
for i in range(len(coord_list)):
(coords_array[i][0], coords_array[i][1], coords_array[i][2]) = (coord_list[i][0], coord_list[i][1], coord_list[i][2])
return coords_array | def coords_to_array(coord_list: list) -> tuple:
"\n Args: \n coords_list: list of atomic coordinates outputted from 'read_atom_coords' function\n\n Returns: \n Only the coordinates (not also species type as in read_atom_coords) as a numpy array\n "
coords_array = np.zeros([len(coord_list), 3])
for i in range(len(coord_list)):
(coords_array[i][0], coords_array[i][1], coords_array[i][2]) = (coord_list[i][0], coord_list[i][1], coord_list[i][2])
return coords_array<|docstring|>Args:
coords_list: list of atomic coordinates outputted from 'read_atom_coords' function
Returns:
Only the coordinates (not also species type as in read_atom_coords) as a numpy array<|endoftext|> |
7c45968fe942370a3db6f20245a3fccabbcacb2e561dcece8abd5a140249961b | def frac_coords_convert(geom_file: str) -> tuple:
'\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n \n Returns: \n Inverts lattice vectors to use for conversion from Cartesian to fractional coordinates.\n '
latvec = lattice_vectors_array(geom_file)
super_mat = latvec.transpose()
super_invmat = np.linalg.inv(super_mat)
def wrap_vec(v):
relvec = np.dot(super_invmat, v)
wrapvec = (((relvec + 1e-05) % 1.0) - 1e-05)
return np.dot(super_mat, wrapvec)
return (super_invmat, wrap_vec) | Args:
geom_file: input crystal geometry file in format for FHI-aims (geometry.in)
Returns:
Inverts lattice vectors to use for conversion from Cartesian to fractional coordinates. | DefectSupercellAnalyses.py | frac_coords_convert | skw32/DefectCorrectionsNotebook | 4 | python | def frac_coords_convert(geom_file: str) -> tuple:
'\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n \n Returns: \n Inverts lattice vectors to use for conversion from Cartesian to fractional coordinates.\n '
latvec = lattice_vectors_array(geom_file)
super_mat = latvec.transpose()
super_invmat = np.linalg.inv(super_mat)
def wrap_vec(v):
relvec = np.dot(super_invmat, v)
wrapvec = (((relvec + 1e-05) % 1.0) - 1e-05)
return np.dot(super_mat, wrapvec)
return (super_invmat, wrap_vec) | def frac_coords_convert(geom_file: str) -> tuple:
'\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n \n Returns: \n Inverts lattice vectors to use for conversion from Cartesian to fractional coordinates.\n '
latvec = lattice_vectors_array(geom_file)
super_mat = latvec.transpose()
super_invmat = np.linalg.inv(super_mat)
def wrap_vec(v):
relvec = np.dot(super_invmat, v)
wrapvec = (((relvec + 1e-05) % 1.0) - 1e-05)
return np.dot(super_mat, wrapvec)
return (super_invmat, wrap_vec)<|docstring|>Args:
geom_file: input crystal geometry file in format for FHI-aims (geometry.in)
Returns:
Inverts lattice vectors to use for conversion from Cartesian to fractional coordinates.<|endoftext|> |
4c2afccf600a775c860b8db5810308fd23c34763c1f580d8cfb5a9fdcd867d2b | def read_atom_coords_frac(geom_file: str) -> tuple:
" Function searches for atom using string 'atom' to allow for either 'atom' or 'atom_frac' in the file format\n If coordinates are not already fractional, they are converted to fractional for compatibility with CoFFEE code routines\n\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n \n Returns: \n Numpy array of fractional coordinates\n "
(super_invmat, wrap_vec) = frac_coords_convert(geom_file)
coord_num = count_atoms(geom_file)
coords = np.zeros([coord_num, 3])
frac_coords = np.zeros([coord_num, 3])
try:
with open(geom_file, 'r') as f:
i = 0
for line in f:
if re.search('atom', line):
words = line.split()
if (words[0] != 'atom_frac'):
coords[(i, :)] = (float(words[1]), float(words[2]), float(words[3]))
frac_coords[(i, :)] = wrap_vec(coords[(i, :)])
frac_coords[(i, :)] = np.dot(super_invmat, coords[(i, :)])
else:
frac_coords[(i, :)] = (float(words[1]), float(words[2]), float(words[3]))
if (line == None):
logger.info(('Warning! - No atom coordinates found in ' + str(geom_file)))
i += 1
except IOError:
logger.info(('Could not open ' + str(geom_file)))
return frac_coords | Function searches for atom using string 'atom' to allow for either 'atom' or 'atom_frac' in the file format
If coordinates are not already fractional, they are converted to fractional for compatibility with CoFFEE code routines
Args:
geom_file: input crystal geometry file in format for FHI-aims (geometry.in)
Returns:
Numpy array of fractional coordinates | DefectSupercellAnalyses.py | read_atom_coords_frac | skw32/DefectCorrectionsNotebook | 4 | python | def read_atom_coords_frac(geom_file: str) -> tuple:
" Function searches for atom using string 'atom' to allow for either 'atom' or 'atom_frac' in the file format\n If coordinates are not already fractional, they are converted to fractional for compatibility with CoFFEE code routines\n\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n \n Returns: \n Numpy array of fractional coordinates\n "
(super_invmat, wrap_vec) = frac_coords_convert(geom_file)
coord_num = count_atoms(geom_file)
coords = np.zeros([coord_num, 3])
frac_coords = np.zeros([coord_num, 3])
try:
with open(geom_file, 'r') as f:
i = 0
for line in f:
if re.search('atom', line):
words = line.split()
if (words[0] != 'atom_frac'):
coords[(i, :)] = (float(words[1]), float(words[2]), float(words[3]))
frac_coords[(i, :)] = wrap_vec(coords[(i, :)])
frac_coords[(i, :)] = np.dot(super_invmat, coords[(i, :)])
else:
frac_coords[(i, :)] = (float(words[1]), float(words[2]), float(words[3]))
if (line == None):
logger.info(('Warning! - No atom coordinates found in ' + str(geom_file)))
i += 1
except IOError:
logger.info(('Could not open ' + str(geom_file)))
return frac_coords | def read_atom_coords_frac(geom_file: str) -> tuple:
" Function searches for atom using string 'atom' to allow for either 'atom' or 'atom_frac' in the file format\n If coordinates are not already fractional, they are converted to fractional for compatibility with CoFFEE code routines\n\n Args: \n geom_file: input crystal geometry file in format for FHI-aims (geometry.in)\n \n Returns: \n Numpy array of fractional coordinates\n "
(super_invmat, wrap_vec) = frac_coords_convert(geom_file)
coord_num = count_atoms(geom_file)
coords = np.zeros([coord_num, 3])
frac_coords = np.zeros([coord_num, 3])
try:
with open(geom_file, 'r') as f:
i = 0
for line in f:
if re.search('atom', line):
words = line.split()
if (words[0] != 'atom_frac'):
coords[(i, :)] = (float(words[1]), float(words[2]), float(words[3]))
frac_coords[(i, :)] = wrap_vec(coords[(i, :)])
frac_coords[(i, :)] = np.dot(super_invmat, coords[(i, :)])
else:
frac_coords[(i, :)] = (float(words[1]), float(words[2]), float(words[3]))
if (line == None):
logger.info(('Warning! - No atom coordinates found in ' + str(geom_file)))
i += 1
except IOError:
logger.info(('Could not open ' + str(geom_file)))
return frac_coords<|docstring|>Function searches for atom using string 'atom' to allow for either 'atom' or 'atom_frac' in the file format
If coordinates are not already fractional, they are converted to fractional for compatibility with CoFFEE code routines
Args:
geom_file: input crystal geometry file in format for FHI-aims (geometry.in)
Returns:
Numpy array of fractional coordinates<|endoftext|> |
c66be6afe507474c628c4b475995e4f2432ff40c1bbc78eafcd47349c011b261 | def find_defect_type(host_coords: list, defect_coords: list) -> str:
" Compares number of atoms in defect and host supercells to determine type of defect\n host_atom_num == defect_atom_num+1 --> vacancy\n host_atom_num == defect_atom_num-1 --> interstitial\n host_atom_num == defect_atom_num --> antisite\n\n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function \n \n Returns: \n defect_type (vacancy, interstitial, antisite) as a string\n "
host_atom_num = len(host_coords)
defect_atom_num = len(defect_coords)
if (host_atom_num == (defect_atom_num + 1)):
defect_type = 'vacancy'
elif (host_atom_num == (defect_atom_num - 1)):
defect_type = 'interstitial'
elif (host_atom_num == defect_atom_num):
defect_type = 'antisite'
else:
logger.info('Error finding defect type')
return defect_type | Compares number of atoms in defect and host supercells to determine type of defect
host_atom_num == defect_atom_num+1 --> vacancy
host_atom_num == defect_atom_num-1 --> interstitial
host_atom_num == defect_atom_num --> antisite
Args:
host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function
defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function
Returns:
defect_type (vacancy, interstitial, antisite) as a string | DefectSupercellAnalyses.py | find_defect_type | skw32/DefectCorrectionsNotebook | 4 | python | def find_defect_type(host_coords: list, defect_coords: list) -> str:
" Compares number of atoms in defect and host supercells to determine type of defect\n host_atom_num == defect_atom_num+1 --> vacancy\n host_atom_num == defect_atom_num-1 --> interstitial\n host_atom_num == defect_atom_num --> antisite\n\n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function \n \n Returns: \n defect_type (vacancy, interstitial, antisite) as a string\n "
host_atom_num = len(host_coords)
defect_atom_num = len(defect_coords)
if (host_atom_num == (defect_atom_num + 1)):
defect_type = 'vacancy'
elif (host_atom_num == (defect_atom_num - 1)):
defect_type = 'interstitial'
elif (host_atom_num == defect_atom_num):
defect_type = 'antisite'
else:
logger.info('Error finding defect type')
return defect_type | def find_defect_type(host_coords: list, defect_coords: list) -> str:
" Compares number of atoms in defect and host supercells to determine type of defect\n host_atom_num == defect_atom_num+1 --> vacancy\n host_atom_num == defect_atom_num-1 --> interstitial\n host_atom_num == defect_atom_num --> antisite\n\n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function \n \n Returns: \n defect_type (vacancy, interstitial, antisite) as a string\n "
host_atom_num = len(host_coords)
defect_atom_num = len(defect_coords)
if (host_atom_num == (defect_atom_num + 1)):
defect_type = 'vacancy'
elif (host_atom_num == (defect_atom_num - 1)):
defect_type = 'interstitial'
elif (host_atom_num == defect_atom_num):
defect_type = 'antisite'
else:
logger.info('Error finding defect type')
return defect_type<|docstring|>Compares number of atoms in defect and host supercells to determine type of defect
host_atom_num == defect_atom_num+1 --> vacancy
host_atom_num == defect_atom_num-1 --> interstitial
host_atom_num == defect_atom_num --> antisite
Args:
host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function
defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function
Returns:
defect_type (vacancy, interstitial, antisite) as a string<|endoftext|> |
3b3d5a7ca060f75f5b9ebd2efd0ce0e2d85e62dee3a39ea33e90414af920d0b3 | def count_species(host_coords: list, defect_coords: list) -> list:
' Reads through species in atom_coords[row][3] for host and defect supercells\n Assumption is made that only intrinsic defects are present, hence same atom types are present in host and defect supercells\n TZ: This assumption is not always work, and extrinsic defects is normal in the ``antisite" case, and ``interstitials". \n As a result, I change the species count based on the defect supercell: \n For intrinsic defects, it will be the same as before: \n However, for extrinsic defects, it will count the extrinsic species, and the number in the host would be zero. \n\n Args: \n host_coords: lists of coordinates of host supercell obtained with \'read_atom_coords\' function \n defect_coords: lists of coordinates of defect supercell obtained with \'read_atom_coords\' function\n\n Returns: \n First function output is a list of all different species present in the host supercell\n Next two outputs are the number of each of these species in the host and defect supercell, in the same order\n '
species = []
current_species = defect_coords[0][3]
species.append(defect_coords[0][3])
for i in range(0, len(defect_coords)):
if (defect_coords[i][3] != current_species):
species.append(defect_coords[i][3])
current_species = defect_coords[i][3]
species = list(set(species))
host_species_nums = []
for j in range(0, len(species)):
species_count = 0
for i in range(0, len(host_coords)):
if (host_coords[i][3] == species[j]):
species_count += 1
host_species_nums.append(int(species_count))
defect_species_nums = []
for j in range(0, len(species)):
species_count = 0
for i in range(0, len(defect_coords)):
if (defect_coords[i][3] == species[j]):
species_count += 1
defect_species_nums.append(int(species_count))
return (species, host_species_nums, defect_species_nums) | Reads through species in atom_coords[row][3] for host and defect supercells
Assumption is made that only intrinsic defects are present, hence same atom types are present in host and defect supercells
TZ: This assumption is not always work, and extrinsic defects is normal in the ``antisite" case, and ``interstitials".
As a result, I change the species count based on the defect supercell:
For intrinsic defects, it will be the same as before:
However, for extrinsic defects, it will count the extrinsic species, and the number in the host would be zero.
Args:
host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function
defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function
Returns:
First function output is a list of all different species present in the host supercell
Next two outputs are the number of each of these species in the host and defect supercell, in the same order | DefectSupercellAnalyses.py | count_species | skw32/DefectCorrectionsNotebook | 4 | python | def count_species(host_coords: list, defect_coords: list) -> list:
' Reads through species in atom_coords[row][3] for host and defect supercells\n Assumption is made that only intrinsic defects are present, hence same atom types are present in host and defect supercells\n TZ: This assumption is not always work, and extrinsic defects is normal in the ``antisite" case, and ``interstitials". \n As a result, I change the species count based on the defect supercell: \n For intrinsic defects, it will be the same as before: \n However, for extrinsic defects, it will count the extrinsic species, and the number in the host would be zero. \n\n Args: \n host_coords: lists of coordinates of host supercell obtained with \'read_atom_coords\' function \n defect_coords: lists of coordinates of defect supercell obtained with \'read_atom_coords\' function\n\n Returns: \n First function output is a list of all different species present in the host supercell\n Next two outputs are the number of each of these species in the host and defect supercell, in the same order\n '
species = []
current_species = defect_coords[0][3]
species.append(defect_coords[0][3])
for i in range(0, len(defect_coords)):
if (defect_coords[i][3] != current_species):
species.append(defect_coords[i][3])
current_species = defect_coords[i][3]
species = list(set(species))
host_species_nums = []
for j in range(0, len(species)):
species_count = 0
for i in range(0, len(host_coords)):
if (host_coords[i][3] == species[j]):
species_count += 1
host_species_nums.append(int(species_count))
defect_species_nums = []
for j in range(0, len(species)):
species_count = 0
for i in range(0, len(defect_coords)):
if (defect_coords[i][3] == species[j]):
species_count += 1
defect_species_nums.append(int(species_count))
return (species, host_species_nums, defect_species_nums) | def count_species(host_coords: list, defect_coords: list) -> list:
' Reads through species in atom_coords[row][3] for host and defect supercells\n Assumption is made that only intrinsic defects are present, hence same atom types are present in host and defect supercells\n TZ: This assumption is not always work, and extrinsic defects is normal in the ``antisite" case, and ``interstitials". \n As a result, I change the species count based on the defect supercell: \n For intrinsic defects, it will be the same as before: \n However, for extrinsic defects, it will count the extrinsic species, and the number in the host would be zero. \n\n Args: \n host_coords: lists of coordinates of host supercell obtained with \'read_atom_coords\' function \n defect_coords: lists of coordinates of defect supercell obtained with \'read_atom_coords\' function\n\n Returns: \n First function output is a list of all different species present in the host supercell\n Next two outputs are the number of each of these species in the host and defect supercell, in the same order\n '
species = []
current_species = defect_coords[0][3]
species.append(defect_coords[0][3])
for i in range(0, len(defect_coords)):
if (defect_coords[i][3] != current_species):
species.append(defect_coords[i][3])
current_species = defect_coords[i][3]
species = list(set(species))
host_species_nums = []
for j in range(0, len(species)):
species_count = 0
for i in range(0, len(host_coords)):
if (host_coords[i][3] == species[j]):
species_count += 1
host_species_nums.append(int(species_count))
defect_species_nums = []
for j in range(0, len(species)):
species_count = 0
for i in range(0, len(defect_coords)):
if (defect_coords[i][3] == species[j]):
species_count += 1
defect_species_nums.append(int(species_count))
return (species, host_species_nums, defect_species_nums)<|docstring|>Reads through species in atom_coords[row][3] for host and defect supercells
Assumption is made that only intrinsic defects are present, hence same atom types are present in host and defect supercells
TZ: This assumption is not always work, and extrinsic defects is normal in the ``antisite" case, and ``interstitials".
As a result, I change the species count based on the defect supercell:
For intrinsic defects, it will be the same as before:
However, for extrinsic defects, it will count the extrinsic species, and the number in the host would be zero.
Args:
host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function
defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function
Returns:
First function output is a list of all different species present in the host supercell
Next two outputs are the number of each of these species in the host and defect supercell, in the same order<|endoftext|> |
45434ac9f180e829ad46b7ab2a1280ec8325988974e2d2697cfaa5ce3b205828 | def find_vacancy(host_coords: list, defect_coords: list) -> str:
" Find species where count is one less in defect supercell than in host supercell.\n\n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function\n\n Returns: \n vacancy species as a string\n "
(species, host_species_nums, defect_species_nums) = count_species(host_coords, defect_coords)
species_vac = 'no vacancy'
for i in range(0, len(species)):
if (host_species_nums[i] == (defect_species_nums[i] + 1)):
species_vac = species[i]
if (species_vac == 'no vacancy'):
logger.info('Error finding vacancy')
return species_vac | Find species where count is one less in defect supercell than in host supercell.
Args:
host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function
defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function
Returns:
vacancy species as a string | DefectSupercellAnalyses.py | find_vacancy | skw32/DefectCorrectionsNotebook | 4 | python | def find_vacancy(host_coords: list, defect_coords: list) -> str:
" Find species where count is one less in defect supercell than in host supercell.\n\n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function\n\n Returns: \n vacancy species as a string\n "
(species, host_species_nums, defect_species_nums) = count_species(host_coords, defect_coords)
species_vac = 'no vacancy'
for i in range(0, len(species)):
if (host_species_nums[i] == (defect_species_nums[i] + 1)):
species_vac = species[i]
if (species_vac == 'no vacancy'):
logger.info('Error finding vacancy')
return species_vac | def find_vacancy(host_coords: list, defect_coords: list) -> str:
" Find species where count is one less in defect supercell than in host supercell.\n\n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function\n\n Returns: \n vacancy species as a string\n "
(species, host_species_nums, defect_species_nums) = count_species(host_coords, defect_coords)
species_vac = 'no vacancy'
for i in range(0, len(species)):
if (host_species_nums[i] == (defect_species_nums[i] + 1)):
species_vac = species[i]
if (species_vac == 'no vacancy'):
logger.info('Error finding vacancy')
return species_vac<|docstring|>Find species where count is one less in defect supercell than in host supercell.
Args:
host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function
defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function
Returns:
vacancy species as a string<|endoftext|> |
be0192cbdce387f358a072fa8347fc2a9e749f30ec5cac74a3df39493e108d9c | def find_interstitial(host_coords: list, defect_coords: list) -> str:
" Find species where count is one more in defect supercell than in host supercell.\n\n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function \n\n Returns: \n interstitial species as a string\n "
(species, host_species_nums, defect_species_nums) = count_species(host_coords, defect_coords)
species_int = 'no interstitial'
for i in range(0, len(species)):
if (host_species_nums[i] == (defect_species_nums[i] - 1)):
species_int = species[i]
if (species_int == 'no interstitial'):
logger.info('Error finding interstitial')
return species_int | Find species where count is one more in defect supercell than in host supercell.
Args:
host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function
defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function
Returns:
interstitial species as a string | DefectSupercellAnalyses.py | find_interstitial | skw32/DefectCorrectionsNotebook | 4 | python | def find_interstitial(host_coords: list, defect_coords: list) -> str:
" Find species where count is one more in defect supercell than in host supercell.\n\n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function \n\n Returns: \n interstitial species as a string\n "
(species, host_species_nums, defect_species_nums) = count_species(host_coords, defect_coords)
species_int = 'no interstitial'
for i in range(0, len(species)):
if (host_species_nums[i] == (defect_species_nums[i] - 1)):
species_int = species[i]
if (species_int == 'no interstitial'):
logger.info('Error finding interstitial')
return species_int | def find_interstitial(host_coords: list, defect_coords: list) -> str:
" Find species where count is one more in defect supercell than in host supercell.\n\n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function \n\n Returns: \n interstitial species as a string\n "
(species, host_species_nums, defect_species_nums) = count_species(host_coords, defect_coords)
species_int = 'no interstitial'
for i in range(0, len(species)):
if (host_species_nums[i] == (defect_species_nums[i] - 1)):
species_int = species[i]
if (species_int == 'no interstitial'):
logger.info('Error finding interstitial')
return species_int<|docstring|>Find species where count is one more in defect supercell than in host supercell.
Args:
host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function
defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function
Returns:
interstitial species as a string<|endoftext|> |
b7b40631408b4c370b6f20e5a5c0406473146c0c51e73e590b1989ae1a1029e0 | def find_antisite(host_coords: list, defect_coords: list) -> str:
" Find species where count is one less in defect supercell than in host (species_out).\n Find species where count is one more in defect supercell than in host (species_in).\n\n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function\n\n Returns: \n Two strings, the first is the species added into the defect supercell to make the antisite defect \n and the second is the species removed from the host\n \n "
(species, host_species_nums, defect_species_nums) = count_species(host_coords, defect_coords)
species_in = 'no species in'
species_out = 'no species out'
for i in range(0, len(species)):
if (host_species_nums[i] == (defect_species_nums[i] - 1)):
species_in = species[i]
if (host_species_nums[i] == (defect_species_nums[i] + 1)):
species_out = species[i]
if ((species_in == 'no species in') or (species_out == 'no species out')):
logger.info('Error finding antisite')
return (species_in, species_out) | Find species where count is one less in defect supercell than in host (species_out).
Find species where count is one more in defect supercell than in host (species_in).
Args:
host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function
defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function
Returns:
Two strings, the first is the species added into the defect supercell to make the antisite defect
and the second is the species removed from the host | DefectSupercellAnalyses.py | find_antisite | skw32/DefectCorrectionsNotebook | 4 | python | def find_antisite(host_coords: list, defect_coords: list) -> str:
" Find species where count is one less in defect supercell than in host (species_out).\n Find species where count is one more in defect supercell than in host (species_in).\n\n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function\n\n Returns: \n Two strings, the first is the species added into the defect supercell to make the antisite defect \n and the second is the species removed from the host\n \n "
(species, host_species_nums, defect_species_nums) = count_species(host_coords, defect_coords)
species_in = 'no species in'
species_out = 'no species out'
for i in range(0, len(species)):
if (host_species_nums[i] == (defect_species_nums[i] - 1)):
species_in = species[i]
if (host_species_nums[i] == (defect_species_nums[i] + 1)):
species_out = species[i]
if ((species_in == 'no species in') or (species_out == 'no species out')):
logger.info('Error finding antisite')
return (species_in, species_out) | def find_antisite(host_coords: list, defect_coords: list) -> str:
" Find species where count is one less in defect supercell than in host (species_out).\n Find species where count is one more in defect supercell than in host (species_in).\n\n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function\n\n Returns: \n Two strings, the first is the species added into the defect supercell to make the antisite defect \n and the second is the species removed from the host\n \n "
(species, host_species_nums, defect_species_nums) = count_species(host_coords, defect_coords)
species_in = 'no species in'
species_out = 'no species out'
for i in range(0, len(species)):
if (host_species_nums[i] == (defect_species_nums[i] - 1)):
species_in = species[i]
if (host_species_nums[i] == (defect_species_nums[i] + 1)):
species_out = species[i]
if ((species_in == 'no species in') or (species_out == 'no species out')):
logger.info('Error finding antisite')
return (species_in, species_out)<|docstring|>Find species where count is one less in defect supercell than in host (species_out).
Find species where count is one more in defect supercell than in host (species_in).
Args:
host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function
defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function
Returns:
Two strings, the first is the species added into the defect supercell to make the antisite defect
and the second is the species removed from the host<|endoftext|> |
2431d0ad6c4904e0242bca3ffd0d18fd032cc783ba6d0ff73cc7c5a10ac535b5 | def vacancy_coords(host_coords: list, defect_coords: list) -> tuple:
"\n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function\n\n Returns: \n Vacancy species as string, vacancy coordinates in the perfect host supercell and the line in the geometry file for the defect\n defect_line for a vacancy is defined as the line number in the perfect host supercell of the atom missing in the vacancy supercell\n "
species_vac = find_vacancy(host_coords, defect_coords)
host_vac_coords = []
for i in range(0, len(host_coords)):
if (host_coords[i][3] == species_vac):
host_vac_coords.append(host_coords[i][:3])
defect_vac_coords = []
for i in range(0, len(defect_coords)):
if (defect_coords[i][3] == species_vac):
defect_vac_coords.append(defect_coords[i][:3])
all_closest_species = []
for (x_host, y_host, z_host) in host_vac_coords:
closest_species = None
min_distance = None
for (x_defect, y_defect, z_defect) in defect_vac_coords:
distance_to_defect = sqrt((((abs((x_host - x_defect)) * abs((x_host - x_defect))) + (abs((y_host - y_defect)) * abs((y_host - y_defect)))) + (abs((z_host - z_defect)) * abs((z_host - z_defect)))))
if ((min_distance is None) or (distance_to_defect < min_distance)):
min_distance = distance_to_defect
closest_species = [x_defect, y_defect, z_defect]
all_closest_species.append((closest_species + [min_distance]))
(x_vac, y_vac, z_vac) = host_vac_coords[np.argmax([i[3] for i in all_closest_species])][:3]
'\n max_dist = 0\n for i in range(0, len(all_closest_species)):\n if (all_closest_species[i][3] > max_dist):\n x_vac, y_vac, z_vac = host_coords[i][:3]\n max_dist = all_closest_species[i][3]\n '
tmp = host_vac_coords[np.argmax([i[3] for i in all_closest_species])][:3]
for i in range(0, len(host_coords)):
if (host_coords[i][0:3] == tmp):
defect_line = i
return (species_vac, x_vac, y_vac, z_vac, defect_line) | Args:
host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function
defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function
Returns:
Vacancy species as string, vacancy coordinates in the perfect host supercell and the line in the geometry file for the defect
defect_line for a vacancy is defined as the line number in the perfect host supercell of the atom missing in the vacancy supercell | DefectSupercellAnalyses.py | vacancy_coords | skw32/DefectCorrectionsNotebook | 4 | python | def vacancy_coords(host_coords: list, defect_coords: list) -> tuple:
"\n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function\n\n Returns: \n Vacancy species as string, vacancy coordinates in the perfect host supercell and the line in the geometry file for the defect\n defect_line for a vacancy is defined as the line number in the perfect host supercell of the atom missing in the vacancy supercell\n "
species_vac = find_vacancy(host_coords, defect_coords)
host_vac_coords = []
for i in range(0, len(host_coords)):
if (host_coords[i][3] == species_vac):
host_vac_coords.append(host_coords[i][:3])
defect_vac_coords = []
for i in range(0, len(defect_coords)):
if (defect_coords[i][3] == species_vac):
defect_vac_coords.append(defect_coords[i][:3])
all_closest_species = []
for (x_host, y_host, z_host) in host_vac_coords:
closest_species = None
min_distance = None
for (x_defect, y_defect, z_defect) in defect_vac_coords:
distance_to_defect = sqrt((((abs((x_host - x_defect)) * abs((x_host - x_defect))) + (abs((y_host - y_defect)) * abs((y_host - y_defect)))) + (abs((z_host - z_defect)) * abs((z_host - z_defect)))))
if ((min_distance is None) or (distance_to_defect < min_distance)):
min_distance = distance_to_defect
closest_species = [x_defect, y_defect, z_defect]
all_closest_species.append((closest_species + [min_distance]))
(x_vac, y_vac, z_vac) = host_vac_coords[np.argmax([i[3] for i in all_closest_species])][:3]
'\n max_dist = 0\n for i in range(0, len(all_closest_species)):\n if (all_closest_species[i][3] > max_dist):\n x_vac, y_vac, z_vac = host_coords[i][:3]\n max_dist = all_closest_species[i][3]\n '
tmp = host_vac_coords[np.argmax([i[3] for i in all_closest_species])][:3]
for i in range(0, len(host_coords)):
if (host_coords[i][0:3] == tmp):
defect_line = i
return (species_vac, x_vac, y_vac, z_vac, defect_line) | def vacancy_coords(host_coords: list, defect_coords: list) -> tuple:
"\n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function\n\n Returns: \n Vacancy species as string, vacancy coordinates in the perfect host supercell and the line in the geometry file for the defect\n defect_line for a vacancy is defined as the line number in the perfect host supercell of the atom missing in the vacancy supercell\n "
species_vac = find_vacancy(host_coords, defect_coords)
host_vac_coords = []
for i in range(0, len(host_coords)):
if (host_coords[i][3] == species_vac):
host_vac_coords.append(host_coords[i][:3])
defect_vac_coords = []
for i in range(0, len(defect_coords)):
if (defect_coords[i][3] == species_vac):
defect_vac_coords.append(defect_coords[i][:3])
all_closest_species = []
for (x_host, y_host, z_host) in host_vac_coords:
closest_species = None
min_distance = None
for (x_defect, y_defect, z_defect) in defect_vac_coords:
distance_to_defect = sqrt((((abs((x_host - x_defect)) * abs((x_host - x_defect))) + (abs((y_host - y_defect)) * abs((y_host - y_defect)))) + (abs((z_host - z_defect)) * abs((z_host - z_defect)))))
if ((min_distance is None) or (distance_to_defect < min_distance)):
min_distance = distance_to_defect
closest_species = [x_defect, y_defect, z_defect]
all_closest_species.append((closest_species + [min_distance]))
(x_vac, y_vac, z_vac) = host_vac_coords[np.argmax([i[3] for i in all_closest_species])][:3]
'\n max_dist = 0\n for i in range(0, len(all_closest_species)):\n if (all_closest_species[i][3] > max_dist):\n x_vac, y_vac, z_vac = host_coords[i][:3]\n max_dist = all_closest_species[i][3]\n '
tmp = host_vac_coords[np.argmax([i[3] for i in all_closest_species])][:3]
for i in range(0, len(host_coords)):
if (host_coords[i][0:3] == tmp):
defect_line = i
return (species_vac, x_vac, y_vac, z_vac, defect_line)<|docstring|>Args:
host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function
defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function
Returns:
Vacancy species as string, vacancy coordinates in the perfect host supercell and the line in the geometry file for the defect
defect_line for a vacancy is defined as the line number in the perfect host supercell of the atom missing in the vacancy supercell<|endoftext|> |
4e3e7501b65b2fbd4e9a2cda83c6a5f92e67edb6f12d66b8ee4a9ae1b27ff10a | def interstitial_coords(host_coords: list, defect_coords: list) -> tuple:
"\n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function\n\n Returns: \n Vacancy species as string, vacancy coordinates in the perfect host supercell and the line in the geometry file for the defect\n defect_line for an interstitial is defined as the line number in the defect supercell of the atom not present in the host supercell\n "
species_int = find_interstitial(host_coords, defect_coords)
host_int_coords = []
for i in range(0, len(host_coords)):
if (host_coords[i][3] == species_int):
host_int_coords.append(host_coords[i][:3])
defect_int_coords = []
for i in range(0, len(defect_coords)):
if (defect_coords[i][3] == species_int):
defect_int_coords.append(defect_coords[i][:3])
all_closest_species = []
for (x_defect, y_defect, z_defect) in defect_int_coords:
closest_species = None
min_distance = None
for (x_host, y_host, z_host) in host_int_coords:
distance_to_defect = sqrt((((abs((x_host - x_defect)) * abs((x_host - x_defect))) + (abs((y_host - y_defect)) * abs((y_host - y_defect)))) + (abs((z_host - z_defect)) * abs((z_host - z_defect)))))
if ((min_distance is None) or (distance_to_defect < min_distance)):
min_distance = distance_to_defect
closest_species = [x_host, y_host, z_host]
all_closest_species.append((closest_species + [min_distance]))
(x_int, y_int, z_int) = defect_int_coords[np.argmax([i[3] for i in all_closest_species])][:3]
tmp = defect_int_coords[np.argmax([i[3] for i in all_closest_species])][:3]
for i in range(0, len(defect_coords)):
if (defect_coords[i][0:3] == tmp):
defect_line = i
return (species_int, x_int, y_int, z_int, defect_line) | Args:
host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function
defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function
Returns:
Vacancy species as string, vacancy coordinates in the perfect host supercell and the line in the geometry file for the defect
defect_line for an interstitial is defined as the line number in the defect supercell of the atom not present in the host supercell | DefectSupercellAnalyses.py | interstitial_coords | skw32/DefectCorrectionsNotebook | 4 | python | def interstitial_coords(host_coords: list, defect_coords: list) -> tuple:
"\n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function\n\n Returns: \n Vacancy species as string, vacancy coordinates in the perfect host supercell and the line in the geometry file for the defect\n defect_line for an interstitial is defined as the line number in the defect supercell of the atom not present in the host supercell\n "
species_int = find_interstitial(host_coords, defect_coords)
host_int_coords = []
for i in range(0, len(host_coords)):
if (host_coords[i][3] == species_int):
host_int_coords.append(host_coords[i][:3])
defect_int_coords = []
for i in range(0, len(defect_coords)):
if (defect_coords[i][3] == species_int):
defect_int_coords.append(defect_coords[i][:3])
all_closest_species = []
for (x_defect, y_defect, z_defect) in defect_int_coords:
closest_species = None
min_distance = None
for (x_host, y_host, z_host) in host_int_coords:
distance_to_defect = sqrt((((abs((x_host - x_defect)) * abs((x_host - x_defect))) + (abs((y_host - y_defect)) * abs((y_host - y_defect)))) + (abs((z_host - z_defect)) * abs((z_host - z_defect)))))
if ((min_distance is None) or (distance_to_defect < min_distance)):
min_distance = distance_to_defect
closest_species = [x_host, y_host, z_host]
all_closest_species.append((closest_species + [min_distance]))
(x_int, y_int, z_int) = defect_int_coords[np.argmax([i[3] for i in all_closest_species])][:3]
tmp = defect_int_coords[np.argmax([i[3] for i in all_closest_species])][:3]
for i in range(0, len(defect_coords)):
if (defect_coords[i][0:3] == tmp):
defect_line = i
return (species_int, x_int, y_int, z_int, defect_line) | def interstitial_coords(host_coords: list, defect_coords: list) -> tuple:
"\n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function\n\n Returns: \n Vacancy species as string, vacancy coordinates in the perfect host supercell and the line in the geometry file for the defect\n defect_line for an interstitial is defined as the line number in the defect supercell of the atom not present in the host supercell\n "
species_int = find_interstitial(host_coords, defect_coords)
host_int_coords = []
for i in range(0, len(host_coords)):
if (host_coords[i][3] == species_int):
host_int_coords.append(host_coords[i][:3])
defect_int_coords = []
for i in range(0, len(defect_coords)):
if (defect_coords[i][3] == species_int):
defect_int_coords.append(defect_coords[i][:3])
all_closest_species = []
for (x_defect, y_defect, z_defect) in defect_int_coords:
closest_species = None
min_distance = None
for (x_host, y_host, z_host) in host_int_coords:
distance_to_defect = sqrt((((abs((x_host - x_defect)) * abs((x_host - x_defect))) + (abs((y_host - y_defect)) * abs((y_host - y_defect)))) + (abs((z_host - z_defect)) * abs((z_host - z_defect)))))
if ((min_distance is None) or (distance_to_defect < min_distance)):
min_distance = distance_to_defect
closest_species = [x_host, y_host, z_host]
all_closest_species.append((closest_species + [min_distance]))
(x_int, y_int, z_int) = defect_int_coords[np.argmax([i[3] for i in all_closest_species])][:3]
tmp = defect_int_coords[np.argmax([i[3] for i in all_closest_species])][:3]
for i in range(0, len(defect_coords)):
if (defect_coords[i][0:3] == tmp):
defect_line = i
return (species_int, x_int, y_int, z_int, defect_line)<|docstring|>Args:
host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function
defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function
Returns:
Vacancy species as string, vacancy coordinates in the perfect host supercell and the line in the geometry file for the defect
defect_line for an interstitial is defined as the line number in the defect supercell of the atom not present in the host supercell<|endoftext|> |
f21cd29b6251760300abc4819a0a7acd451a29191beeb39aa767fc1500796d78 | def antisite_coords(host_coords: list, defect_coords: list) -> tuple:
" NOTE: TZ: The way to find the defect is worng for some specific structures, we should find a better way to find the defect atoms. \n I try to fixed that within the antisite_coords first, probably similar job should be done in the other routines, i.e. vacancies, and interstitial.\n \n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function\n\n Returns: \n Vacancy species as string, vacancy coordinates in the perfect host supercell and the line in the geometry file for the defect\n defect_line for an antisite is defined as the line number in the defect supercell of the atom not present in the host supercell \n "
(species_in, species_out) = find_antisite(host_coords, defect_coords)
host_in_coords = []
for i in range(0, len(host_coords)):
if (host_coords[i][3] == species_in):
host_in_coords.append(host_coords[i][:3])
defect_in_coords = []
for i in range(0, len(defect_coords)):
if (defect_coords[i][3] == species_in):
defect_in_coords.append(defect_coords[i][:3])
if (not host_in_coords):
(x_in, y_in, z_in) = defect_in_coords[0][:3]
else:
all_closest_species = []
for (x_defect, y_defect, z_defect) in defect_in_coords:
closest_species = None
min_distance = None
for (x_host, y_host, z_host) in host_in_coords:
distance_to_defect = sqrt((((abs((x_host - x_defect)) * abs((x_host - x_defect))) + (abs((y_host - y_defect)) * abs((y_host - y_defect)))) + (abs((z_host - z_defect)) * abs((z_host - z_defect)))))
if ((min_distance is None) or (distance_to_defect < min_distance)):
min_distance = distance_to_defect
closest_species = [x_defect, y_defect, z_defect]
all_closest_species.append((closest_species + [min_distance]))
(x_in, y_in, z_in) = defect_in_coords[np.argmax([i[3] for i in all_closest_species])][:3]
tmp = (x_in, y_in, z_in)
for i in range(0, len(defect_coords)):
if (defect_coords[i][0:3] == tmp):
defect_line = i
return (species_in, species_out, x_in, y_in, z_in, defect_line) | NOTE: TZ: The way to find the defect is worng for some specific structures, we should find a better way to find the defect atoms.
I try to fixed that within the antisite_coords first, probably similar job should be done in the other routines, i.e. vacancies, and interstitial.
Args:
host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function
defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function
Returns:
Vacancy species as string, vacancy coordinates in the perfect host supercell and the line in the geometry file for the defect
defect_line for an antisite is defined as the line number in the defect supercell of the atom not present in the host supercell | DefectSupercellAnalyses.py | antisite_coords | skw32/DefectCorrectionsNotebook | 4 | python | def antisite_coords(host_coords: list, defect_coords: list) -> tuple:
" NOTE: TZ: The way to find the defect is worng for some specific structures, we should find a better way to find the defect atoms. \n I try to fixed that within the antisite_coords first, probably similar job should be done in the other routines, i.e. vacancies, and interstitial.\n \n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function\n\n Returns: \n Vacancy species as string, vacancy coordinates in the perfect host supercell and the line in the geometry file for the defect\n defect_line for an antisite is defined as the line number in the defect supercell of the atom not present in the host supercell \n "
(species_in, species_out) = find_antisite(host_coords, defect_coords)
host_in_coords = []
for i in range(0, len(host_coords)):
if (host_coords[i][3] == species_in):
host_in_coords.append(host_coords[i][:3])
defect_in_coords = []
for i in range(0, len(defect_coords)):
if (defect_coords[i][3] == species_in):
defect_in_coords.append(defect_coords[i][:3])
if (not host_in_coords):
(x_in, y_in, z_in) = defect_in_coords[0][:3]
else:
all_closest_species = []
for (x_defect, y_defect, z_defect) in defect_in_coords:
closest_species = None
min_distance = None
for (x_host, y_host, z_host) in host_in_coords:
distance_to_defect = sqrt((((abs((x_host - x_defect)) * abs((x_host - x_defect))) + (abs((y_host - y_defect)) * abs((y_host - y_defect)))) + (abs((z_host - z_defect)) * abs((z_host - z_defect)))))
if ((min_distance is None) or (distance_to_defect < min_distance)):
min_distance = distance_to_defect
closest_species = [x_defect, y_defect, z_defect]
all_closest_species.append((closest_species + [min_distance]))
(x_in, y_in, z_in) = defect_in_coords[np.argmax([i[3] for i in all_closest_species])][:3]
tmp = (x_in, y_in, z_in)
for i in range(0, len(defect_coords)):
if (defect_coords[i][0:3] == tmp):
defect_line = i
return (species_in, species_out, x_in, y_in, z_in, defect_line) | def antisite_coords(host_coords: list, defect_coords: list) -> tuple:
" NOTE: TZ: The way to find the defect is worng for some specific structures, we should find a better way to find the defect atoms. \n I try to fixed that within the antisite_coords first, probably similar job should be done in the other routines, i.e. vacancies, and interstitial.\n \n Args: \n host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function \n defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function\n\n Returns: \n Vacancy species as string, vacancy coordinates in the perfect host supercell and the line in the geometry file for the defect\n defect_line for an antisite is defined as the line number in the defect supercell of the atom not present in the host supercell \n "
(species_in, species_out) = find_antisite(host_coords, defect_coords)
host_in_coords = []
for i in range(0, len(host_coords)):
if (host_coords[i][3] == species_in):
host_in_coords.append(host_coords[i][:3])
defect_in_coords = []
for i in range(0, len(defect_coords)):
if (defect_coords[i][3] == species_in):
defect_in_coords.append(defect_coords[i][:3])
if (not host_in_coords):
(x_in, y_in, z_in) = defect_in_coords[0][:3]
else:
all_closest_species = []
for (x_defect, y_defect, z_defect) in defect_in_coords:
closest_species = None
min_distance = None
for (x_host, y_host, z_host) in host_in_coords:
distance_to_defect = sqrt((((abs((x_host - x_defect)) * abs((x_host - x_defect))) + (abs((y_host - y_defect)) * abs((y_host - y_defect)))) + (abs((z_host - z_defect)) * abs((z_host - z_defect)))))
if ((min_distance is None) or (distance_to_defect < min_distance)):
min_distance = distance_to_defect
closest_species = [x_defect, y_defect, z_defect]
all_closest_species.append((closest_species + [min_distance]))
(x_in, y_in, z_in) = defect_in_coords[np.argmax([i[3] for i in all_closest_species])][:3]
tmp = (x_in, y_in, z_in)
for i in range(0, len(defect_coords)):
if (defect_coords[i][0:3] == tmp):
defect_line = i
return (species_in, species_out, x_in, y_in, z_in, defect_line)<|docstring|>NOTE: TZ: The way to find the defect is worng for some specific structures, we should find a better way to find the defect atoms.
I try to fixed that within the antisite_coords first, probably similar job should be done in the other routines, i.e. vacancies, and interstitial.
Args:
host_coords: lists of coordinates of host supercell obtained with 'read_atom_coords' function
defect_coords: lists of coordinates of defect supercell obtained with 'read_atom_coords' function
Returns:
Vacancy species as string, vacancy coordinates in the perfect host supercell and the line in the geometry file for the defect
defect_line for an antisite is defined as the line number in the defect supercell of the atom not present in the host supercell<|endoftext|> |
eebd7aaf4d67d6e862b1e6c7dfae39eaecbd975b2da37dc98b5842405c074f12 | def defect_to_boundary(x_defect: float, y_defect: float, z_defect: float, supercell_x: float, supercell_y: float, supercell_z: float) -> float:
'\n Args: \n x_defect: x-coordinate of defect within the supercell\n y_defect: y-coordinate of defect within the supercell\n z_defect: z-coordinate of defect within the supercell\n supercell_x: dimension of supercell along the x-direction\n supercell_y: dimension of supercell along the y-direction\n supercell_z: dimension of supercell along the z-direction\n (All are extracted as part of the notebook workflow and above are their default names in the notebook)\n \n Returns: \n Closest distances in the x, y and z-directions of the defect to any of the supercell boundaries\n '
x_min = (x_defect if (x_defect <= (supercell_x / 2.0)) else (supercell_x - x_defect))
y_min = (y_defect if (y_defect <= (supercell_y / 2.0)) else (supercell_y - y_defect))
z_min = (z_defect if (z_defect <= (supercell_z / 2.0)) else (supercell_z - z_defect))
return (x_min, y_min, z_min) | Args:
x_defect: x-coordinate of defect within the supercell
y_defect: y-coordinate of defect within the supercell
z_defect: z-coordinate of defect within the supercell
supercell_x: dimension of supercell along the x-direction
supercell_y: dimension of supercell along the y-direction
supercell_z: dimension of supercell along the z-direction
(All are extracted as part of the notebook workflow and above are their default names in the notebook)
Returns:
Closest distances in the x, y and z-directions of the defect to any of the supercell boundaries | DefectSupercellAnalyses.py | defect_to_boundary | skw32/DefectCorrectionsNotebook | 4 | python | def defect_to_boundary(x_defect: float, y_defect: float, z_defect: float, supercell_x: float, supercell_y: float, supercell_z: float) -> float:
'\n Args: \n x_defect: x-coordinate of defect within the supercell\n y_defect: y-coordinate of defect within the supercell\n z_defect: z-coordinate of defect within the supercell\n supercell_x: dimension of supercell along the x-direction\n supercell_y: dimension of supercell along the y-direction\n supercell_z: dimension of supercell along the z-direction\n (All are extracted as part of the notebook workflow and above are their default names in the notebook)\n \n Returns: \n Closest distances in the x, y and z-directions of the defect to any of the supercell boundaries\n '
x_min = (x_defect if (x_defect <= (supercell_x / 2.0)) else (supercell_x - x_defect))
y_min = (y_defect if (y_defect <= (supercell_y / 2.0)) else (supercell_y - y_defect))
z_min = (z_defect if (z_defect <= (supercell_z / 2.0)) else (supercell_z - z_defect))
return (x_min, y_min, z_min) | def defect_to_boundary(x_defect: float, y_defect: float, z_defect: float, supercell_x: float, supercell_y: float, supercell_z: float) -> float:
'\n Args: \n x_defect: x-coordinate of defect within the supercell\n y_defect: y-coordinate of defect within the supercell\n z_defect: z-coordinate of defect within the supercell\n supercell_x: dimension of supercell along the x-direction\n supercell_y: dimension of supercell along the y-direction\n supercell_z: dimension of supercell along the z-direction\n (All are extracted as part of the notebook workflow and above are their default names in the notebook)\n \n Returns: \n Closest distances in the x, y and z-directions of the defect to any of the supercell boundaries\n '
x_min = (x_defect if (x_defect <= (supercell_x / 2.0)) else (supercell_x - x_defect))
y_min = (y_defect if (y_defect <= (supercell_y / 2.0)) else (supercell_y - y_defect))
z_min = (z_defect if (z_defect <= (supercell_z / 2.0)) else (supercell_z - z_defect))
return (x_min, y_min, z_min)<|docstring|>Args:
x_defect: x-coordinate of defect within the supercell
y_defect: y-coordinate of defect within the supercell
z_defect: z-coordinate of defect within the supercell
supercell_x: dimension of supercell along the x-direction
supercell_y: dimension of supercell along the y-direction
supercell_z: dimension of supercell along the z-direction
(All are extracted as part of the notebook workflow and above are their default names in the notebook)
Returns:
Closest distances in the x, y and z-directions of the defect to any of the supercell boundaries<|endoftext|> |
1d3cdbd0e8da619fe2a6a4df9b26cb1b2fafa07b295e1b90ad76e2eef1e38d81 | def lsmr(A, b, damp=0.0, atol=1e-06, btol=1e-06, conlim=100000000.0, maxiter=None, show=False):
'Iterative solver for least-squares problems.\n\n lsmr solves the system of linear equations ``Ax = b``. If the system\n is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.\n A is a rectangular matrix of dimension m-by-n, where all cases are\n allowed: m = n, m > n, or m < n. B is a vector of length m.\n The matrix A may be dense or sparse (usually sparse).\n\n Parameters\n ----------\n A : {matrix, sparse matrix, ndarray, LinearOperator}\n Matrix A in the linear system.\n b : array_like, shape (m,)\n Vector b in the linear system.\n damp : float\n Damping factor for regularized least-squares. `lsmr` solves\n the regularized least-squares problem::\n\n min ||(b) - ( A )x||\n ||(0) (damp*I) ||_2\n\n where damp is a scalar. If damp is None or 0, the system\n is solved without regularization.\n atol, btol : float, optional\n Stopping tolerances. `lsmr` continues iterations until a\n certain backward error estimate is smaller than some quantity\n depending on atol and btol. Let ``r = b - Ax`` be the\n residual vector for the current approximate solution ``x``.\n If ``Ax = b`` seems to be consistent, ``lsmr`` terminates\n when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.\n Otherwise, lsmr terminates when ``norm(A^{T} r) <=\n atol * norm(A) * norm(r)``. If both tolerances are 1.0e-6 (say),\n the final ``norm(r)`` should be accurate to about 6\n digits. (The final x will usually have fewer correct digits,\n depending on ``cond(A)`` and the size of LAMBDA.) If `atol`\n or `btol` is None, a default value of 1.0e-6 will be used.\n Ideally, they should be estimates of the relative error in the\n entries of A and B respectively. For example, if the entries\n of `A` have 7 correct digits, set atol = 1e-7. This prevents\n the algorithm from doing unnecessary work beyond the\n uncertainty of the input data.\n conlim : float, optional\n `lsmr` terminates if an estimate of ``cond(A)`` exceeds\n `conlim`. 
For compatible systems ``Ax = b``, conlim could be\n as large as 1.0e+12 (say). For least-squares problems,\n `conlim` should be less than 1.0e+8. If `conlim` is None, the\n default value is 1e+8. Maximum precision can be obtained by\n setting ``atol = btol = conlim = 0``, but the number of\n iterations may then be excessive.\n maxiter : int, optional\n `lsmr` terminates if the number of iterations reaches\n `maxiter`. The default is ``maxiter = min(m, n)``. For\n ill-conditioned systems, a larger value of `maxiter` may be\n needed.\n show : bool, optional\n Print iterations logs if ``show=True``.\n\n Returns\n -------\n x : ndarray of float\n Least-square solution returned.\n istop : int\n istop gives the reason for stopping::\n\n istop = 0 means x=0 is a solution.\n = 1 means x is an approximate solution to A*x = B,\n according to atol and btol.\n = 2 means x approximately solves the least-squares problem\n according to atol.\n = 3 means COND(A) seems to be greater than CONLIM.\n = 4 is the same as 1 with atol = btol = eps (machine\n precision)\n = 5 is the same as 2 with atol = eps.\n = 6 is the same as 3 with CONLIM = 1/eps.\n = 7 means ITN reached maxiter before the other stopping\n conditions were satisfied.\n\n itn : int\n Number of iterations used.\n normr : float\n ``norm(b-Ax)``\n normar : float\n ``norm(A^T (b - Ax))``\n norma : float\n ``norm(A)``\n conda : float\n Condition number of A.\n normx : float\n ``norm(x)``\n\n Notes\n -----\n\n .. versionadded:: 0.11.0\n\n References\n ----------\n .. [1] D. C.-L. Fong and M. A. Saunders,\n "LSMR: An iterative algorithm for sparse least-squares problems",\n SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.\n http://arxiv.org/abs/1006.0758\n .. [2] LSMR Software, http://web.stanford.edu/group/SOL/software/lsmr/\n\n '
A = aslinearoperator(A)
b = atleast_1d(b)
if (b.ndim > 1):
b = b.squeeze()
msg = ('The exact solution is x = 0 ', 'Ax - b is small enough, given atol, btol ', 'The least-squares solution is good enough, given atol ', 'The estimate of cond(Abar) has exceeded conlim ', 'Ax - b is small enough for this machine ', 'The least-squares solution is good enough for this machine', 'Cond(Abar) seems to be too large for this machine ', 'The iteration limit has been reached ')
hdg1 = ' itn x(1) norm r norm Ar'
hdg2 = ' compatible LS norm A cond A'
pfreq = 20
pcount = 0
(m, n) = A.shape
minDim = min([m, n])
if (maxiter is None):
maxiter = minDim
if show:
print(' ')
print('LSMR Least-squares solution of Ax = b\n')
print(('The matrix A has %8g rows and %8g cols' % (m, n)))
print(('damp = %20.14e\n' % damp))
print(('atol = %8.2e conlim = %8.2e\n' % (atol, conlim)))
print(('btol = %8.2e maxiter = %8g\n' % (btol, maxiter)))
u = b
beta = norm(u)
v = zeros(n)
alpha = 0
if (beta > 0):
u = ((1 / beta) * u)
v = A.rmatvec(u)
alpha = norm(v)
if (alpha > 0):
v = ((1 / alpha) * v)
itn = 0
zetabar = (alpha * beta)
alphabar = alpha
rho = 1
rhobar = 1
cbar = 1
sbar = 0
h = v.copy()
hbar = zeros(n)
x = zeros(n)
betadd = beta
betad = 0
rhodold = 1
tautildeold = 0
thetatilde = 0
zeta = 0
d = 0
normA2 = (alpha * alpha)
maxrbar = 0
minrbar = 1e+100
normA = sqrt(normA2)
condA = 1
normx = 0
normb = beta
istop = 0
ctol = 0
if (conlim > 0):
ctol = (1 / conlim)
normr = beta
normar = (alpha * beta)
if (normar == 0):
if show:
print(msg[0])
return (x, istop, itn, normr, normar, normA, condA, normx)
if show:
print(' ')
print(hdg1, hdg2)
test1 = 1
test2 = (alpha / beta)
str1 = ('%6g %12.5e' % (itn, x[0]))
str2 = (' %10.3e %10.3e' % (normr, normar))
str3 = (' %8.1e %8.1e' % (test1, test2))
print(''.join([str1, str2, str3]))
while (itn < maxiter):
itn = (itn + 1)
u = (A.matvec(v) - (alpha * u))
beta = norm(u)
if (beta > 0):
u = ((1 / beta) * u)
v = (A.rmatvec(u) - (beta * v))
alpha = norm(v)
if (alpha > 0):
v = ((1 / alpha) * v)
(chat, shat, alphahat) = _sym_ortho(alphabar, damp)
rhoold = rho
(c, s, rho) = _sym_ortho(alphahat, beta)
thetanew = (s * alpha)
alphabar = (c * alpha)
rhobarold = rhobar
zetaold = zeta
thetabar = (sbar * rho)
rhotemp = (cbar * rho)
(cbar, sbar, rhobar) = _sym_ortho((cbar * rho), thetanew)
zeta = (cbar * zetabar)
zetabar = ((- sbar) * zetabar)
hbar = (h - (((thetabar * rho) / (rhoold * rhobarold)) * hbar))
x = (x + ((zeta / (rho * rhobar)) * hbar))
h = (v - ((thetanew / rho) * h))
betaacute = (chat * betadd)
betacheck = ((- shat) * betadd)
betahat = (c * betaacute)
betadd = ((- s) * betaacute)
thetatildeold = thetatilde
(ctildeold, stildeold, rhotildeold) = _sym_ortho(rhodold, thetabar)
thetatilde = (stildeold * rhobar)
rhodold = (ctildeold * rhobar)
betad = (((- stildeold) * betad) + (ctildeold * betahat))
tautildeold = ((zetaold - (thetatildeold * tautildeold)) / rhotildeold)
taud = ((zeta - (thetatilde * tautildeold)) / rhodold)
d = (d + (betacheck * betacheck))
normr = sqrt(((d + ((betad - taud) ** 2)) + (betadd * betadd)))
normA2 = (normA2 + (beta * beta))
normA = sqrt(normA2)
normA2 = (normA2 + (alpha * alpha))
maxrbar = max(maxrbar, rhobarold)
if (itn > 1):
minrbar = min(minrbar, rhobarold)
condA = (max(maxrbar, rhotemp) / min(minrbar, rhotemp))
normar = abs(zetabar)
normx = norm(x)
test1 = (normr / normb)
if ((normA * normr) != 0):
test2 = (normar / (normA * normr))
else:
test2 = infty
test3 = (1 / condA)
t1 = (test1 / (1 + ((normA * normx) / normb)))
rtol = (btol + (((atol * normA) * normx) / normb))
if (itn >= maxiter):
istop = 7
if ((1 + test3) <= 1):
istop = 6
if ((1 + test2) <= 1):
istop = 5
if ((1 + t1) <= 1):
istop = 4
if (test3 <= ctol):
istop = 3
if (test2 <= atol):
istop = 2
if (test1 <= rtol):
istop = 1
if show:
if ((n <= 40) or (itn <= 10) or (itn >= (maxiter - 10)) or ((itn % 10) == 0) or (test3 <= (1.1 * ctol)) or (test2 <= (1.1 * atol)) or (test1 <= (1.1 * rtol)) or (istop != 0)):
if (pcount >= pfreq):
pcount = 0
print(' ')
print(hdg1, hdg2)
pcount = (pcount + 1)
str1 = ('%6g %12.5e' % (itn, x[0]))
str2 = (' %10.3e %10.3e' % (normr, normar))
str3 = (' %8.1e %8.1e' % (test1, test2))
str4 = (' %8.1e %8.1e' % (normA, condA))
print(''.join([str1, str2, str3, str4]))
if (istop > 0):
break
if show:
print(' ')
print('LSMR finished')
print(msg[istop])
print(('istop =%8g normr =%8.1e' % (istop, normr)))
print((' normA =%8.1e normAr =%8.1e' % (normA, normar)))
print(('itn =%8g condA =%8.1e' % (itn, condA)))
print((' normx =%8.1e' % normx))
print(str1, str2)
print(str3, str4)
return (x, istop, itn, normr, normar, normA, condA, normx) | Iterative solver for least-squares problems.
lsmr solves the system of linear equations ``Ax = b``. If the system
is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.
A is a rectangular matrix of dimension m-by-n, where all cases are
allowed: m = n, m > n, or m < n. B is a vector of length m.
The matrix A may be dense or sparse (usually sparse).
Parameters
----------
A : {matrix, sparse matrix, ndarray, LinearOperator}
Matrix A in the linear system.
b : array_like, shape (m,)
Vector b in the linear system.
damp : float
Damping factor for regularized least-squares. `lsmr` solves
the regularized least-squares problem::
min ||(b) - ( A )x||
||(0) (damp*I) ||_2
where damp is a scalar. If damp is None or 0, the system
is solved without regularization.
atol, btol : float, optional
Stopping tolerances. `lsmr` continues iterations until a
certain backward error estimate is smaller than some quantity
depending on atol and btol. Let ``r = b - Ax`` be the
residual vector for the current approximate solution ``x``.
If ``Ax = b`` seems to be consistent, ``lsmr`` terminates
when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
Otherwise, lsmr terminates when ``norm(A^{T} r) <=
atol * norm(A) * norm(r)``. If both tolerances are 1.0e-6 (say),
the final ``norm(r)`` should be accurate to about 6
digits. (The final x will usually have fewer correct digits,
depending on ``cond(A)`` and the size of LAMBDA.) If `atol`
or `btol` is None, a default value of 1.0e-6 will be used.
Ideally, they should be estimates of the relative error in the
entries of A and B respectively. For example, if the entries
of `A` have 7 correct digits, set atol = 1e-7. This prevents
the algorithm from doing unnecessary work beyond the
uncertainty of the input data.
conlim : float, optional
`lsmr` terminates if an estimate of ``cond(A)`` exceeds
`conlim`. For compatible systems ``Ax = b``, conlim could be
as large as 1.0e+12 (say). For least-squares problems,
`conlim` should be less than 1.0e+8. If `conlim` is None, the
default value is 1e+8. Maximum precision can be obtained by
setting ``atol = btol = conlim = 0``, but the number of
iterations may then be excessive.
maxiter : int, optional
`lsmr` terminates if the number of iterations reaches
`maxiter`. The default is ``maxiter = min(m, n)``. For
ill-conditioned systems, a larger value of `maxiter` may be
needed.
show : bool, optional
Print iterations logs if ``show=True``.
Returns
-------
x : ndarray of float
Least-square solution returned.
istop : int
istop gives the reason for stopping::
istop = 0 means x=0 is a solution.
= 1 means x is an approximate solution to A*x = B,
according to atol and btol.
= 2 means x approximately solves the least-squares problem
according to atol.
= 3 means COND(A) seems to be greater than CONLIM.
= 4 is the same as 1 with atol = btol = eps (machine
precision)
= 5 is the same as 2 with atol = eps.
= 6 is the same as 3 with CONLIM = 1/eps.
= 7 means ITN reached maxiter before the other stopping
conditions were satisfied.
itn : int
Number of iterations used.
normr : float
``norm(b-Ax)``
normar : float
``norm(A^T (b - Ax))``
norma : float
``norm(A)``
conda : float
Condition number of A.
normx : float
``norm(x)``
Notes
-----
.. versionadded:: 0.11.0
References
----------
.. [1] D. C.-L. Fong and M. A. Saunders,
"LSMR: An iterative algorithm for sparse least-squares problems",
SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.
http://arxiv.org/abs/1006.0758
.. [2] LSMR Software, http://web.stanford.edu/group/SOL/software/lsmr/ | contrib/python/scipy/scipy/sparse/linalg/isolve/lsmr.py | lsmr | Trollgeir/catboost | 6,989 | python | def lsmr(A, b, damp=0.0, atol=1e-06, btol=1e-06, conlim=100000000.0, maxiter=None, show=False):
'Iterative solver for least-squares problems.\n\n lsmr solves the system of linear equations ``Ax = b``. If the system\n is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.\n A is a rectangular matrix of dimension m-by-n, where all cases are\n allowed: m = n, m > n, or m < n. B is a vector of length m.\n The matrix A may be dense or sparse (usually sparse).\n\n Parameters\n ----------\n A : {matrix, sparse matrix, ndarray, LinearOperator}\n Matrix A in the linear system.\n b : array_like, shape (m,)\n Vector b in the linear system.\n damp : float\n Damping factor for regularized least-squares. `lsmr` solves\n the regularized least-squares problem::\n\n min ||(b) - ( A )x||\n ||(0) (damp*I) ||_2\n\n where damp is a scalar. If damp is None or 0, the system\n is solved without regularization.\n atol, btol : float, optional\n Stopping tolerances. `lsmr` continues iterations until a\n certain backward error estimate is smaller than some quantity\n depending on atol and btol. Let ``r = b - Ax`` be the\n residual vector for the current approximate solution ``x``.\n If ``Ax = b`` seems to be consistent, ``lsmr`` terminates\n when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.\n Otherwise, lsmr terminates when ``norm(A^{T} r) <=\n atol * norm(A) * norm(r)``. If both tolerances are 1.0e-6 (say),\n the final ``norm(r)`` should be accurate to about 6\n digits. (The final x will usually have fewer correct digits,\n depending on ``cond(A)`` and the size of LAMBDA.) If `atol`\n or `btol` is None, a default value of 1.0e-6 will be used.\n Ideally, they should be estimates of the relative error in the\n entries of A and B respectively. For example, if the entries\n of `A` have 7 correct digits, set atol = 1e-7. This prevents\n the algorithm from doing unnecessary work beyond the\n uncertainty of the input data.\n conlim : float, optional\n `lsmr` terminates if an estimate of ``cond(A)`` exceeds\n `conlim`. 
For compatible systems ``Ax = b``, conlim could be\n as large as 1.0e+12 (say). For least-squares problems,\n `conlim` should be less than 1.0e+8. If `conlim` is None, the\n default value is 1e+8. Maximum precision can be obtained by\n setting ``atol = btol = conlim = 0``, but the number of\n iterations may then be excessive.\n maxiter : int, optional\n `lsmr` terminates if the number of iterations reaches\n `maxiter`. The default is ``maxiter = min(m, n)``. For\n ill-conditioned systems, a larger value of `maxiter` may be\n needed.\n show : bool, optional\n Print iterations logs if ``show=True``.\n\n Returns\n -------\n x : ndarray of float\n Least-square solution returned.\n istop : int\n istop gives the reason for stopping::\n\n istop = 0 means x=0 is a solution.\n = 1 means x is an approximate solution to A*x = B,\n according to atol and btol.\n = 2 means x approximately solves the least-squares problem\n according to atol.\n = 3 means COND(A) seems to be greater than CONLIM.\n = 4 is the same as 1 with atol = btol = eps (machine\n precision)\n = 5 is the same as 2 with atol = eps.\n = 6 is the same as 3 with CONLIM = 1/eps.\n = 7 means ITN reached maxiter before the other stopping\n conditions were satisfied.\n\n itn : int\n Number of iterations used.\n normr : float\n ``norm(b-Ax)``\n normar : float\n ``norm(A^T (b - Ax))``\n norma : float\n ``norm(A)``\n conda : float\n Condition number of A.\n normx : float\n ``norm(x)``\n\n Notes\n -----\n\n .. versionadded:: 0.11.0\n\n References\n ----------\n .. [1] D. C.-L. Fong and M. A. Saunders,\n "LSMR: An iterative algorithm for sparse least-squares problems",\n SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.\n http://arxiv.org/abs/1006.0758\n .. [2] LSMR Software, http://web.stanford.edu/group/SOL/software/lsmr/\n\n '
A = aslinearoperator(A)
b = atleast_1d(b)
if (b.ndim > 1):
b = b.squeeze()
msg = ('The exact solution is x = 0 ', 'Ax - b is small enough, given atol, btol ', 'The least-squares solution is good enough, given atol ', 'The estimate of cond(Abar) has exceeded conlim ', 'Ax - b is small enough for this machine ', 'The least-squares solution is good enough for this machine', 'Cond(Abar) seems to be too large for this machine ', 'The iteration limit has been reached ')
hdg1 = ' itn x(1) norm r norm Ar'
hdg2 = ' compatible LS norm A cond A'
pfreq = 20
pcount = 0
(m, n) = A.shape
minDim = min([m, n])
if (maxiter is None):
maxiter = minDim
if show:
print(' ')
print('LSMR Least-squares solution of Ax = b\n')
print(('The matrix A has %8g rows and %8g cols' % (m, n)))
print(('damp = %20.14e\n' % damp))
print(('atol = %8.2e conlim = %8.2e\n' % (atol, conlim)))
print(('btol = %8.2e maxiter = %8g\n' % (btol, maxiter)))
u = b
beta = norm(u)
v = zeros(n)
alpha = 0
if (beta > 0):
u = ((1 / beta) * u)
v = A.rmatvec(u)
alpha = norm(v)
if (alpha > 0):
v = ((1 / alpha) * v)
itn = 0
zetabar = (alpha * beta)
alphabar = alpha
rho = 1
rhobar = 1
cbar = 1
sbar = 0
h = v.copy()
hbar = zeros(n)
x = zeros(n)
betadd = beta
betad = 0
rhodold = 1
tautildeold = 0
thetatilde = 0
zeta = 0
d = 0
normA2 = (alpha * alpha)
maxrbar = 0
minrbar = 1e+100
normA = sqrt(normA2)
condA = 1
normx = 0
normb = beta
istop = 0
ctol = 0
if (conlim > 0):
ctol = (1 / conlim)
normr = beta
normar = (alpha * beta)
if (normar == 0):
if show:
print(msg[0])
return (x, istop, itn, normr, normar, normA, condA, normx)
if show:
print(' ')
print(hdg1, hdg2)
test1 = 1
test2 = (alpha / beta)
str1 = ('%6g %12.5e' % (itn, x[0]))
str2 = (' %10.3e %10.3e' % (normr, normar))
str3 = (' %8.1e %8.1e' % (test1, test2))
print(.join([str1, str2, str3]))
while (itn < maxiter):
itn = (itn + 1)
u = (A.matvec(v) - (alpha * u))
beta = norm(u)
if (beta > 0):
u = ((1 / beta) * u)
v = (A.rmatvec(u) - (beta * v))
alpha = norm(v)
if (alpha > 0):
v = ((1 / alpha) * v)
(chat, shat, alphahat) = _sym_ortho(alphabar, damp)
rhoold = rho
(c, s, rho) = _sym_ortho(alphahat, beta)
thetanew = (s * alpha)
alphabar = (c * alpha)
rhobarold = rhobar
zetaold = zeta
thetabar = (sbar * rho)
rhotemp = (cbar * rho)
(cbar, sbar, rhobar) = _sym_ortho((cbar * rho), thetanew)
zeta = (cbar * zetabar)
zetabar = ((- sbar) * zetabar)
hbar = (h - (((thetabar * rho) / (rhoold * rhobarold)) * hbar))
x = (x + ((zeta / (rho * rhobar)) * hbar))
h = (v - ((thetanew / rho) * h))
betaacute = (chat * betadd)
betacheck = ((- shat) * betadd)
betahat = (c * betaacute)
betadd = ((- s) * betaacute)
thetatildeold = thetatilde
(ctildeold, stildeold, rhotildeold) = _sym_ortho(rhodold, thetabar)
thetatilde = (stildeold * rhobar)
rhodold = (ctildeold * rhobar)
betad = (((- stildeold) * betad) + (ctildeold * betahat))
tautildeold = ((zetaold - (thetatildeold * tautildeold)) / rhotildeold)
taud = ((zeta - (thetatilde * tautildeold)) / rhodold)
d = (d + (betacheck * betacheck))
normr = sqrt(((d + ((betad - taud) ** 2)) + (betadd * betadd)))
normA2 = (normA2 + (beta * beta))
normA = sqrt(normA2)
normA2 = (normA2 + (alpha * alpha))
maxrbar = max(maxrbar, rhobarold)
if (itn > 1):
minrbar = min(minrbar, rhobarold)
condA = (max(maxrbar, rhotemp) / min(minrbar, rhotemp))
normar = abs(zetabar)
normx = norm(x)
test1 = (normr / normb)
if ((normA * normr) != 0):
test2 = (normar / (normA * normr))
else:
test2 = infty
test3 = (1 / condA)
t1 = (test1 / (1 + ((normA * normx) / normb)))
rtol = (btol + (((atol * normA) * normx) / normb))
if (itn >= maxiter):
istop = 7
if ((1 + test3) <= 1):
istop = 6
if ((1 + test2) <= 1):
istop = 5
if ((1 + t1) <= 1):
istop = 4
if (test3 <= ctol):
istop = 3
if (test2 <= atol):
istop = 2
if (test1 <= rtol):
istop = 1
if show:
if ((n <= 40) or (itn <= 10) or (itn >= (maxiter - 10)) or ((itn % 10) == 0) or (test3 <= (1.1 * ctol)) or (test2 <= (1.1 * atol)) or (test1 <= (1.1 * rtol)) or (istop != 0)):
if (pcount >= pfreq):
pcount = 0
print(' ')
print(hdg1, hdg2)
pcount = (pcount + 1)
str1 = ('%6g %12.5e' % (itn, x[0]))
str2 = (' %10.3e %10.3e' % (normr, normar))
str3 = (' %8.1e %8.1e' % (test1, test2))
str4 = (' %8.1e %8.1e' % (normA, condA))
print(.join([str1, str2, str3, str4]))
if (istop > 0):
break
if show:
print(' ')
print('LSMR finished')
print(msg[istop])
print(('istop =%8g normr =%8.1e' % (istop, normr)))
print((' normA =%8.1e normAr =%8.1e' % (normA, normar)))
print(('itn =%8g condA =%8.1e' % (itn, condA)))
print((' normx =%8.1e' % normx))
print(str1, str2)
print(str3, str4)
return (x, istop, itn, normr, normar, normA, condA, normx) | def lsmr(A, b, damp=0.0, atol=1e-06, btol=1e-06, conlim=100000000.0, maxiter=None, show=False):
'Iterative solver for least-squares problems.\n\n lsmr solves the system of linear equations ``Ax = b``. If the system\n is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.\n A is a rectangular matrix of dimension m-by-n, where all cases are\n allowed: m = n, m > n, or m < n. B is a vector of length m.\n The matrix A may be dense or sparse (usually sparse).\n\n Parameters\n ----------\n A : {matrix, sparse matrix, ndarray, LinearOperator}\n Matrix A in the linear system.\n b : array_like, shape (m,)\n Vector b in the linear system.\n damp : float\n Damping factor for regularized least-squares. `lsmr` solves\n the regularized least-squares problem::\n\n min ||(b) - ( A )x||\n ||(0) (damp*I) ||_2\n\n where damp is a scalar. If damp is None or 0, the system\n is solved without regularization.\n atol, btol : float, optional\n Stopping tolerances. `lsmr` continues iterations until a\n certain backward error estimate is smaller than some quantity\n depending on atol and btol. Let ``r = b - Ax`` be the\n residual vector for the current approximate solution ``x``.\n If ``Ax = b`` seems to be consistent, ``lsmr`` terminates\n when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.\n Otherwise, lsmr terminates when ``norm(A^{T} r) <=\n atol * norm(A) * norm(r)``. If both tolerances are 1.0e-6 (say),\n the final ``norm(r)`` should be accurate to about 6\n digits. (The final x will usually have fewer correct digits,\n depending on ``cond(A)`` and the size of LAMBDA.) If `atol`\n or `btol` is None, a default value of 1.0e-6 will be used.\n Ideally, they should be estimates of the relative error in the\n entries of A and B respectively. For example, if the entries\n of `A` have 7 correct digits, set atol = 1e-7. This prevents\n the algorithm from doing unnecessary work beyond the\n uncertainty of the input data.\n conlim : float, optional\n `lsmr` terminates if an estimate of ``cond(A)`` exceeds\n `conlim`. 
For compatible systems ``Ax = b``, conlim could be\n as large as 1.0e+12 (say). For least-squares problems,\n `conlim` should be less than 1.0e+8. If `conlim` is None, the\n default value is 1e+8. Maximum precision can be obtained by\n setting ``atol = btol = conlim = 0``, but the number of\n iterations may then be excessive.\n maxiter : int, optional\n `lsmr` terminates if the number of iterations reaches\n `maxiter`. The default is ``maxiter = min(m, n)``. For\n ill-conditioned systems, a larger value of `maxiter` may be\n needed.\n show : bool, optional\n Print iterations logs if ``show=True``.\n\n Returns\n -------\n x : ndarray of float\n Least-square solution returned.\n istop : int\n istop gives the reason for stopping::\n\n istop = 0 means x=0 is a solution.\n = 1 means x is an approximate solution to A*x = B,\n according to atol and btol.\n = 2 means x approximately solves the least-squares problem\n according to atol.\n = 3 means COND(A) seems to be greater than CONLIM.\n = 4 is the same as 1 with atol = btol = eps (machine\n precision)\n = 5 is the same as 2 with atol = eps.\n = 6 is the same as 3 with CONLIM = 1/eps.\n = 7 means ITN reached maxiter before the other stopping\n conditions were satisfied.\n\n itn : int\n Number of iterations used.\n normr : float\n ``norm(b-Ax)``\n normar : float\n ``norm(A^T (b - Ax))``\n norma : float\n ``norm(A)``\n conda : float\n Condition number of A.\n normx : float\n ``norm(x)``\n\n Notes\n -----\n\n .. versionadded:: 0.11.0\n\n References\n ----------\n .. [1] D. C.-L. Fong and M. A. Saunders,\n "LSMR: An iterative algorithm for sparse least-squares problems",\n SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.\n http://arxiv.org/abs/1006.0758\n .. [2] LSMR Software, http://web.stanford.edu/group/SOL/software/lsmr/\n\n '
A = aslinearoperator(A)
b = atleast_1d(b)
if (b.ndim > 1):
b = b.squeeze()
msg = ('The exact solution is x = 0 ', 'Ax - b is small enough, given atol, btol ', 'The least-squares solution is good enough, given atol ', 'The estimate of cond(Abar) has exceeded conlim ', 'Ax - b is small enough for this machine ', 'The least-squares solution is good enough for this machine', 'Cond(Abar) seems to be too large for this machine ', 'The iteration limit has been reached ')
hdg1 = ' itn x(1) norm r norm Ar'
hdg2 = ' compatible LS norm A cond A'
pfreq = 20
pcount = 0
(m, n) = A.shape
minDim = min([m, n])
if (maxiter is None):
maxiter = minDim
if show:
print(' ')
print('LSMR Least-squares solution of Ax = b\n')
print(('The matrix A has %8g rows and %8g cols' % (m, n)))
print(('damp = %20.14e\n' % damp))
print(('atol = %8.2e conlim = %8.2e\n' % (atol, conlim)))
print(('btol = %8.2e maxiter = %8g\n' % (btol, maxiter)))
u = b
beta = norm(u)
v = zeros(n)
alpha = 0
if (beta > 0):
u = ((1 / beta) * u)
v = A.rmatvec(u)
alpha = norm(v)
if (alpha > 0):
v = ((1 / alpha) * v)
itn = 0
zetabar = (alpha * beta)
alphabar = alpha
rho = 1
rhobar = 1
cbar = 1
sbar = 0
h = v.copy()
hbar = zeros(n)
x = zeros(n)
betadd = beta
betad = 0
rhodold = 1
tautildeold = 0
thetatilde = 0
zeta = 0
d = 0
normA2 = (alpha * alpha)
maxrbar = 0
minrbar = 1e+100
normA = sqrt(normA2)
condA = 1
normx = 0
normb = beta
istop = 0
ctol = 0
if (conlim > 0):
ctol = (1 / conlim)
normr = beta
normar = (alpha * beta)
if (normar == 0):
if show:
print(msg[0])
return (x, istop, itn, normr, normar, normA, condA, normx)
if show:
print(' ')
print(hdg1, hdg2)
test1 = 1
test2 = (alpha / beta)
str1 = ('%6g %12.5e' % (itn, x[0]))
str2 = (' %10.3e %10.3e' % (normr, normar))
str3 = (' %8.1e %8.1e' % (test1, test2))
print(.join([str1, str2, str3]))
while (itn < maxiter):
itn = (itn + 1)
u = (A.matvec(v) - (alpha * u))
beta = norm(u)
if (beta > 0):
u = ((1 / beta) * u)
v = (A.rmatvec(u) - (beta * v))
alpha = norm(v)
if (alpha > 0):
v = ((1 / alpha) * v)
(chat, shat, alphahat) = _sym_ortho(alphabar, damp)
rhoold = rho
(c, s, rho) = _sym_ortho(alphahat, beta)
thetanew = (s * alpha)
alphabar = (c * alpha)
rhobarold = rhobar
zetaold = zeta
thetabar = (sbar * rho)
rhotemp = (cbar * rho)
(cbar, sbar, rhobar) = _sym_ortho((cbar * rho), thetanew)
zeta = (cbar * zetabar)
zetabar = ((- sbar) * zetabar)
hbar = (h - (((thetabar * rho) / (rhoold * rhobarold)) * hbar))
x = (x + ((zeta / (rho * rhobar)) * hbar))
h = (v - ((thetanew / rho) * h))
betaacute = (chat * betadd)
betacheck = ((- shat) * betadd)
betahat = (c * betaacute)
betadd = ((- s) * betaacute)
thetatildeold = thetatilde
(ctildeold, stildeold, rhotildeold) = _sym_ortho(rhodold, thetabar)
thetatilde = (stildeold * rhobar)
rhodold = (ctildeold * rhobar)
betad = (((- stildeold) * betad) + (ctildeold * betahat))
tautildeold = ((zetaold - (thetatildeold * tautildeold)) / rhotildeold)
taud = ((zeta - (thetatilde * tautildeold)) / rhodold)
d = (d + (betacheck * betacheck))
normr = sqrt(((d + ((betad - taud) ** 2)) + (betadd * betadd)))
normA2 = (normA2 + (beta * beta))
normA = sqrt(normA2)
normA2 = (normA2 + (alpha * alpha))
maxrbar = max(maxrbar, rhobarold)
if (itn > 1):
minrbar = min(minrbar, rhobarold)
condA = (max(maxrbar, rhotemp) / min(minrbar, rhotemp))
normar = abs(zetabar)
normx = norm(x)
test1 = (normr / normb)
if ((normA * normr) != 0):
test2 = (normar / (normA * normr))
else:
test2 = infty
test3 = (1 / condA)
t1 = (test1 / (1 + ((normA * normx) / normb)))
rtol = (btol + (((atol * normA) * normx) / normb))
if (itn >= maxiter):
istop = 7
if ((1 + test3) <= 1):
istop = 6
if ((1 + test2) <= 1):
istop = 5
if ((1 + t1) <= 1):
istop = 4
if (test3 <= ctol):
istop = 3
if (test2 <= atol):
istop = 2
if (test1 <= rtol):
istop = 1
if show:
if ((n <= 40) or (itn <= 10) or (itn >= (maxiter - 10)) or ((itn % 10) == 0) or (test3 <= (1.1 * ctol)) or (test2 <= (1.1 * atol)) or (test1 <= (1.1 * rtol)) or (istop != 0)):
if (pcount >= pfreq):
pcount = 0
print(' ')
print(hdg1, hdg2)
pcount = (pcount + 1)
str1 = ('%6g %12.5e' % (itn, x[0]))
str2 = (' %10.3e %10.3e' % (normr, normar))
str3 = (' %8.1e %8.1e' % (test1, test2))
str4 = (' %8.1e %8.1e' % (normA, condA))
print(.join([str1, str2, str3, str4]))
if (istop > 0):
break
if show:
print(' ')
print('LSMR finished')
print(msg[istop])
print(('istop =%8g normr =%8.1e' % (istop, normr)))
print((' normA =%8.1e normAr =%8.1e' % (normA, normar)))
print(('itn =%8g condA =%8.1e' % (itn, condA)))
print((' normx =%8.1e' % normx))
print(str1, str2)
print(str3, str4)
return (x, istop, itn, normr, normar, normA, condA, normx)<|docstring|>Iterative solver for least-squares problems.
lsmr solves the system of linear equations ``Ax = b``. If the system
is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.
A is a rectangular matrix of dimension m-by-n, where all cases are
allowed: m = n, m > n, or m < n. B is a vector of length m.
The matrix A may be dense or sparse (usually sparse).
Parameters
----------
A : {matrix, sparse matrix, ndarray, LinearOperator}
Matrix A in the linear system.
b : array_like, shape (m,)
Vector b in the linear system.
damp : float
Damping factor for regularized least-squares. `lsmr` solves
the regularized least-squares problem::
min ||(b) - ( A )x||
||(0) (damp*I) ||_2
where damp is a scalar. If damp is None or 0, the system
is solved without regularization.
atol, btol : float, optional
Stopping tolerances. `lsmr` continues iterations until a
certain backward error estimate is smaller than some quantity
depending on atol and btol. Let ``r = b - Ax`` be the
residual vector for the current approximate solution ``x``.
If ``Ax = b`` seems to be consistent, ``lsmr`` terminates
when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
Otherwise, lsmr terminates when ``norm(A^{T} r) <=
atol * norm(A) * norm(r)``. If both tolerances are 1.0e-6 (say),
the final ``norm(r)`` should be accurate to about 6
digits. (The final x will usually have fewer correct digits,
depending on ``cond(A)`` and the size of LAMBDA.) If `atol`
or `btol` is None, a default value of 1.0e-6 will be used.
Ideally, they should be estimates of the relative error in the
entries of A and B respectively. For example, if the entries
of `A` have 7 correct digits, set atol = 1e-7. This prevents
the algorithm from doing unnecessary work beyond the
uncertainty of the input data.
conlim : float, optional
`lsmr` terminates if an estimate of ``cond(A)`` exceeds
`conlim`. For compatible systems ``Ax = b``, conlim could be
as large as 1.0e+12 (say). For least-squares problems,
`conlim` should be less than 1.0e+8. If `conlim` is None, the
default value is 1e+8. Maximum precision can be obtained by
setting ``atol = btol = conlim = 0``, but the number of
iterations may then be excessive.
maxiter : int, optional
`lsmr` terminates if the number of iterations reaches
`maxiter`. The default is ``maxiter = min(m, n)``. For
ill-conditioned systems, a larger value of `maxiter` may be
needed.
show : bool, optional
Print iterations logs if ``show=True``.
Returns
-------
x : ndarray of float
Least-square solution returned.
istop : int
istop gives the reason for stopping::
istop = 0 means x=0 is a solution.
= 1 means x is an approximate solution to A*x = B,
according to atol and btol.
= 2 means x approximately solves the least-squares problem
according to atol.
= 3 means COND(A) seems to be greater than CONLIM.
= 4 is the same as 1 with atol = btol = eps (machine
precision)
= 5 is the same as 2 with atol = eps.
= 6 is the same as 3 with CONLIM = 1/eps.
= 7 means ITN reached maxiter before the other stopping
conditions were satisfied.
itn : int
Number of iterations used.
normr : float
``norm(b-Ax)``
normar : float
``norm(A^T (b - Ax))``
norma : float
``norm(A)``
conda : float
Condition number of A.
normx : float
``norm(x)``
Notes
-----
.. versionadded:: 0.11.0
References
----------
.. [1] D. C.-L. Fong and M. A. Saunders,
"LSMR: An iterative algorithm for sparse least-squares problems",
SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.
http://arxiv.org/abs/1006.0758
.. [2] LSMR Software, http://web.stanford.edu/group/SOL/software/lsmr/<|endoftext|> |
48e73c6642d4969173a05f3fc51bbe7fdc6fbbf53063fbf62f70856038edb460 | def _ensure_indexes(self, collection):
'Ensures that all indexes are created.'
collection.ensure_index(TTL_INDEX_FIELDS, name='ttl', expireAfterSeconds=0, background=True)
collection.ensure_index(ACTIVE_INDEX_FIELDS, name='active', background=True)
collection.ensure_index(CLAIMED_INDEX_FIELDS, name='claimed', background=True)
collection.ensure_index(COUNTING_INDEX_FIELDS, name='counting', background=True)
collection.ensure_index(MARKER_INDEX_FIELDS, name='queue_marker', background=True)
collection.ensure_index(TRANSACTION_INDEX_FIELDS, name='transaction', background=True) | Ensures that all indexes are created. | zaqar/storage/mongodb/messages.py | _ensure_indexes | openstack/zaqar | 97 | python | def _ensure_indexes(self, collection):
collection.ensure_index(TTL_INDEX_FIELDS, name='ttl', expireAfterSeconds=0, background=True)
collection.ensure_index(ACTIVE_INDEX_FIELDS, name='active', background=True)
collection.ensure_index(CLAIMED_INDEX_FIELDS, name='claimed', background=True)
collection.ensure_index(COUNTING_INDEX_FIELDS, name='counting', background=True)
collection.ensure_index(MARKER_INDEX_FIELDS, name='queue_marker', background=True)
collection.ensure_index(TRANSACTION_INDEX_FIELDS, name='transaction', background=True) | def _ensure_indexes(self, collection):
collection.ensure_index(TTL_INDEX_FIELDS, name='ttl', expireAfterSeconds=0, background=True)
collection.ensure_index(ACTIVE_INDEX_FIELDS, name='active', background=True)
collection.ensure_index(CLAIMED_INDEX_FIELDS, name='claimed', background=True)
collection.ensure_index(COUNTING_INDEX_FIELDS, name='counting', background=True)
collection.ensure_index(MARKER_INDEX_FIELDS, name='queue_marker', background=True)
collection.ensure_index(TRANSACTION_INDEX_FIELDS, name='transaction', background=True)<|docstring|>Ensures that all indexes are created.<|endoftext|> |
e6b1cb1650298f935ae214834a3b9d051502e175e25dbfa798649470c34de14b | def _collection(self, queue_name, project=None):
'Get a partitioned collection instance.'
return self._collections[utils.get_partition(self._num_partitions, queue_name, project)] | Get a partitioned collection instance. | zaqar/storage/mongodb/messages.py | _collection | openstack/zaqar | 97 | python | def _collection(self, queue_name, project=None):
return self._collections[utils.get_partition(self._num_partitions, queue_name, project)] | def _collection(self, queue_name, project=None):
return self._collections[utils.get_partition(self._num_partitions, queue_name, project)]<|docstring|>Get a partitioned collection instance.<|endoftext|> |
82275fb60a28e5f727ccfdd5ce919ac3cc99e2a630575b597e66cdd60528d0ee | def _backoff_sleep(self, attempt):
'Sleep between retries using a jitter algorithm.\n\n Mitigates thrashing between multiple parallel requests, and\n creates backpressure on clients to slow down the rate\n at which they submit requests.\n\n :param attempt: current attempt number, zero-based\n '
conf = self.driver.mongodb_conf
seconds = utils.calculate_backoff(attempt, conf.max_attempts, conf.max_retry_sleep, conf.max_retry_jitter)
time.sleep(seconds) | Sleep between retries using a jitter algorithm.
Mitigates thrashing between multiple parallel requests, and
creates backpressure on clients to slow down the rate
at which they submit requests.
:param attempt: current attempt number, zero-based | zaqar/storage/mongodb/messages.py | _backoff_sleep | openstack/zaqar | 97 | python | def _backoff_sleep(self, attempt):
'Sleep between retries using a jitter algorithm.\n\n Mitigates thrashing between multiple parallel requests, and\n creates backpressure on clients to slow down the rate\n at which they submit requests.\n\n :param attempt: current attempt number, zero-based\n '
conf = self.driver.mongodb_conf
seconds = utils.calculate_backoff(attempt, conf.max_attempts, conf.max_retry_sleep, conf.max_retry_jitter)
time.sleep(seconds) | def _backoff_sleep(self, attempt):
'Sleep between retries using a jitter algorithm.\n\n Mitigates thrashing between multiple parallel requests, and\n creates backpressure on clients to slow down the rate\n at which they submit requests.\n\n :param attempt: current attempt number, zero-based\n '
conf = self.driver.mongodb_conf
seconds = utils.calculate_backoff(attempt, conf.max_attempts, conf.max_retry_sleep, conf.max_retry_jitter)
time.sleep(seconds)<|docstring|>Sleep between retries using a jitter algorithm.
Mitigates thrashing between multiple parallel requests, and
creates backpressure on clients to slow down the rate
at which they submit requests.
:param attempt: current attempt number, zero-based<|endoftext|> |
b8ee61734b5b8b557870f868a4962e9f36ffd6aba39c1e52cb156897748777e9 | def _purge_queue(self, queue_name, project=None):
'Removes all messages from the queue.\n\n Warning: Only use this when deleting the queue; otherwise\n you can cause a side-effect of reseting the marker counter\n which can cause clients to miss tons of messages.\n\n If the queue does not exist, this method fails silently.\n\n :param queue_name: name of the queue to purge\n :param project: ID of the project to which the queue belongs\n '
scope = utils.scope_queue_name(queue_name, project)
collection = self._collection(queue_name, project)
collection.delete_many({PROJ_QUEUE: scope}) | Removes all messages from the queue.
Warning: Only use this when deleting the queue; otherwise
you can cause a side-effect of reseting the marker counter
which can cause clients to miss tons of messages.
If the queue does not exist, this method fails silently.
:param queue_name: name of the queue to purge
:param project: ID of the project to which the queue belongs | zaqar/storage/mongodb/messages.py | _purge_queue | openstack/zaqar | 97 | python | def _purge_queue(self, queue_name, project=None):
'Removes all messages from the queue.\n\n Warning: Only use this when deleting the queue; otherwise\n you can cause a side-effect of reseting the marker counter\n which can cause clients to miss tons of messages.\n\n If the queue does not exist, this method fails silently.\n\n :param queue_name: name of the queue to purge\n :param project: ID of the project to which the queue belongs\n '
scope = utils.scope_queue_name(queue_name, project)
collection = self._collection(queue_name, project)
collection.delete_many({PROJ_QUEUE: scope}) | def _purge_queue(self, queue_name, project=None):
'Removes all messages from the queue.\n\n Warning: Only use this when deleting the queue; otherwise\n you can cause a side-effect of reseting the marker counter\n which can cause clients to miss tons of messages.\n\n If the queue does not exist, this method fails silently.\n\n :param queue_name: name of the queue to purge\n :param project: ID of the project to which the queue belongs\n '
scope = utils.scope_queue_name(queue_name, project)
collection = self._collection(queue_name, project)
collection.delete_many({PROJ_QUEUE: scope})<|docstring|>Removes all messages from the queue.
Warning: Only use this when deleting the queue; otherwise
you can cause a side-effect of reseting the marker counter
which can cause clients to miss tons of messages.
If the queue does not exist, this method fails silently.
:param queue_name: name of the queue to purge
:param project: ID of the project to which the queue belongs<|endoftext|> |
bd89ea52e898f2f804ab7542c07c4a5bae1b73baa022900bf008a9be02351e14 | def _list(self, queue_name, project=None, marker=None, echo=False, client_uuid=None, projection=None, include_claimed=False, include_delayed=False, sort=1, limit=None):
'Message document listing helper.\n\n :param queue_name: Name of the queue to list\n :param project: (Default None) Project `queue_name` belongs to. If\n not specified, queries the "global" namespace/project.\n :param marker: (Default None) Message marker from which to start\n iterating. If not specified, starts with the first message\n available in the queue.\n :param echo: (Default False) Whether to return messages that match\n client_uuid\n :param client_uuid: (Default None) UUID for the client that\n originated this request\n :param projection: (Default None) a list of field names that should be\n returned in the result set or a dict specifying the fields to\n include or exclude\n :param include_claimed: (Default False) Whether to include\n claimed messages, not just active ones\n :param include_delayed: (Default False) Whether to include\n delayed messages, not just active ones\n :param sort: (Default 1) Sort order for the listing. Pass 1 for\n ascending (oldest message first), or -1 for descending (newest\n message first).\n :param limit: (Default None) The maximum number of messages\n to list. The results may include fewer messages than the\n requested `limit` if not enough are available. If limit is\n not specified\n\n :returns: Generator yielding up to `limit` messages.\n '
if (sort not in (1, (- 1))):
raise ValueError(u'sort must be either 1 (ascending) or -1 (descending)')
now = timeutils.utcnow_ts()
query = {PROJ_QUEUE: utils.scope_queue_name(queue_name, project), 'tx': None}
if (not echo):
query['u'] = {'$ne': client_uuid}
if (marker is not None):
query['k'] = {'$gt': marker}
collection = self._collection(queue_name, project)
if (not include_claimed):
query['c.e'] = {'$lte': now}
if (not include_delayed):
query['$or'] = [{'d': {'$lte': now}}, {'d': {'$exists': False}}]
cursor = collection.find(query, projection=projection, sort=[('k', sort)])
if (limit is not None):
cursor.limit(limit)
return cursor.hint(ACTIVE_INDEX_FIELDS) | Message document listing helper.
:param queue_name: Name of the queue to list
:param project: (Default None) Project `queue_name` belongs to. If
not specified, queries the "global" namespace/project.
:param marker: (Default None) Message marker from which to start
iterating. If not specified, starts with the first message
available in the queue.
:param echo: (Default False) Whether to return messages that match
client_uuid
:param client_uuid: (Default None) UUID for the client that
originated this request
:param projection: (Default None) a list of field names that should be
returned in the result set or a dict specifying the fields to
include or exclude
:param include_claimed: (Default False) Whether to include
claimed messages, not just active ones
:param include_delayed: (Default False) Whether to include
delayed messages, not just active ones
:param sort: (Default 1) Sort order for the listing. Pass 1 for
ascending (oldest message first), or -1 for descending (newest
message first).
:param limit: (Default None) The maximum number of messages
to list. The results may include fewer messages than the
requested `limit` if not enough are available. If limit is
not specified
:returns: Generator yielding up to `limit` messages. | zaqar/storage/mongodb/messages.py | _list | openstack/zaqar | 97 | python | def _list(self, queue_name, project=None, marker=None, echo=False, client_uuid=None, projection=None, include_claimed=False, include_delayed=False, sort=1, limit=None):
'Message document listing helper.\n\n :param queue_name: Name of the queue to list\n :param project: (Default None) Project `queue_name` belongs to. If\n not specified, queries the "global" namespace/project.\n :param marker: (Default None) Message marker from which to start\n iterating. If not specified, starts with the first message\n available in the queue.\n :param echo: (Default False) Whether to return messages that match\n client_uuid\n :param client_uuid: (Default None) UUID for the client that\n originated this request\n :param projection: (Default None) a list of field names that should be\n returned in the result set or a dict specifying the fields to\n include or exclude\n :param include_claimed: (Default False) Whether to include\n claimed messages, not just active ones\n :param include_delayed: (Default False) Whether to include\n delayed messages, not just active ones\n :param sort: (Default 1) Sort order for the listing. Pass 1 for\n ascending (oldest message first), or -1 for descending (newest\n message first).\n :param limit: (Default None) The maximum number of messages\n to list. The results may include fewer messages than the\n requested `limit` if not enough are available. If limit is\n not specified\n\n :returns: Generator yielding up to `limit` messages.\n '
if (sort not in (1, (- 1))):
raise ValueError(u'sort must be either 1 (ascending) or -1 (descending)')
now = timeutils.utcnow_ts()
query = {PROJ_QUEUE: utils.scope_queue_name(queue_name, project), 'tx': None}
if (not echo):
query['u'] = {'$ne': client_uuid}
if (marker is not None):
query['k'] = {'$gt': marker}
collection = self._collection(queue_name, project)
if (not include_claimed):
query['c.e'] = {'$lte': now}
if (not include_delayed):
query['$or'] = [{'d': {'$lte': now}}, {'d': {'$exists': False}}]
cursor = collection.find(query, projection=projection, sort=[('k', sort)])
if (limit is not None):
cursor.limit(limit)
return cursor.hint(ACTIVE_INDEX_FIELDS) | def _list(self, queue_name, project=None, marker=None, echo=False, client_uuid=None, projection=None, include_claimed=False, include_delayed=False, sort=1, limit=None):
'Message document listing helper.\n\n :param queue_name: Name of the queue to list\n :param project: (Default None) Project `queue_name` belongs to. If\n not specified, queries the "global" namespace/project.\n :param marker: (Default None) Message marker from which to start\n iterating. If not specified, starts with the first message\n available in the queue.\n :param echo: (Default False) Whether to return messages that match\n client_uuid\n :param client_uuid: (Default None) UUID for the client that\n originated this request\n :param projection: (Default None) a list of field names that should be\n returned in the result set or a dict specifying the fields to\n include or exclude\n :param include_claimed: (Default False) Whether to include\n claimed messages, not just active ones\n :param include_delayed: (Default False) Whether to include\n delayed messages, not just active ones\n :param sort: (Default 1) Sort order for the listing. Pass 1 for\n ascending (oldest message first), or -1 for descending (newest\n message first).\n :param limit: (Default None) The maximum number of messages\n to list. The results may include fewer messages than the\n requested `limit` if not enough are available. If limit is\n not specified\n\n :returns: Generator yielding up to `limit` messages.\n '
if (sort not in (1, (- 1))):
raise ValueError(u'sort must be either 1 (ascending) or -1 (descending)')
now = timeutils.utcnow_ts()
query = {PROJ_QUEUE: utils.scope_queue_name(queue_name, project), 'tx': None}
if (not echo):
query['u'] = {'$ne': client_uuid}
if (marker is not None):
query['k'] = {'$gt': marker}
collection = self._collection(queue_name, project)
if (not include_claimed):
query['c.e'] = {'$lte': now}
if (not include_delayed):
query['$or'] = [{'d': {'$lte': now}}, {'d': {'$exists': False}}]
cursor = collection.find(query, projection=projection, sort=[('k', sort)])
if (limit is not None):
cursor.limit(limit)
return cursor.hint(ACTIVE_INDEX_FIELDS)<|docstring|>Message document listing helper.
:param queue_name: Name of the queue to list
:param project: (Default None) Project `queue_name` belongs to. If
not specified, queries the "global" namespace/project.
:param marker: (Default None) Message marker from which to start
iterating. If not specified, starts with the first message
available in the queue.
:param echo: (Default False) Whether to return messages that match
client_uuid
:param client_uuid: (Default None) UUID for the client that
originated this request
:param projection: (Default None) a list of field names that should be
returned in the result set or a dict specifying the fields to
include or exclude
:param include_claimed: (Default False) Whether to include
claimed messages, not just active ones
:param include_delayed: (Default False) Whether to include
delayed messages, not just active ones
:param sort: (Default 1) Sort order for the listing. Pass 1 for
ascending (oldest message first), or -1 for descending (newest
message first).
:param limit: (Default None) The maximum number of messages
to list. The results may include fewer messages than the
requested `limit` if not enough are available. If limit is
not specified
:returns: Generator yielding up to `limit` messages.<|endoftext|> |
8ce7b4ae0ad22da0089109a94faf78281c2e0699053d1ac64dc8e53f1e603c8c | def _count(self, queue_name, project=None, include_claimed=False):
"Return total number of messages in a queue.\n\n This method is designed to very quickly count the number\n of messages in a given queue. Expired messages are not\n counted, of course. If the queue does not exist, the\n count will always be 0.\n\n Note: Some expired messages may be included in the count if\n they haven't been GC'd yet. This is done for performance.\n "
query = {PROJ_QUEUE: utils.scope_queue_name(queue_name, project), 'tx': None}
if (not include_claimed):
query['c.e'] = {'$lte': timeutils.utcnow_ts()}
collection = self._collection(queue_name, project)
return collection.count(filter=query, hint=COUNTING_INDEX_FIELDS) | Return total number of messages in a queue.
This method is designed to very quickly count the number
of messages in a given queue. Expired messages are not
counted, of course. If the queue does not exist, the
count will always be 0.
Note: Some expired messages may be included in the count if
they haven't been GC'd yet. This is done for performance. | zaqar/storage/mongodb/messages.py | _count | openstack/zaqar | 97 | python | def _count(self, queue_name, project=None, include_claimed=False):
"Return total number of messages in a queue.\n\n This method is designed to very quickly count the number\n of messages in a given queue. Expired messages are not\n counted, of course. If the queue does not exist, the\n count will always be 0.\n\n Note: Some expired messages may be included in the count if\n they haven't been GC'd yet. This is done for performance.\n "
query = {PROJ_QUEUE: utils.scope_queue_name(queue_name, project), 'tx': None}
if (not include_claimed):
query['c.e'] = {'$lte': timeutils.utcnow_ts()}
collection = self._collection(queue_name, project)
return collection.count(filter=query, hint=COUNTING_INDEX_FIELDS) | def _count(self, queue_name, project=None, include_claimed=False):
"Return total number of messages in a queue.\n\n This method is designed to very quickly count the number\n of messages in a given queue. Expired messages are not\n counted, of course. If the queue does not exist, the\n count will always be 0.\n\n Note: Some expired messages may be included in the count if\n they haven't been GC'd yet. This is done for performance.\n "
query = {PROJ_QUEUE: utils.scope_queue_name(queue_name, project), 'tx': None}
if (not include_claimed):
query['c.e'] = {'$lte': timeutils.utcnow_ts()}
collection = self._collection(queue_name, project)
return collection.count(filter=query, hint=COUNTING_INDEX_FIELDS)<|docstring|>Return total number of messages in a queue.
This method is designed to very quickly count the number
of messages in a given queue. Expired messages are not
counted, of course. If the queue does not exist, the
count will always be 0.
Note: Some expired messages may be included in the count if
they haven't been GC'd yet. This is done for performance.<|endoftext|> |
71d5e2d1c027006ebd9c3e844058a9b039187fb6911020c3a2f2f9748da1c6d1 | def _inc_counter(self, queue_name, project=None, amount=1, window=None):
"Increments the message counter and returns the new value.\n\n :param queue_name: Name of the queue to which the counter is scoped\n :param project: Queue's project name\n :param amount: (Default 1) Amount by which to increment the counter\n :param window: (Default None) A time window, in seconds, that\n must have elapsed since the counter was last updated, in\n order to increment the counter.\n\n :returns: Updated message counter value, or None if window\n was specified, and the counter has already been updated\n within the specified time period.\n\n :raises QueueDoesNotExist: if not found\n "
if hasattr(self._queue_ctrl, '_inc_counter'):
return self._queue_ctrl._inc_counter(queue_name, project, amount, window)
now = timeutils.utcnow_ts()
update = {'$inc': {'c.v': amount}, '$set': {'c.t': now}}
query = _get_scoped_query(queue_name, project)
if (window is not None):
threshold = (now - window)
query['c.t'] = {'$lt': threshold}
while True:
try:
collection = self._collection(queue_name, project).stats
doc = collection.find_one_and_update(query, update, return_document=pymongo.ReturnDocument.AFTER, projection={'c.v': 1, '_id': 0})
break
except pymongo.errors.AutoReconnect:
LOG.exception('Auto reconnect error')
if (doc is None):
if (window is None):
message = u'Failed to increment the message counter for queue %(name)s and project %(project)s'
message %= dict(name=queue_name, project=project)
LOG.warning(message)
raise errors.QueueDoesNotExist(queue_name, project)
return None
return doc['c']['v'] | Increments the message counter and returns the new value.
:param queue_name: Name of the queue to which the counter is scoped
:param project: Queue's project name
:param amount: (Default 1) Amount by which to increment the counter
:param window: (Default None) A time window, in seconds, that
must have elapsed since the counter was last updated, in
order to increment the counter.
:returns: Updated message counter value, or None if window
was specified, and the counter has already been updated
within the specified time period.
:raises QueueDoesNotExist: if not found | zaqar/storage/mongodb/messages.py | _inc_counter | openstack/zaqar | 97 | python | def _inc_counter(self, queue_name, project=None, amount=1, window=None):
"Increments the message counter and returns the new value.\n\n :param queue_name: Name of the queue to which the counter is scoped\n :param project: Queue's project name\n :param amount: (Default 1) Amount by which to increment the counter\n :param window: (Default None) A time window, in seconds, that\n must have elapsed since the counter was last updated, in\n order to increment the counter.\n\n :returns: Updated message counter value, or None if window\n was specified, and the counter has already been updated\n within the specified time period.\n\n :raises QueueDoesNotExist: if not found\n "
if hasattr(self._queue_ctrl, '_inc_counter'):
return self._queue_ctrl._inc_counter(queue_name, project, amount, window)
now = timeutils.utcnow_ts()
update = {'$inc': {'c.v': amount}, '$set': {'c.t': now}}
query = _get_scoped_query(queue_name, project)
if (window is not None):
threshold = (now - window)
query['c.t'] = {'$lt': threshold}
while True:
try:
collection = self._collection(queue_name, project).stats
doc = collection.find_one_and_update(query, update, return_document=pymongo.ReturnDocument.AFTER, projection={'c.v': 1, '_id': 0})
break
except pymongo.errors.AutoReconnect:
LOG.exception('Auto reconnect error')
if (doc is None):
if (window is None):
message = u'Failed to increment the message counter for queue %(name)s and project %(project)s'
message %= dict(name=queue_name, project=project)
LOG.warning(message)
raise errors.QueueDoesNotExist(queue_name, project)
return None
return doc['c']['v'] | def _inc_counter(self, queue_name, project=None, amount=1, window=None):
"Increments the message counter and returns the new value.\n\n :param queue_name: Name of the queue to which the counter is scoped\n :param project: Queue's project name\n :param amount: (Default 1) Amount by which to increment the counter\n :param window: (Default None) A time window, in seconds, that\n must have elapsed since the counter was last updated, in\n order to increment the counter.\n\n :returns: Updated message counter value, or None if window\n was specified, and the counter has already been updated\n within the specified time period.\n\n :raises QueueDoesNotExist: if not found\n "
if hasattr(self._queue_ctrl, '_inc_counter'):
return self._queue_ctrl._inc_counter(queue_name, project, amount, window)
now = timeutils.utcnow_ts()
update = {'$inc': {'c.v': amount}, '$set': {'c.t': now}}
query = _get_scoped_query(queue_name, project)
if (window is not None):
threshold = (now - window)
query['c.t'] = {'$lt': threshold}
while True:
try:
collection = self._collection(queue_name, project).stats
doc = collection.find_one_and_update(query, update, return_document=pymongo.ReturnDocument.AFTER, projection={'c.v': 1, '_id': 0})
break
except pymongo.errors.AutoReconnect:
LOG.exception('Auto reconnect error')
if (doc is None):
if (window is None):
message = u'Failed to increment the message counter for queue %(name)s and project %(project)s'
message %= dict(name=queue_name, project=project)
LOG.warning(message)
raise errors.QueueDoesNotExist(queue_name, project)
return None
return doc['c']['v']<|docstring|>Increments the message counter and returns the new value.
:param queue_name: Name of the queue to which the counter is scoped
:param project: Queue's project name
:param amount: (Default 1) Amount by which to increment the counter
:param window: (Default None) A time window, in seconds, that
must have elapsed since the counter was last updated, in
order to increment the counter.
:returns: Updated message counter value, or None if window
was specified, and the counter has already been updated
within the specified time period.
:raises QueueDoesNotExist: if not found<|endoftext|> |
240258ce5d9c039fbb53d87aad8ac6d074022932adff869b1c238ae0ec61f465 | def _get_counter(self, queue_name, project=None):
"Retrieves the current message counter value for a given queue.\n\n This helper is used to generate monotonic pagination\n markers that are saved as part of the message\n document.\n\n Note 1: Markers are scoped per-queue and so are *not*\n globally unique or globally ordered.\n\n Note 2: If two or more requests to this method are made\n in parallel, this method will return the same counter\n value. This is done intentionally so that the caller\n can detect a parallel message post, allowing it to\n mitigate race conditions between producer and\n observer clients.\n\n :param queue_name: Name of the queue to which the counter is scoped\n :param project: Queue's project\n :returns: current message counter as an integer\n "
if hasattr(self._queue_ctrl, '_get_counter'):
return self._queue_ctrl._get_counter(queue_name, project)
update = {'$inc': {'c.v': 0, 'c.t': 0}}
query = _get_scoped_query(queue_name, project)
try:
collection = self._collection(queue_name, project).stats
doc = collection.find_one_and_update(query, update, upsert=True, return_document=pymongo.ReturnDocument.AFTER, projection={'c.v': 1, '_id': 0})
return doc['c']['v']
except pymongo.errors.AutoReconnect:
LOG.exception('Auto reconnect error') | Retrieves the current message counter value for a given queue.
This helper is used to generate monotonic pagination
markers that are saved as part of the message
document.
Note 1: Markers are scoped per-queue and so are *not*
globally unique or globally ordered.
Note 2: If two or more requests to this method are made
in parallel, this method will return the same counter
value. This is done intentionally so that the caller
can detect a parallel message post, allowing it to
mitigate race conditions between producer and
observer clients.
:param queue_name: Name of the queue to which the counter is scoped
:param project: Queue's project
:returns: current message counter as an integer | zaqar/storage/mongodb/messages.py | _get_counter | openstack/zaqar | 97 | python | def _get_counter(self, queue_name, project=None):
"Retrieves the current message counter value for a given queue.\n\n This helper is used to generate monotonic pagination\n markers that are saved as part of the message\n document.\n\n Note 1: Markers are scoped per-queue and so are *not*\n globally unique or globally ordered.\n\n Note 2: If two or more requests to this method are made\n in parallel, this method will return the same counter\n value. This is done intentionally so that the caller\n can detect a parallel message post, allowing it to\n mitigate race conditions between producer and\n observer clients.\n\n :param queue_name: Name of the queue to which the counter is scoped\n :param project: Queue's project\n :returns: current message counter as an integer\n "
if hasattr(self._queue_ctrl, '_get_counter'):
return self._queue_ctrl._get_counter(queue_name, project)
update = {'$inc': {'c.v': 0, 'c.t': 0}}
query = _get_scoped_query(queue_name, project)
try:
collection = self._collection(queue_name, project).stats
doc = collection.find_one_and_update(query, update, upsert=True, return_document=pymongo.ReturnDocument.AFTER, projection={'c.v': 1, '_id': 0})
return doc['c']['v']
except pymongo.errors.AutoReconnect:
LOG.exception('Auto reconnect error') | def _get_counter(self, queue_name, project=None):
"Retrieves the current message counter value for a given queue.\n\n This helper is used to generate monotonic pagination\n markers that are saved as part of the message\n document.\n\n Note 1: Markers are scoped per-queue and so are *not*\n globally unique or globally ordered.\n\n Note 2: If two or more requests to this method are made\n in parallel, this method will return the same counter\n value. This is done intentionally so that the caller\n can detect a parallel message post, allowing it to\n mitigate race conditions between producer and\n observer clients.\n\n :param queue_name: Name of the queue to which the counter is scoped\n :param project: Queue's project\n :returns: current message counter as an integer\n "
if hasattr(self._queue_ctrl, '_get_counter'):
return self._queue_ctrl._get_counter(queue_name, project)
update = {'$inc': {'c.v': 0, 'c.t': 0}}
query = _get_scoped_query(queue_name, project)
try:
collection = self._collection(queue_name, project).stats
doc = collection.find_one_and_update(query, update, upsert=True, return_document=pymongo.ReturnDocument.AFTER, projection={'c.v': 1, '_id': 0})
return doc['c']['v']
except pymongo.errors.AutoReconnect:
LOG.exception('Auto reconnect error')<|docstring|>Retrieves the current message counter value for a given queue.
This helper is used to generate monotonic pagination
markers that are saved as part of the message
document.
Note 1: Markers are scoped per-queue and so are *not*
globally unique or globally ordered.
Note 2: If two or more requests to this method are made
in parallel, this method will return the same counter
value. This is done intentionally so that the caller
can detect a parallel message post, allowing it to
mitigate race conditions between producer and
observer clients.
:param queue_name: Name of the queue to which the counter is scoped
:param project: Queue's project
:returns: current message counter as an integer<|endoftext|> |
21f7fc0de37ea50f1b97734bee476e3bfad1285d5e4bfa3855ec2d8b3bfa2188 | def _ensure_indexes(self, collection):
'Ensures that all indexes are created.'
collection.ensure_index(TTL_INDEX_FIELDS, name='ttl', expireAfterSeconds=0, background=True)
collection.ensure_index(ACTIVE_INDEX_FIELDS, name='active', background=True)
collection.ensure_index(CLAIMED_INDEX_FIELDS, name='claimed', background=True)
collection.ensure_index(COUNTING_INDEX_FIELDS, name='counting', background=True)
collection.ensure_index(MARKER_INDEX_FIELDS, name='queue_marker', unique=True, background=True)
collection.ensure_index(TRANSACTION_INDEX_FIELDS, name='transaction', background=True) | Ensures that all indexes are created. | zaqar/storage/mongodb/messages.py | _ensure_indexes | openstack/zaqar | 97 | python | def _ensure_indexes(self, collection):
collection.ensure_index(TTL_INDEX_FIELDS, name='ttl', expireAfterSeconds=0, background=True)
collection.ensure_index(ACTIVE_INDEX_FIELDS, name='active', background=True)
collection.ensure_index(CLAIMED_INDEX_FIELDS, name='claimed', background=True)
collection.ensure_index(COUNTING_INDEX_FIELDS, name='counting', background=True)
collection.ensure_index(MARKER_INDEX_FIELDS, name='queue_marker', unique=True, background=True)
collection.ensure_index(TRANSACTION_INDEX_FIELDS, name='transaction', background=True) | def _ensure_indexes(self, collection):
collection.ensure_index(TTL_INDEX_FIELDS, name='ttl', expireAfterSeconds=0, background=True)
collection.ensure_index(ACTIVE_INDEX_FIELDS, name='active', background=True)
collection.ensure_index(CLAIMED_INDEX_FIELDS, name='claimed', background=True)
collection.ensure_index(COUNTING_INDEX_FIELDS, name='counting', background=True)
collection.ensure_index(MARKER_INDEX_FIELDS, name='queue_marker', unique=True, background=True)
collection.ensure_index(TRANSACTION_INDEX_FIELDS, name='transaction', background=True)<|docstring|>Ensures that all indexes are created.<|endoftext|> |
0333d062075838fdd345612fc22d7c311acdbd1101a8d9175cd1d27f3f7d0d35 | def __init__(self, width=512, height=512, vertical_fov=90, device_idx=0, rendering_settings=MeshRendererSettings(), simulator=None):
'\n :param width: width of the renderer output\n :param height: width of the renderer output\n :param vertical_fov: vertical field of view for the renderer\n :param device_idx: which GPU to run the renderer on\n :param render_settings: rendering settings\n :param simulator: Simulator object.\n '
self.simulator = simulator
self.rendering_settings = rendering_settings
self.shaderProgram = None
self.windowShaderProgram = None
self.fbo = None
(self.color_tex_rgb, self.color_tex_normal, self.color_tex_semantics, self.color_tex_3d) = (None, None, None, None)
(self.color_tex_scene_flow, self.color_tex_optical_flow, self.color_tex_ins_seg) = (None, None, None)
self.depth_tex = None
self.VAOs = []
self.VBOs = []
self.textures = []
self.objects = []
self.visual_objects = []
self.vertex_data = []
self.shapes = []
self.width = width
self.height = height
self.faces = []
self.instances = []
self.fisheye = rendering_settings.use_fisheye
self.optimized = rendering_settings.optimized
self.texture_files = {}
self.enable_shadow = rendering_settings.enable_shadow
self.platform = platform.system()
self.optimization_process_executed = False
self.pose_trans_array = None
self.pose_rot_array = None
self.last_trans_array = None
self.last_rot_array = None
self.text_manager = TextManager(self)
self.texts = []
device = None
'\n device_idx is the major id\n device is the minor id\n you can get it from nvidia-smi -a\n\n The minor number for the device is such that the Nvidia device node file for each GPU will have the form\n /dev/nvidia[minor number]. Available only on Linux platform.\n\n TODO: add device management for windows platform.\n '
if os.environ.get('GIBSON_DEVICE_ID', None):
device = int(os.environ.get('GIBSON_DEVICE_ID'))
logging.info('GIBSON_DEVICE_ID environment variable has been manually set. Using device {} for rendering'.format(device))
elif (self.platform != 'Windows'):
available_devices = get_available_devices()
if (device_idx < len(available_devices)):
device = available_devices[device_idx]
logging.info('Using device {} for rendering'.format(device))
else:
logging.info('Device index is larger than number of devices, falling back to use 0')
logging.info('If you have trouble using EGL, please visit our trouble shooting guide', 'at http://svl.stanford.edu/igibson/docs/issues.html')
device = 0
self.device_idx = device_idx
self.device_minor = device
self.msaa = rendering_settings.msaa
if ((self.platform == 'Darwin') and self.optimized):
logging.error('Optimized renderer is not supported on Mac')
exit()
if (self.platform == 'Darwin'):
from igibson.render.mesh_renderer import GLFWRendererContext
self.r = GLFWRendererContext.GLFWRendererContext(width, height, int(self.rendering_settings.glfw_gl_version[0]), int(self.rendering_settings.glfw_gl_version[1]), self.rendering_settings.show_glfw_window, rendering_settings.fullscreen)
elif (self.platform == 'Windows'):
from igibson.render.mesh_renderer import VRRendererContext
self.r = VRRendererContext.VRRendererContext(width, height, int(self.rendering_settings.glfw_gl_version[0]), int(self.rendering_settings.glfw_gl_version[1]), self.rendering_settings.show_glfw_window, rendering_settings.fullscreen)
else:
from igibson.render.mesh_renderer import EGLRendererContext
self.r = EGLRendererContext.EGLRendererContext(width, height, device)
self.r.init()
self.glstring = self.r.getstring_meshrenderer()
logging.debug('Rendering device and GL version')
logging.debug(self.glstring)
self.colors = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
self.lightcolor = [1, 1, 1]
logging.debug('Is using fisheye camera: {}'.format(self.fisheye))
if self.fisheye:
logging.error('Fisheye is currently not supported.')
exit(1)
else:
if (self.platform == 'Darwin'):
self.shaderProgram = self.r.compile_shader_meshrenderer(''.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '410', 'vert.shader')).readlines()), ''.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '410', 'frag.shader')).readlines()))
self.textShaderProgram = self.r.compile_shader_meshrenderer(''.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '410', 'text_vert.shader')).readlines()), ''.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '410', 'text_frag.shader')).readlines()))
else:
if self.optimized:
self.shaderProgram = self.r.compile_shader_meshrenderer(''.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450', 'optimized_vert.shader')).readlines()), ''.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450', 'optimized_frag.shader')).readlines()))
else:
self.shaderProgram = self.r.compile_shader_meshrenderer(''.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450', 'vert.shader')).readlines()), ''.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450', 'frag.shader')).readlines()))
self.textShaderProgram = self.r.compile_shader_meshrenderer(''.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450', 'text_vert.shader')).readlines()), ''.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450', 'text_frag.shader')).readlines()))
self.skyboxShaderProgram = self.r.compile_shader_meshrenderer(''.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '410', 'skybox_vs.glsl')).readlines()), ''.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '410', 'skybox_fs.glsl')).readlines()))
self.set_light_position_direction([0, 0, 2], [0, 0.5, 0])
self.setup_framebuffer()
self.vertical_fov = vertical_fov
self.horizontal_fov = (((2 * np.arctan(((np.tan((((self.vertical_fov / 180.0) * np.pi) / 2.0)) * self.width) / self.height))) / np.pi) * 180.0)
self.camera = [1, 0, 0]
self.target = [0, 0, 0]
self.up = [0, 0, 1]
self.znear = 0.1
self.zfar = 100
P = perspective(self.vertical_fov, (float(self.width) / float(self.height)), self.znear, self.zfar)
V = lookat(self.camera, self.target, up=self.up)
self.V = np.ascontiguousarray(V, np.float32)
self.last_V = np.copy(self.V)
self.cache = np.copy(self.V)
self.P = np.ascontiguousarray(P, np.float32)
self.materials_mapping = {}
self.mesh_materials = []
self.or_buffer_shape_num = 0
self.trans_data = None
self.rot_data = None
self.skybox_size = rendering_settings.skybox_size
if ((not (self.platform == 'Darwin')) and rendering_settings.enable_pbr):
self.setup_pbr()
self.setup_lidar_param()
self.text_manager.gen_text_fbo() | :param width: width of the renderer output
:param height: width of the renderer output
:param vertical_fov: vertical field of view for the renderer
:param device_idx: which GPU to run the renderer on
:param render_settings: rendering settings
:param simulator: Simulator object. | igibson/render/mesh_renderer/mesh_renderer_cpu.py | __init__ | suresh-guttikonda/iGibson | 0 | python | def __init__(self, width=512, height=512, vertical_fov=90, device_idx=0, rendering_settings=MeshRendererSettings(), simulator=None):
'\n :param width: width of the renderer output\n :param height: width of the renderer output\n :param vertical_fov: vertical field of view for the renderer\n :param device_idx: which GPU to run the renderer on\n :param render_settings: rendering settings\n :param simulator: Simulator object.\n '
self.simulator = simulator
self.rendering_settings = rendering_settings
self.shaderProgram = None
self.windowShaderProgram = None
self.fbo = None
(self.color_tex_rgb, self.color_tex_normal, self.color_tex_semantics, self.color_tex_3d) = (None, None, None, None)
(self.color_tex_scene_flow, self.color_tex_optical_flow, self.color_tex_ins_seg) = (None, None, None)
self.depth_tex = None
self.VAOs = []
self.VBOs = []
self.textures = []
self.objects = []
self.visual_objects = []
self.vertex_data = []
self.shapes = []
self.width = width
self.height = height
self.faces = []
self.instances = []
self.fisheye = rendering_settings.use_fisheye
self.optimized = rendering_settings.optimized
self.texture_files = {}
self.enable_shadow = rendering_settings.enable_shadow
self.platform = platform.system()
self.optimization_process_executed = False
self.pose_trans_array = None
self.pose_rot_array = None
self.last_trans_array = None
self.last_rot_array = None
self.text_manager = TextManager(self)
self.texts = []
device = None
'\n device_idx is the major id\n device is the minor id\n you can get it from nvidia-smi -a\n\n The minor number for the device is such that the Nvidia device node file for each GPU will have the form\n /dev/nvidia[minor number]. Available only on Linux platform.\n\n TODO: add device management for windows platform.\n '
if os.environ.get('GIBSON_DEVICE_ID', None):
device = int(os.environ.get('GIBSON_DEVICE_ID'))
logging.info('GIBSON_DEVICE_ID environment variable has been manually set. Using device {} for rendering'.format(device))
elif (self.platform != 'Windows'):
available_devices = get_available_devices()
if (device_idx < len(available_devices)):
device = available_devices[device_idx]
logging.info('Using device {} for rendering'.format(device))
else:
logging.info('Device index is larger than number of devices, falling back to use 0')
logging.info('If you have trouble using EGL, please visit our trouble shooting guide', 'at http://svl.stanford.edu/igibson/docs/issues.html')
device = 0
self.device_idx = device_idx
self.device_minor = device
self.msaa = rendering_settings.msaa
if ((self.platform == 'Darwin') and self.optimized):
logging.error('Optimized renderer is not supported on Mac')
exit()
if (self.platform == 'Darwin'):
from igibson.render.mesh_renderer import GLFWRendererContext
self.r = GLFWRendererContext.GLFWRendererContext(width, height, int(self.rendering_settings.glfw_gl_version[0]), int(self.rendering_settings.glfw_gl_version[1]), self.rendering_settings.show_glfw_window, rendering_settings.fullscreen)
elif (self.platform == 'Windows'):
from igibson.render.mesh_renderer import VRRendererContext
self.r = VRRendererContext.VRRendererContext(width, height, int(self.rendering_settings.glfw_gl_version[0]), int(self.rendering_settings.glfw_gl_version[1]), self.rendering_settings.show_glfw_window, rendering_settings.fullscreen)
else:
from igibson.render.mesh_renderer import EGLRendererContext
self.r = EGLRendererContext.EGLRendererContext(width, height, device)
self.r.init()
self.glstring = self.r.getstring_meshrenderer()
logging.debug('Rendering device and GL version')
logging.debug(self.glstring)
self.colors = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
self.lightcolor = [1, 1, 1]
logging.debug('Is using fisheye camera: {}'.format(self.fisheye))
if self.fisheye:
logging.error('Fisheye is currently not supported.')
exit(1)
else:
if (self.platform == 'Darwin'):
self.shaderProgram = self.r.compile_shader_meshrenderer(.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '410', 'vert.shader')).readlines()), .join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '410', 'frag.shader')).readlines()))
self.textShaderProgram = self.r.compile_shader_meshrenderer(.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '410', 'text_vert.shader')).readlines()), .join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '410', 'text_frag.shader')).readlines()))
else:
if self.optimized:
self.shaderProgram = self.r.compile_shader_meshrenderer(.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450', 'optimized_vert.shader')).readlines()), .join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450', 'optimized_frag.shader')).readlines()))
else:
self.shaderProgram = self.r.compile_shader_meshrenderer(.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450', 'vert.shader')).readlines()), .join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450', 'frag.shader')).readlines()))
self.textShaderProgram = self.r.compile_shader_meshrenderer(.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450', 'text_vert.shader')).readlines()), .join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450', 'text_frag.shader')).readlines()))
self.skyboxShaderProgram = self.r.compile_shader_meshrenderer(.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '410', 'skybox_vs.glsl')).readlines()), .join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '410', 'skybox_fs.glsl')).readlines()))
self.set_light_position_direction([0, 0, 2], [0, 0.5, 0])
self.setup_framebuffer()
self.vertical_fov = vertical_fov
self.horizontal_fov = (((2 * np.arctan(((np.tan((((self.vertical_fov / 180.0) * np.pi) / 2.0)) * self.width) / self.height))) / np.pi) * 180.0)
self.camera = [1, 0, 0]
self.target = [0, 0, 0]
self.up = [0, 0, 1]
self.znear = 0.1
self.zfar = 100
P = perspective(self.vertical_fov, (float(self.width) / float(self.height)), self.znear, self.zfar)
V = lookat(self.camera, self.target, up=self.up)
self.V = np.ascontiguousarray(V, np.float32)
self.last_V = np.copy(self.V)
self.cache = np.copy(self.V)
self.P = np.ascontiguousarray(P, np.float32)
self.materials_mapping = {}
self.mesh_materials = []
self.or_buffer_shape_num = 0
self.trans_data = None
self.rot_data = None
self.skybox_size = rendering_settings.skybox_size
if ((not (self.platform == 'Darwin')) and rendering_settings.enable_pbr):
self.setup_pbr()
self.setup_lidar_param()
self.text_manager.gen_text_fbo() | def __init__(self, width=512, height=512, vertical_fov=90, device_idx=0, rendering_settings=MeshRendererSettings(), simulator=None):
'\n :param width: width of the renderer output\n :param height: width of the renderer output\n :param vertical_fov: vertical field of view for the renderer\n :param device_idx: which GPU to run the renderer on\n :param render_settings: rendering settings\n :param simulator: Simulator object.\n '
self.simulator = simulator
self.rendering_settings = rendering_settings
self.shaderProgram = None
self.windowShaderProgram = None
self.fbo = None
(self.color_tex_rgb, self.color_tex_normal, self.color_tex_semantics, self.color_tex_3d) = (None, None, None, None)
(self.color_tex_scene_flow, self.color_tex_optical_flow, self.color_tex_ins_seg) = (None, None, None)
self.depth_tex = None
self.VAOs = []
self.VBOs = []
self.textures = []
self.objects = []
self.visual_objects = []
self.vertex_data = []
self.shapes = []
self.width = width
self.height = height
self.faces = []
self.instances = []
self.fisheye = rendering_settings.use_fisheye
self.optimized = rendering_settings.optimized
self.texture_files = {}
self.enable_shadow = rendering_settings.enable_shadow
self.platform = platform.system()
self.optimization_process_executed = False
self.pose_trans_array = None
self.pose_rot_array = None
self.last_trans_array = None
self.last_rot_array = None
self.text_manager = TextManager(self)
self.texts = []
device = None
'\n device_idx is the major id\n device is the minor id\n you can get it from nvidia-smi -a\n\n The minor number for the device is such that the Nvidia device node file for each GPU will have the form\n /dev/nvidia[minor number]. Available only on Linux platform.\n\n TODO: add device management for windows platform.\n '
if os.environ.get('GIBSON_DEVICE_ID', None):
device = int(os.environ.get('GIBSON_DEVICE_ID'))
logging.info('GIBSON_DEVICE_ID environment variable has been manually set. Using device {} for rendering'.format(device))
elif (self.platform != 'Windows'):
available_devices = get_available_devices()
if (device_idx < len(available_devices)):
device = available_devices[device_idx]
logging.info('Using device {} for rendering'.format(device))
else:
logging.info('Device index is larger than number of devices, falling back to use 0')
logging.info('If you have trouble using EGL, please visit our trouble shooting guide', 'at http://svl.stanford.edu/igibson/docs/issues.html')
device = 0
self.device_idx = device_idx
self.device_minor = device
self.msaa = rendering_settings.msaa
if ((self.platform == 'Darwin') and self.optimized):
logging.error('Optimized renderer is not supported on Mac')
exit()
if (self.platform == 'Darwin'):
from igibson.render.mesh_renderer import GLFWRendererContext
self.r = GLFWRendererContext.GLFWRendererContext(width, height, int(self.rendering_settings.glfw_gl_version[0]), int(self.rendering_settings.glfw_gl_version[1]), self.rendering_settings.show_glfw_window, rendering_settings.fullscreen)
elif (self.platform == 'Windows'):
from igibson.render.mesh_renderer import VRRendererContext
self.r = VRRendererContext.VRRendererContext(width, height, int(self.rendering_settings.glfw_gl_version[0]), int(self.rendering_settings.glfw_gl_version[1]), self.rendering_settings.show_glfw_window, rendering_settings.fullscreen)
else:
from igibson.render.mesh_renderer import EGLRendererContext
self.r = EGLRendererContext.EGLRendererContext(width, height, device)
self.r.init()
self.glstring = self.r.getstring_meshrenderer()
logging.debug('Rendering device and GL version')
logging.debug(self.glstring)
self.colors = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
self.lightcolor = [1, 1, 1]
logging.debug('Is using fisheye camera: {}'.format(self.fisheye))
if self.fisheye:
logging.error('Fisheye is currently not supported.')
exit(1)
else:
if (self.platform == 'Darwin'):
self.shaderProgram = self.r.compile_shader_meshrenderer(.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '410', 'vert.shader')).readlines()), .join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '410', 'frag.shader')).readlines()))
self.textShaderProgram = self.r.compile_shader_meshrenderer(.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '410', 'text_vert.shader')).readlines()), .join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '410', 'text_frag.shader')).readlines()))
else:
if self.optimized:
self.shaderProgram = self.r.compile_shader_meshrenderer(.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450', 'optimized_vert.shader')).readlines()), .join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450', 'optimized_frag.shader')).readlines()))
else:
self.shaderProgram = self.r.compile_shader_meshrenderer(.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450', 'vert.shader')).readlines()), .join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450', 'frag.shader')).readlines()))
self.textShaderProgram = self.r.compile_shader_meshrenderer(.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450', 'text_vert.shader')).readlines()), .join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450', 'text_frag.shader')).readlines()))
self.skyboxShaderProgram = self.r.compile_shader_meshrenderer(.join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '410', 'skybox_vs.glsl')).readlines()), .join(open(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '410', 'skybox_fs.glsl')).readlines()))
self.set_light_position_direction([0, 0, 2], [0, 0.5, 0])
self.setup_framebuffer()
self.vertical_fov = vertical_fov
self.horizontal_fov = (((2 * np.arctan(((np.tan((((self.vertical_fov / 180.0) * np.pi) / 2.0)) * self.width) / self.height))) / np.pi) * 180.0)
self.camera = [1, 0, 0]
self.target = [0, 0, 0]
self.up = [0, 0, 1]
self.znear = 0.1
self.zfar = 100
P = perspective(self.vertical_fov, (float(self.width) / float(self.height)), self.znear, self.zfar)
V = lookat(self.camera, self.target, up=self.up)
self.V = np.ascontiguousarray(V, np.float32)
self.last_V = np.copy(self.V)
self.cache = np.copy(self.V)
self.P = np.ascontiguousarray(P, np.float32)
self.materials_mapping = {}
self.mesh_materials = []
self.or_buffer_shape_num = 0
self.trans_data = None
self.rot_data = None
self.skybox_size = rendering_settings.skybox_size
if ((not (self.platform == 'Darwin')) and rendering_settings.enable_pbr):
self.setup_pbr()
self.setup_lidar_param()
self.text_manager.gen_text_fbo()<|docstring|>:param width: width of the renderer output
:param height: width of the renderer output
:param vertical_fov: vertical field of view for the renderer
:param device_idx: which GPU to run the renderer on
:param render_settings: rendering settings
:param simulator: Simulator object.<|endoftext|> |
09a39ff9de30997a1b9c865fef832c0c90487b2cc1c1dcf2661e4ec772a151e7 | def setup_pbr(self):
'\n Set up physics-based rendering\n '
if (os.path.exists(self.rendering_settings.env_texture_filename) or os.path.exists(self.rendering_settings.env_texture_filename2) or os.path.exists(self.rendering_settings.env_texture_filename3)):
self.r.setup_pbr(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450'), self.rendering_settings.env_texture_filename, self.rendering_settings.env_texture_filename2, self.rendering_settings.env_texture_filename3, self.rendering_settings.light_modulation_map_filename, self.rendering_settings.light_dimming_factor)
else:
logging.warning('Environment texture not available, cannot use PBR.')
if self.rendering_settings.enable_pbr:
self.r.loadSkyBox(self.skyboxShaderProgram, self.skybox_size) | Set up physics-based rendering | igibson/render/mesh_renderer/mesh_renderer_cpu.py | setup_pbr | suresh-guttikonda/iGibson | 0 | python | def setup_pbr(self):
'\n \n '
if (os.path.exists(self.rendering_settings.env_texture_filename) or os.path.exists(self.rendering_settings.env_texture_filename2) or os.path.exists(self.rendering_settings.env_texture_filename3)):
self.r.setup_pbr(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450'), self.rendering_settings.env_texture_filename, self.rendering_settings.env_texture_filename2, self.rendering_settings.env_texture_filename3, self.rendering_settings.light_modulation_map_filename, self.rendering_settings.light_dimming_factor)
else:
logging.warning('Environment texture not available, cannot use PBR.')
if self.rendering_settings.enable_pbr:
self.r.loadSkyBox(self.skyboxShaderProgram, self.skybox_size) | def setup_pbr(self):
'\n \n '
if (os.path.exists(self.rendering_settings.env_texture_filename) or os.path.exists(self.rendering_settings.env_texture_filename2) or os.path.exists(self.rendering_settings.env_texture_filename3)):
self.r.setup_pbr(os.path.join(os.path.dirname(mesh_renderer.__file__), 'shaders', '450'), self.rendering_settings.env_texture_filename, self.rendering_settings.env_texture_filename2, self.rendering_settings.env_texture_filename3, self.rendering_settings.light_modulation_map_filename, self.rendering_settings.light_dimming_factor)
else:
logging.warning('Environment texture not available, cannot use PBR.')
if self.rendering_settings.enable_pbr:
self.r.loadSkyBox(self.skyboxShaderProgram, self.skybox_size)<|docstring|>Set up physics-based rendering<|endoftext|> |
a0db29e37157087dd0cce09e5c35d9497bc362dda95385c9b6a2b2e1c312216c | def set_light_position_direction(self, position, target):
'\n Set light position and orientation\n\n :param position: light position\n :param target: light target\n '
self.lightpos = position
self.lightV = lookat(self.lightpos, target, [0, 1, 0])
self.lightP = ortho((- 5), 5, (- 5), 5, (- 10), 20.0) | Set light position and orientation
:param position: light position
:param target: light target | igibson/render/mesh_renderer/mesh_renderer_cpu.py | set_light_position_direction | suresh-guttikonda/iGibson | 0 | python | def set_light_position_direction(self, position, target):
'\n Set light position and orientation\n\n :param position: light position\n :param target: light target\n '
self.lightpos = position
self.lightV = lookat(self.lightpos, target, [0, 1, 0])
self.lightP = ortho((- 5), 5, (- 5), 5, (- 10), 20.0) | def set_light_position_direction(self, position, target):
'\n Set light position and orientation\n\n :param position: light position\n :param target: light target\n '
self.lightpos = position
self.lightV = lookat(self.lightpos, target, [0, 1, 0])
self.lightP = ortho((- 5), 5, (- 5), 5, (- 10), 20.0)<|docstring|>Set light position and orientation
:param position: light position
:param target: light target<|endoftext|> |
a0aa6a688d038b27f6ca599202216b48f76da86d8c3fb42bb17cacbd21cbb1a4 | def setup_framebuffer(self):
'\n Set up framebuffers for the renderer\n '
[self.fbo, self.color_tex_rgb, self.color_tex_normal, self.color_tex_semantics, self.color_tex_ins_seg, self.color_tex_3d, self.color_tex_scene_flow, self.color_tex_optical_flow, self.depth_tex] = self.r.setup_framebuffer_meshrenderer(self.width, self.height)
if self.msaa:
[self.fbo_ms, self.color_tex_rgb_ms, self.color_tex_normal_ms, self.color_tex_semantics_ms, self.color_tex_ins_seg_ms, self.color_tex_3d_ms, self.color_tex_scene_flow_ms, self.color_tex_optical_flow_ms, self.depth_tex_ms] = self.r.setup_framebuffer_meshrenderer_ms(self.width, self.height)
self.depth_tex_shadow = self.r.allocateTexture(self.width, self.height) | Set up framebuffers for the renderer | igibson/render/mesh_renderer/mesh_renderer_cpu.py | setup_framebuffer | suresh-guttikonda/iGibson | 0 | python | def setup_framebuffer(self):
'\n \n '
[self.fbo, self.color_tex_rgb, self.color_tex_normal, self.color_tex_semantics, self.color_tex_ins_seg, self.color_tex_3d, self.color_tex_scene_flow, self.color_tex_optical_flow, self.depth_tex] = self.r.setup_framebuffer_meshrenderer(self.width, self.height)
if self.msaa:
[self.fbo_ms, self.color_tex_rgb_ms, self.color_tex_normal_ms, self.color_tex_semantics_ms, self.color_tex_ins_seg_ms, self.color_tex_3d_ms, self.color_tex_scene_flow_ms, self.color_tex_optical_flow_ms, self.depth_tex_ms] = self.r.setup_framebuffer_meshrenderer_ms(self.width, self.height)
self.depth_tex_shadow = self.r.allocateTexture(self.width, self.height) | def setup_framebuffer(self):
'\n \n '
[self.fbo, self.color_tex_rgb, self.color_tex_normal, self.color_tex_semantics, self.color_tex_ins_seg, self.color_tex_3d, self.color_tex_scene_flow, self.color_tex_optical_flow, self.depth_tex] = self.r.setup_framebuffer_meshrenderer(self.width, self.height)
if self.msaa:
[self.fbo_ms, self.color_tex_rgb_ms, self.color_tex_normal_ms, self.color_tex_semantics_ms, self.color_tex_ins_seg_ms, self.color_tex_3d_ms, self.color_tex_scene_flow_ms, self.color_tex_optical_flow_ms, self.depth_tex_ms] = self.r.setup_framebuffer_meshrenderer_ms(self.width, self.height)
self.depth_tex_shadow = self.r.allocateTexture(self.width, self.height)<|docstring|>Set up framebuffers for the renderer<|endoftext|> |
5b8a01258bd5456a06afe3ee96a427efb41f723366ab6e509540244c91330f93 | def load_texture_file(self, tex_filename):
'\n Load the texture file into the renderer\n\n :param tex_filename: texture file filename\n :return texture_id: texture id of this texture in the renderer\n '
if ((tex_filename is None) or (not os.path.isfile(tex_filename))):
return None
if (tex_filename in self.texture_files):
return self.texture_files[tex_filename]
if self.optimized:
texture_id = len(self.texture_files)
else:
texture_id = self.r.loadTexture(tex_filename, self.rendering_settings.texture_scale, igibson.key_path)
self.textures.append(texture_id)
self.texture_files[tex_filename] = texture_id
return texture_id | Load the texture file into the renderer
:param tex_filename: texture file filename
:return texture_id: texture id of this texture in the renderer | igibson/render/mesh_renderer/mesh_renderer_cpu.py | load_texture_file | suresh-guttikonda/iGibson | 0 | python | def load_texture_file(self, tex_filename):
'\n Load the texture file into the renderer\n\n :param tex_filename: texture file filename\n :return texture_id: texture id of this texture in the renderer\n '
if ((tex_filename is None) or (not os.path.isfile(tex_filename))):
return None
if (tex_filename in self.texture_files):
return self.texture_files[tex_filename]
if self.optimized:
texture_id = len(self.texture_files)
else:
texture_id = self.r.loadTexture(tex_filename, self.rendering_settings.texture_scale, igibson.key_path)
self.textures.append(texture_id)
self.texture_files[tex_filename] = texture_id
return texture_id | def load_texture_file(self, tex_filename):
'\n Load the texture file into the renderer\n\n :param tex_filename: texture file filename\n :return texture_id: texture id of this texture in the renderer\n '
if ((tex_filename is None) or (not os.path.isfile(tex_filename))):
return None
if (tex_filename in self.texture_files):
return self.texture_files[tex_filename]
if self.optimized:
texture_id = len(self.texture_files)
else:
texture_id = self.r.loadTexture(tex_filename, self.rendering_settings.texture_scale, igibson.key_path)
self.textures.append(texture_id)
self.texture_files[tex_filename] = texture_id
return texture_id<|docstring|>Load the texture file into the renderer
:param tex_filename: texture file filename
:return texture_id: texture id of this texture in the renderer<|endoftext|> |
265ce05a8a03727b7338724e3d374e04e0e24100f80cece467539ceb12b7eed7 | def load_randomized_material(self, material):
'\n Load all the texture files in the RandomizedMaterial.\n Populate material_ids with the texture id assigned by the renderer.\n\n :param material: an instance of RandomizedMaterial\n '
if (material.material_ids is not None):
return
material.material_ids = {}
for material_class in material.material_files:
if (material_class not in material.material_ids):
material.material_ids[material_class] = []
for material_instance in material.material_files[material_class]:
material_id_instance = {}
for key in material_instance:
material_id_instance[key] = self.load_texture_file(material_instance[key])
material.material_ids[material_class].append(material_id_instance)
material.randomize() | Load all the texture files in the RandomizedMaterial.
Populate material_ids with the texture id assigned by the renderer.
:param material: an instance of RandomizedMaterial | igibson/render/mesh_renderer/mesh_renderer_cpu.py | load_randomized_material | suresh-guttikonda/iGibson | 0 | python | def load_randomized_material(self, material):
'\n Load all the texture files in the RandomizedMaterial.\n Populate material_ids with the texture id assigned by the renderer.\n\n :param material: an instance of RandomizedMaterial\n '
if (material.material_ids is not None):
return
material.material_ids = {}
for material_class in material.material_files:
if (material_class not in material.material_ids):
material.material_ids[material_class] = []
for material_instance in material.material_files[material_class]:
material_id_instance = {}
for key in material_instance:
material_id_instance[key] = self.load_texture_file(material_instance[key])
material.material_ids[material_class].append(material_id_instance)
material.randomize() | def load_randomized_material(self, material):
'\n Load all the texture files in the RandomizedMaterial.\n Populate material_ids with the texture id assigned by the renderer.\n\n :param material: an instance of RandomizedMaterial\n '
if (material.material_ids is not None):
return
material.material_ids = {}
for material_class in material.material_files:
if (material_class not in material.material_ids):
material.material_ids[material_class] = []
for material_instance in material.material_files[material_class]:
material_id_instance = {}
for key in material_instance:
material_id_instance[key] = self.load_texture_file(material_instance[key])
material.material_ids[material_class].append(material_id_instance)
material.randomize()<|docstring|>Load all the texture files in the RandomizedMaterial.
Populate material_ids with the texture id assigned by the renderer.
:param material: an instance of RandomizedMaterial<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.