repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/safekit/models/lstm_agg.py | safekit/models/lstm_agg.py | """
"""
import json
import os
import sys
# TODO: Comment for usage.
# TODO: Test this on DS data
# TODO: skipheader error message that is informative
# TODO: Make consistent output printing and writing to file for aggregate and baseline models.
# TODO: Comment crazy transform functions
# So we can run this code in an arbitrary environment that has tensorflow but not safekit installed
cyberpath = '/'.join(os.path.realpath(__file__).split('/')[:-3])
sys.path.insert(0, cyberpath)
import itertools
import numpy as np
import tensorflow as tf
import time
import math
from safekit.batch import StateTrackingBatcher
from safekit.tf_ops import diag_mvn_loss, multivariate_loss, eyed_mvn_loss, full_mvn_loss
from safekit.util import get_multivariate_loss_names, make_feature_spec, make_loss_spec, Parser
from safekit.tf_ops import weights, swapping_rnn, layer_norm_rnn
from safekit.graph_training_utils import ModelRunner, EarlyStop
def lstm_parser():
    """
    Defines and returns the command line argument parser for the aggregate
    feature LSTM model.

    :return: Parser (safekit.util) object.
    """
    parser = Parser(description="Cert Aggregate Feature LSTM.")
    parser.add_argument('datafile', type=str,
                        help='Path to data file.')
    parser.add_argument('results_folder', type=str,
                        help='Folder where to write losses.')
    parser.add_argument('dataspecs', type=str,
                        help='Name of json file with specs for splitting data.')
    parser.add_argument('-num_steps', type=int,
                        help='Number of time steps for truncated backpropagation.', default=5)
    parser.add_argument('-learnrate', type=float, default=0.01,
                        help='Step size for gradient descent.')
    parser.add_argument('-initrange', type=float, default=0.0001,
                        help='For initialization of weights.')
    parser.add_argument('-numlayers', type=int, default=3,
                        help='Number of hidden layers')
    parser.add_argument('-hiddensize', type=int, default=3,
                        help='Number of hidden nodes per layer')
    parser.add_argument('-verbose', type=int, default=1,
                        help='Level to print training progress and/or other details.')
    parser.add_argument('-mb', type=int, default=21,
                        help='The max number of events in the structured mini_batch.')
    parser.add_argument('-embedding_ratio', type=float, default=0.5,
                        help='Embedding_ratio * num_classes = embedding size.')
    parser.add_argument('-min_embedding', type=int, default=5,
                        help='Minimum embedding size.')
    parser.add_argument('-max_embedding', type=int, default=500,
                        help='Maximum embedding size.')
    parser.add_argument('-use_next_time_step', default=0, type=int,
                        help='Whether to predict next time step or autoencode.')
    parser.add_argument('-act', default='relu', type=str,
                        help='A string denoting the activation function.')
    parser.add_argument('-dist', default='diag', type=str,
                        help='A string denoting the multivariate normal type for prediction.')
    # Was default='0.1' (a string); argparse coerced it through type=float at
    # parse time, but a float literal is the honest spelling.
    parser.add_argument('-variance_floor', default=0.1, type=float,
                        help='Float to derive variance floor.')
    parser.add_argument('-norm', type=str,
                        help='"layer" for layer normalization. Default is None.')
    parser.add_argument('-keep_prob', type=float, default=None,
                        help='Percent of nodes to keep for dropout layers.')
    parser.add_argument('-debug', action='store_true',
                        help='Use this flag to print feed dictionary contents and dimensions.')
    parser.add_argument('-random_seed', type=int, default=5,
                        help='Random seed for reproducible experiments.')
    # Removed a stray trailing comma after this call (it built a useless
    # 1-tuple) and added the previously missing help string.
    parser.add_argument('-replay_ratio', type=int, nargs='+', default=(1, 0),
                        help='(x, y) ratio of new to replayed batches, passed to StateTrackingBatcher.')
    parser.add_argument('-delimiter', type=str, default=' ',
                        help="Delimiter for input text file. You should be using ' ' for the dayshuffled cert.")
    parser.add_argument('-maxbadcount', type=int, default=100,
                        help="For stopping training when loss does not improve.")
    parser.add_argument('-residual', action='store_true',
                        help="Flag for calculating residual (difference between sequential actions) instead of next action")
    parser.add_argument('-skipheader', action='store_true',
                        help="Whether or not to skip first line of input file.")
    parser.add_argument('-alpha', type=float, default=0.99,
                        help='Parameter for exponential moving average and variance')
    parser.add_argument('-input_norm', action='store_true',
                        help='Use this flag for online normalization')
    return parser
def regression_transform(matrix, eval_indices):
    """
    Gather regression-target rows from each time slice of ``matrix``.

    :param matrix: numpy array whose leading axis is time; presumably
                   num_steps x batch x features -- confirm against caller.
    :param eval_indices: Iterable of per-timestep index arrays into the
                         second axis of ``matrix``.
    :return: Single numpy array of all selected rows, stacked in time order.
    """
    selected = []
    for tstep, idx in enumerate(eval_indices):
        selected.append(matrix[tstep][idx.astype(int)])
    return np.concatenate(selected, axis=0)
def evaluation_transform(matrix, eval_indices):
    """
    Flatten the rows of each time slice of ``matrix`` selected by
    ``eval_indices`` into one list.

    Only the first ``matrix.shape[0]`` entries of ``eval_indices`` are
    consumed (extra entries are ignored, matching zip truncation).

    :param matrix: numpy array whose leading axis is time.
    :param eval_indices: Per-timestep arrays of indices into the second axis.
    :return: Flat list of the selected rows, in time order.
    """
    num_steps = matrix.shape[0]
    collected = []
    for tstep, indices in zip(range(num_steps), eval_indices):
        collected.extend(matrix[tstep][indices.astype(int)])
    return collected
def index_classification_transform(matrix, eval_indices):
    """
    Gather classification-target rows from each time slice of ``matrix``.

    :param matrix: numpy array whose leading axis is time.
    :param eval_indices: Iterable of per-timestep index arrays into the
                         second axis of ``matrix``.
    :return: Single numpy array of all selected rows, stacked in time order.
    """
    per_step = [matrix[tstep][indices.astype(int)]
                for tstep, indices in enumerate(eval_indices)]
    return np.concatenate(per_step, axis=0)
def redteam_transform(matrix, eval_indices, dataspec=None):
    """
    Sum ``matrix`` over its third axis, then flatten the per-timestep rows
    selected by ``eval_indices`` into one list (see evaluation_transform).

    :param matrix: numpy array with at least three axes; leading axis is time.
    :param eval_indices: Per-timestep arrays of indices into the second axis.
    :param dataspec: Unused; retained for interface compatibility.
    :return: Flat list of the selected, summed values.
    """
    return evaluation_transform(np.sum(matrix, axis=2), eval_indices)
def augment_datadict(datadict, next_time_step, feature_spec, residual=False):
    """
    Add prediction targets and evaluation meta-data to ``datadict`` in place.

    For every categorical and continuous feature in ``feature_spec``, a
    ``target_<feature>`` entry is created by gathering, per time step, the
    rows selected by ``datadict['eval_indices']``.  When ``next_time_step``
    is 1 the targets come from the following time step and the inputs are
    truncated by one step (next-step prediction); when it is 0 the targets
    equal the inputs (autoencoding).  The ``user``/``time``/``redteam``
    entries get flattened ``*_eval`` lists for result reporting.

    :param datadict: Dict mapping feature names to numpy arrays whose leading
                     axis is time; mutated in place.  Must also contain
                     'eval_indices', 'user', 'time' and 'redteam' entries.
    :param next_time_step: 0 or 1.  Whether to predict the next time step
                           instead of autoencoding.
    :param feature_spec: Dict with 'categorical' and 'continuous' lists of
                         feature names.
    :param residual: If True, continuous targets are differences between
                     sequential time steps instead of raw values.
    :return: None; ``datadict`` is modified in place.
    """
    # Targets start at step 1 for next-step prediction, step 0 for autoencoding.
    startIdx = next_time_step
    for cat_feat in feature_spec['categorical']:
        datadict["target_%s" % cat_feat] = index_classification_transform(datadict[cat_feat][startIdx:],
                                                                          datadict["eval_indices"])
        # (full_len, -1)[next_time_step]: keep every step when autoencoding,
        # drop the final step when predicting the next time step.
        endIdx = (datadict[cat_feat].shape[0], -1)[next_time_step]
        datadict[cat_feat] = datadict[cat_feat][:endIdx]
    for cont_feat in feature_spec['continuous']:
        if residual:
            endIdx = (datadict[cont_feat].shape[0], -1)[next_time_step]
            pre = datadict[cont_feat][:endIdx]
            # Difference between consecutive time steps becomes the regression target.
            post_residual = datadict[cont_feat][startIdx:] - pre
            datadict["target_%s" % cont_feat] = regression_transform(post_residual,
                                                                     datadict["eval_indices"])
            datadict[cont_feat] = pre
        else:
            datadict["target_%s" % cont_feat] = regression_transform(datadict[cont_feat][startIdx:],
                                                                     datadict["eval_indices"])
            endIdx = (datadict[cont_feat].shape[0], -1)[next_time_step]
            datadict[cont_feat] = datadict[cont_feat][:endIdx]
    for feat in ['user']:
        endIdx = (datadict[feat].shape[0], -1)[next_time_step]
        datadict[feat + '_eval'] = evaluation_transform(datadict[feat][:endIdx],
                                                        datadict["eval_indices"])
    # time and redteam are summed over their feature axis before flattening.
    for feat in ['time', 'redteam']:
        endIdx = (datadict[feat].shape[0], -1)[next_time_step]
        datadict[feat + '_eval'] = redteam_transform(datadict[feat][:endIdx],
                                                     datadict["eval_indices"])
if __name__ == '__main__':
    #========================== SETUP ==========================================
    args = lstm_parser().parse_args()
    # One hidden-size entry per layer.
    args.layers = [args.hiddensize for x in range(args.numlayers)]
    # Encode the hyperparameter configuration in the results file name.
    outfile_name = "cert_mv_auto_act_%s_lr_%s_nl_%s_hs_%s_mb_%s_nm_%s_kp_%s_ds_%s_em_%s_rs_%s" % (args.act,
                                                                                                  args.learnrate,
                                                                                                  len(args.layers),
                                                                                                  args.layers[0],
                                                                                                  args.mb,
                                                                                                  args.norm,
                                                                                                  args.keep_prob,
                                                                                                  args.dist,
                                                                                                  args.embedding_ratio,
                                                                                                  args.random_seed)
    outfile_name = str(time.time()) + outfile_name  # time stamp makes the file name unique
    if not args.results_folder.endswith('/'):
        args.results_folder += '/'
    # Select the covariance structure of the multivariate normal loss.
    if args.dist == "ident":
        mvn = eyed_mvn_loss
    elif args.dist == "diag":
        mvn = diag_mvn_loss
    elif args.dist == "full":
        mvn = full_mvn_loss
    else:
        # Unrecognized -dist values silently fall back to the diagonal loss.
        mvn = diag_mvn_loss
    tf.set_random_seed(args.random_seed)
    np.random.seed(args.random_seed)
    # ======================== BUILD GRAPH ===================================================
    dataspecs = json.load(open(args.dataspecs, 'r'))
    ph_dict = {'training': tf.placeholder(tf.bool)}
    embeddings, continuous_features, targets = {}, {}, {}
    feature_spec = make_feature_spec(dataspecs)
    # Make placeholders for all input data and select embeddings for categorical data
    for dataname in feature_spec['categorical']:
        # Embedding size scales with the number of classes, clipped to
        # [min_embedding, max_embedding].
        embedding_size = math.ceil(args.embedding_ratio * dataspecs[dataname]['num_classes'])
        embedding_size = int(max(min(args.max_embedding, embedding_size), args.min_embedding))
        with tf.variable_scope(dataname):
            ph_dict[dataname] = tf.placeholder(tf.int32, [args.num_steps, None], name=dataname)
            embeddings[dataname] = tf.nn.embedding_lookup(weights('tnorm',
                                                                  (dataspecs[dataname]['num_classes'],
                                                                   embedding_size)),
                                                          ph_dict[dataname])
    for dataname in feature_spec['continuous']:
        ph_dict[dataname] = tf.placeholder(tf.float32,
                                           [args.num_steps, None, len(dataspecs[dataname]['index'])],
                                           name=dataname)
        continuous_features[dataname] = ph_dict[dataname]
    # concatenate all features
    # NOTE(review): dict.values() + dict.values() is Python 2 list
    # concatenation; under Python 3 this would need list()/itertools.chain.
    features = tf.concat(continuous_features.values() + embeddings.values(), 2, name='features')
    # split into a list along time dimension
    input_features = tf.unstack(features)
    # One (cell, hidden) placeholder pair per layer, so the batcher-tracked
    # RNN state can be fed in every mini-batch.
    initial_state = [[tf.placeholder(tf.float32, (None, units), name='cell_'),
                      tf.placeholder(tf.float32, (None, units), name='hidden')]
                     for units in args.layers]
    # Run prepared list of matrices through rnn
    if args.norm == 'layer':
        hidden_states, states = layer_norm_rnn(input_features,
                                               initial_state=initial_state,
                                               layers=args.layers,
                                               sequence_lengths=None,
                                               state_index=None)
    else:
        hidden_states, states = swapping_rnn(input_features,
                                             initial_state=initial_state,
                                             layers=args.layers,
                                             sequence_lengths=None,
                                             state_index=None)
    # evaluate on selected outputs from rnn
    eval_vecs = [tf.placeholder(tf.int32, [None], name='eval_%s' % i)
                 for i in range(args.num_steps - args.use_next_time_step)]
    hidden_states = [tf.nn.embedding_lookup(hidden_state, state_indices)
                     for hidden_state, state_indices in zip(hidden_states, eval_vecs)]
    hidden_matrix = tf.concat(hidden_states, 0)
    loss_spec = make_loss_spec(dataspecs, mvn)
    loss_matrix = multivariate_loss(hidden_matrix, loss_spec, ph_dict)
    loss_vector = tf.reduce_sum(loss_matrix, reduction_indices=1)  # is MB x 1
    loss = tf.reduce_mean(loss_vector)  # is scalar
    loss_names = get_multivariate_loss_names(loss_spec)
    # States are evaluated so the batcher can hand them back as the next
    # window's initial state (truncated BPTT).
    eval_tensors = [loss, loss_vector, loss_matrix] + np.array(states).flatten().tolist()
    # Create an object to train on this graph
    initial_state = np.array(initial_state).flatten().tolist()
    ph_dict.update({'initial_state': initial_state,
                    'eval_indices': eval_vecs})
    trainer = ModelRunner(loss, ph_dict, args.learnrate, debug=args.debug)
    data = StateTrackingBatcher(args.datafile, dataspecs,
                                batchsize=args.mb,
                                num_steps=args.num_steps,
                                layers=args.layers,
                                next_step=args.use_next_time_step,
                                replay_ratio=args.replay_ratio,
                                delimiter=args.delimiter,
                                skipheader=args.skipheader,
                                standardize=args.input_norm,
                                alpha=args.alpha,
                                datastart_index=dataspecs['counts']['index'][0])
    # ========================== TRAIN ===============================================================
    # Results are staged in /tmp and moved to the results folder on completion.
    os.system('mkdir /tmp/lstm_agg/')
    with open('/tmp/lstm_agg/' + outfile_name, 'w') as loss_file:
        header = 'time user red loss '
        if args.verbose == 3:
            header += ' '.join(loss_names)
        loss_file.write(header + '\n')
        datadict = data.next_batch()
        current_loss = sys.float_info.max
        not_early_stop = EarlyStop(args.maxbadcount)
        continue_training = not_early_stop(datadict, current_loss)
        while continue_training:  # mat is not None and self.badcount < self.badlimit and loss != inf, nan:
            # Adds target_<feature> and *_eval entries to datadict in place.
            augment_datadict(datadict, args.use_next_time_step, feature_spec, args.residual)
            datadict['training'] = True
            val = trainer.train_step(datadict, eval_tensors=eval_tensors, update=True)
            current_loss, current_loss_vector, current_loss_matrix, current_states = val[1], val[2], val[3], val[4:]
            # Hand the RNN states back to the batcher for the next window.
            data.update_states(current_states)
            for item in ['redteam_eval', 'time_eval', 'user_eval']:
                assert len(current_loss_vector) == len(datadict[item]), (
                    'Mismatched lengths for evaluation lists\n'
                    '%s: %s\n'
                    'loss_vector %s\n' % (item, len(datadict[item]),
                                          len(current_loss_vector)))
            loss_values = zip(datadict['time_eval'],
                              datadict['user_eval'],
                              datadict['redteam_eval'],
                              current_loss_vector.tolist())
            sorted_day_values = sorted(loss_values, key=lambda row: row[0], reverse=False)
            if data.event_number % 100 == 0:
                print("%s loss: %s" % (data.event_number, current_loss))
            # Skip writing during replay passes or when training has diverged.
            if not data.replay and not (math.isnan(current_loss) or math.isinf(current_loss)):
                # NOTE(review): `time` and `loss` here shadow the time module
                # and the loss tensor; harmless since neither is used again
                # after this loop.
                for time, user, red, loss in sorted_day_values:
                    loss_file.write('%s %s %s %s ' % (time, user, red, loss))
                loss_file.write('\n')
            datadict = data.next_batch()
            continue_training = not_early_stop(datadict, current_loss)
        if continue_training < 0:
            exit(1)
    os.system('mv /tmp/lstm_agg/' + outfile_name + ' ' + args.results_folder + outfile_name)
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/safekit/models/__init__.py | safekit/models/__init__.py | python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false | |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/safekit/models/iso_forest.py | safekit/models/iso_forest.py | """
Isolation Forest baseline.
"""
import sys
import os
cyberpath = '/'.join(os.path.realpath(__file__).split('/')[:-3])
sys.path.insert(0, cyberpath)
import argparse
from sklearn.ensemble import IsolationForest
from safekit.batch import DayBatcher
from safekit.util import apr
import time
import math
def sample_hyps_iso_forest(nest, contam, boot):
    """
    Build an IsolationForest anomaly detector with the given hyperparameters.

    :param nest: Number of trees in the ensemble (n_estimators).
    :param contam: Expected proportion of outliers (contamination).
    :param boot: Whether individual trees are fit on bootstrap samples.
    :return: An IsolationForest object with specified hyperparameters, used to detect anomaly.
    """
    return IsolationForest(n_estimators=nest,
                           max_samples='auto',
                           contamination=contam,
                           max_features=1.0,   # use all features
                           bootstrap=boot,
                           n_jobs=-1,          # use all cores
                           verbose=0)
def train_model(model, batcher, res_file):
    """
    Fit ``model`` on successive batches and write per-event anomaly scores.

    :param model: A sklearn-style anomaly detection model. Needs to have fit()
                  and decision_function() methods.
    :param batcher: A Batcher object whose next_batch() returns a matrix, or
                    None when the data is exhausted.  Column layout assumed:
                    day, user, red, feature columns.
    :param res_file: (str) Path of the results file to write.
    """
    # 'with' guarantees the results file is flushed and closed (the original
    # never closed it).
    with open(res_file, 'w') as resultsfile:
        resultsfile.write('day user red loss\n')
        batch_num = 0
        mat = batcher.next_batch()
        while mat is not None:
            datadict = {'features': mat[:, 3:], 'red': mat[:, 2], 'user': mat[:, 1], 'day': mat[:, 0]}
            model.fit(datadict['features'])
            anomaly_scores = model.decision_function(datadict['features'])
            for day, user, red, score in zip(datadict['day'], datadict['user'], datadict['red'], anomaly_scores):
                # A nan or inf score means scoring diverged; bail out.
                # (Original tested `isnan(score) and not isinf(score)`, which
                # can never flag inf scores since nan is never inf.)
                if math.isnan(score) or math.isinf(score):
                    print('exiting due divergence')
                    exit(1)
                # decision_function: lower means more anomalous, so negate to
                # report a loss-like score where higher is more anomalous.
                resultsfile.write(str(day) + ' ' + str(user) + ' ' + str(red) + ' ' + str(-1*score) + '\n')
            batch_num += 1
            print('finished batch num: ' + str(batch_num))
            mat = batcher.next_batch()
def return_parser():
    """
    Defines and returns the command line argument parser.

    :return: argparse.ArgumentParser
    """
    def str2bool(value):
        # argparse's type=str for a boolean flag is a footgun: any non-empty
        # string (including "False") is truthy.  Accept the usual truthy
        # spellings; everything else parses as False.
        return str(value).lower() in ('true', 't', '1', 'yes')

    parser = argparse.ArgumentParser("Run anomaly detection with Isolation Forest.")
    parser.add_argument('datafile', type=str, help='Input data for anomaly detection.')
    parser.add_argument('result_path', type=str, help='Results dir.')
    parser.add_argument('-loss_fn', type=str, default='/tmp/' + str(time.time()), help='Loss file param for spearmint')
    parser.add_argument('-nest', type=int, default=100, help='Number of estimators.')
    parser.add_argument('-contam', type=float, default=0.25, help='Contamination.')
    # Was type=str: "-bootstrap False" produced the truthy string 'False'.
    parser.add_argument('-bootstrap', type=str2bool, default=False, help='Bootstrap t/f.')
    return parser
if __name__ == '__main__':
args = return_parser().parse_args()
day_batcher = DayBatcher(args.datafile, skiprow=1)
if not args.result_path.endswith('/'):
args.result_path += '/'
resultsfile = (args.result_path + str(time.time()) + 'iso_forest' +
'__nEstimators_' + str(args.nest) +
'__maxSamples_' + 'auto' +
'__contamination_' + str(args.contam) +
'__max_features_' + '2' +
'__bootstrap_' + str(args.bootstrap) +
'__nJobs_' + '-1')
model = sample_hyps_iso_forest(args.nest, args.contam, args.bootstrap)
start_time = time.time()
train_model(model, day_batcher, resultsfile)
with open(args.loss_fn, 'w') as lf:
lf.write(str(apr(resultsfile, [0, 12], inverse=True))) | python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/safekit/models/dnn_agg.py | safekit/models/dnn_agg.py | #!/usr/bin/env python
"""
Multivariate Deep Neural Network Autoencoder network anomaly detection. Anomaly detection is performed on model output
by ranking of loss scores within a time window from output of model. An output file is created with a timestamp prepended,
and values of hyper-parameters in name.
**Abbreviation of hyper-parameter descriptions in name are as follows:**
- tg -> target: {auto, next}
- lr -> learnrate
- nl -> number of hidden layers
- hs -> size of hidden layers
- mb -> minibatch size
- bn -> Whether of not to use batch normalization
- kp -> The keep probability for drop out
- ds -> The family of multivariate distribution to use {ident, diag, full}
- bc -> bad count for early stopping
- em -> The ratio of embedding sizes to number of classes for categorical inputs
"""
import os
import sys
# So we can run this code on arbitrary environment which has tensorflow but not safekit installed
# TODO: Test this on DS data
# TODO: skipheader error message that is informative
# TODO: Fix replay learning
# TODO: Comment for usage.
cyberpath = '/'.join(os.path.realpath(__file__).split('/')[:-3])
sys.path.insert(0, cyberpath)
import tensorflow as tf
import numpy as np
import json
from safekit.batch import OnlineBatcher, NormalizingReplayOnlineBatcher, split_batch
from safekit.graph_training_utils import ModelRunner, EarlyStop
from safekit.tf_ops import join_multivariate_inputs, dnn, \
diag_mvn_loss, multivariate_loss, eyed_mvn_loss, \
full_mvn_loss, layer_norm, batch_normalize
from safekit.util import get_multivariate_loss_names, make_feature_spec, make_loss_spec, Parser
import time
import random
def return_parser():
    """
    Defines and returns argparse ArgumentParser object for the dnn
    auto-encoder command line script.

    :return: Parser (safekit.util)
    """
    parser = Parser("Dnn auto-encoder for online unsupervised training.")
    parser.add_argument('datafile',
                        type=str,
                        help='The csv data file for our unsupervised training.'+\
                             'fields: day, user, redcount, [count1, count2, ...., count408]')
    parser.add_argument('results_folder', type=str,
                        help='The folder to print results to.')
    parser.add_argument('dataspecs', type=str,
                        help='Filename of json file with specification of feature indices.')
    parser.add_argument('-learnrate', type=float, default=0.001,
                        help='Step size for gradient descent.')
    parser.add_argument('-numlayers', type=int, default=3,
                        help='Number of hidden layers.')
    parser.add_argument('-hiddensize', type=int, default=20,
                        help='Number of hidden units in hidden layers.')
    parser.add_argument('-mb', type=int, default=256,
                        help='The mini batch size for stochastic gradient descent.')
    parser.add_argument('-act', type=str, default='tanh',
                        help='May be "tanh" or "relu"')
    parser.add_argument('-norm', type=str, default="none",
                        help='Can be "layer", "batch", or "none"')
    parser.add_argument('-keep_prob', type=float, default=None,
                        help='Percent of nodes to keep for dropout layers.')
    parser.add_argument('-debug', action='store_true',
                        help='Use this flag to print feed dictionary contents and dimensions.')
    parser.add_argument('-dist', type=str, default='diag',
                        help='"diag" or "ident". Describes whether to model multivariate guassian with identity, '
                             'or arbitrary diagonal covariance matrix.')
    # Was type=str: a value given on the command line stayed a string all the
    # way into EarlyStop's badcount comparison.  lstm_agg already declares
    # this flag as int.
    parser.add_argument('-maxbadcount', type=int, default=20,
                        help='Threshold for early stopping.')
    parser.add_argument('-embedding_ratio', type=float, default=.75,
                        help='For determining size of embeddings for categorical features.')
    parser.add_argument('-min_embed', type=int, default=2,
                        help='Minimum size for embeddings of categorical features.')
    parser.add_argument('-max_embed', type=int, default=1000,
                        help='Maximum size for embeddings of categorical features.')
    parser.add_argument('-verbose', type=int, default=0, help='1 to print full loss contributors.')
    parser.add_argument('-variance_floor', type=float, default=0.01,
                        help='Parameter for diagonal MVN learning.')
    parser.add_argument('-initrange', type=float, default=1.0,
                        help='For weight initialization.')
    parser.add_argument('-decay_rate', type=float, default=1.0,
                        help='Exponential learn rate decay for gradient descent.')
    parser.add_argument('-decay_steps', type=int, default=20,
                        help='Number of updates to perform learn rate decay')
    parser.add_argument('-alpha', type=float, default=0.99,
                        help='Parameter for exponential moving average and variance')
    parser.add_argument('-input_norm', action='store_true',
                        help='Use this flag for online normalization')
    parser.add_argument('-refresh_ratio', type=float, default=0.5,
                        help='The proportion of the new mini-batch to use in refreshing the pool.')
    parser.add_argument('-ratio', nargs='+', type=int, default=[1, 1],
                        help='(tuple) (x, y): Number of new batches of data points x and number of old data points y.')
    parser.add_argument('-pool_size', type=int, default=9000,
                        help='The scale of the pool.')
    parser.add_argument('-random_seed', type=int, default=None,
                        help='For reproducible results')
    parser.add_argument('-replay', action='store_true',
                        help='Use this flag for replay learning')
    parser.add_argument('-delimiter', type=str, default=' ',
                        help="Delimiter for input text file. You should be using ' ' for the dayshuffled cert.")
    parser.add_argument('-skipheader', action='store_true',
                        help="Whether or not to skip first line of input file.")
    return parser
def write_results(datadict, pointloss, outfile):
    """
    Writes loss for each datapoint, along with meta-data to file.

    :param datadict: Dictionary of data names (str) keys to numpy matrix values for this mini-batch.
    :param pointloss: MB X 1 numpy array
    :param outfile: Where to write results.
    """
    times = datadict['time'].flatten().tolist()
    users = datadict['user'].tolist()
    reds = datadict['redteam'].flatten().tolist()
    losses = pointloss.flatten().tolist()
    for row in zip(times, users, reds, losses):
        outfile.write('%s %s %s %s\n' % row)
def write_all_contrib(datadict, pointloss, contrib, outfile):
    """
    Writes loss, broken down loss from all contributors, and metadata for each datapoint to file.

    :param datadict: Dictionary of data names (str) keys to numpy matrix values for this mini-batch.
    :param pointloss: MB X 1 numpy array.
    :param contrib: MB X total_num_loss_contributors numpy array.
    :param outfile: Where to write results.
    """
    rows = zip(datadict['time'].tolist(),
               datadict['user'].tolist(),
               datadict['redteam'].tolist(),
               pointloss.flatten().tolist(),
               contrib.tolist())
    for tstamp, usr, red_label, point_loss, contributor in rows:
        outfile.write('%s %s %s %s ' % (tstamp, usr, red_label, point_loss))
        # Render the contributor list as space-separated values.
        contrib_str = str(contributor).strip('[').strip(']').replace(',', '')
        outfile.write(contrib_str)
        outfile.write('\n')
if __name__ == '__main__':
    np.seterr(all='raise')  # turn numpy floating point warnings into exceptions
    args = return_parser().parse_args()
    if args.random_seed is None:
        args.random_seed = random.randint(0,1000)
    # Map the -norm flag to the corresponding normalization function.
    normalizers = {'none': None,
                   'layer': layer_norm,
                   'batch': batch_normalize}
    # Encode the hyperparameter configuration in the results file name.
    outfile_name = "lanlAgg__lr_%.2f__tg_auto__rs_%s__ir_%.2f__nl_%s__hs_%s__mb_%s__nm_%s__kp_%s__ds_%s__bc_%s__em_%s__dr_%.2f__ds_%s" % (
        args.learnrate,
        args.random_seed,
        args.initrange,
        args.numlayers,
        args.hiddensize,
        args.mb,
        args.norm,
        args.keep_prob,
        args.dist,
        args.maxbadcount,
        args.embedding_ratio,
        args.decay_rate,
        args.decay_steps)
    tf.set_random_seed(args.random_seed)
    start_time = time.time()
    outfile_name = str(start_time) + '__' + outfile_name  # time stamp makes the file name unique
    if not args.results_folder.endswith('/'):
        args.results_folder += '/'
    # Results are staged in /tmp and moved to the results folder on completion.
    os.system('mkdir /tmp/dnn_agg/')
    outfile = open('/tmp/dnn_agg/' + outfile_name, 'w')
    if args.act == 'tanh':
        activation = tf.tanh
    elif args.act == 'relu':
        activation = tf.nn.relu
    else:
        raise ValueError('Activation must be "relu", or "tanh"')
    # Select the covariance structure of the multivariate normal loss.
    # NOTE(review): an unrecognized -dist leaves mvn unbound (NameError at
    # make_loss_spec below); the lstm_agg model falls back to diag instead.
    if args.dist == "ident":
        mvn = eyed_mvn_loss
    elif args.dist == "diag":
        mvn = diag_mvn_loss
    elif args.dist == "full":
        mvn = full_mvn_loss
    dataspecs = json.load(open(args.dataspecs, 'r'))
    datastart_index = dataspecs['counts']['index'][0]
    if not args.replay:
        data = OnlineBatcher(args.datafile, args.mb, skipheader=args.skipheader,
                             alpha=args.alpha, norm=args.input_norm,
                             delimiter=args.delimiter,
                             datastart_index=datastart_index)
    else:
        data = NormalizingReplayOnlineBatcher(args.datafile, args.mb, skipheader=args.skipheader,
                                              refresh_ratio=args.refresh_ratio, ratio=tuple(args.ratio),
                                              pool_size=args.pool_size, delimiter=args.delimiter,
                                              alpha=args.alpha,datastart_index=datastart_index)
    feature_spec = make_feature_spec(dataspecs)
    # Placeholders for all inputs; categorical features get embeddings sized
    # by embedding_ratio, clipped to [min_embed, max_embed].
    x, ph_dict = join_multivariate_inputs(feature_spec, dataspecs,
                                          args.embedding_ratio, args.max_embed, args.min_embed)
    h = dnn(x, layers=[args.hiddensize for i in range(args.numlayers)],
            act=activation, keep_prob=args.keep_prob, norm=normalizers[args.norm],
            scale_range=args.initrange)
    loss_spec = make_loss_spec(dataspecs, mvn)
    loss_matrix = multivariate_loss(h, loss_spec, ph_dict, variance_floor=args.variance_floor)
    loss_vector = tf.reduce_sum(loss_matrix, reduction_indices=1)  # is MB x 1
    loss = tf.reduce_mean(loss_vector)  # is scalar
    loss_names = get_multivariate_loss_names(loss_spec)
    eval_tensors = [loss, loss_vector, loss_matrix]
    model = ModelRunner(loss, ph_dict, learnrate=args.learnrate, opt='adam', debug=args.debug, decay_rate=args.decay_rate, decay_steps=args.decay_steps)
    raw_batch = data.next_batch()
    current_loss = sys.float_info.max
    not_early_stop = EarlyStop(args.maxbadcount)
    # Feature names that have loss terms; each needs a matching target entry.
    loss_feats = [triple[0] for triple in loss_spec]
    # training loop
    continue_training = not_early_stop(raw_batch, current_loss)
    while continue_training:  # mat is not None and self.badcount < self.badlimit and loss != inf, nan:
        datadict = split_batch(raw_batch, dataspecs)
        # Autoencoding: the targets are the inputs themselves.
        targets = {'target_' + name: datadict[name] for name in loss_feats}
        datadict.update(targets)
        # Loss is evaluated before the parameter update, so the reported
        # scores reflect data the model has not yet trained on.
        current_loss, pointloss, contrib = model.eval(datadict, eval_tensors)
        model.train_step(datadict)
        if args.verbose == 1 and not data.replay:
            write_all_contrib(datadict, pointloss, contrib, outfile)
        elif args.verbose == 0 and not data.replay:
            write_results(datadict, pointloss, outfile)
        print('index: %s loss: %.4f' % (data.index, current_loss))
        raw_batch = data.next_batch()
        continue_training = not_early_stop(raw_batch, current_loss)
    if continue_training < 0:
        exit(1)
    outfile.close()
    os.system('mv /tmp/dnn_agg/' + outfile_name + ' ' + args.results_folder + outfile_name)
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/safekit/features/merge_streams.py | safekit/features/merge_streams.py | from datetime import datetime
class Merge:
    """
    Live merging of csv files. The call of this object is a generator function
    which interleaves lines from a collection of files, ordered by a sort_column
    parameter.

    Assumes:

    (i) Individual files are ordered by ascending sort column values.
    (ii) Individual files have headers with one column named the same as <sort_column> parameter.
    (iii) Files to merge are in the same folder specified by <file_path> parameter.

    The generator operates as follows:

    (i) Upon initialization, aligned lists of files, file names, file headers, and
        the first non-header line (split on delimiter with file-type index appended)
        of each file are constructed.
    (ii) When the Merge object is called, the pending event with the earliest
         time stamp (specified by <sort_column> and <date_format>) is selected.
    (iii) That line (split on delimiter) is yielded along with the name of the
          file it came from (determined by the appended event_type int).
    (iv) The line is replaced from the file it came from.
    (v) If there are no more lines left in that file, it is closed and its
        pending-event slot is retired.
    (vi) Concludes generating when all files are ended.
    """
    def __init__(self, filepath='./',
                 file_list=None,
                 sort_column='time',
                 date_format='int',
                 delimiter=','):
        """
        :param filepath: Path to folder with files to merge.
        :param file_list: List of names of files to merge. Defaults to the two toy files.
        :param sort_column: Column to sort lines of files on for sequential ordering of log lines.
        :param date_format: Can be any format string which makes sense to datetime.strptime or 'int' for simple integer time stamps.
        :param delimiter: Delimiter of csv columns, e.g. ',', ' ' ...
        """
        if file_list is None:
            # A mutable default list would be shared (and mutated) across
            # instances; build the default fresh instead.
            file_list = ['short_t_toy_auth.txt', 'short_t_toy_proc.txt']
        if not filepath.endswith('/'):
            filepath += '/'
        self.file_list = list(file_list)
        self.filepath = filepath
        self.delimiter = delimiter
        # Previously never stored, so any non-'int' date_format raised
        # AttributeError inside the sort lambda.
        self.date_format = date_format
        self.files = [open(filepath + f, 'r') for f in self.file_list]
        # Headers are now split on the configured delimiter (was hard-coded ',').
        self._headers = [f.readline().strip().split(delimiter) for f in self.files]
        self.sorters = [header.index(sort_column) for header in self._headers]
        # list() so the container behaves the same under Python 2 and 3.
        self.event_types = list(range(len(self.files)))
        self.events = [f.readline().strip().split(delimiter) + [idx] for idx, f in enumerate(self.files)]
        self.event_lengths = [len(header) for header in self._headers]
        self.auth_index = 0
        self.proc_index = 0
        if date_format == 'int':
            self.sort_function = lambda x: int(x[self.sorters[x[-1]]])
        else:
            self.sort_function = lambda x: datetime.strptime(x[self.sorters[x[-1]]], self.date_format)

    @property
    def headers(self):
        """
        :return: A list of headers (split by delimiter) from files being merged
        """
        return self._headers

    def next_event(self, event_type):
        """
        :param event_type: Integer associated with a file to read from.
        :return: Next event (line from file split on delimiter with type appended) from file associated with event_type.
        """
        # Split on the configured delimiter (was hard-coded ',').
        return self.files[event_type].readline().strip().split(self.delimiter) + [event_type]

    def __call__(self):
        """
        :return: (tuple) filename, Next event (line from file split on delimiter) according to time stamp.
        """
        # Drop files that were empty apart from their header line.
        self.events = [event for event in self.events if event[0] != '']
        while self.events:
            least_time_event = min(self.events, key=self.sort_function)
            event_type = least_time_event[-1]
            yield self.file_list[event_type], least_time_event[:-1]
            new_event = self.next_event(event_type)
            slot = self.events.index(least_time_event)
            if new_event[0] == '':
                # File exhausted: close it and retire only its pending-event
                # slot.  The previous implementation popped positionally from
                # files/sorters/file_list/... as well, which desynchronized
                # the appended event_type index whenever a file that was not
                # last in the lists finished first.
                self.files[event_type].close()
                self.events.pop(slot)
            else:
                self.events[slot] = new_event
                assert len(new_event) == self.event_lengths[event_type] + 1
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/safekit/features/__init__.py | safekit/features/__init__.py | python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false | |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/safekit/features/lanl/char_feats.py | safekit/features/lanl/char_feats.py | """
SOS = 0, EOS = 1, all other chars are ASCII values - 30
.. Note:: Line numbers in raw_char are off by one from the original raw data in auth_h.txt. However, no data is changed.
.. Note:: The first field time stamp is not transliterated here, just used for meta data
"""
import argparse
from word_feats import second_to_day
# TODO: CHECK THAT PADDING WORKS, LINE LENGTHS ARE CORRECT AND WRITING PROPERLY
def return_parser():
    """Build and return the argparse.ArgumentParser for this command line script."""
    parser = argparse.ArgumentParser("Convert raw loglines to ASCII code (minus 30) integer representation.")
    # Both flags are plain string options; declare them data-driven.
    for flag, helptext in (('-datapath', 'Path to files to transliterate.'),
                           ('-outfile', 'Where to write derived features.')):
        parser.add_argument(flag, type=str, help=helptext)
    return parser
def translate_line(string, pad_len):
    """
    Transliterate a log line to integer character codes.

    Each character maps to ``ord(c) - 30``; the result is bracketed by the SOS
    token (0) and EOS token (1), then right-padded with ``pad_len`` zero tokens.

    :param string: Log line text (time field already stripped).
    :param pad_len: Number of '0' pad tokens to append after EOS.
    :return: Space-delimited token string terminated by a newline.
    """
    codes = " ".join(str(ord(ch) - 30) for ch in string)
    padding = " ".join("0" for _ in range(pad_len))
    return "0 " + codes + " 1 " + padding + "\n"
if __name__ == '__main__':
    # Script entry point: stream auth_h.txt, transliterate each human-user
    # weekday line to padded character codes, and label red-team lines.
    LONGEST_LEN = 120  # Length of the longest line in auth_h.txt, used for padding
    weekend_days = [3, 4, 10, 11, 17, 18, 24, 25, 31, 32, 38, 39, 45, 46, 47, 52, 53]
    args = return_parser().parse_args()
    if not args.datapath.endswith('/'):
        args.datapath += '/'
    # Red-team ground truth: full raw lines, matched by exact string membership below.
    with open(args.datapath + 'redevents.txt', 'r') as red:
        redevents = set(red.readlines())
    with open(args.datapath + "auth_h.txt", 'r') as infile, open(args.outfile, 'w') as outfile:
        outfile.write('line_number second day user red seq_len start_sentence\n')
        infile.readline()  # skip the raw-file header
        for line_num, line in enumerate(infile):
            if line_num % 10000 == 0:
                print line_num
            # Drop the leading timestamp field before transliteration.
            line_minus_time = ','.join(line.strip().split(',')[1:])
            diff = LONGEST_LEN - len(line_minus_time)  # zero-pad count to fixed width
            raw_line = line.split(",")
            if len(raw_line) != 9:
                print('bad length')
                continue
            sec = raw_line[0]
            user = raw_line[1].strip().split('@')[0]
            day = second_to_day(int(sec))
            red = 0
            red += int(line in redevents)  # 1 iff this exact line is a red-team event
            # Keep only human users ('U...') on non-weekend days.
            if user.startswith('U') and day not in weekend_days:
                index_rep = translate_line(line_minus_time, diff)
                outfile.write("%s %s %s %s %s %s %s" % (line_num,
                                                        sec,
                                                        day,
                                                        user.replace("U", ""),
                                                        red,
                                                        len(line_minus_time) + 1,
                                                        index_rep))
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/safekit/features/lanl/__init__.py | safekit/features/lanl/__init__.py | python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false | |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/safekit/features/lanl/word_feats.py | safekit/features/lanl/word_feats.py | """
@authors: Ryan Baerwolf, Aaron Tuor (rdbaerwolf@gmail.com, baerwor@wwu.edu, aaron.tuor@pnnl.gov)
Derives word-level features for LANL using auth_h.txt. Weekend days are filtered out of data set.
"""
import argparse
import operator
import json
# Days (since start of collection) that fall on weekends; filtered from output.
weekend_days = [3, 4, 10, 11, 17, 18, 24, 25, 31, 32, 38, 39, 45, 46, 47, 52, 53]
# Tokens seen fewer than this many times map to an out-of-vocabulary id.
OOV_CUTOFF = 40
# Frequency Dicts (token -> occurrence count, filled on the first pass)
usr_counts = {}
pc_counts = {}
domain_counts = {}
# Reserved token ids: start/end-of-sentence markers.
sos = 0
eos = 1
# Lookups: per-type out-of-vocabulary ids.
usr_OOV = 2
pc_OOV = 3
domain_OOV = 4
# Token -> integer id maps, filled lazily by lookup() on the second pass.
usr_inds = {}
pc_inds = {}
domain_inds = {}
auth_dict = {}
logon_dict = {}
orient_dict = {}
success_dict = {}
curr_ind = 5  # All IDs should be unique across all type dictionaries, we want multiple types of OOVs
def return_parser():
    """Build and return the argparse.ArgumentParser for this command line script."""
    parser = argparse.ArgumentParser("Convert raw loglines to ASCII code (minus 30) integer representation.")
    # -datafile is the only option with a default; the rest are plain strings.
    parser.add_argument('-datafile', type=str,
                        default="/home/hutch_research/data/lanl/auth_h.txt",
                        help='Path to files to transliterate.')
    for flag, helptext in (('-outfile', 'Where to write derived features.'),
                           ('-record_dir', 'Directory to dump frequency counts, and word token mappings in'),
                           ('-redfile', 'Location of the completely specified redteam events file.')):
        parser.add_argument(flag, type=str, help=helptext)
    return parser
def lookup(word, ind_dict, count_dict):
    """
    Resolve a raw word token to its integer id, assigning a fresh id on first sight.

    Rare tokens (count below OOV_CUTOFF) collapse to a per-type OOV id, chosen by
    the *identity* of the counter dict passed in.

    :param word: Raw text word token
    :param ind_dict: (dict) keys: raw word tokens, values: Integer representation
    :param count_dict: (dict) token frequencies, or None to skip OOV handling
    :return: Integer representation of word
    """
    global curr_ind
    if count_dict is not None and count_dict[word] < OOV_CUTOFF:
        # Map rare tokens to the OOV id matching the counter's identity.
        for counter, oov_id in ((usr_counts, usr_OOV),
                                (pc_counts, pc_OOV),
                                (domain_counts, domain_OOV)):
            if count_dict is counter:
                return oov_id
        return None  # unknown counter: mirror the original implicit-None fallthrough
    if word not in ind_dict:
        ind_dict[word] = curr_ind
        curr_ind += 1
    return ind_dict[word]
def increment_freq(ind_dict, key):
    """
    Bump the occurrence count for ``key``, starting at 1 on first sight.

    :param ind_dict: (dict) keys: raw word tokens, values: number of occurrences
    :param key: Raw word token
    """
    ind_dict[key] = ind_dict.get(key, 0) + 1
def split_line(string):
    """
    Tokenize the fields of a raw auth_h.txt line that need consistent ids.

    :param string: Raw log line from auth_h.txt
    :return: (tuple) src_user, src_domain, dst_user (with '$' stripped),
             dst_domain, src_pc, dst_pc
    """
    fields = string.strip().split(',')
    src = fields[1].split("@")   # "user@domain"
    dst = fields[2].split("@")
    return src[0], src[1], dst[0].replace("$", ""), dst[1], fields[3], fields[4]
def get_line_counts(string):
    """
    Update the module-level frequency counters with every token in one log line.

    Lines that do not have exactly 9 comma-separated fields are ignored.

    :param string: Raw log line from auth_h.txt
    """
    if len(string.strip().split(",")) != 9:
        return
    src_user, src_domain, dst_user, dst_domain, src_pc, dst_pc = split_line(string)
    increment_freq(usr_counts, src_user)
    increment_freq(domain_counts, src_domain)
    increment_freq(domain_counts, dst_domain)
    # Destination "user" fields starting with 'U' are people; otherwise machines.
    dst_counter = usr_counts if dst_user.startswith("U") else pc_counts
    increment_freq(dst_counter, dst_user)
    increment_freq(pc_counts, dst_pc)
    increment_freq(pc_counts, src_pc)
def translate_line(string):
    """
    Translate a raw log line into integer word-token ids bracketed by sos/eos.

    :param string: Raw log line from auth_h.txt
    :return: (str) Space-delimited integer token sequence, newline-terminated.
    """
    fields = string.split(",")
    time = int(fields[0])  # unused; kept for possible day-of-week/time-of-day categoricals
    src_user, src_domain, dst_user, dst_domain, src_pc, dst_pc = split_line(string)
    src_user = lookup(src_user, usr_inds, None)
    src_domain = lookup(src_domain, domain_inds, domain_counts)
    # Destination may be a person ('U...') or a machine account.
    if dst_user.startswith('U'):
        dst_user = lookup(dst_user, usr_inds, None)
    else:
        dst_user = lookup(dst_user, pc_inds, pc_counts)
    dst_domain = lookup(dst_domain, domain_inds, domain_counts)
    src_pc = lookup(src_pc, pc_inds, pc_counts)
    dst_pc = lookup(dst_pc, pc_inds, pc_counts)
    if fields[5].startswith("MICROSOFT_AUTH"):  # Deals with file corruption for this value.
        fields[5] = "MICROSOFT_AUTH"
    tokens = [sos, src_user, src_domain, dst_user, dst_domain, src_pc, dst_pc,
              lookup(fields[5], auth_dict, None),
              lookup(fields[6], logon_dict, None),
              lookup(fields[7], orient_dict, None),
              lookup(fields[8].strip(), success_dict, None),
              eos]
    return " ".join(str(t) for t in tokens) + "\n"
def write_sorted_counts(count_dict, out_fn):
    """
    Dump a frequency dict to ``out_fn + '.json'`` and, sorted ascending by
    count, to ``out_fn + '.txt'`` as "token, count" lines.

    :param count_dict: (dict) keys: word tokens, values: number of occurrences
    :param out_fn: (str) Output path stem; extensions are appended.
    """
    with open(out_fn + ".json", 'w') as json_file:
        json.dump(count_dict, json_file)
    by_count = sorted(count_dict.items(), key=lambda item: item[1])
    with open(out_fn + ".txt", 'w') as outfile:
        for token, count in by_count:
            outfile.write("%s, %s\n" % (token, count))
def second_to_day(seconds):
    """
    :param seconds: (int) Time in seconds starting at 0 as start of data collection.
    :return: (int) Time in days starting at 0 as start of data collection

    Uses floor division: under Python 3 the original ``/`` became true division
    and returned a float day, which silently breaks integer comparisons such as
    ``day not in weekend_days``. ``//`` is identical under Python 2.
    """
    return int(seconds) // 86400
if __name__ == '__main__':
args = return_parser().parse_args()
if not args.record_dir.endswith('/'):
args.record_dir += '/'
# pass to get token counts
with open(args.datafile, 'r') as infile:
infile.readline()
for line_num, line in enumerate(infile):
if line_num % 100000 == 0:
print line_num
linevec = line.strip().split(',')
user = linevec[1]
day = second_to_day(int(linevec[0]))
if user.startswith('U') and day not in weekend_days:
get_line_counts(line)
write_sorted_counts(usr_counts, args.record_dir + "usr_counts")
write_sorted_counts(pc_counts, args.record_dir + "pc_counts")
write_sorted_counts(domain_counts, args.record_dir + "domain_counts")
# pass to make features
with open(args.redfile, 'r') as red:
redevents = set(red.readlines())
with open(args.datafile, 'r') as infile, open(args.outfile, 'w') as outfile:
outfile.write(
'line_number second day user red src_usr src_domain dst_usr dst_domain src_pc dst_pc auth_type logon auth_orient success\n')
infile.readline()
for line_num, line in enumerate(infile):
if line_num % 10000 == 0:
print line_num
raw_line = line.split(",")
if len(raw_line) != 9:
print('bad length')
continue
sec = raw_line[0]
user = raw_line[1].strip().split('@')[0]
day = second_to_day(int(sec))
red = 0
red += int(line in redevents)
if user.startswith('U') and day not in weekend_days:
outfile.write("%s %s %s %s %s %s" % (line_num,
sec,
day,
user.replace("U", ""),
red,
translate_line(line)))
with open(args.record_dir + str(OOV_CUTOFF) + "_em_size.txt", 'w') as emsize_file:
emsize_file.write("%s" % curr_ind)
other_inds = {'sos': 0, 'eos': 1, 'usr_OOV': 2, 'pc_OOV': 3, 'domain_OOV': 4}
for map, file in zip([usr_inds.items(),
pc_inds.items(),
domain_inds.items(),
auth_dict.items(),
logon_dict.items(),
orient_dict.items(),
success_dict.items(),
other_inds.items()],
['pc_map.json',
'domain_map.json',
'auth_map.json',
'logon_map.json',
'orient_map.json',
'success_map.json',
'other_map.json']):
json.dump(map, open(args.record_dir + file, 'w'))
b_usr_inds = {v: k for k, v in usr_inds.items()}
b_pc_inds = {v: k for k, v in pc_inds.items()}
b_domain_inds = {v: k for k, v in domain_inds.items()}
b_auth_inds = {v: k for k, v in auth_dict.items()}
b_logon_inds = {v: k for k, v in logon_dict.items()}
b_orient_inds = {v: k for k, v in orient_dict.items()}
b_success_inds = {v: k for k, v in success_dict.items()}
b_other_inds = {v: k for k, v in other_inds.items()}
back_mappings = dict(b_usr_inds.items() +
b_pc_inds.items() +
b_domain_inds.items() +
b_auth_inds.items() +
b_logon_inds.items() +
b_orient_inds.items() +
b_success_inds.items() +
b_other_inds.items())
json.dump(back_mappings, open(args.record_dir + 'word_token_map.json', 'w'))
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/safekit/features/lanl/lanl_agg_features.py | safekit/features/lanl/lanl_agg_features.py | """
New method for determining general notion of common/uncommon: >< mean.
Also this script doesn't print weekend events.
"""
import os
import sys
# So we can run this code on arbitrary environment which has tensorflow but not safekit installed
cyberpath = '/'.join(os.path.realpath(__file__).split('/')[:-4])
sys.path.insert(0, cyberpath)
from safekit.features import merge_streams
import argparse
from itertools import product
from collections import Counter
from pprint import pprint
import numpy as np
def return_parser():
    """Build and return the argparse.ArgumentParser for this command line script."""
    parser = argparse.ArgumentParser("Crisp aggregate feature derivation script.")
    # All three flags are plain string options; declare them data-driven.
    for flag, helptext in (('-datapath', 'Path to files to transliterate.'),
                           ('-outfile', 'Where to write derived features.'),
                           ('-redpath', 'Where the json of completely specified redteam events is.')):
        parser.add_argument(flag, type=str, help=helptext)
    return parser
def popularness(id, counter):
    """
    Classify ``id`` as 'unpop_' when it accounts for less than 5% of the
    counter's 'total', otherwise 'common_'.
    """
    share = float(counter[id]) / float(counter['total'])
    return 'unpop_' if share < .05 else 'common_'
def gen_popularness(id, counter):
    """
    Classify ``id`` against the running mean count: at or above the mean is
    'common_', below it is 'unpop_'.
    """
    if counter[id] >= counter['mean']:
        return 'common_'
    return 'unpop_'
class ID:
    """Hands out stable, consecutive integer ids for arbitrary hashable values."""

    def __init__(self):
        # Next id to assign, and the value -> id map (read externally).
        self.id = 0
        self.map = {}

    def __call__(self, value):
        """Return the id for ``value``, assigning the next free id on first sight."""
        if value in self.map:
            return self.map[value]
        assigned = self.id
        self.map[value] = assigned
        self.id += 1
        return assigned
def second_to_day(seconds):
    """
    :param seconds: (int or str) Seconds since the start of data collection.
    :return: (int) Days since the start of data collection.
    :raises AssertionError: if the day index reaches 58 (past end of data set).

    Uses floor division: under Python 3 the original ``/`` became true division
    and returned a float, breaking integer day comparisons. ``//`` is identical
    under Python 2.
    """
    day = int(seconds) // 86400
    assert day < 58, 'Too many seconds, reached day %s' % day
    return day
# Six-hour slot index (0-3) -> human-readable time-of-day label.
daymap = {0: '12am-6am',
          1: '6am-12pm',
          2: '12pm-6pm',
          3: '6pm-12am'}


def part_of_day(seconds):
    """
    :param seconds: (int or str) Seconds since the start of data collection.
    :return: (str) Six-hour time-of-day label from ``daymap``.

    Uses floor division: under Python 3 the original ``/ 21600`` produced a
    float, and a float key raises KeyError against daymap's int keys. ``//``
    is identical under Python 2.
    """
    time_of_day_in_seconds = int(seconds) % 86400
    daypart = time_of_day_in_seconds // 21600
    return daymap[daypart]
def hour_num(seconds):
    """
    :param seconds: (int or str) Seconds since the start of data collection.
    :return: (int) Hours since the start of data collection.

    Uses floor division so the result stays an int under Python 3 (the caller
    indexes ``hour_counts`` with it). Identical under Python 2.
    """
    return int(seconds) // 3600
def print_events(of, day, redevents, usercounts):
    """
    Write one CSV row per user: day, user id, red-team count, then every
    aggregate feature count in the per-user dict's iteration order.

    :param of: Writable file-like object.
    :param day: Day index for these aggregates.
    :param redevents: Mapping user -> red-team event count for the day.
    :param usercounts: Mapping user -> {feature name: count}.
    """
    for user, feature_counts in usercounts.items():
        row = [str(day), str(user), str(redevents[user])]
        row.extend(str(v) for v in feature_counts.values())
        of.write(','.join(row) + '\n')
if __name__ == '__main__':
    # Script entry point: merge auth and proc event streams in time order and
    # emit per-user, per-day aggregate count features, flushing a CSV block at
    # each day rollover.
    weekend_days = {3, 4, 10, 11, 17, 18, 24, 25, 31, 32, 38, 39, 45, 46, 47, 52, 53}
    # Feature-name components; the cross product below names every count column.
    popularity = ['unpop_', 'common_']
    specificity = ['user_', 'all_']
    objects = ['src_pc_', 'dst_pc_', 'dst_user_', 'proc_pc_', 'proc_']
    times = ['all_day', '12am-6am', '6am-12pm', '12pm-6pm', '6pm-12am']
    # src_cmp_time_pop_user src_cmp_time_pop_all src_cmp_
    c = list(product(popularity, specificity, objects, times))
    counts = [''.join(k) for k in c]
    pprint(counts)
    print(len(counts))
    # Categorical raw-field values counted verbatim (auth type, logon type, etc.).
    other = ['auth_h.txt',
             'proc_h.txt',
             'Start',
             'End',
             '?',
             'MICROSOFT_AUTH',
             'Kerberos',
             'TivoliAP',
             'NTLM',
             'Negotiate',
             'Wave',
             'CygwinLsa',
             'ACRONIS_RELOGON_AUTHENTICATION_PACKAGE',
             'N',
             'NETWARE_AUTHENTICATION_PACKAGE_V1_0',
             'Setuid',
             'Network',
             'Service',
             'RemoteInteractive',
             'Batch',
             'CachedInteractive',
             'NetworkCleartext',
             'Unlock',
             'Interactive',
             'NewCredentials',
             'TGS',
             'TGT',
             'ScreenUnlock',
             'ScreenLock',
             'LogOff',
             'LogOn',
             'AuthMap',
             'Success',
             'Fail']
    counts += other
    print(len(counts))
    # Stable integer id generators for PCs, users, and processes.
    pcid = ID()
    uid = ID()
    prid = ID()
    args = return_parser().parse_args()
    if not args.datapath.endswith('/'):
        args.datapath += '/'
    if not args.redpath.endswith('/'):
        args.redpath += '/'
    with open(args.redpath + 'redevents.txt', 'r') as red:
        redevents = set(red.readlines())
    # Time-ordered merge of the two raw event streams.
    data = merge_streams.Merge(filepath=args.datapath, file_list=['auth_h.txt', 'proc_h.txt'],
                               sort_column='time', date_format='int', delimiter=',')
    with open(args.outfile, 'w') as of:
        # Per-day state (reset at day rollover) and global running counters.
        usercounts = {}
        dst_user_counts = Counter()
        src_pc_counts = Counter()
        dst_pc_counts = Counter()
        process_pc_counts = Counter()
        process_counts = Counter()
        user_dst_user_counts = {}
        user_pc_counts = {}
        user_src_pc_counts = {}
        user_dst_pc_counts = {}
        user_process_pc_counts = {}
        user_process_counts = {}
        point = 0
        day = 0
        hour_counts = [0]*1392  # 58 days * 24 hours of log-line volume
        red = Counter()
        # Temporary 'header' entry exists only to emit the column-name row.
        usercounts['header'] = {k: 0 for k in counts}
        of.write('day,user,red,' + ','.join([str(k) for k in usercounts['header'].keys()]) + '\n')
        del usercounts['header']
        for event_type, event in data():
            if point % 10000 == 0:
                print(point)
            point += 1
            # Only use lines labelled with a source user that is a person.
            if event[1].startswith('U'):
                time = event[0]
                hour_counts[int(hour_num(time))] += 1
                current_day = second_to_day(time)
                if current_day > day:
                    # Day rollover: flush yesterday's per-user rows and reset.
                    print_events(of, day, red, usercounts)
                    day = current_day
                    usercounts = {}
                    red = Counter()
                if int(current_day) not in weekend_days:
                    timeslice = part_of_day(time)
                    user = uid(event[1].split('@')[0].replace('$', ''))
                    # Red label: exact raw-line membership in the red-team set.
                    if ','.join(event) + '\n' in redevents:
                        redteamcount = 1
                    else:
                        redteamcount = 0
                    red[user] += redteamcount
                    if user not in usercounts:
                        # First sighting of this user today: zero all feature columns.
                        usercounts[user] = {k: 0 for k in counts}
                        user_dst_user_counts[user] = Counter()
                        user_pc_counts[user] = Counter()
                        user_src_pc_counts[user] = Counter()
                        user_dst_pc_counts[user] = Counter()
                        user_process_counts[user] = Counter()
                        user_process_pc_counts[user] = Counter()
                    if event_type == 'auth_h.txt':
                        # destination user
                        dst_user = uid(event[2].split('@')[0].replace('$', ''))
                        # all -- incremental running mean of per-id counts
                        # (the '- 2' excludes the 'total'/'mean' bookkeeping keys).
                        dst_user_counts[dst_user] += 1
                        dst_user_counts['total'] += 1
                        if len(dst_user_counts) == 2 and dst_user_counts[dst_user] == 1:
                            dst_user_counts['mean'] = 1.0
                        elif dst_user_counts[dst_user] > 1:
                            dst_user_counts['mean'] += 1.0/(len(dst_user_counts) - 2.0)
                        elif dst_user_counts[dst_user] == 1:
                            dst_user_counts['mean'] += (1 - dst_user_counts['mean'])/float(len(dst_user_counts) - 2)
                        p = gen_popularness(dst_user, dst_user_counts)
                        usercounts[user][p + 'all_dst_user_' + timeslice] += 1
                        usercounts[user][p + 'all_dst_user_' + 'all_day'] += 1
                        # user
                        user_dst_user_counts[user][dst_user] += 1
                        user_dst_user_counts[user]['total'] += 1
                        p = popularness(dst_user, user_dst_user_counts[user])
                        usercounts[user][p + 'user_dst_user_' + timeslice] += 1
                        usercounts[user][p + 'user_dst_user_' + 'all_day'] += 1
                        # source pc
                        src_pc = pcid(event[3].split('@')[0].replace('$', ''))
                        # all
                        src_pc_counts[src_pc] += 1
                        src_pc_counts['total'] += 1
                        if len(src_pc_counts) == 2 and src_pc_counts[src_pc] == 1:
                            src_pc_counts['mean'] = 1.0
                        elif src_pc_counts[src_pc] > 1:
                            src_pc_counts['mean'] += 1.0 / (len(src_pc_counts) - 2.0)
                        elif src_pc_counts[src_pc] == 1:
                            src_pc_counts['mean'] += (1 - src_pc_counts['mean']) / float(len(src_pc_counts) - 2)
                        p = gen_popularness(src_pc, src_pc_counts)
                        usercounts[user][p + 'all_src_pc_' + timeslice] += 1
                        usercounts[user][p + 'all_src_pc_' + 'all_day'] += 1
                        # user
                        user_src_pc_counts[user][src_pc] += 1
                        user_src_pc_counts[user]['total'] += 1
                        p = popularness(src_pc, user_src_pc_counts[user])
                        usercounts[user][p + 'user_src_pc_' + timeslice] += 1
                        usercounts[user][p + 'user_src_pc_' + 'all_day'] += 1
                        # dst pc
                        dst_pc = pcid(event[4].split('@')[0].replace('$', ''))
                        # all
                        dst_pc_counts[dst_pc] += 1
                        dst_pc_counts['total'] += 1
                        if len(dst_pc_counts) == 2 and dst_pc_counts[dst_pc] == 1:
                            dst_pc_counts['mean'] = 1.0
                        elif dst_pc_counts[dst_pc] > 1:
                            dst_pc_counts['mean'] += 1.0 / (len(dst_pc_counts) - 2.0)
                        elif dst_pc_counts[dst_pc] == 1:
                            dst_pc_counts['mean'] += (1 - dst_pc_counts['mean']) / float(len(dst_pc_counts) - 2)
                        p = gen_popularness(dst_pc, dst_pc_counts)
                        usercounts[user][p + 'all_dst_pc_' + timeslice] += 1
                        usercounts[user][p + 'all_dst_pc_' + 'all_day'] += 1
                        # user
                        user_dst_pc_counts[user][dst_pc] += 1
                        user_dst_pc_counts[user]['total'] += 1
                        p = popularness(dst_pc, user_dst_pc_counts[user])
                        usercounts[user][p + 'user_dst_pc_' + timeslice] += 1
                        usercounts[user][p + 'user_dst_pc_' + 'all_day'] += 1
                        # rest of auth.txt fields
                        if event[5].startswith('MICROSOFT_AUTH'):
                            usercounts[user]['MICROSOFT_AUTH'] += 1  # auth_type
                        else:
                            usercounts[user][event[5]] += 1  # auth_type
                        usercounts[user][event[6]] += 1  # logon_type
                        usercounts[user][event[7]] += 1  # auth_orient
                        usercounts[user][event[8]] += 1  # success/fail
                    elif event_type == 'proc_h.txt':
                        # proc pc
                        pc = pcid(event[2])
                        # all
                        process_pc_counts[pc] += 1
                        process_pc_counts['total'] += 1
                        if len(process_pc_counts) == 2 and process_pc_counts[pc] == 1:
                            process_pc_counts['mean'] = 1.0
                        elif process_pc_counts[pc] > 1:
                            process_pc_counts['mean'] += 1.0 / (len(process_pc_counts) - 2.0)
                        elif process_pc_counts[pc] == 1:
                            process_pc_counts['mean'] += (1 - process_pc_counts['mean']) / float(len(process_pc_counts) - 2)
                        p = gen_popularness(pc, process_pc_counts)
                        usercounts[user][p + 'all_proc_pc_' + timeslice] += 1
                        usercounts[user][p + 'all_proc_pc_' + 'all_day'] += 1
                        # user
                        user_process_pc_counts[user][pc] += 1
                        user_process_pc_counts[user]['total'] += 1
                        p = popularness(pc, user_process_pc_counts[user])
                        usercounts[user][p + 'user_proc_pc_' + timeslice] += 1
                        usercounts[user][p + 'user_proc_pc_' + 'all_day'] += 1
                        # process
                        proc = prid(event[3])
                        # all -- NOTE(review): processes use the 5% ratio test here,
                        # not the running-mean test used for the other 'all' counters.
                        process_counts[proc] += 1
                        process_counts['total'] += 1
                        p = popularness(proc, process_counts)
                        usercounts[user][p + 'all_proc_' + timeslice] += 1
                        usercounts[user][p + 'all_proc_' + 'all_day'] += 1
                        # user
                        user_process_counts[user][proc] += 1
                        user_process_counts[user]['total'] += 1
                        p = popularness(proc, user_process_counts[user])
                        usercounts[user][p + 'user_proc_' + timeslice] += 1
                        usercounts[user][p + 'user_proc_' + 'all_day'] += 1
                        # start/stop
                        usercounts[user][event[4]] += 1
        # Flush the final (possibly partial) day.
        print_events(of, day, red, usercounts)
    # Persist id mappings and raw counter dumps for later analysis.
    with(open('usermap.txt', 'w')) as u:
        for k,v in uid.map.iteritems():
            u.write('%s %s\n' % (k, v))
    with(open('pcmap.txt', 'w')) as u:
        for k, v in pcid.map.iteritems():
            u.write('%s %s\n' % (k, v))
    with(open('procmap.txt', 'w')) as u:
        for k, v in prid.map.iteritems():
            u.write('%s %s\n' % (k, v))
    np.savetxt('log_line_count_by_hour.txt', np.array(hour_counts))
    np.savetxt('dst_user_counts.txt', np.array(dst_user_counts.values()))
    np.savetxt('src_pc_counts.txt', np.array(src_pc_counts.values()))
    np.savetxt('dst_pc_counts.txt', np.array(dst_pc_counts.values()))
    np.savetxt('process_pc_counts.txt', np.array(process_pc_counts.values()))
    np.savetxt('process_counts.txt', np.array(process_counts.values()))
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/test/agg_tests.py | test/agg_tests.py | """
"""
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('lanl_datapath', type=str, help="path to lanl parent directory of aggregate model features.")
parser.add_argument('cert_datapath', type=str, help="path to cert parent directory of aggregate model features.")
parser.add_argument('logfile', type=str, help="File to write stderr messages.")
args = parser.parse_args()
if not args.lanl_datapath.endswith('/'):
args.lanl_datapath += '/'
if not args.cert_datapath.endswith('/'):
args.cert_datapath += '/'
modelpath = '/'.join(os.path.realpath(__file__).split('/')[:-2]) + '/safekit/models'
specpath = '/'.join(os.path.realpath(__file__).split('/')[:-2]) + '/safekit/features/specs/agg'
num_finished = 0
all_okay = 0
# =================================cert agg dnn stuff==================================================================
with open(args.logfile, 'a') as log:
log.write('cert dnn autoencoder w/diag covariance, batch norm, input norm, replay\n\n\n')
print('\n\ncert dnn autoencoder w/diag covariance, batch norm, input norm, replay\n\n\n')
ok = os.system('python %s/dnn_agg.py %scert_head.csv /tmp %s/cert_all_in_all_out_agg.json -dist diag -norm batch -replay 2>> %s' % (modelpath, args.cert_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('cert dnn autoencoder w/diag covariance, batch norm, input norm\n\n\n')
print('\n\ncert dnn autoencoder w/diag covariance, batch norm, input norm\n\n\n')
ok = os.system('python %s/dnn_agg.py %scert_head.csv /tmp %s/cert_all_in_all_out_agg.json -dist diag -input_norm -alpha 0.5 -norm batch 2>> %s' % (modelpath, args.cert_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('cert dnn autoencoder w/diag covariance, layer norm, input norm\n\n\n')
print('\n\ncert dnn autoencoder w/diag covariance, layer norm, input norm\n\n\n')
ok = os.system('python %s/dnn_agg.py %scert_head.csv /tmp %s/cert_all_in_all_out_agg.json -dist diag -input_norm -alpha 0.5 -norm layer 2>> %s' % (modelpath, args.cert_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('\n\ncert dnn autoencoder w/ identity covariance, no model norm, input norm\n\n\n')
print('\n\ncert dnn autoencoder w/ identity covariance, no model norm, input norm\n\n\n')
ok = os.system('python %s/dnn_agg.py %scert_head.csv /tmp %s/cert_all_in_all_out_agg.json -dist ident -input_norm -alpha 0.5 2>> %s' % (modelpath, args.cert_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('cert dnn autoencoder w/ full covariance, no model norm, no input norm\n\n\n')
print('\n\ncert dnn autoencoder w/ full covariance, no model norm, no input norm\n\n\n')
ok = os.system('python %s/dnn_agg.py %scert_head.csv /tmp %s/cert_all_in_all_out_agg.json -dist full -input_norm -alpha 0.5 2>> %s' % (modelpath, args.cert_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('cert dnn autoencoder w/ diag covariance, no model norm, no input norm\n\n\n')
print('\n\ncert dnn autoencoder w/ diag covariance, no model norm, no input norm\n\n\n')
ok = os.system('python %s/dnn_agg.py %scert_head.csv /tmp %s/cert_all_in_all_out_agg.json -dist diag 2>> %s' % (modelpath, args.cert_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('cert dnn autoencoder w/ identity covariance, no model norm, no input norm\n\n\n')
print('\n\ncert dnn autoencoder w/ identity covariance, no model norm, no input norm\n\n\n')
ok = os.system('python %s/dnn_agg.py %scert_head.csv /tmp %s/cert_all_in_all_out_agg.json -dist ident 2>> %s' % (modelpath, args.cert_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('cert dnn autoencoder w/ full covariance, no model norm, no input norm\n\n\n')
print('\n\ncert dnn autoencoder w/ full covariance, no model norm, no input norm\n\n\n')
ok = os.system('python %s/dnn_agg.py %scert_head.csv /tmp %s/cert_all_in_all_out_agg.json -dist full 2>> %s' % (modelpath, args.cert_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
# lanl agg dnn stuff
with open(args.logfile, 'a') as log:
log.write('dnn autoencoder w/diag covariance, batch norm, input norm, replay\n\n\n')
print('\n\ndnn autoencoder w/diag covariance, batch norm, input norm, replay\n\n\n')
ok = os.system('python %s/dnn_agg.py %slanl_agg_head.txt /tmp %s/lanl_count_in_count_out_agg.json -dist diag -norm batch -replay -delimiter , -skipheader 2>> %s' % (modelpath, args.lanl_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('dnn autoencoder w/diag covariance, batch norm, input norm\n\n\n')
print('\n\ndnn autoencoder w/diag covariance, batch norm, input norm\n\n\n')
ok = os.system('python %s/dnn_agg.py %slanl_agg_head.txt /tmp %s/lanl_count_in_count_out_agg.json -dist diag -input_norm -alpha 0.5 -norm batch -delimiter , -skipheader 2>> %s' % (modelpath, args.lanl_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('dnn autoencoder w/diag covariance, layer norm, input norm\n\n\n')
print('\n\ndnn autoencoder w/diag covariance, layer norm, input norm\n\n\n')
ok = os.system('python %s/dnn_agg.py %slanl_agg_head.txt /tmp %s/lanl_count_in_count_out_agg.json -dist diag -input_norm -alpha 0.5 -norm layer -delimiter , -skipheader 2>> %s' % (modelpath, args.lanl_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('dnn autoencoder w/ identity covariance, no model norm, input norm\n\n\n')
print('\n\ndnn autoencoder w/ identity covariance, no model norm, input norm\n\n\n')
ok = os.system('python %s/dnn_agg.py %slanl_agg_head.txt /tmp %s/lanl_count_in_count_out_agg.json -dist ident -input_norm -alpha 0.5 -delimiter , -skipheader 2>> %s' % (modelpath, args.lanl_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('dnn autoencoder w/ full covariance, no model norm, input norm\n\n\n')
print('\n\ndnn autoencoder w/ full covariance, no model norm, input norm\n\n\n')
ok = os.system('python %s/dnn_agg.py %slanl_agg_head.txt /tmp %s/lanl_count_in_count_out_agg.json -dist full -input_norm -alpha 0.5 -delimiter , -skipheader 2>> %s' % (modelpath, args.lanl_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('dnn autoencoder w/ diag covariance, no model norm, no input norm\n\n\n')
print('\n\ndnn autoencoder w/ diag covariance, no model norm, no input norm\n\n\n')
ok = os.system('python %s/dnn_agg.py %slanl_agg_head.txt /tmp %s/lanl_count_in_count_out_agg.json -dist diag -delimiter , -skipheader 2>> %s' % (modelpath, args.lanl_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('dnn autoencoder w/ identity covariance, no model norm, no input norm\n\n\n')
print('\n\ndnn autoencoder w/ identity covariance, no model norm, no input norm\n\n\n')
ok = os.system('python %s/dnn_agg.py %slanl_agg_head.txt /tmp %s/lanl_count_in_count_out_agg.json -dist ident -delimiter , -skipheader 2>> %s' % (modelpath, args.lanl_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('dnn autoencoder w/ full covariance, no model norm, no input norm\n\n\n')
print('\n\ndnn autoencoder w/ full covariance, no model norm, no input norm\n\n\n')
ok = os.system('python %s/dnn_agg.py %slanl_agg_head.txt /tmp %s/lanl_count_in_count_out_agg.json -dist full -delimiter , -skipheader 2>> %s' % (modelpath, args.lanl_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
# ==========================================cert agg lstm stuff=====================================================
with open(args.logfile, 'a') as log:
log.write('cert rnn autoencoder w/diag covariance, batch norm, input norm, replay\n\n\n')
print('\n\ncert rnn autoencoder w/diag covariance, batch norm, input norm, replay\n\n\n')
ok = os.system('python %s/lstm_agg.py %scert_head.csv /tmp %s/cert_all_in_all_out_agg.json -dist diag -norm batch -replay_ratio 2 2 2>> %s' % (modelpath, args.cert_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('cert rnn autoencoder w/diag covariance, batch norm, input norm\n\n\n')
print('\n\ncert rnn autoencoder w/diag covariance, batch norm, input norm\n\n\n')
ok = os.system('python %s/lstm_agg.py %scert_head.csv /tmp %s/cert_all_in_all_out_agg.json -dist diag -input_norm -alpha 0.5 -norm batch 2>> %s' % (modelpath, args.cert_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('cert rnn autoencoder w/diag covariance, layer norm, input norm\n\n\n')
print('\n\ncert rnn autoencoder w/diag covariance, layer norm, input norm\n\n\n')
ok = os.system('python %s/lstm_agg.py %scert_head.csv /tmp %s/cert_all_in_all_out_agg.json -dist diag -input_norm -alpha 0.5 -norm layer 2>> %s' % (modelpath, args.cert_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('cert rnn autoencoder w/ identity covariance, no model norm, input norm\n\n\n')
print('\n\ncert rnn autoencoder w/ identity covariance, no model norm, input norm\n\n\n')
ok = os.system('python %s/lstm_agg.py %scert_head.csv /tmp %s/cert_all_in_all_out_agg.json -dist ident -input_norm -alpha 0.5 2>> %s' % (modelpath, args.cert_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('cert rnn autoencoder w/ full covariance, no model norm, input norm\n\n\n')
print('\n\ncert rnn autoencoder w/ full covariance, no model norm, input norm\n\n\n')
ok = os.system('python %s/lstm_agg.py %scert_head.csv /tmp %s/cert_all_in_all_out_agg.json -dist full -input_norm -alpha 0.5 2>> %s' % (modelpath, args.cert_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('cert rnn autoencoder w/ diag covariance, no model norm, no input norm\n\n\n')
print('\n\ncert rnn autoencoder w/ diag covariance, no model norm, no input norm\n\n\n')
ok = os.system('python %s/lstm_agg.py %scert_head.csv /tmp %s/cert_all_in_all_out_agg.json -dist diag 2>> %s' % (modelpath, args.cert_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('cert rnn dnn autoencoder w/ identity covariance, no model norm, no input norm\n\n\n')
print('\n\ncert rnn dnn autoencoder w/ identity covariance, no model norm, no input norm\n\n\n')
ok = os.system('python %s/lstm_agg.py %scert_head.csv /tmp %s/cert_all_in_all_out_agg.json -dist ident 2>> %s' % (modelpath, args.cert_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('cert rnn dnn autoencoder w/ full covariance, no model norm, no input norm\n\n\n')
print('\n\ncert rnn dnn autoencoder w/ full covariance, no model norm, no input norm\n\n\n')
ok = os.system('python %s/lstm_agg.py %scert_head.csv /tmp %s/cert_all_in_all_out_agg.json -dist full 2>> %s' % (modelpath, args.cert_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
# ======================================lanl agg rnn stuff=======================================================
with open(args.logfile, 'a') as log:
log.write('rnn autoencoder w/diag covariance, batch norm, input norm, replay\n\n\n')
print('\n\nrnn autoencoder w/diag covariance, batch norm, input norm, replay\n\n\n')
ok = os.system('python %s/lstm_agg.py %slanl_agg_head.txt /tmp %s/lanl_count_in_count_out_agg.json -dist diag -norm batch -replay_ratio 2 2 -delimiter , -skipheader 2>> %s' % (modelpath, args.lanl_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('rnn autoencoder w/diag covariance, batch norm, input norm\n\n\n')
print('\n\nrnn autoencoder w/diag covariance, batch norm, input norm\n\n\n')
ok = os.system('python %s/lstm_agg.py %slanl_agg_head.txt /tmp %s/lanl_count_in_count_out_agg.json -dist diag -alpha 0.5 -norm batch -input_norm -delimiter , -skipheader 2>> %s' % (modelpath, args.lanl_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('rnn autoencoder w/diag covariance, layer norm, input norm\n\n\n')
print('\n\nrnn autoencoder w/diag covariance, layer norm, input norm\n\n\n')
ok = os.system('python %s/lstm_agg.py %slanl_agg_head.txt /tmp %s/lanl_count_in_count_out_agg.json -dist diag -input_norm -alpha 0.5 -norm layer -delimiter , -skipheader 2>> %s' % (modelpath, args.lanl_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('rnn autoencoder w/ identity covariance, no model norm, input norm\n\n\n')
print('\n\nrnn autoencoder w/ identity covariance, no model norm, input norm\n\n\n')
ok = os.system('python %s/lstm_agg.py %slanl_agg_head.txt /tmp %s/lanl_count_in_count_out_agg.json -dist ident -input_norm -alpha 0.5 -delimiter , -skipheader 2>> %s' % (modelpath, args.lanl_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('rnn autoencoder w/ full covariance, no model norm, input norm\n\n\n')
print('\n\nrnn autoencoder w/ full covariance, no model norm, input norm\n\n\n')
ok = os.system('python %s/lstm_agg.py %slanl_agg_head.txt /tmp %s/lanl_count_in_count_out_agg.json -dist full -input_norm -alpha 0.5 -delimiter , -skipheader 2>> %s' % (modelpath, args.lanl_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('rnn autoencoder w/ diag covariance, no model norm, no input norm\n\n\n')
print('\n\nrnn autoencoder w/ diag covariance, no model norm, no input norm\n\n\n')
ok = os.system('python %s/lstm_agg.py %slanl_agg_head.txt /tmp %s/lanl_count_in_count_out_agg.json -dist diag -delimiter , -skipheader 2>> %s' % (modelpath, args.lanl_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('rnn autoencoder w/ identity covariance, no model norm, no input norm\n\n\n')
print('\n\nrnn autoencoder w/ identity covariance, no model norm, no input norm\n\n\n')
ok = os.system('python %s/lstm_agg.py %slanl_agg_head.txt /tmp %s/lanl_count_in_count_out_agg.json -dist ident -delimiter , -skipheader 2>> %s' % (modelpath, args.lanl_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
log.write('rnn autoencoder w/ full covariance, no model norm, no input norm\n\n\n')
print('\n\nrnn autoencoder w/ full covariance, no model norm, no input norm\n\n\n')
ok = os.system('python %s/lstm_agg.py %slanl_agg_head.txt /tmp %s/lanl_count_in_count_out_agg.json -dist full -delimiter , -skipheader 2>> %s' % (modelpath, args.lanl_datapath, specpath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
print('Number finished: %s\nNumber failed: %s' % (num_finished, all_okay))
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/test/lanl_lm_tests.py | test/lanl_lm_tests.py | """
"""
# TODO: Change test calls to reflect good hyper-parameters for reference
# TODO: Add more tests for different model configurations
# TODO: Make new test script for running all tests on full data set
import os
import argparse
# Command-line interface: where the LANL language-model features live and
# where to append stderr output from the spawned test runs.
parser = argparse.ArgumentParser()
parser.add_argument('datapath', type=str, help="path to lanl parent directory of language model features.")
parser.add_argument('logfile', type=str, help="File to write stderr messages.")
args = parser.parse_args()
if not args.datapath.endswith('/'):
    args.datapath += '/'
# Locate the repo's model scripts and feature specs relative to this test file.
modelpath = '/'.join(os.path.realpath(__file__).split('/')[:-2]) + '/safekit/models'
specpath = '/'.join(os.path.realpath(__file__).split('/')[:-2]) + '/safekit/features/specs'
# num_finished tallies successful runs; all_okay tallies failures (despite its
# name it is reported as "Number failed" at the end).
num_finished = 0
all_okay = 0
# # ============================================================================
# # ================== SIMPLE LSTM =============================================
# # ============================================================================
with open(args.logfile, 'a') as log:
    log.write('simple word forward lstm\n\n\n')
print('simple word forward lstm\n\n\n')
# Each test shells out to a model script; os.system returns the child's exit
# status, so 0 means the run succeeded. stderr is appended to the logfile.
ok = os.system('python %s/simple_lm.py ./ %s/lm/lanl_word_config.json %sword_day_split/ -encoding word -skipsos -test -delimiter , 2>> %s' % (modelpath, specpath, args.datapath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
    log.write('simple word bidirectional lstm\n\n\n')
print('simple word bidirectional lstm\n\n\n')
ok = os.system('python %s/simple_lm.py ./ %s/lm/lanl_word_config.json %sword_day_split/ -encoding word -bidir -test -delimiter , 2>> %s' % (modelpath, specpath, args.datapath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
    log.write('simple raw forward lstm\n\n\n')
print('simple raw forward lstm\n\n\n')
ok = os.system('python %s/simple_lm.py ./ %s/lm/lanl_char_config.json %sraw_day_split/ -encoding raw -jagged -test -delimiter , 2>> %s' % (modelpath, specpath, args.datapath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
    log.write('simple raw bidirectional lstm\n\n\n')
print('simple raw bidirectional lstm\n\n\n')
ok = os.system('python %s/simple_lm.py ./ %s/lm/lanl_char_config.json %sraw_day_split/ -encoding raw -bidir -jagged -test -delimiter , 2>> %s' % (modelpath, specpath, args.datapath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
# # ============================================================================
# # ================== TIERED LSTM =============================================
# # ============================================================================
with open(args.logfile, 'a') as log:
    log.write('tiered word forward lstm\n\n\n')
print('tiered word forward lstm\n\n\n')
ok = os.system('python %s/tiered_lm.py ./ %s/lm/lanl_word_config.json %sword_day_split/ -encoding word -skipsos -test -delimiter , 2>> %s' % (modelpath, specpath, args.datapath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
    log.write('tiered word bidirectional lstm\n\n\n')
print('tiered word bidirectional lstm\n\n\n')
ok = os.system('python %s/tiered_lm.py ./ %s/lm/lanl_word_config.json %sword_day_split/ -encoding word -bidir -test -delimiter , 2>> %s' % (modelpath, specpath, args.datapath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
    log.write('tiered raw forward lstm\n\n\n')
print('tiered raw forward lstm\n\n\n')
ok = os.system('python %s/tiered_lm.py ./ %s/lm/lanl_char_config.json %sraw_day_split/ -encoding raw -skipsos -jagged -test -delimiter , 2>> %s' % (modelpath, specpath, args.datapath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
with open(args.logfile, 'a') as log:
    log.write('tiered raw bidirectional lstm\n\n\n')
print('tiered raw bidirectional lstm\n\n\n')
ok = os.system('python %s/tiered_lm.py ./ %s/lm/lanl_char_config.json %sraw_day_split/ -encoding raw -bidir -jagged -test -delimiter , 2>> %s' % (modelpath, specpath, args.datapath, args.logfile))
num_finished += ok == 0
all_okay += ok != 0
print('Number finished: %s\nNumber failed: %s' % (num_finished, all_okay))
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/conf.py | docs/conf.py | # -*- coding: utf-8 -*-
#
# safekit documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 5 17:42:22 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Make the local Sphinx extensions and the safekit package (and its
# subpackages) importable by autodoc when building from docs/.
sys.path.insert(1, os.path.abspath('sphinxext'))  # lets python know where to find your sphinx extensions
sys.path.insert(1, os.path.abspath('../safekit/'))  # This is so sphinx knows where to find your module
sys.path.insert(1, os.path.abspath('../safekit/models/'))  # This is so sphinx knows where to find your module
sys.path.insert(1, os.path.abspath('../safekit/features/'))  # This is so sphinx knows where to find your module
sys.path.insert(1, os.path.abspath('../safekit/features/lanl/'))  # This is so sphinx knows where to find your module
sys.path.insert(1, os.path.abspath('../safekit/analysis/'))  # This is so sphinx knows where to find your module
html_logo = '_static/pnnl.jpg'  # adds logo to documents pages.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinxarg.ext',
'sphinxcontrib.autoprogram'
]
# 'sphinxarg.ext',
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'safekit'
copyright = u'2017, Batelle Memorial Institute'
author = u'safekit_authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.01'
# The full version, including alpha/beta/rc tags.
release = u'0.01'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme" # Nice clean theme.
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'safekitdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'safekit.tex', u'safekit Documentation',
u'safekit\\_authors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'safekit', u'safekit Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'safekit', u'safekit Documentation',
author, 'safekit', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
autoclass_content = 'both'
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinxtogithub.py | docs/sphinxext/sphinxtogithub.py | #! /usr/bin/env python
from optparse import OptionParser
import os
import sys
import shutil
import codecs
class DirHelper(object):
    """Bundle of directory operations (isdir, listdir, walk, rmtree).

    Real ``os``/``shutil`` functions or test doubles are injected, so the
    rewriting machinery never touches the filesystem directly.
    """

    def __init__(self, is_dir, list_dir, walk, rmtree):
        # Store the injected callables under the names the rest of the
        # module expects.
        self.is_dir, self.list_dir = is_dir, list_dir
        self.walk, self.rmtree = walk, rmtree
class FileSystemHelper(object):
    """Bundle of file operations (open, path join, move, exists).

    Injected rather than imported so tests can substitute fakes and so an
    encoding-aware opener (``codecs.open``) can be plugged in.
    """

    def __init__(self, open_, path_join, move, exists):
        # Keep the attribute names used throughout the module.
        self.open_, self.path_join = open_, path_join
        self.move, self.exists = move, exists
class Replacer(object):
    """A single literal text substitution: every ``from_`` becomes ``to``."""

    def __init__(self, from_, to):
        self.from_ = from_
        self.to = to

    def process(self, text):
        """Return *text* with all occurrences of ``from_`` replaced by ``to``."""
        old, new = self.from_, self.to
        return text.replace(old, new)
class FileHandler(object):
    """Applies a series of replacements to the contents of a file in place.

    :param name: path of the file to rewrite.
    :param replacers: iterable of objects exposing ``process(text) -> text``.
    :param opener: callable ``opener(name, mode)`` returning a file-like
        object (injected so an encoding-aware opener can be used).
    """

    def __init__(self, name, replacers, opener):
        self.name = name
        self.replacers = replacers
        self.opener = opener

    def process(self):
        """Read the file, run every replacer over its text, write it back.

        Context managers guarantee both handles are closed promptly even if
        a replacer raises; the original left the handles open until GC.
        """
        with self.opener(self.name, "r") as handle:
            text = handle.read()
        for replacer in self.replacers:
            text = replacer.process(text)
        with self.opener(self.name, "w") as handle:
            handle.write(text)
class Remover(object):
    """Deletes a path, but only when it exists.

    ``exists`` and ``remove`` are injected callables, so the same class
    works for plain files and for directory trees (``shutil.rmtree``).
    """

    def __init__(self, exists, remove):
        self.exists = exists
        self.remove = remove

    def __call__(self, name):
        # Guard clause: a missing path needs no removal.
        if not self.exists(name):
            return
        self.remove(name)
class ForceRename(object):
    """Rename wrapper that removes the destination first, so the underlying
    rename cannot fail because the target already exists."""

    def __init__(self, renamer, remove):
        self.renamer = renamer
        self.remove = remove

    def __call__(self, from_, to):
        # Clear the target, then delegate the actual rename.
        self.remove(to)
        self.renamer(from_, to)
class VerboseRename(object):
    """Rename wrapper that logs each rename to *stream* before delegating."""

    def __init__(self, renamer, stream):
        self.renamer = renamer
        self.stream = stream

    def __call__(self, from_, to):
        # Log only the basenames: the root directory is the same for both.
        src, dst = os.path.basename(from_), os.path.basename(to)
        self.stream.write("Renaming directory '%s' -> '%s'\n" % (src, dst))
        self.renamer(from_, to)
class DirectoryHandler(object):
    """Encapsulates renaming a directory by removing its first character
    (e.g. ``_static`` -> ``static``)."""

    def __init__(self, name, root, renamer):
        self.name = name
        self.new_name = name[1:]          # drop the leading underscore
        self.root = root + os.sep
        self.renamer = renamer

    def path(self):
        """Absolute path of the (still underscore-prefixed) directory."""
        return os.path.join(self.root, self.name)

    def relative_path(self, directory, filename):
        """Path of *filename* inside *directory*, relative to the root."""
        rel = directory.replace(self.root, "", 1)
        return os.path.join(rel, filename)

    def new_relative_path(self, directory, filename):
        """Like :meth:`relative_path`, but under the renamed directory."""
        old = self.relative_path(directory, filename)
        return old.replace(self.name, self.new_name, 1)

    def process(self):
        """Perform the rename through the injected renamer callable."""
        source = os.path.join(self.root, self.name)
        target = os.path.join(self.root, self.new_name)
        self.renamer(source, target)
class HandlerFactory(object):
    """Creates file/directory handlers; exists so tests can substitute fakes."""

    def create_file_handler(self, name, replacers, opener):
        # Handler that rewrites one file's contents with the given replacers.
        return FileHandler(name, replacers, opener)

    def create_dir_handler(self, name, root, renamer):
        # Handler that renames one underscore-prefixed directory.
        return DirectoryHandler(name, root, renamer)
class OperationsFactory(object):
    """Creates the small operation wrappers; injected for testability."""

    def create_force_rename(self, renamer, remover):
        # Rename that first removes an existing target.
        return ForceRename(renamer, remover)

    def create_verbose_rename(self, renamer, stream):
        # Rename that logs to *stream* before delegating.
        return VerboseRename(renamer, stream)

    def create_replacer(self, from_, to):
        # Literal text substitution used when patching file contents.
        return Replacer(from_, to)

    def create_remover(self, exists, remove):
        # Existence-checked removal callable.
        return Remover(exists, remove)
class Layout(object):
    """
    Applies a set of operations which result in the layout
    of a directory changing.
    """

    def __init__(self, directory_handlers, file_handlers):
        self.directory_handlers = directory_handlers
        self.file_handlers = file_handlers

    def process(self):
        # File contents are patched first, then the directories they
        # reference are renamed — same order as the original implementation.
        for group in (self.file_handlers, self.directory_handlers):
            for handler in group:
                handler.process()
class NullLayout(object):
    """Do-nothing layout returned when no underscore directories were found."""

    def process(self):
        """Intentionally a no-op."""
        return None
class LayoutFactory(object):
    """Creates a layout object describing how a built HTML tree is rewritten
    for GitHub Pages: which files to patch and which underscore-prefixed
    directories to rename."""

    def __init__(self, operations_factory, handler_factory, file_helper, dir_helper, verbose, stream, force):
        # Injected collaborators. ``force`` controls whether an existing
        # rename target is removed first; ``verbose`` enables progress output
        # on ``stream``.
        self.operations_factory = operations_factory
        self.handler_factory = handler_factory
        self.file_helper = file_helper
        self.dir_helper = dir_helper
        self.verbose = verbose
        self.output_stream = stream
        self.force = force

    def create_layout(self, path):
        """Scan *path* and return a ``Layout`` (or ``NullLayout`` when there
        is nothing to rename)."""
        contents = self.dir_helper.list_dir(path)
        # Decorate the basic move operation according to the configuration.
        renamer = self.file_helper.move
        if self.force:
            remove = self.operations_factory.create_remover(self.file_helper.exists, self.dir_helper.rmtree)
            renamer = self.operations_factory.create_force_rename(renamer, remove)
        if self.verbose:
            renamer = self.operations_factory.create_verbose_rename(renamer, self.output_stream)
        # Build list of directories to process
        directories = [d for d in contents if self.is_underscore_dir(path, d)]
        underscore_directories = [
            self.handler_factory.create_dir_handler(d, path, renamer)
            for d in directories
        ]
        if not underscore_directories:
            if self.verbose:
                self.output_stream.write(
                    "No top level directories starting with an underscore "
                    "were found in '%s'\n" % path
                )
            # Nothing to rewrite: hand back the do-nothing layout.
            return NullLayout()
        # Build list of files that are in those directories
        replacers = []
        for handler in underscore_directories:
            for directory, dirs, files in self.dir_helper.walk(handler.path()):
                for f in files:
                    # One replacer per contained file: old relative path -> new.
                    replacers.append(
                        self.operations_factory.create_replacer(
                            handler.relative_path(directory, f),
                            handler.new_relative_path(directory, f)
                        )
                    )
        # Build list of handlers to process all files
        filelist = []
        for root, dirs, files in self.dir_helper.walk(path):
            for f in files:
                if f.endswith(".html"):
                    # HTML pages get every path replacement applied.
                    filelist.append(
                        self.handler_factory.create_file_handler(
                            self.file_helper.path_join(root, f),
                            replacers,
                            self.file_helper.open_)
                    )
                if f.endswith(".js"):
                    # Search-index JS only needs the '_sources' literal patched.
                    filelist.append(
                        self.handler_factory.create_file_handler(
                            self.file_helper.path_join(root, f),
                            [self.operations_factory.create_replacer("'_sources/'", "'sources/'")],
                            self.file_helper.open_
                        )
                    )
        return Layout(underscore_directories, filelist)

    def is_underscore_dir(self, path, directory):
        """True when *directory* under *path* is a directory whose name
        starts with an underscore."""
        return (self.dir_helper.is_dir(self.file_helper.path_join(path, directory))
            and directory.startswith("_"))
def sphinx_extension(app, exception):
    """Sphinx ``build-finished`` hook: rewrite HTML output for GitHub Pages.

    Renames the underscore-prefixed output directories (``_static``,
    ``_sources``, ...) and patches every reference to them in the generated
    HTML/JS, because GitHub Pages ignores paths starting with an underscore.

    :param app: the Sphinx application object.
    :param exception: exception raised by the main build, or ``None``.
    """
    if app.builder.name not in ("html", "dirhtml"):
        return
    if not app.config.sphinx_to_github:
        if app.config.sphinx_to_github_verbose:
            # print() works on both Python 2 and 3; the original used a
            # Python-2-only print statement, inconsistent with the rest of
            # the repository.
            print("Sphinx-to-github: Disabled, doing nothing.")
        return
    if exception:
        if app.config.sphinx_to_github_verbose:
            print("Sphinx-to-github: Exception raised in main build, doing nothing.")
        return
    # Wire the real filesystem operations into the helper objects.
    dir_helper = DirHelper(
        os.path.isdir,
        os.listdir,
        os.walk,
        shutil.rmtree
    )
    file_helper = FileSystemHelper(
        lambda f, mode: codecs.open(f, mode, app.config.sphinx_to_github_encoding),
        os.path.join,
        shutil.move,
        os.path.exists
    )
    operations_factory = OperationsFactory()
    handler_factory = HandlerFactory()
    layout_factory = LayoutFactory(
        operations_factory,
        handler_factory,
        file_helper,
        dir_helper,
        app.config.sphinx_to_github_verbose,
        sys.stdout,
        force=True  # the build output is disposable, so clobber targets
    )
    layout = layout_factory.create_layout(app.outdir)
    layout.process()
def setup(app):
    """Setup function for Sphinx Extension."""
    # Register the extension's configuration knobs with their defaults.
    for option, default in (("sphinx_to_github", True),
                            ("sphinx_to_github_verbose", True),
                            ("sphinx_to_github_encoding", 'utf-8')):
        app.add_config_value(option, default, '')
    # Run the rewriter once the HTML build has finished.
    app.connect("build-finished", sphinx_extension)
def main(args):
    """Command-line entry point: rewrite a built HTML directory in place.

    Expects the path to the Sphinx HTML output directory as the first
    positional argument; ``-v`` enables progress output and ``-e`` sets the
    file encoding used when reading/writing.
    """
    usage = "usage: %prog [options] <html directory>"
    parser = OptionParser(usage=usage)
    parser.add_option("-v","--verbose", action="store_true",
            dest="verbose", default=False, help="Provides verbose output")
    parser.add_option("-e","--encoding", action="store",
            dest="encoding", default="utf-8", help="Encoding for reading and writing files")
    opts, args = parser.parse_args(args)
    try:
        path = args[0]
    except IndexError:
        sys.stderr.write(
            "Error - Expecting path to html directory:"
            "sphinx-to-github <path>\n"
            )
        return
    # Wire the real filesystem operations into the helper objects.
    dir_helper = DirHelper(
        os.path.isdir,
        os.listdir,
        os.walk,
        shutil.rmtree
        )
    file_helper = FileSystemHelper(
        lambda f, mode: codecs.open(f, mode, opts.encoding),
        os.path.join,
        shutil.move,
        os.path.exists
        )
    operations_factory = OperationsFactory()
    handler_factory = HandlerFactory()
    layout_factory = LayoutFactory(
        operations_factory,
        handler_factory,
        file_helper,
        dir_helper,
        opts.verbose,
        sys.stdout,
        force=False  # unlike the Sphinx hook, never clobber an existing target
        )
    layout = layout_factory.create_layout(path)
    layout.process()


if __name__ == "__main__":
    main(sys.argv[1:])
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinxarg/parser.py | docs/sphinxext/sphinxarg/parser.py | from argparse import _HelpAction, _SubParsersAction
import re
class NavigationException(Exception):
    """Raised when a subcommand path cannot be resolved in a parsed parser tree."""
    pass
def parser_navigate(parser_result, path, current_path=None):
    """Walk ``parser_result``'s ``children`` following *path*.

    :param parser_result: dict produced by :func:`parse_parser`.
    :param path: a whitespace-separated string of subcommand names, or a
        list of names; the empty path returns ``parser_result`` itself.
        A list argument is consumed (popped) in place.
    :param current_path: accumulator of names already matched, used for
        error messages in recursive calls.
    :raises NavigationException: when a name has no matching child.
    """
    if isinstance(path, str):
        if path == '':
            return parser_result
        # Raw string: '\s' in a plain literal is an invalid escape sequence
        # (DeprecationWarning since Python 3.6, error in newer versions).
        path = re.split(r'\s+', path)
    current_path = current_path or []
    if not path:
        return parser_result
    if 'children' not in parser_result:
        raise NavigationException(
            'Current parser have no children elements. (path: %s)' %
            ' '.join(current_path))
    next_hop = path.pop(0)
    for child in parser_result['children']:
        if child['name'] == next_hop:
            current_path.append(next_hop)
            return parser_navigate(child, path, current_path)
    raise NavigationException(
        'Current parser have no children element with name: %s (path: %s)' % (
            next_hop, ' '.join(current_path)))
def _try_add_parser_attribute(data, parser, attribname):
attribval = getattr(parser, attribname, None)
if attribval is None:
return
if not isinstance(attribval, str):
return
if len(attribval) > 0:
data[attribname] = attribval
def _format_usage_without_prefix(parser):
"""
Use private argparse APIs to get the usage string without
the 'usage: ' prefix.
"""
fmt = parser._get_formatter()
fmt.add_usage(parser.usage, parser._actions,
parser._mutually_exclusive_groups, prefix='')
return fmt.format_help().strip()
def parse_parser(parser, data=None, **kwargs):
    """Recursively convert an ``argparse.ArgumentParser`` into a plain dict.

    The result carries ``usage``/``bare_usage``/``prog`` strings, positional
    arguments under ``args``, optional arguments under ``options`` and
    nested subcommand parsers under ``children``. Relies on argparse private
    attributes (``_get_positional_actions`` etc.), so it is version-sensitive.

    :param parser: the parser to describe.
    :param data: partially-filled dict for recursive calls; ``None`` at top level.
    :param kwargs: ``skip_default_values=True`` replaces option defaults with
        the '==SUPPRESS==' marker.
    """
    if data is None:
        # Top-level call: seed the dict describing this parser itself.
        data = {
            'name': '',
            'usage': parser.format_usage().strip(),
            'bare_usage': _format_usage_without_prefix(parser),
            'prog': parser.prog,
        }
    _try_add_parser_attribute(data, parser, 'description')
    _try_add_parser_attribute(data, parser, 'epilog')
    for action in parser._get_positional_actions():
        if isinstance(action, _HelpAction):
            continue
        if isinstance(action, _SubParsersAction):
            # Map each subcommand name to its one-line help text.
            helps = {}
            for item in action._choices_actions:
                helps[item.dest] = item.help
            # commands which share an existing parser are an alias,
            # don't duplicate docs
            subsection_alias = {}
            subsection_alias_names = set()
            for name, subaction in action._name_parser_map.items():
                if subaction not in subsection_alias:
                    subsection_alias[subaction] = []
                else:
                    subsection_alias[subaction].append(name)
                    subsection_alias_names.add(name)
            for name, subaction in action._name_parser_map.items():
                if name in subsection_alias_names:
                    continue
                subalias = subsection_alias[subaction]
                subaction.prog = '%s %s' % (parser.prog, name)
                subdata = {
                    'name': name if not subalias else
                        '%s (%s)' % (name, ', '.join(subalias)),
                    'help': helps.get(name, ''),
                    'usage': subaction.format_usage().strip(),
                    'bare_usage': _format_usage_without_prefix(subaction),
                }
                # Recurse into the subcommand's own parser.
                parse_parser(subaction, subdata, **kwargs)
                data.setdefault('children', []).append(subdata)
            continue
        if 'args' not in data:
            data['args'] = []
        arg = {
            'name': action.dest,
            'help': action.help or '',
            'metavar': action.metavar
        }
        if action.choices:
            arg['choices'] = action.choices
        data['args'].append(arg)
    # Defaults are shown unless the caller explicitly disabled them.
    show_defaults = (
        ('skip_default_values' not in kwargs)
        or (kwargs['skip_default_values'] is False))
    for action in parser._get_optional_actions():
        if isinstance(action, _HelpAction):
            continue
        if 'options' not in data:
            data['options'] = []
        option = {
            'name': action.option_strings,
            'default': action.default if show_defaults else '==SUPPRESS==',
            'help': action.help or ''
        }
        if action.choices:
            option['choices'] = action.choices
        # Options whose help contains the suppress marker are hidden entirely.
        if "==SUPPRESS==" not in option['help']:
            data['options'].append(option)
    return data
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinxarg/ext.py | docs/sphinxext/sphinxarg/ext.py | from argparse import ArgumentParser
import os
from docutils import nodes
from docutils.statemachine import StringList
from docutils.parsers.rst.directives import flag, unchanged
from sphinx.util.compat import Directive
from sphinx.util.nodes import nested_parse_with_titles
from sphinxarg.parser import parse_parser, parser_navigate
def map_nested_definitions(nested_content):
if nested_content is None:
raise Exception('Nested content should be iterable, not null')
# build definition dictionary
definitions = {}
for item in nested_content:
if not isinstance(item, nodes.definition_list):
continue
for subitem in item:
if not isinstance(subitem, nodes.definition_list_item):
continue
if not len(subitem.children) > 0:
continue
classifier = '@after'
idx = subitem.first_child_matching_class(nodes.classifier)
if idx is not None:
ci = subitem[idx]
if len(ci.children) > 0:
classifier = ci.children[0].astext()
if classifier is not None and classifier not in (
'@replace', '@before', '@after'):
raise Exception('Unknown classifier: %s' % classifier)
idx = subitem.first_child_matching_class(nodes.term)
if idx is not None:
ch = subitem[idx]
if len(ch.children) > 0:
term = ch.children[0].astext()
idx = subitem.first_child_matching_class(nodes.definition)
if idx is not None:
def_node = subitem[idx]
def_node.attributes['classifier'] = classifier
definitions[term] = def_node
return definitions
def print_arg_list(data, nested_content):
definitions = map_nested_definitions(nested_content)
items = []
if 'args' in data:
for arg in data['args']:
my_def = [nodes.paragraph(text=arg['help'])] if arg['help'] else []
name = arg['name']
my_def = apply_definition(definitions, my_def, name)
if len(my_def) == 0:
my_def.append(nodes.paragraph(text='Undocumented'))
if 'choices' in arg:
my_def.append(nodes.paragraph(
text=('Possible choices: %s' % ', '.join([str(c) for c in arg['choices']]))))
items.append(
nodes.option_list_item(
'', nodes.option_group('', nodes.option_string(text=name)),
nodes.description('', *my_def)))
return nodes.option_list('', *items) if items else None
def print_opt_list(data, nested_content):
definitions = map_nested_definitions(nested_content)
items = []
if 'options' in data:
for opt in data['options']:
names = []
my_def = [nodes.paragraph(text=opt['help'])] if opt['help'] else []
for name in opt['name']:
option_declaration = [nodes.option_string(text=name)]
if opt['default'] is not None \
and opt['default'] != '==SUPPRESS==':
option_declaration += nodes.option_argument(
'', text='=' + str(opt['default']))
names.append(nodes.option('', *option_declaration))
my_def = apply_definition(definitions, my_def, name)
if len(my_def) == 0:
my_def.append(nodes.paragraph(text='Undocumented'))
if 'choices' in opt:
my_def.append(nodes.paragraph(
text=('Possible choices: %s' % ', '.join([str(c) for c in opt['choices']]))))
items.append(
nodes.option_list_item(
'', nodes.option_group('', *names),
nodes.description('', *my_def)))
return nodes.option_list('', *items) if items else None
def print_command_args_and_opts(arg_list, opt_list, sub_list=None):
items = []
if arg_list:
items.append(nodes.definition_list_item(
'', nodes.term(text='Positional arguments:'),
nodes.definition('', arg_list)))
if opt_list:
items.append(nodes.definition_list_item(
'', nodes.term(text='Options:'),
nodes.definition('', opt_list)))
if sub_list and len(sub_list):
items.append(nodes.definition_list_item(
'', nodes.term(text='Sub-commands:'),
nodes.definition('', sub_list)))
return nodes.definition_list('', *items)
def apply_definition(definitions, my_def, name):
if name in definitions:
definition = definitions[name]
classifier = definition['classifier']
if classifier == '@replace':
return definition.children
if classifier == '@after':
return my_def + definition.children
if classifier == '@before':
return definition.children + my_def
raise Exception('Unknown classifier: %s' % classifier)
return my_def
def print_subcommand_list(data, nested_content):
definitions = map_nested_definitions(nested_content)
items = []
if 'children' in data:
for child in data['children']:
my_def = [nodes.paragraph(
text=child['help'])] if child['help'] else []
name = child['name']
my_def = apply_definition(definitions, my_def, name)
if len(my_def) == 0:
my_def.append(nodes.paragraph(text='Undocumented'))
if 'description' in child:
my_def.append(nodes.paragraph(text=child['description']))
my_def.append(nodes.literal_block(text=child['usage']))
my_def.append(print_command_args_and_opts(
print_arg_list(child, nested_content),
print_opt_list(child, nested_content),
print_subcommand_list(child, nested_content)
))
items.append(
nodes.definition_list_item(
'',
nodes.term('', '', nodes.strong(text=name)),
nodes.definition('', *my_def)
)
)
return nodes.definition_list('', *items)
class ArgParseDirective(Directive):
has_content = True
option_spec = dict(module=unchanged, func=unchanged, ref=unchanged,
prog=unchanged, path=unchanged, nodefault=flag,
manpage=unchanged, nosubcommands=unchanged, passparser=flag)
def _construct_manpage_specific_structure(self, parser_info):
"""
Construct a typical man page consisting of the following elements:
NAME (automatically generated, out of our control)
SYNOPSIS
DESCRIPTION
OPTIONS
FILES
SEE ALSO
BUGS
"""
# SYNOPSIS section
synopsis_section = nodes.section(
'',
nodes.title(text='Synopsis'),
nodes.literal_block(text=parser_info["bare_usage"]),
ids=['synopsis-section'])
# DESCRIPTION section
description_section = nodes.section(
'',
nodes.title(text='Description'),
nodes.paragraph(text=parser_info.get(
'description', parser_info.get(
'help', "undocumented").capitalize())),
ids=['description-section'])
nested_parse_with_titles(
self.state, self.content, description_section)
if parser_info.get('epilog'):
# TODO: do whatever sphinx does to understand ReST inside
# docstrings magically imported from other places. The nested
# parse method invoked above seem to be able to do this but
# I haven't found a way to do it for arbitrary text
description_section += nodes.paragraph(
text=parser_info['epilog'])
# OPTIONS section
options_section = nodes.section(
'',
nodes.title(text='Options'),
ids=['options-section'])
if 'args' in parser_info:
options_section += nodes.paragraph()
options_section += nodes.subtitle(text='Positional arguments:')
options_section += self._format_positional_arguments(parser_info)
if 'options' in parser_info:
options_section += nodes.paragraph()
options_section += nodes.subtitle(text='Optional arguments:')
options_section += self._format_optional_arguments(parser_info)
items = [
# NOTE: we cannot generate NAME ourselves. It is generated by
# docutils.writers.manpage
synopsis_section,
description_section,
# TODO: files
# TODO: see also
# TODO: bugs
]
if len(options_section.children) > 1:
items.append(options_section)
if 'nosubcommands' not in self.options:
# SUBCOMMANDS section (non-standard)
subcommands_section = nodes.section(
'',
nodes.title(text='Sub-Commands'),
ids=['subcommands-section'])
if 'children' in parser_info:
subcommands_section += self._format_subcommands(parser_info)
if len(subcommands_section) > 1:
items.append(subcommands_section)
if os.getenv("INCLUDE_DEBUG_SECTION"):
import json
# DEBUG section (non-standard)
debug_section = nodes.section(
'',
nodes.title(text="Argparse + Sphinx Debugging"),
nodes.literal_block(text=json.dumps(parser_info, indent=' ')),
ids=['debug-section'])
items.append(debug_section)
return items
def _format_positional_arguments(self, parser_info):
assert 'args' in parser_info
items = []
for arg in parser_info['args']:
arg_items = []
if arg['help']:
arg_items.append(nodes.paragraph(text=arg['help']))
else:
arg_items.append(nodes.paragraph(text='Undocumented'))
if 'choices' in arg:
arg_items.append(
nodes.paragraph(
text='Possible choices: ' + ', '.join(arg['choices'])))
items.append(
nodes.option_list_item(
'',
nodes.option_group(
'', nodes.option(
'', nodes.option_string(text=arg['metavar'])
)
),
nodes.description('', *arg_items)))
return nodes.option_list('', *items)
def _format_optional_arguments(self, parser_info):
assert 'options' in parser_info
items = []
for opt in parser_info['options']:
names = []
opt_items = []
for name in opt['name']:
option_declaration = [nodes.option_string(text=name)]
if opt['default'] is not None \
and opt['default'] != '==SUPPRESS==':
option_declaration += nodes.option_argument(
'', text='=' + str(opt['default']))
names.append(nodes.option('', *option_declaration))
if opt['help']:
opt_items.append(nodes.paragraph(text=opt['help']))
else:
opt_items.append(nodes.paragraph(text='Undocumented'))
if 'choices' in opt:
opt_items.append(
nodes.paragraph(
text='Possible choices: ' + ', '.join(opt['choices'])))
items.append(
nodes.option_list_item(
'', nodes.option_group('', *names),
nodes.description('', *opt_items)))
return nodes.option_list('', *items)
def _format_subcommands(self, parser_info):
assert 'children' in parser_info
items = []
for subcmd in parser_info['children']:
subcmd_items = []
if subcmd['help']:
subcmd_items.append(nodes.paragraph(text=subcmd['help']))
else:
subcmd_items.append(nodes.paragraph(text='Undocumented'))
items.append(
nodes.definition_list_item(
'',
nodes.term('', '', nodes.strong(
text=subcmd['bare_usage'])),
nodes.definition('', *subcmd_items)))
return nodes.definition_list('', *items)
def _nested_parse_paragraph(self, text):
content = nodes.paragraph()
self.state.nested_parse(StringList(text.split("\n")), 0, content)
return content
def run(self):
if 'module' in self.options and 'func' in self.options:
module_name = self.options['module']
attr_name = self.options['func']
elif 'ref' in self.options:
_parts = self.options['ref'].split('.')
module_name = '.'.join(_parts[0:-1])
attr_name = _parts[-1]
else:
raise self.error(
':module: and :func: should be specified, or :ref:')
mod = __import__(module_name, globals(), locals(), [attr_name])
if not hasattr(mod, attr_name):
raise self.error((
'Module "%s" has no attribute "%s"\n'
'Incorrect argparse :module: or :func: values?'
) % (module_name, attr_name))
func = getattr(mod, attr_name)
if isinstance(func, ArgumentParser):
parser = func
elif 'passparser' in self.options:
parser = ArgumentParser()
func(parser)
else:
parser = func()
if 'path' not in self.options:
self.options['path'] = ''
path = str(self.options['path'])
if 'prog' in self.options:
parser.prog = self.options['prog']
result = parse_parser(
parser, skip_default_values='nodefault' in self.options)
result = parser_navigate(result, path)
if 'manpage' in self.options:
return self._construct_manpage_specific_structure(result)
nested_content = nodes.paragraph()
self.state.nested_parse(
self.content, self.content_offset, nested_content)
nested_content = nested_content.children
items = []
# add common content between
for item in nested_content:
if not isinstance(item, nodes.definition_list):
items.append(item)
if 'description' in result:
items.append(self._nested_parse_paragraph(result['description']))
items.append(nodes.literal_block(text=result['usage']))
items.append(print_command_args_and_opts(
print_arg_list(result, nested_content),
print_opt_list(result, nested_content),
print_subcommand_list(result, nested_content)
))
if 'epilog' in result:
items.append(self._nested_parse_paragraph(result['epilog']))
return items
def setup(app):
app.add_directive('argparse', ArgParseDirective)
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinxarg/__init__.py | docs/sphinxext/sphinxarg/__init__.py | python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false | |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinx-argparse-0.1.15/setup.py | docs/sphinxext/sphinx-argparse-0.1.15/setup.py | import sys
from setuptools import setup
# from tests import PyTest
deps = ["sphinx"]
if sys.version_info[:2] == (2, 6):
deps.append('argparse')
setup(
name='sphinx-argparse',
version='0.1.15',
packages=[
'sphinxarg',
],
url='',
license='MIT',
author='Aleksandr Rudakov',
author_email='ribozz@gmail.com',
description='Sphinx extension that automatically document argparse commands and options',
long_description='',
install_requires=deps,
extras_require={
'dev': ['pytest', 'sphinx_rtd_theme', 'sphinx'],
}
)
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinx-argparse-0.1.15/sphinxarg/parser.py | docs/sphinxext/sphinx-argparse-0.1.15/sphinxarg/parser.py | from argparse import _HelpAction, _SubParsersAction
import re
class NavigationException(Exception):
pass
def parser_navigate(parser_result, path, current_path=None):
if isinstance(path, str):
if path == '':
return parser_result
path = re.split('\s+', path)
current_path = current_path or []
if len(path) == 0:
return parser_result
if 'children' not in parser_result:
raise NavigationException(
'Current parser have no children elements. (path: %s)' %
' '.join(current_path))
next_hop = path.pop(0)
for child in parser_result['children']:
if child['name'] == next_hop:
current_path.append(next_hop)
return parser_navigate(child, path, current_path)
raise NavigationException(
'Current parser have no children element with name: %s (path: %s)' % (
next_hop, ' '.join(current_path)))
def _try_add_parser_attribute(data, parser, attribname):
attribval = getattr(parser, attribname, None)
if attribval is None:
return
if not isinstance(attribval, str):
return
if len(attribval) > 0:
data[attribname] = attribval
def _format_usage_without_prefix(parser):
"""
Use private argparse APIs to get the usage string without
the 'usage: ' prefix.
"""
fmt = parser._get_formatter()
fmt.add_usage(parser.usage, parser._actions,
parser._mutually_exclusive_groups, prefix='')
return fmt.format_help().strip()
def parse_parser(parser, data=None, **kwargs):
if data is None:
data = {
'name': '',
'usage': parser.format_usage().strip(),
'bare_usage': _format_usage_without_prefix(parser),
'prog': parser.prog,
}
_try_add_parser_attribute(data, parser, 'description')
_try_add_parser_attribute(data, parser, 'epilog')
for action in parser._get_positional_actions():
if isinstance(action, _HelpAction):
continue
if isinstance(action, _SubParsersAction):
helps = {}
for item in action._choices_actions:
helps[item.dest] = item.help
# commands which share an existing parser are an alias,
# don't duplicate docs
subsection_alias = {}
subsection_alias_names = set()
for name, subaction in action._name_parser_map.items():
if subaction not in subsection_alias:
subsection_alias[subaction] = []
else:
subsection_alias[subaction].append(name)
subsection_alias_names.add(name)
for name, subaction in action._name_parser_map.items():
if name in subsection_alias_names:
continue
subalias = subsection_alias[subaction]
subaction.prog = '%s %s' % (parser.prog, name)
subdata = {
'name': name if not subalias else
'%s (%s)' % (name, ', '.join(subalias)),
'help': helps.get(name, ''),
'usage': subaction.format_usage().strip(),
'bare_usage': _format_usage_without_prefix(subaction),
}
parse_parser(subaction, subdata, **kwargs)
data.setdefault('children', []).append(subdata)
continue
if 'args' not in data:
data['args'] = []
arg = {
'name': action.dest,
'help': action.help or '',
'metavar': action.metavar
}
if action.choices:
arg['choices'] = action.choices
data['args'].append(arg)
show_defaults = (
('skip_default_values' not in kwargs)
or (kwargs['skip_default_values'] is False))
for action in parser._get_optional_actions():
if isinstance(action, _HelpAction):
continue
if 'options' not in data:
data['options'] = []
option = {
'name': action.option_strings,
'default': action.default if show_defaults else '==SUPPRESS==',
'help': action.help or ''
}
if action.choices:
option['choices'] = action.choices
if "==SUPPRESS==" not in option['help']:
data['options'].append(option)
return data
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinx-argparse-0.1.15/sphinxarg/ext.py | docs/sphinxext/sphinx-argparse-0.1.15/sphinxarg/ext.py | from argparse import ArgumentParser
import os
from docutils import nodes
from docutils.statemachine import StringList
from docutils.parsers.rst.directives import flag, unchanged
from sphinx.util.compat import Directive
from sphinx.util.nodes import nested_parse_with_titles
from sphinxarg.parser import parse_parser, parser_navigate
def map_nested_definitions(nested_content):
if nested_content is None:
raise Exception('Nested content should be iterable, not null')
# build definition dictionary
definitions = {}
for item in nested_content:
if not isinstance(item, nodes.definition_list):
continue
for subitem in item:
if not isinstance(subitem, nodes.definition_list_item):
continue
if not len(subitem.children) > 0:
continue
classifier = '@after'
idx = subitem.first_child_matching_class(nodes.classifier)
if idx is not None:
ci = subitem[idx]
if len(ci.children) > 0:
classifier = ci.children[0].astext()
if classifier is not None and classifier not in (
'@replace', '@before', '@after'):
raise Exception('Unknown classifier: %s' % classifier)
idx = subitem.first_child_matching_class(nodes.term)
if idx is not None:
ch = subitem[idx]
if len(ch.children) > 0:
term = ch.children[0].astext()
idx = subitem.first_child_matching_class(nodes.definition)
if idx is not None:
def_node = subitem[idx]
def_node.attributes['classifier'] = classifier
definitions[term] = def_node
return definitions
def print_arg_list(data, nested_content):
definitions = map_nested_definitions(nested_content)
items = []
if 'args' in data:
for arg in data['args']:
my_def = [nodes.paragraph(text=arg['help'])] if arg['help'] else []
name = arg['name']
my_def = apply_definition(definitions, my_def, name)
if len(my_def) == 0:
my_def.append(nodes.paragraph(text='Undocumented'))
if 'choices' in arg:
my_def.append(nodes.paragraph(
text=('Possible choices: %s' % ', '.join([str(c) for c in arg['choices']]))))
items.append(
nodes.option_list_item(
'', nodes.option_group('', nodes.option_string(text=name)),
nodes.description('', *my_def)))
return nodes.option_list('', *items) if items else None
def print_opt_list(data, nested_content):
definitions = map_nested_definitions(nested_content)
items = []
if 'options' in data:
for opt in data['options']:
names = []
my_def = [nodes.paragraph(text=opt['help'])] if opt['help'] else []
for name in opt['name']:
option_declaration = [nodes.option_string(text=name)]
if opt['default'] is not None \
and opt['default'] != '==SUPPRESS==':
option_declaration += nodes.option_argument(
'', text='=' + str(opt['default']))
names.append(nodes.option('', *option_declaration))
my_def = apply_definition(definitions, my_def, name)
if len(my_def) == 0:
my_def.append(nodes.paragraph(text='Undocumented'))
if 'choices' in opt:
my_def.append(nodes.paragraph(
text=('Possible choices: %s' % ', '.join([str(c) for c in opt['choices']]))))
items.append(
nodes.option_list_item(
'', nodes.option_group('', *names),
nodes.description('', *my_def)))
return nodes.option_list('', *items) if items else None
def print_command_args_and_opts(arg_list, opt_list, sub_list=None):
items = []
if arg_list:
items.append(nodes.definition_list_item(
'', nodes.term(text='Positional arguments:'),
nodes.definition('', arg_list)))
if opt_list:
items.append(nodes.definition_list_item(
'', nodes.term(text='Options:'),
nodes.definition('', opt_list)))
if sub_list and len(sub_list):
items.append(nodes.definition_list_item(
'', nodes.term(text='Sub-commands:'),
nodes.definition('', sub_list)))
return nodes.definition_list('', *items)
def apply_definition(definitions, my_def, name):
if name in definitions:
definition = definitions[name]
classifier = definition['classifier']
if classifier == '@replace':
return definition.children
if classifier == '@after':
return my_def + definition.children
if classifier == '@before':
return definition.children + my_def
raise Exception('Unknown classifier: %s' % classifier)
return my_def
def print_subcommand_list(data, nested_content):
definitions = map_nested_definitions(nested_content)
items = []
if 'children' in data:
for child in data['children']:
my_def = [nodes.paragraph(
text=child['help'])] if child['help'] else []
name = child['name']
my_def = apply_definition(definitions, my_def, name)
if len(my_def) == 0:
my_def.append(nodes.paragraph(text='Undocumented'))
if 'description' in child:
my_def.append(nodes.paragraph(text=child['description']))
my_def.append(nodes.literal_block(text=child['usage']))
my_def.append(print_command_args_and_opts(
print_arg_list(child, nested_content),
print_opt_list(child, nested_content),
print_subcommand_list(child, nested_content)
))
items.append(
nodes.definition_list_item(
'',
nodes.term('', '', nodes.strong(text=name)),
nodes.definition('', *my_def)
)
)
return nodes.definition_list('', *items)
class ArgParseDirective(Directive):
has_content = True
option_spec = dict(module=unchanged, func=unchanged, ref=unchanged,
prog=unchanged, path=unchanged, nodefault=flag,
manpage=unchanged, nosubcommands=unchanged, passparser=flag)
def _construct_manpage_specific_structure(self, parser_info):
"""
Construct a typical man page consisting of the following elements:
NAME (automatically generated, out of our control)
SYNOPSIS
DESCRIPTION
OPTIONS
FILES
SEE ALSO
BUGS
"""
# SYNOPSIS section
synopsis_section = nodes.section(
'',
nodes.title(text='Synopsis'),
nodes.literal_block(text=parser_info["bare_usage"]),
ids=['synopsis-section'])
# DESCRIPTION section
description_section = nodes.section(
'',
nodes.title(text='Description'),
nodes.paragraph(text=parser_info.get(
'description', parser_info.get(
'help', "undocumented").capitalize())),
ids=['description-section'])
nested_parse_with_titles(
self.state, self.content, description_section)
if parser_info.get('epilog'):
# TODO: do whatever sphinx does to understand ReST inside
# docstrings magically imported from other places. The nested
# parse method invoked above seem to be able to do this but
# I haven't found a way to do it for arbitrary text
description_section += nodes.paragraph(
text=parser_info['epilog'])
# OPTIONS section
options_section = nodes.section(
'',
nodes.title(text='Options'),
ids=['options-section'])
if 'args' in parser_info:
options_section += nodes.paragraph()
options_section += nodes.subtitle(text='Positional arguments:')
options_section += self._format_positional_arguments(parser_info)
if 'options' in parser_info:
options_section += nodes.paragraph()
options_section += nodes.subtitle(text='Optional arguments:')
options_section += self._format_optional_arguments(parser_info)
items = [
# NOTE: we cannot generate NAME ourselves. It is generated by
# docutils.writers.manpage
synopsis_section,
description_section,
# TODO: files
# TODO: see also
# TODO: bugs
]
if len(options_section.children) > 1:
items.append(options_section)
if 'nosubcommands' not in self.options:
# SUBCOMMANDS section (non-standard)
subcommands_section = nodes.section(
'',
nodes.title(text='Sub-Commands'),
ids=['subcommands-section'])
if 'children' in parser_info:
subcommands_section += self._format_subcommands(parser_info)
if len(subcommands_section) > 1:
items.append(subcommands_section)
if os.getenv("INCLUDE_DEBUG_SECTION"):
import json
# DEBUG section (non-standard)
debug_section = nodes.section(
'',
nodes.title(text="Argparse + Sphinx Debugging"),
nodes.literal_block(text=json.dumps(parser_info, indent=' ')),
ids=['debug-section'])
items.append(debug_section)
return items
def _format_positional_arguments(self, parser_info):
assert 'args' in parser_info
items = []
for arg in parser_info['args']:
arg_items = []
if arg['help']:
arg_items.append(nodes.paragraph(text=arg['help']))
else:
arg_items.append(nodes.paragraph(text='Undocumented'))
if 'choices' in arg:
arg_items.append(
nodes.paragraph(
text='Possible choices: ' + ', '.join(arg['choices'])))
items.append(
nodes.option_list_item(
'',
nodes.option_group(
'', nodes.option(
'', nodes.option_string(text=arg['metavar'])
)
),
nodes.description('', *arg_items)))
return nodes.option_list('', *items)
def _format_optional_arguments(self, parser_info):
assert 'options' in parser_info
items = []
for opt in parser_info['options']:
names = []
opt_items = []
for name in opt['name']:
option_declaration = [nodes.option_string(text=name)]
if opt['default'] is not None \
and opt['default'] != '==SUPPRESS==':
option_declaration += nodes.option_argument(
'', text='=' + str(opt['default']))
names.append(nodes.option('', *option_declaration))
if opt['help']:
opt_items.append(nodes.paragraph(text=opt['help']))
else:
opt_items.append(nodes.paragraph(text='Undocumented'))
if 'choices' in opt:
opt_items.append(
nodes.paragraph(
text='Possible choices: ' + ', '.join(opt['choices'])))
items.append(
nodes.option_list_item(
'', nodes.option_group('', *names),
nodes.description('', *opt_items)))
return nodes.option_list('', *items)
def _format_subcommands(self, parser_info):
assert 'children' in parser_info
items = []
for subcmd in parser_info['children']:
subcmd_items = []
if subcmd['help']:
subcmd_items.append(nodes.paragraph(text=subcmd['help']))
else:
subcmd_items.append(nodes.paragraph(text='Undocumented'))
items.append(
nodes.definition_list_item(
'',
nodes.term('', '', nodes.strong(
text=subcmd['bare_usage'])),
nodes.definition('', *subcmd_items)))
return nodes.definition_list('', *items)
def _nested_parse_paragraph(self, text):
content = nodes.paragraph()
self.state.nested_parse(StringList(text.split("\n")), 0, content)
return content
def run(self):
if 'module' in self.options and 'func' in self.options:
module_name = self.options['module']
attr_name = self.options['func']
elif 'ref' in self.options:
_parts = self.options['ref'].split('.')
module_name = '.'.join(_parts[0:-1])
attr_name = _parts[-1]
else:
raise self.error(
':module: and :func: should be specified, or :ref:')
mod = __import__(module_name, globals(), locals(), [attr_name])
if not hasattr(mod, attr_name):
raise self.error((
'Module "%s" has no attribute "%s"\n'
'Incorrect argparse :module: or :func: values?'
) % (module_name, attr_name))
func = getattr(mod, attr_name)
if isinstance(func, ArgumentParser):
parser = func
elif 'passparser' in self.options:
parser = ArgumentParser()
func(parser)
else:
parser = func()
if 'path' not in self.options:
self.options['path'] = ''
path = str(self.options['path'])
if 'prog' in self.options:
parser.prog = self.options['prog']
result = parse_parser(
parser, skip_default_values='nodefault' in self.options)
result = parser_navigate(result, path)
if 'manpage' in self.options:
return self._construct_manpage_specific_structure(result)
nested_content = nodes.paragraph()
self.state.nested_parse(
self.content, self.content_offset, nested_content)
nested_content = nested_content.children
items = []
# add common content between
for item in nested_content:
if not isinstance(item, nodes.definition_list):
items.append(item)
if 'description' in result:
items.append(self._nested_parse_paragraph(result['description']))
items.append(nodes.literal_block(text=result['usage']))
items.append(print_command_args_and_opts(
print_arg_list(result, nested_content),
print_opt_list(result, nested_content),
print_subcommand_list(result, nested_content)
))
if 'epilog' in result:
items.append(self._nested_parse_paragraph(result['epilog']))
return items
def setup(app):
app.add_directive('argparse', ArgParseDirective)
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinx-argparse-0.1.15/sphinxarg/__init__.py | docs/sphinxext/sphinx-argparse-0.1.15/sphinxarg/__init__.py | python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false | |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinx-argparse-0.1.15/test/test_parser.py | docs/sphinxext/sphinx-argparse-0.1.15/test/test_parser.py | import argparse
import json
from pprint import pprint
from sphinxarg.parser import parse_parser, parser_navigate
def test_parse_options():
parser = argparse.ArgumentParser()
parser.add_argument('--foo', action='store_true', default=False, help='foo help')
parser.add_argument('--bar', action='store_true', default=False)
data = parse_parser(parser)
assert data['options'] == [
{
'name': ['--foo'],
'default': False,
'help': 'foo help'
}, {
'name': ['--bar'],
'default': False,
'help': ''
},
]
def test_parse_default():
parser = argparse.ArgumentParser()
parser.add_argument('--foo', default='123')
data = parse_parser(parser)
assert data['options'] == [
{
'name': ['--foo'],
'default': '123',
'help': ''
}
]
def test_parse_arg_choices():
parser = argparse.ArgumentParser()
parser.add_argument('move', choices=['rock', 'paper', 'scissors'])
data = parse_parser(parser)
assert data['args'] == [
{
'name': 'move',
'help': '',
'choices': ['rock', 'paper', 'scissors'],
'metavar': None
}
]
def test_parse_opt_choices():
parser = argparse.ArgumentParser()
parser.add_argument('--move', choices=['rock', 'paper', 'scissors'])
data = parse_parser(parser)
assert data['options'] == [
{
'name': ['--move'],
'default': None,
'help': '',
'choices': ['rock', 'paper', 'scissors']
}
]
def test_parse_default_skip_default():
parser = argparse.ArgumentParser()
parser.add_argument('--foo', default='123')
data = parse_parser(parser, skip_default_values=True)
assert data['options'] == [
{
'name': ['--foo'],
'default': '==SUPPRESS==',
'help': ''
}
]
def test_parse_positional():
parser = argparse.ArgumentParser()
parser.add_argument('foo', default=False, help='foo help')
parser.add_argument('bar', default=False)
data = parse_parser(parser)
assert data['args'] == [
{
'name': 'foo',
'help': 'foo help',
'metavar': None
}, {
'name': 'bar',
'help': '',
'metavar': None
},
]
def test_parse_description():
parser = argparse.ArgumentParser(description='described', epilog='epilogged')
parser.add_argument('foo', default=False, help='foo help')
parser.add_argument('bar', default=False)
data = parse_parser(parser)
assert data['description'] == 'described'
assert data['epilog'] == 'epilogged'
assert data['args'] == [
{
'name': 'foo',
'help': 'foo help',
'metavar': None
}, {
'name': 'bar',
'help': '',
'metavar': None
},
]
def test_parse_nested():
parser = argparse.ArgumentParser()
parser.add_argument('foo', default=False, help='foo help')
parser.add_argument('bar', default=False)
subparsers = parser.add_subparsers()
subparser = subparsers.add_parser('install', help='install help')
subparser.add_argument('ref', type=str, help='foo1 help')
subparser.add_argument('--upgrade', action='store_true', default=False, help='foo2 help')
data = parse_parser(parser)
assert data['args'] == [
{
'name': 'foo',
'help': 'foo help',
'metavar': None
}, {
'name': 'bar',
'help': '',
'metavar': None
},
]
assert data['children'] == [
{
'name': 'install',
'help': 'install help',
'usage': 'usage: py.test install [-h] [--upgrade] ref',
'bare_usage': 'py.test install [-h] [--upgrade] ref',
'args': [
{
'name': 'ref',
'help': 'foo1 help',
'metavar': None
},
],
'options': [
{
'name': ['--upgrade'],
'default': False,
'help': 'foo2 help'
},
]
},
]
def test_parse_nested_traversal():
parser = argparse.ArgumentParser()
subparsers1 = parser.add_subparsers()
subparser1 = subparsers1.add_parser('level1')
subparsers2 = subparser1.add_subparsers()
subparser2 = subparsers2.add_parser('level2')
subparsers3 = subparser2.add_subparsers()
subparser3 = subparsers3.add_parser('level3')
subparser3.add_argument('foo', help='foo help')
subparser3.add_argument('bar')
data = parse_parser(parser)
data3 = parser_navigate(data, 'level1 level2 level3')
assert data3['args'] == [
{
'name': 'foo',
'help': 'foo help',
'metavar': None
}, {
'name': 'bar',
'help': '',
'metavar': None
},
]
data2 = parser_navigate(data, 'level1 level2')
assert data2['children'] == [
{
'name': 'level3',
'help': '',
'usage': 'usage: py.test level1 level2 level3 [-h] foo bar',
'bare_usage': 'py.test level1 level2 level3 [-h] foo bar',
'args': [
{
'name': 'foo',
'help': 'foo help',
'metavar': None
},
{
'name': 'bar',
'help': '',
'metavar': None
},
],
}
]
assert data == parser_navigate(data, '') | python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinxtogithub-1.1.0/setup.py | docs/sphinxext/sphinxtogithub-1.1.0/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os
import unittest
try:
from setuptools import setup, find_packages, Command
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages, Command
import sphinxtogithub
from sphinxtogithub.tests import (
filehandler,
directoryhandler,
replacer,
renamer,
remover,
layout,
layoutfactory,
setup as setuptest,
)
class RunTests(Command):
    """setup.py command: runs the full sphinxtogithub unit-test suite."""

    description = "Run the sphinxtogithub test suite."
    user_options = []
    def initialize_options(self):
        # Required by the distutils Command interface; no options to set up.
        pass
    def finalize_options(self):
        # Required by the distutils Command interface; nothing to validate.
        pass
    def run(self):
        # Collect each test module's suite and run them as one combined suite.
        suites = [
            filehandler.testSuite(),
            directoryhandler.testSuite(),
            replacer.testSuite(),
            renamer.testSuite(),
            remover.testSuite(),
            layout.testSuite(),
            layoutfactory.testSuite(),
            setuptest.testSuite(),
            ]
        suite = unittest.TestSuite(suites)
        runner = unittest.TextTestRunner()
        runner.run(suite)
class Publish(Command):
    """setup.py command: builds an sdist and uploads it to PyPI."""

    description = "Publish package to PyPi"
    user_options = []
    def initialize_options(self):
        # Required by the distutils Command interface; no options to set up.
        pass
    def finalize_options(self):
        # Required by the distutils Command interface; nothing to validate.
        pass
    def run(self):
        """Publish to PyPi"""
        # NOTE(review): shells out to the legacy "sdist upload" flow.
        os.system("python setup.py sdist upload")
# The PyPI long description is taken verbatim from the README.
long_description = codecs.open("README.rst", "r", "utf-8").read()
# Package metadata is sourced from the sphinxtogithub module's dunder
# attributes so it is defined in exactly one place.
setup(
    name='sphinxtogithub',
    version=sphinxtogithub.__version__,
    description=sphinxtogithub.__doc__,
    author=sphinxtogithub.__author__,
    author_email=sphinxtogithub.__contact__,
    url=sphinxtogithub.__homepage__,
    platforms=["any"],
    license="BSD",
    packages=find_packages(),
    scripts=["bin/sphinxtogithub"],
    zip_safe=False,
    install_requires=[],
    # Custom commands: "python setup.py test" and "python setup.py publish".
    cmdclass = {"test": RunTests, "publish" : Publish},
    classifiers=[
        "Development Status :: 4 - Beta",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Environment :: Plugins",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: POSIX",
        "Topic :: Documentation",
    ],
    long_description=long_description,
)
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinxtogithub-1.1.0/tests/__init__.py | docs/sphinxext/sphinxtogithub-1.1.0/tests/__init__.py | python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false | |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinxtogithub-1.1.0/tests/testreplace.py | docs/sphinxext/sphinxtogithub-1.1.0/tests/testreplace.py | # -*- coding: utf-8 -*-
from unittest import TestCase
import sphinxtogithub
class TestReplace(TestCase):
    """Smoke test for str.replace on a unicode literal."""

    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_unicode_replace(self):
        # NOTE(review): Python 2 print statement and u-literal; this module is
        # not importable under Python 3 as written.
        print u"this is a test ✓".replace( "this", "that" )
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/sphinxtogithub.py | docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/sphinxtogithub.py | #! /usr/bin/env python
from optparse import OptionParser
import os
import sys
import shutil
import codecs
class DirHelper(object):
    """Bundle of directory-related filesystem callables (isdir, listdir,
    walk, rmtree).  Injected so tests can substitute fakes."""

    def __init__(self, is_dir, list_dir, walk, rmtree):
        # Store the injected callables verbatim; no wrapping or validation.
        self.is_dir, self.list_dir = is_dir, list_dir
        self.walk, self.rmtree = walk, rmtree
class FileSystemHelper(object):
    """Bundle of file-related filesystem callables (open, path join, move,
    exists).  Injected so tests can substitute fakes."""

    def __init__(self, open_, path_join, move, exists):
        # Store the injected callables verbatim; no wrapping or validation.
        self.open_, self.path_join = open_, path_join
        self.move, self.exists = move, exists
class Replacer(object):
    """A single substring substitution applied to a piece of text."""

    def __init__(self, from_, to):
        self.from_ = from_
        self.to = to

    def process(self, text):
        """Return *text* with every occurrence of ``from_`` swapped for ``to``."""
        return text.replace(self.from_, self.to)
class FileHandler(object):
    """Rewrites one file in place by running its text through replacers."""

    def __init__(self, name, replacers, opener):
        self.name = name
        self.replacers = replacers
        self.opener = opener

    def process(self):
        """Read the file, apply every replacer in order, write the result back."""
        contents = self.opener(self.name, "r").read()
        for rule in self.replacers:
            contents = rule.process(contents)
        self.opener(self.name, "w").write(contents)
class Remover(object):
    """Callable that deletes a path, but only if it currently exists."""

    def __init__(self, exists, remove):
        self.exists = exists
        self.remove = remove

    def __call__(self, name):
        # Guard first: removal is skipped entirely for missing paths.
        if not self.exists(name):
            return
        self.remove(name)
class ForceRename(object):
    """Rename wrapper that removes any existing target before the move."""

    def __init__(self, renamer, remove):
        self.renamer = renamer
        self.remove = remove

    def __call__(self, from_, to):
        # Clear the destination first, then delegate the actual rename.
        self.remove(to)
        self.renamer(from_, to)
class VerboseRename(object):
    """Rename wrapper that reports each rename on the supplied stream."""

    def __init__(self, renamer, stream):
        self.renamer = renamer
        self.stream = stream

    def __call__(self, from_, to):
        message = "Renaming directory '%s' -> '%s'\n" % (
            os.path.basename(from_), os.path.basename(to))
        self.stream.write(message)
        self.renamer(from_, to)
class DirectoryHandler(object):
    """Renames a directory by dropping its first character (e.g. "_static"
    becomes "static") and maps file paths between the two names."""

    def __init__(self, name, root, renamer):
        self.name = name
        self.new_name = name[1:]
        self.root = root + os.sep
        self.renamer = renamer

    def path(self):
        """Path of the directory under its current (old) name."""
        return os.path.join(self.root, self.name)

    def relative_path(self, directory, filename):
        """Path of *filename* inside *directory*, relative to the root."""
        trimmed = directory.replace(self.root, "", 1)
        return os.path.join(trimmed, filename)

    def new_relative_path(self, directory, filename):
        """Like relative_path, but under the renamed directory."""
        old_relative = self.relative_path(directory, filename)
        return old_relative.replace(self.name, self.new_name, 1)

    def process(self):
        """Perform the actual rename via the injected renamer."""
        self.renamer(os.path.join(self.root, self.name),
                     os.path.join(self.root, self.new_name))
class HandlerFactory(object):
    """Creates the handler objects used when building a Layout."""

    def create_file_handler(self, name, replacers, opener):
        # Handler that rewrites one file's contents with `replacers`.
        return FileHandler(name, replacers, opener)
    def create_dir_handler(self, name, root, renamer):
        # Handler that renames one directory by dropping its first character.
        return DirectoryHandler(name, root, renamer)
class OperationsFactory(object):
    """Creates the small operation objects (renames, replacers, removers)."""

    def create_force_rename(self, renamer, remover):
        return ForceRename(renamer, remover)
    def create_verbose_rename(self, renamer, stream):
        return VerboseRename(renamer, stream)
    def create_replacer(self, from_, to):
        return Replacer(from_, to)
    def create_remover(self, exists, remove):
        return Remover(exists, remove)
class Layout(object):
    """Runs a batch of file rewrites followed by directory renames, which
    together re-shape the output directory."""

    def __init__(self, directory_handlers, file_handlers):
        self.directory_handlers = directory_handlers
        self.file_handlers = file_handlers

    def process(self):
        """Process every handler, files first and directories second."""
        for file_handler in self.file_handlers:
            file_handler.process()
        for dir_handler in self.directory_handlers:
            dir_handler.process()
class NullLayout(object):
    """No-op stand-in returned when there is nothing to re-arrange."""

    def process(self):
        """Deliberately do nothing."""
        return None
class LayoutFactory(object):
    "Creates a layout object"
    def __init__(self, operations_factory, handler_factory, file_helper, dir_helper, verbose, stream, force):
        # Factories build the operation/handler objects; the helpers supply
        # filesystem primitives so everything is injectable for tests.
        self.operations_factory = operations_factory
        self.handler_factory = handler_factory
        self.file_helper = file_helper
        self.dir_helper = dir_helper
        self.verbose = verbose
        self.output_stream = stream
        self.force = force
    def create_layout(self, path):
        """Scan *path* and build a Layout covering its underscore directories.

        Returns a NullLayout when no top-level "_" directories are found.
        """
        contents = self.dir_helper.list_dir(path)
        # Decorate the basic move with force-removal and/or verbose logging
        # depending on configuration.
        renamer = self.file_helper.move
        if self.force:
            remove = self.operations_factory.create_remover(self.file_helper.exists, self.dir_helper.rmtree)
            renamer = self.operations_factory.create_force_rename(renamer, remove)
        if self.verbose:
            renamer = self.operations_factory.create_verbose_rename(renamer, self.output_stream)
        # Build list of directories to process
        directories = [d for d in contents if self.is_underscore_dir(path, d)]
        underscore_directories = [
            self.handler_factory.create_dir_handler(d, path, renamer)
            for d in directories
        ]
        if not underscore_directories:
            if self.verbose:
                self.output_stream.write(
                    "No top level directories starting with an underscore "
                    "were found in '%s'\n" % path
                )
            return NullLayout()
        # Build list of files that are in those directories
        replacers = []
        for handler in underscore_directories:
            for directory, dirs, files in self.dir_helper.walk(handler.path()):
                for f in files:
                    # Each file under an underscore dir yields a text rule
                    # mapping its old relative path to the renamed one.
                    replacers.append(
                        self.operations_factory.create_replacer(
                            handler.relative_path(directory, f),
                            handler.new_relative_path(directory, f)
                        )
                    )
        # Build list of handlers to process all files
        filelist = []
        for root, dirs, files in self.dir_helper.walk(path):
            for f in files:
                if f.endswith(".html"):
                    # HTML files get the full set of path-rewriting rules.
                    filelist.append(
                        self.handler_factory.create_file_handler(
                            self.file_helper.path_join(root, f),
                            replacers,
                            self.file_helper.open_)
                    )
                if f.endswith(".js"):
                    # JS files only need the '_sources/' literal rewritten.
                    filelist.append(
                        self.handler_factory.create_file_handler(
                            self.file_helper.path_join(root, f),
                            [self.operations_factory.create_replacer("'_sources/'", "'sources/'")],
                            self.file_helper.open_
                        )
                    )
        return Layout(underscore_directories, filelist)
    def is_underscore_dir(self, path, directory):
        # True for an existing directory whose basename starts with "_".
        return (self.dir_helper.is_dir(self.file_helper.path_join(path, directory))
                and directory.startswith("_"))
def sphinx_extension(app, exception):
    "Wrapped up as a Sphinx Extension"
    # Only post-process HTML builders; other builders are left untouched.
    if not app.builder.name in ("html", "dirhtml"):
        return
    # NOTE(review): Python 2 print statements below; module predates Python 3.
    if not app.config.sphinx_to_github:
        if app.config.sphinx_to_github_verbose:
            print "Sphinx-to-github: Disabled, doing nothing."
        return
    if exception:
        if app.config.sphinx_to_github_verbose:
            print "Sphinx-to-github: Exception raised in main build, doing nothing."
        return
    # Bind the helpers to the real filesystem (with the configured encoding)
    # and run the converter over the build output directory.
    dir_helper = DirHelper(
        os.path.isdir,
        os.listdir,
        os.walk,
        shutil.rmtree
        )
    file_helper = FileSystemHelper(
        lambda f, mode: codecs.open(f, mode, app.config.sphinx_to_github_encoding),
        os.path.join,
        shutil.move,
        os.path.exists
        )
    operations_factory = OperationsFactory()
    handler_factory = HandlerFactory()
    layout_factory = LayoutFactory(
        operations_factory,
        handler_factory,
        file_helper,
        dir_helper,
        app.config.sphinx_to_github_verbose,
        sys.stdout,
        force=True
        )
    layout = layout_factory.create_layout(app.outdir)
    layout.process()
def setup(app):
    "Setup function for Sphinx Extension"
    # Register the three config switches with their defaults, then hook the
    # converter to run once the build has finished.
    app.add_config_value("sphinx_to_github", True, '')
    app.add_config_value("sphinx_to_github_verbose", True, '')
    app.add_config_value("sphinx_to_github_encoding", 'utf-8', '')
    app.connect("build-finished", sphinx_extension)
def main(args):
    """Command-line entry point: convert the given html directory in place."""
    usage = "usage: %prog [options] <html directory>"
    parser = OptionParser(usage=usage)
    parser.add_option("-v","--verbose", action="store_true",
            dest="verbose", default=False, help="Provides verbose output")
    parser.add_option("-e","--encoding", action="store",
            dest="encoding", default="utf-8", help="Encoding for reading and writing files")
    opts, args = parser.parse_args(args)
    try:
        path = args[0]
    except IndexError:
        # No positional argument: report usage on stderr and bail out.
        sys.stderr.write(
            "Error - Expecting path to html directory:"
            "sphinx-to-github <path>\n"
            )
        return
    # Bind the helpers to the real filesystem with the requested encoding.
    dir_helper = DirHelper(
        os.path.isdir,
        os.listdir,
        os.walk,
        shutil.rmtree
        )
    file_helper = FileSystemHelper(
        lambda f, mode: codecs.open(f, mode, opts.encoding),
        os.path.join,
        shutil.move,
        os.path.exists
        )
    operations_factory = OperationsFactory()
    handler_factory = HandlerFactory()
    # Unlike the Sphinx-extension path, the CLI does not force-remove targets.
    layout_factory = LayoutFactory(
        operations_factory,
        handler_factory,
        file_helper,
        dir_helper,
        opts.verbose,
        sys.stdout,
        force=False
        )
    layout = layout_factory.create_layout(path)
    layout.process()
# Allow running the converter directly as a script.
if __name__ == "__main__":
    main(sys.argv[1:])
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/__init__.py | docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/__init__.py | """Script for preparing the html output of the Sphinx documentation system for
github pages. """
VERSION = (1, 1, 0, 'dev')
__version__ = ".".join(map(str, VERSION[:-1]))
__release__ = ".".join(map(str, VERSION))
__author__ = "Michael Jones"
__contact__ = "http://github.com/michaeljones"
__homepage__ = "http://github.com/michaeljones/sphinx-to-github"
__docformat__ = "restructuredtext"
from sphinxtogithub import (
setup,
sphinx_extension,
LayoutFactory,
Layout,
DirectoryHandler,
VerboseRename,
ForceRename,
Remover,
FileHandler,
Replacer,
DirHelper,
FileSystemHelper,
OperationsFactory,
HandlerFactory
)
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/tests/renamer.py | docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/tests/renamer.py |
from sphinxtogithub.tests import MockExists, MockRemove, MockStream
import sphinxtogithub
import unittest
import os
class MockRename(object):
    """Callable double standing in for a rename; records its last arguments."""

    def __call__(self, from_, to):
        # Remember exactly what we were asked to rename, for later assertions.
        self.from_ = from_
        self.to = to
class TestForceRename(unittest.TestCase):
    """Checks ForceRename removes the target and delegates the rename."""

    def testCall(self):
        rename = MockRename()
        remove = MockRemove()
        renamer = sphinxtogithub.ForceRename(rename, remove)
        from_ = "from"
        to = "to"
        renamer(from_, to)
        # The wrapped rename received both arguments...
        self.assertEqual(rename.from_, from_)
        self.assertEqual(rename.to, to)
        # ...and the destination path was handed to the remover.
        self.assertEqual(remove.name, to)
class TestVerboseRename(unittest.TestCase):
    """Checks VerboseRename logs the rename and still delegates it."""

    def testCall(self):
        rename = MockRename()
        stream = MockStream()
        renamer = sphinxtogithub.VerboseRename(rename, stream)
        from_ = os.path.join("path", "to", "from")
        to = os.path.join("path", "to", "to")
        renamer(from_, to)
        # Delegation happened with the original full paths...
        self.assertEqual(rename.from_, from_)
        self.assertEqual(rename.to, to)
        # ...while the log message only contains the basenames.
        self.assertEqual(
            stream.msgs[0],
            "Renaming directory '%s' -> '%s'\n" % (os.path.basename(from_), os.path.basename(to))
            )
def testSuite():
    """Return a unittest.TestSuite with this module's tests."""
    suite = unittest.TestSuite()
    suite.addTest(TestForceRename("testCall"))
    suite.addTest(TestVerboseRename("testCall"))
    return suite
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/tests/filehandler.py | docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/tests/filehandler.py |
import unittest
import sphinxtogithub
class MockFileObject(object):
    """File double: reads a fixed "before" document, captures what is written.

    `before` holds raw HTML with "_static" links; `after` is the expected
    text once those links are rewritten to "static".
    """
    before = """
<title>Breathe's documentation — BreatheExample v0.0.1 documentation</title>
<link rel="stylesheet" href="_static/default.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
"""
    after = """
<title>Breathe's documentation — BreatheExample v0.0.1 documentation</title>
<link rel="stylesheet" href="static/default.css" type="text/css" />
<link rel="stylesheet" href="static/pygments.css" type="text/css" />
"""
    def read(self):
        return self.before
    def write(self, text):
        # Store the written text so tests can assert on it.
        self.written = text
class MockOpener(object):
    """Opener double that always hands back one shared MockFileObject and
    records the last name it was asked to open."""

    def __init__(self):
        self.file_object = MockFileObject()
    def __call__(self, name, readmode="r"):
        self.name = name
        return self.file_object
class TestFileHandler(unittest.TestCase):
    """Exercises FileHandler with the mock opener/file doubles above."""

    def testProcess(self):
        # With no replacers the file contents pass through unchanged.
        filepath = "filepath"
        opener = MockOpener()
        file_handler = sphinxtogithub.FileHandler(filepath, [], opener)
        file_handler.process()
        self.assertEqual(opener.file_object.written, MockFileObject.before)
        self.assertEqual(opener.name, filepath)
    def testProcessWithReplacers(self):
        # Two replacers rewrite both stylesheet links from _static to static.
        filepath = "filepath"
        replacers = []
        replacers.append(sphinxtogithub.Replacer("_static/default.css", "static/default.css"))
        replacers.append(sphinxtogithub.Replacer("_static/pygments.css", "static/pygments.css"))
        opener = MockOpener()
        file_handler = sphinxtogithub.FileHandler(filepath, replacers, opener)
        file_handler.process()
        self.assertEqual(opener.file_object.written, MockFileObject.after)
def testSuite():
    """Return a unittest.TestSuite with this module's tests."""
    suite = unittest.TestSuite()
    suite.addTest(TestFileHandler("testProcess"))
    suite.addTest(TestFileHandler("testProcessWithReplacers"))
    return suite
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/tests/setup.py | docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/tests/setup.py |
import sphinxtogithub
import unittest
class MockApp(object):
    """Minimal stand-in for a Sphinx application object: records config
    values and event connections instead of acting on them."""

    def __init__(self):
        self.config_values = {}   # name -> (default, rebuild)
        self.connections = {}     # event name -> handler callable

    def add_config_value(self, name, default, rebuild):
        self.config_values[name] = (default, rebuild)

    def connect(self, stage, function):
        self.connections[stage] = function
class TestSetup(unittest.TestCase):
    """Checks setup() registers exactly the expected config values + hook."""

    def testSetup(self):
        # Sadly not flexible enough to test it independently
        # so the tests rely on and test the values pass in the
        # production code
        app = MockApp()
        sphinxtogithub.setup(app)
        self.assertEqual(app.connections["build-finished"], sphinxtogithub.sphinx_extension)
        self.assertEqual(len(app.connections), 1)
        self.assertEqual(app.config_values["sphinx_to_github"],(True, ''))
        self.assertEqual(app.config_values["sphinx_to_github_verbose"],(True, ''))
        self.assertEqual(app.config_values["sphinx_to_github_encoding"],('utf-8', ''))
        self.assertEqual(len(app.config_values),3)
def testSuite():
    """Return a unittest.TestSuite with this module's tests."""
    suite = unittest.TestSuite()
    suite.addTest(TestSetup("testSetup"))
    return suite
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/tests/layoutfactory.py | docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/tests/layoutfactory.py |
from sphinxtogithub.tests import MockStream
import sphinxtogithub
import unittest
import os
import shutil
root = "test_path"
dirs = ["dir1", "dir2", "dir_", "d_ir", "_static", "_source"]
files = ["file1.html", "nothtml.txt", "file2.html", "javascript.js"]
def mock_is_dir(path):
    """Fake os.path.isdir: true only for the module-level `dirs` under `root`."""
    directories = [ os.path.join(root, dir_) for dir_ in dirs ]
    return path in directories
def mock_list_dir(path):
    """Fake os.listdir: returns the fixed `dirs` + `files` fixtures."""
    contents = []
    contents.extend(dirs)
    contents.extend(files)
    return contents
def mock_walk(path):
    """Fake os.walk: yields a single (path, dirs, files) fixture triple."""
    yield path, dirs, files
class MockHandlerFactory(object):
    """Factory that returns the real handler classes from sphinxtogithub."""

    def create_file_handler(self, name, replacers, opener):
        return sphinxtogithub.FileHandler(name, replacers, opener)
    def create_dir_handler(self, name, root, renamer):
        return sphinxtogithub.DirectoryHandler(name, root, renamer)
class TestLayoutFactory(unittest.TestCase):
    """Exercises LayoutFactory against the fake filesystem fixtures above."""

    def setUp(self):
        # Verbose output goes to a MockStream; force-removal is disabled.
        verbose = True
        force = False
        stream = MockStream()
        dir_helper = sphinxtogithub.DirHelper(
            mock_is_dir,
            mock_list_dir,
            mock_walk,
            shutil.rmtree
            )
        file_helper = sphinxtogithub.FileSystemHelper(
            open,
            os.path.join,
            shutil.move,
            os.path.exists
            )
        operations_factory = sphinxtogithub.OperationsFactory()
        handler_factory = MockHandlerFactory()
        self.layoutfactory = sphinxtogithub.LayoutFactory(
            operations_factory,
            handler_factory,
            file_helper,
            dir_helper,
            verbose,
            stream,
            force
            )
    def tearDown(self):
        self.layoutfactory = None
    def testUnderscoreCheck(self):
        # NOTE(review): TestCase.assert_ is a deprecated alias of assertTrue
        # (removed in Python 3.12); keep in mind when modernising.
        func = self.layoutfactory.is_underscore_dir
        self.assert_(func(root, "_static"))
        self.assert_(not func(root, "dir_"))
        self.assert_(not func(root, "d_ir"))
        self.assert_(not func(root, "dir1"))
    def testCreateLayout(self):
        layout = self.layoutfactory.create_layout(root)
        # Only true underscore directories get directory handlers.
        dh = layout.directory_handlers
        self.assertEqual(dh[0].name, "_static")
        self.assertEqual(dh[1].name, "_source")
        self.assertEqual(len(dh), 2)
        # Only .html and .js files get file handlers.
        fh = layout.file_handlers
        self.assertEqual(fh[0].name, os.path.join(root,"file1.html"))
        self.assertEqual(fh[1].name, os.path.join(root,"file2.html"))
        self.assertEqual(fh[2].name, os.path.join(root,"javascript.js"))
        self.assertEqual(len(fh), 3)
def testSuite():
    """Return a unittest.TestSuite with this module's tests."""
    suite = unittest.TestSuite()
    suite.addTest(TestLayoutFactory("testUnderscoreCheck"))
    suite.addTest(TestLayoutFactory("testCreateLayout"))
    return suite
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/tests/__init__.py | docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/tests/__init__.py |
class MockExists(object):
    """Callable double for an existence check: records the queried name and
    always reports the path as present."""

    def __call__(self, name):
        # Remember what was asked about so tests can assert on it.
        self.name = name
        return True
class MockRemove(MockExists):
    # Same behaviour as MockExists; a distinct type lets tests use one
    # recording double per role (exists vs. remove).
    pass
class MockStream(object):
    """File-like double that captures everything written to it."""

    def __init__(self):
        # Each write() call appends one entry here, in order.
        self.msgs = []

    def write(self, msg):
        self.msgs.append(msg)
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/tests/remover.py | docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/tests/remover.py |
from sphinxtogithub.tests import MockExists, MockRemove
import sphinxtogithub
import unittest
class TestRemover(unittest.TestCase):
    """Checks Remover consults exists() and forwards the path to remove()."""

    def testCall(self):
        exists = MockExists()
        remove = MockRemove()
        remover = sphinxtogithub.Remover(exists, remove)
        filepath = "filepath"
        remover(filepath)
        # Both the existence check and the removal saw the same path.
        self.assertEqual(filepath, exists.name)
        self.assertEqual(filepath, remove.name)
def testSuite():
    """Return a unittest.TestSuite with this module's tests."""
    suite = unittest.TestSuite()
    suite.addTest(TestRemover("testCall"))
    return suite
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/tests/directoryhandler.py | docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/tests/directoryhandler.py |
import unittest
import os
import sphinxtogithub
class MockRenamer(object):
    """Callable double standing in for a rename; records its last arguments."""

    def __call__(self, from_, to):
        # Remember exactly what we were asked to rename, for later assertions.
        self.from_ = from_
        self.to = to
class TestDirectoryHandler(unittest.TestCase):
    """Exercises DirectoryHandler path mapping and the rename itself."""

    def setUp(self):
        # "_static" under build/html should map to "static".
        self.directory = "_static"
        self.new_directory = "static"
        self.root = os.path.join("build", "html")
        renamer = MockRenamer()
        self.dir_handler = sphinxtogithub.DirectoryHandler(self.directory, self.root, renamer)
    def tearDown(self):
        self.dir_handler = None
    def testPath(self):
        self.assertEqual(self.dir_handler.path(), os.path.join(self.root, self.directory))
    def testRelativePath(self):
        # Relative path keeps the old directory name.
        dir_name = "css"
        dir_path = os.path.join(self.root, self.directory, dir_name)
        filename = "cssfile.css"
        self.assertEqual(
            self.dir_handler.relative_path(dir_path, filename),
            os.path.join(self.directory, dir_name, filename)
            )
    def testNewRelativePath(self):
        # New relative path substitutes the renamed directory.
        dir_name = "css"
        dir_path = os.path.join(self.root, self.directory, dir_name)
        filename = "cssfile.css"
        self.assertEqual(
            self.dir_handler.new_relative_path(dir_path, filename),
            os.path.join(self.new_directory, dir_name, filename)
            )
    def testProcess(self):
        # process() should ask the renamer to move old name -> new name.
        self.dir_handler.process()
        self.assertEqual(
            self.dir_handler.renamer.to,
            os.path.join(self.root, self.new_directory)
            )
        self.assertEqual(
            self.dir_handler.renamer.from_,
            os.path.join(self.root, self.directory)
            )
def testSuite():
    """Return a unittest.TestSuite with this module's tests."""
    suite = unittest.TestSuite()
    suite.addTest(TestDirectoryHandler("testPath"))
    suite.addTest(TestDirectoryHandler("testRelativePath"))
    suite.addTest(TestDirectoryHandler("testNewRelativePath"))
    suite.addTest(TestDirectoryHandler("testProcess"))
    return suite
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/tests/replacer.py | docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/tests/replacer.py |
import unittest
import sphinxtogithub
class TestReplacer(unittest.TestCase):
    """Checks Replacer rewrites only its configured substring.

    Note `after` still contains the untouched pygments link: a Replacer only
    substitutes its own from_/to pair.
    """
    before = """
<title>Breathe's documentation — BreatheExample v0.0.1 documentation</title>
<link rel="stylesheet" href="_static/default.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
"""
    after = """
<title>Breathe's documentation — BreatheExample v0.0.1 documentation</title>
<link rel="stylesheet" href="static/default.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
"""
    def testReplace(self):
        replacer = sphinxtogithub.Replacer("_static/default.css", "static/default.css")
        self.assertEqual(replacer.process(self.before), self.after)
def testSuite():
    """Return a unittest.TestSuite with this module's tests."""
    suite = unittest.TestSuite()
    suite.addTest(TestReplacer("testReplace"))
    return suite
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/tests/layout.py | docs/sphinxext/sphinxtogithub-1.1.0/sphinxtogithub/tests/layout.py |
from sphinxtogithub.tests import MockExists, MockRemove
import sphinxtogithub
import unittest
class MockHandler(object):
    """Handler double that simply remembers whether process() was invoked."""

    def __init__(self):
        self.processed = False

    def process(self):
        # Flip the flag so tests can verify this handler was reached.
        self.processed = True
class TestLayout(unittest.TestCase):
    """Checks Layout.process() runs every directory and file handler."""

    def testProcess(self):
        directory_handlers = []
        file_handlers = []
        for i in range(0, 10):
            directory_handlers.append(MockHandler())
        for i in range(0, 5):
            file_handlers.append(MockHandler())
        layout = sphinxtogithub.Layout(directory_handlers, file_handlers)
        layout.process()
        # Check all handlers were processed.  (This used a bare ``reduce``
        # chain, which fails on Python 3 where ``reduce`` is no longer a
        # builtin, and the deprecated ``assert_`` alias, removed in 3.12;
        # ``all`` + ``assertTrue`` express the same check portably.)
        self.assertTrue(all(h.processed for h in directory_handlers))
        self.assertTrue(all(h.processed for h in file_handlers))
def testSuite():
    """Return a unittest.TestSuite with this module's tests."""
    suite = unittest.TestSuite()
    suite.addTest(TestLayout("testProcess"))
    return suite
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
google/bi-tempered-loss | https://github.com/google/bi-tempered-loss/blob/1c65c7770a3aa76a9515c67f15cfdbaad0e8cf88/jax/loss.py | jax/loss.py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A JAX implementation of Robust Bi-tempered loss.
Source: https://bit.ly/3jSol8T
"""
import functools
import jax
from jax.lax import cond
from jax.lax import while_loop
import jax.numpy as jnp
from jax.scipy.special import logsumexp
@jax.jit
def _cross_entropy_loss(logits: jnp.ndarray,
                        labels: jnp.ndarray) -> jnp.ndarray:
  """Standard KL-style cross entropy: sum_i p_i (log p_i - log q_i).

  The 1e-15 keeps log(labels) finite for zero-probability classes.
  """
  log_probs = jax.nn.log_softmax(logits)
  per_class = labels * (jnp.log(labels + 1e-15) - log_probs)
  return jnp.sum(per_class, axis=-1)
@jax.jit
def log_t(u: jnp.ndarray, t: float) -> jnp.ndarray:
  """Temperature-generalised log: (u^(1-t) - 1) / (1 - t).

  Falls back to the natural log when t is numerically 1, where the general
  formula has a removable singularity.
  """

  def _generalised(u: jnp.ndarray, t: float) -> jnp.ndarray:
    return (jnp.power(u, 1.0 - t) - 1.0) / (1.0 - t)

  return cond(
      jnp.abs(t - 1.0) < 1e-15, jnp.log,
      functools.partial(_generalised, t=t), u)
@jax.jit
def exp_t(u: jnp.ndarray, t: float) -> jnp.ndarray:
  """Temperature-generalised exponential, the inverse of log_t.

  exp_t(u) = max(1 + (1-t)u, 0) ** (1/(1-t)); reduces to exp when t is
  numerically 1.  The clamp at zero gives finite support for t < 1.
  """

  def _generalised(u: jnp.ndarray, t: float) -> jnp.ndarray:
    base = jnp.maximum(1.0 + (1.0 - t) * u, 0.0)
    return jnp.power(base, 1.0 / (1.0 - t))

  return cond(
      jnp.abs(t - 1.0) < 1e-15, jnp.exp,
      functools.partial(_generalised, t=t), u)
@jax.jit
def compute_normalization_fixed_point(activations: jnp.ndarray,
                                      t: float,
                                      num_iters: int = 5):
  """Returns the normalization value for each example (t > 1.0).

  Args:
    activations: A multi-dimensional array with last dimension `num_classes`.
    t: Temperature 2 (> 1.0 for tail heaviness).
    num_iters: Number of iterations to run the method.
  Return: An array of same rank as activation with the last dimension being 1.
  """
  # Shift by the per-example max for numerical stability; added back at end.
  mu = jnp.max(activations, -1, keepdims=True)
  normalized_activations_step_0 = activations - mu
  def cond_fun(carry):
    _, iters = carry
    return iters < num_iters
  def body_fun(carry):
    normalized_activations, iters = carry
    # Fixed-point update: rescale the step-0 activations by the current
    # partition sum raised to (1 - t).
    logt_partition = jnp.sum(
        exp_t(normalized_activations, t), -1, keepdims=True)
    normalized_activations_t = normalized_activations_step_0 * jnp.power(
        logt_partition, 1.0 - t)
    return normalized_activations_t, iters + 1
  normalized_activations_t, _ = while_loop(cond_fun, body_fun,
                                           (normalized_activations_step_0, 0))
  # Final partition evaluation, then convert back to the normalization value.
  logt_partition = jnp.sum(
      exp_t(normalized_activations_t, t), -1, keepdims=True)
  return -log_t(1.0 / logt_partition, t) + mu
@jax.jit
def compute_normalization_binary_search(activations: jnp.ndarray,
                                        t: float,
                                        num_iters: int = 10):
  """Returns the normalization value for each example (t < 1.0).

  Bisects on the log_t partition value Z so that the tempered probabilities
  exp_t(activations - Z, t) sum to 1 along the last axis.

  Args:
    activations: A multi-dimensional array with last dimension `num_classes`.
    t: Temperature 2 (< 1.0 for finite support).
    num_iters: Number of iterations to run the method.
  Return: An array of same rank as activation with the last dimension being 1.
  """
  # Shift by the per-example max for numerical stability; added back at end.
  mu = jnp.max(activations, -1, keepdims=True)
  normalized_activations = activations - mu
  shape_activations = activations.shape
  # Number of classes inside exp_t's finite support (where 1 + (1-t)u > 0).
  effective_dim = jnp.float32(
      jnp.sum(
          jnp.int32(normalized_activations > -1.0 / (1.0 - t)),
          -1,
          keepdims=True))
  shape_partition = list(shape_activations[:-1]) + [1]
  def cond_fun(carry):
    _, _, iters = carry
    return iters < num_iters
  def body_fun(carry):
    lower, upper, iters = carry
    logt_partition = (upper + lower) / 2.0
    sum_probs = jnp.sum(
        exp_t(normalized_activations - logt_partition, t), -1, keepdims=True)
    # If the probabilities sum to less than one, the midpoint is too large,
    # so it becomes the new upper bound; otherwise it becomes the lower.
    update = jnp.float32(sum_probs < 1.0)
    lower = jnp.reshape(lower * update + (1.0 - update) * logt_partition,
                        shape_partition)
    upper = jnp.reshape(upper * (1.0 - update) + update * logt_partition,
                        shape_partition)
    return lower, upper, iters + 1
  # Bisection bracket: 0 <= Z <= -log_t(1 / effective_dim).  (The original
  # initialized these bounds twice with identical values; the redundant
  # first copy has been removed.)
  lower = jnp.zeros(shape_partition)
  upper = -log_t(1.0 / effective_dim, t) * jnp.ones(shape_partition)
  lower, upper, _ = while_loop(cond_fun, body_fun, (lower, upper, 0))
  logt_partition = (upper + lower) / 2.0
  return logt_partition + mu
@jax.jit
def compute_tempered_normalization(activations: jnp.ndarray,
                                   t: float,
                                   num_iters: int = 5):
  """Dispatches to the appropriate tempered normalizer for t != 1.

  t < 1 has finite support and uses binary search; t > 1 uses the
  fixed-point iteration.  Branching is traced via lax.cond.
  """
  return cond(
      t < 1.0,
      functools.partial(
          compute_normalization_binary_search, t=t, num_iters=num_iters),
      functools.partial(
          compute_normalization_fixed_point, t=t, num_iters=num_iters),
      activations)
@jax.jit
def compute_normalization(activations: jnp.ndarray,
                          t: float,
                          num_iters: int = 5):
  """Returns the normalization value for each example.

  Args:
    activations: A multi-dimensional array with last dimension `num_classes`.
    t: Temperature 2 (< 1.0 for finite support, > 1.0 for tail heaviness).
    num_iters: Number of iterations to run the method.
  Return: An array of same rank as activation with the last dimension being 1.
  """
  # At t == 1 the tempered normalizer reduces to the ordinary log-sum-exp;
  # otherwise delegate to the iterative tempered solvers.
  return cond(
      jnp.abs(t - 1.0) < 1e-15,
      functools.partial(logsumexp, axis=-1, keepdims=True),
      functools.partial(
          compute_tempered_normalization, t=t, num_iters=num_iters),
      activations)
@jax.jit
def tempered_sigmoid(activations, t, num_iters=5):
  """Tempered sigmoid function.

  Args:
    activations: Activations for the positive class for binary classification.
    t: Temperature array > 0.0.
    num_iters: Number of iterations to run the method.

  Returns:
    A probabilities array.
  """
  input_shape = activations.shape
  activations_2d = jnp.reshape(activations, [-1, 1])
  # Treat the binary problem as a two-class tempered softmax with an
  # implicit zero logit for the negative class, then keep column 1
  # (the positive-class probability).
  internal_activations = jnp.concatenate(
      [jnp.zeros_like(activations_2d), activations_2d], 1)
  internal_probabilities = tempered_softmax(internal_activations, t, num_iters)
  one_class_probabilities = internal_probabilities[:, 1]
  return jnp.reshape(one_class_probabilities, input_shape)
@jax.jit
def tempered_softmax(activations, t, num_iters=5):
  """Tempered softmax function.

  Args:
    activations: A multi-dimensional array with last dimension `num_classes`.
    t: Temperature array > 0.0.
    num_iters: Number of iterations to run the method.

  Returns:
    A probabilities array.
  """
  # probabilities = exp_t(activations - Z, t), with Z chosen so the last
  # axis sums to one (computed iteratively for t != 1).
  normalization_constants = compute_normalization(activations, t, num_iters)
  return exp_t(activations - normalization_constants, t)
def _internal_bi_tempered_logistic_loss(activations, labels, t1, t2):
  """Computes the Bi-Tempered logistic loss.

  Args:
    activations: A multi-dimensional array with last dimension `num_classes`.
    labels: batch_size
    t1: Temperature 1 (< 1.0 for boundedness).
    t2: Temperature 2 (> 1.0 for tail heaviness).

  Returns:
    A loss array for robust loss.
  """
  # NOTE: t1/t2 are compared with Python `==` below, so this helper expects
  # concrete floats (it is not jit-wrapped like its siblings).
  normalization_constants = compute_normalization(activations, t2, num_iters=5)
  if t2 == 1.0:
    if t1 == 1.0:
      # Both temperatures 1: ordinary softmax cross entropy.
      return normalization_constants + jnp.sum(
          jnp.multiply(labels,
                       jnp.log(labels + 1e-10) - activations), -1)
    else:
      shifted_activations = jnp.exp(activations - normalization_constants)
      one_minus_t1 = (1.0 - t1)
      one_minus_t2 = 1.0
  else:
    one_minus_t1 = (1.0 - t1)
    one_minus_t2 = (1.0 - t2)
    # exp_t base, clamped at zero (finite support when t2 < 1).
    shifted_activations = jnp.maximum(
        1.0 + one_minus_t2 * (activations - normalization_constants), 0.0)
  if t1 == 1.0:
    return jnp.sum(
        jnp.multiply(
            jnp.log(labels + 1e-10) -
            jnp.log(jnp.power(shifted_activations, 1.0 / one_minus_t2)),
            labels), -1)
  else:
    beta = 1.0 + one_minus_t1  # i.e. beta = 2 - t1
    logt_probs = (jnp.power(shifted_activations, one_minus_t1 / one_minus_t2) -
                  1.0) / one_minus_t1
    return jnp.sum(
        jnp.multiply(log_t(labels, t1) - logt_probs, labels) - 1.0 / beta *
        (jnp.power(labels, beta) -
         jnp.power(shifted_activations, beta / one_minus_t2)), -1)
@jax.custom_vjp
def bi_tempered_logistic_loss(activations,
                              labels,
                              t1,
                              t2,
                              label_smoothing=0.0,
                              num_iters=5):
  """Bi-Tempered Logistic Loss with custom gradient.

  The primal simply delegates to the forward-pass helper and drops the
  residuals; `defvjp` (below in the module) wires up the hand-written
  backward pass.

  Args:
    activations: A multi-dimensional array with last dimension `num_classes`.
    labels: An array with shape and dtype as activations.
    t1: Temperature 1 (< 1.0 for boundedness).
    t2: Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).
    label_smoothing: Label smoothing parameter between [0, 1).
    num_iters: Number of iterations to run the method.
  Returns:
    A loss array.
  """
  return bi_tempered_logistic_loss_fwd(activations, labels, t1, t2,
                                       label_smoothing, num_iters)[0]
@jax.jit
def bi_tempered_logistic_loss_fwd(activations,
                                  labels,
                                  t1,
                                  t2,
                                  label_smoothing=0.0,
                                  num_iters=5):
  """Forward pass function for bi-tempered logistic loss.

  Args:
    activations: A multi-dimensional array with last dimension `num_classes`.
    labels: An array with shape and dtype as activations.
    t1: Temperature 1 (< 1.0 for boundedness).
    t2: Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).
    label_smoothing: Label smoothing parameter between [0, 1).
    num_iters: Number of iterations to run the method.
  Returns:
    A loss array, residuals (labels, t1, t2, probabilities) for the
    backward pass.
  """
  num_classes = jnp.int32(labels.shape[-1])
  # Blend the labels toward uniform when label_smoothing > 0.
  # NOTE(review): `cond` is presumably jax.lax.cond imported at module top
  # (not visible in this chunk) -- confirm.
  labels = cond(
      label_smoothing > 0.0,
      lambda u:  # pylint: disable=g-long-lambda
      (1 - num_classes /
       (num_classes - 1) * label_smoothing) * u + label_smoothing /
      (num_classes - 1),
      lambda u: u,
      labels)
  probabilities = tempered_softmax(activations, t2, num_iters)
  def _tempred_cross_entropy_loss(unused_activations):
    # General tempered loss; closes over `labels`/`probabilities` above.
    loss_values = jnp.multiply(
        labels,
        log_t(labels + 1e-10, t1) -
        log_t(probabilities, t1)) - 1.0 / (2.0 - t1) * (
            jnp.power(labels, 2.0 - t1) - jnp.power(probabilities, 2.0 - t1))
    loss_values = jnp.sum(loss_values, -1)
    return loss_values
  # When both temperatures are (numerically) 1.0, fall back to the plain
  # cross-entropy helper for exactness.
  loss_values = cond(
      jnp.logical_and(
          jnp.less(jnp.abs(t1 - 1.0), 1e-15),
          jnp.less(jnp.abs(t2 - 1.0), 1e-15)),
      functools.partial(_cross_entropy_loss, labels=labels),
      _tempred_cross_entropy_loss,
      activations)
  return loss_values, (labels, t1, t2, probabilities)
@jax.jit
def bi_tempered_logistic_loss_bwd(res, d_loss):
  """Backward pass for the bi-tempered logistic loss.

  Args:
    res: Residuals saved by the forward pass:
      (labels, t1, t2, probabilities).
    d_loss: Cotangent of the per-example loss values.
  Returns:
    A 6-tuple of cotangents matching the primal arguments; only the
    activations receive a gradient, all other slots are None.
  """
  labels, t1, t2, probabilities = res
  residual = probabilities - labels
  # Exponent t2 - t1 damps the contribution of low-probability classes.
  damping = jnp.power(probabilities, t2 - t1)
  weighted_residual = residual * damping
  weighted_residual_total = jnp.sum(weighted_residual, -1, keepdims=True)
  # Escort distribution: probabilities tilted by t2 and renormalized.
  escort_weights = jnp.power(probabilities, t2)
  escort_weights = escort_weights / jnp.sum(escort_weights, -1, keepdims=True)
  grad_activations = weighted_residual - escort_weights * weighted_residual_total
  # Broadcast the per-example cotangent over the class axis if needed.
  if d_loss.ndim < grad_activations.ndim:
    d_loss = jnp.expand_dims(d_loss, -1)
  return (d_loss * grad_activations, None, None, None, None, None)
# Register the hand-written forward/backward passes as the custom VJP of
# bi_tempered_logistic_loss.
bi_tempered_logistic_loss.defvjp(
    bi_tempered_logistic_loss_fwd, bi_tempered_logistic_loss_bwd)
def bi_tempered_binary_logistic_loss(activations,
                                     labels,
                                     t1,
                                     t2,
                                     label_smoothing=0.0,
                                     num_iters=5):
  """Bi-Tempered binary logistic loss.

  Reduces the binary case to the multiclass loss by building an equivalent
  two-class problem: class 0 carries a fixed activation of 0 and class 1
  carries the input activation.

  Args:
    activations: An array containing activations for class 1.
    labels: An array with shape and dtype as activations.
    t1: Temperature 1 (< 1.0 for boundedness).
    t2: Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).
    label_smoothing: Label smoothing
    num_iters: Number of iterations to run the method.
  Returns:
    A loss array with the same shape as `labels`.
  """
  original_shape = labels.shape
  flat_labels = jnp.reshape(labels, [-1, 1])
  flat_activations = jnp.reshape(activations, [-1, 1])
  two_class_labels = jnp.concatenate([1.0 - flat_labels, flat_labels], 1)
  two_class_logits = jnp.concatenate(
      [jnp.zeros_like(flat_activations), flat_activations], 1)
  per_example_loss = bi_tempered_logistic_loss(
      two_class_logits, two_class_labels, t1, t2, label_smoothing, num_iters)
  return jnp.reshape(per_example_loss, original_shape)
| python | Apache-2.0 | 1c65c7770a3aa76a9515c67f15cfdbaad0e8cf88 | 2026-01-05T07:13:09.810991Z | false |
google/bi-tempered-loss | https://github.com/google/bi-tempered-loss/blob/1c65c7770a3aa76a9515c67f15cfdbaad0e8cf88/jax/loss_test.py | jax/loss_test.py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for bi-tempered loss."""
from absl.testing import absltest
import jax
import jax.numpy as jnp
import jax.random as random
import numpy.testing as npt
from six.moves import zip
import loss
python_version = "PY3"
class LossTest(absltest.TestCase):
  """Unit tests for the JAX implementation of the bi-tempered loss."""
  def test_normalization(self):
    """Test the normalization constant."""
    rng = random.PRNGKey(seed=1335)
    activations = random.normal(rng, shape=[100, 50000])
    # Near t = 1 and well away from it, exp_t(a - Z, t) must sum to one.
    for t in [0.99, 1.01]:
      normalization_constants = loss.compute_normalization(
          activations, t, num_iters=20)
      npt.assert_allclose(normalization_constants.shape, (100, 1))
      probabilities = jnp.sum(
          loss.exp_t(activations - normalization_constants, t), -1)
      npt.assert_allclose(probabilities, [1.0] * 100, atol=1e-5)
    for t in [0.1, 2.0]:
      normalization_constants = loss.compute_normalization(
          activations, t, num_iters=20)
      probabilities = jnp.sum(
          loss.exp_t(activations - normalization_constants, t), -1)
      npt.assert_allclose(probabilities, [1.0] * 100, atol=1e-5)
  def test_limit_case_logistic_loss(self):
    """Test for checking if t1 = t2 = 1.0 yields the logistic loss."""
    labels = jnp.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    rng = random.PRNGKey(seed=1335)
    activations = random.normal(rng, shape=[3, 3])
    actual_loss = loss.bi_tempered_logistic_loss(activations, labels, 1.0,
                                                 1.0)
    logistic_loss = loss._cross_entropy_loss(
        logits=activations, labels=labels)
    npt.assert_allclose(actual_loss, logistic_loss)
  def test_loss_value(self):
    """Test the loss based on precomputed values."""
    labels = jnp.array([[0.2, 0.3, 0.5], [0.6, 0.3, 0.1], [0.2, 0.8, 0.0]])
    activations = jnp.array([[-0.5, 0.1, 2.0], [0.1, 1.5, -5.0],
                             [4.0, -3.0, -6.0]])
    actual_loss = loss.bi_tempered_logistic_loss(activations, labels, 0.5, 1.5)
    npt.assert_allclose(actual_loss,
                        jnp.array([0.02301914, 0.18972909, 0.93874922]),
                        atol=1e-4)
    actual_loss = loss.bi_tempered_logistic_loss(activations, labels, 0.5,
                                                 0.8, num_iters=20)
    npt.assert_allclose(actual_loss,
                        jnp.array([0.21646356, 0.41836615, 1.33997854]),
                        atol=1e-4)
  def test_constant_shift(self):
    """Test if adding a constant to all activations is vacuous."""
    labels = jnp.array([[0.2, 0.3, 0.5], [0.4, 0.4, 0.2], [0.7, 0.2, 0.1]])
    rng = random.PRNGKey(seed=1335)
    rng, use_key = random.split(rng)
    activations = random.normal(use_key, shape=[3, 3])
    bias = random.normal(rng, shape=[3, 1])
    for t2 in [0.8, 1.2]:
      actual_loss = loss.bi_tempered_logistic_loss(
          activations, labels, 0.5, t2)
      shifted_loss = loss.bi_tempered_logistic_loss(
          activations + bias, labels, 0.5, t2)
      npt.assert_allclose(actual_loss, shifted_loss, atol=1e-6)
  def test_label_smoothing(self):
    """Test label smoothing."""
    labels = jnp.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    activations = jnp.array([[-0.5, 0.1, 2.0], [0.1, 1.5, -5.0],
                             [4.0, -3.0, -6.0]])
    actual_loss = loss.bi_tempered_logistic_loss(
        activations, labels, 0.5, 1.5, label_smoothing=0.1)
    npt.assert_allclose(
        actual_loss, jnp.array([0.76652711, 0.08627685, 1.35443510]),
        atol=1e-5)
  def test_binary_logistic_loss(self):
    """Test binary logistic loss."""
    labels = jnp.array([1.0, 0.0])
    activations = jnp.array([0.0, 0.0])
    actual_loss = loss.bi_tempered_binary_logistic_loss(activations, labels,
                                                        1.0, 1.0)
    # log(2) for both examples at zero activation.
    npt.assert_allclose(actual_loss, jnp.array([0.69314718, 0.69314718]),
                        atol=1e-5)
  def test_dynamic_temperatures(self):
    """Test changing temperatures dynamically."""
    labels = jnp.array([[0.2, 0.5, 0.3]])
    activations = jnp.array([[-0.5, 0.1, 2.0]])
    t1_values = [1.0, 0.9, 0.8, 0.7]
    t2_values = [1.0, 1.1, 1.2, 1.3]
    loss_values = [[0.628705], [0.45677936], [0.34298314], [0.26295574]]
    loss_out = []
    for t1_value, t2_value in zip(t1_values, t2_values):
      loss_out.append(loss.bi_tempered_logistic_loss(
          activations, labels, t1_value, t2_value, num_iters=5))
    npt.assert_allclose(loss_values, loss_out, atol=1e-5)
  def test_tempered_softmax(self):
    # Test softmax function with different temperatures.
    activations = jnp.array(
        [[-0.5, 0.1, 2.0], [0.1, 1.5, -5.0], [4.0, -3.0, -6.0]])
    # Test with temperature = 1.0, which should recover regular
    # softmax probabilities.
    softmax_probabilities_t_1 = loss.tempered_softmax(
        activations, t=1.0)
    vanilla_softmax_probabilties = jax.nn.softmax(activations)
    npt.assert_allclose(vanilla_softmax_probabilties,
                        softmax_probabilities_t_1, atol=1e-6)
    softmax_probabilities_t_4 = loss.tempered_softmax(
        activations, t=4.0)
    expected_softmax_probabilities_t_4 = jnp.array([[
        0.3205458, 0.32714278, 0.3523402
    ], [0.3430056, 0.36491093,
        0.29220778], [0.41369352, 0.30534995, 0.28299212]])
    npt.assert_allclose(expected_softmax_probabilities_t_4,
                        softmax_probabilities_t_4, atol=1e-6)
  def test_tempered_sigmoid(self):
    # Test sigmoid function with different temperatures.
    activations = jnp.array([0.0, 3.0, 6.0])
    # Test with temperature = 1.0, which should recover regular
    # sigmoid probabilities.
    sigmoid_probabilities_t_1 = loss.tempered_sigmoid(
        activations, t=1.0)
    vanilla_softmax_probabilties = jax.nn.sigmoid(activations)
    npt.assert_allclose(vanilla_softmax_probabilties,
                        sigmoid_probabilities_t_1, atol=1e-6)
    sigmoid_probabilities_t_4 = loss.tempered_sigmoid(
        activations, t=4.0)
    expected_sigmoid_probabilities_t_4 = jnp.array([0.5, 0.58516014, 0.6421035])
    npt.assert_allclose(expected_sigmoid_probabilities_t_4,
                        sigmoid_probabilities_t_4, atol=1e-6)
# Entry point: run all tests in this module via absltest.
if __name__ == "__main__":
  absltest.main()
| python | Apache-2.0 | 1c65c7770a3aa76a9515c67f15cfdbaad0e8cf88 | 2026-01-05T07:13:09.810991Z | false |
google/bi-tempered-loss | https://github.com/google/bi-tempered-loss/blob/1c65c7770a3aa76a9515c67f15cfdbaad0e8cf88/tensorflow/loss.py | tensorflow/loss.py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Robust Bi-Tempered Logistic Loss Based on Bregman Divergences.
Source: https://bit.ly/3jSol8T
"""
import functools
import tensorflow as tf
def for_loop(num_iters, body, initial_args):
  """Runs a simple Python-level for-loop with given body and initial_args.

  Each iteration calls ``body(*args)`` with the (unpacked) output of the
  previous iteration, starting from ``initial_args``.

  Fix: the original referenced ``outputs`` before assignment when
  ``num_iters == 0`` (NameError); now the initial args are returned
  unchanged in that case. Behavior for ``num_iters >= 1`` is identical.

  Args:
    num_iters: Maximum number of iterations (>= 0).
    body: Body of the for-loop; must return a sequence usable as the next
      iteration's argument list.
    initial_args: Args to the body for the first iteration.
  Returns:
    Output of the final iteration, or `initial_args` if `num_iters` is 0.
  """
  outputs = initial_args
  for _ in range(num_iters):
    outputs = body(*outputs)
  return outputs
def log_t(u, t):
  """Tempered logarithm: (u^(1-t) - 1) / (1 - t); natural log at t = 1."""
  def _generalized_log():
    return (u**(1.0 - t) - 1.0) / (1.0 - t)
  # tf.cond keeps both branches lazy, so the t == 1 singularity is avoided.
  return tf.cond(tf.equal(t, 1.0), lambda: tf.math.log(u), _generalized_log)
def exp_t(u, t):
  """Tempered exponential: relu(1 + (1-t)u)^(1/(1-t)); exp at t = 1."""
  def _generalized_exp():
    return tf.nn.relu(1.0 + (1.0 - t) * u)**(1.0 / (1.0 - t))
  # tf.cond keeps both branches lazy, so the t == 1 singularity is avoided.
  return tf.cond(tf.equal(t, 1.0), lambda: tf.math.exp(u), _generalized_exp)
def compute_normalization_fixed_point(activations, t, num_iters=5):
  """Returns the normalization value for each example (t > 1.0).

  Iterates the fixed-point map from the bi-tempered loss paper: rescale the
  (max-shifted) activations by the current partition value until
  exp_t(activations - Z, t) sums to one.

  Args:
    activations: A multi-dimensional tensor with last dimension `num_classes`.
    t: Temperature 2 (> 1.0 for tail heaviness).
    num_iters: Number of iterations to run the method.
  Return: A tensor of same rank as activation with the last dimension being 1.
  """
  # Shift by the per-example max for numerical stability.
  mu = tf.reduce_max(activations, -1, True)
  normalized_activations_step_0 = activations - mu
  shape_normalized_activations = tf.shape(normalized_activations_step_0)
  def iter_body(i, normalized_activations):
    logt_partition = tf.reduce_sum(
        exp_t(normalized_activations, t), -1, True)
    # Each step rescales the ORIGINAL shifted activations (step 0), not the
    # previous iterate -- this is the fixed-point update from the paper.
    normalized_activations_t = tf.reshape(
        normalized_activations_step_0 * tf.pow(logt_partition, 1.0 - t),
        shape_normalized_activations)
    return [i + 1, normalized_activations_t]
  _, normalized_activations_t = for_loop(num_iters, iter_body,
                                         [0, normalized_activations_step_0])
  logt_partition = tf.reduce_sum(
      exp_t(normalized_activations_t, t), -1, True)
  # Undo the max shift when reporting the normalizer.
  return -log_t(1.0 / logt_partition, t) + mu
def compute_normalization_binary_search(activations, t, num_iters=10):
  """Returns the normalization value for each example (t < 1.0).

  Bisects on the log_t-partition value Z until exp_t(activations - Z, t)
  sums to (approximately) one for every example.

  Args:
    activations: A multi-dimensional tensor with last dimension `num_classes`.
    t: Temperature 2 (< 1.0 for finite support).
    num_iters: Number of iterations to run the method.
  Return: A tensor of same rank as activation with the last dimension being 1.
  """
  # Shift by the per-example max for numerical stability.
  mu = tf.reduce_max(activations, -1, True)
  normalized_activations = activations - mu
  shape_activations = tf.shape(activations)
  # Number of classes whose exp_t is nonzero (inside the finite support).
  effective_dim = tf.cast(
      tf.reduce_sum(
          tf.cast(
              tf.greater(normalized_activations, -1.0 / (1.0 - t)), tf.int32),
          -1,
          True), tf.float32)
  shape_partition = tf.concat([shape_activations[:-1], [1]], 0)
  # Bracket: Z = 0 gives sum >= 1; the upper bound gives sum <= 1.
  lower = tf.zeros(shape_partition)
  upper = -log_t(1.0 / effective_dim, t) * tf.ones(shape_partition)
  def iter_body(i, lower, upper):
    logt_partition = (upper + lower)/2.0
    sum_probs = tf.reduce_sum(exp_t(
        normalized_activations - logt_partition, t), -1, True)
    # update == 1 where the midpoint overshoots (sum < 1): move upper down;
    # elsewhere move lower up.
    update = tf.cast(tf.less(sum_probs, 1.0), tf.float32)
    lower = tf.reshape(lower * update + (1.0 - update) * logt_partition,
                       shape_partition)
    upper = tf.reshape(upper * (1.0 - update) + update * logt_partition,
                       shape_partition)
    return [i + 1, lower, upper]
  _, lower, upper = for_loop(num_iters, iter_body, [0, lower, upper])
  logt_partition = (upper + lower)/2.0
  # Undo the max shift when reporting the normalizer.
  return logt_partition + mu
def compute_normalization(activations, t, num_iters=5):
  """Returns the per-example log_t-partition (normalization) value.

  Dispatches on the temperature: bisection for t < 1 (finite support),
  fixed-point iteration otherwise (tail heaviness).

  Args:
    activations: A multi-dimensional tensor with last dimension `num_classes`.
    t: Temperature 2 (< 1.0 for finite support, > 1.0 for tail heaviness).
    num_iters: Number of iterations to run the method.
  Return: A tensor of same rank as activation with the last dimension being 1.
  """
  return tf.cond(
      tf.less(t, 1.0),
      lambda: compute_normalization_binary_search(activations, t, num_iters),
      lambda: compute_normalization_fixed_point(activations, t, num_iters))
def _internal_bi_tempered_logistic_loss(activations, labels, t1, t2):
  """Reference (non custom-gradient) implementation of the bi-tempered loss.

  Used for gradient checking against the custom-gradient version.

  Args:
    activations: A multi-dimensional tensor with last dimension `num_classes`.
    labels: A tensor of the same shape as `activations` holding (soft) label
      probabilities.
    t1: Temperature 1 (< 1.0 for boundedness).
    t2: Temperature 2 (> 1.0 for tail heaviness).
  Returns:
    A loss tensor for robust loss.
  """
  if t2 == 1.0:
    # t2 = 1: the normalizer is the ordinary log-sum-exp.
    normalization_constants = tf.math.log(
        tf.reduce_sum(tf.math.exp(activations), -1, True))
    if t1 == 1.0:
      # t1 = t2 = 1 reduces to plain softmax cross entropy plus the
      # (constant w.r.t. activations) label-entropy term.
      return normalization_constants + tf.reduce_sum(
          tf.multiply(labels, tf.math.log(labels + 1e-10) - activations), -1)
    else:
      shifted_activations = tf.math.exp(activations - normalization_constants)
      one_minus_t1 = (1.0 - t1)
      one_minus_t2 = 1.0
  else:
    one_minus_t1 = (1.0 - t1)
    one_minus_t2 = (1.0 - t2)
    normalization_constants = compute_normalization(
        activations, t2, num_iters=5)
    # exp_t base for t2 != 1; relu gives finite support when t2 < 1.
    shifted_activations = tf.nn.relu(1.0 + one_minus_t2 *
                                     (activations - normalization_constants))
  if t1 == 1.0:
    # KL-style form against the tempered probabilities
    # (shifted_activations ** (1 / (1 - t2))).
    return tf.reduce_sum(
        tf.multiply(
            tf.math.log(labels + 1e-10) -
            tf.math.log(tf.pow(shifted_activations, 1.0 / one_minus_t2)),
            labels), -1)
  else:
    # General Bregman-divergence form with beta = 2 - t1.
    beta = 1.0 + one_minus_t1
    logt_probs = (tf.pow(shifted_activations, one_minus_t1 / one_minus_t2) -
                  1.0) / one_minus_t1
    return tf.reduce_sum(
        tf.multiply(log_t(labels, t1) - logt_probs, labels) - 1.0 / beta *
        (tf.pow(labels, beta) -
         tf.pow(shifted_activations, beta / one_minus_t2)), -1)
def tempered_sigmoid(activations, t, num_iters=5):
  """Tempered sigmoid function.

  Implemented as a two-class tempered softmax where class 0 has a fixed
  activation of 0 and class 1 carries the input activation.

  Args:
    activations: Activations for the positive class for binary classification.
    t: Temperature tensor > 0.0 (t = 1.0 recovers the ordinary sigmoid).
    num_iters: Number of iterations to run the normalization method.
  Returns:
    A probabilities tensor with the same shape as `activations`.
  """
  t = tf.convert_to_tensor(t)
  original_shape = tf.shape(activations)
  flat = tf.reshape(activations, [-1, 1])
  two_class_logits = tf.concat([tf.zeros_like(flat), flat], 1)
  # For t == 1 the normalizer is plain log-sum-exp; otherwise use the
  # iterative normalization. tf.cond keeps both branches lazy.
  log_partition = tf.cond(
      tf.equal(t, 1.0),
      lambda: tf.math.log(
          tf.reduce_sum(tf.math.exp(two_class_logits), -1, True)),
      lambda: compute_normalization(two_class_logits, t, num_iters))
  two_class_probs = exp_t(two_class_logits - log_partition, t)
  positive_probs = tf.split(two_class_probs, 2, axis=1)[1]
  return tf.reshape(positive_probs, original_shape)
def tempered_softmax(activations, t, num_iters=5):
  """Tempered softmax function.

  Args:
    activations: A multi-dimensional tensor with last dimension `num_classes`.
    t: Temperature tensor > 0.0 (t = 1.0 recovers the ordinary softmax).
    num_iters: Number of iterations to run the normalization method.
  Returns:
    A probabilities tensor with the same shape as `activations`.
  """
  t = tf.convert_to_tensor(t)
  # For t == 1 the normalizer is plain log-sum-exp; otherwise use the
  # iterative normalization. tf.cond keeps both branches lazy.
  log_partition = tf.cond(
      tf.equal(t, 1.0),
      lambda: tf.math.log(tf.reduce_sum(tf.math.exp(activations), -1, True)),
      lambda: compute_normalization(activations, t, num_iters))
  return exp_t(activations - log_partition, t)
def bi_tempered_binary_logistic_loss(activations,
                                     labels,
                                     t1,
                                     t2,
                                     label_smoothing=0.0,
                                     num_iters=5):
  """Bi-Tempered binary logistic loss.

  Reduces the binary case to the multiclass loss by building an equivalent
  two-class problem: class 0 carries a fixed activation of 0 and class 1
  carries the input activation.

  Args:
    activations: A tensor containing activations for class 1.
    labels: A tensor with shape and dtype as activations.
    t1: Temperature 1 (< 1.0 for boundedness).
    t2: Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).
    label_smoothing: Label smoothing
    num_iters: Number of iterations to run the method.
  Returns:
    A loss tensor with the same shape as `labels`.
  """
  with tf.name_scope('binary_bitempered_logistic'):
    t1 = tf.convert_to_tensor(t1)
    t2 = tf.convert_to_tensor(t2)
    original_shape = tf.shape(labels)
    flat_labels = tf.reshape(labels, [-1, 1])
    flat_activations = tf.reshape(activations, [-1, 1])
    two_class_labels = tf.concat([1.0 - flat_labels, flat_labels], 1)
    two_class_logits = tf.concat(
        [tf.zeros_like(flat_activations), flat_activations], 1)
    per_example_loss = bi_tempered_logistic_loss(
        two_class_logits, two_class_labels, t1, t2, label_smoothing, num_iters)
    return tf.reshape(per_example_loss, original_shape)
def bi_tempered_logistic_loss(activations,
                              labels,
                              t1,
                              t2,
                              label_smoothing=0.0,
                              num_iters=5):
  """Bi-Tempered Logistic Loss with custom gradient.

  Args:
    activations: A multi-dimensional tensor with last dimension `num_classes`.
    labels: A tensor with shape and dtype as activations.
    t1: Temperature 1 (< 1.0 for boundedness).
    t2: Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).
    label_smoothing: Label smoothing parameter between [0, 1).
    num_iters: Number of iterations to run the method.
  Returns:
    A loss tensor.
  """
  with tf.name_scope('bitempered_logistic'):
    t1 = tf.convert_to_tensor(t1)
    t2 = tf.convert_to_tensor(t2)
    if label_smoothing > 0.0:
      # Blend labels toward the uniform distribution.
      num_classes = tf.cast(tf.shape(labels)[-1], tf.float32)
      labels = (
          1 - num_classes /
          (num_classes - 1) * label_smoothing) * labels + label_smoothing / (
              num_classes - 1)
    @tf.custom_gradient
    def _custom_gradient_bi_tempered_logistic_loss(activations):
      """Bi-Tempered Logistic Loss with custom gradient.

      Closes over `labels`, `t1`, `t2`, `num_iters` from the outer scope.

      Args:
        activations: A multi-dimensional tensor with last dim `num_classes`.
      Returns:
        A loss tensor, grad.
      """
      with tf.name_scope('gradient_bitempered_logistic'):
        probabilities = tempered_softmax(activations, t2, num_iters)
        loss_values = tf.multiply(
            labels,
            log_t(labels + 1e-10, t1) -
            log_t(probabilities, t1)) - 1.0 / (2.0 - t1) * (
                tf.pow(labels, 2.0 - t1) - tf.pow(probabilities, 2.0 - t1))
        def grad(d_loss):
          """Explicit gradient calculation.

          Args:
            d_loss: Infinitesimal change in the loss value.
          Returns:
            Loss gradient.
          """
          delta_probs = probabilities - labels
          # Exponent t2 - t1 damps low-probability classes.
          forget_factor = tf.pow(probabilities, t2 - t1)
          delta_probs_times_forget_factor = tf.multiply(delta_probs,
                                                        forget_factor)
          delta_forget_sum = tf.reduce_sum(
              delta_probs_times_forget_factor, -1, True)
          # Escort distribution: probabilities tilted by t2, renormalized.
          escorts = tf.pow(probabilities, t2)
          escorts = escorts / tf.reduce_sum(escorts, -1, True)
          derivative = delta_probs_times_forget_factor - tf.multiply(
              escorts, delta_forget_sum)
          return tf.multiply(d_loss, derivative)
        return loss_values, grad
    def reduced_loss(activations):
      # Sum the per-class contributions into a per-example loss.
      return tf.reduce_sum(
          _custom_gradient_bi_tempered_logistic_loss(activations), -1)
    # Exact fallback to softmax cross entropy at t1 = t2 = 1.
    loss_values = tf.cond(
        tf.math.logical_and(tf.equal(t1, 1.0), tf.equal(t2, 1.0)),
        functools.partial(
            tf.nn.softmax_cross_entropy_with_logits,
            labels=labels,
            logits=activations), functools.partial(reduced_loss, activations))
    return loss_values
def sparse_bi_tempered_logistic_loss(activations, labels, t1, t2, num_iters=5):
  """Sparse Bi-Tempered Logistic Loss with custom gradient.

  Same loss as `bi_tempered_logistic_loss` but with integer class labels
  instead of one-hot/soft labels.

  Args:
    activations: A multi-dimensional tensor with last dimension `num_classes`.
    labels: A tensor with dtype of int32 holding class indices.
    t1: Temperature 1 (< 1.0 for boundedness).
    t2: Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).
    num_iters: Number of iterations to run the method.
  Returns:
    A loss tensor.
  """
  with tf.name_scope('sparse_bitempered_logistic'):
    t1 = tf.convert_to_tensor(t1)
    t2 = tf.convert_to_tensor(t2)
    num_classes = tf.shape(activations)[-1]
    @tf.custom_gradient
    def _custom_gradient_sparse_bi_tempered_logistic_loss(activations):
      """Sparse Bi-Tempered Logistic Loss with custom gradient.

      Closes over `labels`, `t1`, `t2`, `num_iters`, `num_classes`.

      Args:
        activations: A multi-dimensional tensor with last dim `num_classes`.
      Returns:
        A loss tensor, grad.
      """
      with tf.name_scope('gradient_sparse_bitempered_logistic'):
        probabilities = tempered_softmax(activations, t2, num_iters)
        # Pick out each example's true-class probability by turning the
        # labels into a one-hot mask and gathering where it is nonzero.
        # TODO(eamid): Replace one hot with gather.
        loss_values = -log_t(
            tf.reshape(
                tf.gather_nd(probabilities,
                             tf.where(tf.one_hot(labels, num_classes))),
                tf.shape(activations)[:-1]), t1) - 1.0 / (2.0 - t1) * (
                    1.0 - tf.reduce_sum(tf.pow(probabilities, 2.0 - t1), -1))
        def grad(d_loss):
          """Explicit gradient calculation.

          Args:
            d_loss: Infinitesimal change in the loss value.
          Returns:
            Loss gradient.
          """
          delta_probs = probabilities - tf.one_hot(labels, num_classes)
          # Exponent t2 - t1 damps low-probability classes.
          forget_factor = tf.pow(probabilities, t2 - t1)
          delta_probs_times_forget_factor = tf.multiply(delta_probs,
                                                        forget_factor)
          delta_forget_sum = tf.reduce_sum(
              delta_probs_times_forget_factor, -1, True)
          # Escort distribution: probabilities tilted by t2, renormalized.
          escorts = tf.pow(probabilities, t2)
          escorts = escorts / tf.reduce_sum(escorts, -1, True)
          derivative = delta_probs_times_forget_factor - tf.multiply(
              escorts, delta_forget_sum)
          return tf.multiply(d_loss, derivative)
        return loss_values, grad
    # Exact fallback to sparse softmax cross entropy at t1 = t2 = 1.
    loss_values = tf.cond(
        tf.math.logical_and(tf.equal(t1, 1.0), tf.equal(t2, 1.0)),
        functools.partial(tf.nn.sparse_softmax_cross_entropy_with_logits,
                          labels=labels, logits=activations),
        functools.partial(_custom_gradient_sparse_bi_tempered_logistic_loss,
                          activations))
    return loss_values
| python | Apache-2.0 | 1c65c7770a3aa76a9515c67f15cfdbaad0e8cf88 | 2026-01-05T07:13:09.810991Z | false |
google/bi-tempered-loss | https://github.com/google/bi-tempered-loss/blob/1c65c7770a3aa76a9515c67f15cfdbaad0e8cf88/tensorflow/loss_test.py | tensorflow/loss_test.py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for bi-tempered loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl.testing import absltest
import tensorflow as tf
import loss
python_version = "PY3"
class LossTest(tf.test.TestCase):
  """Eager-mode unit tests for the TensorFlow bi-tempered loss module."""
  def test_normalization(self):
    """Test the normalization constant."""
    activations = tf.random.normal(shape=[100, 50000])
    # Near t = 1 and well away from it, exp_t(a - Z, t) must sum to one.
    for t in [0.99, 1.01]:
      normalization_constants = loss.compute_normalization(
          activations, t, num_iters=20)
      self.assertEqual(normalization_constants.shape, [100, 1])
      probabilities = tf.reduce_sum(
          loss.exp_t(activations - normalization_constants, t), -1)
      self.assertAllClose(probabilities.numpy(), [1.0] * 100, atol=1e-5)
    for t in [0.1, 2.0]:
      normalization_constants = loss.compute_normalization(
          activations, t, num_iters=20)
      probabilities = tf.reduce_sum(
          loss.exp_t(activations - normalization_constants, t), -1)
      self.assertAllClose(probabilities.numpy(), [1.0] * 100, atol=1e-5)
  def test_limit_case_logistic_loss(self):
    """Test for checking if t1 = t2 = 1.0 yields the logistic loss."""
    labels = tf.constant([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    activations = tf.random.normal(shape=[3, 3])
    actual_loss = loss.bi_tempered_logistic_loss(activations, labels, 1.0,
                                                 1.0)
    logistic_loss = tf.nn.softmax_cross_entropy_with_logits(
        logits=activations, labels=labels)
    actual_loss_out, logistic_loss_out = (
        actual_loss.numpy(), logistic_loss.numpy())
    self.assertAllClose(actual_loss_out, logistic_loss_out)
  def test_loss_value(self):
    """Test the loss based on precomputed values."""
    labels = tf.constant([[0.2, 0.3, 0.5], [0.6, 0.3, 0.1], [0.2, 0.8, 0.0]])
    activations = [[-0.5, 0.1, 2.0], [0.1, 1.5, -5.0], [4.0, -3.0, -6.0]]
    actual_loss = loss.bi_tempered_logistic_loss(activations, labels, 0.5,
                                                 1.5)
    self.assertAllClose(actual_loss.numpy(),
                        [0.02301914, 0.18972909, 0.93874922])
    actual_loss = loss.bi_tempered_logistic_loss(activations, labels, 0.5,
                                                 0.8, num_iters=20)
    self.assertAllClose(actual_loss.numpy(),
                        [0.21646356, 0.41836615, 1.33997854])
  def test_constant_shift(self):
    """Test if adding a constant to all activations is vacuous."""
    labels = tf.constant([[0.2, 0.3, 0.5], [0.4, 0.4, 0.2], [0.7, 0.2, 0.1]])
    activations = tf.random.normal(shape=[3, 3])
    bias = tf.random.normal(shape=[3, 1])
    for t2 in [0.8, 1.2]:
      actual_loss = loss.bi_tempered_logistic_loss(
          activations, labels, 0.5, t2)
      shifted_loss = loss.bi_tempered_logistic_loss(
          activations + bias, labels, 0.5, t2)
      actual_loss_out, shifted_loss_out = (
          actual_loss.numpy(), shifted_loss.numpy())
      self.assertAllClose(actual_loss_out, shifted_loss_out)
  def test_gradient_error(self):
    """Compare custom gradient with tf.GradientTape."""
    labels = tf.constant([[0.4, 0.3, 0.3], [0.8, 0.1, 0.1], [0.0, 0.0, 1.0],
                          [0.0, 1.0, 0.0]])
    activations = tf.random.normal(shape=[4, 3])
    # The custom gradient must match autodiff through the reference
    # implementation for several temperature pairs.
    for t1, t2 in [[0.5, 1.0], [1.0, 1.5], [0.5, 1.5]]:
      with tf.GradientTape(persistent=True) as tape:
        tape.watch(activations)
        internal_loss = loss._internal_bi_tempered_logistic_loss(
            activations, labels, t1, t2)
        actual_loss = loss.bi_tempered_logistic_loss(
            activations, labels, t1, t2)
      numerical_gradient = tape.gradient(internal_loss, activations)
      actual_gradient = tape.gradient(actual_loss, activations)
      internal_loss_out, actual_loss_out = (
          internal_loss.numpy(), actual_loss.numpy())
      numerical_gradient_out, actual_gradient_out = (
          numerical_gradient.numpy(), actual_gradient.numpy())
      self.assertEqual(actual_gradient_out.shape, (4, 3))
      self.assertAllClose(actual_loss_out, internal_loss_out, atol=1e-5)
      self.assertAllClose(
          actual_gradient_out, numerical_gradient_out, atol=1e-4)
  def test_label_smoothing(self):
    """Test label smoothing."""
    labels = tf.constant([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    activations = [[-0.5, 0.1, 2.0], [0.1, 1.5, -5.0], [4.0, -3.0, -6.0]]
    actual_loss = loss.bi_tempered_logistic_loss(
        activations, labels, 0.5, 1.5, label_smoothing=0.1)
    actual_loss_out = actual_loss.numpy()
    self.assertAllClose(
        actual_loss_out, [0.76652711, 0.08627685, 1.35443510], atol=1e-5)
  def test_binary_logistic_loss(self):
    """Test binary logistic loss."""
    labels = tf.constant([1.0, 0.0])
    activations = [0.0, 0.0]
    actual_loss = loss.bi_tempered_binary_logistic_loss(activations, labels,
                                                        1.0, 1.0)
    actual_loss_out = actual_loss.numpy()
    # log(2) for both examples at zero activation.
    self.assertAllClose(actual_loss_out, [0.69314718, 0.69314718], atol=1e-5)
  def test_dynamic_temperatures(self):
    """Test changing temperatures dynamically."""
    labels = tf.constant([[0.2, 0.5, 0.3]])
    activations = [[-0.5, 0.1, 2.0]]
    actual_loss = functools.partial(
        loss.bi_tempered_logistic_loss,
        activations=activations,
        labels=labels,
        num_iters=5)
    t1_values = [1.0, 0.9, 0.8, 0.7]
    t2_values = [1.0, 1.1, 1.2, 1.3]
    loss_values = [[1.6583576], [0.45677936], [0.34298314], [0.26295574]]
    loss_out = []
    for t1_value, t2_value in zip(t1_values, t2_values):
      loss_out.append(actual_loss(t1=t1_value, t2=t2_value).numpy())
    self.assertAllClose(loss_values, loss_out, atol=1e-5)
  def test_sparse_loss(self):
    """Test int labels."""
    labels = tf.constant([0, 2, 1, 0])
    activations = [[-0.5, 0.1, 2.0], [0.1, 1.5, -5.0], [4.0, -3.0, -6.0],
                   [-1.5, 0.7, 5.2]]
    actual_loss = loss.bi_tempered_logistic_loss(activations,
                                                 tf.one_hot(labels, 3), 0.5,
                                                 1.5)
    sparse_loss = loss.sparse_bi_tempered_logistic_loss(activations, labels,
                                                        0.5, 1.5)
    actual_loss_out = actual_loss.numpy()
    sparse_loss_out = sparse_loss.numpy()
    self.assertAllClose(actual_loss_out, sparse_loss_out)
    # Also check a rank-3 activations tensor (batched sequences of logits).
    labels = tf.constant([[0, 2], [1, 0]])
    activations = [[[-0.5, 0.1, 2.0], [0.1, 1.5, -5.0]],
                   [[4.0, -3.0, -6.0], [-1.5, 0.7, 5.2]]]
    actual_loss = loss.bi_tempered_logistic_loss(activations,
                                                 tf.one_hot(labels, 3), 0.5,
                                                 1.5)
    sparse_loss = loss.sparse_bi_tempered_logistic_loss(activations, labels,
                                                        0.5, 1.5)
    actual_loss_out = actual_loss.numpy()
    sparse_loss_out = sparse_loss.numpy()
    self.assertAllClose(actual_loss_out, sparse_loss_out)
  def test_tempered_softmax(self):
    # Test softmax function with different temperatures.
    activations = [[-0.5, 0.1, 2.0], [0.1, 1.5, -5.0], [4.0, -3.0, -6.0]]
    # Test with temperature = 1.0, which should recover regular
    # softmax probabilities.
    softmax_probabilities_t_1 = loss.tempered_softmax(
        activations, t=1.0).numpy()
    vanilla_softmax_probabilties = tf.nn.softmax(activations).numpy()
    self.assertAllClose(vanilla_softmax_probabilties,
                        softmax_probabilities_t_1)
    softmax_probabilities_t_4 = loss.tempered_softmax(
        activations, t=4.0).numpy()
    expected_softmax_probabilities_t_4 = ([[
        0.3205458, 0.32714278, 0.3523402
    ], [0.3430056, 0.36491093,
        0.29220778], [0.41369352, 0.30534995, 0.28299212]])
    self.assertAllClose(expected_softmax_probabilities_t_4,
                        softmax_probabilities_t_4)
  def test_tempered_sigmoid(self):
    # Test sigmoid function with different temperatures.
    activations = [0.0, 3.0, 6.0]
    # Test with temperature = 1.0, which should recover regular
    # sigmoid probabilities.
    sigmoid_probabilities_t_1 = loss.tempered_sigmoid(
        activations, t=1.0).numpy()
    vanilla_softmax_probabilties = tf.nn.sigmoid(activations).numpy()
    self.assertAllClose(vanilla_softmax_probabilties,
                        sigmoid_probabilities_t_1)
    sigmoid_probabilities_t_4 = loss.tempered_sigmoid(
        activations, t=4.0).numpy()
    expected_sigmoid_probabilities_t_4 = [0.5, 0.58516014, 0.6421035]
    self.assertAllClose(expected_sigmoid_probabilities_t_4,
                        sigmoid_probabilities_t_4)
# Entry point: run all tests in this module via absltest.
if __name__ == "__main__":
  absltest.main()
| python | Apache-2.0 | 1c65c7770a3aa76a9515c67f15cfdbaad0e8cf88 | 2026-01-05T07:13:09.810991Z | false |
SparkSharly/DL_for_xss | https://github.com/SparkSharly/DL_for_xss/blob/aaf555a4f53bf136cf5fadbe35ac294894f3bbe1/SVM.py | SVM.py | import time
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import precision_score,recall_score
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
from utils import GeneSeg
import csv,random,pickle
batch_size=50
maxlen=200
vec_dir="file\\word2vec.pickle"
epochs_num=1
log_dir="log\\MLP.log"
model_dir="file\\SVM_model"
def pre_process():
with open(vec_dir,"rb") as f :
word2vec=pickle.load(f)
dictionary=word2vec["dictionary"]
embeddings=word2vec["embeddings"]
reverse_dictionary = word2vec["reverse_dictionary"]
xssed_data=[]
normal_data=[]
with open("data\\xssed.csv","r",encoding="utf-8") as f:
reader = csv.DictReader(f, fieldnames=["payload"])
for row in reader:
payload=row["payload"]
word=GeneSeg(payload)
xssed_data.append(word)
with open("data\\normal_payload.csv","r",encoding="utf-8") as f:
reader = csv.DictReader(f, fieldnames=["payload"])
for row in reader:
payload=row["payload"]
word=GeneSeg(payload)
normal_data.append(word)
xssed_num=len(xssed_data)
normal_num=len(normal_data)
xssed_labels=[1]*xssed_num
normal_labels=[0]*normal_num
datas=xssed_data+normal_data
labels=xssed_labels+normal_labels
def to_index(data):
d_index=[]
for word in data:
if word in dictionary.keys():
d_index.append(dictionary[word])
else:
d_index.append(dictionary["UNK"])
return d_index
datas_index=[to_index(data) for data in datas]
datas_index=pad_sequences(datas_index,value=-1,maxlen=maxlen)
rand=random.sample(range(len(datas_index)),len(datas_index))
datas=[datas_index[index] for index in rand]
labels=[labels[index] for index in rand]
datas_embed=[]
dims=len(embeddings["UNK"])
n=0
for data in datas:
data_embed = []
for d in data:
if d != -1:
data_embed.extend(embeddings[reverse_dictionary[d]])
else:
data_embed.extend([0.0] * dims)
datas_embed.append(data_embed)
n+=1
if n%1000 ==0:
print(n)
train_datas,test_datas,train_labels,test_labels=train_test_split(datas_embed,labels,test_size=0.3)
return train_datas,test_datas,train_labels,test_labels
if __name__=="__main__":
train_datas, test_datas, train_labels, test_labels=pre_process()
print("Start Train Job! ")
start = time.time()
model=LinearSVC()
# model = SVC(C=1.0, kernel="linear")
model.fit(train_datas,train_labels)
# model.save(model_dir)
end = time.time()
print("Over train job in %f s" % (end - start))
print("Start Test Job!")
start=time.time()
pre=model.predict(test_datas)
end=time.time()
print("Over test job in %s s"%(end-start))
precision = precision_score(test_labels, pre)
recall = recall_score(test_labels, pre)
print("Precision score is :", precision)
print("Recall score is :", recall)
with open(model_dir,"wb") as f:
pickle.dump(model,f,protocol=2)
print("wirte to ",model_dir) | python | MIT | aaf555a4f53bf136cf5fadbe35ac294894f3bbe1 | 2026-01-05T07:13:06.526795Z | false |
SparkSharly/DL_for_xss | https://github.com/SparkSharly/DL_for_xss/blob/aaf555a4f53bf136cf5fadbe35ac294894f3bbe1/word2vec.py | word2vec.py | import nltk,re,csv,random,math,pickle,time
from urllib.parse import unquote
from collections import Counter,deque
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from utils import GeneSeg
learning_rate=0.1
vocabulary_size=3000
batch_size=128
embedding_size=128
num_skips=4
skip_window=3
valid_size=16
valid_window=100
top_k=8
num_sampled=64
num_steps=100001
plot_only=100
log_dir="word2vec.log"
plt_dir="file\\word2vec.png"
vec_dir="file\\word2vec.pickle"
start=time.time()
words=[]
with open("data\\xssed.csv","r",encoding="utf-8") as f:
reader=csv.DictReader(f,fieldnames=["payload"])
for row in reader:
payload=row["payload"]
word=GeneSeg(unquote(payload))
words+=word
print("words size:",len(words))
#构建数据集
def build_dataset(words):
count=[["UNK",-1]]
counter=Counter(words)
count.extend(counter.most_common(vocabulary_size-1))
dictionary={}
for word,_ in count:
dictionary[word]=len(dictionary)
data=[]
for word in words:
if word in dictionary.keys():
data.append(dictionary[word])
else:
data.append(dictionary["UNK"])
count[0][1]+=1
reverse_dictionary=dict(zip(dictionary.values(),dictionary.keys()))
return count,data,dictionary,reverse_dictionary
count,data,dictionary,reverse_dictionary=build_dataset(words)
#生成训练Batch
data_index=0
def generate_batch(batch_size,num_skips,skip_window):
'''
:param batch_size: 生成的batch大小,必须为skip_window的整数倍
:param num_skips: 对每个skip_window生成样本数量,不能大于skip_window*2
:param skip_window: 目标单词取样本的窗口大小
:return:
'''
global data_index
assert batch_size%num_skips==0
assert num_skips<=skip_window*2
batch=np.ndarray(shape=(batch_size),dtype=np.int32)
labels=np.ndarray(shape=(batch_size,1),dtype=np.int32)
span=2*skip_window+1
buffer=deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index=(data_index+1)%len(data)
for i in range(batch_size//num_skips):
target=skip_window
target_to_avoid=[skip_window]
for j in range(num_skips):
while target in target_to_avoid:
target=random.randint(0,span-1)
target_to_avoid.append(target)
batch[i*num_skips+j]=buffer[skip_window]
labels[i*num_skips+j,0]=buffer[target]
buffer.append(data[data_index])
data_index=(data_index+1)%len(data)
return batch,labels
batch,labels=generate_batch(batch_size,num_skips,skip_window)
for i in range(100):
print(batch[i],reverse_dictionary[batch[i]],"->",labels[i,0],reverse_dictionary[labels[i,0]])
valid_examples=np.random.choice(valid_window,valid_size,replace=False)
graph=tf.Graph()
with graph.as_default():
with tf.name_scope("Inputs"):
train_inputs=tf.placeholder(tf.int32,shape=[batch_size])
train_labels=tf.placeholder(tf.int32,shape=[batch_size,1])
valid_dataset=tf.constant(valid_examples,dtype=tf.int32)
with tf.name_scope("Embeddings"):
embeddings=tf.Variable(
tf.random_uniform(shape=[vocabulary_size,embedding_size],minval=-1.0,maxval=1.0),name="embeddings"
)
embed=tf.nn.embedding_lookup(embeddings,train_inputs)
with tf.name_scope("nce_loss"):
nce_weight=tf.Variable(
tf.truncated_normal([vocabulary_size,embedding_size],stddev=1.0/math.sqrt(embedding_size)),name="nce_weights"
)
nce_biases=tf.Variable(tf.zeros([vocabulary_size]),name="nce_biases")
loss=tf.reduce_mean(tf.nn.nce_loss(weights=nce_weight,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
with tf.name_scope("Train"):
optimizer=tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss)
#标准化embeddings
norm=tf.sqrt(tf.reduce_sum(tf.square(embeddings),1,keep_dims=True))
normalized_embeddings=embeddings/norm
#计算验证数据与字典的相似性
valid_embeddings=tf.nn.embedding_lookup(normalized_embeddings,valid_dataset)
similarity=tf.matmul(
valid_embeddings,normalized_embeddings,transpose_b=True
)
#tf.summary.histogram("nce_weight", nce_weight)
#tf.summary.histogram("nce_biases", nce_biases)
tf.summary.scalar("loss", loss)
#tf.summary.histogram("normalized_embeddings",normalized_embeddings)
merged=tf.summary.merge_all()
init = tf.global_variables_initializer()
with tf.Session(graph=graph) as session:
init.run()
print("Initialized!")
average_loss=0
writer = tf.summary.FileWriter(log_dir, graph)
for step in range(num_steps):
batch_inputs,batch_labels=generate_batch(batch_size,num_skips,skip_window)
feed_dict={train_inputs:batch_inputs,train_labels:batch_labels}
loss_val,_,summary=session.run([loss,optimizer,merged],feed_dict=feed_dict)
writer.add_summary(summary,global_step=step)
average_loss+=loss_val
if step%2000==0:
if step>0:
average_loss/=2000
print("Average loss at step:",step,":",average_loss)
average_loss=0
if step%10000==0:
if step>0:
sim=similarity.eval()
for i in range(valid_size):
valid_word=reverse_dictionary[valid_examples[i]]
nearest=(-sim[i,:]).argsort()[1:top_k+1]
log_str="Nearest to %s:"%valid_word
for k in range(top_k):
close_word=reverse_dictionary[nearest[k]]
log_str="%s %s"%(log_str,close_word)
print(log_str)
final_embeddings=normalized_embeddings.eval()
writer.close()
print(final_embeddings)
def plot_with_labels(low_dim_embs,labels,filename=plt_dir):
plt.figure(figsize=(10,10))
for i,label in enumerate(labels):
x,y=low_dim_embs[i,:]
plt.scatter(x,y)
plt.annotate(label,xy=(x,y),xytext=(5,2),
textcoords="offset points",
ha="right",
va="bottom")
f_text="vocabulary_size=%d;batch_size=%d;embedding_size=%d;num_skips=%d;skip_window=%d;num_steps=%d"%(
vocabulary_size,batch_size,embedding_size,num_skips,skip_window,num_steps
)
plt.figtext(0.03,0.03,f_text,color="green",fontsize=10)
plt.show()
plt.savefig(filename)
tsne=TSNE(perplexity=30,n_components=2,init="pca",n_iter=5000)
low_dim_embs=tsne.fit_transform(final_embeddings[:plot_only,:])
labels=[reverse_dictionary[i]for i in range(plot_only)]
plot_with_labels(low_dim_embs,labels)
def save(dictionary,reverse_dictionary,final_embeddings):
word2vec={"dictionary":dictionary,"reverse_dictionary":reverse_dictionary,"embeddings":final_embeddings}
with open(vec_dir,"wb") as f:
pickle.dump(word2vec,f)
save(dictionary,reverse_dictionary,final_embeddings)
end=time.time()
print("Over job in ",end-start) | python | MIT | aaf555a4f53bf136cf5fadbe35ac294894f3bbe1 | 2026-01-05T07:13:06.526795Z | false |
SparkSharly/DL_for_xss | https://github.com/SparkSharly/DL_for_xss/blob/aaf555a4f53bf136cf5fadbe35ac294894f3bbe1/MLP.py | MLP.py | import time
from keras.models import Sequential
from keras.layers import Dense,InputLayer,Dropout,Flatten
from keras.callbacks import TensorBoard
from keras.optimizers import Adam
from keras.models import load_model
from processing import build_dataset
import numpy as np
from utils import init_session
from sklearn.metrics import precision_score,recall_score
init_session()
batch_size=500
epochs_num=1
log_dir="log\\MLP.log"
model_dir="file\\MLP_model"
def train(train_generator,train_size,input_num,dims_num):
print("Start Train Job! ")
start=time.time()
inputs=InputLayer(input_shape=(input_num,dims_num),batch_size=batch_size)
layer1=Dense(100,activation="relu")
layer2=Dense(20,activation="relu")
flatten=Flatten()
layer3=Dense(2,activation="softmax",name="Output")
optimizer=Adam()
model=Sequential()
model.add(inputs)
model.add(layer1)
model.add(Dropout(0.5))
model.add(layer2)
model.add(Dropout(0.5))
model.add(flatten)
model.add(layer3)
call=TensorBoard(log_dir=log_dir,write_grads=True,histogram_freq=1)
model.compile(optimizer,loss="categorical_crossentropy",metrics=["accuracy"])
model.fit_generator(train_generator,steps_per_epoch=train_size//batch_size,epochs=epochs_num,callbacks=[call])
# model.fit_generator(train_generator, steps_per_epoch=5, epochs=5, callbacks=[call])
model.save(model_dir)
end=time.time()
print("Over train job in %f s"%(end-start))
def test(model_dir,test_generator,test_size,input_num,dims_num,batch_size):
model=load_model(model_dir)
labels_pre=[]
labels_true=[]
batch_num=test_size//batch_size+1
steps=0
for batch,labels in test_generator:
if len(labels)==batch_size:
labels_pre.extend(model.predict_on_batch(batch))
else:
batch=np.concatenate((batch,np.zeros((batch_size-len(labels),input_num,dims_num))))
labels_pre.extend(model.predict_on_batch(batch)[0:len(labels)])
labels_true.extend(labels)
steps+=1
print("%d/%d batch"%(steps,batch_num))
labels_pre=np.array(labels_pre).round()
def to_y(labels):
y=[]
for i in range(len(labels)):
if labels[i][0]==1:
y.append(0)
else:
y.append(1)
return y
y_true=to_y(labels_true)
y_pre=to_y(labels_pre)
precision=precision_score(y_true,y_pre)
recall=recall_score(y_true,y_pre)
print("Precision score is :",precision)
print("Recall score is :",recall)
if __name__=="__main__":
train_generator, test_generator, train_size, test_size, input_num, dims_num=build_dataset(batch_size)
train(train_generator,train_size,input_num,dims_num)
test(model_dir,test_generator,test_size,input_num,dims_num,batch_size)
| python | MIT | aaf555a4f53bf136cf5fadbe35ac294894f3bbe1 | 2026-01-05T07:13:06.526795Z | false |
SparkSharly/DL_for_xss | https://github.com/SparkSharly/DL_for_xss/blob/aaf555a4f53bf136cf5fadbe35ac294894f3bbe1/LSTM.py | LSTM.py | import time
from keras.models import Sequential
from keras.layers import Dense,InputLayer,Dropout,LSTM
from keras.callbacks import TensorBoard
from keras.optimizers import Adam
from processing import build_dataset
from utils import init_session
from MLP import test
init_session()
batch_size=350
epochs_num=1
process_datas_dir="file\\process_datas.pickle"
log_dir="log\\LSTM.log"
model_dir="file\\LSTM_model"
def train(train_generator,train_size,input_num,dims_num):
print("Start Train Job! ")
start=time.time()
inputs=InputLayer(input_shape=(input_num,dims_num),batch_size=batch_size)
layer1=LSTM(128)
output=Dense(2,activation="softmax",name="Output")
optimizer=Adam()
model=Sequential()
model.add(inputs)
model.add(layer1)
model.add(Dropout(0.5))
model.add(output)
call=TensorBoard(log_dir=log_dir,write_grads=True,histogram_freq=1)
model.compile(optimizer,loss="categorical_crossentropy",metrics=["accuracy"])
model.fit_generator(train_generator,steps_per_epoch=train_size//batch_size,epochs=epochs_num,callbacks=[call])
# model.fit_generator(train_generator, steps_per_epoch=5, epochs=5, callbacks=[call])
model.save(model_dir)
end=time.time()
print("Over train job in %f s"%(end-start))
if __name__=="__main__":
train_generator, test_generator, train_size, test_size, input_num, dims_num=build_dataset(batch_size)
train(train_generator,train_size,input_num,dims_num)
test(model_dir,test_generator,test_size,input_num,dims_num,batch_size)
| python | MIT | aaf555a4f53bf136cf5fadbe35ac294894f3bbe1 | 2026-01-05T07:13:06.526795Z | false |
SparkSharly/DL_for_xss | https://github.com/SparkSharly/DL_for_xss/blob/aaf555a4f53bf136cf5fadbe35ac294894f3bbe1/utils.py | utils.py | import nltk
import re
from urllib.parse import unquote
import tensorflow as tf
import keras.backend.tensorflow_backend as ktf
def GeneSeg(payload):
#数字泛化为"0"
payload=payload.lower()
payload=unquote(unquote(payload))
payload,num=re.subn(r'\d+',"0",payload)
#替换url为”http://u
payload,num=re.subn(r'(http|https)://[a-zA-Z0-9\.@&/#!#\?]+', "http://u", payload)
#分词
r = '''
(?x)[\w\.]+?\(
|\)
|"\w+?"
|'\w+?'
|http://\w
|</\w+>
|<\w+>
|<\w+
|\w+=
|>
|[\w\.]+
'''
return nltk.regexp_tokenize(payload, r)
def init_session():
#gpu_options=tf.GPUOptions(allow_growth=True)
ktf.set_session(tf.Session()) | python | MIT | aaf555a4f53bf136cf5fadbe35ac294894f3bbe1 | 2026-01-05T07:13:06.526795Z | false |
SparkSharly/DL_for_xss | https://github.com/SparkSharly/DL_for_xss/blob/aaf555a4f53bf136cf5fadbe35ac294894f3bbe1/processing.py | processing.py | from utils import GeneSeg
import csv,pickle,random,json
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
import tensorflow as tf
vec_dir="file\\word2vec.pickle"
pre_datas_train="file\\pre_datas_train.csv"
pre_datas_test="file\\pre_datas_test.csv"
process_datas_dir="file\\process_datas.pickle"
def pre_process():
with open(vec_dir,"rb") as f :
word2vec=pickle.load(f)
dictionary=word2vec["dictionary"]
reverse_dictionary=word2vec["reverse_dictionary"]
embeddings=word2vec["embeddings"]
xssed_data=[]
normal_data=[]
with open("data\\xssed.csv","r",encoding="utf-8") as f:
reader = csv.DictReader(f, fieldnames=["payload"])
for row in reader:
payload=row["payload"]
word=GeneSeg(payload)
xssed_data.append(word)
with open("data\\normal_examples.csv","r",encoding="utf-8") as f:
reader=csv.reader(f)
reader = csv.DictReader(f, fieldnames=["payload"])
for row in reader:
payload=row["payload"]
word=GeneSeg(payload)
normal_data.append(word)
xssed_num=len(xssed_data)
normal_num=len(normal_data)
xssed_labels=[1]*xssed_num
normal_labels=[0]*normal_num
datas=xssed_data+normal_data
labels=xssed_labels+normal_labels
labels=to_categorical(labels)
def to_index(data):
d_index=[]
for word in data:
if word in dictionary.keys():
d_index.append(dictionary[word])
else:
d_index.append(dictionary["UNK"])
return d_index
datas_index=[to_index(data) for data in datas]
datas_index=pad_sequences(datas_index,value=-1)
rand=random.sample(range(len(datas_index)),len(datas_index))
datas=[datas_index[index] for index in rand]
labels=[labels[index] for index in rand]
train_datas,test_datas,train_labels,test_labels=train_test_split(datas,labels,test_size=0.3)
train_size=len(train_labels)
test_size=len(test_labels)
input_num=len(train_datas[0])
dims_num = embeddings["UNK"].shape[0]
word2vec["train_size"]=train_size
word2vec["test_size"]=test_size
word2vec["input_num"]=input_num
word2vec["dims_num"]=dims_num
with open(vec_dir,"wb") as f :
pickle.dump(word2vec,f)
print("Saved word2vec to:",vec_dir)
print("Write trian datas to:",pre_datas_train)
with open(pre_datas_train,"w") as f:
for i in range(train_size):
data_line=str(train_datas[i].tolist())+"|"+str(train_labels[i].tolist())+"\n"
f.write(data_line)
print("Write test datas to:",pre_datas_test)
with open(pre_datas_test,"w") as f:
for i in range(test_size):
data_line=str(test_datas[i].tolist())+"|"+str(test_labels[i].tolist())+"\n"
f.write(data_line)
print("Write datas over!")
def data_generator(data_dir):
reader = tf.TextLineReader()
queue = tf.train.string_input_producer([data_dir])
_, value = reader.read(queue)
coord = tf.train.Coordinator()
sess = tf.Session()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
while True:
v = sess.run(value)
[data, label] = v.split(b"|")
data = np.array(json.loads(data.decode("utf-8")))
label = np.array(json.loads(label.decode("utf-8")))
yield (data, label)
coord.request_stop()
coord.join(threads)
sess.close()
def batch_generator(datas_dir,datas_size,batch_size,embeddings,reverse_dictionary,train=True):
batch_data = []
batch_label = []
generator=data_generator(datas_dir)
n=0
while True:
for i in range(batch_size):
data,label=next(generator)
data_embed = []
for d in data:
if d != -1:
data_embed.append(embeddings[reverse_dictionary[d]])
else:
data_embed.append([0.0] * len(embeddings["UNK"]))
batch_data.append(data_embed)
batch_label.append(label)
n+=1
if not train and n==datas_size:
break
if not train and n == datas_size:
yield (np.array(batch_data), np.array(batch_label))
break
else:
yield (np.array(batch_data),np.array(batch_label))
batch_data = []
batch_label = []
def build_dataset(batch_size):
with open(vec_dir, "rb") as f:
word2vec = pickle.load(f)
embeddings = word2vec["embeddings"]
reverse_dictionary = word2vec["reverse_dictionary"]
train_size=word2vec["train_size"]
test_size=word2vec["test_size"]
dims_num = word2vec["dims_num"]
input_num =word2vec["input_num"]
train_generator = batch_generator(pre_datas_train,train_size,batch_size,embeddings,reverse_dictionary)
test_generator = batch_generator(pre_datas_test,test_size,batch_size,embeddings,reverse_dictionary,train=False)
return train_generator,test_generator,train_size,test_size,input_num,dims_num
if __name__=="__main__":
pre_process()
| python | MIT | aaf555a4f53bf136cf5fadbe35ac294894f3bbe1 | 2026-01-05T07:13:06.526795Z | false |
SparkSharly/DL_for_xss | https://github.com/SparkSharly/DL_for_xss/blob/aaf555a4f53bf136cf5fadbe35ac294894f3bbe1/Conv.py | Conv.py | import time
from keras.models import Sequential
from keras.layers import Dense,InputLayer,Dropout,LSTM,Conv1D,Flatten,GlobalAveragePooling1D,MaxPool1D
from keras.callbacks import TensorBoard
from keras.optimizers import Adam
from processing import build_dataset
from utils import init_session
from MLP import test
init_session()
batch_size=500
epochs_num=1
process_datas_dir="file\\process_datas.pickle"
log_dir="log\\Conv.log"
model_dir="file\\Conv_model"
def train(train_generator,train_size,input_num,dims_num):
print("Start Train Job! ")
start=time.time()
inputs=InputLayer(input_shape=(input_num,dims_num),batch_size=batch_size)
layer1=Conv1D(64,3,activation="relu")
layer2=Conv1D(64,3,activation="relu")
layer3=Conv1D(128,3,activation="relu")
layer4=Conv1D(128,3,activation="relu")
layer5=Dense(128,activation="relu")
output=Dense(2,activation="softmax",name="Output")
optimizer=Adam()
model=Sequential()
model.add(inputs)
model.add(layer1)
model.add(layer2)
model.add(MaxPool1D(pool_size=2))
model.add(Dropout(0.5))
model.add(layer3)
model.add(layer4)
model.add(MaxPool1D(pool_size=2))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(layer5)
model.add(Dropout(0.5))
model.add(output)
call=TensorBoard(log_dir=log_dir,write_grads=True,histogram_freq=1)
model.compile(optimizer,loss="categorical_crossentropy",metrics=["accuracy"])
model.fit_generator(train_generator,steps_per_epoch=train_size//batch_size,epochs=epochs_num,callbacks=[call])
# model.fit_generator(train_generator, steps_per_epoch=5, epochs=5, callbacks=[call])
model.save(model_dir)
end=time.time()
print("Over train job in %f s"%(end-start))
if __name__=="__main__":
train_generator, test_generator, train_size, test_size, input_num, dims_num=build_dataset(batch_size)
train(train_generator,train_size,input_num,dims_num)
test(model_dir,test_generator,test_size,input_num,dims_num,batch_size)
| python | MIT | aaf555a4f53bf136cf5fadbe35ac294894f3bbe1 | 2026-01-05T07:13:06.526795Z | false |
SparkSharly/DL_for_xss | https://github.com/SparkSharly/DL_for_xss/blob/aaf555a4f53bf136cf5fadbe35ac294894f3bbe1/word2vec_gensim.py | word2vec_gensim.py | import csv,pickle,time
from collections import Counter
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from utils import GeneSeg
from gensim.models.word2vec import Word2Vec
learning_rate=0.1
vocabulary_size=3000
batch_size=128
embedding_size=128
num_skips=4
skip_window=5
num_sampled=64
num_iter=5
plot_only=100
log_dir="word2vec.log"
plt_dir="file\\word2vec.png"
vec_dir="file\\word2vec.pickle"
start=time.time()
words=[]
datas=[]
with open("data\\xssed.csv","r",encoding="utf-8") as f:
reader=csv.DictReader(f,fieldnames=["payload"])
for row in reader:
payload=row["payload"]
word=GeneSeg(payload)
datas.append(word)
words+=word
#构建数据集
def build_dataset(datas,words):
count=[["UNK",-1]]
counter=Counter(words)
count.extend(counter.most_common(vocabulary_size-1))
vocabulary=[c[0] for c in count]
data_set=[]
for data in datas:
d_set=[]
for word in data:
if word in vocabulary:
d_set.append(word)
else:
d_set.append("UNK")
count[0][1]+=1
data_set.append(d_set)
return data_set
data_set=build_dataset(datas,words)
model=Word2Vec(data_set,size=embedding_size,window=skip_window,negative=num_sampled,iter=num_iter)
embeddings=model.wv
def plot_with_labels(low_dim_embs,labels,filename=plt_dir):
plt.figure(figsize=(10,10))
for i,label in enumerate(labels):
x,y=low_dim_embs[i,:]
plt.scatter(x,y)
plt.annotate(label,xy=(x,y),xytext=(5,2),
textcoords="offset points",
ha="right",
va="bottom")
f_text="vocabulary_size=%d;batch_size=%d;embedding_size=%d;skip_window=%d;num_iter=%d"%(
vocabulary_size,batch_size,embedding_size,skip_window,num_iter
)
plt.figtext(0.03,0.03,f_text,color="green",fontsize=10)
plt.show()
plt.savefig(filename)
tsne=TSNE(perplexity=30,n_components=2,init="pca",n_iter=5000)
plot_words=embeddings.index2word[:plot_only]
plot_embeddings=[]
for word in plot_words:
plot_embeddings.append(embeddings[word])
low_dim_embs=tsne.fit_transform(plot_embeddings)
plot_with_labels(low_dim_embs,plot_words)
def save(embeddings):
dictionary=dict([(embeddings.index2word[i],i)for i in range(len(embeddings.index2word))])
reverse_dictionary=dict(zip(dictionary.values(),dictionary.keys()))
word2vec={"dictionary":dictionary,"embeddings":embeddings,"reverse_dictionary":reverse_dictionary}
with open(vec_dir,"wb") as f:
pickle.dump(word2vec,f)
save(embeddings)
end=time.time()
print("Over job in ",end-start)
print("Saved words vec to",vec_dir) | python | MIT | aaf555a4f53bf136cf5fadbe35ac294894f3bbe1 | 2026-01-05T07:13:06.526795Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_ms2.py | tests/test_ms2.py | import os
import numpy as np
import pyteomics
pyteomics.__path__ = [os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'pyteomics'))]
import unittest
import copy
import pickle
from pyteomics.ms2 import read, read_header, MS2, IndexedMS2, chain
import data
class MS2Test(unittest.TestCase):
maxDiff = None
def setUp(self):
self.path = 'test.ms2'
self.header = read_header(self.path)
with read(self.path) as r:
self.spectra = list(r)
self.ns = len(self.spectra)
def test_read(self):
# http://stackoverflow.com/q/14246983/1258041
r = read(self.path)
self.assertEqual(data.ms2_spectra, list(r))
r.close()
for reader in [read, MS2, IndexedMS2, chain]:
with reader(self.path) as reader:
self.assertEqual(data.ms2_spectra, list(reader))
def test_read_no_charges(self):
with read(self.path, convert_arrays=False, read_charges=False) as reader:
lhs = list(map(copy.copy, data.ms2_spectra_lists))
for spec in lhs:
del spec['charge array']
self.assertEqual(lhs, list(reader))
with read(self.path, convert_arrays=1, read_charges=False) as reader:
lhs = list(map(copy.copy, data.ms2_spectra))
for spec in lhs:
del spec['charge array']
self.assertEqual(lhs, list(reader))
def test_read_no_resolution(self):
with read(self.path, convert_arrays=False, read_resolutions=False) as reader:
lhs = list(map(copy.copy, data.ms2_spectra_lists))
for spec in lhs:
del spec['resolution array']
self.assertEqual(lhs, list(reader))
with read(self.path, convert_arrays=1, read_resolutions=False) as reader:
lhs = list(map(copy.copy, data.ms2_spectra))
for spec in lhs:
del spec['resolution array']
self.assertEqual(lhs, list(reader))
def test_read_array_conversion(self):
with read(self.path, convert_arrays=0) as reader:
self.assertEqual(data.ms2_spectra_lists, list(reader))
with read(self.path, convert_arrays=1) as reader:
s = next(reader)
self.assertTrue(isinstance(s['m/z array'], np.ndarray))
with read(self.path, convert_arrays=2) as reader:
s = next(reader)
self.assertTrue(isinstance(s['m/z array'], np.ndarray))
self.assertTrue(isinstance(s['charge array'], np.ma.core.MaskedArray))
def test_header(self):
self.assertEqual(self.header, data.ms2_header)
def test_read_dtype(self):
dtypes = {'m/z array': np.float32, 'intensity array': np.int32}
with read(self.path, dtype=dtypes) as f:
for spec in f:
for k, v in dtypes.items():
self.assertEqual(spec[k].dtype, v)
def test_indexedms2_picklable(self):
with IndexedMS2(self.path, block_size=12345, convert_arrays=1, read_charges=False, read_resolutions=False) as reader:
spec = pickle.dumps(reader)
with pickle.loads(spec) as reader:
self.assertEqual(reader.block_size, 12345)
self.assertEqual(reader._read_charges, False)
self.assertEqual(reader._read_resolutions, False)
lhs = list(map(copy.copy, data.ms2_spectra))
for spec in lhs:
del spec['resolution array']
del spec['charge array']
self.assertEqual(lhs, list(reader))
with IndexedMS2(self.path, use_header=True) as reader:
spec = pickle.dumps(reader)
with pickle.loads(spec) as reader:
self.assertEqual(data.ms2_header, reader.header)
def test_ms2_picklable(self):
with MS2(self.path, convert_arrays=1, read_charges=False, read_resolutions=False) as reader:
spec = pickle.dumps(reader)
with pickle.loads(spec) as reader:
self.assertEqual(reader._read_charges, False)
self.assertEqual(reader._read_resolutions, False)
lhs = list(map(copy.copy, data.ms2_spectra))
for spec in lhs:
del spec['resolution array']
del spec['charge array']
self.assertEqual(lhs, list(reader))
if __name__ == "__main__":
unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_idxml.py | tests/test_idxml.py | from os import path
import pyteomics
pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]
import unittest
from pyteomics.openms.idxml import IDXML, read, chain
from data import idxml_data
from itertools import product
class IdxmlTest(unittest.TestCase):
maxDiff = None
path = 'test.idXML'
def testReadPSM(self):
for rec, refs, rs, it, ui in product((True, False), repeat=5):
for func in [IDXML, read, chain,
lambda x, **kw: chain.from_iterable([x], **kw)]:
with func(self.path, recursive=rec, retrieve_refs=refs,
read_schema=rs, iterative=it, use_index=ui) as reader:
try:
psms = list(reader)
self.assertEqual(psms, idxml_data[(rec, refs)])
except Exception:
print('Parameters causing exception: ', rec, refs, rs, it, ui)
raise
if __name__ == '__main__':
unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_featurexml.py | tests/test_featurexml.py | from os import path
import pyteomics
pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]
import unittest
import operator as op
from itertools import product
from data import features
from pyteomics.openms.featurexml import FeatureXML, read, chain
class FeatureXMLTest(unittest.TestCase):
maxDiff = None
path = 'test.featureXML'
def testRead(self):
for rs, it, ui in product([True, False], repeat=3):
for func in [FeatureXML, read, chain,
lambda x, **kw: chain.from_iterable([x], **kw)]:
with self.subTest(read_schema=rs, iterative=it, use_index=ui, func=func):
with func(self.path, read_schema=rs, iterative=it, use_index=ui) as r:
self.assertEqual(features, list(r))
def test_map(self):
self.assertEqual(sorted(features, key=op.itemgetter('id')),
sorted(FeatureXML(self.path).map(), key=op.itemgetter('id')))
if __name__ == '__main__':
unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_fasta.py | tests/test_fasta.py | from os import path
import tempfile
import unittest
import random
import string
import pickle
import re
from collections import Counter
import pyteomics
pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]
from pyteomics import fasta
class ReadWriteTest(unittest.TestCase):
maxDiff = None
def setUp(self):
self.fasta_file = 'test.fasta'
self.fasta_entries_long = [
('test sequence test sequence 2', 'TEST'),
('test sequence 3', 'TEST'),
('test sequence 4', 'TEST')
]
self.fasta_entries_short = [
('test sequence', 'TEST'),
('test sequence 3', 'TEST'),
('test sequence 4', 'TEST')
]
def test_simple_read_long_comments(self):
for reader in [fasta.read, fasta.FASTA]:
with reader(self.fasta_file) as f:
self.assertEqual(self.fasta_entries_long, list(f))
def test_simple_read_short_comments(self):
for reader in [fasta.read, fasta.FASTA]:
with reader(self.fasta_file, ignore_comments=True) as f:
self.assertEqual(self.fasta_entries_short, list(f))
def test_indexed_read(self):
with fasta.TwoLayerIndexedFASTA(self.fasta_file) as tlir, fasta.IndexedFASTA(self.fasta_file) as ir:
for reader in [ir, tlir]:
self.assertEqual(self.fasta_entries_short[1:], list(reader))
def test_index_retrieve(self):
key = 'test sequence 4'
with fasta.IndexedFASTA(self.fasta_file) as ir:
self.assertEqual(self.fasta_entries_short[2], ir[key])
def test_two_layer_retrieve(self):
with fasta.TwoLayerIndexedFASTA(self.fasta_file, r'test sequence (.*)') as tlir:
self.assertEqual(self.fasta_entries_short[2], tlir['4'])
def test_indexed_picklable(self):
reader = fasta.TwoLayerIndexedFASTA(self.fasta_file, r'test sequence (.*)', block_size=7777)
reader2 = pickle.loads(pickle.dumps(reader))
self.assertEqual(reader2.block_size, reader.block_size)
self.assertEqual(self.fasta_entries_short[2], reader2['4'])
reader.close()
reader2.close()
def test_mp_map(self):
with fasta.IndexedFASTA(self.fasta_file) as ir:
self.assertEqual(
sorted(self.fasta_entries_short[1:]),
sorted(list(ir.map())))
def test_read_and_write_fasta_short(self):
with tempfile.TemporaryFile(mode='r+') as new_fasta_file:
fasta.write(fasta.read(self.fasta_file, ignore_comments=True),
new_fasta_file)
new_fasta_file.seek(0)
new_entries = list(fasta.read(new_fasta_file, ignore_comments=True))
self.assertEqual(new_entries, self.fasta_entries_short)
def test_read_and_write_long(self):
with tempfile.TemporaryFile(mode='r+') as new_fasta_file:
fasta.write(fasta.read(self.fasta_file), new_fasta_file)
new_fasta_file.seek(0)
new_entries = list(fasta.read(new_fasta_file))
self.assertEqual(new_entries, self.fasta_entries_long)
def test_write_decoy_db(self):
with tempfile.TemporaryFile(mode='r+') as decdb:
fasta.write_decoy_db(self.fasta_file, decdb,
decoy_only=False, prefix='PREFIX_')
decdb.seek(0)
all_entries = list(fasta.read(decdb, False))
self.assertEqual(all_entries, self.fasta_entries_long +
[('PREFIX_' + a, b[::-1]) for a, b in self.fasta_entries_long])
def test_decoy_entries(self):
with fasta.read(self.fasta_file) as f:
self.assertEqual(sorted(fasta.decoy_entries(f, decoy_only=False, prefix='PREFIX_', mode='reverse')),
sorted(self.fasta_entries_long + [('PREFIX_' + a, b[::-1]) for a, b in self.fasta_entries_long]))
def test_decoy_entries_only(self):
with fasta.read(self.fasta_file) as f:
self.assertEqual(list(fasta.decoy_entries(f, decoy_only=True, prefix='PREFIX_', mode='reverse')),
[('PREFIX_' + a, b[::-1]) for a, b in self.fasta_entries_long])
class DecoyTest(unittest.TestCase):
    """Tests for single-sequence decoy generation: reverse, shuffle, fused."""
    def test_decoy_sequence_reverse(self):
        """'reverse' mode and fasta.reverse both return the mirrored string."""
        sequence = ''.join(random.choice(string.ascii_uppercase)
                           for i in range(random.randint(1, 50)))
        self.assertEqual(fasta.decoy_sequence(sequence, 'reverse'), sequence[::-1])
        self.assertEqual(fasta.reverse(sequence), sequence[::-1])
    def test_decoy_sequence_shuffle(self):
        """'shuffle' mode permutes residues while preserving composition.

        NOTE(review): the "at least one of 10 shuffles differs from its
        input" checks are probabilistic; they can fail (extremely rarely)
        if every random sequence happens to shuffle back to itself.
        """
        sequences = [''.join(random.choice(string.ascii_uppercase)
                             for i in range(random.randint(1, 50)))
                     for j in range(10)]
        test = True
        for s in sequences:
            ss = fasta.decoy_sequence(s, 'shuffle')
            # Same multiset of residues...
            self.assertEqual(sorted(list(s)), sorted(list(ss)))
            # ...but (presumably) not the same order for every sequence.
            if not all(a == b for a, b in zip(s, ss)):
                test = False
        self.assertFalse(test)
        test = True
        # Same check through the direct fasta.shuffle entry point.
        for s in sequences:
            ss = fasta.shuffle(s)
            self.assertEqual(sorted(list(s)), sorted(list(ss)))
            if not all(a == b for a, b in zip(s, ss)):
                test = False
        self.assertFalse(test)
        test = True
        # With fix_aa, the chosen residues must keep their exact positions.
        for s in sequences:
            n = random.randint(1, 5)
            fix_aa = [random.choice(string.ascii_uppercase) for _ in range(n)]
            ss = fasta.shuffle(s, fix_aa=fix_aa)
            self.assertEqual(len(s), len(ss))
            self.assertEqual(Counter(s), Counter(ss))
            for aa in fix_aa:
                self.assertEqual([_.span() for _ in re.finditer(aa, s)],
                                 [_.span() for _ in re.finditer(aa, ss)])
            if not all(a == b for a, b in zip(s, ss)):
                test = False
        self.assertFalse(test)
    def test_decoy_sequence_fused(self):
        """'fused' mode concatenates reversed + 'R' + original sequence."""
        sequences = [''.join(random.choice(string.ascii_uppercase)
                             for i in range(random.randint(1, 50)))
                     for j in range(10)]
        for s in sequences:
            ss = fasta.decoy_sequence(s, 'fused')
            self.assertEqual(ss, s[::-1] + 'R' + s)
            self.assertEqual(ss, fasta.fused_decoy(s))
    def test_decoy_keep_nterm(self):
        """keep_nterm pins the first residue; the rest is transformed."""
        sequences = [''.join(random.choice(string.ascii_uppercase)
                             for i in range(random.randint(1, 50)))
                     for j in range(10)]
        for mode in ('shuffle', 'reverse'):
            for seq in sequences:
                self.assertEqual(seq[0], fasta.decoy_sequence(seq, mode, keep_nterm=True)[0])
        for seq in sequences:
            self.assertEqual(seq[1:][::-1], fasta.reverse(seq, keep_nterm=True)[1:])
    def test_decoy_keep_cterm(self):
        """keep_cterm pins the last residue; the rest is transformed."""
        sequences = [''.join(random.choice(string.ascii_uppercase)
                             for i in range(random.randint(1, 50)))
                     for j in range(10)]
        for mode in ('shuffle', 'reverse'):
            for seq in sequences:
                self.assertEqual(seq[-1], fasta.decoy_sequence(seq, mode, keep_cterm=True)[-1])
        for seq in sequences:
            self.assertEqual(seq[:-1][::-1], fasta.reverse(seq, keep_cterm=True)[:-1])
class ParserTest(unittest.TestCase):
    """Tests for fasta.parse across known FASTA header flavors.

    Each test pairs a literal header with the exact dict fasta.parse is
    expected to return; a leading '>' is stripped from the raw header.
    """
    def test_parser_uniprotkb_decoydb(self):
        """decoy_db applies the parser after prefixing the raw header."""
        header = ('sp|P27748|ACOX_RALEH Acetoin catabolism protein X OS=Ralstonia'
                  ' eutropha (strain ATCC 17699 / H16 / DSM 428 / Stanier 337)'
                  ' GN=acoX PE=4 SV=2')
        sequence = 'SEQUENCE'
        with tempfile.TemporaryFile(mode='r+') as db:
            fasta.write([(header, sequence)], db)
            db.seek(0)
            entries = list(fasta.decoy_db(db, prefix='PREFIX_', parser=fasta.parse, decoy_only=True))
        # The decoy prefix lands on the 'db' field and on the raw header.
        parsed = {'GN': 'acoX',
                  'OS': 'Ralstonia eutropha '
                        '(strain ATCC 17699 / H16 / DSM 428 / Stanier 337)',
                  'PE': 4,
                  'SV': 2,
                  'db': 'PREFIX_sp',
                  'entry': 'ACOX_RALEH',
                  'id': 'P27748',
                  'gene_id': 'ACOX',
                  'name': 'Acetoin catabolism protein X',
                  'taxon': 'RALEH',
                  fasta.RAW_HEADER_KEY: 'PREFIX_' + header}
        self.assertEqual(entries[0][0], parsed)
        self.assertEqual(entries[0][1], 'SEQUENCE'[::-1])
        self.assertEqual(len(entries), 1)
    def test_parser_uniprotkb(self):
        """A standard UniProtKB header parses into its named fields."""
        header = ('sp|P27748|ACOX_RALEH Acetoin catabolism protein X OS=Ralstonia'
                  ' eutropha (strain ATCC 17699 / H16 / DSM 428 / Stanier 337)'
                  ' GN=acoX PE=4 SV=2')
        parsed = {'GN': 'acoX',
                  'OS': 'Ralstonia eutropha '
                        '(strain ATCC 17699 / H16 / DSM 428 / Stanier 337)',
                  'PE': 4,
                  'SV': 2,
                  'db': 'sp',
                  'entry': 'ACOX_RALEH',
                  'id': 'P27748',
                  'gene_id': 'ACOX',
                  'name': 'Acetoin catabolism protein X',
                  'taxon': 'RALEH',
                  fasta.RAW_HEADER_KEY: header}
        self.assertEqual(fasta.parse(header), parsed)
    def test_parser_uniprotkb_write(self):
        """Writing a parsed-dict entry reconstructs the original header."""
        header = ('sp|P27748|ACOX_RALEH Acetoin catabolism protein X OS=Ralstonia'
                  ' eutropha (strain ATCC 17699 / H16 / DSM 428 / Stanier 337)'
                  ' GN=acoX PE=4 SV=2')
        parsed = {'GN': 'acoX',
                  'OS': 'Ralstonia eutropha '
                        '(strain ATCC 17699 / H16 / DSM 428 / Stanier 337)',
                  'PE': 4,
                  'SV': 2,
                  'db': 'sp',
                  'entry': 'ACOX_RALEH',
                  'id': 'P27748',
                  'gene_id': 'ACOX',
                  'name': 'Acetoin catabolism protein X',
                  'taxon': 'RALEH',
                  fasta.RAW_HEADER_KEY: header}
        with tempfile.TemporaryFile(mode='r+') as new_fasta_file:
            fasta.write([(parsed, 'SEQUENCE')], new_fasta_file)
            new_fasta_file.seek(0)
            new_entries = list(fasta.read(new_fasta_file))
        self.assertEqual([(header, 'SEQUENCE')], new_entries)
    def test_parser_uniprotkb_isoform(self):
        """Isoform accessions (hyphenated id) are kept intact."""
        header = 'sp|Q4R572-2|1433B_MACFA Isoform Short of 14-3-3 protein beta/alpha OS=Macaca fascicularis GN=YWHAB'
        parsed = {'GN': 'YWHAB',
                  'OS': 'Macaca fascicularis',
                  'db': 'sp',
                  'entry': '1433B_MACFA',
                  'gene_id': '1433B',
                  'id': 'Q4R572-2',
                  'name': 'Isoform Short of 14-3-3 protein beta/alpha',
                  'taxon': 'MACFA',
                  fasta.RAW_HEADER_KEY: header}
        self.assertEqual(fasta.parse(header), parsed)
    def test_parser_uniprot_equals(self):
        """An '=' inside the protein name must not be read as a field tag."""
        header = 'tr|Q9S8M8|Q9S8M8_WHEAT FRIII-2-VIII=GAMMA-gliadin (Fragment) OS=Triticum aestivum OX=4565 PE=1 SV=1'
        parsed = {
            'db': 'tr',
            'id': 'Q9S8M8',
            'entry': 'Q9S8M8_WHEAT',
            'taxon': 'WHEAT',
            'gene_id': 'Q9S8M8',
            'name': 'FRIII-2-VIII=GAMMA-gliadin (Fragment)',
            'OS': 'Triticum aestivum',
            'OX': 4565,
            'PE': 1,
            'SV': 1,
            fasta.RAW_HEADER_KEY: header
        }
        self.assertEqual(fasta.parse(header), parsed)
    def test_parser_uniprot_hyphen(self):
        """Hyphens in the name and in GN values are preserved."""
        header = 'tr|Q00M55|Q00M55_WHEAT LMW-GS P-32 OS=Triticum aestivum OX=4565 GN=GluD3-3 PE=4 SV=1'
        parsed = {
            'db': 'tr',
            'id': 'Q00M55',
            'gene_id': 'Q00M55',
            'taxon': 'WHEAT',
            'entry': 'Q00M55_WHEAT',
            'name': 'LMW-GS P-32',
            'OS': 'Triticum aestivum',
            'OX': 4565,
            'GN': 'GluD3-3',
            'PE': 4,
            'SV': 1,
            fasta.RAW_HEADER_KEY: header
        }
        self.assertEqual(fasta.parse(header), parsed)
    def test_parser_uniref(self):
        """UniRef headers: the leading '>' is stripped from the raw header."""
        header = ('>UniRef100_A5DI11 Elongation factor 2 n=1 '
                  'Tax=Pichia guilliermondii RepID=EF2_PICGU')
        parsed = {'RepID': 'EF2_PICGU',
                  # 'taxon': 'PICGU',
                  # 'gene_id': 'EF2',
                  'Tax': 'Pichia guilliermondii',
                  'cluster': 'Elongation factor 2',
                  'id': 'UniRef100_A5DI11',
                  # 'type': 'UniRef100',
                  # 'accession': 'A5DI11',
                  'n': 1,
                  fasta.RAW_HEADER_KEY: header[1:]}
        self.assertEqual(fasta.parse(header), parsed)
    def test_parser_uniparc(self):
        """UniParc headers parse to id plus the status flag."""
        header = '>UPI0000000005 status=active'
        parsed = {'id': 'UPI0000000005',
                  'status': 'active',
                  fasta.RAW_HEADER_KEY: header[1:]}
        self.assertEqual(fasta.parse(header), parsed)
    def test_parser_unimes(self):
        """UniMES headers parse to id, name and tagged fields."""
        header = ('MES00000000005 Putative uncharacterized protein GOS_3018412 '
                  '(Fragment) OS=marine metagenome Pep=JCVI_PEP_1096688850003 SV=1')
        parsed = {'OS': 'marine metagenome',
                  'Pep': 'JCVI_PEP_1096688850003',
                  'SV': 1,
                  'id': 'MES00000000005',
                  'name': 'Putative uncharacterized protein GOS_3018412 (Fragment)',
                  fasta.RAW_HEADER_KEY: header}
        self.assertEqual(fasta.parse(header), parsed)
    def test_parser_spd(self):
        """Swiss-Prot-database-style pipe-delimited headers parse."""
        header = ('>P31947|1433S_HUMAN| 14-3-3 protein sigma (Stratifin) '
                  '(Epithelial cell marker protein 1).')
        parsed = {'description': '14-3-3 protein sigma (Stratifin) '
                                 '(Epithelial cell marker protein 1).',
                  'gene': '1433S_HUMAN',
                  'gene_id': '1433S',
                  'id': 'P31947',
                  'taxon': 'HUMAN',
                  fasta.RAW_HEADER_KEY: header[1:]}
        self.assertEqual(fasta.parse(header), parsed)
    def test_parser_spd_mult_ids(self):
        """Multiple space-separated accessions stay in one 'id' string."""
        header = ('>P02763 Q8TC16|A1AG1_HUMAN| Alpha-1-acid glycoprotein 1 '
                  'precursor (AGP 1) (Orosomucoid-1) (OMD 1)')
        parsed = {'description': 'Alpha-1-acid glycoprotein 1 precursor (AGP 1)'
                                 ' (Orosomucoid-1) (OMD 1)',
                  'gene': 'A1AG1_HUMAN',
                  'gene_id': 'A1AG1',
                  'id': 'P02763 Q8TC16',
                  'taxon': 'HUMAN',
                  fasta.RAW_HEADER_KEY: header[1:]}
        self.assertEqual(fasta.parse(header), parsed)
    def test_parser_ncbi(self):
        """NCBI RefSeq headers: bracketed organism becomes 'taxon'."""
        header = '>NP_001351877.1 acylglycerol kinase, mitochondrial isoform 2 [Homo sapiens]'
        parsed = {'description': 'acylglycerol kinase, mitochondrial isoform 2',
                  'id': 'NP_001351877.1',
                  'taxon': 'Homo sapiens',
                  fasta.RAW_HEADER_KEY: header[1:]}
        self.assertEqual(fasta.parse(header), parsed)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_mzmlb.py | tests/test_mzmlb.py | import unittest
from urllib.request import urlopen
import os
import shutil
import pickle
import pyteomics
from io import BytesIO
pyteomics.__path__ = [os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'pyteomics'))]
from data import mzmlb_spectra
try:
from pyteomics.mzmlb import MzMLb, read, chain
reason = None
except ImportError as err:
MzMLb = read = chain = None
reason = err
from pyteomics.auxiliary import FileReader
class MzMLbTest(unittest.TestCase):
    """Tests for the HDF5-backed mzMLb reader.

    setUp downloads the test file from GitHub on first use and caches it
    on disk, so these tests need network access the first time they run.
    """
    maxDiff = None
    # Local cache path and remote origin of the fixture file.
    path = 'test.mzMLb'
    url = 'https://raw.githubusercontent.com/mobiusklein/mzdata/refs/heads/main/test/data/small.mzMLb'
    # Number of leading spectra compared against the `mzmlb_spectra` fixture.
    num_spectra = 1
    def setUp(self):
        """Fetch the fixture file once; subsequent runs reuse the cache."""
        if not os.path.exists(self.path):
            with open(self.path, 'wb') as fout, urlopen(self.url) as fin:
                shutil.copyfileobj(fin, fout)
    def test_read(self):
        """MzMLb, read and chain all yield the expected leading spectra."""
        for func in [MzMLb, read]:
            with func(self.path) as r:
                # http://stackoverflow.com/q/14246983/1258041
                self.assertEqual(mzmlb_spectra, list(r[:self.num_spectra]))
        # cannot use the same indexing with chain
        with chain(self.path) as r:
            self.assertEqual(mzmlb_spectra, list(r)[:self.num_spectra])
    def test_picklable(self):
        """A reader survives a pickle round trip and resumes iteration."""
        with MzMLb(self.path) as reader:
            expected_data = next(reader)
            spec = pickle.dumps(reader)
        with pickle.loads(spec) as reader:
            self.assertEqual(next(reader)['id'], expected_data['id'])
    def test_in_memory_buffer(self):
        """MzMLb accepts a BytesIO buffer, with and without an index."""
        with open(self.path, 'rb') as fh:
            data_buffer = BytesIO(fh.read())
        with MzMLb(data_buffer) as reader:
            spectrum = next(reader)
            self.assertEqual(
                spectrum['id'], 'controllerType=0 controllerNumber=1 scan=1')
        data_buffer.seek(0)
        with MzMLb(data_buffer, use_index=True) as reader:
            spectrum = next(reader)
            self.assertEqual(
                spectrum['id'], 'controllerType=0 controllerNumber=1 scan=1')
    def test_registered_filereader(self):
        """MzMLb is registered in the common FileReader hierarchy."""
        self.assertTrue(issubclass(MzMLb, FileReader))
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_auxiliary.py | tests/test_auxiliary.py | import warnings
import unittest
import string
from itertools import count
import operator as op
import numpy as np
import pandas as pd
import tempfile
import os
import pyteomics
pyteomics.__path__ = [os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'pyteomics'))]
from pyteomics import auxiliary as aux
from pyteomics import tandem
from pyteomics import version
# Synthetic PSM fixture: 52 (score, label, PEP) tuples.  Scores count up
# from 0, labels run A..Z then a..z (lowercase marks a decoy hit), and
# PEPs rise from 0.01 in steps of 0.001.
psms = list(zip(count(), string.ascii_uppercase + string.ascii_lowercase,
                np.arange(0.01, 0.062, 0.001)))
class QvalueTest(unittest.TestCase):
    """Tests for aux.qvalues over iterators, numpy arrays and DataFrames.

    Uses the module-level `psms` fixture; the first 26 entries (uppercase
    labels) are targets and the last 26 (lowercase) are decoys.
    """
    # Accessors for tuple-shaped PSMs...
    key = staticmethod(op.itemgetter(0))
    # ...and their DataFrame-row counterparts.
    key_df = staticmethod(op.itemgetter('score'))
    is_decoy = staticmethod(lambda x: x[1].islower())
    is_decoy_df = staticmethod(lambda x: x['label'].islower())
    pep = staticmethod(op.itemgetter(2))
    pep_df = staticmethod(op.itemgetter('pep'))
    def setUp(self):
        # Shuffles the shared module-level list in place, then exposes it
        # as a one-shot iterator; consumers must call setUp again to reuse it.
        np.random.shuffle(psms)
        self.psms = iter(psms)
    def _run_check(self, q, formula):
        """Assert the q-value array matches the known fixture layout.

        Targets (first 26 by score) must have q == 0; decoy q-values
        depend on which FDR formula (1 or 2) produced them.  Calls
        self.setUp() to replenish the consumed iterator between phases.
        """
        self.assertTrue(np.allclose(q['q'][:26], 0))
        if formula == 2:
            self.assertTrue(np.allclose(q['q'][26:], 2 * np.arange(1., 27.) / (26 + np.arange(1, 27))))
        else:
            self.assertTrue(np.allclose(q['q'][26:], np.arange(1., 27.) / 26))
        self.assertTrue(np.allclose(q['is decoy'][:26], 0))
        self.assertTrue(np.allclose(q['is decoy'][26:], 1))
        self.assertTrue(np.allclose(q['score'], np.arange(52)))
        self.setUp()
        # Output rows must be the input PSMs sorted by key.
        spsms = sorted(self.psms, key=self.key)
        self.assertTrue(np.allclose([self.is_decoy(x) for x in spsms], q['is decoy']))
        self.assertTrue(np.allclose([self.key(x) for x in spsms], q['score']))
        self.setUp()
    def _run_check_pep(self, q):
        """Assert PEP-derived q-values are the running mean of the PEPs."""
        self.assertTrue(np.allclose(q['q'], np.arange(0.01, 0.036, 0.0005)))
        self.setUp()
    def test_qvalues(self):
        """With remove_decoy=True, only the 26 targets remain, all q == 0."""
        q = aux.qvalues(self.psms, key=self.key, is_decoy=self.is_decoy, remove_decoy=True)
        self.assertTrue(np.allclose(q['q'], 0))
        self.assertTrue(np.allclose(q['is decoy'], 0))
        self.assertTrue(np.allclose(q['score'], np.arange(26)))
    def test_qvalues_pep(self):
        """PEP-based q-values, with and without an explicit sort key."""
        q = aux.qvalues(self.psms, pep=self.pep)
        self._run_check_pep(q)
        q = aux.qvalues(self.psms, pep=self.pep, key=self.key)
        self._run_check_pep(q)
    def test_qvalues_with_decoy(self):
        """Decoys kept in the output; formula 2 (default) and formula 1."""
        q = aux.qvalues(self.psms, key=self.key, is_decoy=self.is_decoy, remove_decoy=False)
        self._run_check(q, 2)
        q = aux.qvalues(self.psms, key=self.key, is_decoy=self.is_decoy, remove_decoy=False, formula=1)
        self._run_check(q, 1)
    def test_qvalues_full_output(self):
        """full_output=True must not change the computed q-values."""
        q = aux.qvalues(self.psms, key=self.key, is_decoy=self.is_decoy, remove_decoy=False, full_output=True)
        self._run_check(q, 2)
    def test_qvalues_pep_full_output(self):
        """full_output=True with PEP-based q-values."""
        q = aux.qvalues(self.psms, pep=self.pep, full_output=True)
        self._run_check_pep(q)
        q = aux.qvalues(self.psms, key=self.key, pep=self.pep, full_output=True)
        self._run_check_pep(q)
    def test_qvalues_from_numpy(self):
        """Structured-array input; full output preserves the input dtype."""
        dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
        psms = np.array(list(self.psms), dtype=dtype)
        q = aux.qvalues(psms, key=self.key, is_decoy=self.is_decoy, remove_decoy=False, formula=1)
        self._run_check(q, 1)
        q = aux.qvalues(psms, key=self.key, is_decoy=self.is_decoy, remove_decoy=False, formula=1, full_output=True)
        self._run_check(q, 1)
        self.assertTrue(q['psm'].dtype == dtype)
    def test_qvalues_pep_from_numpy(self):
        """PEP-based q-values from a structured array."""
        dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
        psms = np.array(list(self.psms), dtype=dtype)
        q = aux.qvalues(psms, pep=self.pep)
        self._run_check_pep(q)
        q = aux.qvalues(psms, key=self.key, pep=self.pep, full_output=True)
        self._run_check_pep(q)
        self.assertTrue(q['psm'].dtype == dtype)
    def test_qvalues_from_dataframe(self):
        """DataFrame input with callable key/is_decoy accessors."""
        dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
        psms = pd.DataFrame(np.array(list(self.psms), dtype=dtype))
        q = aux.qvalues(psms, key=self.key_df, is_decoy=self.is_decoy_df, remove_decoy=False, formula=1)
        self._run_check(q, 1)
        q = aux.qvalues(psms, key=self.key_df, is_decoy=self.is_decoy_df, remove_decoy=False, formula=1, full_output=True)
        self._run_check(q, 1)
    def test_qvalues_empty_dataframe(self):
        """An empty DataFrame yields an empty result, not an error."""
        dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
        psms = pd.DataFrame(np.array([], dtype=dtype))
        q = aux.qvalues(psms, key=self.key_df, is_decoy=self.is_decoy_df, remove_decoy=False, formula=1)
        self.assertEqual(q.shape[0], 0)
        q = aux.qvalues(psms, key=self.key_df, is_decoy=self.is_decoy_df, remove_decoy=False, formula=1, full_output=True)
        self.assertEqual(q.shape[0], 0)
    def test_qvalues_pep_from_dataframe(self):
        """PEP-based q-values from a DataFrame with a callable accessor."""
        dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
        psms = pd.DataFrame(np.array(list(self.psms), dtype=dtype))
        q = aux.qvalues(psms, pep=self.pep_df)
        self._run_check_pep(q)
        q = aux.qvalues(psms, pep=self.pep_df, full_output=True)
        self._run_check_pep(q)
    def test_qvalues_from_numpy_string_key(self):
        """String column name accepted as `key` for structured arrays."""
        dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
        psms = np.array(list(self.psms), dtype=dtype)
        q = aux.qvalues(psms, key='score', is_decoy=self.is_decoy, remove_decoy=False, formula=1)
        self._run_check(q, 1)
        q = aux.qvalues(psms, key='score', is_decoy=self.is_decoy, remove_decoy=False, formula=1, full_output=True)
        self._run_check(q, 1)
        self.assertTrue(q['psm'].dtype == dtype)
    def test_qvalues_pep_from_numpy_string_pep(self):
        """String column name accepted as `pep` for structured arrays."""
        dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
        psms = np.array(list(self.psms), dtype=dtype)
        q = aux.qvalues(psms, pep='pep')
        self._run_check_pep(q)
        q = aux.qvalues(psms, key='score', pep='pep')
        self._run_check_pep(q)
        q = aux.qvalues(psms, key='score', pep='pep', full_output=True)
        self._run_check_pep(q)
    def test_qvalues_from_dataframe_string_key(self):
        """String column name accepted as `key` for DataFrames."""
        dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
        psms = pd.DataFrame(np.array(list(self.psms), dtype=dtype))
        q = aux.qvalues(psms, key='score', is_decoy=self.is_decoy_df, remove_decoy=False, formula=1)
        self._run_check(q, 1)
        q = aux.qvalues(psms, key='score', is_decoy=self.is_decoy_df, remove_decoy=False, formula=1, full_output=True)
        self._run_check(q, 1)
    def test_qvalues_pep_from_dataframe_string_pep(self):
        """String column name accepted as `pep` for DataFrames."""
        dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
        psms = pd.DataFrame(np.array(list(self.psms), dtype=dtype))
        q = aux.qvalues(psms, key=self.key_df, pep='pep')
        self._run_check_pep(q)
        q = aux.qvalues(psms, pep='pep')
        self._run_check_pep(q)
        q = aux.qvalues(psms, key='score', pep='pep', full_output=True)
        self._run_check_pep(q)
    def test_qvalues_from_dataframe_string_key_and_is_decoy(self):
        """Both `key` and `is_decoy` given as DataFrame column names."""
        dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
        psms = pd.DataFrame(np.array(list(self.psms), dtype=dtype))
        psms['is decoy'] = [self.is_decoy_df(row) for _, row in psms.iterrows()]
        q = aux.qvalues(psms, key='score', is_decoy='is decoy', remove_decoy=False, formula=1)
        self._run_check(q, 1)
        q = aux.qvalues(psms, key='score', is_decoy='is decoy', remove_decoy=False, formula=1, full_output=True)
        self._run_check(q, 1)
    def test_qvalues_pep_from_dataframe_string_key_and_pep(self):
        """Both `key` and `pep` given as DataFrame column names."""
        dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
        psms = pd.DataFrame(np.array(list(self.psms), dtype=dtype))
        q = aux.qvalues(psms, key='score', pep='pep')
        self._run_check_pep(q)
        q = aux.qvalues(psms, key='score', pep='pep', full_output=True)
        self._run_check_pep(q)
    def test_qvalues_pep_exceptions(self):
        """`pep` is mutually exclusive with is_decoy/remove_decoy/correction."""
        self.assertRaises(aux.PyteomicsError, aux.qvalues, self.psms, pep='pep', is_decoy=self.is_decoy)
        self.assertRaises(aux.PyteomicsError, aux.qvalues, self.psms, pep='pep', remove_decoy=False)
        self.assertRaises(aux.PyteomicsError, aux.qvalues, self.psms, pep='pep', correction=0)
    def test_qvalues_from_tandem(self):
        """Same q-values whether the TandemXML reader is used bare or as a
        context manager (requires the test.t.xml fixture on disk)."""
        psms = tandem.TandemXML('test.t.xml')
        q0 = aux.qvalues(psms, key=op.itemgetter('expect'), is_decoy=tandem.is_decoy)
        with tandem.TandemXML('test.t.xml') as psms:
            q1 = aux.qvalues(psms, key=op.itemgetter('expect'), is_decoy=tandem.is_decoy)
        self.assertTrue(np.allclose(q0['q'], q1['q']))
class FilterTest(unittest.TestCase):
key = staticmethod(op.itemgetter(0))
key_df = staticmethod(op.itemgetter('score'))
is_decoy = staticmethod(lambda x: x[1].islower())
is_decoy_df = staticmethod(lambda x: x['label'].islower())
pep = staticmethod(op.itemgetter(2))
pep_df = staticmethod(op.itemgetter('pep'))
def setUp(self):
self.psms = psms
np.random.shuffle(self.psms)
def _run_check(self, *args, **kwargs):
key = kwargs.get('key')
if key is None:
key = self.key_df if isinstance(args[0], pd.DataFrame) else self.key
is_decoy = kwargs.get('is_decoy')
if is_decoy is None:
is_decoy = self.is_decoy_df if isinstance(args[0], pd.DataFrame) else self.is_decoy
f11 = aux.filter(*args, key=key, is_decoy=is_decoy, fdr=0.5)
f12 = aux.filter(*args, key=key, is_decoy=is_decoy, fdr=0.5, formula=2)
f21 = aux.filter(*args, key=key, is_decoy=is_decoy, fdr=0.5, remove_decoy=False, formula=1)
f22 = aux.filter(*args, key=key, is_decoy=is_decoy, fdr=0.5, remove_decoy=False)
self.assertEqual(f11.shape[0], 26)
self.assertEqual(f12.shape[0], 26)
self.assertEqual(f21.shape[0], 39)
self.assertEqual(f22.shape[0], 34)
with aux.filter(*args, key=key, is_decoy=is_decoy, fdr=0.5, full_output=False) as f:
f11 = list(f)
with aux.filter(*args, key=key, is_decoy=is_decoy, fdr=0.5, formula=2, full_output=False) as f:
f12 = list(f)
with aux.filter(*args, key=key, is_decoy=is_decoy, fdr=0.5, remove_decoy=False, formula=1, full_output=False) as f:
f21 = list(f)
with aux.filter(*args, key=key, is_decoy=is_decoy, fdr=0.5, remove_decoy=False, full_output=False) as f:
f22 = list(f)
self.assertEqual(len(f11), 26)
self.assertEqual(len(f12), 26)
self.assertEqual(len(f21), 39)
self.assertEqual(len(f22), 34)
def _run_check_pep(self, *args, **kwargs):
key = kwargs.pop('key', None)
if key is None:
key = self.key_df if isinstance(args[0], pd.DataFrame) else self.key
f11 = aux.filter(*args, key=key, fdr=0.02, **kwargs)
f12 = aux.filter(*args, fdr=0.02, **kwargs)
self.assertEqual(f11.shape[0], 21)
self.assertEqual(f12.shape[0], 21)
with aux.filter(*args, key=key, fdr=0.02, full_output=False, **kwargs) as f:
f11 = list(f)
with aux.filter(*args, fdr=0.02, full_output=False, **kwargs) as f:
f12 = list(f)
self.assertEqual(len(f11), 21)
self.assertEqual(len(f12), 21)
def test_filter(self):
self._run_check(self.psms)
def test_filter_pep(self):
self._run_check_pep(self.psms, pep=self.pep)
def test_filter_chain(self):
f = aux.filter.chain(self.psms, self.psms, key=self.key, is_decoy=self.is_decoy, fdr=0.5)
self.assertEqual(f.shape[0], 52)
def test_filter_chain_pep(self):
f = aux.filter.chain(self.psms, self.psms, pep=self.pep, fdr=0.02)
self.assertEqual(f.shape[0], 42)
def test_filter_chain_with(self):
with aux.filter.chain(self.psms, self.psms, key=self.key, is_decoy=self.is_decoy,
fdr=0.5, full_output=False) as f:
f1 = list(f)
self.assertEqual(len(f1), 52)
def test_filter_pep_chain_with(self):
with aux.filter.chain(self.psms, self.psms, pep=self.pep,
fdr=0.02, full_output=False) as f:
f1 = list(f)
self.assertEqual(len(f1), 42)
def test_filter_chain_arr_str_key(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms = np.array(self.psms, dtype=dtype)
f11 = aux.filter.chain(psms, psms, key='score', is_decoy=self.is_decoy, fdr=0.5)
self.assertEqual(f11.shape[0], 52)
def test_filter_pep_chain_arr_str_key(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms = np.array(self.psms, dtype=dtype)
f = aux.filter.chain(psms, psms, key='score', pep=self.pep, fdr=0.02)
self.assertEqual(f.shape[0], 42)
def test_filter_chain_arr_str_key_with(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms = np.array(self.psms, dtype=dtype)
with aux.filter.chain(psms, psms, key='score', is_decoy=self.is_decoy, fdr=0.5, full_output=False) as f:
f1 = list(f)
self.assertEqual(len(f1), 52)
def test_filter_pep_chain_arr_str_key_with(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms = np.array(self.psms, dtype=dtype)
with aux.filter.chain(psms, psms, key='score', pep=self.pep, fdr=0.02, full_output=False) as f:
f1 = list(f)
self.assertEqual(len(f1), 42)
def test_filter_chain_from_iterable(self):
f11 = aux.filter.chain.from_iterable([self.psms, self.psms], key=self.key, is_decoy=self.is_decoy, fdr=0.5)
self.assertEqual(f11.shape[0], 52)
def test_filter_pep_chain_from_iterable(self):
f = aux.filter.chain.from_iterable([self.psms, self.psms], pep=self.pep, fdr=0.02)
self.assertEqual(f.shape[0], 42)
def test_filter_chain_from_iterable_with(self):
with aux.filter.chain.from_iterable([self.psms, self.psms], key=self.key, is_decoy=self.is_decoy, fdr=0.5, full_output=False) as f:
f11 = list(f)
self.assertEqual(len(f11), 52)
def test_filter_pep_chain_from_iterable_with(self):
with aux.filter.chain.from_iterable([self.psms, self.psms], key=self.key, pep=self.pep, fdr=0.02, full_output=False) as f:
f1 = list(f)
self.assertEqual(len(f1), 42)
def test_filter_array(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms = np.array(self.psms, dtype=dtype)
self._run_check(psms)
def test_filter_pep_array(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms = np.array(self.psms, dtype=dtype)
self._run_check_pep(psms, pep=self.pep)
def test_filter_array_str_is_decoy(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]
psms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)
self._run_check(psms, is_decoy='is decoy')
def test_filter_pep_array_str_pep(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]
psms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)
self._run_check_pep(psms, pep='pep')
def test_filter_array_str_is_decoy_str_key(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]
psms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)
self._run_check(psms, is_decoy='is decoy', key='score')
def test_filter_pep_array_str_pep_str_key(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]
psms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)
self._run_check_pep(psms, pep='pep', key='score')
def test_filter_array_list_key(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms = np.array(self.psms, dtype=dtype)
key = [self.key(psm) for psm in psms]
self._run_check(psms, key=key)
def test_filter_pep_array_list_key(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms = np.array(self.psms, dtype=dtype)
key = [self.key(psm) for psm in psms]
self._run_check_pep(psms, key=key, pep=self.pep)
def test_filter_pep_array_list_pep_key(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms = np.array(self.psms, dtype=dtype)
pep = [self.pep(psm) for psm in psms]
self._run_check_pep(psms, pep=pep)
def test_filter_array_gen_key(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms = np.array(self.psms, dtype=dtype)
key = (self.key(psm) for psm in psms)
f = aux.filter(psms, key=key, is_decoy=self.is_decoy, fdr=0.5)
self.assertEqual(f.shape[0], 26)
key = (self.key(psm) for psm in psms)
with aux.filter(psms, key=key, is_decoy=self.is_decoy, fdr=0.5, full_output=False) as f:
f1 = list(f)
self.assertEqual(len(f1), 26)
def test_filter_pep_array_gen_key(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms = np.array(self.psms, dtype=dtype)
key = (self.key(psm) for psm in psms)
f = aux.filter(psms, key=key, pep=self.pep, fdr=0.02)
self.assertEqual(f.shape[0], 21)
key = (self.key(psm) for psm in psms)
with aux.filter(psms, key=key, pep=self.pep, fdr=0.02, full_output=False) as f:
f11 = list(f)
self.assertEqual(len(f11), 21)
def test_filter_array_iter_key_str_is_decoy(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]
psms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)
key = iter([self.key(psm) for psm in psms])
f11 = aux.filter(psms, key=key, is_decoy='is decoy', fdr=0.5)
self.assertEqual(f11.shape[0], 26)
key = iter(self.key(psm) for psm in psms)
with aux.filter(psms, key=key, is_decoy='is decoy', fdr=0.5, full_output=False) as f:
f11 = list(f)
self.assertEqual(len(f11), 26)
def test_filter_pep_array_iter_key_str_is_decoy(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]
psms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)
key = iter([self.key(psm) for psm in psms])
f = aux.filter(psms, key=key, pep='pep', fdr=0.02)
self.assertEqual(f.shape[0], 21)
key = iter(self.key(psm) for psm in psms)
with aux.filter(psms, key=key, pep='pep', fdr=0.02, full_output=False) as f:
f1 = list(f)
self.assertEqual(len(f1), 21)
def test_filter_array_arr_is_decoy(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms = np.array(self.psms, dtype=dtype)
is_decoy = np.array([self.is_decoy(psm) for psm in self.psms])
self._run_check(psms, is_decoy=is_decoy)
def test_filter_pep_array_arr_is_decoy(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms = np.array(self.psms, dtype=dtype)
pep = np.array([self.pep(psm) for psm in self.psms])
self._run_check_pep(psms, pep=pep)
def test_filter_dataframe(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms = pd.DataFrame(np.array(self.psms, dtype=dtype))
self._run_check(psms)
def test_filter_empty_dataframe(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms = pd.DataFrame(np.array([], dtype=dtype))
f = aux.filter(psms, key=self.key_df, is_decoy=self.is_decoy_df, remove_decoy=False, formula=1, fdr=0.1)
self.assertEqual(f.shape[0], 0)
f = aux.qvalues(psms, key=self.key_df, is_decoy=self.is_decoy_df, remove_decoy=False, formula=1, full_output=True, fdr=0.1)
self.assertEqual(f.shape[0], 0)
def test_filter_pep_dataframe(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms = pd.DataFrame(np.array(self.psms, dtype=dtype))
self._run_check_pep(psms, pep=self.pep_df)
def test_filter_dataframe_str_key(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms = pd.DataFrame(np.array(self.psms, dtype=dtype))
self._run_check(psms, key='score')
def test_filter_pep_dataframe_str_key(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms = pd.DataFrame(np.array(self.psms, dtype=dtype))
self._run_check_pep(psms, key='score', pep=self.pep_df)
def test_filter_dataframe_str_is_decoy(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]
psms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)
psms = pd.DataFrame(psms)
self._run_check(psms, is_decoy='is decoy')
def test_filter_pep_dataframe_str_pep(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]
psms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)
psms = pd.DataFrame(psms)
self._run_check(psms, pep='pep', key=self.key_df)
def test_filter_dataframe_str_key_str_is_decoy(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]
psms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)
psms = pd.DataFrame(psms)
self._run_check(psms, key='score', is_decoy='is decoy')
def test_filter_empty_dataframe_str_key_str_is_decoy(self):
# dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]
psms = pd.DataFrame({'score': [], 'is decoy': []})
f = aux.filter(psms, key='score', is_decoy='is decoy', fdr=0.1)
self.assertEqual(f.shape[0], 0)
f = aux.qvalues(psms, key='score', is_decoy='is decoy', remove_decoy=False, formula=1, full_output=True, fdr=0.01)
self.assertEqual(f.shape[0], 0)
def test_filter_pep_dataframe_str_key_str_pep(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]
psms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)
psms = pd.DataFrame(psms)
self._run_check_pep(psms, key='score', pep='pep')
def test_filter_dataframe_arr_key_str_is_decoy(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]
psms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)
key = psms['score']
psms = pd.DataFrame(psms)
self._run_check(psms, key=key, is_decoy='is decoy')
def test_filter_pep_dataframe_arr_key_str_is_decoy(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]
psms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)
key = psms['score']
psms = pd.DataFrame(psms)
self._run_check(psms, key=key, pep='pep')
def test_filter_dataframe_arr_key(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]
psms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)
key = psms['score']
psms = pd.DataFrame(psms)
self._run_check(psms, key=key)
def test_filter_pep_dataframe_arr_key(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]
psms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)
key = psms['score']
psms = pd.DataFrame(psms)
self._run_check_pep(psms, key=key, pep=self.pep_df)
def test_filter_dataframe_list_key_list_is_decoy(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]
psms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)
key = list(psms['score'])
is_decoy = list(psms['is decoy'])
psms = pd.DataFrame(psms)
self._run_check(psms, key=key, is_decoy=is_decoy)
def test_filter_pep_dataframe_list_key_list_pep(self):
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64), ('is decoy', np.bool_)]
psms = np.array([(s, l, p, self.is_decoy((s, l, p))) for s, l, p in self.psms], dtype=dtype)
key = list(psms['score'])
pep = list(psms['pep'])
psms = pd.DataFrame(psms)
self._run_check(psms, key=key, pep=pep)
def test_filter_two_lists(self):
i = np.random.randint(1, len(self.psms)-1)
psms1 = self.psms[:i]
psms2 = self.psms[i:]
self._run_check(psms1, psms2)
def test_filter_pep_two_lists(self):
i = np.random.randint(1, len(self.psms)-1)
psms1 = self.psms[:i]
psms2 = self.psms[i:]
self._run_check_pep(psms1, psms2, pep=self.pep)
def test_filter_two_arrays(self):
i = np.random.randint(1, len(self.psms)-1)
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms1 = np.array(self.psms[:i], dtype=dtype)
psms2 = np.array(self.psms[i:], dtype=dtype)
self._run_check(psms1, psms2)
def test_filter_pep_two_arrays(self):
i = np.random.randint(1, len(self.psms)-1)
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms1 = np.array(self.psms[:i], dtype=dtype)
psms2 = np.array(self.psms[i:], dtype=dtype)
self._run_check_pep(psms1, psms2, pep=self.pep)
def test_filter_two_dataframes(self):
i = np.random.randint(1, len(self.psms)-1)
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms1 = pd.DataFrame(np.array(self.psms[:i], dtype=dtype))
psms2 = pd.DataFrame(np.array(self.psms[i:], dtype=dtype))
self._run_check(psms1, psms2)
def test_filter_pep_two_dataframes(self):
i = np.random.randint(1, len(self.psms)-1)
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms1 = pd.DataFrame(np.array(self.psms[:i], dtype=dtype))
psms2 = pd.DataFrame(np.array(self.psms[i:], dtype=dtype))
self._run_check_pep(psms1, psms2, pep=self.pep_df)
def test_filter_two_iters(self):
i = np.random.randint(1, len(self.psms)-1)
psms1 = iter(self.psms[:i])
psms2 = iter(self.psms[i:])
f11 = aux.filter(psms1, psms2, key=self.key, is_decoy=self.is_decoy, fdr=0.5)
self.assertEqual(f11.shape[0], 26)
psms1 = iter(self.psms[:i])
psms2 = iter(self.psms[i:])
with aux.filter(psms1, psms2, key=self.key, is_decoy=self.is_decoy, fdr=0.5, full_output=False) as f:
f11 = list(f)
self.assertEqual(len(f11), 26)
def test_filter_pep_two_iters(self):
i = np.random.randint(1, len(self.psms)-1)
psms1 = iter(self.psms[:i])
psms2 = iter(self.psms[i:])
f = aux.filter(psms1, psms2, key=self.key, pep=self.pep, fdr=0.02)
self.assertEqual(f.shape[0], 21)
psms1 = iter(self.psms[:i])
psms2 = iter(self.psms[i:])
with aux.filter(psms1, psms2, key=self.key, pep=self.pep, fdr=0.02, full_output=False) as f:
f1 = list(f)
self.assertEqual(len(f1), 21)
def test_filter_iter(self):
psms = iter(self.psms)
f = aux.filter(psms, key=self.key, is_decoy=self.is_decoy, fdr=0.5)
self.assertEqual(f.shape[0], 26)
psms = iter(self.psms)
with aux.filter(psms, key=self.key, is_decoy=self.is_decoy, fdr=0.5, full_output=False) as f:
f1 = list(f)
self.assertEqual(len(f1), 26)
def test_filter_pep_iter(self):
psms = iter(self.psms)
f = aux.filter(psms, key=self.key, pep=self.pep, fdr=0.02)
self.assertEqual(f.shape[0], 21)
psms = iter(self.psms)
with aux.filter(psms, key=self.key, pep=self.pep, fdr=0.02, full_output=False) as f:
f1 = list(f)
self.assertEqual(len(f1), 21)
def test_filter_two_iters_iter_key_iter_is_decoy(self):
i = np.random.randint(1, len(self.psms)-1)
psms1 = iter(self.psms[:i])
psms2 = iter(self.psms[i:])
key = iter(self.key(p) for p in self.psms)
is_decoy = iter(self.is_decoy(p) for p in self.psms)
f11 = aux.filter(psms1, psms2, key=key, is_decoy=is_decoy, fdr=0.5)
self.assertEqual(f11.shape[0], 26)
psms1 = iter(self.psms[:i])
psms2 = iter(self.psms[i:])
key = iter(self.key(p) for p in self.psms)
is_decoy = iter(self.is_decoy(p) for p in self.psms)
with aux.filter(psms1, psms2, key=key, is_decoy=is_decoy, fdr=0.5, full_output=False) as f:
f11 = list(f)
self.assertEqual(len(f11), 26)
def test_filter_pep_two_iters_iter_key_iter_is_decoy(self):
i = np.random.randint(1, len(self.psms)-1)
psms1 = iter(self.psms[:i])
psms2 = iter(self.psms[i:])
key = iter(self.key(p) for p in self.psms)
pep = iter(self.pep(p) for p in self.psms)
f = aux.filter(psms1, psms2, key=key, pep=pep, fdr=0.02)
self.assertEqual(f.shape[0], 21)
psms1 = iter(self.psms[:i])
psms2 = iter(self.psms[i:])
key = iter(self.key(p) for p in self.psms)
pep = iter(self.pep(p) for p in self.psms)
with aux.filter(psms1, psms2, key=key, pep=pep, fdr=0.02, full_output=False) as f:
f1 = list(f)
self.assertEqual(len(f1), 21)
def test_filter_two_arrays_str_key(self):
i = np.random.randint(1, len(self.psms)-1)
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms1 = np.array(self.psms[:i], dtype=dtype)
psms2 = np.array(self.psms[i:], dtype=dtype)
self._run_check(psms1, psms2, key='score')
def test_filter_pep_two_arrays_str_key_str_pep(self):
i = np.random.randint(1, len(self.psms)-1)
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms1 = np.array(self.psms[:i], dtype=dtype)
psms2 = np.array(self.psms[i:], dtype=dtype)
self._run_check_pep(psms1, psms2, key='score', pep='pep')
def test_filter_two_dataframes_str_key(self):
i = np.random.randint(1, len(self.psms)-1)
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms1 = pd.DataFrame(np.array(self.psms[:i], dtype=dtype))
psms2 = pd.DataFrame(np.array(self.psms[i:], dtype=dtype))
self._run_check(psms1, psms2, key='score')
def test_filter_pep_two_dataframes_str_key(self):
i = np.random.randint(1, len(self.psms)-1)
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms1 = pd.DataFrame(np.array(self.psms[:i], dtype=dtype))
psms2 = pd.DataFrame(np.array(self.psms[i:], dtype=dtype))
self._run_check_pep(psms1, psms2, key='score', pep=self.pep_df)
def test_filter_two_arrays_str_key_arr_is_decoy(self):
i = np.random.randint(1, len(self.psms)-1)
dtype = [('score', np.int8), ('label', np.str_, 1), ('pep', np.float64)]
psms1 = np.array(self.psms[:i], dtype=dtype)
psms2 = np.array(self.psms[i:], dtype=dtype)
is_decoy = np.array([self.is_decoy(p) for p in self.psms])
self._run_check(psms1, psms2, key='score', is_decoy=is_decoy)
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | true |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_peff.py | tests/test_peff.py | from os import path
import unittest
import pyteomics
pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]
from pyteomics import peff
class PEFFTest(unittest.TestCase):
maxDiff = None
def setUp(self):
self.peff_file = 'test.peff'
def test_parse(self, reader=None):
if reader is None:
reader = peff.IndexedPEFF(self.peff_file)
self.assertEqual(reader.number_of_entries, 5)
self.assertEqual(len(reader.header_blocks), 1)
protein = next(reader)
self.assertEqual(protein.description.Tag, "NX_P07585-1")
self.assertEqual(protein, reader.get_entry("NX_P07585-1"))
protein2 = reader.get_entry("NX_P07585-3")
self.assertEqual(protein, protein)
self.assertNotEqual(protein, protein2)
self.assertEqual(protein.description.TaxName, "Homo Sapiens")
self.assertEqual(protein.description["NcbiTaxId"], 9606)
self.assertEqual(len(protein.description.ModResPsi), 2)
if __name__ == '__main__':
unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_util.py | tests/test_util.py | import unittest
import platform
import os
import pyteomics
import multiprocessing as mp
pyteomics.__path__ = [os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'pyteomics'))]
from pyteomics import auxiliary as aux
class UtilTest(unittest.TestCase):
def test_ensure_prefix(self):
pairs = [
('file:///home/test/unimod.xml', 'file:///home/test/unimod.xml'),
('https://example.org/test/unimod.xml', 'https://example.org/test/unimod.xml'),
('ftp://example.org/test/unimod.xml', 'ftp://example.org/test/unimod.xml'),
('http://example.org/test/unimod.xml', 'http://example.org/test/unimod.xml'),
]
pairs_windows = [
('C:/Data folder/unimod.xml', 'file:///C:/Data%20folder/unimod.xml'),
('file:///C:/Data folder/unimod.xml', 'file:///C:/Data folder/unimod.xml'),
]
pairs_other = [('/home/test/unimod.xml', 'file:///home/test/unimod.xml'),]
system = platform.system()
print('Testing on', system)
if system == 'Windows':
pairs.extend(pairs_windows)
else:
pairs.extend(pairs_other)
for inp, out in pairs:
try:
self.assertEqual(aux.ensure_url_prefix(inp), out)
except Exception:
print('Failed with:', inp, out)
raise
def test_start_method(self):
self.assertNotEqual(aux.file_helpers._get_default_start_method(), 'fork')
if mp.get_start_method(allow_none=False) != 'fork':
self.assertEqual(mp.get_start_method(allow_none=False), aux.file_helpers._get_default_start_method())
if __name__ == '__main__':
unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_mztab.py | tests/test_mztab.py | from os import path
import unittest
import warnings
import pyteomics
pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]
from pyteomics import mztab
class MzTabTest(unittest.TestCase):
path_mztab1 = 'test.mztab'
path_mztab2 = 'test_mztab2.mztab'
def test_metadata_mztab1(self):
reader_mztab1 = mztab.MzTab(self.path_mztab1)
self.assertEqual(len(reader_mztab1.metadata), 208)
value_from_mztab1 = reader_mztab1.metadata['fixed_mod[1]']
self.assertEqual(value_from_mztab1, 'CHEMMOD:57.0214637236')
reader_mztab1.file.close()
def test_metadata_mztab2(self):
reader_mztab2 = mztab.MzTab(self.path_mztab2)
self.assertEqual(len(reader_mztab2.metadata), 61)
value_from_mztab2 = reader_mztab2.metadata['sample_processing[1]']
self.assertEqual(value_from_mztab2, 'high performance liquid chromatography')
reader_mztab2.file.close()
def test_metadata_variant_P(self):
reader_mztab1 = mztab.MzTab(self.path_mztab1)
self.assertEqual(reader_mztab1.variant, 'P')
reader_mztab1.file.close()
def test_metadata_variant_M(self):
reader_mztab2 = mztab.MzTab(self.path_mztab2)
self.assertEqual(reader_mztab2.variant, 'M')
reader_mztab2.file.close()
def test_iter_mztab1(self):
reader_mztab1 = mztab.MzTab(self.path_mztab1)
tables = list(reader_mztab1)
self.assertEqual(len(tables), 4)
[self.assertEqual(len(t), 2) for t in tables]
reader_mztab1.file.close()
def test_iter_mztab2(self):
reader_mztab2 = mztab.MzTab(self.path_mztab2)
tables = list(reader_mztab2)
self.assertEqual(len(tables), 3)
[self.assertEqual(len(t), 2) for t in tables]
reader_mztab2.file.close()
def test_getitem_mztab1(self):
reader_mztab1 = mztab.MzTab(self.path_mztab1)
table = reader_mztab1['psm']
self.assertIsInstance(table, mztab.pd.DataFrame)
reader_mztab1.file.close()
def test_getitem_mztab2(self):
reader_mztab2 = mztab.MzTab(self.path_mztab2)
table = reader_mztab2['sme']
self.assertIsInstance(table, mztab.pd.DataFrame)
reader_mztab2.file.close()
def test_keys_values_items(self):
reader_mztab2 = mztab.MzTab(self.path_mztab2, table_format='dict')
keys = list(reader_mztab2.keys())
self.assertEqual(keys, [k for k, v in reader_mztab2])
values = list(reader_mztab2.values())
self.assertEqual(values, [v for k, v in reader_mztab2])
items = list(reader_mztab2.items())
self.assertEqual(items, list(reader_mztab2))
reader_mztab2.file.close()
def test_generated_accessors(self):
reader = mztab.MzTab(self.path_mztab1)
self.assertEqual(reader.mode, 'Complete')
self.assertEqual(reader.version, '1.0.0')
self.assertEqual(reader.software, {1: ('MaxQuant', '1.6.3.4')})
ms_runs = reader.ms_runs
self.assertEqual(len(ms_runs), 63)
self.assertEqual(
sorted(ms_runs[1].items()),
[
('format', 'Andromeda:apl file format'),
('id_format', 'scan number only nativeID format'),
('location', 'file://c:/users/jklein/projects/msv000080527_abelin2017/combined/andromeda/allspectra.hcd.ftms.secpep.sil0_0.apl'),
])
reader.file.close()
def test_missing_version(self):
class OverridingMzTab(mztab.MzTab):
def _parse(self):
super(OverridingMzTab, self)._parse()
self.metadata.pop("mzTab-version", None)
with warnings.catch_warnings(record=True) as w:
reader = OverridingMzTab(self.path_mztab1)
assert reader.variant == 'P'
assert reader.version == '1.0.0'
reader.file.close()
assert len(w) > 0
def test_override(self):
class OverridingMzTab(mztab.MzTab):
def mode(self):
return super(OverridingMzTab, self).mode
reader = OverridingMzTab(self.path_mztab1)
self.assertEqual(reader.mode(), 'Complete')
reader.file.close()
if __name__ == '__main__':
unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_mgf.py | tests/test_mgf.py | import os
import numpy as np
import pyteomics
pyteomics.__path__ = [os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'pyteomics'))]
import tempfile
import unittest
import pickle
import shutil
import json
from collections import OrderedDict
import warnings
from pyteomics import mgf, auxiliary as aux
import data
class MGFTest(unittest.TestCase):
maxDiff = None
_encoding = 'utf-8'
def setUp(self):
self.path = 'test.mgf'
self.header = mgf.read_header(self.path)
with mgf.read(self.path) as f:
self.spectra = list(f)
self.tmpfile = tempfile.TemporaryFile(mode='r+')
mgf.write(header=self.header, spectra=self.spectra, output=self.tmpfile)
self.tmpfile.seek(0)
self.header2 = mgf.read_header(self.tmpfile)
self.tmpfile.seek(0)
tmpreader = mgf.read(self.tmpfile)
self.spectra2 = list(tmpreader)
self.ns = len(self.spectra)
self.tmpfile.close()
self.path_annotated = 'test_annotated.mgf'
self.header_annotated = mgf.read_header(self.path_annotated)
with mgf.read(self.path_annotated, read_ions=True) as f:
self.spectra_annotated = list(f)
def test_read(self):
for func in [mgf.read, mgf.MGF, mgf.IndexedMGF]:
# http://stackoverflow.com/q/14246983/1258041
r = func(self.path)
self.assertEqual(data.mgf_spectra_long, list(r))
r.close()
r = func(self.path, False)
self.assertEqual(data.mgf_spectra_short, list(r))
r.close()
with func(self.path) as reader:
self.assertEqual(data.mgf_spectra_long, list(reader))
with func(self.path, False) as reader:
self.assertEqual(data.mgf_spectra_short, list(reader))
def test_read_source_kw(self):
for func in [mgf.read, mgf.MGF, mgf.IndexedMGF]:
with func(source=self.path) as r:
self.assertEqual(data.mgf_spectra_long, list(r))
def test_read_decoding(self):
for func in [mgf.read, mgf.MGF, mgf.IndexedMGF]:
r = func(self.path, encoding=self._encoding)
self.assertEqual(data.mgf_spectra_long_decoded, list(r))
r.close()
r = func(self.path, False, encoding=self._encoding)
self.assertEqual(data.mgf_spectra_short_decoded, list(r))
r.close()
with func(self.path, encoding=self._encoding) as reader:
self.assertEqual(data.mgf_spectra_long_decoded, list(reader))
with func(self.path, False, encoding=self._encoding) as reader:
self.assertEqual(data.mgf_spectra_short_decoded, list(reader))
def test_read_no_charges(self):
with mgf.read(self.path, read_charges=False) as reader:
self.assertEqual(data.mgf_spectra_long_no_charges, list(reader))
with mgf.read(self.path, False, read_charges=False) as reader:
self.assertEqual(data.mgf_spectra_short_no_charges, list(reader))
def test_read_with_ions(self):
for spec_data, spec_read in zip(data.mgf_spectra_annotated_long, self.spectra_annotated):
# Check that the spectra have the same dict keys
self.assertEqual(spec_data.keys(), spec_read.keys())
for key in spec_data:
if key == 'ion array':
np.testing.assert_array_equal(spec_data[key], spec_read[key])
else:
self.assertEqual(spec_data[key], spec_read[key])
def test_read_write_with_ions(self):
formats = ['{:.6f} {:.6f} {}', '%.6f %.6f %s']
for use_numpy in range(2):
with tempfile.TemporaryFile(mode='r+') as f:
mgf.write(self.spectra_annotated, f, write_ions=True, use_numpy=use_numpy,
fragment_format=formats[use_numpy])
f.seek(0)
spectra = list(mgf.read(f, read_ions=True))
for spec_data, spec_read in zip(data.mgf_spectra_annotated_long, spectra):
# Check that the spectra have the same dict keys
self.assertEqual(spec_data.keys(), spec_read.keys())
for key in spec_data:
if key == 'ion array':
np.testing.assert_array_equal(spec_data[key], spec_read[key])
else:
self.assertEqual(spec_data[key], spec_read[key])
def test_read_array_conversion(self):
with mgf.read(self.path, convert_arrays=0) as reader:
self.assertEqual(data.mgf_spectra_lists, list(reader))
with mgf.read(self.path, convert_arrays=2) as reader:
s = next(reader)
self.assertTrue(isinstance(s['charge array'], np.ma.core.MaskedArray))
self.assertTrue(isinstance(s['m/z array'], np.ndarray))
with mgf.read(self.path, convert_arrays=1) as reader:
s = next(reader)
self.assertTrue(isinstance(s['charge array'], np.ndarray))
self.assertTrue(isinstance(s['m/z array'], np.ndarray))
def test_header(self):
self.assertEqual(self.header, self.header2)
def test_readwrite_ns(self):
self.assertEqual(self.ns, len(self.spectra2))
def test_readwrite_keys(self):
for s, s2 in zip(self.spectra, self.spectra2):
self.assertEqual(set(s), set(s2))
self.assertEqual(set(s), {'intensity array', 'm/z array', 'params', 'charge array'})
def test_readwrite_params(self):
for s, s2 in zip(self.spectra, self.spectra2):
self.assertEqual(s['params'], s2['params'])
def test_readwrite_msms_len(self):
for i in range(self.ns):
al = len(self.spectra[i]['m/z array'])
self.assertEqual(al, len(self.spectra[i]['intensity array']))
self.assertEqual(al, len(self.spectra2[i]['m/z array']))
self.assertEqual(al, len(self.spectra2[i]['intensity array']))
for j in range(al):
self.assertEqual(self.spectra[i]['m/z array'][j],
self.spectra2[i]['m/z array'][j])
self.assertEqual(self.spectra[i]['intensity array'][j],
self.spectra2[i]['intensity array'][j])
def test_readwrite_msms(self):
for i in range(self.ns):
al = len(self.spectra[i]['m/z array'])
for j in range(al):
self.assertEqual(self.spectra[i]['m/z array'][j],
self.spectra2[i]['m/z array'][j])
self.assertEqual(self.spectra[i]['intensity array'][j],
self.spectra2[i]['intensity array'][j])
def test_write_single(self):
tmpfile = tempfile.TemporaryFile(mode='r+')
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
for spectrum in self.spectra:
mgf.write(spectra=spectrum, output=tmpfile)
self.assertGreaterEqual(len(ws), 2)
n_warned = 0
for w in ws:
n_warned += (issubclass(w.category, UserWarning) and "discouraged" in str(w.message))
self.assertGreaterEqual(n_warned, 2)
tmpfile.seek(0)
tmpreader = mgf.read(tmpfile)
self.assertEqual(data.mgf_spectra_long, list(tmpreader))
tmpfile.close()
def test_read_dtype(self):
dtypes = {'m/z array': np.float32, 'intensity array': np.int32}
with mgf.read(self.path, dtype=dtypes) as f:
for spec in f:
for k, v in dtypes.items():
self.assertEqual(spec[k].dtype, v)
def test_get_spectrum(self):
key = 'Spectrum 2'
for klass in [mgf.MGF, mgf.IndexedMGF]:
f = klass(self.path)
self.assertEqual(data.mgf_spectra_long[1], f[key])
self.assertEqual(data.mgf_spectra_long[1], f.get_spectrum(key))
f.close()
self.assertEqual(data.mgf_spectra_long[1], mgf.get_spectrum(self.path, key))
def test_key_access_ions(self):
with mgf.IndexedMGF(self.path_annotated, read_ions=True) as f:
np.testing.assert_array_equal(f['RAEYWENYPPAH||3']['ion array'], self.spectra_annotated[1]['ion array'])
def test_read_list(self):
key = ['Spectrum 2', 'Spectrum 1']
with mgf.IndexedMGF(self.path) as f:
self.assertEqual(data.mgf_spectra_long[::-1], f[key])
def test_indexedmgf_picklable(self):
with mgf.IndexedMGF(self.path, block_size=12345) as reader:
spec = pickle.dumps(reader)
with pickle.loads(spec) as reader:
self.assertEqual(data.mgf_spectra_long[0], next(reader))
self.assertEqual(reader.block_size, 12345)
def test_mgf_picklable(self):
with mgf.MGF(self.path, convert_arrays=0) as reader:
spec = pickle.dumps(reader)
with pickle.loads(spec) as reader:
self.assertEqual(data.mgf_spectra_lists[0], next(reader))
def test_map(self):
with mgf.IndexedMGF(self.path) as reader:
spectra = sorted(list(reader.map()), key=lambda s: s['params']['title'])
self.assertEqual(data.mgf_spectra_long, spectra)
def test_prebuild_index(self):
test_dir = tempfile.mkdtemp()
work_path = os.path.join(test_dir, self.path)
with open(work_path, 'w') as dest, open(self.path) as source:
dest.write(source.read())
assert dest.closed
with mgf.IndexedMGF(work_path) as inst:
offsets_exist = os.path.exists(inst._byte_offset_filename)
self.assertEqual(offsets_exist, inst._check_has_byte_offset_file())
self.assertTrue(isinstance(inst._offset_index, aux.OffsetIndex))
self.assertTrue(inst._source.closed)
with mgf.IndexedMGF(work_path) as inst:
inst._offset_index.pop('Spectrum 1')
inst.write_byte_offsets()
with mgf.IndexedMGF(work_path) as inst:
offsets_exist = os.path.exists(inst._byte_offset_filename)
self.assertTrue(offsets_exist)
self.assertEqual(offsets_exist, inst._check_has_byte_offset_file())
self.assertTrue(isinstance(inst._offset_index, aux.OffsetIndex))
self.assertEqual(len(inst), 1)
self.assertTrue(inst._source.closed)
os.remove(inst._byte_offset_filename)
with mgf.IndexedMGF(work_path) as inst:
offsets_exist = os.path.exists(inst._byte_offset_filename)
self.assertEqual(offsets_exist, inst._check_has_byte_offset_file())
self.assertTrue(isinstance(inst._offset_index, aux.OffsetIndex))
self.assertTrue(inst._source.closed)
shutil.rmtree(test_dir, True)
def test_write_index_keys(self):
test_dir = tempfile.mkdtemp()
work_path = os.path.join(test_dir, self.path)
with open(work_path, 'wb') as dest, open(self.path, 'rb') as source:
dest.write(source.read())
assert dest.closed
mgf.IndexedMGF.prebuild_byte_offset_file(work_path)
with mgf.IndexedMGF(work_path) as inst:
ipath = inst._byte_offset_filename
with open(ipath) as ifp:
container = json.load(ifp, object_hook=OrderedDict)
tag_key = mgf.IndexedMGF._index_class._schema_version_tag_key
self.assertEqual(set(container.keys()), {tag_key, 'index'})
self.assertEqual(tuple(container[tag_key]), mgf.IndexedMGF._index_class.schema_version)
self.assertEqual(container['index'], [['Spectrum 1', [217, 343]], ['Spectrum 2', [343, 504]]])
class UtilityTest(unittest.TestCase):
def test_charge_repr_single(self):
self.assertEqual(mgf._charge_repr('charge', 2), 'CHARGE=2+')
self.assertEqual(mgf._charge_repr('charge', '2'), 'CHARGE=2+')
self.assertEqual(mgf._charge_repr('charge', [2]), 'CHARGE=2+')
self.assertEqual(mgf._charge_repr('charge', aux.Charge(2)), 'CHARGE=2+')
self.assertEqual(mgf._charge_repr('charge', aux.ChargeList([2])), 'CHARGE=2+')
self.assertEqual(mgf._charge_repr('charge', np.int64(2)), 'CHARGE=2+')
def test_charge_repr_multiple(self):
self.assertEqual(mgf._charge_repr('charge', [2, 3]), 'CHARGE=2+ and 3+')
self.assertEqual(mgf._charge_repr('charge', aux.ChargeList([2, 3])), 'CHARGE=2+ and 3+')
self.assertEqual(mgf._charge_repr('charge', '2+, 3+'), 'CHARGE=2+ and 3+')
self.assertEqual(mgf._charge_repr('charge', np.array([2, 3])), 'CHARGE=2+ and 3+')
def test_pepmass_parsing(self):
with mgf.MGF('test_pepmass.mgf') as f:
spectra = list(f)
self.assertEqual(len(spectra), 3)
self.assertEqual(spectra[0]['params'], spectra[1]['params'])
self.assertEqual(spectra[0]['params'], spectra[2]['params'])
def test_fragment_charge_parsing(self):
for s in ['1+', '1', '1.0', '+1.0']:
with self.subTest(s=s):
self.assertEqual(mgf.MGFBase.parse_peak_charge(s), 1)
for s in ['1-', '-1', '-1.0']:
with self.subTest(s=s):
self.assertEqual(mgf.MGFBase.parse_peak_charge(s), -1)
if __name__ == "__main__":
unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_ms1.py | tests/test_ms1.py | import os
import numpy as np
import pyteomics
pyteomics.__path__ = [os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'pyteomics'))]
import unittest
import pickle
from pyteomics.ms1 import read, read_header, MS1, IndexedMS1, chain
import data
class MS1Test(unittest.TestCase):
maxDiff = None
def setUp(self):
self.path = 'test.ms1'
self.header = read_header(self.path)
with read(self.path) as reader:
self.spectra = list(reader)
self.ns = len(self.spectra)
def test_read(self):
# http://stackoverflow.com/q/14246983/1258041
r = read(self.path)
self.assertEqual(data.ms1_spectra, list(r))
r.close()
for reader in [read, MS1, IndexedMS1, chain]:
with reader(self.path) as reader:
self.assertEqual(data.ms1_spectra, list(reader))
def test_read_array_conversion(self):
with read(self.path, convert_arrays=False) as reader:
self.assertEqual(data.ms1_spectra_lists, list(reader))
with read(self.path, convert_arrays=True) as reader:
s = next(reader)
self.assertTrue(isinstance(s['m/z array'], np.ndarray))
def test_header(self):
self.assertEqual(self.header, data.ms1_header)
def test_read_dtype(self):
dtypes = {'m/z array': np.float32, 'intensity array': np.int32}
with read(self.path, dtype=dtypes) as f:
for spec in f:
for k, v in dtypes.items():
self.assertEqual(spec[k].dtype, v)
def test_indexedms1_picklable(self):
with IndexedMS1(self.path, block_size=12345, dtype=np.float32) as reader:
spec = pickle.dumps(reader)
with pickle.loads(spec) as reader:
self.assertEqual(reader.block_size, 12345)
self.assertEqual(reader._dtype_dict['m/z array'], np.float32)
self.assertEqual(data.ms1_spectra, list(reader))
with IndexedMS1(self.path, use_header=True) as reader:
spec = pickle.dumps(reader)
with pickle.loads(spec) as reader:
self.assertEqual(data.ms1_header, reader.header)
def test_ms1_picklable(self):
with MS1(self.path, convert_arrays=0) as reader:
spec = pickle.dumps(reader)
with pickle.loads(spec) as reader:
self.assertEqual(reader._convert_arrays, 0)
self.assertEqual(data.ms1_spectra_lists, list(reader))
if __name__ == "__main__":
unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_parser.py | tests/test_parser.py | from os import path
import pyteomics
pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]
import unittest
from pyteomics import parser
from string import ascii_uppercase as uppercase
import random
class ParserTest(unittest.TestCase):
def setUp(self):
self.simple_sequences = [''.join(random.choice(uppercase) for i in range(
int(random.uniform(1, 20)))) for j in range(10)]
self.labels = ['A', 'B', 'C', 'N', 'X']
self.extlabels = self.labels[:]
self.potential = {'pot': ['X', 'A', 'B'], 'otherpot': ['A', 'C'],
'N-': ['N'], '-C': ['C']}
self.constant = {'const': ['B']}
self.extlabels.extend(('pot', 'otherpot', 'const', '-C', 'N-'))
def test_parse_simple(self):
for seq in self.simple_sequences:
self.assertEqual(seq, ''.join(parser.parse(seq, labels=uppercase)))
def test_parse(self):
self.assertEqual(
[('P',), ('E',), ('P',), ('T',), ('I',), ('D',), ('E',)],
parser.parse('PEPTIDE', split=True))
self.assertEqual(['P', 'E', 'P', 'T', 'I', 'D', 'E'],
parser.parse('H-PEPTIDE'))
for seq in ['PEPTIDE', 'H-PEPTIDE', 'PEPTIDE-OH', 'H-PEPTIDE-OH']:
self.assertEqual(['H-', 'P', 'E', 'P', 'T', 'I', 'D', 'E', '-OH'],
parser.parse(seq, show_unmodified_termini=True))
self.assertEqual(['T', 'E', 'pS', 'T', 'oxM'],
parser.parse('TEpSToxM', labels=parser.std_labels + ['pS', 'oxM']))
self.assertEqual(
[('H-', 'z', 'P'), ('E',), ('P',), ('z', 'T'), ('I',), ('D',), ('z', 'E', '-OH')],
parser.parse('zPEPzTIDzE', True, True, labels=parser.std_labels + ['z']))
def test_tostring(self):
for seq in self.simple_sequences:
self.assertEqual(seq, parser.tostring(parser.parse(seq, labels=uppercase)))
self.assertEqual(seq, parser.tostring(parser.parse(
seq, True, True, labels=uppercase), False))
def test_amino_acid_composition_simple(self):
for seq in self.simple_sequences:
comp = parser.amino_acid_composition(seq, labels=uppercase)
for aa in set(seq):
self.assertEqual(seq.count(aa), comp[aa])
def test_amino_acid_composition(self):
for seq in self.simple_sequences:
comp = parser.amino_acid_composition(seq, term_aa=True, labels=uppercase)
comp_default = parser.amino_acid_composition(seq, labels=uppercase)
self.assertEqual(1, comp['nterm' + seq[0]])
if len(seq) > 1:
self.assertEqual(1, comp['cterm' + seq[-1]])
self.assertEqual(sum(comp_default.values()), sum(comp.values()))
def test_cleave(self):
self.assertEqual(parser.xcleave('PEPTIDEKS', parser.expasy_rules['trypsin']), [(0, 'PEPTIDEK'), (8, 'S')])
self.assertEqual(parser.xcleave('PEPTIDEKS', 'trypsin'), [(0, 'PEPTIDEK'), (8, 'S')])
self.assertEqual(parser.xcleave('PEPTIDEKS', 'Trypsin'), [(0, 'PEPTIDEK'), (8, 'S')])
for seq in self.simple_sequences:
for elem in parser.cleave(
seq, 'trypsin', int(random.uniform(1, 10))):
self.assertIn(elem, seq)
self.assertTrue(any(elem == seq
for elem in parser.cleave(seq, parser.expasy_rules['trypsin'], len(seq))))
def test_cleave_semi(self):
self.assertEqual(parser.xcleave('PEPTIDEKS', 'trypsin', semi=True),
[(0, 'PEPTIDEK'), (0, 'P'), (0, 'PE'), (0, 'PEP'), (0, 'PEPT'), (0, 'PEPTI'), (0, 'PEPTID'), (0, 'PEPTIDE'),
(1, 'EPTIDEK'), (2, 'PTIDEK'), (3, 'TIDEK'), (4, 'IDEK'), (5, 'DEK'), (6, 'EK'), (7, 'K'), (8, 'S')])
self.assertEqual(parser.cleave('PEPTIDEKS', parser.expasy_rules['trypsin'], semi=True),
{'PEPTIDEK', 'P', 'PE', 'PEP', 'PEPT', 'PEPTI', 'PEPTID', 'PEPTIDE', 'EPTIDEK', 'PTIDEK', 'TIDEK', 'IDEK', 'DEK', 'EK', 'K', 'S'})
def test_cleave_min_length(self):
for seq in self.simple_sequences:
ml = random.uniform(1, 5)
for elem in parser.cleave(
seq, parser.expasy_rules['trypsin'], int(random.uniform(1, 10)), ml):
self.assertTrue(len(elem) >= ml)
def test_num_sites(self):
self.assertEqual(parser.num_sites('RKCDE', 'K'), 1)
self.assertEqual(parser.num_sites('RKCDE', 'E'), 0)
self.assertEqual(parser.num_sites('RKCDE', 'R'), 1)
self.assertEqual(parser.num_sites('RKCDE', 'Z'), 0)
def test_isoforms_simple(self):
    """All 2**4 patterns of the 'xx' modification on P/E residues, sorted."""
    self.assertEqual(
        list(parser.isoforms('PEPTIDE', variable_mods={'xx': ['A', 'B', 'P', 'E']})),
        ['PEPTIDE', 'PEPTIDxxE', 'PExxPTIDE', 'PExxPTIDxxE', 'PxxEPTIDE', 'PxxEPTIDxxE', 'PxxExxPTIDE',
        'PxxExxPTIDxxE', 'xxPEPTIDE', 'xxPEPTIDxxE', 'xxPExxPTIDE', 'xxPExxPTIDxxE', 'xxPxxEPTIDE',
        'xxPxxEPTIDxxE', 'xxPxxExxPTIDE', 'xxPxxExxPTIDxxE'])
def test_isoforms_fixed_simple(self):
    """Fixed mods apply everywhere at once, including both termini."""
    isoforms = parser.isoforms('PEPTIDE', fixed_mods={'n-': True, '-c': True, 'x': ['P', 'T']})
    self.assertEqual(list(isoforms), ['n-xPExPxTIDE-c'])
def test_isoforms_simple_2(self):
    """Two alternative mods on one residue yield three isoforms."""
    observed = list(parser.isoforms('PEPTIDE', variable_mods={'x': 'T', 'y': 'T'}))
    self.assertEqual(observed, ['PEPTIDE', 'PEPxTIDE', 'PEPyTIDE'])
def test_isoforms_universal(self):
    """A True-valued mod applies anywhere; a per-residue mod doubles per position."""
    self.assertEqual(set(parser.isoforms('PEPTIDE', variable_mods={'xx-': True})), {'PEPTIDE', 'xx-PEPTIDE'})
    self.assertEqual(set(parser.isoforms('PEPTIDE', variable_mods={'-xx': True})), {'PEPTIDE', 'PEPTIDE-xx'})
    for sequence in self.simple_sequences:
        count = sum(1 for _ in parser.isoforms(sequence, variable_mods={'x': True}))
        self.assertEqual(count, 2 ** len(sequence))
def test_isoforms_terminal(self):
    """Residue-specific terminal mods combine with plain terminal mods."""
    observed = set(parser.isoforms('PEPTIDE', variable_mods={'xx': ['ntermP'], 'yy-': 'P'}))
    self.assertEqual(observed, {'PEPTIDE', 'xxPEPTIDE', 'yy-PEPTIDE', 'yy-xxPEPTIDE'})
def test_isoforms_len(self):
    """Isoform count matches the combinatorial prediction on random peptides."""
    for j in range(50):
        L = random.randint(1, 10)
        peptide = ''.join(random.choice(self.labels) for _ in range(L))
        modseqs = list(parser.isoforms(peptide, variable_mods=self.potential,
            fixed_mods=self.constant, labels=self.labels))
        pp = parser.parse(peptide, labels=self.extlabels)
        # N counts termini that can carry a terminus-specific modification
        N = (pp[0] == 'N') + (pp[-1] == 'C')
        for p in modseqs:
            # modification must never change the residue count
            self.assertEqual(len(pp), parser.length(p, labels=self.extlabels))
        # 3 states per 'A' residue, 2 per modifiable X/C residue or terminus —
        # assumes self.potential/self.constant are set up that way (defined in setUp)
        self.assertEqual(len(modseqs), (3 ** pp.count('A')) * (2 ** (pp.count('X') + pp.count('C') + N)))
def test_isoforms_maxmods(self):
    """max_mods caps how many positions may differ from the unmodified peptide."""
    for _ in range(50):
        length = random.randint(1, 10)
        cap = random.randint(1, 10)
        peptide = ''.join(random.choice(self.labels) for _ in range(length))
        isoforms = parser.isoforms(peptide, variable_mods=self.potential,
                                   labels=self.labels, max_mods=cap, format='split')
        reference = parser.parse(peptide, labels=self.extlabels, split=True)
        for candidate in isoforms:
            self.assertEqual(len(reference), len(candidate))
            changed = sum(a != b for a, b in zip(reference, candidate))
            self.assertLessEqual(changed, cap)
def test_fast_valid(self):
    """Peptides over the alphabet validate; any foreign residue fails."""
    for _ in range(50):
        length = random.randint(1, 10)
        peptide = ''.join(random.choice(self.labels) for _ in range(length))
        self.assertTrue(parser.fast_valid(peptide, labels=self.labels))
        self.assertTrue(parser.valid(peptide, labels=self.labels))
        self.assertTrue(parser.valid(peptide))
        for residue in set(peptide):
            corrupted = peptide.replace(residue, 'Z')
            self.assertFalse(parser.fast_valid(corrupted, labels=self.labels))
            self.assertFalse(parser.valid(corrupted, labels=self.labels))
def test_valid(self):
    """valid() accepts every generated isoform and rejects corrupted sequences."""
    for _ in range(50):
        length = random.randint(1, 10)
        peptide = ''.join(random.choice(self.labels) for _ in range(length))
        modseqs = parser.isoforms(peptide, variable_mods=self.potential,
                                  fixed_mods=self.constant, labels=self.labels)
        # an unknown N-terminal group invalidates the sequence
        self.assertFalse(parser.valid('H-' + peptide, labels=self.labels))
        for modified in modseqs:
            self.assertTrue(parser.valid(modified, labels=self.extlabels))
            for residue in set(peptide):
                corrupted = modified.replace(residue, 'Z')
                self.assertFalse(parser.fast_valid(corrupted, labels=self.labels))
                self.assertFalse(parser.valid(corrupted, labels=self.labels))
if __name__ == '__main__':
    # Run the parser module's doctests before the unittest suite.
    import doctest
    doctest.testmod(parser)
    unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_tandem.py | tests/test_tandem.py | from os import path
import pyteomics
pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]
import unittest
from pyteomics import tandem
from data import tandem_spectra
class TandemTest(unittest.TestCase):
    """Tests for the X!Tandem output reader (pyteomics.tandem)."""

    def setUp(self):
        self.maxDiff = None
        self.path = 'test.t.xml'

    def testReadPSM(self):
        """Every reader entry point must yield the same PSM list as the fixture."""
        # Each callable wraps a different public API: the reader class, read(),
        # chain(), chain.from_iterable(), and the fdr-filter variants.
        for func in [tandem.TandemXML, tandem.read, tandem.chain,
                lambda x, **kw: tandem.chain.from_iterable([x], **kw),
                lambda x, **kw: tandem.filter(x, fdr=1, full_output=False),
                lambda x, **kw: tandem.filter.chain(x, fdr=1, full_output=False),
                lambda x, **kw: tandem.filter.chain.from_iterable([x], fdr=1, full_output=False)]:
            # exercise both iterative (1) and non-iterative (0) parsing modes
            for it in range(2):
                with func(self.path, iterative=it) as r:
                    self.assertEqual(list(r), tandem_spectra)

    def test_df(self):
        """DataFrame export of the fixture has 1 row and 29 columns."""
        df = tandem.DataFrame(self.path)
        self.assertEqual(df.shape, (1, 29))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_traml.py | tests/test_traml.py | from os import path
import pyteomics
pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]
import unittest
from itertools import product
import operator as op
from data import transitions
from pyteomics.traml import TraML, read, chain
from psims.controlled_vocabulary.controlled_vocabulary import obo_cache
obo_cache.cache_path = '.'
obo_cache.enabled = True
class TraMLTest(unittest.TestCase):
    """Tests for the TraML transition-list reader (pyteomics.traml)."""
    maxDiff = None
    path = 'ToyExample1.TraML'

    def test_read(self):
        """Every entry point and flag combination yields the fixture transitions."""
        # rs/it/ui/rr: read_schema, iterative, use_index, retrieve_refs
        for rs, it, ui, rr in product([True, False], repeat=4):
            for func in [TraML, read, chain, lambda x, **kw: chain.from_iterable([x], **kw)]:
                with func(self.path, read_schema=rs, iterative=it, use_index=ui, retrieve_refs=rr) as r:
                    # the fixture is indexed by whether refs were resolved
                    self.assertEqual(transitions[rr], list(r))

    def test_map(self):
        """Parallel map() returns the same records as iteration (order-independent)."""
        with TraML(self.path) as r:
            self.assertEqual(
                sorted(transitions[1], key=op.itemgetter('id')),
                sorted(r.map(), key=op.itemgetter('id')))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_mzid.py | tests/test_mzid.py | from os import path
import pyteomics
pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]
import unittest
from pyteomics.mzid import MzIdentML, read, chain
from pyteomics import auxiliary as aux
from data import mzid_spectra
from itertools import product
from psims.controlled_vocabulary.controlled_vocabulary import obo_cache
obo_cache.cache_path = '.'
obo_cache.enabled = True
class MzidTest(unittest.TestCase):
    """Tests for the mzIdentML reader (pyteomics.mzid)."""
    maxDiff = None
    path = 'test.mzid'

    def test_read(self):
        """All entry points and flag combinations reproduce the fixture PSMs."""
        # rec/refs/rs/it/ui: recursive, retrieve_refs, read_schema, iterative, use_index
        for rec, refs, rs, it, ui in product((True, False), repeat=5):
            for func in [MzIdentML, read, chain, lambda x, **kw: chain.from_iterable([x], **kw)]:
                with self.subTest(rec=rec, refs=refs, rs=rs, it=it, ui=ui, func=func):
                    with func(self.path, recursive=rec, retrieve_refs=refs, read_schema=rs, iterative=it, use_index=ui) as reader:
                        psms = list(reader)
                        # the expected output depends on recursion and ref resolution
                        self.assertEqual(psms, mzid_spectra[(rec, refs)])

    def test_unit_info(self):
        """cvParam values carry their unit names (dalton / parts per million)."""
        with MzIdentML(self.path) as handle:
            for protocol in handle.iterfind("SpectrumIdentificationProtocol"):
                fragment_tolerance = protocol['FragmentTolerance']
                self.assertEqual(fragment_tolerance['search tolerance minus value'].unit_info, 'dalton')
                parent_tolerance = protocol['ParentTolerance']
                self.assertEqual(parent_tolerance['search tolerance plus value'].unit_info, 'parts per million')

    def test_structure_normalization(self):
        """cvquery indexes nested cvParams by accession."""
        gen = read('mzid_snippet.xml').iterfind("SpectraData")
        datum = next(gen)
        index = aux.cvquery(datum)
        assert index['MS:1000768'] == 'Thermo nativeID format'
        datum = next(gen)
        index = aux.cvquery(datum)
        assert index['MS:1000774'] == 'multiple peak list nativeID format'

    def test_map(self):
        """Parallel map() yields as many records as the fixture."""
        with MzIdentML(self.path) as r:
            self.assertEqual(len(mzid_spectra[(1, 1)]), sum(1 for _ in r.map()))

    def test_iterfind_map(self):
        """iterfind(...).map() covers every SpectrumIdentificationResult."""
        with MzIdentML(self.path) as r:
            self.assertEqual(
                len(mzid_spectra[(1, 1)]),
                sum(1 for _ in r.iterfind("SpectrumIdentificationResult").map()))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_mzml.py | tests/test_mzml.py | import os
import shutil
import tempfile
import pyteomics
from io import BytesIO
from lxml import etree
pyteomics.__path__ = [os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'pyteomics'))]
from itertools import product
import unittest
from pyteomics.mzml import MzML, PreIndexedMzML, read, chain
from pyteomics import auxiliary as aux, xml
from data import mzml_spectra
import numpy as np
import pickle
import operator as op
import pynumpress
import base64
import zlib
from psims.controlled_vocabulary.controlled_vocabulary import obo_cache
obo_cache.cache_path = '.'
obo_cache.enabled = True
class MzmlTest(unittest.TestCase):
    """Tests for the mzML reader (pyteomics.mzml): parsing, indexing, decoding."""
    maxDiff = None
    path = 'test.mzML'

    def test_read(self):
        """All reader entry points reproduce the fixture spectra."""
        for rs, it, ui in product([True, False], repeat=3):
            if rs:
                continue  # temporarily disable retrieval of schema
            for func in [MzML, read, chain, lambda x, **kw: chain.from_iterable([x], **kw), PreIndexedMzML]:
                with func(self.path, read_schema=rs, iterative=it, use_index=ui) as r:
                    # http://stackoverflow.com/q/14246983/1258041
                    self.assertEqual(mzml_spectra, list(r))

    def test_mp_read(self):
        """Parallel map() yields the same spectra as sequential reading (order-free)."""
        key = op.itemgetter('index')
        with MzML(self.path) as f:
            self.assertEqual(sorted(mzml_spectra, key=key), sorted(list(f.map()), key=key))

    def test_mp_requires_index(self):
        """map() raises without a byte-offset index."""
        with MzML(self.path, use_index=False) as r:
            self.assertRaises(aux.PyteomicsError, r.map)

    def test_map_qsize(self):
        """queue_size is stored and does not change the mapped results."""
        key = op.itemgetter('index')
        with MzML(self.path, queue_size=1000) as f:
            self.assertEqual(f._queue_size, 1000)
            self.assertEqual(sorted(mzml_spectra, key=key), sorted(list(f.map()), key=key))

    def test_decoding(self):
        """Binary arrays decode eagerly or lazily with identical values."""
        with MzML(self.path, decode_binary=True) as reader:
            spectrum = next(reader)
            self.assertIsNotNone(spectrum['m/z array'])
            validation = spectrum['m/z array']
        with MzML(self.path) as reader:
            spectrum = next(reader)
            self.assertIsNotNone(spectrum['m/z array'])
            self.assertTrue(np.allclose(spectrum['m/z array'], validation))
        with MzML(self.path, decode_binary=False) as reader:
            # decode_binary=False returns lazy records with metadata + decode()
            spectrum = next(reader)
            record = spectrum['m/z array']
            self.assertEqual(record.compression, "no compression")
            self.assertEqual(record.dtype, np.float64)
            array = record.decode()
            self.assertTrue(np.allclose(validation, array))
            record = spectrum['intensity array']
            self.assertEqual(record.dtype, np.float32)
            self.assertEqual(record.compression, "no compression")
            spectrum = next(reader)
            record = spectrum['intensity array']
            self.assertEqual(record.compression, "zlib compression")
            self.assertEqual(mzml_spectra[1]['intensity array'], record.decode())

    def test_read_dtype(self):
        """User-supplied dtypes are applied to the decoded arrays."""
        dtypes = {'m/z array': np.float32, 'intensity array': np.int32}
        with read(self.path, dtype=dtypes) as f:
            for spec in f:
                for k, v in dtypes.items():
                    self.assertEqual(spec[k].dtype, v)

    def test_has_built_index(self):
        """use_index controls whether an offset index exists."""
        with read(self.path, use_index=True) as f:
            self.assertGreater(len(f._offset_index), 0)
        with read(self.path, use_index=False) as f:
            self.assertEqual(f._offset_index, None)

    def test_prebuild_index(self):
        """Byte-offset files are created, loaded, validated and invalidated."""
        test_dir = tempfile.mkdtemp()
        work_path = os.path.join(test_dir, self.path)
        # work on a copy so the shared fixture file is never modified
        with open(work_path, 'w') as dest, open(self.path) as source:
            dest.write(source.read())
        assert dest.closed
        with MzML(work_path, use_index=False) as inst:
            self.assertRaises(IOError, inst._read_byte_offsets)
            # a malformed offset file must be rejected
            with open(inst._byte_offset_filename, 'wt') as fh:
                fh.write("{}")
            self.assertRaises(TypeError, inst._read_byte_offsets)
            os.remove(inst._byte_offset_filename)
        with MzML(work_path, use_index=True) as inst:
            offsets_exist = os.path.exists(inst._byte_offset_filename)
            self.assertEqual(offsets_exist, inst._check_has_byte_offset_file())
            self.assertTrue(isinstance(inst._offset_index, xml.HierarchicalOffsetIndex))
        self.assertTrue(inst._source.closed)
        MzML.prebuild_byte_offset_file(work_path)
        with open(inst._byte_offset_filename, 'rt') as fh:
            index = MzML._index_class.load(fh)
        assert inst._offset_index['spectrum'] == index['spectrum']
        with MzML(work_path, use_index=True) as inst:
            offsets_exist = os.path.exists(inst._byte_offset_filename)
            self.assertTrue(offsets_exist)
            self.assertEqual(offsets_exist, inst._check_has_byte_offset_file())
            self.assertTrue(isinstance(inst._offset_index, xml.HierarchicalOffsetIndex))
        self.assertTrue(inst._source.closed)
        os.remove(inst._byte_offset_filename)
        with MzML(work_path, use_index=True) as inst:
            offsets_exist = os.path.exists(inst._byte_offset_filename)
            self.assertEqual(offsets_exist, inst._check_has_byte_offset_file())
            self.assertTrue(isinstance(inst._offset_index, xml.HierarchicalOffsetIndex))
        self.assertTrue(inst._source.closed)
        shutil.rmtree(test_dir, True)

    def test_unit_extract(self):
        """Scan parameters carry unit metadata (minute, m/z)."""
        with MzML(self.path) as handle:
            for scan in handle:
                scan_inst = scan['scanList']['scan'][0]
                scan_time = scan_inst['scan start time']
                scan_window_lower_limit = scan_inst['scanWindowList']['scanWindow'][0]['scan window lower limit']
                self.assertEqual(scan_time.unit_info, 'minute')
                self.assertEqual(scan_window_lower_limit.unit_info, 'm/z')

    def test_cv_query(self):
        """cvquery maps CV accessions to values, including nested parameters."""
        with MzML(self.path) as handle:
            scan = next(handle)
            index = aux.cvquery(scan)
            self.assertEqual(index['MS:1000511'], 1)
            self.assertEqual(aux.cvquery(scan, "MS:1000511"), 1)
            # test deep traversal
            self.assertEqual(index['MS:1000016'], 0.004935)
            self.assertEqual(aux.cvquery(scan, 'MS:1000016'), 0.004935)

    def test_retrieve_refs(self):
        """retrieve_refs resolves softwareRef into the referenced record."""
        with MzML(self.path) as reader:
            derefed = list(reader.iterfind("instrumentConfiguration", retrieve_refs=True))
            reader.reset()
            raw = list(reader.iterfind("instrumentConfiguration", retrieve_refs=False))
            self.assertEqual(raw[0].get("softwareRef"),
                {'ref': 'Xcalibur'})
            self.assertNotIn("ref", derefed[0]['softwareRef'])
            self.assertEqual(derefed[0].get('softwareRef'), {
                'version': '1.1 Beta 7', 'Xcalibur': ''})

    def test_in_memory_buffer(self):
        """Reading from a BytesIO buffer works with and without an index."""
        with open(self.path, 'rb') as fh:
            data_buffer = BytesIO(fh.read())
        with MzML(data_buffer) as reader:
            spectrum = next(reader)
            self.assertEqual(spectrum['id'], 'controllerType=0 controllerNumber=1 scan=1')
        data_buffer.seek(0)
        with MzML(data_buffer, use_index=True) as reader:
            spectrum = next(reader)
            self.assertEqual(spectrum['id'], 'controllerType=0 controllerNumber=1 scan=1')

    def test_picklable(self):
        """A reader survives a pickle round-trip and keeps working."""
        with MzML(self.path) as reader:
            expected_data = next(reader)
            spec = pickle.dumps(reader)
        with pickle.loads(spec) as reader:
            self.assertEqual(next(reader)['id'], expected_data['id'])

    def test_indexing(self):
        """Spectra are addressable by position, id, lists and slices."""
        with MzML(self.path) as reader:
            self.assertEqual(mzml_spectra[0], reader[0])
            self.assertEqual(mzml_spectra[0], reader['controllerType=0 controllerNumber=1 scan=1'])
            self.assertEqual(mzml_spectra, reader[0:2])
            self.assertEqual(mzml_spectra,
                [reader['controllerType=0 controllerNumber=1 scan=1'],
                reader['controllerType=0 controllerNumber=1 scan=2']])
            self.assertEqual(mzml_spectra, reader[[0, 1]])
            self.assertEqual(mzml_spectra, reader[
                ['controllerType=0 controllerNumber=1 scan=1', 'controllerType=0 controllerNumber=1 scan=2']])
            # id-based slice; NOTE(review): endpoints appear reversed here — verify
            # against the reader's id-slice semantics
            self.assertEqual(mzml_spectra, reader[
                'controllerType=0 controllerNumber=1 scan=2':'controllerType=0 controllerNumber=1 scan=1'])

    def test_time_locator(self):
        """The time locator retrieves spectra by retention time."""
        with MzML(self.path) as reader:
            self.assertEqual(mzml_spectra[0], reader.time[0])
            self.assertEqual(mzml_spectra[1], reader.time[0.1])
            self.assertEqual(mzml_spectra, reader.time[0:0.1])

    def test_numpress_slof(self):
        """Numpress slof round-trip stays within 0.1% relative error."""
        data = mzml_spectra[0]['intensity array']
        encoded = base64.b64encode(pynumpress.encode_slof(data, pynumpress.optimal_slof_fixed_point(data)).tobytes()).decode('ascii')
        record = aux.BinaryDataArrayTransformer()._make_record(encoded, 'MS-Numpress short logged float compression', data.dtype)
        self.assertTrue(np.allclose(data, record.decode(), rtol=0.001))

    def test_numpress_slof_zlib(self):
        """Numpress slof + zlib round-trip stays within 0.1% relative error."""
        data = mzml_spectra[0]['intensity array']
        encoded = base64.b64encode(zlib.compress(pynumpress.encode_slof(data, pynumpress.optimal_slof_fixed_point(data)).tobytes())).decode('ascii')
        record = aux.BinaryDataArrayTransformer()._make_record(encoded, 'MS-Numpress short logged float compression followed by zlib compression', data.dtype)
        self.assertTrue(np.allclose(data, record.decode(), rtol=0.001))

    def test_numpress_linear(self):
        """Numpress linear round-trip stays within 0.1% relative error."""
        data = mzml_spectra[0]['intensity array']
        encoded = base64.b64encode(pynumpress.encode_linear(data, pynumpress.optimal_linear_fixed_point(data)).tobytes()).decode('ascii')
        record = aux.BinaryDataArrayTransformer()._make_record(encoded, 'MS-Numpress linear prediction compression', data.dtype)
        self.assertTrue(np.allclose(data, record.decode(), rtol=0.001))

    def test_numpress_linear_zlib(self):
        """Numpress linear + zlib round-trip stays within 0.1% relative error."""
        data = mzml_spectra[0]['intensity array']
        encoded = base64.b64encode(zlib.compress(pynumpress.encode_linear(data, pynumpress.optimal_linear_fixed_point(data)).tobytes())).decode('ascii')
        record = aux.BinaryDataArrayTransformer()._make_record(encoded, 'MS-Numpress linear prediction compression followed by zlib compression', data.dtype)
        self.assertTrue(np.allclose(data, record.decode(), rtol=0.001))

    def test_numpress_pic(self):
        """Numpress pic (integer) round-trip stays within 0.6 absolute error."""
        data = mzml_spectra[0]['intensity array']
        encoded = base64.b64encode(pynumpress.encode_pic(data).tobytes()).decode('ascii')
        record = aux.BinaryDataArrayTransformer()._make_record(encoded, 'MS-Numpress positive integer compression', data.dtype)
        self.assertTrue(np.allclose(data, record.decode(), atol=0.6))

    def test_numpress_pic_zlib(self):
        """Numpress pic + zlib round-trip stays within 0.6 absolute error."""
        data = mzml_spectra[0]['intensity array']
        encoded = base64.b64encode(zlib.compress(pynumpress.encode_pic(data).tobytes())).decode('ascii')
        record = aux.BinaryDataArrayTransformer()._make_record(encoded, 'MS-Numpress positive integer compression followed by zlib compression', data.dtype)
        self.assertTrue(np.allclose(data, record.decode(), atol=0.6))

    def test_userparam_units(self):
        """userParam elements keep their unitName."""
        xml_str = '<userParam name="some quantity" value="42" unitName="cats"/>'
        parser = etree.XMLParser()
        parser.feed(xml_str)
        element = parser.close()
        with MzML(self.path) as reader:
            param = reader._handle_param(element)
            self.assertEqual(param.value.unit_info, 'cats')

    def test_cvparam_unitname_lookup(self):
        """Missing unitName is resolved from the unitAccession CV term."""
        # uniName omitted
        xml_str = '<cvParam cvRef="MS" accession="MS:1000504" name="base peak m/z" value="810.415283203125" unitCvRef="MS" unitAccession="MS:1000040"/>'
        parser = etree.XMLParser()
        parser.feed(xml_str)
        element = parser.close()
        with MzML(self.path) as reader:
            param = reader._handle_param(element)
            self.assertEqual(param.value.unit_info, 'm/z')
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_mass.py | tests/test_mass.py | from os import path
import pyteomics
pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]
import unittest
import random
import pickle
from pyteomics import mass, auxiliary, parser
import gzip
class MassTest(unittest.TestCase):
def setUp(self):
    """Build a synthetic element table, amino-acid compositions and peptides.

    The fake 'elements' A..F have integer monoisotopic masses; each inner dict
    maps isotope number -> (mass, abundance), with key 0 holding the
    monoisotopic-mass convention used by pyteomics mass data tables.
    """
    self.mass_data = {
        'A': {0: (1.0, 1.0),
              1: (1.0, 0.5),
              2: (2.0, 0.5)},
        'B': {0: (2.0, 1.0),
              2: (2.0, 0.5),
              3: (3.0, 0.5)},
        'C': {0: (3.0, 1.0),
              3: (3.0, 0.5),
              4: (4.0, 0.5)},
        'D': {0: (4.0, 1.0),
              4: (4.0, 0.5),
              5: (5.0, 0.5)},
        'E': {0: (5.0, 1.0),
              5: (5.0, 0.5),
              6: (6.0, 0.5)},
        'F': {0: (6.0, 1.0),
              6: (6.0, 0.7),
              7: (7.0, 0.3)},
        'H+': {0: (5.0, 1.0),
               5: (5.0, 1.0)},
    }
    # real H and O masses, used for the water term in fast_mass checks
    self.mass_H = mass.nist_mass['H'][0][0]
    self.mass_O = mass.nist_mass['O'][0][0]
    self.test_aa_mass = {'X': 1.0, 'Y': 2.0, 'Z': 3.0}
    # ten random 20-residue peptides over the X/Y/Z alphabet
    self.random_peptides = [
        ''.join([random.choice('XYZ') for i in range(20)])
        for i in range(10)]
    # amino-acid compositions over the fake elements; H-/-OH are the termini
    self.aa_comp = {
        'X': mass.Composition({'A': 1}),
        'Y': mass.Composition({'B': 1}),
        'Z': mass.Composition({'C': 1}),
        'F': mass.Composition({'F': 1}),
        'H-': mass.Composition({'D': 1}),
        '-OH': mass.Composition({'E': 1}),
    }
    self.ion_comp = {
        'M': mass.Composition({}),
        'a': mass.Composition({'A': -1})
    }
    # two modification labels with one-element compositions
    self.mods = {'a': mass.Composition(A=1), 'b': mass.Composition(B=1)}
    self.d = {atom: 1 for atom in 'ABCDE'}
def test_fast_mass(self):
    """fast_mass equals the residue-mass sum plus one water."""
    water = self.mass_H * 2.0 + self.mass_O
    for peptide in self.random_peptides:
        expected = sum(peptide.count(aa) * m for aa, m in self.test_aa_mass.items()) + water
        self.assertAlmostEqual(mass.fast_mass(peptide, aa_mass=self.test_aa_mass), expected)
def test_fast_mass2_no_term(self):
    """Without terminal groups fast_mass2 reduces to residue sum plus water."""
    water = self.mass_H * 2.0 + self.mass_O
    for peptide in self.random_peptides:
        expected = sum(peptide.count(aa) * m for aa, m in self.test_aa_mass.items()) + water
        self.assertAlmostEqual(mass.fast_mass2(peptide, aa_mass=self.test_aa_mass), expected)
def test_fast_mass2_sanity(self):
    """fast_mass2 agrees with fast_mass and a known PEPTIDE mass."""
    reference = mass.fast_mass('PEPTIDE')
    self.assertAlmostEqual(mass.fast_mass2('PEPTIDE'), reference)
    self.assertAlmostEqual(mass.fast_mass2('PEPTIDE'), 799.359964)
def test_fast_mass2_term(self):
    """Explicit terminal-group formulas are added to the residue masses."""
    for pep in self.random_peptides:
        nterm = 'AB2C3-'
        cterm = '-DE2F3'
        self.assertAlmostEqual(
            mass.fast_mass2(nterm + pep + cterm, aa_mass=self.test_aa_mass, mass_data=self.mass_data),
            # residue sum + the element masses of both terminal formulas
            sum(pep.count(aa) * m for aa, m in self.test_aa_mass.items()) + (
                self.mass_data['A'][0][0] + self.mass_data['B'][0][0] * 2 + self.mass_data['C'][0][0] * 3 +
                self.mass_data['D'][0][0] + self.mass_data['E'][0][0] * 2 + self.mass_data['F'][0][0] * 3))
def test_fast_mass2_term_label(self):
    """Modification labels used as bare terminal groups raise PyteomicsError."""
    mass_data = dict(self.mass_data)
    mass_data['H'] = {0: (self.mass_H, 1.0)}
    mass_data['O'] = {0: (self.mass_O, 1.0)}
    aa_mass = self.test_aa_mass.copy()
    aa_mass.update({k: mass.calculate_mass(composition=v, mass_data=mass_data) for k, v in self.mods.items()})
    for pep in self.random_peptides:
        for mlabel, mcomp in self.mods.items():
            # 'a-PEP...-a': the labels are not valid terminal groups here
            mpep = mlabel + '-' + pep + '-' + mlabel
            self.assertRaises(auxiliary.PyteomicsError,
                mass.fast_mass2, mpep, mass_data=mass_data, aa_mass=aa_mass)
def test_composition_term(self):
    """Bare modification labels at the termini make Composition raise."""
    aa_comp = dict(self.aa_comp, **self.mods)
    for peptide in self.random_peptides:
        for label in self.mods:
            modified = label + '-' + peptide + '-' + label
            self.assertRaises(auxiliary.PyteomicsError, mass.Composition,
                              sequence=modified, aa_comp=aa_comp)
def test_composition_term_sseq(self):
    """Bare terminal labels in split-sequence input also raise PyteomicsError."""
    aa_comp = self.aa_comp.copy()
    aa_comp.update(self.mods)
    for pep in self.random_peptides:
        for mlabel, mcomp in self.mods.items():
            split_sequence = parser.parse(pep, split=True)
            # prepend 'label-' to the first tuple and append '-label' to the last
            self.assertRaises(auxiliary.PyteomicsError, mass.Composition, split_sequence=[
                (mlabel + '-',) + split_sequence[0]] + split_sequence[1:-1] + [split_sequence[-1] + ('-' + mlabel,)], aa_comp=aa_comp)
def test_Composition_dict(self):
    """A Composition built from a dict compares equal to that dict."""
    comp = mass.Composition(self.d, mass_data=self.mass_data)
    self.assertEqual(comp, self.d)
def test_Composition_formula(self):
    """A Composition parsed from a formula matches the element-count dict."""
    unit_masses = {atom: {0: (1.0, 1.0)} for atom in 'ABCDE'}
    self.assertEqual(self.d, mass.Composition(formula='ABCDE', mass_data=unit_masses))
def test_Composition_seq(self):
    """A Composition from a modX sequence includes the default termini (D/E)."""
    comp = mass.Composition(sequence='XYZ', aa_comp=self.aa_comp)
    self.assertEqual(self.d, comp)
def test_Composition_pseq(self):
    """Parsed-sequence input contributes residues only, no terminal groups."""
    comp = mass.Composition(parsed_sequence=['X', 'Y', 'Z'], aa_comp=self.aa_comp)
    self.assertEqual(comp, {atom: 1 for atom in 'ABC'})
def test_Composition_sseq(self):
    """Split-sequence input contributes residues only, no terminal groups."""
    comp = mass.Composition(split_sequence=[('X',), ('Y',), ('Z',)], aa_comp=self.aa_comp)
    self.assertEqual(comp, {atom: 1 for atom in 'ABC'})
def test_Composition_term_formula(self):
    """Explicit terminal formulas contribute their own element counts."""
    comp = mass.Composition(sequence='A2B-XYZ-DE2F3', aa_comp=self.aa_comp)
    self.assertEqual(comp, {'A': 3, 'B': 2, 'C': 1, 'D': 1, 'E': 2, 'F': 3})
def test_Composition_nterm_formula(self):
    """An explicit N-terminal formula combines with the default C-terminus."""
    comp = mass.Composition(sequence='AB-XYZ', aa_comp=self.aa_comp)
    self.assertEqual(comp, {'A': 2, 'B': 2, 'C': 1, 'E': 1})
def test_Composition_cterm_formula(self):
    """An explicit C-terminal formula combines with the default N-terminus."""
    comp = mass.Composition(sequence='XYZ-AB', aa_comp=self.aa_comp)
    self.assertEqual(comp, {'A': 2, 'B': 2, 'C': 1, 'D': 1})
def test_Composition_sum(self):
    """Adding two Compositions adds their element counts."""
    left = mass.Composition(sequence='XXY', aa_comp=self.aa_comp)
    right = mass.Composition(sequence='YZZ', aa_comp=self.aa_comp)
    self.assertEqual(left + right, {atom: 2 for atom in 'ABCDE'})
def test_Composition_sub(self):
    """Subtracting a Composition from an empty dict negates every count."""
    negated = {} - mass.Composition(sequence='XYZ', aa_comp=self.aa_comp)
    self.assertEqual(negated, {atom: -1 for atom in 'ABCDE'})
def test_Composition_mul(self):
    """Multiplying by an int scales counts, on either side."""
    comp = mass.Composition(sequence='XYZ', aa_comp=self.aa_comp)
    doubled = {atom: 2 for atom in 'ABCDE'}
    self.assertEqual(2 * comp, doubled)
    self.assertEqual(comp * 2, doubled)
def test_Composition_positional(self):
    """The single positional argument is parsed as a sequence or a formula."""
    aa_comp = dict(self.aa_comp, **self.mods)
    self.assertEqual(mass.Composition('aXbYZ', aa_comp=aa_comp),
                     {'A': 2, 'B': 2, 'C': 1, 'D': 1, 'E': 1})
    self.assertEqual(mass.Composition('AB2C3', mass_data=self.mass_data),
                     {'A': 1, 'B': 2, 'C': 3})
def test_calculate_mass(self):
    """calculate_mass must agree across all input forms and charge conventions."""
    # Calculate mass by a formula.
    self.assertEqual(
        mass.calculate_mass(formula='ABCDE', mass_data=self.mass_data),
        sum(self.mass_data[atom][0][0] for atom in 'ABCDE'))
    # Calculate mass by a sequence.
    self.assertEqual(
        mass.calculate_mass(sequence='XYZ',
            aa_comp=self.aa_comp,
            mass_data=self.mass_data),
        sum(self.mass_data[atom][0][0] for atom in 'ABCDE'))
    # Calculate mass by a parsed sequence.
    self.assertEqual(
        mass.calculate_mass(parsed_sequence=['H-', 'X', 'Y', 'Z', '-OH'], aa_comp=self.aa_comp, mass_data=self.mass_data),
        sum(self.mass_data[atom][0][0] for atom in 'ABCDE'))
    # Calculate mass by composition
    self.assertEqual(
        mass.calculate_mass(composition={'A': 1, 'B': 1, 'C': 1, 'D': 1, 'E': 1}, mass_data=self.mass_data),
        sum(self.mass_data[atom][0][0] for atom in 'ABCDE'))
    # Calculate average mass by a formula.
    self.assertEqual(
        mass.calculate_mass(formula='ABCDE', average=True, mass_data=self.mass_data),
        sum(self.mass_data[atom][isotope][0] * self.mass_data[atom][isotope][1]
            for atom in 'ABCDE' for isotope in self.mass_data[atom] if isotope != 0))
    # Calculate m/z of an ion.
    for charge in [1, 2, 3]:
        # default proton carrier equals adding explicit H+ atoms to the formula
        self.assertEqual(
            mass.calculate_mass(formula='ABCDE', ion_type='M', charge=charge, mass_data=self.mass_data),
            mass.calculate_mass(formula='ABCDE' + 'H+%d' % (charge,), mass_data=self.mass_data))
        # custom carrier given as a formula string carrying charge 2
        self.assertEqual(
            mass.calculate_mass(formula='ABCDE', ion_type='M', charge=charge * 2, charge_carrier='AB+2', mass_data=self.mass_data),
            (mass.calculate_mass(formula='ABCDE', mass_data=self.mass_data) + charge * (
                self.mass_data['A'][0][0] + self.mass_data['B'][0][0])) / charge / 2)
        # custom carrier given as a composition dict plus carrier_charge
        self.assertEqual(
            mass.calculate_mass(formula='ABCDE', ion_type='M', charge=charge * 2, charge_carrier={'A': 1, 'B': 1},
                carrier_charge=2, mass_data=self.mass_data),
            (mass.calculate_mass(formula='ABCDE', mass_data=self.mass_data) + charge * (
                self.mass_data['A'][0][0] + self.mass_data['B'][0][0])) / charge / 2)
        self.assertEqual(
            mass.calculate_mass(formula='ABCDE', ion_type='M', charge=charge, mass_data=self.mass_data),
            (mass.calculate_mass(formula='ABCDE', mass_data=self.mass_data) + self.mass_data['H+'][0][0] * charge) / charge)
        self.assertAlmostEqual(
            mass.calculate_mass(composition={'A': 1, 'B': 1, 'C': 1, 'D': 1, 'E': 1}, charge=charge, charge_carrier='BC+', mass_data=self.mass_data),
            mass.calculate_mass(composition={'A': 1, 'B': 1 + charge, 'C': 1 + charge, 'D': 1, 'E': 1}, mass_data=self.mass_data) / charge)
        # explicit H+ in the formula combined with a charge argument is an error
        self.assertRaises(auxiliary.PyteomicsError, mass.calculate_mass, **{'formula': 'ABCDEH+%d' % charge,
            'ion_type': 'M', 'charge': charge, 'mass_data': self.mass_data})
        # charge=3 with carrier_charge=2 (not divisible) is an error
        self.assertRaises(auxiliary.PyteomicsError, mass.calculate_mass, **{'formula': 'ABCDE',
            'ion_type': 'M', 'charge': 3, 'carrier_charge': 2, 'mass_data': self.mass_data})
    # Sanity check.
    for pep in self.random_peptides:
        self.assertEqual(
            mass.calculate_mass(sequence=pep, aa_comp=self.aa_comp, mass_data=self.mass_data, ion_comp=self.ion_comp),
            mass.calculate_mass(parsed_sequence=parser.parse(pep, labels=['X', 'Y', 'Z'], show_unmodified_termini=True),
                aa_comp=self.aa_comp, mass_data=self.mass_data, ion_comp=self.ion_comp))
def test_calculate_proforma_mass(self):
    """modX and ProForma spellings of carbamidomethyl-Cys agree in mass."""
    aa_comp = mass.std_aa_comp.copy()
    aa_comp['cam'] = mass.Composition(formula='H3C2NO')  # carbamidomethyl
    seq_modX = 'PEPTIcamCIDE'
    seq_proforma = 'PEPTIC[+57.021464]IDE'
    for charge in (None, 0, 1, 2):
        self.assertAlmostEqual(
            mass.calculate_mass(sequence=seq_modX, charge=charge, aa_comp=aa_comp),
            mass.calculate_mass(proforma=seq_proforma, charge=charge),
            places=6)
def test_most_probable_isotopic_composition(self):
    """Returns the modal isotopologue together with its probability."""
    self.assertEqual(
        mass.most_probable_isotopic_composition(formula='F', mass_data=self.mass_data),
        (mass.Composition({'F[6]': 1, 'F[7]': 0}, mass_data=self.mass_data), 0.7))
    self.assertEqual(
        mass.most_probable_isotopic_composition(formula='F10', mass_data=self.mass_data),
        # 120 = C(10, 3) arrangements of three F[7] among ten atoms
        (mass.Composition({'F[6]': 7, 'F[7]': 3}, mass_data=self.mass_data), (0.3)**3 * (0.7)**7 * 120))
    self.assertEqual(
        mass.most_probable_isotopic_composition(formula='A20F10', elements_with_isotopes=['F'], mass_data=self.mass_data),
        (mass.Composition({'A': 20, 'F[6]': 7, 'F[7]': 3}, mass_data=self.mass_data), (0.3)**3 * (0.7)**7 * 120))
def test_isotopic_composition_abundance(self):
    """Abundance multiplies per labeled isotope; unlabeled atoms contribute 1."""
    for peplen in range(1, 10):
        self.assertAlmostEqual(
            mass.isotopic_composition_abundance(formula='F[6]' * peplen, mass_data=self.mass_data),
            self.mass_data['F'][6][1] ** peplen)
        # unlabeled 'A' atoms do not change the abundance
        self.assertAlmostEqual(
            mass.isotopic_composition_abundance(formula='AF[6]' * peplen, mass_data=self.mass_data),
            self.mass_data['F'][6][1] ** peplen)
        self.assertAlmostEqual(
            mass.isotopic_composition_abundance(formula='A[1]F[6]' * peplen, mass_data=self.mass_data),
            (self.mass_data['A'][1][1] * self.mass_data['F'][6][1]) ** peplen)
def test_Unimod_mass(self):
    """Unimod monoisotopic masses match recomputation from composition."""
    db = mass.Unimod(gzip.open('unimod.xml.gz'))
    for entry in db.mods:
        recomputed = mass.calculate_mass(entry['composition'], mass_data=db.mass_data)
        self.assertGreater(0.00001, abs(entry['mono_mass'] - recomputed))
def test_Unimod_methods(self):
    """by_id, by_title, by_name and indexing all locate the same record."""
    db = mass.Unimod(gzip.open('unimod.xml.gz'))
    record = db.by_id(1)
    self.assertEqual(record['title'], 'Acetyl')
    self.assertEqual(record['full_name'], 'Acetylation')
    self.assertEqual(record, db[1])
    self.assertEqual(record, db.by_title('Acetyl'))
    self.assertEqual(record, db.by_name('Acetylation'))
def test_nist_mass(self):
    """Entry 0 abundance is ~1; per-isotope abundances sum to 1 (or 0 if absent)."""
    for isotopes in mass.nist_mass.values():
        self.assertTrue(abs(isotopes[0][1] - 1) < 1e-6)
        total = sum(entry[1] for num, entry in isotopes.items() if num)
        self.assertTrue(abs(total - 1) < 1e-6 or abs(total) < 1e-6)
def test_composition_objects_are_pickleable(self):
    """Compositions survive a pickle round-trip however they were built."""
    samples = [
        mass.Composition(self.d, mass_data=self.mass_data),
        mass.Composition(formula='ABCDE',
                         mass_data={atom: {0: (1.0, 1.0)} for atom in 'ABCDE'}),
        mass.Composition(sequence='XYZ', aa_comp=self.aa_comp),
        mass.Composition(parsed_sequence=['X', 'Y', 'Z'], aa_comp=self.aa_comp),
        mass.Composition(split_sequence=[('X',), ('Y',), ('Z',)], aa_comp=self.aa_comp),
    ]
    for comp in samples:
        self.assertEqual(comp, pickle.loads(pickle.dumps(comp)))
def test_aa_mass(self):
    """std_aa_mass plus one water equals fast_mass of the single residue."""
    water = mass.calculate_mass(formula='H2O')
    for residue, residue_mass in mass.std_aa_mass.items():
        self.assertEqual(residue_mass + water, mass.fast_mass(residue))
def test_isotopologues(self):
    """All input forms enumerate the same isotopologues with correct abundances."""
    peptide = 'XYF'
    states = [{'F[6]': 1, 'A': 1, 'B': 1, 'D': 1, 'E': 1}, {'F[7]': 1, 'A': 1, 'B': 1, 'D': 1, 'E': 1}]
    abundances = [0.7, 0.3]
    kw_common = dict(elements_with_isotopes='F', aa_comp=self.aa_comp, mass_data=self.mass_data)
    # the same peptide expressed six different ways
    kwlist = [
        {},
        {'sequence': 'XYF'},
        {'parsed_sequence': parser.parse('XYF', show_unmodified_termini=True)},
        {'split_sequence': parser.parse('XYF', show_unmodified_termini=True, split=True)},
        {'formula': 'ABDEF'},
        {'composition': mass.Composition(sequence='XYF', aa_comp=self.aa_comp)}]
    arglist = [(peptide,), (), (), (), (), ()]
    for args, kw in zip(arglist, kwlist):
        kwargs = kw_common.copy()
        kwargs.update(kw)
        isotopologues = mass.isotopologues(*args, **kwargs)
        for state in isotopologues:
            i = states.index(state)  # raises ValueError on an unexpected state
            self.assertNotEqual(i, -1)
            self.assertAlmostEqual(abundances[i], mass.isotopic_composition_abundance(state,
                aa_comp=self.aa_comp, mass_data=self.mass_data))
def test_isotopologues_with_abundances(self):
peptide = 'XYF'
states = [{'F[6]': 1, 'A': 1, 'B': 1, 'D': 1, 'E': 1}, {'F[7]': 1, 'A': 1, 'B': 1, 'D': 1, 'E': 1}]
abundances = [0.7, 0.3]
for state, abundance in mass.isotopologues(peptide, elements_with_isotopes='F',
aa_comp=self.aa_comp, mass_data=self.mass_data, report_abundance=True):
i = states.index(state)
self.assertNotEqual(i, -1)
self.assertAlmostEqual(abundances[i], abundance)
def test_std_aa_mass(self):
for key, value in mass.std_aa_mass.items():
self.assertAlmostEqual(value, mass.calculate_mass(parsed_sequence=[key]), places=4)
    def test_fragment_series_modx_vs_proforma(self):
        """Test that fragment_series produces identical results for modX and ProForma formats."""
        # Define sequences with phosphorylation on threonine
        # modX format: phosphorylated threonine
        seq_modx = 'PEPpTIDE'
        # ProForma format: threonine with +79.966331 (phosphorylation)
        seq_proforma = 'PEPT[+79.966331]IDE'
        # Define amino acid compositions including phosphorylated threonine
        aa_comp_modx = mass.std_aa_comp.copy()
        aa_comp_modx['pT'] = mass.Composition(formula='H8C4O5NP') # T + phosphate group
        # Test parameters
        ion_types = ('b', 'y')
        maxcharge = 2
        # Generate fragments for modX sequence; the modX path needs an explicit
        # aa_mass table that covers the 'pT' residue.
        fragments_modx = mass.fragment_series(
            seq_modx,
            ion_types=ion_types,
            maxcharge=maxcharge,
            aa_mass={k: mass.calculate_mass(composition=v) for k, v in aa_comp_modx.items()}
        )
        # Generate fragments for ProForma sequence (mass offset comes from the tag itself)
        fragments_proforma = mass.fragment_series(
            seq_proforma,
            ion_types=ion_types,
            maxcharge=maxcharge
        )
        # Verify that both formats produce the same ion types
        self.assertEqual(set(fragments_modx.keys()), set(fragments_proforma.keys()),
                         "Ion types should be identical for modX and ProForma")
        # Verify that m/z values are identical (within tolerance)
        for ion_type in ion_types:
            modx_fragments = fragments_modx[ion_type]
            proforma_fragments = fragments_proforma[ion_type]
            self.assertEqual(len(modx_fragments), len(proforma_fragments),
                             f"Number of {ion_type} ions should be identical")
            # Check that fragment names match
            self.assertEqual(set(modx_fragments), set(proforma_fragments),
                             f"{ion_type} ion names should be identical")
            # Check m/z values are close (allowing for small numerical differences)
            for fragment_name in modx_fragments:
                self.assertAlmostEqual(modx_fragments[fragment_name], proforma_fragments[fragment_name], places=5,
                                       msg=f"{ion_type} ion {fragment_name} m/z values should be identical")
        # Additional checks to ensure we got reasonable results
        self.assertGreater(len(fragments_modx['b']), 0, "Should generate b ions")
        self.assertGreater(len(fragments_modx['y']), 0, "Should generate y ions")
        # Check that we have the expected number of fragments for a 7-residue peptide
        # For each charge state, we expect 6 b ions and 6 y ions (one per cleavage site)
        expected_fragments_per_charge = 6
        expected_total_fragments = expected_fragments_per_charge * maxcharge
        self.assertEqual(len(fragments_modx['b']), expected_total_fragments,
                         f"Expected {expected_total_fragments} b ions")
        self.assertEqual(len(fragments_modx['y']), expected_total_fragments,
                         f"Expected {expected_total_fragments} y ions")
    def test_fragment_series_basic_functionality(self):
        """Test basic functionality of fragment_series with a simple sequence."""
        seq = 'PEPTIDE'
        # Test with default parameters
        fragments = mass.fragment_series(seq)
        # Should have b and y ions by default
        self.assertIn('b', fragments)
        self.assertIn('y', fragments)
        # For PEPTIDE (7 residues), with maxcharge=1, expect 6 b ions and 6 y ions
        self.assertEqual(len(fragments['b']), 6)
        self.assertEqual(len(fragments['y']), 6)
        # Check that names are correct (ion number plus charge marker)
        expected_b_names = {'b1+', 'b2+', 'b3+', 'b4+', 'b5+', 'b6+'}
        expected_y_names = {'y1+', 'y2+', 'y3+', 'y4+', 'y5+', 'y6+'}
        self.assertEqual(set(fragments['b'].keys()), expected_b_names)
        self.assertEqual(set(fragments['y'].keys()), expected_y_names)
        # Check that m/z values are reasonable (all positive)
        for ion_type in ['b', 'y']:
            for fragment_name, mz_val in fragments[ion_type].items():
                self.assertGreater(mz_val, 0, f"m/z value for {fragment_name} should be positive")
        # Test with different ion types
        fragments_abc = mass.fragment_series(seq, ion_types=('a', 'b', 'c'))
        self.assertIn('a', fragments_abc)
        self.assertIn('b', fragments_abc)
        self.assertIn('c', fragments_abc)
        # Test with higher charge states
        fragments_z2 = mass.fragment_series(seq, maxcharge=2)
        # With maxcharge=2, should have twice as many fragments
        self.assertEqual(len(fragments_z2['b']), 12) # 6 fragments × 2 charge states
        self.assertEqual(len(fragments_z2['y']), 12)
# Run this module's tests when invoked directly.
if __name__ == '__main__':
    unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_pepxml.py | tests/test_pepxml.py | from os import path
import pyteomics
pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]
from itertools import product
import unittest
from pyteomics.pepxml import PepXML, read, chain, filter
from data import pepxml_results
class PepxmlTest(unittest.TestCase):
    """Round-trip reading tests for pepXML through every public entry point."""
    maxDiff = None
    # Path to the reference pepXML file (relative to the tests directory).
    path = 'test.pep.xml'
    # Keyword arguments for the filter() entry points: keep everything (fdr=1)
    # and rank each PSM by its best (smallest) 'expect' search score.
    _kw = {'full_output': False, 'fdr': 1,
        'key': lambda x: min(
            sh['search_score'].get('expect', 1)
            for sh in x['search_hit'])
    }
    def testReadPSM(self):
        """All reader/filter entry points must yield the reference PSM list,
        for every combination of read_schema and iterative flags.
        NOTE: 'filter' here is pyteomics.pepxml.filter (imported at module top),
        which shadows the builtin."""
        for rs, it in product([True, False], repeat=2):
            for func in [PepXML, read, chain,
                lambda x, **kw: chain.from_iterable([x], **kw),
                lambda x, **kw: filter(x, **PepxmlTest._kw),
                lambda x, **kw: filter.chain(x, **PepxmlTest._kw),
                lambda x, **kw: filter.chain.from_iterable([x], **PepxmlTest._kw)]:
                with func(self.path, read_schema=rs, iterative=it) as r:
                    self.assertEqual(list(r), pepxml_results)
    def test_index(self):
        """The default index covers spectrum_query elements and supports lookup by spectrum title."""
        with PepXML(self.path) as reader:
            self.assertEqual(list(reader.index), ['spectrum_query'])
            specs = [item['spectrum'] for item in reader]
            self.assertEqual(list(reader.index['spectrum_query']), specs)
            self.assertEqual(reader[specs[-1]], pepxml_results[-1])
# Run this module's tests when invoked directly.
if __name__ == '__main__':
    unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_achrom.py | tests/test_achrom.py | from os import path
import doctest
import unittest
import pyteomics
pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]
from pyteomics import achrom
# doctest.testmod(achrom, verbose=True)
def load_tests(loader, tests, ignore):
    """unittest ``load_tests`` protocol hook: add achrom's doctests to the suite."""
    tests.addTests(doctest.DocTestSuite(achrom))
    return tests
if __name__ == "__main__":
unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_proforma.py | tests/test_proforma.py | from os import path
import unittest
import pickle
import pyteomics
pyteomics.__path__ = [path.abspath(
path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]
from pyteomics.proforma import (
PSIModModification, ProForma, TaggedInterval, parse, MassModification, ProFormaError, TagTypeEnum,
ModificationRule, StableIsotope, GenericModification, Composition, to_proforma, ModificationMassNotFoundError,
AdductParser, ChargeState,
std_aa_comp, obo_cache, process_tag_tokens)
class ProFormaTest(unittest.TestCase):
maxDiff = None
    def test_complicated_short(self):
        """Parse a string exercising most ProForma features at once: fixed-modification
        rules, a stable isotope, an unlocalized modification, a labile glycan, an
        N-terminal tag, a tagged interval and a mass offset."""
        complicated_short = r"<[Carbamidomethyl]@C><13C>[Hydroxylation]?{HexNAc}[Hex]-ST[UNIMOD:Oxidation](EPP)[+18.15]ING"
        tokens, properties = parse(complicated_short)
        assert len(tokens) == 8
        assert len(properties['n_term']) == 1
        assert properties['n_term'][0] == 'Hex'
        assert len(properties['intervals']) == 1
        assert properties['intervals'][0] == TaggedInterval(2, 5, [MassModification(18.15)])
        assert len(properties['isotopes']) == 1
        assert properties['isotopes'][0] == StableIsotope("13C")
        assert properties['fixed_modifications'][0] == ModificationRule(
            GenericModification('Carbamidomethyl', None, None), ['C'])
        # The newer serializer enforces the ordering of sections, so the
        # round-tripped string differs from the input only in section order.
        assert (
            to_proforma(tokens, **properties)
            == r"<13C><[Carbamidomethyl]@C>[Hydroxylation]?{HexNAc}[Hex]-ST[UNIMOD:Oxidation](EPP)[+18.15]ING"
        )
        self.assertAlmostEqual(ProForma(tokens, properties).mass, 1228.6588, 3)
def test_range(self):
seq = "PRQT(EQC[Carbamidomethyl]FQRMS)[+19.0523]ISK"
parsed = ProForma.parse(seq)
assert str(parsed) == seq
chunk = parsed[:6]
assert chunk.intervals
def test_ambiguous_range(self):
seq = "PRQT(?EQC[Carbamidomethyl]FQRMS)ISK"
parsed = ProForma.parse(seq)
assert str(parsed) == seq
self.assertRaises(ValueError, lambda: parsed[:6])
def test_error_on_nested_range(self):
self.assertRaises(ProFormaError, lambda: parse(
"PRQT(EQ(CFQR)[Carbamidomethyl]MS)[+19.0523]ISK"))
def test_localization_scores(self):
seq = "EM[Oxidation]EVT[#g1(0.01)]S[#g1(0.09)]ES[Phospho#g1(0.90)]PEK"
obj = ProForma.parse(seq)
tags = obj.find_tags_by_id("#g1")
solutions = {4: 0.01, 5: 0.09, 7: 0.9}
for i, tag in tags:
marker = tag.find_tag_type(TagTypeEnum.localization_marker)[0]
expected = solutions[i]
assert expected == marker.value
def test_multiple_info(self):
i = ProForma.parse(
"ELVIS[Phospho|INFO:newly discovered|info:really awesome]K")
tags = i[4][1][0].find_tag_type(TagTypeEnum.info)
messages = set(['newly discovered', 'really awesome'])
assert len(tags) == 2
for tag in tags:
messages.remove(tag.value)
assert len(messages) == 0
def test_formula(self):
i = ProForma.parse("SEQUEN[Formula:[13C2]CH6N]CE")
mod = i[-3][1][0]
assert mod.composition == Composition(
{'H': 6, 'C[13]': 2, 'C': 1, 'N': 1})
def test_gnome(self):
gp = ProForma.parse("NEEYN[GNO:G59626AS]K")
self.assertAlmostEqual(gp.mass, 2709.016, 3)
def test_glycan(self):
gp = ProForma.parse("NEEYN[Glycan:Hex5HexNAc4NeuAc1]K")
self.assertAlmostEqual(gp.mass, 2709.016, 3)
def test_c_terminal_modification(self):
i = ProForma.parse(
"[iTRAQ4plex]-EM[U:Oxidation]EVNES[Phospho]PEK[iTRAQ4plex]-[Methyl]")
self.assertEqual(i.c_term[0].name, "Methyl")
self.assertEqual(i[-1][1][0].name, "iTRAQ4plex")
def test_fragments(self):
i = ProForma.parse("PEPTIDE")
masses = i.fragments('b', 1)
expected = [98.06004032, 227.1026334, 324.15539725, 425.20307572,
538.2871397, 653.31408272]
for o, e in zip(masses, expected):
self.assertAlmostEqual(o, e, 3)
masses = i.fragments('y', 1)
expected = [148.06043424, 263.08737726, 376.17144124, 477.21911971,
574.27188356, 703.31447664]
for o, e in zip(masses, expected):
self.assertAlmostEqual(o, e, 3)
    def test_slice(self):
        """Slicing keeps a terminal modification only on the slice that contains that terminus."""
        i = ProForma.parse('[U:1]-MPEP-[UNIMOD:2]/2')
        assert i.n_term is not None
        assert i.c_term is not None
        # First residue only: retains the N-terminal tag, drops the C-terminal one.
        assert i[:1].n_term is not None
        assert i[:1].c_term is None
        # Remainder: drops the N-terminal tag, retains the C-terminal one.
        assert i[1:].n_term is None
        assert i[1:].c_term is not None
    def test_charge_adducts(self):
        """Charge-state declarations with explicit adducts expose both the net charge
        and the adduct list as (species, unit charge, count) tuples."""
        sequences = ['PEPTIDE/1[+2Na+,-H+]', 'PEPTIDE/-1[+e-]', 'PEPTIDE/1[+2H+,+e-]']
        charges = [1, -1, 1]
        adducts_list = [[('Na', 1, 2), ('H', 1, -1)], [('e-', -1, 1)], [('H', 1, 2), ('e-', -1, 1)]]
        for seq, charge, adducts in zip(sequences, charges, adducts_list):
            i = ProForma.parse(seq)
            self.assertEqual(i.charge_state.charge, charge)
            self.assertEqual(i.charge_state.adducts, adducts)
    def test_composition_with_adducts(self):
        """composition() is neutral by default; include_charge=True adds the adduct
        composition (protons by default when no adducts are declared)."""
        sequences = ['PEPTIDE/1[+2Na+,-H+]', 'PEPTIDE/-1[+e-]', 'PEPTIDE/1[+2H+,+e-]', 'PEPTIDE', 'PEPTIDE/1']
        neutral_comp = Composition(sequence='PEPTIDE')
        # Expected extra composition contributed by each charge declaration.
        adducts_list = [Composition({'Na': 2, 'H': -1}),
                        Composition({'e-': 1}),
                        Composition({'H': 2, 'e-': 1}),
                        Composition({}),
                        Composition({'H': 1})]
        for seq, adducts in zip(sequences, adducts_list):
            i = ProForma.parse(seq)
            self.assertEqual(i.composition(), neutral_comp)
            self.assertEqual(i.composition(include_charge=True), neutral_comp + adducts)
    def test_adduct_formatting(self):
        """Adducts fed to AdductParser incrementally or as formatted strings produce
        the same canonical ChargeState rendering."""
        # Feed raw '+2Na+' / '-H+' style tokens through the parser buffer.
        ap = AdductParser()
        ap.buffer.extend('+2Na+')
        ap.bound()
        ap.buffer.extend('-H+')
        c = ChargeState.from_adducts(ap())
        self.assertEqual(str(c), "[Na:z+1^2,H:z+1]")
        # Same adducts given in the 'species:z+charge^count' notation.
        ap = AdductParser("Na:z+1^2")
        ap.bound()
        ap.extend("H:z+1")
        c = ChargeState.from_adducts(ap())
        self.assertEqual(str(c), "[Na:z+1^2,H:z+1]")
def test_default_adduct_formatting(self):
c = ChargeState(2, None)
self.assertEqual(str(c), '2')
    def test_composition_fixed(self):
        """Fixed modification rules (<[...]@C>) are applied at every matching residue."""
        sequences = ['<[UNIMOD:4]@C>ATPEILTCNSIGCLK']
        aa_comp = std_aa_comp.copy()
        aa_comp['cam'] = Composition(formula='H3C2NO')  # carbamidomethyl group
        # Reference: the same peptide in modX with explicit 'cam' on both cysteines.
        comps = [Composition(sequence='ATPEILTcamCNSIGcamCLK', aa_comp=aa_comp)]
        for seq, comp in zip(sequences, comps):
            i = ProForma.parse(seq)
            self.assertEqual(i.composition(), comp)
    def test_missing_composition(self):
        """A bare mass offset has no elemental composition: skipped with
        ignore_missing=True, otherwise composition() raises."""
        sequences = ['P[+79.966]EPTIDE']
        comps = [Composition(sequence='PEPTIDE')]
        for seq, comp in zip(sequences, comps):
            i = ProForma.parse(seq)
            self.assertEqual(i.composition(ignore_missing=True), comp)
            with self.assertRaises(ProFormaError):
                ProForma.parse(seq).composition()
    def test_localization_composition(self):
        """The total composition must not depend on how an ambiguous modification
        is localized (unlocalized, marker group, or range)."""
        seq0 = "EMEVT[Phospho]SESPEK"
        test_seq = [
            "[Phospho]?EMEVTSESPEK",
            "EMEVT[#g1]S[#g1]ES[Phospho#g1]PEK",
            "EMEV(TS)[Phospho]ESPEK"
        ]
        base_comp = ProForma.parse(seq0).composition()
        for seq in test_seq:
            with self.subTest(seq=seq):
                i = ProForma.parse(seq)
                self.assertEqual(i.composition(), base_comp)
def test_from_spec(self):
positive = [
"AA",
"A[+1]",
"AA[+1]",
"A(AAAA)[+1][+1]",
"UWAKJDNLASNOIJPojkjjdakjn[U:Oxidation]",
"[+1]-A[+1]-[+1]",
# "AA+AA",
"EMK[XLMOD:02000#XL1]EVTKSE[XLMOD:02010#XL2]SK[#XL1]PEK[#XL2]AR",
# "SEK[XLMOD:02001#XL1]UENCE//EMEVTK[XLMOD:02001#XL1]SESPEK",
"EM[Oxidation]EVEES[Phospho]PEK",
"EM[R: Methionine sulfone]EVEES[O-phospho-L-serine]PEK",
"EMEVTK[X:DSS#XL1]SESPEK",
"EM[U:Oxidation]EVEES[U:Phospho]PEK",
"EM[+15.9949]EVEES[+79.9663]PEK",
"EM[U:+15.995]EVEES[U:+79.966]PEK",
"EM[U:+15.995]EVEES[Obs:+79.978]PEK",
"RTAAX[+367.0537]WT",
"{Glycan:Hex}EM[Oxidation]EVNES[Phospho]PEK[iTRAQ4plex]",
"[iTRAQ4plex]-EM[Oxidation]EVNES[Phospho]PEK",
"[iTRAQ4plex]-EM[Oxidation]EVNES[Phospho]PEK[iTRAQ4plex]-[Methyl]",
"<[S-carboxamidomethyl-L-cysteine]@C>ATPEILTCNSIGCLK",
"<[MOD:01090]@C>ATPEILTCNSIGCLK",
"[Phospho]?EM[Oxidation]EVTSESPEK",
"[Phospho][Phospho]?[Acetyl]-EM[Oxidation]EVTSESPEK",
"EM[Oxidation]EVT[#g1]S[#g1]ES[Phospho#g1]PEK",
"EM[Oxidation]EVT[#g1(0.01)]S[#g1(0.09)]ES[Phospho#g1(0.90)]PEK",
"[Phospho#s1]?EM[Oxidation]EVT[#s1(0.01)]S[#s1(0.90)]ES[#s1(0.90)]PEK",
"PROT(EOSFORMS)[+19.0523]ISK",
"PROT(EOC[Carbamidomethyl]FORMS)[+19.0523]ISK",
"SEQUEN[Formula:C12H20O2]CE",
"SEQUEN[Formula:HN-1O2]CE",
"SEQUEN[Formula:[13C2][12C-2]H2N]CE",
"SEQUEN[Glycan:HexNAc]CE",
"EMEVTK[XLMOD:02001#XL1]SESPEK[#XL1]",
"EMEVTK[XLMOD:02001#XL1]SESPEK",
# "SEK[XLMOD:02001#XL1]UENCE//EMEVTK[XLMOD:02001#XL1]SESPEK",
# "ETFGD[MOD:00093#BRANCH]//R[#BRANCH]ATER",
"(?DQ)NGTWEM[Oxidation]ESNENFEGYM[Oxidation]K",
"ELVIS[Phospho|+79.966331]K",
"ELVIS[Phospho|Obs:+79.978]K",
"ELV[INFO:xxxxx]IS",
"ELVIS[Phospho|INFO:newly discovered|INFO:really awesome]K",
"ELVIS[Phospho|INFO:newly discovered|INFO:Created on 2021-06]K",
"ELVIS[Phospho|INFO:newly discovered|INFO:Created by software Tool1]K",
"<13C>ATPEILTVNSIGQLK",
"EMEVEESPEK/2",
# "EMEVEESPEK+ELVISLIVER",
# "EMEVEESPEK/2+ELVISLIVER/3",
# "A[X:DSS#XL1]//B[#XL1]+C[X:DSS#XL1]//D[#XL1]",
"<[Carbamidomethyl]@C>ATPEILTCNSIGCLK",
"<[Oxidation]@C,M>MTPEILTCNSIGCLK",
"<[TMT6plex]@K,N-term>ATPEILTCNSIGCLK",
"<[TMT6plex]@K,N-term:A>ATPEILTCNSIGCLK",
"<[TMT6plex]@K,N-term:A,N-term:B>ATPEILTCNSIGCLK",
"EM[Oxidation]EVEES[Phospho]PEK",
"EM[L-methionine sulfoxide]EVEES[O-phospho-L-serine]PEK",
"EM[R: L-methionine sulfone]EVEES[O-phospho-L-serine]PEK",
"EMEVTK[X:DSS#XL1]SESPEK",
"NEEYN[GNO:G59626AS]K",
"NEEYN[G:G59626AS]K",
"EM[U:Oxidation]EVEES[U:Phospho]PEK",
"EM[M:L-methionine sulfoxide]EVEES[M:O-phospho-L-serine]PEK",
"EM[U:Oxidation]EVEES[M:O-phospho-L-serine]PEK",
"EM[Oxidation]EVEES[O-phospho-L-serine]PEK",
"EM[Oxidation]EVE[Cation:Mg[II]]ES[Phospho]PEK",
"EM[MOD:00719]EVEES[MOD:00046]PEK",
"EM[UNIMOD:35]EVEES[UNIMOD:56]PEK",
"EM[RESID:AA0581]EVEES[RESID:AA0037]PEK",
"EMEVTK[XLMOD:02001#XL1]SESPEK[#XL1]",
"EMK[XLMOD:02000#XL1]EVTKSE[XLMOD:02010#XL2]SK[#XL1]PEK[#XL2]AR",
"EMEVTK[XLMOD:02001#XL1]SESPEK",
"EMEVTK[XLMOD:02001]SESPEK",
# "SEK[XLMOD:02001#XL1]UENCE//EMEVTK[XLMOD:02001#XL1]SESPEK",
# "SEK[XLMOD:02001#XL1]UENCE//EMEVTK[#XL1]SESPEK",
"EVTSEKC[MOD:00034#XL1]LEMSC[#XL1]EFD",
"EVTSEKC[L-cystine (cross-link)#XL1]LEMSC[#XL1]EFD",
"FVNQHLC[MOD:00034#XL1]GSHLVEALYLVC[MOD:00034#XL2]GERGFFYTPK",
# "A//GIVEQC[MOD:00034#XL3]C[#XL1]TSIC[#XL3]SLYQLENYC[#XL2]N",
"EVTSEKC[XLMOD:02009#XL1]LEMSC[#XL1]EFD",
"EVTSEKC[X:Disulfide#XL1]LEMSC[#XL1]EFD",
"EVTSEKC[half cystine]LEMSC[half cystine]EFD",
"EVTSEKC[MOD:00798]LEMSC[MOD:00798]EFDEVTSEKC[MOD:00798]LEMSC[MOD:00798]EFD",
"EVTSEKC[UNIMOD:374#XL1]LEMSC[#XL1]EFD",
"EVTSEKC[Dehydro#XL1]LEMSC[#XL1]EFD",
# "ETFGD[MOD:00093#BRANCH]//R[#BRANCH]ATER",
# "AVTKYTSSK[MOD:00134#BRANCH]//AGKQLEDGRTLSDYNIQKESTLHLVLRLRG-[#BRANCH]",
"NEEYN[GNO:G59626AS]K",
"YPVLN[GNO:G62765YT]VTMPN[GNO:G02815KT]NSNGKFDK",
"EM[+15.9949]EVEES[+79.9663]PEK",
"EM[+15.995]EVEES[-18.01]PEK",
"EM[U:+15.9949]EVEES[U:+79.9663]PEK",
"EM[U:+15.995]EVEES[U:+79.966]PEK",
"EM[U:+15.995]EVEES[Obs:+79.978]PEK",
"EM[U:+15.995]EVEES[Obs:+79.978]PEK",
"RTAAX[+367.0537]WT",
"SEQUEN[Formula:C12H20O2]CE",
"SEQUEN[Formula:[13C2]CH6N]CE",
"SEQUEN[Formula:[13C2][12C-2]H2N]CE",
"SEQUEN[Glycan:HexNAc1Hex2]CE",
"[iTRAQ4plex]-EM[Oxidation]EVNES[Phospho]PEK",
"[iTRAQ4plex]-EM[U:Oxidation]EVNES[Phospho]PEK[iTRAQ4plex]-[Methyl]",
"{Glycan:Hex}EM[U:Oxidation]EVNES[Phospho]PEK[iTRAQ4plex]",
"{Glycan:Hex}[iTRAQ4plex]-EM[Oxidation]EVNES[Phospho]PEK[iTRAQ4plex]",
"{Glycan:Hex}[iTRAQ4plex]-EM[Oxidation]EVNES[Phospho]PEK[iTRAQ4plex]-[Methyl]",
"{Glycan:Hex}{Glycan:NeuAc}EMEVNESPEK",
"[Phospho]?EM[Oxidation]EVTSESPEK",
"[Phospho][Phospho]?[Acetyl]-EM[Oxidation]EVTSESPEK",
"[Phospho]^2?[Acetyl]-EM[Oxidation]EVTSESPEK",
"[Phospho]^2?[Acetyl]-EM[Oxidation]EVTSESPEK",
"EM[Oxidation]EVT[#g1]S[#g1]ES[Phospho#g1]PEK",
"PRT(ESFRMS)[+19.0523]ISK",
"PRT(EC[Carbamidomethyl]FRMS)[+19.0523]ISK",
"EM[Oxidation]EVT[#g1(0.01)]S[#g1(0.09)]ES[Phospho#g1(0.90)]PEK",
"[Phospho#s1]?EM[Oxidation]EVT[#s1(0.01)]S[#s1(0.09)]ES[#s1(0.90)]PEK",
"MPGLVDSNPAPPESQEKKPLK(PCCACPETKKARDACIIEKGEEHCGHLIEAHKECMRALGFKI)[Oxidation][Oxidation][half cystine][half cystine]",
"<13C>ATPEILTVNSIGQLK",
"<15N>ATPEILTVNSIGQLK",
"<D>ATPEILTVNSIGQLK",
"<13C><15N>ATPEILTVNSIGQLK",
"<[S-carboxamidomethyl-L-cysteine]@C>ATPEILTCNSIGCLK",
"<[MOD:01090]@C>ATPEILTCNSIGCLK",
"<[Oxidation]@C,M>MTPEILTCNSIGCLK",
"<[MOD:01090]@C>[Phospho]?EM[Oxidation]EVTSECSPEK",
"<[MOD:01090]@C>[Acetyl]-EM[Oxidation]EVTSECSPEK",
"(?DQ)NGTWEM[Oxidation]ESNENFEGYM[Oxidation]K",
"(?N)NGTWEM[Oxidation]ESNENFEGYM[Oxidation]K",
"ELV[INFO:AnyString]IS",
"ELV[info:AnyString]IS",
"ELVIS[Phospho|INFO:newly discovered]K",
"ELVIS[Phospho|INFO:newly discovered|INFO:really awesome]K",
"ELVIS[Phospho|INFO:newly discovered|INFO:Created on 2021-06]K",
"ELVIS[Phospho|INFO:newly discovered|INFO:Created by software Tool1]K",
"ELVIS[U:Phospho|+79.966331]K",
"ELVIS[U:Phospho|Obs:+79.978]K",
"ELVIS[Phospho|O-phospho-L-serine]K",
"ELVIS[UNIMOD:21|MOD:00046]K",
"ELVIS[UNIMOD:21|Phospho]K",
"ELVIS[Phospho|O-phospho-L-serine|Obs:+79.966]K",
"ELVIS[Obs:+79.966|Phospho|Sulfo]K",
"EMEVEESPEK/2",
"EM[U:Oxidation]EVEES[U:Phospho]PEK/3",
"[U:iTRAQ4plex]-EM[U:Oxidation]EVNES[U:Phospho]PEK[U:iTRAQ4plex]-[U:Methyl]/3",
# "EMEVEESPEK/2+ELVISLIVER/3",
"AA(?AA)",
"AA(?AA)AA",
"[dehydro]^3?[gln->pyro-glu]-QSC",
"[deamidated#1]-FEEAQ[#1]A",
"[#1]-FEEAQ[deamidated#1]A",
"AHAM[oxidation#1]TEG-[#1]",
"AHAM[#1]TEG-[oxidation#1]",
"SEQUEN[Formula:Zn1:z+2]CE",
"<[TMT6plex]@K,N-term>ATPEILTCNSIGCLK",
"<[Oxidation]@W,C-term:G>QATPEILTWCNSIGCLKG",
"<[Gln->pyro-Glu]@N-term:Q><[Oxidation]@W,C-term:G>QATPEILTWCNSIGCLKG",
"<[Amidated]@C-term>QATPEILTWCNSIGCLKG",
"PEPTID-[a-type-ion]",
"PEPTID[Formula:H-1C-1O-2|Info:d-ion]-[a-type-ion]",
"PEPTIDE/[Na:z+1]",
"PEPTIDE/[Na:z+1,H:z+1]",
"PEPTIDE/[Na:z+1^2]",
"PEPT[Formula:Zn:z+2]IDE/[Na:z+1^2]",
"PE[Cation:Al[III]]PTIDE/2",
"PE[Formula:Al H-3:z+1]PTIDE/1",
"PE[Formula:Al H-3:z+1]PTIDE/[H:z+1]",
"[Cation:Al[III]]?PEPTIDE/2",
"PEPTIDE/[Al H-3:z+1,H:z+1]",
"PEPTIDEG-[Methyl][Amidated]",
"[Acetyl][Carbamyl]-QPEPTIDE",
"[Formula:Zn:z+2|Position:N-term,C-term]^5[Carbamidomethyl|Position:C]^5?MDPETCPCPSGGSCTCADSCKCEGCKCTSCKKSCCSCCPAECEKCAKDCVCKGGEAAEAEAEKCSCCQ",
"PEPTI(MERMERMERM)[Oxidation|Position:M][Oxidation|Position:M]DE",
"PEPTI(MERMERMERM)[+32|Position:E]PEPTIDE",
"PETIEM[Dioxidation#1][Oxidation#2]REM[#1][#2]REM[#2]RM[#1]PEPTIDE",
"[Oxidation|CoMKP]?PEPT[Phospho]IDE",
# "(>Trypsin)AANSIPYQVSLNS+(>Keratin)AKEQFERQTA",
# "(>P07225 Vitamin K-dependent protein S OS=Homo sapiens OX=9606 GN=PROS1 PE=1 (SV=1) RANGE=12..42)GGK[xlink:dss[138]#XLDSS]IEVQLK//(>P07225 Vitamin K-dependent protein S OS=Homo sapiens OX=9606 GN=PROS1 PE=1 SV=1)KVESELIK[#XLDSS]PINPR/4",
# "(>>>Trastuzumab Fab and coeluting Fc)(>>Fab)(>Heavy chain)EVQLVESGGGLVQPGGSLRLSC[M:l-cystine (cross-link)#XL1]AASGFNIKDTYIHWVRQAPGKGLEWVARIYPTNGYTRYADSVKGRFTISADTSKNTAYLQMNSLRAEDTAVYYC[#XL1]SRWGGDGFYAMDYWGQGTLVTVSSASTKGPSVFPLAPSSKSTSGGTAALGC[M:l-cystine (cross-link)#XL2]LVKDYFPEPVTVSWNSGALTSGVHTFPAVLQSSGLYSLSSVVTVPSSSLGTQTYIC[#XL2]NVNHKPSNTKVDKKVEPKSC[M:l-cystine (cross-link)#XL3]DKT//(>Light chain)DIQMTQSPSSLSASVGDRVTITC[M:l-cystine (cross-link)#XL4]RASQDVNTAVAWYQQKPGKAPKLLIYSASFLYSGVPSRFSGSRSGTDFTLTISSLQPEDFATYYC[#XL4]QQHYTTPPTFGQGTKVEIKRTVAAPSVFIFPPSDEQLKSGTASVVC[M:l-cystine (cross-link)#XL5]LLNNFYPREAKVQWKVDNALQSGNSQESVTEQDSKDSTYSLSSTLTLSKADYEKHKVYAC[#XL5]EVTHQGLSSPVTKSFNRGEC[#XL3]+(>Fc)HTCPPCPAPELLGGPSVFLFPPKPKDTLMISRTPEVTCVVVDVSHEDPEVKFNWYVDGVEVHNAKTKPREEQYNSTYRVVSVLTVLHQDWLNGKEYKCKVSNKALPAPIEKTISKAKGQPREPQVYTLPPSREEMTKNQVSLTCLVKGFYPSDIAVEWESNGQPENNYKTTPPVLDSDGSFFLYSKLTVDKSRWQQGNVFSCSVMHEALHNHYTQKSLSLSPGK",
]
for seq in positive:
parsed = ProForma.parse(seq)
assert parsed is not None
def test_nonstandard_amino_acid(self):
seq = ProForma.parse("PEPTX[MOD:01001]IDE")
bad_seq = ProForma.parse("PEPTXIDE")
assert seq.mass != bad_seq.mass
self.assertAlmostEqual(seq.mass, 884.4127280267099, 4)
class TestTagProcessing(unittest.TestCase):
    """Lower-level tests for process_tag_tokens (tag tokenization and typing)."""
    def test_process_tag_tokens(self):
        # An explicit CV prefix yields a typed (unimod) tag.
        tokens = list('UNIMOD:Deamidation')
        tag = process_tag_tokens(tokens)
        assert tag.value == "Deamidation"
        assert tag.type == TagTypeEnum.unimod
    def test_process_tag_tokens_generic(self):
        # Without a prefix the tag stays generic until resolved.
        tokens = list('Deamidation')
        tag = process_tag_tokens(tokens)
        assert tag.value == "Deamidation"
        assert tag.type == TagTypeEnum.generic
    def test_process_tag_tokens_contains_colon(self):
        # Only the first colon separates the CV prefix; later colons belong to the name.
        tokens = list('UNIMOD:Cation:Na')
        tag = process_tag_tokens(tokens)
        assert tag.value == "Cation:Na"
        assert tag.type == TagTypeEnum.unimod
    def test_process_tag_tokens_generic_contains_colon(self):
        # Unprefixed names containing colons must not be mistaken for CV
        # prefixes; they still resolve through Unimod.
        for name in ['Cation:Na', 'Cation:Li', 'Unknown:210', 'QAT:2H(3)',
                     'Dimethyl:2H(4)', 'Label:13C(9)', 'Cation:K']:
            tag = process_tag_tokens(list(name))
            assert tag.value == name
            assert tag.type == TagTypeEnum.generic
            state = tag.resolve()
            assert state['name'] == name
            assert state['provider'] == 'unimod'
class GenericModificationResolverTest(unittest.TestCase):
    """Resolution of free-text modification names against controlled vocabularies."""
    def test_generic_resolver(self):
        """'Oxidation' resolves via Unimod with the expected monoisotopic mass."""
        mod_state = GenericModification("Oxidation")
        mod_state.resolve()
        self.assertEqual(mod_state.provider, 'unimod')
        self.assertAlmostEqual(mod_state.mass, 15.994915, 3)
    def test_generic_resolver_with_dangerous_synonyms(self):
        """'TMT6plex' has confusable synonyms; resolution must still pick the Unimod entry."""
        mod_state = GenericModification("TMT6plex")
        mod_state.resolve()
        self.assertEqual(mod_state.provider, 'unimod')
        self.assertAlmostEqual(mod_state.mass, 229.162932, 3)
class PSIModModificationResolverTest(unittest.TestCase):
    """PSI-MOD entries without a defined mass must raise on resolution."""
    def test_unknown_mass(self):
        # MOD:01716 ('TMT6plex reporter fragment') carries no mass in PSI-MOD.
        modification = PSIModModification("MOD:01716")
        with self.assertRaises(ModificationMassNotFoundError):
            modification.resolve()
class ModificationHashingTest(unittest.TestCase):
    """MassModification keys must compare equal for masses within tolerance."""
    def test_mass_modification(self):
        mod = MassModification(57.08)
        container = set()
        container.add(mod.key)
        self.assertIn(mod.key, container)
        # A mass differing by far less than the matching tolerance must yield
        # an equivalent key, and the modification itself must also be findable
        # in the set of keys.
        mod2 = MassModification(57.08 + 1e-19)
        self.assertIn(mod2.key, container)
        self.assertIn(mod2, container)
class ModificationPicklingTest(unittest.TestCase):
    """Modifications must pickle identically before and after mass resolution."""
    def test_pickle(self):
        modification = GenericModification("UNIMOD:1")
        # Round-trip before the lazy mass lookup has happened.
        self.assertEqual(modification, pickle.loads(pickle.dumps(modification)))
        # Accessing .mass triggers resolution and populates internal state.
        assert modification.mass is not None
        # Round-trip again after resolution.
        self.assertEqual(modification, pickle.loads(pickle.dumps(modification)))
# Run this module's tests when invoked directly.
if __name__ == '__main__':
    unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_protxml.py | tests/test_protxml.py | from os import path
import pyteomics
pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]
from itertools import product
import unittest
from pyteomics import protxml
from data import protxml_results
import operator as op
class ProtXMLTest(unittest.TestCase):
    """Reading, FDR estimation and filtering tests for protXML."""
    maxDiff = None
    # Keyword arguments for the filter() entry points: permissive FDR
    # threshold, rank by probability (higher is better), keep decoys.
    _kw = {'full_output': False, 'fdr': 1.1,
        'key': op.itemgetter('probability'),
        'reverse': True,
        'remove_decoy': False,
    }
    # Path to the reference protXML file (relative to the tests directory).
    path = 'test.prot.xml'
    def test_read(self):
        """Every reader/filter entry point must yield the reference protein groups."""
        for rs, it in product([True, False], repeat=2):
            for func in [protxml.ProtXML, protxml.read, protxml.chain,
                lambda x, **kw: protxml.chain.from_iterable([x], **kw),
                lambda x, **kw: protxml.filter(x, **ProtXMLTest._kw),
                lambda x, **kw: protxml.filter.chain(x, **ProtXMLTest._kw),
                lambda x, **kw: protxml.filter.chain.from_iterable([x], **ProtXMLTest._kw)
            ]:
                with func(self.path, read_schema=rs, iterative=it) as r:
                    self.assertEqual(list(r), protxml_results)
    def test_fdr(self):
        """The test file holds one target and one decoy entry, so FDR is 1.0."""
        with protxml.ProtXML(self.path) as f:
            self.assertEqual(protxml.fdr(f), 1.0)
    def test_filter(self):
        """Removing decoys must leave only the first (target) entry."""
        kw = self._kw.copy()
        kw['remove_decoy'] = True
        x = protxml.filter(self.path, **kw)
        self.assertEqual(list(x), [protxml_results[0]])
    def test_qvalues(self):
        """q-values for the target/decoy pair are 0 and 1."""
        q = protxml.qvalues(self.path, **self._kw)
        self.assertEqual(list(q['q']), [0, 1])
    def test_qvalues_prefix(self):
        """A custom decoy prefix must be honored by qvalues()."""
        q = protxml.qvalues(self.path, decoy_prefix='DECO', **self._kw)
        self.assertEqual(list(q['q']), [0, 1])
    def test_df(self):
        """DataFrame export: 2 rows (entries) by 15 columns."""
        df = protxml.DataFrame(self.path)
        self.assertEqual(df.shape, (2, 15))
    def test_filter_df(self):
        """filter_df adds score/q-value columns (15 -> 17) and keeps both rows at fdr=1.1."""
        kw = self._kw.copy()
        del kw['full_output']
        del kw['key']
        fdf = protxml.filter_df(self.path, **kw)
        self.assertEqual(fdf.shape, (2, 17))
    def test_filter_df_suffix(self):
        """Decoys marked with a name suffix (instead of a prefix) are also removable."""
        kw = self._kw.copy()
        del kw['full_output']
        del kw['key']
        kw['remove_decoy'] = True
        df = protxml.DataFrame(self.path)
        # Rewrite 'DECOY_xxx' names as 'xxx_SUF' to simulate suffix-marked decoys.
        df['protein_name'] = df.protein_name.str.replace(r'DECOY_(.*)', r'\1_SUF', regex=True)
        fdf = protxml.filter_df(df, decoy_suffix='_SUF', **kw)
        self.assertEqual(fdf.shape, (1, 17))
# Run this module's tests when invoked directly.
if __name__ == '__main__':
    unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_usi.py | tests/test_usi.py | from data import usi_proxi_data
from os import path
import pyteomics
pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]
import unittest
from pyteomics.usi import USI, proxi, AGGREGATOR_KEY
from pyteomics.auxiliary import PyteomicsError
class USITest(unittest.TestCase):
    """Parsing of Universal Spectrum Identifier strings."""
    def test_parse(self):
        """A USI string round-trips through USI.parse and exposes each component field."""
        usi_str = "mzspec:MSV000085202:210320_SARS_CoV_2_T:scan:131256"
        inst = USI.parse(usi_str)
        assert str(inst) == usi_str
        assert inst.protocol == 'mzspec'
        assert inst.dataset == "MSV000085202"
        assert inst.datafile == "210320_SARS_CoV_2_T"
        assert inst.scan_identifier_type == "scan"
        assert inst.scan_identifier == "131256"
        # No interpretation segment was given, so the field is absent.
        assert inst.interpretation is None
class PROXITest(unittest.TestCase):
    """Spectrum retrieval through a single PROXI backend.

    NOTE(review): these tests perform live network requests to the MassIVE
    PROXI endpoint and will fail without connectivity.
    """
    def test_request(self):
        """A fetched spectrum must contain (at least) the reference keys and arrays."""
        usi_str = "mzspec:MSV000085202:210320_SARS_CoV_2_T:scan:131256"
        response = proxi(usi_str, backend='massive')
        assert set(usi_proxi_data.keys()) <= set(response.keys())
        for a, b in zip(response['m/z array'], usi_proxi_data['m/z array']):
            self.assertAlmostEqual(a, b, 3)
        for a, b in zip(response['intensity array'], usi_proxi_data['intensity array']):
            self.assertAlmostEqual(a, b, 3)
    def test_errors(self):
        """Invalid backend specifications raise informative exceptions."""
        usi_str = "mzspec:MSV000085202:210320_SARS_CoV_2_T:scan:131256"
        with self.assertRaises(TypeError, msg='Unrecognized backend type: NoneType'):
            proxi(usi_str, backend=None)
        with self.assertRaises(PyteomicsError, msg='Unknown PROXI backend name: BackendName'):
            proxi(usi_str, backend='BackendName')
class PROXIAggregatorTest(unittest.TestCase):
    """Spectrum retrieval through the aggregating PROXI backend (tries multiple services).

    NOTE(review): performs live network requests; requires connectivity.
    """
    def test_request(self):
        """The aggregator must return the same reference spectrum as a single backend."""
        usi_str = "mzspec:MSV000085202:210320_SARS_CoV_2_T:scan:131256"
        response = proxi(usi_str, backend=AGGREGATOR_KEY)
        assert set(usi_proxi_data.keys()) <= set(response.keys())
        for a, b in zip(response['m/z array'], usi_proxi_data['m/z array']):
            self.assertAlmostEqual(a, b, 3)
        for a, b in zip(response['intensity array'], usi_proxi_data['intensity array']):
            self.assertAlmostEqual(a, b, 3)
if __name__ == "__main__":
unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_trafoxml.py | tests/test_trafoxml.py | from os import path
import pyteomics
pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]
import unittest
from itertools import product
from data import pairs
from pyteomics.openms.trafoxml import *
class TrafoXMLTest(unittest.TestCase):
    """Round-trip reading tests for OpenMS trafoXML files."""
    maxDiff = None
    def testRead(self):
        """All reader entry points must yield the reference pairs for every option combination."""
        for rs, it in product([True, False], repeat=2):
            for func in [TrafoXML, read, chain,
                lambda x, **kw: chain.from_iterable([x], **kw)]:
                with func('test.trafoXML', read_schema=rs, iterative=it) as r:
                    self.assertEqual(pairs, list(r))
# Run this module's tests when invoked directly.
if __name__ == '__main__':
    unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_mzxml.py | tests/test_mzxml.py | import os
import pyteomics
pyteomics.__path__ = [os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'pyteomics'))]
from itertools import product
import unittest
from pyteomics.mzxml import MzXML, read, chain
from pyteomics import xml
from data import mzxml_spectra
import tempfile
import shutil
import numpy as np
class MzXMLTest(unittest.TestCase):
    """Reading, binary decoding and byte-offset indexing tests for mzXML."""
    maxDiff = None
    # Path to the reference mzXML file (relative to the tests directory).
    path = 'test.mzXML'
    def testReadSpectrum(self):
        """All reader entry points must yield the reference spectra for every
        combination of read_schema / iterative / use_index flags."""
        for rs, it, ui in product([True, False], repeat=3):
            for func in [MzXML, read, chain,
                lambda x, **kw: chain.from_iterable([x], **kw)]:
                with func(self.path, read_schema=rs, iterative=it, use_index=ui) as r:
                    # http://stackoverflow.com/q/14246983/1258041
                    self.assertEqual(mzxml_spectra, list(r))
    def test_decoding(self):
        """decode_binary=True (and the default) yield decoded arrays; with False
        the record must still decode on demand to the same values."""
        with MzXML(self.path, decode_binary=True) as reader:
            spectrum = next(reader)
            self.assertIsNotNone(spectrum['m/z array'])
            validation = spectrum['m/z array']
        with MzXML(self.path) as reader:
            spectrum = next(reader)
            self.assertIsNotNone(spectrum['m/z array'])
            self.assertTrue(np.allclose(spectrum['m/z array'], validation))
        with MzXML(self.path, decode_binary=False) as reader:
            spectrum = next(reader)
            self.assertIsNotNone(spectrum['m/z array'])
            record = spectrum['m/z array']
            # Lazily decode the raw record and compare with the eager result.
            array = record.decode()
            self.assertTrue(np.allclose(array, validation))
    def test_prebuild_index(self):
        """Byte-offset index files are detected, prebuilt and removed correctly.
        Works on a temporary copy so the reference file stays untouched."""
        test_dir = tempfile.mkdtemp()
        work_path = os.path.join(test_dir, self.path)
        with open(work_path, 'w') as dest, open(self.path) as source:
            dest.write(source.read())
        assert dest.closed
        # No prebuilt offset file yet: reader state must agree with the filesystem.
        with MzXML(work_path, use_index=True) as inst:
            offsets_exist = os.path.exists(inst._byte_offset_filename)
            self.assertEqual(offsets_exist, inst._check_has_byte_offset_file())
            self.assertTrue(isinstance(inst._offset_index, xml.HierarchicalOffsetIndex))
        self.assertTrue(inst._source.closed)
        # Prebuild the offset file; the reader must now find and use it.
        MzXML.prebuild_byte_offset_file(work_path)
        with MzXML(work_path, use_index=True) as inst:
            offsets_exist = os.path.exists(inst._byte_offset_filename)
            self.assertTrue(offsets_exist)
            self.assertEqual(offsets_exist, inst._check_has_byte_offset_file())
            self.assertTrue(isinstance(inst._offset_index, xml.HierarchicalOffsetIndex))
        self.assertTrue(inst._source.closed)
        # Remove the offset file; the reader must fall back gracefully.
        os.remove(inst._byte_offset_filename)
        with MzXML(work_path, use_index=True) as inst:
            offsets_exist = os.path.exists(inst._byte_offset_filename)
            self.assertEqual(offsets_exist, inst._check_has_byte_offset_file())
            self.assertTrue(isinstance(inst._offset_index, xml.HierarchicalOffsetIndex))
        self.assertTrue(inst._source.closed)
        shutil.rmtree(test_dir, True)
    def test_coerce_duration_type(self):
        """retentionTime must carry its unit annotation (minutes)."""
        with MzXML(self.path) as handle:
            scan = next(handle)
            time = scan['retentionTime']
            self.assertEqual(time.unit_info, 'minute')
    def test_read_dtype(self):
        """User-specified numpy dtypes must be honored for the binary arrays."""
        dtypes = {'m/z array': np.float32, 'intensity array': np.int32}
        with read(self.path, dtype=dtypes) as f:
            for spec in f:
                for k, v in dtypes.items():
                    self.assertEqual(spec[k].dtype, v)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_electrochem.py | tests/test_electrochem.py | from os import path
import pyteomics
pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]
import unittest
from pyteomics.electrochem import charge, pI
from pyteomics.auxiliary import PyteomicsError
class ElectrochemTest(unittest.TestCase):
    """Tests for :py:func:`pyteomics.electrochem.charge` and
    :py:func:`pyteomics.electrochem.pI`."""
    def setUp(self):
        # No shared fixtures needed; kept for unittest symmetry.
        pass
    def test_charge_calculations_str(self):
        """Charge of string peptides: custom pK tables, and built-in pK at
        pH extremes (fully protonated / deprotonated / neutral midpoint)."""
        self.assertTrue(
            abs(charge('AAA', 5.0,
                       pK={'H-': [(9., 1)], '-OH': [(8., -1)]},
                       pK_nterm={'H-': {'A': [(3., 1)]}})) < 0.01)
        self.assertTrue(
            abs(charge('H-AAA-OH', 0.0) - 1.0) < 0.01)
        self.assertTrue(
            abs(charge('H-AAA-OH', 14.0) + 1.0) < 0.01)
        # Neutral at the average of the two terminal pK values.
        self.assertTrue(
            abs(charge('H-AAA-OH', (2.34 + 9.69) / 2.0)) < 0.01)
    def test_charge_calculations_list(self):
        """Same calculations with peptides in parsed-sequence (list) form."""
        # A bare residue list without explicit terminal groups is rejected.
        self.assertRaises(PyteomicsError,
                          charge, ['A','A','A'], 5.0,
                          pK={'H-': [(9., 1)], '-OH': [(8., -1)]},
                          pK_nterm={'H-': {'A': [(3., 1)]}})
        self.assertTrue(
            abs(charge(['H-','A','A','A','-OH'], 0.0) - 1.0) < 0.01)
        self.assertTrue(
            abs(charge(['H-','A','A','A','-OH'], 14.0) + 1.0) < 0.01)
        self.assertTrue(
            abs(charge(['H-','A','A','A','-OH'], (2.34 + 9.69) / 2.0)) < 0.01)
    def test_charge_calculations_dict(self):
        """Composition-dict input: terminal-group bookkeeping must be consistent;
        mismatched or duplicated nterm/cterm pseudo-residues are errors."""
        self.assertRaises(PyteomicsError, charge, {'H-': 1, '-OH': 1, 'E': 1},
                          7, pK_nterm={'H-': {'A': [(9., 1)]}})
        self.assertTrue(
            abs(charge({'A': 3, 'H-': 1, '-OH': 1}, 14.0) + 1.0) < 0.01)
        self.assertTrue(
            abs(charge({'A': 1, 'H-': 1, '-OH': 1, 'ntermB': 1, 'ctermA': 1},
                       14.0, pK={'H-': [(9., 1)], '-OH': [(8., -1)]},
                       pK_nterm={'H-': {'A': [(3., 1)], 'B': [(3., 1)]}}) + 1.0)
            < 0.01)
        self.assertRaises(PyteomicsError, charge,
                          {'A': 1, 'H-': 1, '-OH': 1, 'ctermA': 1}, 14.0,
                          pK={'H-': [(9., 1)], '-OH': [(8., -1)]},
                          pK_nterm={'H-': {'A': [(3., 1)]}})
        self.assertRaises(PyteomicsError, charge,
                          {'A': 1, 'H-': 1, '-OH': 1, 'ntermA': 1}, 14.0,
                          pK={'H-': [(9., 1)], '-OH': [(8., -1)]},
                          pK_nterm={'H-': {'A': [(3., 1)]}})
        self.assertRaises(PyteomicsError, charge,
                          {'A': 1, 'H-': 1, '-OH': 1, 'ntermA': 2, 'ctermA': 1}, 14.0,
                          pK={'H-': [(9., 1)], '-OH': [(8., -1)]},
                          pK_nterm={'H-': {'A': [(3., 1)]}})
        self.assertRaises(PyteomicsError, charge,
                          {'A': 1, 'H-': 1, 'ntermA': 1, 'ctermA': 1}, 14.0,
                          pK={'H-': [(9., 1)], '-OH': [(8., -1)]},
                          pK_nterm={'H-': {'A': [(3., 1)]}})
    def test_pI_calculations(self):
        """pI of a simple peptide equals the mean of its terminal pK values."""
        self.assertTrue(
            abs(pI('H-AAA-OH') - (2.34 + 9.69) / 2.0) < 0.01)
    def test_pI_precision(self):
        """Each requested precision must bound the error vs a high-precision pI."""
        pI_best = pI('PEPTIDE', precision_pI=1e-15)
        for i in range(16):
            precision = 10 ** (-i)
            self.assertTrue(
                abs(pI('PEPTIDE', precision_pI=precision) - pI_best) < precision)
    def test_charge_input(self):
        """String, list and dict representations must give identical charges."""
        for i in range(0, 14):
            self.assertAlmostEqual(
                charge('H-ACDEFGH-OH', i),
                charge(['H-', 'A', 'C', 'D', 'E', 'F', 'G', 'H', '-OH'], i))
        for i in range(0, 14):
            self.assertAlmostEqual(
                charge('H-ACDEFGH-OH', i),
                charge({'H-': 1, 'A': 1, 'C': 1, 'D': 1,
                        'E': 1, 'F': 1, 'G': 1, 'H': 1, '-OH': 1}, i))
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/data.py | tests/data.py | """
Bulky data structures for assertion in pyteomics test suites.
"""
import numpy as np
from copy import copy
import sys
# http://stackoverflow.com/q/14246983/1258041
# updated to avoid calling np.allclose: since numpy 2.0 this results in a RecursionError
class ComparableArray(np.ndarray):
    """ndarray subclass whose ``==`` compares whole arrays approximately and
    returns a single bool instead of an element-wise mask.

    Used so fixture dicts containing arrays can be compared with plain ``==``.
    The tolerance test mirrors ``np.allclose`` (|a - b| <= atol + rtol * |b|)
    but is written out manually: calling ``np.allclose`` here recurses
    infinitely on numpy >= 2.0 (see module comment above).
    """
    def __new__(cls, *args, **kwargs):
        # Pop our tolerance kwargs BEFORE delegating to ndarray.__new__;
        # previously they were popped afterwards, so passing atol=/rtol=
        # raised TypeError inside ndarray.__new__ (unknown keyword).
        atol = kwargs.pop('atol', 1e-8)
        rtol = kwargs.pop('rtol', 1e-5)
        inst = super(ComparableArray, cls).__new__(cls, *args, **kwargs)
        inst._atol = atol
        inst._rtol = rtol
        return inst
    def __eq__(self, other):
        # Only ndarray-likes can compare equal; anything else is "not equal".
        if not isinstance(other, np.ndarray):
            return False
        other = np.asarray(other, dtype=float)
        # Shape check first (short-circuits) so the subtraction cannot
        # broadcast-fail on mismatched shapes.
        return self.shape == other.shape and np.all(np.abs(self - other) <= self._atol + self._rtol * np.abs(other))
def makeCA(arr):
    """Wrap *arr* (any array-like) in a :py:class:`ComparableArray`."""
    source = np.asarray(arr)
    return ComparableArray(source.shape, source.dtype, source)
pepxml_results = [
{'spectrum': 'pps_sl20060731_18mix_25ul_r1_1154456409.0100.0100.1',
'end_scan': 100,
'start_scan': 100,
'index': 1,
'assumed_charge': 1,
'precursor_neutral_mass': 860.392,
'search_hit': [{
'num_missed_cleavages': 0,
'tot_num_ions': 12,
'is_rejected': False,
'search_score': {
'deltacn': 0.081,
'sprank': 1.0,
'deltacnstar': 0.0,
'spscore': 894.0,
'xcorr': 1.553},
'hit_rank': 1,
'num_matched_ions': 11,
'num_tot_proteins': 1,
'peptide': 'SLNGEWR',
'massdiff': -0.5,
'analysis_result': [{'analysis': 'peptideprophet',
'peptideprophet_result':
{'all_ntt_prob': [0.0422, 0.509, 0.96],
'parameter':
{'fval': 1.4723, 'massd': -0.5, 'nmc': 0.0, 'ntt': 2.0},
'probability': 0.96}}],
'modifications': [],
'modified_peptide': 'SLNGEWR',
'proteins': [{'num_tol_term': 2,
'protein': 'sp|P00722|BGAL_ECOLI',
'peptide_prev_aa': 'R',
'protein_descr': 'BETA-GALACTOSIDASE (EC 3.2.1.23) '
'(LACTASE) - Escherichia coli.',
'peptide_next_aa': 'F'}],
'calc_neutral_pep_mass': 860.892}]},
{'precursor_neutral_mass': 677.392,
'spectrum': 'pps_sl20060731_18mix_25ul_r1_1154456409.0040.0040.1',
'start_scan': 40,
'assumed_charge': 1,
'index': 2,
'end_scan': 40,
'search_hit': [{'tot_num_ions': 10,
'num_missed_cleavages': 1,
'is_rejected': False,
'hit_rank': 1,
'num_matched_ions': 8,
'search_score': {
'sprank': 1.0,
'deltacn': 0.165,
'deltacnstar': 0.0,
'spscore': 427.0,
'xcorr': 1.644},
'num_tot_proteins': 1,
'peptide': 'GKKFAK',
'massdiff': -0.5,
'analysis_result': [{'analysis': 'peptideprophet',
'peptideprophet_result': {
'all_ntt_prob': [0.0491, 0.548, 0.9656],
'parameter': {
'fval': 2.0779, 'massd': -0.5, 'nmc': 1.0, 'ntt': 1.0},
'probability': 0.548}}],
'modifications': [],
'modified_peptide': 'GKKFAK',
'proteins': [{'num_tol_term': 1,
'protein': 'gi|3212198|gb|AAC22319.1|',
'peptide_prev_aa': 'N',
'protein_descr': 'hemoglobin-binding protein '
'[Haemophilus influenzae Rd]',
'peptide_next_aa': 'I'}],
'calc_neutral_pep_mass': 677.892}]},
{'assumed_charge': 2,
'end_scan': 1366,
'index': 29,
'precursor_neutral_mass': 718.4136,
'retention_time_sec': 38.426123,
'search_hit': [{'calc_neutral_pep_mass': 718.4126,
'search_score': {
'expect': 0.0,
'homologyscore': 46.61,
'identityscore': 25.38,
'star': 0.0,
'ionscore': 36.45},
'hit_rank': 1,
'is_rejected': False,
'massdiff': 0.0011,
'modifications': [],
'modified_peptide': 'VGQFIR',
'num_matched_ions': 5,
'num_missed_cleavages': 0,
'num_tot_proteins': 1,
'peptide': 'VGQFIR',
'analysis_result': [{'analysis': 'peptideprophet',
'peptideprophet_result':
{'all_ntt_prob': [0., 0.5741, 0.7264],
'parameter': {
'fval': 0.6052, 'massd': 0.001, 'nmc': 0.0, 'ntt': 2.0},
'probability': 0.7264}}],
'proteins': [{'num_tol_term': 2,
'peptide_next_aa': 'L',
'peptide_prev_aa': 'K',
'protein': 'IPI00200898',
'protein_descr': None}],
'tot_num_ions': 10}],
'spectrum': 'MASCOT',
'start_scan': 1366},
{'assumed_charge': 2,
'end_scan': 6862,
'index': 49,
'precursor_neutral_mass': 1404.7476,
'search_hit': [{'search_score': {
'bscore': 2.0,
'expect': 0.012,
'nextscore': 14.6,
'hyperscore': 23.5,
'yscore': 8.7},
'calc_neutral_pep_mass': 1404.7435,
'hit_rank': 1,
'is_rejected': False,
'massdiff': 0.004,
'modifications': [{'mass': 1.0079, 'position': 0},
{'mass': 147.0354, 'position': 10},
{'mass': 17.0031, 'position': 13}],
'modified_peptide': 'EVPLNTIIFM[147]GR',
'num_matched_ions': 8,
'num_missed_cleavages': 0,
'num_tot_proteins': 2,
'peptide': 'EVPLNTIIFMGR',
'proteins': [{'num_tol_term': 2,
'peptide_next_aa': 'V',
'peptide_prev_aa': 'R',
'protein': 'sp|P01008|ANT3_HUMAN',
'protein_descr': 'Antithrombin-III OS=Homo sapiens GN=SERPINC1 PE=1 SV=1'},
{'num_tol_term': 2, 'protein': 'tr|Q8TCE1|Q8TCE1_HUMAN',
'protein_descr': 'SERPINC1 protein OS=Homo sapiens GN=SERPINC1 PE=2 SV=1'}],
'tot_num_ions': 22}],
'spectrum': 'X!Tandem',
'start_scan': 6862},
{'assumed_charge': 3,
'end_scan': 23,
'index': 3,
'precursor_neutral_mass': 3254.044921875,
'search_hit': [{'calc_neutral_pep_mass': 3254.04711914062,
'search_score': {
'expect': 13690.946579388728,
'pvalue': 59.52585469299447},
'hit_rank': 1,
'is_rejected': False,
'massdiff': -0.002197265625,
'modifications': [{'mass': 166.99803, 'position': 6},
{'mass': 166.99803, 'position': 7},
{'mass': 166.99803, 'position': 9},
{'mass': 160.03019, 'position': 15},
{'mass': 160.03019, 'position': 21}],
'modified_peptide': 'DQQFDS[166]S[166]SS[166]MALEDCGEETNCQSDFK',
'num_matched_ions': 3,
'num_tot_proteins': 1,
'peptide': 'DQQFDSSSSMALEDCGEETNCQSDFK',
'proteins': [{'num_tol_term': 0,
'peptide_next_aa': 'I',
'peptide_prev_aa': 'R',
'protein': 'BL_ORD_ID:125453',
'protein_descr': 'sp|O43149|ZZEF1_HUMAN Zinc finger ZZ-type and EF-hand domain-containing protein 1 OS=Homo sapiens GN=ZZEF1 PE=1 SV=6:reversed'}],
'tot_num_ions': 50},
{'calc_neutral_pep_mass': 3254.04711914062,
'search_score': {'expect': 14837.682803311733,
'pvalue': 64.51166436222492},
'hit_rank': 2,
'is_rejected': False,
'massdiff': -0.002197265625,
'modifications': [{'mass': 243.02933, 'position': 6},
{'mass': 170.10596, 'position': 8},
{'mass': 181.01368, 'position': 11},
{'mass': 181.01368, 'position': 13},
{'mass': 181.01368, 'position': 18},
{'mass': 181.01368, 'position': 21},
{'mass': 160.03019, 'position': 1},
{'mass': 160.03019, 'position': 4}],
'modified_peptide': 'CENCNY[243]PK[170]EGT[181]HT[181]NQHET[181]LHT[181]SR',
'num_matched_ions': 6,
'num_tot_proteins': 2,
'peptide': 'CENCNYPKEGTHTNQHETLHTSR',
'proteins': [{'num_tol_term': 0,
'peptide_next_aa': 'S',
'peptide_prev_aa': 'R',
'protein': 'BL_ORD_ID:144314',
'protein_descr': 'tr|Q6ZND3|Q6ZND3_HUMAN Zinc finger protein 184 OS=Homo sapiens GN=ZNF184 PE=2 SV=1:reversed'},
{'protein': 'BL_ORD_ID:154629',
'protein_descr': 'sp|Q99676|ZN184_HUMAN Zinc finger protein 184 OS=Homo sapiens GN=ZNF184 PE=1 SV=4:reversed'}],
'tot_num_ions': 44}],
'spectrum': '"Cmpd 24, +MSn(1085.6886), 1.2 min.23.23.3"',
'start_scan': 23}]
mzid_spectra = {
(False, False): [{'id': 'SEQ_spec1',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=1'},
{'id': 'SEQ_spec2a',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=2'},
{'id': 'SEQ_spec3a',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=3'},
{'id': 'SEQ_spec10',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=10'},
{'id': 'SEQ_spec11a',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=11'},
{'id': 'SEQ_spec12',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=12'},
{'id': 'SEQ_spec13',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=13'},
{'id': 'SEQ_spec15',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=15'},
{'id': 'SEQ_spec20',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=20'},
{'id': 'Mas_spec2b',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=2'},
{'id': 'Mas_spec3b',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=3'},
{'id': 'Mas_spec4',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=4'},
{'id': 'Mas_spec6',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=6'},
{'id': 'Mas_spec11b',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=11'},
{'id': 'Mas_spec12',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=12'},
{'id': 'Mas_spec35',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=35'},
{'id': 'Mas_spec36b1',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=36'},
{'id': 'Mas_spec40',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=40'}],
(False, True): [{'FileFormat': 'Proteinscape spectra',
'SpectrumIDFormat': 'spectrum from database nativeID format',
'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X',
'spectrumID': 'databasekey=1'},
{'FileFormat': 'Proteinscape spectra',
'SpectrumIDFormat': 'spectrum from database nativeID format',
'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X',
'spectrumID': 'databasekey=2'},
{'FileFormat': 'Proteinscape spectra',
'SpectrumIDFormat': 'spectrum from database nativeID format',
'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X',
'spectrumID': 'databasekey=3'},
{'FileFormat': 'Proteinscape spectra',
'SpectrumIDFormat': 'spectrum from database nativeID format',
'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X',
'spectrumID': 'databasekey=10'},
{'FileFormat': 'Proteinscape spectra',
'SpectrumIDFormat': 'spectrum from database nativeID format',
'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X',
'spectrumID': 'databasekey=11'},
{'FileFormat': 'Proteinscape spectra',
'SpectrumIDFormat': 'spectrum from database nativeID format',
'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X',
'spectrumID': 'databasekey=12'},
{'FileFormat': 'Proteinscape spectra',
'SpectrumIDFormat': 'spectrum from database nativeID format',
'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X',
'spectrumID': 'databasekey=13'},
{'FileFormat': 'Proteinscape spectra',
'SpectrumIDFormat': 'spectrum from database nativeID format',
'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X',
'spectrumID': 'databasekey=15'},
{'FileFormat': 'Proteinscape spectra',
'SpectrumIDFormat': 'spectrum from database nativeID format',
'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X',
'spectrumID': 'databasekey=20'},
{'FileFormat': 'Proteinscape spectra',
'SpectrumIDFormat': 'spectrum from database nativeID format',
'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X',
'spectrumID': 'databasekey=2'},
{'FileFormat': 'Proteinscape spectra',
'SpectrumIDFormat': 'spectrum from database nativeID format',
'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X',
'spectrumID': 'databasekey=3'},
{'FileFormat': 'Proteinscape spectra',
'SpectrumIDFormat': 'spectrum from database nativeID format',
'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X',
'spectrumID': 'databasekey=4'},
{'FileFormat': 'Proteinscape spectra',
'SpectrumIDFormat': 'spectrum from database nativeID format',
'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X',
'spectrumID': 'databasekey=6'},
{'FileFormat': 'Proteinscape spectra',
'SpectrumIDFormat': 'spectrum from database nativeID format',
'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X',
'spectrumID': 'databasekey=11'},
{'FileFormat': 'Proteinscape spectra',
'SpectrumIDFormat': 'spectrum from database nativeID format',
'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X',
'spectrumID': 'databasekey=12'},
{'FileFormat': 'Proteinscape spectra',
'SpectrumIDFormat': 'spectrum from database nativeID format',
'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X',
'spectrumID': 'databasekey=35'},
{'FileFormat': 'Proteinscape spectra',
'SpectrumIDFormat': 'spectrum from database nativeID format',
'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X',
'spectrumID': 'databasekey=36'},
{'FileFormat': 'Proteinscape spectra',
'SpectrumIDFormat': 'spectrum from database nativeID format',
'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X',
'spectrumID': 'databasekey=40'}],
(True, False): [{'SpectrumIdentificationItem': [
{'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec1_pep1'}],
'ProteinScape:IntensityCoverage': 0.3919545603809718,
'ProteinScape:SequestMetaScore': 7.59488518903425,
'calculatedMassToCharge': 1507.695,
'chargeState': 1,
'experimentalMassToCharge': 1507.696,
'id': 'SEQ_spec1_pep1',
'passThreshold': True,
'peptide_ref': 'prot1_pep1',
'rank': 1}],
'id': 'SEQ_spec1',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=1'},
{'SpectrumIdentificationItem': [
{'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec2a_pep1'}],
'ProteinScape:IntensityCoverage': 0.5070386909133888,
'ProteinScape:SequestMetaScore': 10.8810331335713,
'calculatedMassToCharge': 1920.9224,
'chargeState': 1,
'experimentalMassToCharge': 1920.923,
'id': 'SEQ_spec2a_pep1',
'passThreshold': True,
'peptide_ref': 'prot1_pep2',
'rank': 1}],
'id': 'SEQ_spec2a',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=2'},
{'SpectrumIdentificationItem': [
{'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec3a_pep1'}],
'ProteinScape:IntensityCoverage': 0.43376827663349576,
'ProteinScape:SequestMetaScore': 6.1021771936508955,
'calculatedMassToCharge': 864.4752,
'chargeState': 1,
'experimentalMassToCharge': 864.474,
'id': 'SEQ_spec3a_pep1',
'passThreshold': True,
'peptide_ref': 'prot1_pep3',
'rank': 1}],
'id': 'SEQ_spec3a',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=3'},
{'SpectrumIdentificationItem': [
{'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec10_pep1'}],
'ProteinScape:IntensityCoverage': 0.16164593872706742,
'ProteinScape:SequestMetaScore': 5.635013787097159,
'calculatedMassToCharge': 1832.862115,
'chargeState': 1,
'experimentalMassToCharge': 1832.863,
'id': 'SEQ_spec10_pep1',
'passThreshold': True,
'peptide_ref': 'prot1_pep4',
'rank': 1}],
'id': 'SEQ_spec10',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=10'},
{'SpectrumIdentificationItem': [
{'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec11a_pep1'}],
'ProteinScape:IntensityCoverage': 0.6146634530945828,
'ProteinScape:SequestMetaScore': 10.17510605321669,
'calculatedMassToCharge': 911.4144,
'chargeState': 1,
'experimentalMassToCharge': 911.413,
'id': 'SEQ_spec11a_pep1',
'passThreshold': True,
'peptide_ref': 'prot2_pep1',
'rank': 1},
{'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec11a_pep2'}],
'ProteinScape:IntensityCoverage': 0.2517734933944088,
'ProteinScape:SequestMetaScore': 6.005532583410669,
'calculatedMassToCharge': 1365.722015,
'chargeState': 1,
'experimentalMassToCharge': 1365.721,
'id': 'SEQ_spec11a_pep2',
'passThreshold': True,
'peptide_ref': 'prot3_pep1',
'rank': 2}],
'id': 'SEQ_spec11a',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=11'},
{'SpectrumIdentificationItem': [
{'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec12_pep1'}],
'ProteinScape:IntensityCoverage': 0.4884754815768041,
'ProteinScape:SequestMetaScore': 12.042955809241318,
'calculatedMassToCharge': 2255.9515,
'chargeState': 1,
'experimentalMassToCharge': 2255.95,
'id': 'SEQ_spec12_pep1',
'passThreshold': True,
'peptide_ref': 'prot3_pep2',
'rank': 1},
{'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec12_pep2'}],
'ProteinScape:IntensityCoverage': 0.554279316913958,
'ProteinScape:SequestMetaScore': 3.1184106313104283,
'calculatedMassToCharge': 3941.036315,
'chargeState': 1,
'experimentalMassToCharge': 3941.081,
'id': 'SEQ_spec12_pep2',
'passThreshold': True,
'peptide_ref': 'prot2_pep2',
'rank': 2}],
'id': 'SEQ_spec12',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=12'},
{'SpectrumIdentificationItem': [
{'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec13_pep1'}],
'ProteinScape:IntensityCoverage': 0.39717937427768873,
'ProteinScape:SequestMetaScore': 4.159878401845841,
'calculatedMassToCharge': 911.4144,
'chargeState': 1,
'experimentalMassToCharge': 911.415,
'id': 'SEQ_spec13_pep1',
'passThreshold': True,
'peptide_ref': 'prot2_pep1',
'rank': 1},
{'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec13_pep2'}],
'ProteinScape:IntensityCoverage': 0.136423966822031,
'ProteinScape:SequestMetaScore': 5.725397508852668,
'calculatedMassToCharge': 2192.932715,
'chargeState': 1,
'experimentalMassToCharge': 2192.9,
'id': 'SEQ_spec13_pep2',
'passThreshold': True,
'peptide_ref': 'prot3_pep3',
'rank': 2}],
'id': 'SEQ_spec13',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=13'},
{'SpectrumIdentificationItem': [
{'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec15_pep1'}],
'ProteinScape:IntensityCoverage': 0.2854129700126088,
'ProteinScape:SequestMetaScore': 6.181682868401155,
'calculatedMassToCharge': 1469.8071,
'chargeState': 1,
'experimentalMassToCharge': 1469.806,
'id': 'SEQ_spec15_pep1',
'passThreshold': True,
'peptide_ref': 'prot4_pep1',
'rank': 1}],
'id': 'SEQ_spec15',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=15'},
{'SpectrumIdentificationItem': [
{'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec20_pep1'}],
'ProteinScape:IntensityCoverage': 0.29049959198538566,
'ProteinScape:SequestMetaScore': 6.669916225794168,
'calculatedMassToCharge': 1225.6059,
'chargeState': 1,
'experimentalMassToCharge': 1225.604,
'id': 'SEQ_spec20_pep1',
'passThreshold': True,
'peptide_ref': 'prot4_pep2',
'rank': 1}],
'id': 'SEQ_spec20',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=20'},
{'SpectrumIdentificationItem': [
{'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_Mas_spec2b_pep1'}],
'calculatedMassToCharge': 2035.0745,
'chargeState': 1,
'experimentalMassToCharge': 2035.075,
'id': 'Mas_spec2b_pep1',
'passThreshold': True,
'peptide_ref': 'prot5_pep1',
'rank': 1}],
'id': 'Mas_spec2b',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=2'},
{'SpectrumIdentificationItem': [
{'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_Mas_spec3b_pep1'}],
'calculatedMassToCharge': 1834.8856,
'chargeState': 1,
'experimentalMassToCharge': 1834.884,
'id': 'Mas_spec3b_pep1',
'passThreshold': True,
'peptide_ref': 'prot5_pep2',
'rank': 1}],
'id': 'Mas_spec3b',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=3'},
{'SpectrumIdentificationItem': [
{'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_Mas_spec4_pep1'}],
'calculatedMassToCharge': 1097.5049,
'chargeState': 1,
'experimentalMassToCharge': 1097.503,
'id': 'Mas_spec4_pep1',
'passThreshold': True,
'peptide_ref': 'prot5_pep3',
'rank': 1}],
'id': 'Mas_spec4',
'spectraData_ref': 'LCMALDI_spectra',
'spectrumID': 'databasekey=4'},
{'SpectrumIdentificationItem': [
{'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_Mas_spec6_pep1'}],
'calculatedMassToCharge': 975.4457,
'chargeState': 1,
'experimentalMassToCharge': 975.446,
'id': 'Mas_spec6_pep1',
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | true |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/tests/test_unimod.py | tests/test_unimod.py | import unittest
from os import path
import pyteomics
pyteomics.__path__ = [path.abspath(path.join(path.dirname(__file__), path.pardir, 'pyteomics'))]
from pyteomics.mass import unimod
class UnimodTests(unittest.TestCase):
    """Tests for the :py:class:`pyteomics.mass.unimod.Unimod` database wrapper.

    NOTE(review): setUpClass builds the database, which presumably downloads
    the Unimod definitions — these tests appear to assume network access or a
    cached copy; confirm before running offline.
    """
    @classmethod
    def setUpClass(cls):
        # Build the database once; all tests below only read from it.
        cls.handle = unimod.Unimod()
    def test_modifications_have_composition(self):
        """Each modification either has no composition at all (both public and
        private attributes None) or a composition with non-zero mass."""
        for modification in self.handle:
            if modification.composition is None:
                assert modification._composition is None
            else:
                assert modification.composition.mass() != 0
    def test_find_modification_by_name(self):
        """Each queried name must appear among the returned record's names
        (full, code, ex-code, or alternatives)."""
        for modification in ['Acetyl', 'Deamidated', 'GG', 'GlyGly']:
            mod = self.handle.get(modification)
            names = ({mod.full_name, mod.code_name, mod.ex_code_name} |
                     {alt for alt in mod.alternative_names})
            assert modification in names
    def test_composition_parser_sign(self):
        """Amidation and deamidation differ only in sign, so their parsed
        compositions must not be equal."""
        amid = self.handle['Amidated']
        deamid = self.handle['Deamidated']
        assert amid.composition != deamid.composition
    def test_queries(self):
        """Direct SQLAlchemy queries work; AA substitutions are named 'X->Y'."""
        mods = self.handle.session.query(
            unimod.Modification).join(
            unimod.Specificity).join(
            unimod.Classification).filter(
            unimod.Classification.classification == 'AA substitution').all()
        for mod in mods:
            assert '->' in mod.full_name
    def test_unimod_get(self):
        """Lookup by id, exact name, alternative name, and partial name
        (strict=False) must all resolve to the same record."""
        mod_by_id = self.handle[2]
        mod_by_name = self.handle['Amidation']
        mod_by_alt_name = self.handle['Top-Down sequencing c-type fragment ion']
        mod_by_name_partial = self.handle.get('Amid', strict=False)
        mod_by_alt_name_partial = self.handle.get('c-type fragment ion', strict=False)
        self.assertEqual(mod_by_id, mod_by_name)
        self.assertEqual(mod_by_id, mod_by_alt_name)
        self.assertEqual(mod_by_id, mod_by_name_partial)
        self.assertEqual(mod_by_id, mod_by_alt_name_partial)
    def test_raise_on_nonmatching_id(self):
        """Unknown ids and names must raise KeyError, not return None."""
        self.assertRaises(KeyError, self.handle.get, -1)
        self.assertRaises(KeyError, self.handle.get, 'NotAModification')
    def test_constructor(self):
        """Default and explicit sqlite URLs must yield equivalent databases."""
        import os
        # Start from a clean slate so the explicit-URL build is fresh.
        if os.path.exists('unimod.db'):
            os.remove('unimod.db')
        for name in [None, 'sqlite:///unimod.db']:
            handle = unimod.Unimod(name)
            self.assertEqual(self.handle[1], handle[1])
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/protxml.py | pyteomics/protxml.py | """
protxml - parsing of ProteinProphet output files
================================================
Summary
-------
**protXML** is the output format of the `ProteinProphet software <http://proteinprophet.sourceforge.net/>`_.
It contains information about identified proteins and their statistical significance.
This module provides minimalistic infrastructure for access to data stored in
protXML files. The central class is :py:class:`ProtXML`, which
reads protein entries and related information and saves them into
Python dicts.
Data access
-----------
:py:class:`ProtXML` - a class representing a single protXML file.
Other data access functions use this class internally.
:py:func:`read` - iterate through peptide-spectrum matches in a protXML
file. Calling the function is synonymous to instantiating the :py:class:`ProtXML` class.
:py:func:`chain` - read multiple files at once.
:py:func:`chain.from_iterable` - read multiple files at once, using an
iterable of files.
:py:func:`DataFrame` - read protXML files into a :py:class:`pandas.DataFrame`.
Target-decoy approach
---------------------
:py:func:`filter` - filter protein groups from a chain of protXML files to a specific FDR
using TDA.
:py:func:`filter.chain` - chain a series of filters applied independently to
several files.
:py:func:`filter.chain.from_iterable` - chain a series of filters applied
independently to an iterable of files.
:py:func:`filter_df` - filter protXML files and return a :py:class:`pandas.DataFrame`.
:py:func:`fdr` - estimate the false discovery rate of a set of protein groups using the
target-decoy approach.
:py:func:`qvalues` - get an array of scores and *q* values for protein groups using the target-decoy approach.
:py:func:`is_decoy` - determine whether a protein group is decoy or not. This function may not suit your use case.
Dependencies
------------
This module requres :py:mod:`lxml`.
--------------------------------------------------------------------------------
"""
# Copyright 2018 Lev Levitsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import xml, auxiliary as aux, _schema_defaults
import operator as op
class ProtXML(xml.MultiProcessingXML):
    """Parser class for protXML files.

    Iterates over ``protein_group`` elements, yielding them as dicts with
    selected attributes converted to native Python types.
    """
    file_format = 'protXML'
    _root_element = 'protein_summary'
    _default_schema = _schema_defaults._protxml_schema_defaults
    # _default_version = None
    _default_iter_tag = 'protein_group'
    # Protein groups are indexed by their 'group_number' attribute.
    _indexed_tag_keys = {'protein_group': 'group_number'}
    _default_id_attr = 'group_number'
    _indexed_tags = {'protein_group'}
    _structures_to_flatten = {'annotation'}
    # attributes which contain unconverted values, grouped by target type
    _convert_items = {'float': {'pct_spectrum_ids'},
                      'int': {'group_number', 'prot_length'},
                      'bool': {'is_contributing_evidence', 'is_nondegenerate_evidence'}
                      }.items()
    def _get_info_smart(self, element, **kwargs):
        """Extract the info in a smart way depending on the element type.

        The root element is read shallowly (protein groups under it are
        iterated lazily); all other elements are read recursively. String
        attributes listed in ``_convert_items`` are converted to
        float/int/bool, and a few single-element nested structures are
        flattened into the parent dict.
        """
        try:
            name = kwargs.pop('ename')
        except KeyError:
            name = xml._local_name(element)
        rec = kwargs.pop('recursive', None)
        if name == self._root_element:
            # Do not descend into the root: its children are parsed on demand.
            info = self._get_info(element, ename=name, recursive=(rec if rec is not None else False), **kwargs)
        else:
            info = self._get_info(element, ename=name, recursive=(rec if rec is not None else True), **kwargs)
        # Convert string values to native types per _convert_items.
        converters = {'float': float, 'int': int, 'bool': lambda x: x.lower() in {'1', 'true', 'y'}}
        for k, v in dict(info).items():
            for t, s in self._convert_items:
                if k in s:
                    del info[k]
                    info[k] = converters[t](v)
        # A single <parameter> child is merged into the enclosing dict.
        p = info.get('parameter')
        if isinstance(p, list) and len(p) == 1 and isinstance(p[0], dict):
            info.update(info.pop('parameter')[0])
        if 'modification_info' in info:
            # this is a list with one element
            info.update(info.pop('modification_info')[0])
        if 'unique_stripped_peptides' in info:
            # '+'-joined peptide string -> list of peptide strings
            info['unique_stripped_peptides'] = info['unique_stripped_peptides'].split('+')
        return info
def read(source, read_schema=False, iterative=True, **kwargs):
    """Open `source` and return an iterator over its protein groups.

    Calling this function is synonymous to instantiating
    :py:class:`ProtXML` directly.

    Parameters
    ----------
    source : str or file
        A path to a target protXML file or the file object itself.
    read_schema : bool, optional
        If :py:const:`True`, attempt to extract information from the XML
        schema mentioned in the protXML header; otherwise use built-in
        defaults. Not recommended without an Internet connection or
        if you don't like to get the related warnings.
    iterative : bool, optional
        Defines whether iterative parsing should be used. It helps reduce
        memory usage at almost the same parsing speed. Default is
        :py:const:`True`.

    Returns
    -------
    out : ProtXML
        An iterator over dicts with protein group properties.
    """
    # NOTE(review): extra **kwargs are accepted but not forwarded to ProtXML —
    # confirm this is intentional (kept as-is to preserve behavior).
    return ProtXML(source, read_schema=read_schema, iterative=iterative)
# Multi-file reader: chain(f1, f2, ...) and chain.from_iterable(files)
# iterate protein groups across several protXML files sequentially.
chain = aux.ChainBase._make_chain(ProtXML)
def _is_decoy_prefix(pg, prefix='DECOY_'):
"""Determine if a protein group should be considered decoy.
This function checks that all protein names in a group start with `prefix`.
You may need to provide your own function for correct filtering and FDR estimation.
Parameters
----------
pg : dict
A protein group dict produced by the :py:class:`ProtXML` parser.
prefix : str, optional
A prefix used to mark decoy proteins. Default is `'DECOY_'`.
Returns
-------
out : bool
"""
return all(p['protein_name'].startswith(prefix) for p in pg['protein'])
def _is_decoy_suffix(pg, suffix='_DECOY'):
"""Determine if a protein group should be considered decoy.
This function checks that all protein names in a group end with `suffix`.
You may need to provide your own function for correct filtering and FDR estimation.
Parameters
----------
pg : dict
A protein group dict produced by the :py:class:`ProtXML` parser.
suffix : str, optional
A suffix used to mark decoy proteins. Default is `'_DECOY'`.
Returns
-------
out : bool
"""
return all(p['protein_name'].endswith(suffix) for p in pg['protein'])
is_decoy = _is_decoy_prefix
fdr = aux._make_fdr(_is_decoy_prefix, _is_decoy_suffix)
_key = op.itemgetter('probability')
qvalues = aux._make_qvalues(chain, _is_decoy_prefix, _is_decoy_suffix, _key)
filter = aux._make_filter(chain, _is_decoy_prefix, _is_decoy_suffix, _key, qvalues)
filter.chain = aux._make_chain(filter, 'filter', True)
def DataFrame(*args, **kwargs):
"""Read protXML output files into a :py:class:`pandas.DataFrame`.
.. note :: Rows in the DataFrame correspond to individual proteins, not protein groups.
Requires :py:mod:`pandas`.
Parameters
----------
sep : str or None, keyword only, optional
Some values related to protein groups are variable-length lists.
If `sep` is a :py:class:`str`, they will be packed into single string using
this delimiter. If `sep` is :py:const:`None`, they are kept as lists. Default is
:py:const:`None`.
pd_kwargs : dict, optional
Keyword arguments passed to the :py:class:`pandas.DataFrame` constructor.
*args
Passed to :py:func:`chain`.
**kwargs
Passed to :py:func:`chain`.
Returns
-------
out : pandas.DataFrame
"""
import pandas as pd
kwargs = kwargs.copy()
sep = kwargs.pop('sep', None)
pd_kwargs = kwargs.pop('pd_kwargs', {})
def gen_items():
with chain(*args, **kwargs) as f:
for item in f:
info = {}
for k, v in item.items():
if isinstance(v, (str, int, float)):
info[k] = v
if 'protein' in item:
for prot in item['protein']:
out = dict(info)
out.update(prot)
if 'unique_stripped_peptides' in out:
if sep is not None:
out['unique_stripped_peptides'] = sep.join(out['unique_stripped_peptides'])
if 'indistinguishable_protein' in out:
if sep is None:
out['indistinguishable_protein'] = [p['protein_name'] for p in out['indistinguishable_protein']]
else:
out['indistinguishable_protein'] = sep.join(p['protein_name'] for p in out['indistinguishable_protein'])
if 'analysis_result' in out:
for ar in out['analysis_result']:
if ar['analysis'] == 'stpeter':
out.update(ar['StPeterQuant'])
yield out
return pd.DataFrame(gen_items(), **pd_kwargs)
def filter_df(*args, **kwargs):
"""Read protXML files or DataFrames and return a :py:class:`DataFrame` with filtered PSMs.
Positional arguments can be protXML files or DataFrames.
.. note :: Rows in the DataFrame correspond to individual proteins, not protein groups.
Requires :py:mod:`pandas`.
Parameters
----------
key : str / iterable / callable, keyword only, optional
Default is 'probability'.
is_decoy : str / iterable / callable, keyword only, optional
Default is to check that "protein_name" starts with `'DECOY_'`.
reverse : bool, keyword only, optional
Should be :py:const:`True` if higher score is better.
Default is :py:const:`True` (because the default key is 'probability').
*args
Passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.
**kwargs
Passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.
Returns
-------
out : pandas.DataFrame
"""
import pandas as pd
kwargs.setdefault('key', 'probability')
kwargs.setdefault('reverse', True)
if all(isinstance(arg, pd.DataFrame) for arg in args):
if len(args) > 1:
df = pd.concat(args)
else:
df = args[0]
else:
read_kw = {k: kwargs.pop(k) for k in ['iterative', 'read_schema', 'sep', 'pd_kwargs'] if k in kwargs}
df = DataFrame(*args, **read_kw)
if 'is_decoy' not in kwargs:
if 'decoy_suffix' in kwargs:
kwargs['is_decoy'] = df['protein_name'].str.endswith(kwargs['decoy_suffix'])
else:
kwargs['is_decoy'] = df['protein_name'].str.startswith(kwargs.get('decoy_prefix', 'DECOY_'))
return aux.filter(df, **kwargs)
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/pylab_aux.py | pyteomics/pylab_aux.py | """
pylab_aux - auxiliary functions for plotting with pylab
=======================================================
This module serves as a collection of useful routines for data plotting with
matplotlib.
Generic plotting
----------------
:py:func:`plot_line` - plot a line.
:py:func:`scatter_trend` - plot a scatter plot with a regression line.
:py:func:`plot_function_3d` - plot a 3D graph of a function of two variables.
:py:func:`plot_function_contour` - plot a contour graph of a function of
two variables.
Spectrum visualization
----------------------
:py:func:`plot_spectrum` - plot a single spectrum (m/z vs intensity).
:py:func:`annotate_spectrum` - plot and annotate peaks in MS/MS spectrum.
:py:func:`mirror` - create a mirror plot of two spectra (using :py:mod:`spectrum_utils`).
FDR control
-----------
:py:func:`plot_qvalue_curve` - plot the dependence of q-value on the amount of PSMs
(similar to a ROC curve).
See also
--------
- `Matplotlib cookbook <http://www.scipy.org/Cookbook/Matplotlib/>`_
- `Matplotlib tutorial
<http://matplotlib.sourceforge.net/mpl_toolkits/mplot3d/tutorial.html>`_
Dependencies
------------
This module requires :py:mod:`matplotlib`. Optional dependencies: :py:mod:`adjustText`, :py:mod:`spectrum_utils`.
-------------------------------------------------------------------------------
"""
# Copyright 2012 Anton Goloborodko, Lev Levitsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylab
import numpy as np
from .auxiliary import linear_regression, PyteomicsError
from .version import VersionInfo
from . import parser, mass, mgf, proforma
try:
import spectrum_utils
if VersionInfo(spectrum_utils.__version__) < VersionInfo('0.4'):
raise ImportError("Supported spectrum_utils version is 0.4.0 or newer.")
import spectrum_utils.spectrum as sus
import spectrum_utils.plot as sup
except ImportError:
sus = sup = None
def plot_line(a, b, xlim=None, *args, **kwargs):
"""Plot a line y = a * x + b.
Parameters
----------
a : float
The slope of the line.
b : float
The intercept of the line.
xlim : tuple, optional
Minimal and maximal values of `x`. If not given, :py:func:`pylab.xlim` will be called.
*args
Passed to :py:func:`pylab.plot` after `x` and `y` values.
**kwargs
Passed to :py:func:`pylab.plot`.
Returns
-------
out : matplotlib.lines.Line2D
The line object.
"""
if xlim is None:
xlim = pylab.xlim()
return pylab.plot([xlim[0], xlim[1]], [a * xlim[0] + b, a * xlim[1] + b], *args, **kwargs)
def scatter_trend(x, y=None, **kwargs):
"""Make a scatter plot with a linear regression.
Parameters
----------
x : array_like of float
1-D array of floats. If `y` is omitted, `x` must be a 2-D array of shape (N, 2).
y : array_like of float, optional
1-D arrays of floats. If `y` is omitted or :py:const:`None`, `x` must be a 2-D array of shape (N, 2).
plot_trend : bool, optional
If :py:const:`True` then plot a trendline (default).
plot_sigmas : bool, optional
If :py:const:`True` then plot confidence intervals of the linear fit.
:py:const:`False` by default.
show_legend : bool, optional
If :py:const:`True`, a legend will be shown with linear fit equation,
correlation coefficient, and standard deviation from the fit. Default is
:py:const:`True`.
title : str, optional
The title. Empty by default.
xlabel, ylabel : str, optional
The axes labels. Empty by default.
alpha_legend : float, optional
Legend box transparency. 1.0 by default
scatter_kwargs : dict, optional
Keyword arguments for :py:func:`pylab.scatter`.
Empty by default.
plot_kwargs : dict, optional
Keyword arguments for :py:func:`plot_line`.
By default, sets `xlim` and `label`.
legend_kwargs : dict, optional
Keyword arguments for :py:func:`pylab.legend`.
Default is :py:const:`{'loc': 'upper left'}`.
sigma_kwargs : dict, optional
Keyword arguments for :py:func:`pylab.plot` used for sigma lines.
Default is :py:const:`{'color': 'red', 'linestyle': 'dashed'}`.
sigma_values : iterable, optional
Each value will be multiplied with standard error of the fit, and the line
shifted by the resulting value will be plotted. Default is :py:const:`range(-3, 4)`.
regression : callable, optional
Function to perform linear regression. Will be given ``x`` and ``y`` as arguments.
Must return a 4-tuple: (a, b, r, stderr).
Default is :py:func:`pyteomics.auxiliary.linear_regression`.
Returns
-------
out : tuple
A (scatter_plot, trend_line, sigma_lines, legend) tuple.
"""
regression = kwargs.get('regression', linear_regression)
a, b, r, stderr = regression(x, y)
pylab.title(kwargs.get('title', ''))
pylab.xlabel(kwargs.get('xlabel', ''))
pylab.ylabel(kwargs.get('ylabel', ''))
equation = (
r'$y\,=\,{:.3f}x\,{}\,{:.3f}$, '
r'$R^2=\,{:.3f}$ \n$\sigma\,=\,{:.3f}$'.format(
a, '-' if b < 0 else '+', abs(b), r*r, stderr))
if y is None:
x = np.array(x, copy=False)
y = x[:, 1]
x = x[:, 0]
else:
x = np.array(x)
y = np.array(y)
sc = pylab.scatter(x, y, **kwargs.get('scatter_kwargs', {}))
xlim = (x.min(), x.max())
plkw = kwargs.get('plot_kwargs', {}).copy()
plkw.setdefault('xlim', xlim)
plkw.setdefault('label', equation)
if kwargs.get('plot_trend', True):
line = plot_line(a, b, **plkw)
else:
line = None
if kwargs.get('plot_sigmas', False):
s_lines = []
sigma_kwargs = kwargs.get('sigma_kwargs', {'color': 'red', 'linestyle': 'dashed'})
for i in kwargs.get('sigma_values', range(-3, 4)):
s_lines.append(plot_line(a, b + i * stderr, xlim, **sigma_kwargs))
else:
s_lines = None
if kwargs.get('show_legend', True):
legend = pylab.legend(**kwargs.get('legend_kwargs', {'loc': 'upper left'}))
legend_frame = legend.get_frame()
legend_frame.set_alpha(kwargs.get('alpha_legend', 1.0))
else:
legend = None
return sc, line, s_lines, legend
def plot_function_3d(x, y, function, **kwargs):
"""Plot values of a function of two variables in 3D.
More on 3D plotting in pylab:
http://www.scipy.org/Cookbook/Matplotlib/mplot3D
Parameters
----------
x : array_like of float
The plotting range on X axis.
y : array_like of float
The plotting range on Y axis.
function : function
The function to plot.
plot_type : {'surface', 'wireframe', 'scatter', 'contour', 'contourf'}, keyword only, optional
The type of a plot, see
`scipy cookbook <http://www.scipy.org/Cookbook/Matplotlib/mplot3D>`_
for examples. The default value is 'surface'.
num_contours : int
The number of contours to plot, 50 by default.
xlabel : str, keyword only, optional
The X axis label. Empty by default.
ylabel : str, keyword only, optional
The Y axis label. Empty by default.
zlabel : str, keyword only, optional
The Z axis label. Empty by default.
title : str, keyword only, optional
The title. Empty by default.
**kwargs
Passed to the respective plotting function.
"""
import mpl_toolkits.mplot3d.axes3d as pylab3d
ax = pylab3d.Axes3D(pylab.gcf())
ax.set_xlabel(kwargs.pop('xlabel', ''))
ax.set_ylabel(kwargs.pop('ylabel', ''))
ax.set_zlabel(kwargs.pop('zlabel', ''))
ax.set_title(kwargs.pop('title', ''))
X, Y = np.meshgrid(x, y)
Z = []
for y_value in y:
Z.append([])
for x_value in x:
Z[-1].append(function(x_value, y_value))
Z = np.array(Z)
plot_type = kwargs.pop('plot_type', 'surface')
if plot_type == 'surface':
ax.plot_surface(X, Y, Z,
rstride=kwargs.pop('rstride', 1),
cstride=kwargs.pop('cstride', 1),
cmap=kwargs.pop('cmap', pylab.cm.jet),
**kwargs)
elif plot_type == 'wireframe':
ax.plot_wireframe(X, Y, Z, cmap=kwargs.pop('cmap', pylab.cm.jet), **kwargs)
elif plot_type == 'scatter':
ax.scatter3D(np.ravel(X), np.ravel(Y), np.ravel(Z), **kwargs)
elif plot_type == 'contour':
num_contours = kwargs.pop('num_contours', 50)
ax.contour3D(X, Y, Z, num_contours, cmap=kwargs.pop('cmap', pylab.cm.jet), **kwargs)
elif plot_type == 'contourf':
num_contours = kwargs.pop('num_contours', 50)
ax.contourf3D(X, Y, Z, num_contours, cmap=kwargs.pop('cmap', pylab.cm.jet), **kwargs)
else:
raise PyteomicsError('Unknown plot type: {}'.format(plot_type))
def plot_function_contour(x, y, function, **kwargs):
"""Make a contour plot of a function of two variables.
Parameters
----------
x, y : array_like of float
The positions of the nodes of a plotting grid.
function : function
The function to plot.
filling : bool
Fill contours if True (default).
num_contours : int
The number of contours to plot, 50 by default.
xlabel, ylabel : str, optional
The axes labels. Empty by default.
title : str, optional
The title. Empty by default.
**kwargs
Passed to :py:func:`pylab.contour` or :py:func:`pylab.contourf`.
"""
pylab.xlabel(kwargs.pop('xlabel', ''))
pylab.ylabel(kwargs.pop('ylabel', ''))
pylab.title(kwargs.pop('title', ''))
X, Y = np.meshgrid(x, y)
Z = []
for y_value in y:
Z.append([])
for x_value in x:
Z[-1].append(function(x_value, y_value))
Z = np.array(Z)
num_contours = kwargs.pop('num_contours', 50)
if kwargs.pop('filling', True):
plot_func = pylab.contourf
else:
plot_func = pylab.contour
plot_func(X, Y, Z, num_contours, cmap=kwargs.pop('cmap', pylab.cm.jet), **kwargs)
def plot_qvalue_curve(qvalues, *args, **kwargs):
"""
Plot a curve with q-values on the X axis and corresponding PSM number
(starting with ``1``) on the Y axis.
Parameters
----------
qvalues : array-like
An array of q-values for sorted PSMs.
xlabel : str, keyword only, optional
Label for the X axis. Default is "q-value".
ylabel : str, keyword only, optional
Label for the Y axis. Default is "# of PSMs".
title : str, keyword only, optional
The title. Empty by default.
*args
Given to :py:func:`pylab.plot` after `x` and `y`.
**kwargs
Given to :py:func:`pylab.plot`.
Returns
-------
out : matplotlib.lines.Line2D
"""
pylab.xlabel(kwargs.pop('xlabel', 'q-value'))
pylab.ylabel(kwargs.pop('ylabel', '# of PSMs'))
pylab.title(kwargs.pop('title', ''))
return pylab.plot(qvalues, 1 + np.arange(qvalues.size), *args, **kwargs)
def _default_plot_spectrum(spectrum, *args, **kwargs):
ax = kwargs.pop('ax', None) or pylab.gca()
if kwargs.pop('centroided', True):
kwargs.setdefault('align', 'center')
kwargs.setdefault('width', 0)
kwargs.setdefault('linewidth', 1)
kwargs.setdefault('edgecolor', 'k')
ax.bar(spectrum['m/z array'], spectrum['intensity array'], *args, **kwargs)
else:
ax.plot(spectrum['m/z array'], spectrum['intensity array'], *args, **kwargs)
return ax
def _spectrum_utils_plot(spectrum, *args, **kwargs):
with SpectrumUtilsColorScheme(kwargs.pop('colors', None)):
spectrum = _spectrum_utils_create_spectrum(spectrum, None, *args, **kwargs)
return sup.spectrum(spectrum)
def _spectrum_utils_iplot(spectrum, *args, **kwargs):
import spectrum_utils.iplot as supi
with SpectrumUtilsColorScheme(kwargs.pop('colors', None)):
spectrum = _spectrum_utils_create_spectrum(spectrum, None, *args, **kwargs)
return supi.spectrum(spectrum)
_plot_backends = {
'default': _default_plot_spectrum,
'spectrum_utils': _spectrum_utils_plot,
'spectrum_utils.iplot': _spectrum_utils_iplot,
}
def plot_spectrum(spectrum, *args, **kwargs):
"""
Plot a spectrum, assuming it is a dictionary containing "m/z array" and "intensity array".
Parameters
----------
spectrum : dict
A dictionary, as returned by pyteomics MS data parsers.
Must contain "m/z array" and "intensity array" keys with decoded arrays.
backend : str, keyword only, optional
One of `{'default', 'spectrum_utils', 'spectrum_utils.iplot'}`.
The `spectrum_utils` backend requires installing :py:mod:`spectrum_utils`.
The `spectrum_utils.iplot` backend requires installing :py:mod:`spectrum_utils[iplot]`.
xlabel : str, keyword only, optional
Label for the X axis. Default is "m/z".
ylabel : str, keyword only, optional
Label for the Y axis. Default is "intensity".
title : str, keyword only, optional
The title. Empty by default.
centroided : bool, keyword only, optional
Works only for the `default` backend.
If :py:const:`True` (default), peaks of the spectrum are plotted using :py:func:`pylab.bar`.
If :py:const:`False`, the arrays are simply plotted using :py:func:`pylab.plot`.
*args
When using `default` backend: given to :py:func:`pylab.plot` or :py:func:`pylab.bar` (depending on `centroided`).
**kwargs
When using `default` backend: given to :py:func:`pylab.plot` or :py:func:`pylab.bar` (depending on `centroided`).
min_intensity : float, keyword only, optional
Remove low-intensity peaks; this is a factor of maximum peak intensity. Default is 0 (no filtering).
Only works with `spectrum_utils` and `spectrum_utils.iplot` backends.
max_num_peaks : int or None, keyword only, optional
Remove low-intensity peaks; this is the number of peaks to keep. Default is :py:const:`None` (no filtering).
Only works with `spectrum_utils` and `spectrum_utils.iplot` backends.
scaling : one of `{'root', 'log', 'rank'}` or None, keyword only, optional
Scaling to apply to peak intensities. Only works with `spectrum_utils` and `spectrum_utils.iplot` backends.
max_intensity : float or None, keyword only, optional
Intensity of the most intense peak relative to which the peaks will be scaled
(the default is :py:const:`None`, which means that no scaling
relative to the most intense peak will be performed).
Only works with `spectrum_utils` and `spectrum_utils.iplot` backends.
Returns
-------
out : matplotlib.pyplot.Axes
"""
bname = kwargs.pop('backend', 'default')
backend = _plot_backends.get(bname)
if backend is None:
raise PyteomicsError('Unknown backend name: {}. Should be one of: {}.'.format(
bname, '; '.join(_plot_backends)))
pylab.xlabel(kwargs.pop('xlabel', 'm/z'))
pylab.ylabel(kwargs.pop('ylabel', 'intensity'))
if 'title' in kwargs:
pylab.title(kwargs.pop('title'))
return backend(spectrum, *args, **kwargs)
def _default_annotate_spectrum(spectrum, peptide, *args, **kwargs):
# common kwargs
types = kwargs.pop('ion_types', ('b', 'y'))
aa_mass = kwargs.pop('aa_mass', mass.std_aa_mass)
mass_data = kwargs.pop('mass_data', mass.nist_mass)
ion_comp = kwargs.pop('ion_comp', mass.std_ion_comp)
colors = {
'a': '#388E3C',
'b': '#1976D2',
'c': '#00796B',
'x': '#7B1FA2',
'y': '#D32F2F',
'z': '#F57C00',
}
colors.update(kwargs.pop('colors', {}))
ftol = kwargs.pop('ftol', None)
if ftol is None:
rtol = kwargs.pop('rtol', 1e-5)
text_kw = kwargs.pop('text_kw', dict(ha='center', clip_on=True, backgroundcolor='#ffffff99'))
precursor_charge = kwargs.pop('precursor_charge', None)
if precursor_charge is None:
precursor_charge = _get_precursor_charge(spectrum)
if precursor_charge is None:
raise PyteomicsError('Could not extract precursor charge from spectrum. Please specify `precursor_charge` kwarg.')
maxcharge = kwargs.pop('maxcharge', max(1, precursor_charge - 1))
ax = kwargs.get('ax', None)
# end of common kwargs
# backend-specific kwargs
centroided = kwargs.pop('centroided', True)
adjust = kwargs.pop('adjust_text', None)
if adjust or adjust is None:
try:
from adjustText import adjust_text
adjust_kw = kwargs.pop('adjust_kw', dict(
only_move={'text': 'y', 'points': 'y', 'objects': 'y'}, autoalign=False, force_text=(1, 1)))
except ImportError:
if adjust:
raise PyteomicsError('Install adjustText for text adjustment')
adjust = False
else:
if adjust is None:
adjust = True
# end of backend-specific kwargs
# Generate fragment m/z and name series
series = mass.fragment_series(peptide, ion_types=types, maxcharge=maxcharge,
aa_mass=aa_mass, mass_data=mass_data, ion_comp=ion_comp)
maxpeak = spectrum['intensity array'].max()
texts = []
for ion in types:
names, mz = zip(*series[ion].items())
c = colors.get(ion, colors.get(ion[0], 'blue'))
matrix = np.abs(spectrum['m/z array'] - np.array(mz).reshape(-1, 1))
if ftol is not None:
match = np.where(matrix < ftol)
else:
match = np.where(matrix / spectrum['m/z array'] < rtol)
pseudo_spec = {'m/z array': spectrum['m/z array'][match[1]], 'intensity array': spectrum['intensity array'][match[1]]}
plot_spectrum(pseudo_spec, centroided=True, edgecolor=c, ax=ax)
for j, i in zip(*match):
x = spectrum['m/z array'][i]
y = spectrum['intensity array'][i] + maxpeak * 0.02
name = names[j]
texts.append(pylab.text(x, y, name, color=c, **text_kw))
if adjust:
adjust_text(texts, **adjust_kw)
kwargs.setdefault('zorder', -1)
return plot_spectrum(spectrum, *args, centroided=centroided, **kwargs)
def _get_precursor_charge(spectrum):
try:
return mgf.MGFBase.parse_precursor_charge(spectrum['params']['charge'], list_only=True)[0]
except (PyteomicsError, KeyError):
pass
try:
return int(spectrum['precursorList']['precursor'][0]['selectedIonList']['selectedIon'][0]['charge state'])
except KeyError:
pass
return None
def _get_precursor_mz(spectrum):
try:
return spectrum['params']['pepmass'][0]
except KeyError:
pass
try:
return spectrum['precursorList']['precursor'][0]['selectedIonList']['selectedIon'][0]['selected ion m/z']
except KeyError:
pass
if 'attributes' in spectrum:
for attr in spectrum['attributes']:
if attr in {"MS:1000827", "MS:1000744", "MS:1002234"}:
return spectrum['attributes'][attr]
return None
def _spectrum_utils_create_spectrum(spectrum, *args, **kwargs):
if sus is None:
raise PyteomicsError('This backend requires `spectrum_utils>=0.4`.')
# backend-specific parameters
mz_range = kwargs.pop('mz_range', None)
min_intensity = kwargs.pop('min_intensity', 0.0)
max_num_peaks = kwargs.pop('max_num_peaks', None)
scaling = kwargs.pop('scaling', None)
max_intensity = kwargs.pop('max_intensity', None)
spectrum = sus.MsmsSpectrum(
'None', kwargs.pop('precursor_mz', None), kwargs.pop('precursor_charge', None),
spectrum['m/z array'], spectrum['intensity array'])
if mz_range:
spectrum = spectrum.set_mz_range(*mz_range)
spectrum = spectrum.filter_intensity(
min_intensity=min_intensity, max_num_peaks=max_num_peaks
).scale_intensity(scaling, max_intensity)
return spectrum
def _spectrum_utils_annotate_spectrum(spectrum, peptide, *args, **kwargs):
# common kwargs
aa_mass = kwargs.pop('aa_mass', mass.std_aa_mass)
types = kwargs.pop('ion_types', ('b', 'y'))
tol = kwargs.pop('ftol', None)
if tol is None:
tol = kwargs.pop('rtol', 1e-5) * 1e6
tol_mode = 'ppm'
else:
tol_mode = 'Da'
# kwargs.pop('text_kw', None) # not used
precursor_charge = kwargs.pop('precursor_charge', None)
if precursor_charge is None:
precursor_charge = _get_precursor_charge(spectrum)
if precursor_charge is None:
raise PyteomicsError(
'Could not extract precursor charge from spectrum. '
'Please specify `precursor_charge` keyword argument.')
maxcharge = kwargs.pop('maxcharge', max(1, precursor_charge - 1))
# end of common kwargs
# backend-specific parameters
remove_precursor_peak = kwargs.pop('remove_precursor_peak', False)
# peptide can be modX or proforma. spectrum_utils supports proforma only
aa_comp = kwargs.get('aa_comp')
mod_names = kwargs.get('mod_names')
prefix = kwargs.get('prefix')
try:
parsed_proforma = proforma.ProForma.parse(peptide, case_sensitive_aa=True)
peptide_pro = peptide
except Exception:
parsed_proforma = None
try:
peptide_pro = parser.to_proforma(peptide, aa_mass=aa_mass, aa_comp=aa_comp, mod_names=mod_names, prefix=prefix)
except Exception:
raise PyteomicsError("Cannot parse {} as ProForma or convert from modX".format(peptide))
precursor_mz = kwargs.pop('precursor_mz', None)
if precursor_mz is None:
precursor_mz = _get_precursor_mz(spectrum)
if precursor_mz is None:
try:
if aa_comp:
precursor_mz = mass.calculate_mass(peptide, aa_comp=aa_comp, charge=precursor_charge)
elif not parsed_proforma:
precursor_mz = mass.fast_mass2(peptide, aa_mass=aa_mass, charge=precursor_charge)
else:
precursor_mz = mass.mass_charge_ratio(parsed_proforma.mass, precursor_charge)
except PyteomicsError:
raise PyteomicsError('Cannot obtain precursor m/z, please specify `precursor_mz` argument.')
spectrum = _spectrum_utils_create_spectrum(spectrum, *args,
precursor_mz=precursor_mz, precursor_charge=precursor_charge, **kwargs)
if remove_precursor_peak:
spectrum = spectrum.remove_precursor_peak(tol, tol_mode)
spectrum = spectrum.annotate_proforma(peptide_pro, tol, tol_mode, types, max_ion_charge=maxcharge)
return spectrum
class SpectrumUtilsColorScheme:
"""Context manager that temporarily changes `spectrum_utils.plot.colors`."""
def __init__(self, colors):
self.colors = colors
self.previous_colors = sup.colors.copy()
def __enter__(self):
if self.colors:
sup.colors.update(self.colors)
def __exit__(self, *args, **kwargs):
sup.colors = self.previous_colors
def _spectrum_utils_annotate_plot(spectrum, peptide, *args, **kwargs):
with SpectrumUtilsColorScheme(kwargs.pop('colors', None)):
spectrum = _spectrum_utils_annotate_spectrum(spectrum, peptide, *args, **kwargs)
return spectrum, sup.spectrum(
spectrum, annot_kws=kwargs.pop('text_kw', None), ax=kwargs.pop('ax', None),
annot_fmt=kwargs.pop('annot_fmt', str), grid=kwargs.pop('grid', True))
def _spectrum_utils_annotate_iplot(spectrum, peptide, *args, **kwargs):
import spectrum_utils.iplot as supi
with SpectrumUtilsColorScheme(kwargs.pop('colors', None)):
spectrum = _spectrum_utils_annotate_spectrum(spectrum, peptide, *args, **kwargs)
return spectrum, supi.spectrum(
spectrum, annot_kws=kwargs.pop('text_kw', None),
annot_fmt=kwargs.pop('annot_fmt', str), grid=kwargs.pop('grid', True))
_annotation_backends = {
'default': _default_annotate_spectrum,
'spectrum_utils': _spectrum_utils_annotate_plot,
'spectrum_utils.iplot': _spectrum_utils_annotate_iplot,
}
def annotate_spectrum(spectrum, peptide, *args, **kwargs):
"""Plot a spectrum and annotate matching fragment peaks.
Parameters
----------
spectrum : dict
A spectrum as returned by Pyteomics parsers. Needs to have 'm/z array' and 'intensity array' keys.
peptide : str
A modX or ProForma sequence.
backend : str, keyword only, optional
One of `{'default', 'spectrum_utils', 'spectrum_utils.iplot'}`.
The `spectrum_utils` backend requires installing :py:mod:`spectrum_utils`.
The `spectrum_utils.iplot` backend requires installing :py:mod:`spectrum_utils[iplot]`.
ion_types : Container, keyword only, optional
Ion types to be considered for annotation. Default is `('b', 'y')`.
precursor_charge : int, keyword only, optional
If not specified, an attempt is made to extract it from `spectrum`.
maxcharge : int, keyword only, optional
Maximum charge state for fragment ions to be considered. Default is `precursor_charge - 1`.
colors : dict, keyword only, optional
Keys are ion types, values are colors to plot the annotated peaks with. Default depends on backend.
ftol : float, keyword only, optional
A fixed m/z tolerance value for peak matching. Alternative to `rtol`.
rtol : float, keyword only, optional
A relative m/z error for peak matching. Default is 10 ppm.
aa_mass : dict, keyword only, optional
A dictionary of amino acid residue masses.
text_kw : dict, keyword only, optional
Keyword arguments for :py:func:`pylab.text`.
xlabel : str, keyword only, optional
Label for the X axis. Default is "m/z". Does not work with `spectrum_utils.iplot` backend.
ylabel : str, keyword only, optional
Label for the Y axis. Default is "intensity". Does not work with `spectrum_utils.iplot` backend.
title : str, keyword only, optional
The title. Empty by default. Does not work with `spectrum_utils.iplot` backend.
ax : matplotlib.pyplot.Axes, keyword only, optional
Axes to draw the spectrum. Does not work with `spectrum_utils.iplot` backend.
*args
Passed to the plotting backend.
**kwargs
Passed to the plotting backend.
centroided : bool, keyword only, optional
Passed to :py:func:`plot_spectrum`. Only works with `default` backend.
ion_comp : dict, keyword only, optional
A dictionary defining ion compositions to override :py:const:`pyteomics.mass.std_ion_comp`.
Only works with `default` backend.
mass_data : dict, keyword only, optional
A dictionary of element masses to override :py:const:`pyteomics.mass.nist_mass`.
Only works with `default` backend.
adjust_text : bool, keyword only, optional
Adjust the overlapping text annotations using :py:mod:`adjustText`. Only works with `default` backend.
adjust_kw : dict, keyword only, optional
Keyword arguments for :py:func:`adjust_text`. Only works with `default` backend.
remove_precursor_peak : bool, keyword only, optional
Remove precursor peak from spectrum before annotation. Default is :py:const:`False`.
Only works with `spectrum_utils` and `spectrum_utils.iplot` backends.
min_intensity : float, keyword only, optional
Remove low-intensity peaks; this is a factor of maximum peak intensity. Default is 0 (no filtering).
Only works with `spectrum_utils` and `spectrum_utils.iplot` backends.
max_num_peaks : int or None, keyword only, optional
Remove low-intensity peaks; this is the number of peaks to keep. Default is :py:const:`None` (no filtering).
Only works with `spectrum_utils` and `spectrum_utils.iplot` backends.
scaling : one of `{'root', 'log', 'rank'}` or None, keyword only, optional
Scaling to apply to peak intensities. Only works with `spectrum_utils` and `spectrum_utils.iplot` backends.
max_intensity : float or None, keyword only, optional
Intensity of the most intense peak relative to which the peaks will be scaled
(the default is :py:const:`None`, which means that no scaling
relative to the most intense peak will be performed).
Only works with `spectrum_utils` and `spectrum_utils.iplot` backends.
annot_fmt : callable, keyword-only, optional
Passed to :py:func:`spectrum_utils.plot.spectrum`.
Only works with `spectrum_utils` and `spectrum_utils.iplot` backends.
grid : bool, keyword-only, optional
Passed to :py:func:`spectrum_utils.plot.spectrum`. Default is :py:const:`True`.
Only works with `spectrum_utils` and `spectrum_utils.iplot` backends.
aa_comp : dict, keyword only, optional
Amino acid compositions, including modified ones. If given, will be used for conversion from *modX* to ProForma.
mod_names : dict or callable, keyword only, optional
If given, will be used for conversion from *modX* to ProForma.
prefix : str, keyword only, optional
If given, will be used for conversion from *modX* to ProForma.
Returns
-------
out : matplotlib.pyplot.Axes
"""
bname = kwargs.pop('backend', 'default')
backend = _annotation_backends.get(bname)
if backend is None:
raise PyteomicsError('Unknown backend name: {}. Should be one of: {}.'.format(
bname, '; '.join(_annotation_backends)))
pylab.xlabel(kwargs.pop('xlabel', 'm/z'))
pylab.ylabel(kwargs.pop('ylabel', 'intensity'))
pylab.title(kwargs.pop('title', ''))
return backend(spectrum, peptide, *args, **kwargs)
def _spectrum_utils_mirror(spec_top, spec_bottom, spectrum_kws=None, ax=None, **kwargs):
with SpectrumUtilsColorScheme(kwargs.pop('colors', None)):
ax = sup.mirror(spec_top, spec_bottom, spectrum_kws=spectrum_kws, ax=ax)
ax.set_xlabel(kwargs.pop('xlabel', 'm/z'))
ax.set_ylabel(kwargs.pop('ylabel', 'intensity'))
ax.set_title(kwargs.pop('title', ''))
return ax
def _spectrum_utils_iplot_mirror(spec_top, spec_bottom, spectrum_kws=None, **kwargs):
import spectrum_utils.iplot as supi
with SpectrumUtilsColorScheme(kwargs.pop('colors', None)):
return supi.mirror(spec_top, spec_bottom, spectrum_kws=spectrum_kws)
_mirror_backends = {
'spectrum_utils': _spectrum_utils_mirror,
'spectrum_utils.iplot': _spectrum_utils_iplot_mirror,
}
def mirror(spec_top, spec_bottom, peptide=None, spectrum_kws=None, ax=None, **kwargs):
"""Create a mirror plot of two (possible annotated) spectra using `spectrum_utils`.
Parameters
----------
spec_top : dict
A spectrum as returned by Pyteomics parsers. Needs to have 'm/z array' and 'intensity array' keys.
spec_bottom : dict
A spectrum as returned by Pyteomics parsers. Needs to have 'm/z array' and 'intensity array' keys.
peptide : str or None, optional
A modX sequence or ProForma. If provided, the peaks will be annotated as peptide fragments.
spectrum_kws : dict or None, optional
Passed to :py:func:`spectrum_utils.plot.mirror`.
backend : str, keyword only, optional
One of {'spectrum_utils', 'spectrum_utils.iplot'}. Default is 'spectrum_utils'.
.. note ::
Requires :py:mod:`spectrum_utils` or :py:mod:`spectrun_utils[iplot]`, respectively.
ax : matplotlib.pyplot.Axes or None, optional
Passed to :py:func:`spectrum_utils.plot.mirror`. Works only for the 'spectrum_utils' backend.
xlabel : str, keyword only, optional
Label for the X axis. Default is "m/z". Works only for the 'spectrum_utils' backend.
ylabel : str, keyword only, optional
Label for the Y axis. Default is "intensity". Works only for the 'spectrum_utils' backend.
title : str, keyword only, optional
The title. Empty by default. Works only for the 'spectrum_utils' backend.
**kwargs : same as for :py:func:`annotate_spectrum` for `spectrum_utils` backends.
Returns
-------
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | true |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/fasta.py | pyteomics/fasta.py | """
fasta - manipulations with FASTA databases
==========================================
FASTA is a simple file format for protein sequence databases. Please refer to
`the NCBI website <http://www.ncbi.nlm.nih.gov/blast/fasta.shtml>`_
for the most detailed information on the format.
Data manipulation
-----------------
Classes
.......
Several classes of FASTA parsers are available. All of them have common features:
- context manager support;
- header parsing;
- direct iteration.
Available classes:
:py:class:`FASTABase` - common ancestor, suitable for type checking.
Abstract class.
:py:class:`FASTA` - text-mode, sequential parser.
Good for iteration over database entries.
:py:class:`IndexedFASTA` - binary-mode, indexing parser.
Supports direct indexing by header string.
:py:class:`TwoLayerIndexedFASTA` - additionally supports
indexing by extracted header fields.
:py:class:`UniProt` and :py:class:`IndexedUniProt`,
:py:class:`UniParc` and :py:class:`IndexedUniParc`,
:py:class:`UniMes` and :py:class:`IndexedUniMes`,
:py:class:`UniRef` and :py:class:`IndexedUniRef`,
:py:class:`SPD` and :py:class:`IndexedSPD`,
:py:class:`NCBI` and :py:class:`IndexedNCBI`,
:py:class:`RefSeq` and :py:class:`IndexedRefSeq`, - format-specific parsers.
Functions
.........
:py:func:`read` - returns an instance of the appropriate reader class,
for sequential iteration or random access.
:py:func:`chain` - read multiple files at once.
:py:func:`chain.from_iterable` - read multiple files at once, using an
iterable of files.
:py:func:`write` - write entries to a FASTA database.
:py:func:`parse` - parse a FASTA header.
Decoy sequence generation
-------------------------
:py:func:`decoy_sequence` - generate a decoy sequence from a given sequence, using
one of the other functions listed in this section or any other callable.
:py:func:`reverse` - generate a reversed decoy sequence.
:py:func:`shuffle` - generate a shuffled decoy sequence.
:py:func:`fused_decoy` - generate a "fused" decoy sequence.
Decoy database generation
-------------------------
:py:func:`write_decoy_db` - generate a decoy database and write it to a file.
:py:func:`decoy_db` - generate entries for a decoy database from a given FASTA
database.
:py:func:`decoy_entries` - generate decoy entries for an iterator.
:py:func:`decoy_chain` - a version of :py:func:`decoy_db` for multiple files.
:py:func:`decoy_chain.from_iterable` - like :py:func:`decoy_chain`, but with
an iterable of files.
Auxiliary
---------
:py:data:`std_parsers` - a dictionary with parsers for known FASTA header
formats.
-------------------------------------------------------------------------------
"""
# Copyright 2012 Anton Goloborodko, Lev Levitsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import random
from collections import namedtuple
import re
import abc
from . import auxiliary as aux
from .auxiliary.utils import add_metaclass
# A single FASTA entry: `description` is the header (a str, or a dict when a
# header parser is used), `sequence` is the protein sequence string.
Protein = namedtuple('Protein', ('description', 'sequence'))
# Default prefix prepended to decoy protein descriptions.
DECOY_PREFIX = 'DECOY_'
# Special dict key under which header parsers keep the unmodified header string.
RAW_HEADER_KEY = '__raw__'
def _add_raw_field(parser):
    """
    Wrap `parser` so that the parsed dictionary always contains the raw header
    string under :py:const:`RAW_HEADER_KEY`.

    Parameters
    ----------
    parser : func
        A header parser with signature ``(instance, descr)`` returning a dict.

    Returns
    -------
    out : func
        The wrapped parser. Raises :py:exc:`PyteomicsError` if the parsed dict
        already contains :py:const:`RAW_HEADER_KEY` with a conflicting value.
    """
    def _new_parser(instance, descr):
        parsed = parser(instance, descr)
        if RAW_HEADER_KEY not in parsed:
            parsed[RAW_HEADER_KEY] = descr
        elif parsed[RAW_HEADER_KEY] != descr:
            # The parser stored its own raw-header value that disagrees with the
            # actual header; refuse to overwrite it silently.
            # (Fixed message: the two concatenated literals previously produced
            # "corresponsingkey" — missing space and a typo.)
            raise aux.PyteomicsError('Cannot save raw protein header, since the corresponding '
                                     'key ({}) already exists.'.format(RAW_HEADER_KEY))
        return parsed
    return _new_parser
class FASTABase(object):
    """Abstract base class for FASTA file parsers.

    Can be used for type checking.
    """
    # Header parser callable; None means return headers verbatim.
    parser = None
    _ignore_comments = False
    # Characters that may start a header/comment line.
    _comments = set('>;')

    def __init__(self, source, **kwargs):
        self._ignore_comments = kwargs.pop('ignore_comments', False)
        custom_parser = kwargs.pop('parser', None)
        if custom_parser is not None:
            self.parser = custom_parser
        super(FASTABase, self).__init__(source, **kwargs)

    def _is_comment(self, line):
        # A line belongs to a header/comment if it starts with '>' or ';'.
        return line[0] in self._comments

    def get_entry(self, key):
        # Random access is only provided by the indexed subclasses.
        raise NotImplementedError
class FASTA(FASTABase, aux.FileReader):
    """Text-mode, sequential FASTA parser.
    Suitable for iteration over the file to obtain all entries in order.
    """
    def __init__(self, source, ignore_comments=False, parser=None, encoding=None):
        """Create a new FASTA parser object. Supports iteration,
        yields `(description, sequence)` tuples. Supports `with` syntax.
        Parameters
        ----------
        source : str or file-like
            File to read. If file object, it must be opened in *text* mode.
        ignore_comments : bool, optional
            If :py:const:`True` then ignore the second and subsequent lines of description.
            Default is :py:const:`False`, which concatenates multi-line descriptions into
            a single string.
        parser : function or None, optional
            Defines whether the FASTA descriptions should be parsed. If it is a
            function, that function will be given the description string, and
            the returned value will be yielded together with the sequence.
            The :py:data:`std_parsers` dict has parsers for several formats.
            Hint: specify :py:func:`parse` as the parser to apply automatic
            format recognition.
            Default is :py:const:`None`, which means return the header "as is".
        encoding : str or None, optional
            File encoding (if it is given by name).
        """
        super(FASTA, self).__init__(
            source, mode='r', parser_func=self._read, pass_file=False, args=(), kwargs={},
            encoding=encoding, ignore_comments=ignore_comments, parser=parser)
    def _read(self):
        # Generator over Protein(description, sequence) entries.
        # Invariant: accumulated_strings[0] (when present) is the description of
        # the current entry; any following elements are its sequence lines.
        accumulated_strings = []
        # Iterate through '>' after the file is over to retrieve the last entry.
        for string in itertools.chain(self._source, '>'):
            stripped_string = string.strip()
            # Skip empty lines.
            if not stripped_string:
                continue
            is_comment = self._is_comment(stripped_string)
            if is_comment:
                # If it is a continuing comment
                # (a comment line right after the header, before any sequence)
                if len(accumulated_strings) == 1:
                    if not self._ignore_comments:
                        accumulated_strings[0] += (' ' + stripped_string[1:])
                    else:
                        continue
                elif accumulated_strings:
                    # A new header after sequence lines: the previous entry is
                    # complete — finalize and yield it.
                    description = accumulated_strings[0]
                    sequence = ''.join(accumulated_strings[1:])
                    # Drop the translation stop sign.
                    if sequence and sequence[-1] == '*':
                        sequence = sequence[:-1]
                    if self.parser is not None:
                        description = self.parser(description)
                    yield Protein(description, sequence)
                    accumulated_strings = [stripped_string[1:]]
                else:
                    # accumulated_strings is empty; we're probably reading
                    # the very first line of the file
                    accumulated_strings.append(stripped_string[1:])
            else:
                accumulated_strings.append(stripped_string)
    def get_entry(self, key):
        # Sequential parsers cannot seek to a record; point users at the
        # indexed variants instead.
        raise aux.PyteomicsError('Direct indexing is not supported. '
                'Use IndexedFASTA and its subclasses')
def _reconstruct(cls, args, kwargs):
    # Module-level unpickling helper used by IndexedFASTA.__reduce_ex__:
    # re-instantiate `cls` while skipping index construction — the offset index
    # is restored separately from the pickled state via __setstate__.
    kwargs['_skip_index'] = True
    return cls(*args, **kwargs)
class IndexedFASTA(FASTABase, aux.TaskMappingMixin, aux.IndexedTextReader):
    """Indexed FASTA parser. Supports direct indexing by matched labels."""
    # Byte pattern separating consecutive FASTA records in the file.
    delimiter = '\n>'
    # Regex applied to each record to extract the key for the byte-offset index.
    label = r'^[\n]?>(.*)\s*'
    def __init__(self, source, ignore_comments=False, parser=None, **kwargs):
        """Create an indexed FASTA parser object.
        Parameters
        ----------
        source : str or file-like
            File to read. If file object, it must be opened in *binary* mode.
        ignore_comments : bool, optional
            If :py:const:`True` then ignore the second and subsequent lines of description.
            Default is :py:const:`False`, which concatenates multi-line descriptions into
            a single string.
        parser : function or None, optional
            Defines whether the FASTA descriptions should be parsed. If it is a
            function, that function will be given the description string, and
            the returned value will be yielded together with the sequence.
            The :py:data:`std_parsers` dict has parsers for several formats.
            Hint: specify :py:func:`parse` as the parser to apply automatic
            format recognition.
            Default is :py:const:`None`, which means return the header "as is".
        encoding : str or None, optional, keyword only
            File encoding. Default is UTF-8.
        block_size : int or None, optional, keyword only
            Number of bytes to consume at once.
        delimiter : str or None, optional, keyword only
            Overrides the FASTA record delimiter (default is ``'\\n>'``).
        label : str or None, optional, keyword only
            Overrides the FASTA record label pattern. Default is ``'^[\\n]?>(.*)'``.
        label_group : int or str, optional, keyword only
            Overrides the matched group used as key in the byte offset index.
            This in combination with `label` can be used to extract fields from headers.
            However, consider using :py:class:`TwoLayerIndexedFASTA` for this purpose.
        """
        super(IndexedFASTA, self).__init__(
            source, ignore_comments=ignore_comments, parser=parser,
            parser_func=self._read, pass_file=False, args=(), kwargs={}, **kwargs)
        # Saved so the instance can be re-created on unpickling (__reduce_ex__).
        self._init_args = (source, ignore_comments, parser)
        self._init_kwargs = kwargs
    def __reduce_ex__(self, protocol):
        # Recreate via _reconstruct (which skips index building); the index
        # itself is carried in the state from __getstate__.
        return (_reconstruct,
                (self.__class__, self._init_args, self._init_kwargs),
                self.__getstate__())
    def _read_protein_lines(self, lines):
        """Assemble one :py:class:`Protein` from the lines of a single record."""
        description = []
        sequence = []
        for string in lines:
            stripped_string = string.strip()
            if not stripped_string:
                continue
            is_comment = self._is_comment(stripped_string)
            if is_comment:
                # Keep additional description lines unless ignore_comments is set;
                # the first header line is always kept.
                if not description or not self._ignore_comments:
                    description.append(stripped_string[1:])
            else:
                sequence.append(stripped_string)
        description = ' '.join(description)
        sequence = ''.join(sequence)
        # Drop the translation stop sign.
        if sequence and sequence[-1] == '*':
            sequence = sequence[:-1]
        if self.parser is not None:
            description = self.parser(description)
        return Protein(description, sequence)
    def _item_from_offsets(self, offsets):
        # Read the raw record between the given byte offsets and parse it.
        start, end = offsets
        lines = self._read_lines_from_offsets(start, end)
        return self._read_protein_lines(lines)
    def _read(self, **kwargs):
        # Iterate over all records in offset-index order.
        for key, offsets in self._offset_index.items():
            yield self._item_from_offsets(offsets)
    def get_entry(self, key):
        return self.get_by_id(key)
class TwoLayerIndexedFASTA(IndexedFASTA):
    """Parser with two-layer index. Extracted groups are mapped to full headers (where possible),
    full headers are mapped to byte offsets.
    When indexed, the key is looked up in both indexes, allowing access by meaningful IDs
    (like UniProt accession) and by full header string.
    """
    # Default group of `header_pattern` used as the second-level key.
    header_group = 1
    # Regex extracting the second-level key from a header; None disables the second index.
    header_pattern = None
    def __init__(self, source, header_pattern=None, header_group=None,
            ignore_comments=False, parser=None, **kwargs):
        """Open `source` and create a two-layer index for convenient random access
        both by full header strings and extracted fields.
        Parameters
        ----------
        source : str or file-like
            File to read. If file object, it must be opened in *binary* mode.
        header_pattern : str or RE or None, optional
            Pattern to match the header string. Must capture the group used
            for the second index. If :py:const:`None` (default), second-level index is not created.
        header_group : int or str or None, optional
            Defines which group is used as key in the second-level index.
            Default is 1.
        ignore_comments : bool, optional
            If :py:const:`True` then ignore the second and subsequent lines of description.
            Default is :py:const:`False`, which concatenates multi-line descriptions into
            a single string.
        parser : function or None, optional
            Defines whether the FASTA descriptions should be parsed. If it is a
            function, that function will be given the description string, and
            the returned value will be yielded together with the sequence.
            The :py:data:`std_parsers` dict has parsers for several formats.
            Hint: specify :py:func:`parse` as the parser to apply automatic
            format recognition.
            Default is :py:const:`None`, which means return the header "as is".
        Other arguments : the same as for :py:class:`IndexedFASTA`.
        """
        super(TwoLayerIndexedFASTA, self).__init__(source, ignore_comments, parser, **kwargs)
        if header_group is not None:
            self.header_group = header_group
        if header_pattern is not None:
            self.header_pattern = header_pattern
        # On unpickling (_skip_index set) the second index is restored from state.
        if not kwargs.get('_skip_index', False):
            self.build_second_index()
        self._init_args = (source, header_pattern, header_group, ignore_comments, parser)
        self._init_kwargs = kwargs
    def build_second_index(self):
        """Create the mapping from extracted field to whole header string."""
        if self.header_pattern is None:
            self._id2header = None
        else:
            index = {}
            for key in self._offset_index:
                match = re.match(self.header_pattern, key)
                if match:
                    index[match.group(self.header_group)] = key
            self._id2header = index
    def __getstate__(self):
        state = super(TwoLayerIndexedFASTA, self).__getstate__()
        state['id2header'] = self._id2header
        return state
    def __setstate__(self, state):
        super(TwoLayerIndexedFASTA, self).__setstate__(state)
        self._id2header = state['id2header']
    def get_by_id(self, key):
        """Get the entry by value of header string or extracted field."""
        try:
            return super(TwoLayerIndexedFASTA, self).get_by_id(key)
        except KeyError:
            # Fall back to the second-level index, if it exists.
            if self._id2header:
                header = self._id2header.get(key)
                if header is not None:
                    return super(TwoLayerIndexedFASTA, self).get_entry(header)
            raise KeyError(key)
    def get_header(self, key):
        """Return the full header string for an extracted-field `key`.
        Raises :py:exc:`KeyError` if the key is unknown or the second-level
        index is disabled.
        """
        # Guard against self._id2header being None (header_pattern not set),
        # which previously raised TypeError instead of KeyError.
        if self._id2header and key in self._id2header:
            return self._id2header[key]
        raise KeyError(key)
    def __contains__(self, key):
        # Guard against self._id2header being None (header_pattern not set),
        # which previously raised TypeError on membership tests.
        return super(TwoLayerIndexedFASTA, self).__contains__(key) or (
            self._id2header is not None and key in self._id2header)
class _FastaParserFlavorMeta(abc.ABCMeta):
    """Metaclass for flavored FASTA parser classes.

    It wraps each flavor's ``parser`` method so that the raw header is always
    preserved (see :py:func:`_add_raw_field`), and for "concrete" reader classes
    (a flavor mixin combined with a reader base) it generates a unified
    ``__init__`` and a class docstring.
    """
    def __new__(mcs, name, bases, namespace):
        if "parser" in namespace:
            # Keep the raw header in the parsed dict under RAW_HEADER_KEY.
            namespace["parser"] = _add_raw_field(namespace["parser"])
        if name != 'FlavoredMixin':
            reader_type = None
            for t in (FASTA, IndexedFASTA, TwoLayerIndexedFASTA):
                if t in bases:
                    reader_type = t
            if reader_type is not None:
                # this is a "concrete" reader class
                # add a unified __init__ method for it
                for c in bases:
                    if issubclass(c, FlavoredMixin):
                        flavor = c
                        break
                else:
                    # Fixed: the message placeholder was never filled
                    # (the .format(name) call was missing).
                    raise aux.PyteomicsError(
                        'Could not detect flavor of {}, not a subclass of `FlavoredMixin`.'.format(name))

                def __init__(self, source, parse=True, **kwargs):
                    reader_type.__init__(self, source, **kwargs)
                    flavor.__init__(self, parse)
                    self._init_args = (source, parse)
                    self._init_kwargs = kwargs

                flavor_name = name[:-5]
                type_name = "Text-mode" if reader_type is FASTA else "Indexed"
                __init__.__doc__ = """Creates a :py:class:`{}` object.
                Parameters
                ----------
                source : str or file
                    The file to read. If a file object, it needs to be in *{}* mode.
                parse : bool, optional
                    Defines whether the descriptions should be parsed in the produced tuples.
                    Default is :py:const:`True`.
                kwargs : passed to the :py:class:`{}` constructor.
                """.format(name, 'text' if reader_type is FASTA else 'binary', reader_type.__name__)
                namespace['__init__'] = __init__
                namespace['__doc__'] = """{} parser for {} FASTA files.""".format(type_name, flavor_name)
        return super(_FastaParserFlavorMeta, mcs).__new__(mcs, name, bases, namespace)
@add_metaclass(_FastaParserFlavorMeta)
class FlavoredMixin():
    """Parser aimed at a specific FASTA flavor.
    Subclasses should define `parser` and `header_pattern`.
    The `parse` argument in :py:meth:`__init__` defines whether description is
    parsed in output.
    """
    def __init__(self, parse=True):
        # Disabling parsing falls back to returning headers verbatim
        # (FASTABase treats a None parser as "no parsing").
        if not parse:
            self.parser = None
class UniProtMixin(FlavoredMixin):
    # Matches UniProtKB headers: db|accession|entry-name, protein name, then
    # optional OS/OX/GN/PE/SV fields.
    header_pattern = r'^(?P<db>\w+)\|(?P<id>[-\w]+)\|(?P<entry>\w+)\s+(?P<name>.*?)(?:(\s+OS=(?P<OS>[^=]+))|(\s+OX=(?P<OX>\d+))|(\s+GN=(?P<GN>[^=]+))|(\s+PE=(?P<PE>\d))|(\s+SV=(?P<SV>\d+)))*\s*$'
    # The accession ('id' group) is used as the second-level index key.
    header_group = 'id'
    def parser(self, header):
        """Parse a UniProtKB header into a dict of its fields."""
        info = re.match(self.header_pattern, header).groupdict()
        # Drop optional fields that were absent from the header.
        for key in ['OS', 'OX', 'GN', 'PE', 'SV']:
            if info[key] is None:
                del info[key]
        # The entry name has the form '<gene_id>_<taxon>'.
        info['gene_id'], info['taxon'] = info['entry'].split('_')
        _intify(info, ('PE', 'SV', 'OX'))
        return info
# Concrete readers; __init__ and __doc__ are generated by _FastaParserFlavorMeta.
class UniProt(UniProtMixin, FASTA):
    pass
class IndexedUniProt(UniProtMixin, TwoLayerIndexedFASTA):
    pass
class UniRefMixin(FlavoredMixin):
    # Matches UniRef headers: cluster ID, cluster name, then optional
    # n=/Tax=/TaxID=/RepID= fields.
    header_pattern = r'^(?P<id>\S+)\s+(?P<cluster>.*?)(?:(\s+n=(?P<n>\d+))|(\s+Tax=(?P<Tax>.+?))|(\s+TaxID=(?P<TaxID>\S+))|(\s+RepID=(?P<RepID>\S+)))*\s*$'
    header_group = 'id'
    def parser(self, header):
        """Parse a UniRef header into a dict of its fields."""
        # NOTE(review): sanity check that this looks like a UniRef header;
        # raises AssertionError (and is stripped under -O) rather than
        # PyteomicsError — confirm this is intentional.
        assert 'Tax' in header
        info = re.match(self.header_pattern, header).groupdict()
        # Drop optional fields that were absent from the header.
        for key in ['TaxID', 'Tax', 'RepID', 'n']:
            if info[key] is None:
                del info[key]
        _intify(info, ('n',))
        return info
# Concrete readers; __init__ and __doc__ are generated by _FastaParserFlavorMeta.
class UniRef(UniRefMixin, FASTA):
    pass
class IndexedUniRef(UniRefMixin, TwoLayerIndexedFASTA):
    pass
class UniParcMixin(FlavoredMixin):
    """Parser mixin for UniParc headers of the form ``<id> status=<status>``."""
    header_pattern = r'(\S+)\s+status=(\w+)\s*$'

    def parser(self, header):
        match = re.match(self.header_pattern, header)
        accession, status = match.groups()
        return {'id': accession, 'status': status}
# Concrete readers; __init__ and __doc__ are generated by _FastaParserFlavorMeta.
class UniParc(UniParcMixin, FASTA):
    pass
class IndexedUniParc(UniParcMixin, TwoLayerIndexedFASTA):
    pass
class UniMesMixin(FlavoredMixin):
    # Matches UniMES headers: ID, name, then a run of 'key=value' pairs.
    header_pattern = r'^(\S+)\s+([^=]*\S)((\s+\w+=[^=]+(?!\w*=))+)\s*$'
    def parser(self, header):
        """Parse a UniMES header into a dict of its fields."""
        # NOTE(review): sanity check for the expected flavor; raises
        # AssertionError (stripped under -O) rather than PyteomicsError.
        assert 'OS=' in header and 'SV=' in header and 'PE=' not in header
        # The pattern has 4 groups; the 4th is the inner repeated group of the
        # key=value run and is discarded here.
        ID, name, pairs, _ = re.match(self.header_pattern, header).groups()
        info = {'id': ID, 'name': name}
        info.update(_split_pairs(pairs))
        _intify(info, ('SV',))
        return info
# Concrete readers; __init__ and __doc__ are generated by _FastaParserFlavorMeta.
class UniMes(UniMesMixin, FASTA):
    pass
class IndexedUniMes(UniMesMixin, TwoLayerIndexedFASTA):
    pass
class SPDMixin(FlavoredMixin):
    """Parser mixin for SPD headers: ``<id> | <gene_id>_<taxon> | <description>``."""
    header_pattern = r'^([^|]+?)\s*\|\s*(([^|]+?)_([^|]+?))\s*\|\s*([^|]+?)\s*$'

    def parser(self, header):
        # Sanity check: SPD headers carry no key=value fields.
        assert '=' not in header
        match = re.match(self.header_pattern, header)
        accession, gene, gene_id, taxon, description = match.groups()
        return {'id': accession, 'gene': gene, 'description': description,
                'taxon': taxon, 'gene_id': gene_id}
# Concrete readers; __init__ and __doc__ are generated by _FastaParserFlavorMeta.
class SPD(SPDMixin, FASTA):
    pass
class IndexedSPD(SPDMixin, TwoLayerIndexedFASTA):
    pass
class NCBIMixin(FlavoredMixin):
    """Parser mixin for NCBI headers: ``<id> <description> [<organism>]``."""
    header_pattern = r'^(\S+)\s+(.*\S)\s+\[(.*)\]'

    def parser(self, header):
        match = re.match(self.header_pattern, header)
        accession, description, organism = match.groups()
        return {'id': accession, 'description': description, 'taxon': organism}
# Concrete readers; __init__ and __doc__ are generated by _FastaParserFlavorMeta.
class NCBI(NCBIMixin, FASTA):
    pass
class IndexedNCBI(NCBIMixin, TwoLayerIndexedFASTA):
    pass
class RefSeqMixin(FlavoredMixin):
    """Parser mixin for RefSeq headers: ``ref|<id>| <description> [<organism>]``."""
    header_pattern = r'^ref\|([^|]+)\|\s*([^\[]*\S)\s*\[(.*)\]'

    def parser(self, header):
        match = re.match(self.header_pattern, header)
        accession, description, organism = match.groups()
        return {'id': accession, 'description': description, 'taxon': organism}
# Concrete readers; __init__ and __doc__ are generated by _FastaParserFlavorMeta.
class RefSeq(RefSeqMixin, FASTA):
    pass
class IndexedRefSeq(RefSeqMixin, TwoLayerIndexedFASTA):
    pass
def read(source=None, use_index=None, flavor=None, **kwargs):
    """Parse a FASTA file. This function serves as a dispatcher between
    different parsers available in this module.
    Parameters
    ----------
    source : str or file or None, optional
        A file object (or file name) with a FASTA database. Default is
        :py:const:`None`, which means read standard input.
    use_index : bool, optional
        If :py:const:`True`, the created parser object will be an instance of
        :py:class:`IndexedFASTA`. If :py:const:`False` (default), it will be
        an instance of :py:class:`FASTA`.
    flavor : str or None, optional
        A supported FASTA header format. If specified, a format-specific
        parser instance is returned.
        .. note:: See :py:data:`std_parsers` for supported flavors.
    Returns
    -------
    out : iterator of tuples
        A named 2-tuple with FASTA header (str or dict) and sequence (str).
        Attributes 'description' and 'sequence' are also provided.
    """
    try:
        # `flavor and flavor.lower()` maps None -> None (the generic parser key)
        # and a flavor name -> its lowercase form.
        parser = std_parsers[flavor and flavor.lower()]
    except KeyError:
        raise aux.PyteomicsError('No parser for flavor: {}. Supported flavors: {}'.format(
            flavor, ', '.join(map(str, std_parsers))))
    # Resolve use_index (may depend on whether `source` is text- or binary-mode);
    # each std_parsers entry maps False/True to the sequential/indexed class.
    use_index = aux._check_use_index(source, use_index, False)
    return parser[use_index](source, **kwargs)
@aux._file_writer()
def write(entries, output=None):
    """
    Create a FASTA file with `entries`.
    Parameters
    ----------
    entries : iterable of (str/dict, str) tuples
        An iterable of 2-tuples in the form (description, sequence).
        If description is a dictionary, it must have a special key, whose value
        will be written as protein description. The special key is defined by the variable
        :py:const:`RAW_HEADER_KEY`.
    output : file-like or str, optional
        A file open for writing or a path to write to. If the file exists,
        it will be opened for writing. Default is :py:const:`None`, which
        means write to standard output.
        .. note::
            The default mode for output files specified by name has been changed
            from `a` to `w` in *pyteomics 4.6*. See `file_mode` to override the mode.
    file_mode : str, keyword only, optional
        If `output` is a file name, defines the mode the file will be opened in.
        Otherwise will be ignored. Default is `'w'`.
        .. note ::
            The default changed from `'a'` in *pyteomics 4.6*.
    Returns
    -------
    output_file : file object
        The file where the FASTA is written.
    """
    for descr, seq in entries:
        if isinstance(descr, str):
            # Newlines inside the description become ';' continuation lines.
            output.write('>' + descr.replace('\n', '\n;') + '\n')
        elif isinstance(descr, dict) and RAW_HEADER_KEY in descr:
            # Parsed headers are written back from their stored raw form.
            output.write('>' + descr[RAW_HEADER_KEY].replace('\n', '\n;') + '\n')
        else:
            raise aux.PyteomicsError('Cannot use provided description: ' + repr(descr))
        # Wrap the sequence at 70 characters per line; a blank line ends the entry.
        output.write(''.join([('%s\n' % seq[i:i+70]) for i in range(0, len(seq), 70)]) + '\n')
    return output.file
def reverse(sequence, keep_nterm=False, keep_cterm=False):
    """
    Create a decoy sequence by reversing the original one.
    Parameters
    ----------
    sequence : str
        The initial sequence string.
    keep_nterm : bool, optional
        If :py:const:`True`, then the N-terminal residue will be kept.
        Default is :py:const:`False`.
    keep_cterm : bool, optional
        If :py:const:`True`, then the C-terminal residue will be kept.
        Default is :py:const:`False`.
    Returns
    -------
    decoy_sequence : str
        The decoy sequence.
    """
    start = 1 if keep_nterm else 0
    end = len(sequence) - 1 if keep_cterm else len(sequence)
    # Nothing to reverse. The `>=` also covers a one-residue sequence with both
    # termini kept (start=1, end=0), which previously duplicated the residue.
    if start >= end:
        return sequence
    return sequence[:start] + sequence[start:end][::-1] + sequence[end:]
def shuffle(sequence, keep_nterm=False, keep_cterm=False, keep_nterm_M=False, fix_aa=''):
    """
    Create a decoy sequence by shuffling the original one.
    Parameters
    ----------
    sequence : str
        The initial sequence string.
    keep_nterm : bool, optional
        If :py:const:`True`, then the N-terminal residue will be kept.
        Default is :py:const:`False`.
    keep_cterm : bool, optional
        If :py:const:`True`, then the C-terminal residue will be kept.
        Default is :py:const:`False`.
    keep_nterm_M : bool, optional
        If :py:const:`True`, then the N-terminal methionine will be kept.
        Default is :py:const:`False`.
    fix_aa : iterable, optional
        Single letter codes for amino acids that should preserve their position
        during shuffling.
        Default is ''.
    Returns
    -------
    decoy_sequence : str
        The decoy sequence.
    """
    if not sequence:
        return ''
    # Keep the first residue (explicit N-term flag, or a leading methionine)
    # and shuffle the remainder recursively.
    if keep_nterm or (keep_nterm_M and sequence[0] == 'M'):
        return sequence[0] + shuffle(sequence[1:], keep_cterm=keep_cterm, fix_aa=fix_aa)
    # Keep the last residue and shuffle the prefix recursively.
    if keep_cterm:
        return shuffle(sequence[:-1], fix_aa=fix_aa) + sequence[-1]
    if not isinstance(fix_aa, str):
        fix_aa = ''.join(fix_aa)
    pinned = []  # (index, residue) pairs that must stay in place
    if fix_aa:
        movable = []
        cursor = 0
        # Split the sequence into fixed residues and the movable remainder.
        for match in re.finditer(r'[{}]'.format(fix_aa), sequence):
            pinned.append((match.start(), sequence[match.start()]))
            movable.extend(sequence[cursor:match.start()])
            cursor = match.end()
        movable.extend(sequence[cursor:])
    else:
        movable = list(sequence)
    random.shuffle(movable)
    # Re-insert the fixed residues at their original positions
    # (insertion in ascending index order keeps positions correct).
    for index, residue in pinned:
        movable.insert(index, residue)
    return ''.join(movable)
def fused_decoy(sequence, decoy_mode='reverse', sep='R', **kwargs):
    """
    Create a "fused" decoy sequence: a decoy sequence, a separator residue,
    then the original sequence.
    The method and its use cases are described in:
        Ivanov, M. V., Levitsky, L. I., & Gorshkov, M. V. (2016).
        `Adaptation of Decoy Fusion Strategy for Existing Multi-Stage Search Workflows.
        <http://doi.org/10.1007/s13361-016-1436-7>`_
        Journal of The American Society for Mass Spectrometry, 27(9), 1579-1582.
    Parameters
    ----------
    sequence : str
        The initial sequence string.
    decoy_mode : str or callable, optional
        Type of decoy sequence to use. Should be one of the standard modes or any callable.
        Standard modes are:
            - 'reverse' for :py:func:`reverse`;
            - 'shuffle' for :py:func:`shuffle`;
            - 'fused' for :py:func:`fused_decoy` (if you love recursion).
        Default is 'reverse'.
    sep : str, optional
        Amino acid motif that separates the decoy sequence from the target one.
        This setting should reflect the enzyme specificity used in the search against the
        database being generated. Default is 'R', which is suitable for trypsin searches.
    **kwargs : given to the decoy generation function.
    Examples
    --------
    >>> fused_decoy('PEPT')
    'TPEPRPEPT'
    >>> fused_decoy('MPEPT', 'shuffle', 'K', keep_nterm=True)
    'MPPTEKMPEPT'
    """
    return decoy_sequence(sequence, decoy_mode, **kwargs) + sep + sequence
# Dispatch table for the standard decoy generation modes.
_decoy_functions = {'reverse': reverse, 'shuffle': shuffle, 'fused': fused_decoy}


def decoy_sequence(sequence, mode='reverse', **kwargs):
    """
    Create a decoy sequence out of a given sequence string.
    Parameters
    ----------
    sequence : str
        The initial sequence string.
    mode : str or callable, optional
        Type of decoy sequence. Should be one of the standard modes or any callable.
        Standard modes are:
            - 'reverse' for :py:func:`reverse`;
            - 'shuffle' for :py:func:`shuffle`;
            - 'fused' for :py:func:`fused_decoy`.
        Default is 'reverse'.
    **kwargs : given to the decoy function.
    Returns
    -------
    decoy_sequence : str
        The decoy sequence.
    """
    if isinstance(mode, str):
        # Resolve a mode name through the dispatch table.
        func = _decoy_functions.get(mode)
        if func is None:
            raise aux.PyteomicsError('Unsupported decoy mode: {}'.format(mode))
    else:
        func = mode
    return func(sequence, **kwargs)
def decoy_entries(entries, mode='reverse', prefix=DECOY_PREFIX, decoy_only=True, **kwargs):
    """Iterate over protein `entries` (tuples) and produce decoy entries.
    The `entries` are only iterated once.
    Parameters
    ----------
    entries : iterable of tuples
        Any iterable of (description, sequence) pairs.
    mode : str or callable, optional
        Algorithm of decoy sequence generation. 'reverse' by default.
        See :py:func:`decoy_sequence` for more information.
    prefix : str, optional
        A prefix to the protein descriptions of decoy entries. The default
        value is `'DECOY_'`.
    decoy_only : bool, optional
        If set to :py:const:`True`, only the decoy entries will be written to
        `output`. If :py:const:`False`, each consumed entry is yielded unchanged,
        followed by its decoy counterpart.
        :py:const:`True` by default.
    **kwargs : given to :py:func:`decoy_sequence`.
    Returns
    -------
    out : iterator
        An iterator over new entries.
    """
    for item in entries:
        if not decoy_only:
            # Yield the original entry first, then its decoy.
            yield item
        # item[0] is the description, item[1] the sequence; works for plain
        # tuples as well as Protein namedtuples.
        yield Protein(prefix + item[0], decoy_sequence(item[1], mode, **kwargs))
@aux._file_reader()
def decoy_db(source=None, mode='reverse', prefix=DECOY_PREFIX, decoy_only=False,
ignore_comments=False, parser=None, **kwargs):
"""Iterate over sequences for a decoy database out of a given ``source``.
Parameters
----------
source : file-like object or str or None, optional
A path to a FASTA database or a file object itself. Default is
:py:const:`None`, which means read standard input.
mode : str or callable, optional
Algorithm of decoy sequence generation. 'reverse' by default.
See :py:func:`decoy_sequence` for more information.
prefix : str, optional
A prefix to the protein descriptions of decoy entries. The default
value is `'DECOY_'`.
decoy_only : bool, optional
If set to :py:const:`True`, only the decoy entries will be written to
`output`. If :py:const:`False`, the entries from `source` will be
written first.
:py:const:`False` by default.
ignore_comments : bool, optional
If True then ignore the second and subsequent lines of description.
Default is :py:const:`False`.
parser : function or None, optional
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | true |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/pepxml.py | pyteomics/pepxml.py | """
pepxml - pepXML file reader
===========================
Summary
-------
`pepXML <http://tools.proteomecenter.org/wiki/index.php?title=Formats:pepXML>`_
was the first widely accepted format for proteomics search engines' output.
Even though it is to be replaced by a community standard
`mzIdentML <http://www.psidev.info/index.php?q=node/454>`_, it is still used
commonly.
This module provides minimalistic infrastructure for access to data stored in
pepXML files. The most important function is :py:func:`read`, which
reads peptide-spectum matches and related information and saves them into
human-readable dicts. This function relies on the terminology of the underlying
`lxml library <http://lxml.de/>`_.
Data access
-----------
:py:class:`PepXML` - a class representing a single pepXML file.
Other data access functions use this class internally.
:py:func:`read` - iterate through peptide-spectrum matches in a pepXML
file. Data for a single spectrum are converted to an easy-to-use dict.
:py:func:`chain` - read multiple files at once.
:py:func:`chain.from_iterable` - read multiple files at once, using an
iterable of files.
:py:func:`DataFrame` - read pepXML files into a :py:class:`pandas.DataFrame`.
Target-decoy approach
---------------------
:py:func:`filter` - filter PSMs from a chain of pepXML files to a specific FDR
using TDA.
:py:func:`filter.chain` - chain a series of filters applied independently to
several files.
:py:func:`filter.chain.from_iterable` - chain a series of filters applied
independently to an iterable of files.
:py:func:`filter_df` - filter pepXML files and return a :py:class:`pandas.DataFrame`.
:py:func:`fdr` - estimate the false discovery rate of a PSM set using the
target-decoy approach.
:py:func:`qvalues` - get an array of scores and local FDR values for a PSM
set using the target-decoy approach.
:py:func:`is_decoy` - determine whether a PSM is decoy or not.
Miscellaneous
-------------
:py:func:`roc_curve` - get a receiver-operator curve (min PeptideProphet
probability in a sample vs. false discovery rate) of PeptideProphet analysis.
Deprecated functions
--------------------
:py:func:`iterfind` - iterate over elements in a pepXML file.
You can just call the corresponding method of the :py:class:`PepXML`
object.
:py:func:`version_info` - get information about pepXML version and schema.
You can just read the corresponding attribute of the :py:class:`PepXML`
object.
Dependencies
------------
This module requires :py:mod:`lxml`.
-------------------------------------------------------------------------------
"""
# Copyright 2012 Anton Goloborodko, Lev Levitsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lxml import etree
from . import xml, auxiliary as aux, _schema_defaults
class PepXML(xml.MultiProcessingXML, xml.IndexSavingXML):
    """
    Parser class for pepXML files.

    Iterates over ``<spectrum_query>`` elements by default and supports
    byte-offset indexing by the ``spectrum`` attribute (inherited from
    :py:class:`xml.IndexSavingXML`), as well as multiprocessing map/apply
    (inherited from :py:class:`xml.MultiProcessingXML`).
    """
    file_format = 'pepXML'
    # root tag of a pepXML document
    _root_element = 'msms_pipeline_analysis'
    _default_schema = _schema_defaults._pepxml_schema_defaults
    _default_version = '1.15'
    # element type yielded on iteration
    _default_iter_tag = 'spectrum_query'
    _indexed_tags = {'spectrum_query'}
    # indexed by the 'spectrum' attribute value
    _indexed_tag_keys = {'spectrum_query': 'spectrum'}
    _default_id_attr = 'spectrum'
    # these wrapper elements are merged into their parent dict
    _structures_to_flatten = {'search_score_summary', 'modification_info'}
    # attributes which contain unconverted values
    _convert_items = {'float': {'calc_neutral_pep_mass', 'massdiff', 'probability', 'variable', 'static'},
                      'int': {'start_scan', 'end_scan', 'index', 'num_matched_peptides'},
                      'bool': {'is_rejected'},
                      'floatarray': {'all_ntt_prob'}}.items()
    def _get_info_smart(self, element, **kwargs):
        """Extract the info in a smart way depending on the element type.

        Post-processes the generic :py:meth:`_get_info` output: converts
        string attribute values to numeric types, collapses score lists into
        dicts, flattens single search results, and normalizes protein and
        modification records.
        """
        try:
            name = kwargs.pop('ename')
        except KeyError:
            name = xml._local_name(element)
        rec = kwargs.pop('recursive', None)
        # the root element is parsed shallowly by default (it would otherwise
        # pull the entire document); all other elements recurse by default
        if name == self._root_element:
            info = self._get_info(element, ename=name, recursive=(rec if rec is not None else False), **kwargs)
        else:
            info = self._get_info(element, ename=name, recursive=(rec if rec is not None else True), **kwargs)
        def safe_float(s):
            # tolerate the '+-0...' notation; any other unparseable value is
            # returned unchanged as a string
            try:
                return float(s)
            except ValueError:
                if s.startswith('+-0'):
                    return 0
                return s
        converters = {'float': safe_float, 'int': int,
                      'bool': lambda x: x.lower() in {'1', 'true'},
                      'floatarray': lambda x: list(map(float, x[1:-1].split(',')))}
        # convert attribute strings listed in _convert_items to real types;
        # iterate over a copy since info is mutated in the loop
        for k, v in dict(info).items():
            for t, s in self._convert_items:
                if k in s:
                    del info[k]
                    info[k] = converters[t](v)
        # collapse lists of single-key dicts (scores, parameters) into one
        # dict, converting values to float where possible
        for k in {'search_score', 'parameter'}:
            if k in info and isinstance(info[k], list) and all(
                    isinstance(x, dict) and len(x) == 1 for x in info[k]):
                scores = {}
                for score in info[k]:
                    name, value = score.popitem()
                    try:
                        scores[name] = float(value)
                    except ValueError:
                        scores[name] = value
                info[k] = scores
        # a single search result is flattened into the query dict
        if 'search_result' in info and len(info['search_result']) == 1:
            info.update(info['search_result'][0])
            del info['search_result']
        # gather per-protein fields into a list of dicts under 'proteins'
        if 'protein' in info and 'peptide' in info:
            info['proteins'] = [{'protein': info.pop('protein'), 'protein_descr': info.pop('protein_descr', None)}]
            for add_key in {'peptide_prev_aa', 'peptide_next_aa', 'protein_mw'}:
                if add_key in info:
                    info['proteins'][0][add_key] = info.pop(add_key)
            info['proteins'][0]['num_tol_term'] = info.pop('num_tol_term', 0)
            if 'alternative_protein' in info:
                info['proteins'].extend(info['alternative_protein'])
                del info['alternative_protein']
        if 'peptide' in info and 'modified_peptide' not in info:
            info['modified_peptide'] = info['peptide']
        if 'peptide' in info:
            # normalize modifications; terminal mods get positions 0 and
            # len(peptide) + 1, residue mods keep 1-based positions
            info['modifications'] = info.pop('mod_aminoacid_mass', [])
            if 'mod_nterm_mass' in info:
                info['modifications'].insert(0, {'position': 0, 'mass': float(info.pop('mod_nterm_mass'))})
            if 'mod_cterm_mass' in info:
                info['modifications'].append({'position': 1 + len(info['peptide']), 'mass': float(info.pop('mod_cterm_mass'))})
        if 'modified_peptide' in info and info['modified_peptide'] == info.get(
                'peptide'):
            if not info.get('modifications'):
                info['modifications'] = []
            else:
                # rebuild modified_peptide by inserting '[mass]' markers;
                # iterate right-to-left so earlier insertions don't shift
                # later positions
                mp = info['modified_peptide']
                for mod in sorted(info['modifications'], key=lambda m: m['position'], reverse=True):
                    if mod['position'] not in {0, 1+len(info['peptide'])}:
                        p = mod['position']
                        mp = mp[:p] + '[{}]'.format(int(mod['mass'])) + mp[p:]
                info['modified_peptide'] = mp
        # best hit first
        if 'search_hit' in info:
            info['search_hit'].sort(key=lambda x: x['hit_rank'])
        return info
    def search_hits(self):
        """
        Iterate over search hits rather than spectrum queries.

        Each yielded dict is a search hit updated with the attributes of its
        parent spectrum query.
        """
        for sq in self:
            sh = sq.pop('search_hit', [])
            for item in sh:
                item.update(sq)
                yield item
def read(*args, **kwargs):
    """Parse `source` and iterate through peptide-spectrum matches.
    Parameters
    ----------
    source : str or file
        A path to a target pepXML file or the file object itself.
    read_schema : bool, optional
        If :py:const:`True`, attempt to extract information from the XML schema
        mentioned in the pepXML header. Otherwise, use default parameters.
        Not recommended without Internet connection or
        if you don't like to get the related warnings.
    iterative : bool, optional
        Defines whether iterative parsing should be used. It helps reduce
        memory usage at almost the same parsing speed. Default is
        :py:const:`True`.
    use_index : bool, optional
        Defines whether an index of byte offsets needs to be created for
        elements listed in `indexed_tags`.
        This is useful for random access to spectrum queries.
        Default is :py:const:`True`.
    indexed_tags : container of bytes, optional
        If `use_index` is :py:const:`True`, elements listed in this parameter
        will be indexed. The class default is used if not provided.
    Returns
    -------
    out : PepXML
        An iterator over dicts with PSM properties.
    """
    # Thin convenience wrapper: all arguments go straight to the class.
    reader = PepXML(*args, **kwargs)
    return reader
def iterfind(source, path, **kwargs):
    """Parse `source` and yield info on elements with specified local
    name or by specified "XPath".
    .. note:: This function is provided for backward compatibility only.
        If you do multiple :py:func:`iterfind` calls on one file, you should
        create an :py:class:`PepXML` object and use its
        :py:meth:`!iterfind` method.
    Parameters
    ----------
    source : str or file
        File name or file-like object.
    path : str
        Element name or XPath-like expression. Only local names separated
        with slashes are accepted. An asterisk (`*`) means any element.
        You can specify a single condition in the end, such as:
        ``"/path/to/element[some_value>1.5]"``
        Note: you can do much more powerful filtering using plain Python.
        The path can be absolute or "free". Please don't specify
        namespaces.
    recursive : bool, keyword only, optional
        If :py:const:`False`, subelements will not be processed when
        extracting info from elements. Default is :py:const:`True`.
    iterative : bool, keyword only, optional
        Specifies whether iterative XML parsing should be used. Iterative
        parsing significantly reduces memory usage and may be just a little
        slower. When `retrieve_refs` is :py:const:`True`, however, it is
        highly recommended to disable iterative parsing if possible.
        Default value is :py:const:`True`.
    read_schema : bool, keyword only, optional
        If :py:const:`True`, attempt to extract information from the XML schema
        mentioned in the mzIdentML header. Otherwise, use default parameters.
        Not recommended without Internet connection or
        if you don't like to get the related warnings.
    Returns
    -------
    out : iterator
    """
    # kwargs are intentionally shared between the constructor and iterfind;
    # each consumes the keys it knows about.
    parser = PepXML(source, **kwargs)
    return parser.iterfind(path, **kwargs)
version_info = xml._make_version_info(PepXML)
def roc_curve(source):
    """Parse source and return a ROC curve for peptideprophet analysis.
    Parameters
    ----------
    source : str or file
        A path to a target pepXML file or the file object itself.
    Returns
    -------
    out : list
        A list of ROC points. Each point is a dict of the element's
        attributes converted to float, plus the parent ``charge`` attribute
        (kept as a string) and a ``tag`` key naming the source element
        (``roc_data_point`` or ``error_point``).
    """
    parser = etree.XMLParser(remove_comments=True, ns_clean=True)
    tree = etree.parse(source, parser=parser)
    roc_curve = []
    # namespace-agnostic XPath: match by local name at every step
    for roc_error_data in tree.xpath(
        "/*[local-name()='msms_pipeline_analysis'] \
        //*[local-name()='analysis_summary' and @analysis='peptideprophet'] \
        //*[local-name()='peptideprophet_summary'] \
        //*[local-name()='roc_error_data']"):
        for element in roc_error_data.xpath("*[local-name()='roc_data_point' or local-name()='error_point']"):
            data_point = dict(element.attrib)
            for key in data_point:
                data_point[key] = float(data_point[key])
            # overwrite after float conversion: charge stays a string
            data_point["charge"] = roc_error_data.attrib["charge"]
            data_point["tag"] = etree.QName(element).localname
            roc_curve.append(data_point)
    return roc_curve
# chain = aux._make_chain(read, 'read')
# `chain(*sources)` reads several pepXML files as one iterator;
# `chain.from_iterable(sources)` accepts an iterable of sources.
chain = aux.ChainBase._make_chain(read)
def _is_decoy_prefix(psm, prefix='DECOY_'):
"""Given a PSM dict, return :py:const:`True` if all protein names for
the PSM start with ``prefix``, and :py:const:`False` otherwise. This
function might not work for some pepXML flavours. Use the source to get the
idea and suit it to your needs.
Parameters
----------
psm : dict
A dict, as yielded by :py:func:`read`.
prefix : str, optional
A prefix used to mark decoy proteins. Default is `'DECOY_'`.
Returns
-------
out : bool
"""
return all(protein['protein'].startswith(prefix)
for protein in psm['search_hit'][0]['proteins'])
def _is_decoy_suffix(psm, suffix='_DECOY'):
return all(protein['protein'].endswith(suffix)
for protein in psm['search_hit'][0]['proteins'])
# Default decoy detection is by protein-name prefix ('DECOY_').
is_decoy = _is_decoy_prefix
# Target-decoy FDR estimator built from the two detection helpers.
fdr = aux._make_fdr(_is_decoy_prefix, _is_decoy_suffix)
# Default PSM score: the best (lowest) e-value among all search hits.
_key = lambda x: min(sh['search_score']['expect'] for sh in x['search_hit'])
qvalues = aux._make_qvalues(chain, _is_decoy_prefix, _is_decoy_suffix, _key)
filter = aux._make_filter(chain, _is_decoy_prefix, _is_decoy_suffix, _key, qvalues)
filter.chain = aux._make_chain(filter, 'filter', True)
def DataFrame(*args, **kwargs):
    """Read pepXML output files into a :py:class:`pandas.DataFrame`.
    Requires :py:mod:`pandas`.
    Parameters
    ----------
    *args
        pepXML file names or objects. Passed to :py:func:`chain`.
    **kwargs
        Passed to :py:func:`chain`.
    by : str, keyword only, optional
        Can be :py:const:`"spectrum_query"` (default) or :py:const:`"search_hit"`.
        One row in the resulting dataframe corresponds to one element of the given type.
        If :py:const:`"spectrum_query"` is set, only the top search hit is shown in the dataframe.
    sep : str or None, keyword only, optional
        Some values related to PSMs (such as protein information) are variable-length
        lists. If `sep` is a :py:class:`str`, they will be packed into single string using
        this delimiter. If `sep` is :py:const:`None`, they are kept as lists. Default is
        :py:const:`None`.
    recursive : bool, keyword only, optional
        If :py:const:`False`, subelements will not be processed when
        extracting info from elements. Default is :py:const:`True`.
    iterative : bool, keyword only, optional
        Specifies whether iterative XML parsing should be used. Iterative
        parsing significantly reduces memory usage and may be just a little
        slower. When `retrieve_refs` is :py:const:`True`, however, it is
        highly recommended to disable iterative parsing if possible.
        Default value is :py:const:`True`.
    read_schema : bool, keyword only, optional
        If :py:const:`True`, attempt to extract information from the XML schema
        mentioned in the mzIdentML header. Otherwise, use default parameters.
        Not recommended without Internet connection or
        if you don't like to get the related warnings.
    pd_kwargs : dict, optional
        Keyword arguments passed to the :py:class:`pandas.DataFrame` constructor.
    Returns
    -------
    out : pandas.DataFrame
    """
    import pandas as pd
    # copy kwargs: frame-local keys are popped before passing on to readers
    kwargs = kwargs.copy()
    sep = kwargs.pop('sep', None)
    pd_kwargs = kwargs.pop('pd_kwargs', {})
    # byte-offset indexing is useless for a single linear pass
    kwargs.setdefault('use_index', False)
    by = kwargs.pop('by', 'spectrum_query')
    def search_hit_info(sh):
        # Flatten one search-hit dict into a flat row dict.
        info = {}
        proteins = sh.pop('proteins')
        # collect the union of per-protein keys, then one parallel list per key
        prot_dict = {}
        for p in proteins:
            for k in p:
                prot_dict[k] = []
        for p in proteins:
            for k, v in prot_dict.items():
                v.append(p.get(k))
        if sep is None:
            info.update(prot_dict)
        else:
            # pack variable-length lists into delimiter-joined strings
            for k, v in prot_dict.items():
                info[k] = sep.join(str(val) if val is not None else '' for val in v)
        info.update(sh.pop('search_score'))
        mods = sh.pop('modifications', [])
        formatted_mods = ['{0[mass]:.3f}@{0[position]}'.format(x) for x in mods]
        if sep is not None:
            info['modifications'] = sep.join(formatted_mods)
        else:
            info['modifications'] = formatted_mods
        # keep only scalar hit attributes; nested structures are dropped
        for k, v in sh.items():
            if isinstance(v, (str, int, float)):
                info[k] = v
        # unpack PeptideProphet / iProphet results into dedicated columns
        if 'analysis_result' in sh:
            for ar in sh['analysis_result']:
                if ar['analysis'] == 'peptideprophet':
                    try:
                        info.update(ar['peptideprophet_result']['parameter'])
                    except KeyError:
                        pass
                    info['peptideprophet_probability'] = ar['peptideprophet_result']['probability']
                    info['peptideprophet_ntt_prob'] = ar['peptideprophet_result']['all_ntt_prob']
                elif ar['analysis'] == 'interprophet':
                    info.update(ar['interprophet_result']['parameter'])
                    info['interprophet_probability'] = ar['interprophet_result']['probability']
                    info['interprophet_ntt_prob'] = ar['interprophet_result']['all_ntt_prob']
        return info
    def iter_spectrum_query():
        # One row per spectrum query; only the top search hit is merged in.
        with chain(*args, **kwargs) as f:
            for item in f:
                info = {}
                for k, v in item.items():
                    if isinstance(v, (str, int, float)):
                        info[k] = v
                if 'search_hit' in item:
                    sh = item['search_hit'][0]
                    info.update(search_hit_info(sh))
                yield info
    def iter_search_hit():
        # One row per search hit, across all input sources.
        for source in args:
            with PepXML(source, **kwargs) as f:
                for sh in f.search_hits():
                    yield search_hit_info(sh)
    items = {'spectrum_query': iter_spectrum_query, 'search_hit': iter_search_hit}[by]
    return pd.DataFrame(items(), **pd_kwargs)
def filter_df(*args, **kwargs):
    """Read pepXML files or DataFrames and return a :py:class:`DataFrame` with filtered PSMs.
    Positional arguments can be pepXML files or DataFrames. Keyword parameter `fdr` is also required.
    Other parameters are optional.
    Requires :py:mod:`pandas`.
    Parameters
    ----------
    positional args
        pepXML file names, file objects, or DataFrames. Passed to :py:func:`DataFrame`.
    fdr : float, keyword only, 0 <= fdr <= 1
        Desired FDR level.
    key : str / iterable / callable, keyword only, optional
        PSM score. Default is 'expect'.
    is_decoy : str / iterable / callable, keyword only, optional
        Default is to check if all strings in the "protein" column start with `'DECOY_'`.
    sep : str or None, keyword only, optional
        Some values related to PSMs (such as protein information) are variable-length
        lists. If `sep` is a :py:class:`str`, they will be packed into single string using
        this delimiter. If `sep` is :py:const:`None`, they are kept as lists. Default is
        :py:const:`None`.
    reverse : bool, keyword only, optional
        If :py:const:`True`, then PSMs are sorted in descending order,
        i.e. the value of the key function is higher for better PSMs.
        Default is :py:const:`False`.
    decoy_prefix : str, optional
        If the default `is_decoy` function works for you, this parameter specifies which
        protein name prefix to use to detect decoy matches. If you provide your own
        `is_decoy`, or if you specify `decoy_suffix`, this parameter has no effect.
        Default is `"DECOY_"`.
    decoy_suffix : str, optional
        If the default `is_decoy` function works for you, this parameter specifies which
        protein name suffix to use to detect decoy matches. If you provide your own
        `is_decoy`, this parameter has no effect. Mutually exclusive with `decoy_prefix`.
    remove_decoy : bool, keyword only, optional
        Defines whether decoy matches should be removed from the output.
        Default is :py:const:`True`.
        .. note:: If set to :py:const:`False`, then by default the decoy
           PSMs will be taken into account when estimating FDR. Refer to the
           documentation of :py:func:`fdr` for math; basically, if
           `remove_decoy` is :py:const:`True`, then formula 1 is used
           to control output FDR, otherwise it's formula 2. This can be
           changed by overriding the `formula` argument.
    formula : int, keyword only, optional
        Can be either 1 or 2, defines which formula should be used for FDR
        estimation. Default is 1 if `remove_decoy` is :py:const:`True`,
        else 2 (see :py:func:`fdr` for definitions).
    ratio : float, keyword only, optional
        The size ratio between the decoy and target databases. Default is
        1. In theory, the "size" of the database is the number of
        theoretical peptides eligible for assignment to spectra that are
        produced by *in silico* cleavage of that database.
    correction : int or float, keyword only, optional
        Possible values are 0, 1 and 2, or floating point numbers between 0 and 1.
        0 (default): no correction;
        1: enable "+1" correction. This accounts for the probability that a false
        positive scores better than the first excluded decoy PSM;
        2: this also corrects that probability for finite size of the sample,
        so the correction will be slightly less than "+1".
        If a floating point number
        is given, then instead of the expectation value for the number of false PSMs,
        the confidence value is used. The value of `correction` is then interpreted as
        desired confidence level. E.g., if correction=0.95, then the calculated q-values
        do not exceed the "real" q-values with 95% probability.
        See `this paper <http://dx.doi.org/10.1021/acs.jproteome.6b00144>`_ for further explanation.
    pep : callable / array-like / iterable / str, keyword only, optional
        If callable, a function used to determine the posterior error probability (PEP).
        Should accept exactly one argument (PSM) and return a float.
        If array-like, should contain float values for all given PSMs.
        If string, it is used as a field name (PSMs must be in a record array
        or a :py:class:`DataFrame`).
        .. note:: If this parameter is given, then PEP values will be used to calculate
           q-values. Otherwise, decoy PSMs will be used instead. This option conflicts with:
           `is_decoy`, `remove_decoy`, `formula`, `ratio`, `correction`.
           `key` can still be provided. Without `key`, PSMs will be sorted by PEP.
    q_label : str, optional
        Field name for q-value in the output. Default is ``'q'``.
    score_label : str, optional
        Field name for score in the output. Default is ``'score'``.
    decoy_label : str, optional
        Field name for the decoy flag in the output. Default is ``'is decoy'``.
    pep_label : str, optional
        Field name for PEP in the output. Default is ``'PEP'``.
    Returns
    -------
    out : pandas.DataFrame
    """
    import pandas as pd
    sep = kwargs.get('sep')
    kwargs.setdefault('key', 'expect')
    if all(isinstance(arg, pd.DataFrame) for arg in args):
        if len(args) > 1:
            df = pd.concat(args)
        else:
            df = args[0]
    else:
        # these keys belong to DataFrame(), not to aux.filter()
        read_kw = {k: kwargs.pop(k) for k in ['iterative', 'read_schema', 'sep', 'pd_kwargs'] if k in kwargs}
        df = DataFrame(*args, **read_kw)
    if 'is_decoy' not in kwargs:
        if sep is not None:
            # protein names were joined with `sep` by DataFrame(), so split
            # with the same delimiter (previously hard-coded as ';', which
            # broke decoy detection for any other separator)
            if 'decoy_suffix' in kwargs:
                kwargs['is_decoy'] = df['protein'].str.split(sep).apply(
                    lambda s: all(x.endswith(kwargs['decoy_suffix']) for x in s))
            else:
                kwargs['is_decoy'] = df['protein'].str.split(sep).apply(
                    lambda s: all(x.startswith(kwargs.get('decoy_prefix', 'DECOY_')) for x in s))
        else:
            if 'decoy_suffix' in kwargs:
                kwargs['is_decoy'] = df['protein'].apply(
                    lambda s: all(x.endswith(kwargs['decoy_suffix']) for x in s))
            else:
                kwargs['is_decoy'] = df['protein'].apply(
                    lambda s: all(x.startswith(kwargs.get('decoy_prefix', 'DECOY_')) for x in s))
    return aux.filter(df, **kwargs)
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/tandem.py | pyteomics/tandem.py | """
tandem - X!Tandem output file reader
====================================
Summary
-------
`X!Tandem <http://thegpm.org/tandem/>`_ is an open-source proteomic search
engine with a very simple, sophisticated application programming interface
(API): it simply takes an XML file of instructions on its command line,
and outputs the results into an XML file, which has been specified in the input
XML file. The output format is described
`here (PDF) <http://www.thegpm.org/docs/X_series_output_form.pdf>`_.
This module provides a minimalistic way to extract information from X!Tandem
output files. You can use the old functional interface (:py:func:`read`) or the
new object-oriented interface (:py:class:`TandemXML`) to iterate over entries in
`<group>` elements, i.e. identifications for a certain spectrum.
Data access
-----------
:py:class:`TandemXML` - a class representing a single X!Tandem output file.
Other data access functions use this class internally.
:py:func:`read` - iterate through peptide-spectrum matches in an X!Tandem
output file. Data from a single PSM are converted to a human-readable dict.
:py:func:`chain` - read multiple files at once.
:py:func:`chain.from_iterable` - read multiple files at once, using an
iterable of files.
:py:func:`DataFrame` - read X!Tandem output files into a :py:class:`pandas.DataFrame`.
Target-decoy approach
---------------------
:py:func:`filter` - iterate through peptide-spectrum matches in a chain of
X!Tandem output files, yielding only top PSMs and keeping false discovery rate
(FDR) at the desired level. The FDR is estimated using the target-decoy
approach (TDA).
:py:func:`filter.chain` - chain a series of filters applied independently to
several files.
:py:func:`filter.chain.from_iterable` - chain a series of filters applied
independently to an iterable of files.
:py:func:`filter_df` - filter X!Tandem output files and return a :py:class:`pandas.DataFrame`.
:py:func:`is_decoy` - determine if a PSM is from the decoy database.
:py:func:`fdr` - estimate the FDR in a data set using TDA.
:py:func:`qvalues` - get an array of scores and local FDR values for a PSM
set using the target-decoy approach.
Deprecated functions
--------------------
:py:func:`iterfind` - iterate over elements in an X!Tandem file.
You can just call the corresponding method of the :py:class:`TandemXML`
object.
Dependencies
------------
This module requires :py:mod:`lxml` and :py:mod:`numpy`.
-------------------------------------------------------------------------------
"""
# Copyright 2012 Anton Goloborodko, Lev Levitsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
from . import xml, auxiliary as aux, _schema_defaults
class TandemXML(xml.XML):
    """Parser class for TandemXML files.

    Iterates over ``<group type="model">`` elements; parsing is recursive by
    default (see :py:meth:`__init__`).
    """
    file_format = "TandemXML"
    _root_element = "bioml"
    _default_schema = _schema_defaults._tandem_schema_defaults
    _default_iter_path = 'group[@type="model"]'
    # 'domain' wrapper elements are merged into their parent dict
    _structures_to_flatten = {'domain'}
    def __init__(self, *args, **kwargs):
        # default to recursive parsing unless the caller chose otherwise
        if 'recursive' not in kwargs:
            super(TandemXML, self).__init__(*args, recursive=True, **kwargs)
        else:
            super(TandemXML, self).__init__(*args, **kwargs)
    __init__.__doc__ = xml.XML.__init__.__doc__
    def _get_info_smart(self, element, **kw):
        """Post-process the generic element dict into a friendlier shape."""
        info = self._get_info(element, **kw)
        # handy simplifications below
        # a single {'label': ..., 'note': ...} entry collapses to its text
        if isinstance(info.get('note'), list) and len(info['note']) == 1 and set(info['note'][0]) == {'label', 'note'}:
            info['note'] = info['note'][0]['note']
        if 'protein' in info and 'label' in info:
            del info['label']
        # re-key nested groups by their 'type' and 'label' attributes
        if 'group' in info:
            for g in info['group']:
                label = g.pop('label')
                type_ = g.pop('type')
                info.setdefault(type_, {})[label] = g
            del info['group']
        # promote traces to top-level keys named after their type
        if 'trace' in info:
            for t in info['trace']:
                info[t.pop('type')] = t
            del info['trace']
        if isinstance(info.get('values'), dict):
            info['values'] = info['values']['values']
        # numeric attributes arrive as {'type': ..., 'attribute': ...} pairs
        if isinstance(info.get('attribute'), list):
            for a in info.pop('attribute'):
                info[a['type']] = float(a['attribute'])
        if 'support' in info:
            # supporting-data axes are integer-valued; drop redundant labels
            for d in info['support'].get('supporting data', {}).values():
                for label in ['Xdata', 'Ydata']:
                    d[label]['values'] = d[label]['values'].astype(int)
                    del d[label]['label']
            if 'fragment ion mass spectrum' in info['support']:
                fims = info['support']['fragment ion mass spectrum']
                fims.update(fims.pop('tandem mass spectrum'))
                for label in ['Xdata', 'Ydata']:
                    del info['support']['fragment ion mass spectrum'][label]['label']
        if 'charge' in info:
            info['charge'] = int(info['charge'])
        # empty retention time strings become None
        if info.get('rt') == '':
            info['rt'] = None
        return info
    def _get_schema_info(self, read_schema):
        # X!Tandem output has no usable schema reference; always use defaults
        # (the `read_schema` argument is deliberately ignored).
        return self._default_schema
    def __next__(self):
        # every yielded group has type="model" (see _default_iter_path),
        # so the 'type' key carries no information and is dropped
        n = super(TandemXML, self).__next__()
        del n['type']
        return n
    next = __next__
def read(source, iterative=True, **kwargs):
    """Parse `source` and iterate through peptide-spectrum matches.
    Parameters
    ----------
    source : str or file
        A path to a target X!Tandem output file or the file object itself.
    iterative : bool, optional
        Defines whether iterative parsing should be used. It helps reduce
        memory usage at almost the same parsing speed. Default is
        :py:const:`True`.
    Returns
    -------
    out : iterator
        An iterator over dicts with PSM properties.
    """
    # NOTE(review): extra **kwargs are accepted but silently discarded;
    # `read_schema` and `recursive` are hard-coded here. Confirm this is the
    # intended backward-compatible behavior before forwarding kwargs.
    return TandemXML(source, read_schema=False, recursive=True, iterative=iterative)
def iterfind(source, path, **kwargs):
    """Parse `source` and yield info on elements with specified local
    name or by specified "XPath".
    .. note:: This function is provided for backward compatibility only.
        If you do multiple :py:func:`iterfind` calls on one file, you should
        create a :py:class:`TandemXML` object and use its
        :py:meth:`!iterfind` method.
    Parameters
    ----------
    source : str or file
        File name or file-like object.
    path : str
        Element name or XPath-like expression. Only local names separated
        with slashes are accepted. An asterisk (`*`) means any element.
        You can specify a single condition in the end, such as:
        ``"/path/to/element[some_value>1.5]"``
        Note: you can do much more powerful filtering using plain Python.
        The path can be absolute or "free". Please don't specify
        namespaces.
    recursive : bool, optional
        If :py:const:`False`, subelements will not be processed when
        extracting info from elements. Default is :py:const:`True`.
    iterative : bool, optional
        Specifies whether iterative XML parsing should be used. Iterative
        parsing significantly reduces memory usage and may be just a little
        slower. When `retrieve_refs` is :py:const:`True`, however, it is
        highly recommended to disable iterative parsing if possible.
        Default value is :py:const:`True`.
    Returns
    -------
    out : iterator
    """
    # kwargs are shared between the constructor and iterfind; each consumes
    # the keys it understands.
    parser = TandemXML(source, **kwargs)
    return parser.iterfind(path, **kwargs)
# chain = aux._make_chain(read, 'read')
# `chain(*sources)` reads several X!Tandem files as one iterator;
# `chain.from_iterable(sources)` accepts an iterable of sources.
chain = aux.ChainBase._make_chain(TandemXML)
def _is_decoy_prefix(psm, prefix='DECOY_'):
"""Given a PSM dict, return :py:const:`True` if all protein names for
the PSM start with `prefix`, and :py:const:`False` otherwise.
Parameters
----------
psm : dict
A dict, as yielded by :py:func:`read`.
prefix : str, optional
A prefix used to mark decoy proteins. Default is `'DECOY_'`.
Returns
-------
out : bool
"""
return all(prot['label'].startswith(prefix) for prot in psm['protein'])
def _is_decoy_suffix(psm, suffix='_DECOY'):
"""Given a PSM dict, return :py:const:`True` if all protein names for
the PSM end with `suffix`, and :py:const:`False` otherwise.
Parameters
----------
psm : dict
A dict, as yielded by :py:func:`read`.
suffix : str, optional
A suffix used to mark decoy proteins. Default is `'_DECOY'`.
Returns
-------
out : bool
"""
return all(prot['label'].endswith(suffix) for prot in psm['protein'])
# Default decoy detection is by protein-label prefix ('DECOY_').
is_decoy = _is_decoy_prefix
# PSMs are scored by their 'expect' value (lower is better).
qvalues = aux._make_qvalues(chain, _is_decoy_prefix, _is_decoy_suffix, operator.itemgetter('expect'))
filter = aux._make_filter(chain, _is_decoy_prefix, _is_decoy_suffix, operator.itemgetter('expect'), qvalues)
# Target-decoy FDR estimator built from the two detection helpers.
fdr = aux._make_fdr(_is_decoy_prefix, _is_decoy_suffix)
filter.chain = aux._make_chain(filter, 'filter', True)
def DataFrame(*args, **kwargs):
    """Read X!Tandem output files into a :py:class:`pandas.DataFrame`.
    Requires :py:mod:`pandas`.
    Parameters
    ----------
    sep : str or None, optional
        Some values related to PSMs (such as protein information) are variable-length
        lists. If `sep` is a :py:class:`str`, they will be packed into single string using
        this delimiter. If `sep` is :py:const:`None`, they are kept as lists. Default is
        :py:const:`None`.
    pd_kwargs : dict, optional
        Keyword arguments passed to the :py:class:`pandas.DataFrame` constructor.
    *args
        Passed to :py:func:`chain`.
    **kwargs
        Passed to :py:func:`chain`.
    Returns
    -------
    out : pandas.DataFrame
    """
    import pandas as pd
    data = []
    # per-protein and per-peptide attributes expanded into list columns
    prot_keys = ['id', 'uid', 'label', 'expect']
    pep_keys = ['id', 'pre', 'post', 'start', 'end']
    sep = kwargs.pop('sep', None)
    pd_kwargs = kwargs.pop('pd_kwargs', {})
    with chain(*args, **kwargs) as f:
        for item in f:
            info = {}
            # scalar PSM attributes become columns directly
            for k, v in item.items():
                if isinstance(v, (str, int, float)):
                    info[k] = v
            # first protein is used as the representative record below
            protein = item['protein'][0]
            for key in prot_keys:
                vals = [prot.get(key) for prot in item['protein']]
                if sep is not None:
                    vals = sep.join(str(val) if val is not None else '' for val in vals)
                info['protein_' + key] = vals
            for key in pep_keys:
                vals = [prot['peptide'].get(key) for prot in item['protein']]
                if sep is not None:
                    vals = sep.join(str(val) if val is not None else '' for val in vals)
                info['peptide_' + key] = vals
            # modifications formatted as 'mass@residue-type' pairs
            aa = protein['peptide'].pop('aa', [])
            info['modifications'] = ','.join('{0[modified]:.3f}@{0[type]}'.format(x) for x in aa)
            # strip keys already expanded into list columns, then merge the
            # remaining peptide attributes of the first protein
            for k in prot_keys:
                protein.pop(k, None)
            for k in pep_keys:
                protein['peptide'].pop(k, None)
            info.update(protein['peptide'])
            # scan identifier: prefer the spectrum note, fall back to its id
            fims = item['support']['fragment ion mass spectrum']
            try:
                info['scan'] = fims['note']
            except KeyError:
                info['scan'] = fims['id']
            data.append(info)
    return pd.DataFrame(data, **pd_kwargs)
def filter_df(*args, **kwargs):
    """Read X!Tandem output files or DataFrames and return a :py:class:`DataFrame` with filtered PSMs.
    Positional arguments can be X!Tandem output files or DataFrames.
    Requires :py:mod:`pandas`.
    Parameters
    ----------
    key : str / iterable / callable, optional
        Default is 'expect'.
    is_decoy : str / iterable / callable, optional
        Default is to check if all strings in the "protein" column start with `'DECOY_'`
    *args
        Passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.
    **kwargs
        Passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.
    Returns
    -------
    out : pandas.DataFrame
    """
    import pandas as pd
    sep = kwargs.get('sep')
    kwargs.setdefault('key', 'expect')
    if all(isinstance(arg, pd.DataFrame) for arg in args):
        if len(args) > 1:
            df = pd.concat(args)
        else:
            df = args[0]
    else:
        # these keys belong to DataFrame(), not to aux.filter()
        read_kw = {k: kwargs.pop(k) for k in ['iterative', 'read_schema', 'sep', 'pd_kwargs'] if k in kwargs}
        df = DataFrame(*args, **read_kw)
    if 'is_decoy' not in kwargs:
        if sep is not None:
            # labels were joined with `sep` by DataFrame(); split the same way
            if 'decoy_suffix' in kwargs:
                kwargs['is_decoy'] = df['protein_label'].str.split(sep).apply(
                    lambda s: all(x.endswith(kwargs['decoy_suffix']) for x in s))
            else:
                kwargs['is_decoy'] = df['protein_label'].str.split(sep).apply(
                    lambda s: all(x.startswith(kwargs.get('decoy_prefix', 'DECOY_')) for x in s))
        else:
            if 'decoy_suffix' in kwargs:
                kwargs['is_decoy'] = df['protein_label'].apply(
                    lambda s: all(x.endswith(kwargs['decoy_suffix']) for x in s))
            else:
                kwargs['is_decoy'] = df['protein_label'].apply(
                    lambda s: all(x.startswith(kwargs.get('decoy_prefix', 'DECOY_')) for x in s))
    return aux.filter(df, **kwargs)
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/mzmlb.py | pyteomics/mzmlb.py | # -*- coding: utf8 -*-
"""
mzmlb - reader for mass spectrometry data in mzMLb format
=========================================================
.. warning::
This is a **Provisional Implementation**. The mzMLb format has been published
but is not yet broadly available.
Summary
-------
mzMLb is an HDF5 container format wrapping around the standard rich XML-format
for raw mass spectrometry data storage. Please refer to [1]_ for more information
about mzMLb and its features. Please refer to
`psidev.info <https://www.psidev.info/mzML>`_ for the detailed
specification of the format and structure of mzML files.
This module provides a minimalistic way to extract information from mzMLb
files. You can use the old functional interface (:py:func:`read`) or the new
object-oriented interface (:py:class:`MzMLb`) to iterate over entries in ``<spectrum>`` elements.
:py:class:`MzMLb` also supports direct indexing with spectrum IDs or indices.
Data access
-----------
:py:class:`MzMLb` - a class representing a single mzMLb file.
Other data access functions use this class internally.
:py:func:`read` - iterate through spectra in mzMLb file. Data from a
single spectrum are converted to a human-readable dict. Spectra themselves are
stored under 'm/z array' and 'intensity array' keys.
:py:func:`chain` - read multiple mzMLb files at once.
:py:func:`chain.from_iterable` - read multiple files at once, using an
iterable of files.
Controlled Vocabularies and Caching
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
mzML relies on controlled vocabularies to describe its contents extensibly.
Every :py:class:`MzML` needs a copy of PSI-MS CV, which it handles using the :py:mod:`psims` library.
If you want to save time when creating instances of :py:class:`MzML`, consider enabling the :py:mod:`psims` cache.
See `psims documentation <https://mobiusklein.github.io/psims/docs/build/html/controlled_vocabulary/controlled_vocabulary.html#caching>`_
on how to enable and configure the cache (alternatively, you can handle CV creation yourself and pass a pre-created instance
using the `cv` parameter to :py:class:`MzMLb`).
See also
`Controlled Vocabulary Terms <../data.html#controlled-vocabulary-terms-in-structured-data>`_
for more details on how they are used.
Handling Time Units and Other Qualified Quantities
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
mzMLb contains information which may be described as using a variety of different time units.
See `Unit Handling <../data.html#unit-handling>`_ for more information.
References
----------
.. [1] Bhamber, R. S., Jankevics, A., Deutsch, E. W., Jones, A. R., & Dowsey, A. W. (2021).
MzMLb: A Future-Proof Raw Mass Spectrometry Data Format Based on Standards-Compliant
mzML and Optimized for Speed and Storage Requirements. Journal of Proteome Research,
20(1), 172–183. https://doi.org/10.1021/acs.jproteome.0c00192
"""
import io
import warnings
import logging
from collections import namedtuple
import h5py
try:
logging.getLogger("hdf5plugin").addHandler(logging.NullHandler())
import hdf5plugin
except ImportError:
hdf5plugin = None
import numpy as np
from pyteomics.mzml import MzML as _MzML
from pyteomics.auxiliary.file_helpers import HierarchicalOffsetIndex, TaskMappingMixin, TimeOrderedIndexedReaderMixin, FileReader
from pyteomics import auxiliary as aux, xml
def delta_predict(data, copy=True):
    '''Reverse the lossy transformation of the delta compression
    helper.

    Parameters
    ----------
    data : :class:`numpy.ndarray`
        The data to transform
    copy : bool
        Whether to make a copy of the data array or transform it in-place.

    Returns
    -------
    :class:`numpy.ndarray`
        The transformed data array
    '''
    out = data.copy() if copy else data
    if len(out) > 2:
        # out[0] is never rewritten by the recurrence, so hoist it.
        first = out[0]
        for idx in range(2, len(out)):
            out[idx] += out[idx - 1] - first
    return out
def linear_predict(data, copy=True):
    '''Reverse the lossy transformation of the linear interpolation compression
    helper.

    Parameters
    ----------
    data : :class:`numpy.ndarray`
        The data to transform
    copy : bool
        Whether to make a copy of the data array or transform it in-place.

    Returns
    -------
    :class:`numpy.ndarray`
        The transformed data array
    '''
    out = data.copy() if copy else data
    if len(out) > 2:
        # out[1] is never rewritten by the recurrence, so hoist it.
        anchor = out[1]
        for idx in range(2, len(out)):
            out[idx] += 2 * out[idx - 1] - out[idx - 2] - anchor
    return out
class HDF5ByteBuffer(io.RawIOBase):
    '''Helper class that looks file-like so that we can pass a HDF5 byte dataset to
    an arbitrary XML parser.

    Implements :class:`~io.RawIOBase` for reading. The stream is read-only and
    seekable; :meth:`write` raises :class:`ValueError`.
    '''

    def __init__(self, buffer, offset=None):
        if offset is None:
            offset = 0
        self.buffer = buffer
        self.offset = offset
        self.size = self.buffer.size
        self.mode = 'rb'

    def readable(self):
        return True

    def seekable(self):
        return True

    def isatty(self):
        return False

    def seek(self, offset, whence=0):
        """Move the cursor, following standard :meth:`io.IOBase.seek` semantics.

        Parameters
        ----------
        offset : int
            Byte offset, interpreted relative to `whence`.
        whence : int
            One of :data:`io.SEEK_SET`, :data:`io.SEEK_CUR`, :data:`io.SEEK_END`.

        Returns
        -------
        int
            The new absolute position.
        """
        if whence == io.SEEK_SET:
            self.offset = offset
        elif whence == io.SEEK_CUR:
            self.offset += offset
        elif whence == io.SEEK_END:
            # Bug fix: the io contract for SEEK_END is `size + offset` with
            # `offset` usually negative; this previously computed
            # `size - offset`, sending e.g. seek(-5, 2) past the end.
            self.offset = self.size + offset
        else:
            raise ValueError("Bad whence %r" % whence)
        return self.offset

    def tell(self):
        return self.offset

    def close(self):
        return

    @property
    def closed(self):
        return False

    def readinto(self, b):
        # Fill the caller-provided buffer with up to len(b) bytes and report
        # how many were actually produced.
        n = len(b)
        temp = self._read(n)
        m = len(temp)
        b[:m] = temp[:]
        return m

    def readall(self):
        return bytes(self._read(-1))

    def read(self, n=-1):
        return bytes(self._read(n))

    def write(self, b):
        raise ValueError("Read-only stream")

    def _read(self, n=-1):
        """Read up to `n` bytes from the HDF5 dataset (`-1` means to the end)."""
        if n == -1:
            n = self.size + 1
        dat = bytearray(np.array(self.buffer[self.offset:self.offset + n]))
        # Advance by the number of bytes actually produced so that tell()
        # never runs past the end of the dataset when reads hit EOF.
        self.offset += len(dat)
        return dat
class external_array_slice(namedtuple('external_array_slice',
                           ['array_name', 'offset', 'length', 'source', 'transform', 'key', 'dtype'])):
    def decode(self):
        """Decode this deferred reference into a numerical array.

        Delegates to the `source` parser that created the record.

        Returns
        -------
        np.ndarray
        """
        provider = self.source
        return provider._decode_record(self)
class ExternalDataMzML(_MzML):
    '''An MzML parser that reads data arrays from an external provider.

    The embedded XML stream only carries references (dataset name, offset,
    length) for each binary array; the numerical values themselves live in
    sibling HDF5 datasets that are reached through ``external_data_registry``.

    This is an implementation detail of :class:`MzMLb`.
    '''

    def __init__(self, *args, **kwargs):
        # Maps dataset names to out-of-core arrays; see ExternalArrayRegistry.
        self._external_data_registry = kwargs.pop("external_data_registry", None)
        super(ExternalDataMzML, self).__init__(*args, **kwargs)

    def _make_record(self, array_name, offset, length, transform, name, dtype):
        # Build a lazy reference that can decode itself later via this parser.
        return external_array_slice(array_name, offset, length, self, transform, name, dtype)

    def _transform_array(self, array, transform):
        # Reverse the (lossy) prediction transform applied at write time,
        # in-place on the freshly retrieved array.
        if transform is None:
            return array
        elif "linear prediction" == transform:
            return linear_predict(array, copy=False)
        elif "delta prediction" == transform:
            return delta_predict(array, copy=False)
        else:
            raise ValueError("Transformation not recognized")

    def _retrieve_external_array(self, array_name, length, offset):
        array = self._external_data_registry.get(array_name, length, offset)
        return array

    def decode_data_array(self, array_name, offset, length, transform=None, dtype=np.float64):
        # NOTE(review): `dtype` is accepted for interface parity but is not
        # applied here; the registry returns the dataset's native dtype.
        array = self._retrieve_external_array(array_name, length, offset)
        array = self._transform_array(array, transform)
        return array

    def _decode_record(self, record):
        # Resolve a deferred external_array_slice produced by _make_record.
        array = self.decode_data_array(
            record.array_name, record.offset, record.length, record.transform, record.dtype)
        return self._finalize_record_conversion(array, record)

    def _handle_binary(self, info, **kwargs):
        if not self.decode_binary:
            self.decode_binary = True
            # Binary decoding works totally differently here, not supporting the previous signatures
            # that the parent method will use. Pretend we are decoding because it is a no-op in the
            # parent method.
            result = super(ExternalDataMzML, self)._handle_binary(info, **kwargs)
            self.decode_binary = False
        else:
            result = super(ExternalDataMzML, self)._handle_binary(info, **kwargs)
        # The dataset reference term name differs between schema revisions.
        try:
            array_name = info['external HDF5 dataset']
        except KeyError:
            array_name = info['external dataset']
        offset = int(info['external offset'])
        length = int(info['external array length'])

        transform = None
        # The zlib compression in these two terms happens automatically during HDF5 encoding and
        # the reader needn't even know about it. Need an example of how Numpress will be signaled.
        if "linear prediction" in info or "truncation, linear prediction and zlib compression" in info:
            transform = 'linear prediction'
        elif "delta prediction" in info or "truncation, delta prediction and zlib compression" in info:
            transform = 'delta prediction'

        if not self.decode_binary:
            # Deferred mode: store a self-decoding record instead of the data.
            name = self._detect_array_name(info)
            result[name] = self._make_record(
                array_name, offset, length, transform, name,
                self._external_data_registry.dtype_of(array_name))
            return result

        # Eager mode: fetch and convert the array now.
        array = self._retrieve_external_array(array_name, length, offset)

        if len(result) == 1:
            name = next(iter(result))
        else:
            name = self._detect_array_name(info)
        result[name] = self._convert_array(name, array)
        return result

    def reset(self):
        super(ExternalDataMzML, self).reset()
        # Drop cached chunks so a rewound parser re-reads from the source.
        self._external_data_registry.clear()
class chunk_interval_cache_record(namedtuple("chunk_interval_cache_record", ("start", "end", "array"))):
    '''A cached chunk of a backing array covering the half-open interval
    ``[start, end)`` in absolute dataset coordinates.'''

    def contains(self, start, end):
        """Return :const:`True` if the requested interval lies within this chunk.

        Bug fix: the original returned :const:`None` (not :const:`False`) when
        ``start < self.start``.
        """
        # NOTE: `end < self.end` is kept from the original; a request ending
        # exactly at the chunk boundary counts as a miss.
        return self.start <= start and end < self.end

    def get(self, start, end):
        # Translate absolute coordinates into chunk-local coordinates.
        return self.array[start - self.start:end - self.start]

    def __eq__(self, other):
        # A chunk's identity is its interval; the payload is ignored.
        return self.start == other.start and self.end == other.end

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.start)


class ExternalArrayRegistry(object):
    '''Read chunks out of a single long array.

    This is an implementation detail of :class:`MzMLb`.

    Attributes
    ----------
    registry : Mapping
        A mapping from array name to the out-of-core array object.
    chunk_size : int
        The number of entries to chunk together and keep in memory.
    chunk_cache : dict
        A mapping from array name to cached array blocks.
    '''

    def __init__(self, registry, chunk_size=None):
        if chunk_size is None:
            chunk_size = 2 ** 20
        else:
            chunk_size = int(chunk_size)
        self.registry = registry
        self.chunk_cache = {}
        self.chunk_size = chunk_size

    def clear(self):
        """Drop all cached chunks."""
        self.chunk_cache.clear()

    def _get_raw(self, array_name, start, end):
        # Slice straight out of the backing dataset (may hit disk).
        return self.registry[array_name][start:end]

    def _make_cache_record(self, array_name, start, end):
        return chunk_interval_cache_record(start, end, self._get_raw(array_name, start, end))

    def get(self, array_name, length, offset=0):
        """Return ``length`` entries of ``array_name`` starting at ``offset``.

        Serves the request from the per-array cached chunk when it covers the
        interval; otherwise reads a fresh chunk of at least :attr:`chunk_size`
        entries and caches it (one cached chunk per array).

        Parameters
        ----------
        array_name : str
            Name of the backing dataset in :attr:`registry`.
        length : int
            Number of entries to return.
        offset : int, optional
            Starting index into the dataset.

        Returns
        -------
        array slice
        """
        start = offset
        end = start + length
        # The original duplicated the cache-miss logic in a try/except pair and
        # ended with an unreachable direct-read return; collapsed here.
        cache_record = self.chunk_cache.get(array_name)
        if cache_record is None or not cache_record.contains(start, end):
            cache_record = self._make_cache_record(
                array_name, start, start + max(length, self.chunk_size))
            self.chunk_cache[array_name] = cache_record
        return cache_record.get(start, end)

    def dtype_of(self, array_name):
        """Return the dtype of the named backing dataset."""
        return self.registry[array_name].dtype

    def __call__(self, array_name, length, offset=0):
        return self.get(array_name, length, offset)
class MzMLb(TimeOrderedIndexedReaderMixin, TaskMappingMixin):
    '''A parser for mzMLb [1]_.

    Provides an identical interface to :class:`~pyteomics.mzml.MzML`.

    Attributes
    ----------
    path : str, Path-like, or file-like object
        The mzMLb file path or a file-like object providing it.
    handle : :class:`h5py.File`
        The raw HDF5 file container.
    mzml_parser : :class:`~.ExternalDataMzML`
        The mzML parser for the XML stream inside the HDF5 file with
        special behavior for retrieving the out-of-band data arrays
        from their respective storage locations.
    schema_version : str
        The mzMLb HDF5 schema version, distinct from the mzML schema inside it.

    References
    ----------
    .. [1] Bhamber, R. S., Jankevics, A., Deutsch, E. W., Jones, A. R., & Dowsey, A. W. (2021).
       MzMLb: A Future-Proof Raw Mass Spectrometry Data Format Based on Standards-Compliant
       mzML and Optimized for Speed and Storage Requirements. Journal of Proteome Research,
       20(1), 172–183. https://doi.org/10.1021/acs.jproteome.0c00192
    '''
    _default_iter_tag = ExternalDataMzML._default_iter_tag

    file_format = "mzMLb"

    def __init__(self, path, hdfargs=None, mzmlargs=None, allow_updates=False,
                 use_index=True, **kwargs):
        if hdfargs is None:
            hdfargs = {}
        if mzmlargs is None:
            mzmlargs = {}
        mzmlargs.update(kwargs)
        # NOTE(review): `use_index` is accepted for interface parity with MzML
        # but is not consulted here; a byte index is always built.

        self.path = path
        self._hdfargs = hdfargs
        self._mzmlargs = mzmlargs
        self._allow_updates = allow_updates
        self.handle = h5py.File(self.path, 'r+' if self._allow_updates else 'r', **hdfargs)
        self.schema_version = self.handle['mzML'].attrs.get('version')
        self._check_compressor()

        # Expose the embedded mzML XML byte dataset as a buffered file-like
        # stream for the XML parser.
        self._xml_buffer = io.BufferedReader(HDF5ByteBuffer(self.handle['mzML']))
        self._array_registry = ExternalArrayRegistry(self.handle)
        self._make_mzml_parser(mzmlargs)

        super(MzMLb, self).__init__(**kwargs)

    def _check_compressor(self):
        """Warn up-front if a data array uses the Blosc meta-compressor
        (HDF5 filter id 32001) but :mod:`hdf5plugin` is not installed."""
        for key in self.handle.keys():
            # Bug fix: the second clause previously tested the bare string
            # literal "chromatogram_MS_" (always truthy) instead of testing
            # for membership in `key`.
            if "spectrum_MS_" in key or "chromatogram_MS_" in key:
                data = self.handle[key]
                try:
                    filts = data._filters
                except AttributeError:
                    continue
                if '32001' in filts:
                    if hdf5plugin is None:
                        warnings.warn(
                            ("Blosc meta-compressor detected, but hdf5plugin is "
                             "not installed, may not be able to access %r") % (key, ))

    def _make_mzml_parser(self, kwargs):
        # Build the inner XML parser against the buffered byte stream, then
        # substitute the byte index derived from the HDF5 index datasets for
        # the one MzML would normally construct by scanning the XML.
        self._mzml_parser = ExternalDataMzML(
            self._xml_buffer, external_data_registry=self._array_registry,
            use_index=False, **kwargs)
        self._mzml_parser._offset_index = self.build_byte_index()
        self._mzml_parser._use_index = True

    @property
    def name(self):
        if hasattr(self.path, 'name'):
            return self.path.name
        return self.path

    def build_byte_index(self):
        """Construct a spectrum/chromatogram id -> byte offset index from the
        dedicated mzMLb index datasets.

        Returns
        -------
        :class:`HierarchicalOffsetIndex`
        """
        index = HierarchicalOffsetIndex()
        for label in [u'spectrum', u'chromatogram']:
            sub = index[label]
            # The id dataset is a flat NUL-delimited byte string.
            ids = bytearray(np.array(self.handle['mzML_{}Index_idRef'.format(label)])).split(b"\x00")
            # The offset dataset carries one trailing sentinel entry; drop it.
            offsets = self.handle["mzML_{}Index".format(label)][:-1]
            for i, o in enumerate(offsets):
                sub[ids[i].decode('utf8')] = o
        return index

    def get_by_id(self, id):
        """Parse the file and return the element with `id` attribute equal
        to `id`. Returns :py:const:`None` if no such element is found.

        Parameters
        ----------
        id : str
            The value of the `id` attribute to match.

        Returns
        -------
        out : :py:class:`dict` or :py:const:`None`
        """
        return self._mzml_parser.get_by_id(id)

    def get_by_ids(self, ids):
        return self._mzml_parser.get_by_ids(ids)

    def get_by_index(self, i):
        return self._mzml_parser.get_by_index(i)

    def get_by_indexes(self, indexes):
        return self._mzml_parser.get_by_indexes(indexes)

    def get_by_index_slice(self, s):
        return self._mzml_parser.get_by_index_slice(s)

    def get_by_key_slice(self, s):
        return self._mzml_parser.get_by_key_slice(s)

    def __contains__(self, key):
        return key in self.index

    def __getitem__(self, i):
        return self._mzml_parser[i]

    def __len__(self):
        return len(self._mzml_parser)

    def __iter__(self):
        return iter(self._mzml_parser)

    def __next__(self):
        return next(self._mzml_parser)

    def next(self):
        return self.__next__()

    def __reduce__(self):
        # Support pickling by re-opening from the constructor arguments.
        return self.__class__, (self.path, self._hdfargs, self._mzmlargs, self._allow_updates)

    def close(self):
        self.handle.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def iterfind(self, *args, **kwargs):
        # Delegate to the inner parser but re-point the iterator's parser
        # attribute at this wrapper so downstream code sees the mzMLb reader.
        iterf = self._mzml_parser.iterfind(*args, **kwargs)
        iterf.parser = self
        return iterf

    def _iterfind_impl(self, path, *args, **kwargs):
        return self._mzml_parser._iterfind_impl(path, *args, **kwargs)

    @property
    def index(self):
        return self._mzml_parser.index

    @property
    def _offset_index(self):
        return self._mzml_parser._offset_index

    @property
    def default_index(self):
        return self._mzml_parser.default_index

    def _get_time(self, scan):
        return self._mzml_parser._get_time(scan)

    @property
    def mzml_parser(self):
        return self._mzml_parser

    def _task_map_iterator(self):
        """Returns the :class:`Iterable` to use when dealing work items onto the input IPC
        queue used by :meth:`map`

        Returns
        -------
        :class:`Iterable`
        """
        return iter(self.index[self._default_iter_tag])

    def read(self, n=-1):
        return self._mzml_parser.read(n)

    def reset(self):
        self._mzml_parser.reset()

    def seek(self, offset, whence=0):
        self._mzml_parser.seek(offset, whence)

    def tell(self):
        return self._mzml_parser.tell()

    def get_dataset(self, name):
        '''Get an HDF5 dataset by its name or path relative to
        the root node.

        .. warning::
            Because this accesses HDF5 data directly, it may be possible to mutate
            the underlying file if :attr:`allow_updates` is :const:`True`.

        Parameters
        ----------
        name : :class:`str`
            The dataset name or path.

        Returns
        -------
        :class:`h5py.Dataset` or :class:`h5py.Group`

        Raises
        ------
        KeyError :
            The name is not found.
        '''
        return self.handle[name]
def read(source, dtype=None):
    """Parse `source` and iterate through spectra.

    Parameters
    ----------
    source : str or file
        A path to a target mzMLb file or the file object itself.
    dtype : type or dict, optional
        dtype to convert arrays to, one for both m/z and intensity arrays or one for each key.
        If :py:class:`dict`, keys should be 'm/z array' and 'intensity array'.

    Returns
    -------
    out : :py:class:`MzMLb`
        A reader object, iterable over the dicts with spectrum properties.
    """
    reader = MzMLb(source, dtype=dtype)
    return reader
# The MzMLb class is detached from the normal :class:`FileReader`-based inheritance tree;
# this grafts it back on for :func:`isinstance` and :func:`issubclass` tests at least.
FileReader.register(MzMLb)
version_info = xml._make_version_info(MzMLb)
# Earlier functional-style implementation kept for reference:
# chain = aux._make_chain(read, 'read')
chain = aux.ChainBase._make_chain(MzMLb)
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/mztab.py | pyteomics/mztab.py | """
mztab - mzTab file reader
=========================
Summary
-------
`mzTab <https://github.com/HUPO-PSI/mzTab>`_ is one of the standards
developed by the Proteomics Informatics working group of the HUPO Proteomics
Standard Initiative.
This module provides a way to read mzTab files into a collection of
:py:class:`pandas.DataFrame` instances in memory, along with a mapping
of the file-level metadata. MzTab specifications 1.0 and 2.0 are supported.
Data access
-----------
:py:class:`MzTab` - a class representing a single mzTab file.
Helpers
-------
:py:class:`Group` - a collection of metadata relating to one entity.
Internals
---------
:py:class:`_MzTabTable` - a single table in an mzTab file.
Property Management
~~~~~~~~~~~~~~~~~~~
:mod:`mztab` uses metaprogramming to generate its metadata accessors, generated by
these classes working in concert.
:py:class:`MetadataBackedProperty`
:py:class:`MetadataBackedCollection`
:py:class:`MetadataPropertyAnnotator`
-------------------------------------------------------------------------------
"""
import re
import warnings
try:
import pandas as pd
except ImportError:
pd = None
from collections import OrderedDict
from pyteomics.auxiliary import _file_obj
from pyteomics.auxiliary import cvstr
from pyteomics.auxiliary.utils import add_metaclass
def _require_pandas():
if pd is None:
raise ImportError(
"To load an mzTab file into pandas.DataFrame objects, you must install pandas!")
class MetadataBackedProperty(object):
    '''A data descriptor whose value lives in the owning instance's
    :attr:`metadata` mapping rather than in the instance itself.'''

    def __init__(self, name, variant_required=None):
        self.name = name
        self.variant_required = variant_required if variant_required is not None else ()
        self.__doc__ = self.build_docstring()

    def __repr__(self):
        return "{self.__class__.__name__}(name={self.name!r}, variant_required={self.variant_required})".format(self=self)

    def __get__(self, obj, objtype=None):
        if obj is None and objtype is not None:
            # Class-level access: expose the descriptor object itself.
            return self
        value = obj.metadata.get(self.name)
        if value is not None:
            return value
        # A missing key is an error only for document variants that mandate it.
        if self.variant_required and obj.variant in self.variant_required:
            raise AttributeError("{0} is missing from a mzTab-\"{1}\" document where it is required!".format(
                self.name, obj.variant))
        return value

    def __set__(self, obj, value):
        obj.metadata[self.name] = value

    def __delete__(self, obj):
        del obj.metadata[self.name]

    def build_docstring(self):
        """Compose the generated property's docstring from the key name and
        the variants that require it."""
        doc = '''Accesses the {self.name!r} key in the :attr:`metadata` mapping attached
        to this object.
        '''
        if self.variant_required:
            plural = 's' if len(self.variant_required) > 1 else ''
            requires = ' or '.join('-%s' % v for v in self.variant_required)
            doc += '''
        This key must be present when the file is of {requires} variant{plural}.
        '''.format(requires=requires, plural=plural)
        doc += '''
        Returns
        -------
        object
        '''
        return doc.format(self=self)
class MetadataBackedCollection(object):
    '''A descriptor that exposes a gathered collection (e.g. ``ms_run[]``)
    assembled on the fly from the owning instance's :attr:`metadata`.'''

    def __init__(self, name, variant_required=None):
        self.name = name
        self.variant_required = variant_required if variant_required is not None else ()
        self.__doc__ = self.build_docstring()

    def __get__(self, obj, objtype=None):
        if obj is None and objtype is not None:
            # Class-level access: expose the descriptor object itself.
            return self
        value = obj.gather(obj.metadata).get(self.name)
        if value is not None:
            return value
        # A missing group is an error only for variants that mandate it.
        if self.variant_required and obj.variant in self.variant_required:
            raise AttributeError("{0} is missing from a mzTab-\"{1}\" document where it is required!".format(
                self.name, obj.variant))
        return value

    def build_docstring(self):
        """Compose the generated property's docstring from the group name and
        the variants that require it."""
        doc = '''Accesses the {self.name!r} key group gathered in the :attr:`metadata` mapping attached
        to this object.
        This group is dynamically generated on each access and may be expensive for repeated use.
        '''
        if self.variant_required:
            plural = 's' if len(self.variant_required) > 1 else ''
            requires = ' or '.join('-%s' % v for v in self.variant_required)
            doc += '''
        This key must be present when the file is of {requires} variant{plural}.
        '''.format(requires=requires, plural=plural)
        doc += '''
        Returns
        -------
        :class:`~.Group`
        '''
        return doc.format(self=self)
class MetadataPropertyAnnotator(type):
    '''A simple metaclass to do some class-creation time introspection
    and descriptor binding.

    Uses a list of strings or 3-tuples from :attr:`__metadata_properties__` to
    bind :class:`MetadataBackedProperty` or :class:`MetadataBackedCollection`
    onto the class during its creation.

    The specification for a property is a tuple of three values:

    1. The metadata key to fetch
    2. The property name to expose on the object
    3. The variant(s) which require this metadata key be present

    :obj:`("mzTab-version", "version", ("M", "P"))` would be interpreted as
    "Expose a property ``version`` on instances which serves the key
    ``mzTab-version`` from the instance's :attr:`metadata`, and raise an error
    if it is absent in the "M" or "P" variants."

    Alternatively a specification may be a single string which will be interpreted
    as the metadata key, and used to generate the property name replacing all '-'
    with '_' and assumed to be optional in all variants.

    If a metadata key ends with "[]" the property is assumed to be a collection. mzTab
    makes heavy use of "<collection_name>[<index>]..." keys to define groups of homogeneous
    object types, often with per-element attributes.

    .. code-block::

        variable_mod[1]    CHEMMOD:15.9949146221
        variable_mod[1]-site    M
        variable_mod[1]-position    Anywhere
        variable_mod[2]    CHEMMOD:42.0105646863
        variable_mod[2]-site    N-term
        variable_mod[2]-position    Protein N-term

    A specification :obj:`("variable_mod[]", "variable_mods", ())` would create a property
    that returns:

    .. code-block:: python

        >>> instance.variable_mods
        Group([(1,
                {'name': 'CHEMMOD:15.9949146221',
                 'position': 'Anywhere',
                 'site': 'M'}),
               (2,
                {'name': 'CHEMMOD:42.0105646863',
                 'position': 'Protein N-term',
                 'site': 'N-term'})])

    For precise description of the property collection algorithm, see
    :meth:`~_MzTabParserBase.collapse_properties` and
    :meth:`~_MzTabParserBase.gather`.

    If any base classes have a :attr:`__metadata_properties__` attribute, it will
    also be included unless :attr:`__inherit_metadata_properties__` is set to
    :const:`False`. Any names explicitly set by the current class override this
    automatic property generation.
    '''

    def __new__(mcls, name, bases, attrs):
        props = attrs.get('__metadata_properties__', [])
        inherit_props = attrs.get("__inherit_metadata_properties__", True)
        # Gather from parent classes so we can use inheritance for overriding this
        # behavior too.
        # NOTE(review): when the new class declares its own list, extend()
        # mutates that list in place, permanently appending the inherited
        # specs to the class's __metadata_properties__ attribute.
        if inherit_props:
            for base in bases:
                props.extend(getattr(base, '__metadata_properties__', []))
        keys = set(attrs)
        # Iterate in reverse to ensure that classes nearer to the new classes override
        # more basal classes, ending with the new class to make sure overrides are
        # applied.
        for prop in reversed(props):
            # If the property definition is a single string, interpret the specification
            # as the property name, and apply some simple normalization to make it a valid
            # Python attribute name and assume the property is always optional.
            if isinstance(prop, str):
                prop_name = prop
                attr_name = prop_name.replace("mzTab-", '').replace('-', '_')
                variant_required = None
            else:
                # Otherwise unpack the triple
                prop_name, attr_name, variant_required = prop
            # Attach the new descriptor to the class definition to be created. These descriptors
            # will then be used when instances of that class try to get/set those attribute names.
            # Names the class body set explicitly always win over generated ones.
            if attr_name in keys:
                continue
            if prop_name.endswith('[]'):
                # If the property name ends with "[]", then we're dealing with a collection so
                # use the :class:`MetadataBackedCollection` descriptor
                attrs[attr_name] = MetadataBackedCollection(
                    prop_name[:-2], variant_required=variant_required)
            else:
                # Otherwise it is a scalar-valued property, using the :class:`MetadataBackedProperty`
                # descriptor
                prop = attrs[attr_name] = MetadataBackedProperty(
                    prop_name, variant_required=variant_required)

        return super(MetadataPropertyAnnotator, mcls).__new__(mcls, name, bases, attrs)
class _MzTabParserBase(object):
    """Shared parsing helpers for mzTab readers: parameter parsing, cell value
    coercion, and reconstruction of the flat ``entity[index]-prop`` metadata
    namespace into nested structures."""

    def _parse_param(self, tuplet):
        """Parse a controlled vocabulary or user specified parameter tuplet
        into a Python object

        Parameters
        ----------
        tuplet : str
            A square brace enclosed tuplet of values describing the parameter

        Returns
        -------
        tuple or :class:`~.cvstr`
            ``(name, value)`` when a value is present, otherwise the
            accession-carrying name alone.
        """
        # NOTE(review): a parameter whose name itself contains a comma would
        # break this simple four-way split — TODO confirm whether quoted
        # params need handling here.
        cv, acc, name, value = re.split(r"\s*,\s*", tuplet[1:-1])
        param_name = cvstr(name, acc)
        if value:
            return (param_name, value)
        else:
            # The parentheses do not form a tuple; this returns the bare name.
            return (param_name)

    def collapse_properties(self, proplist):
        '''Collapse a flat property list into a hierchical structure.

        This is intended to operate on :py:class:`Mapping` objects, including
        :class:`dict`, :class:`pandas.Series` and :class:`pandas.DataFrame`.

        .. code-block:: python

            {
              "ms_run[1]-format": "Andromeda:apl file format",
              "ms_run[1]-location": "file://...",
              "ms_run[1]-id_format": "scan number only nativeID format"
            }

        to

        .. code-block:: python

            {
              "ms_run": [
                {
                  "format": "Andromeda:apl file format",
                  "location": "file://...",
                  "id_format": "scan number only nativeID format"
                }
              ]
            }

        Parameters
        ----------
        proplist: :class:`Mapping`
            Key-Value pairs to collapse

        Returns
        -------
        :class:`OrderedDict`:
            The collapsed property list
        '''
        entities = OrderedDict()
        rest = {}
        # Pass 1: split "entity-prop" keys into per-entity dicts; keys with no
        # dash are set aside in `rest`.
        for key, value in proplist.items():
            try:
                entity, prop_name = key.rsplit("-", 1)
            except ValueError:
                rest[key] = value
                continue
            try:
                entity_dict = entities[entity]
            except KeyError:
                entity_dict = entities[entity] = {}
            entity_dict[prop_name] = value
        # Pass 2: a key that exactly names an entity carries that entity's
        # display name; fill it in only if no 'name' sub-property was given.
        for key, value in proplist.items():
            if key in entities:
                entity = entities[key]
                if 'name' not in entity:
                    entity['name'] = value
        # Pass 3: fold the remaining dash-less keys back in — as an entity's
        # name when the entity exists, otherwise as a plain scalar entry.
        # NOTE(review): this unconditionally overwrites a 'name' set in pass 2
        # with the same value; the two passes look partially redundant.
        for key, value in rest.items():
            if key in entities:
                entities[key]['name'] = value
            else:
                entities[key] = value
        return entities

    def _collapse_collections(self, entities):
        """Gather ``entity[index]`` keys into nested :class:`Group` mappings
        keyed by entity name and then integer index."""
        gathered = Group()
        for key, props in entities.items():
            if '[' in key:
                k, ix = key.split('[', 1)
                if '[' in ix:
                    # If we have multiple [ in a key, we are dealing with a path
                    path = extract_path(key)
                    # NOTE(review): each loop turn re-roots at `gathered[k]`,
                    # so paths deeper than two segments would not descend
                    # cumulatively; mzTab keys nest at most two levels in
                    # practice — TODO confirm.
                    for k, ix in path[:-1]:
                        store = gathered[k]
                        store = store[int(ix)]
                    k, ix = path[-1]
                    store[k][int(ix)] = props
                else:
                    ix = int(ix[:-1])
                    gathered[k][ix] = props
            else:
                gathered[key] = props
        return gathered

    def _cast_value(self, value):
        """Convert a cell value to the appropriate Python type

        Parameters
        ----------
        value : str
            The cell value as text

        Returns
        -------
        object
            The most specialized type recognized
        """
        if value == 'null':
            return None
        # is it a parameter?
        if value.startswith("["):
            try:
                # a pipe separates a list of parameters
                if "|" in value:
                    return [self._cast_value(v) for v in value.split("|")]
                else:
                    return self._parse_param(value)
            except ValueError:
                return value
        else:
            # begin guessing dtype: int first, then float, else leave as text
            try:
                value = int(value)
            except ValueError:
                try:
                    value = float(value)
                except ValueError:
                    pass
            return value

    def gather(self, mapping):
        '''Collapse property lists using :meth:`collapse_properties`
        and then gather collections of entites into lists.

        Parameters
        ----------
        mapping : dict
            The flattened hierarchy of properties to re-construct

        Returns
        -------
        Group :
            A :class:`Group` of all entities and collections of entities
        '''
        return self._collapse_collections(self.collapse_properties(mapping))
class _MzTabTable(_MzTabParserBase):
    """An internal class for accumulating information about an single table
    represented in an mzTab file

    Attributes
    ----------
    header : list
        The column names for the table
    name : str
        The table's name, human readable
    rows : list
        An accumulator of table rows
    """

    def __init__(self, name, header=None, rows=None):
        if rows is None:
            rows = []
        self.name = name
        self.header = header
        self.rows = rows

    def __repr__(self):
        n_cols = len(self.header) if self.header is not None else 0
        n_rows = len(self.rows)
        template = "<_MzTabTable {name} with {n_cols} columns and {n_rows} rows>"
        return template.format(n_cols=n_cols, n_rows=n_rows, name=self.name)

    def add(self, row):
        # Coerce each text cell to the most specific type available.
        self.rows.append([self._cast_value(v) for v in row])

    def __len__(self):
        return len(self.rows)

    def __getitem__(self, i):
        """Fetch row(s) by integer index or slice, re-nesting each row's flat
        column namespace via :meth:`gather`.

        Raises
        ------
        TypeError
            If `i` is neither an int nor a slice.
        """
        if isinstance(i, int):
            return self.gather({h: r for h, r in zip(self.header, self.rows[i])})
        if isinstance(i, slice):
            # Bug fix: use slice.indices() so empty slices (stop == 0),
            # negative bounds, and negative steps follow normal sequence
            # semantics; previously `i.stop or len(self)` made `table[0:0]`
            # return every row.
            return [self[j] for j in range(*i.indices(len(self)))]
        raise TypeError("Cannot access table with object of type %r" % type(i))

    def as_dict(self):
        """Return the table as a plain dict of row dicts plus the table name."""
        return {"rows": [dict(zip(self.header, row)) for row in self.rows],
                "name": self.name}

    def as_df(self, index=None):
        """Convert the table to a DataFrame in memory.

        Parameters
        ----------
        index : str, optional
            Column to use as the frame's index (kept as a column too).

        Returns
        -------
        pd.DataFrame
        """
        _require_pandas()
        table = pd.DataFrame(data=self.rows, columns=self.header)
        if index is not None and len(table.index) > 0:
            table = table.set_index(index, drop=False)
        table.name = self.name
        return table

    def clear(self):
        """Discard the accumulated header and rows."""
        self.header = None
        self.rows = []
# Values accepted by MzTab(table_format=...) selecting how section tables are
# materialized: pandas DataFrames, plain dicts, or raw _MzTabTable objects.
DATA_FRAME_FORMAT = 'df'
DICT_FORMAT = 'dict'
RAW_FORMAT = 'raw'
# Matches one "name[index]" segment, optionally followed by a '_' separator.
PATH_PARSER = re.compile(r"([^\[]+)\[(\d+)\]_?")


def extract_path(path):
    '''Parse ``key[index]_next_key[next_index]...`` sequences into
    lists of ``(key, index)`` pairs.

    Parameters
    ----------
    path : str
        The path key to parse

    Returns
    -------
    list
        ``(name, int(index))`` pairs, one per matched segment.
    '''
    return [(segment, int(position)) for segment, position in PATH_PARSER.findall(path)]
class Group(OrderedDict):
    '''A type for holding collections of arbitrarily nested keys from rows
    and metadata mappings.

    An autovivifying :class:`OrderedDict`: looking up a missing key creates
    and stores a fresh :class:`Group` under it, so deep paths can be built
    without intermediate existence checks. As such it implements the
    :class:`~collections.abc.Mapping` interface.
    '''

    def get_path(self, path, default=None):
        '''As :meth:`get` but over a path key parsed with :func:`extract_path`.

        Parameters
        ----------
        path : str
            The path to search down
        default : object, optional
            The return value when the path is missing

        Returns
        -------
        object
        '''
        segments = extract_path(path)
        if not segments:
            # Not a path-style key; fall back to a plain lookup.
            return self.get(path, default)
        node = self
        # Walk every segment but the last; a miss mid-way yields None.
        for name, index in segments[:-1]:
            node = node.get(name)
            if node is None:
                return None
            node = node.get(int(index))
            if node is None:
                return None
        # The final segment honors the caller-supplied default.
        name, index = segments[-1]
        node = node.get(name)
        if node is None:
            return default
        return node.get(int(index), default)

    def __missing__(self, key):
        child = self.__class__()
        self[key] = child
        return child
@add_metaclass(MetadataPropertyAnnotator)
class MzTab(_MzTabParserBase):
"""Parser for mzTab format files.
Attributes
----------
comments : list
A list of comments across the file
file : _file_obj
A file stream wrapper for the file to be read
metadata : OrderedDict
        A mapping of the file-level metadata, parsed into structured entities.
peptide_table : _MzTabTable or pd.DataFrame
The table of peptides. Not commonly used.
protein_table : _MzTabTable or pd.DataFrame
The table of protein identifications.
small_molecule_table : _MzTabTable or pd.DataFrame
The table of small molecule identifications.
spectrum_match_table : _MzTabTable or pd.DataFrame
The table of spectrum-to-peptide match identifications.
table_format: 'df', 'dict', or callable
The structure type to replace each table with. The string
'df' will use pd.DataFrame instances. 'dict' will create
a dictionary of dictionaries for each table. A callable
will be called on each raw _MzTabTable object
Additional components of :attr:`metadata` are exposed as properties, returning
single values or aggregated collections of objects.
"""
__metadata_properties__ = [
('mzTab-version', 'version', ()),
('mzTab-mode', 'mode', 'P'),
('mzTab-type', 'type', 'P'),
('mzTab-ID', 'id', 'M'),
'title',
'description',
('ms_run[]', 'ms_runs', 'MP'),
('instrument[]', 'instruments', ()),
('software[]', 'software', ()),
('publication[]', 'publications', ()),
('contact[]', 'contacts', ()),
('uri[]', 'uris', ()),
('external_study_uri[]', 'external_study_uris', ()),
('quantification_method', 'quantification_method', 'M'),
('sample[]', 'samples', ()),
('assay[]', 'assays', ()),
('study_variable[]', 'study_variables', 'M'),
('custom[]', 'custom', ()),
('cv[]', 'cvs', 'M'),
('database[]', 'databases', 'M'),
('psm_search_engine_score[]', 'psm_search_engine_scores', ()),
('protein_search_engine_score[]', 'protein_search_engine_scores', ()),
('fixed_mod[]', 'fixed_mods', 'P'),
('variable_mod[]', 'variable_mods', 'P'),
'colunit_protein',
'colunit_peptide',
'colunit_psm',
'colunit_small_molecule',
'false_discovery_rate',
('derivatization_agent[]', 'derivatization_agents', ()),
('small_molecule-quantification_unit',
'small_molecule_quantification_unit', 'M'),
('small_molecule_feature-quantification_unit', 'small_molecule_feature_quantification_unit', 'M'),
('small_molecule-identification_reliability',
'small_molecule_identification_reliability', ()),
('id_confidence_measure[]', 'id_confidence_measures', 'M'),
('colunit-small_molecule', 'colunit_small_molecule', ()),
('colunit-small_molecule_feature', 'colunit_small_molecule_feature', ()),
('colunit-small_molecule_evidence', 'colunit_small_molecule_evidence', ()),
('sample_processing[]', 'sample_processing', ())
]
def __init__(self, path, encoding='utf8', table_format=DATA_FRAME_FORMAT):
    """Open, parse and index an mzTab document.

    Parameters
    ----------
    path : str or file-like
        Location of (or handle to) the mzTab document to read.
    encoding : str, optional
        Text encoding used to read the file. Defaults to 'utf8'.
    table_format : 'df', 'dict', or callable, optional
        Representation to convert each section table into; see the class
        docstring. Defaults to :const:`DATA_FRAME_FORMAT`.
    """
    if table_format == DATA_FRAME_FORMAT:
        # Fail early if pandas is unavailable rather than mid-parse.
        _require_pandas()
    # Must be defined in order for metadata properties to work
    self.variant = None
    self.file = _file_obj(path, mode='r', encoding=encoding)
    self.metadata = OrderedDict()
    self.comments = []
    self._table_format = table_format
    # Parsing pipeline; the order matters: raw tables must exist before
    # parsing, and the schema version (which sets `variant`) must be known
    # before the tables are transformed.
    self._init_tables()
    self._parse()
    self._determine_schema_version()
    self._transform_tables()
@property
def table_format(self):
    """The table representation chosen at construction time:
    'df', 'dict', or a callable (read-only)."""
    return self._table_format
def __getitem__(self, key):
    """Retrieve a section table by its mzTab tag (case-insensitive).

    Recognized keys: 'psm', 'pep', 'prt', 'sml', 'smf', 'sme'.
    Raises :exc:`KeyError` for anything else.
    """
    section_attrs = {
        'psm': 'spectrum_match_table',
        'pep': 'peptide_table',
        'prt': 'protein_table',
        'sml': 'small_molecule_table',
        'smf': 'small_molecule_feature_table',
        'sme': 'small_molecule_evidence_table',
    }
    normalized = key.lower().strip()
    attr = section_attrs.get(normalized)
    if attr is None:
        raise KeyError(normalized)
    return getattr(self, attr)
def __iter__(self):
    """Yield ``(section tag, table)`` pairs for the sections that belong
    to this file's variant ('P' proteomics or 'M' metabolomics)."""
    if self.variant == "P":
        sections = (
            ('PRT', self.protein_table),
            ('PEP', self.peptide_table),
            ('PSM', self.spectrum_match_table),
            ('SML', self.small_molecule_table),
        )
    elif self.variant == "M":
        sections = (
            ('SML', self.small_molecule_table),
            ('SMF', self.small_molecule_feature_table),
            ('SME', self.small_molecule_evidence_table),
        )
    else:
        sections = ()
    for tag, table in sections:
        yield tag, table
def _init_tables(self):
    """Create one empty raw table per mzTab section."""
    for attr, section_name in (
            ('protein_table', 'protein'),
            ('peptide_table', 'peptide'),
            ('spectrum_match_table', 'psm'),
            ('small_molecule_table', 'small molecule'),
            ('small_molecule_feature_table', 'small molecule feature'),
            ('small_molecule_evidence_table', 'small molecule evidence')):
        setattr(self, attr, _MzTabTable(section_name))
def _transform_tables(self):
    """Convert every raw section table to the requested representation
    ('df', 'dict', or a user-supplied callable)."""
    table_attrs = (
        'protein_table', 'peptide_table', 'spectrum_match_table',
        'small_molecule_table', 'small_molecule_feature_table',
        'small_molecule_evidence_table')
    if self._table_format == DATA_FRAME_FORMAT:
        # Only these two sections carry a natural index column; the others
        # fall back to the default integer index.
        index_columns = {'protein_table': 'accession',
                         'spectrum_match_table': 'PSM_ID'}
        for attr in table_attrs:
            raw = getattr(self, attr)
            index = index_columns.get(attr)
            setattr(self, attr, raw.as_df(index) if index else raw.as_df())
    elif self._table_format in (DICT_FORMAT, dict):
        for attr in table_attrs:
            setattr(self, attr, getattr(self, attr).as_dict())
    elif callable(self._table_format):
        for attr in table_attrs:
            setattr(self, attr, self._table_format(getattr(self, attr)))
def _parse(self):
    """Read the file line by line, dispatching each row on its leading tag
    into metadata, comments, section headers, or section rows."""
    # headers
    header_targets = {
        'PRH': 'protein_table',
        'PEH': 'peptide_table',
        'PSH': 'spectrum_match_table',
        'SMH': 'small_molecule_table',
        'SFH': 'small_molecule_feature_table',
        'SEH': 'small_molecule_evidence_table',
    }
    # rows
    row_targets = {
        'PRT': 'protein_table',
        'PEP': 'peptide_table',
        'PSM': 'spectrum_match_table',
        'SML': 'small_molecule_table',
        'SMF': 'small_molecule_feature_table',
        'SME': 'small_molecule_evidence_table',
    }
    for line in self.file:
        tokens = line.strip().split("\t")
        if not tokens:
            continue
        tag = tokens[0]
        if tag == "MTD":
            self.metadata[tokens[1]] = self._cast_value(tokens[2])
        elif tag == 'COM':
            self.comments.append(self._cast_value(tokens[1]))
        elif tag in header_targets:
            getattr(self, header_targets[tag]).header = tokens[1:]
        elif tag in row_targets:
            getattr(self, row_targets[tag]).add(tokens[1:])
def _determine_schema_version(self):
    """Parse the mzTab-version metadata into a numeric version triple and
    a schema variant ('P' proteomics / 'M' metabolomics), with sensible
    fallbacks when the header is missing or malformed."""
    if self.version is None:
        warnings.warn("The mzTab-version metadata header was missing. Assuming the schema version is 1.0.0")
        version = "1.0.0"
    else:
        version = str(self.version)
    self.version = version
    match = re.search(r"(?P<schema_version>\d+(?:\.\d+(?:\.\d+)?)?)(?:-(?P<schema_variant>[MP]))?", version)
    if match is None:
        warnings.warn("mzTab-version does not match the expected pattern: %r" % version)
        version_parsed, variant = '1.0.0', 'P'
    else:
        version_parsed, variant = match.groups()
        # Proteomics is the default when no variant suffix is present.
        variant = variant or "P"
    parts = [int(v) for v in version_parsed.split(".")]
    # Pad to a full (major, minor, patch) triple.
    parts.extend([0] * (3 - len(parts)))
    self.num_version = parts
    self.variant = variant
def keys(self):
    """Return a view of the section tags present for this file's variant."""
    mapping = OrderedDict(list(self))
    return mapping.keys()
def values(self):
    """Return a view of the section tables for this file's variant."""
    mapping = OrderedDict(list(self))
    return mapping.values()
def items(self):
    """Return a view of ``(section tag, table)`` pairs for this file's variant."""
    mapping = OrderedDict(list(self))
    return mapping.items()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/parser.py | pyteomics/parser.py | """
parser - operations on modX peptide sequences
=============================================
modX is a simple extension of the `IUPAC one-letter peptide sequence
representation <http://www.chem.qmul.ac.uk/iupac/AminoAcid/A2021.html>`_.
The labels (or codes) for the 20 standard amino acids in modX are the same as
in IUPAC nomenclature. A label for a modified amino acid has a general
form of 'modX', i.e.:
- it starts with an arbitrary number of lower-case symbols or numbers
(a modification);
- it ends with a single upper-case symbol (an amino acid residue).
The valid examples of modX amino acid labels are: 'G', 'pS', 'oxM'. This rule
allows to combine read- and parseability.
Besides the sequence of amino acid residues, modX has a rule to specify
terminal modifications (alternative terminal groups) of a polypeptide.
Such a label should start or end with a hyphen. The default N-terminal amine group and C-terminal
carboxyl group may not be shown explicitly.
Therefore, valid examples of peptide sequences in modX are: "GAGA",
"H-PEPTIDE-OH", "H-TEST-NH2". It is not recommmended to specify only one
terminal group.
Operations on polypeptide sequences
-----------------------------------
:py:func:`parse` - convert a sequence string into a list of
amino acid residues.
:py:func:`to_string` - convert a parsed sequence to a string.
:py:func:`to_proforma` - convert a (parsed) *modX* sequence to ProForma.
:py:func:`amino_acid_composition` - get numbers of each amino acid
residue in a peptide.
:py:func:`cleave`, :py:func:`icleave`, :py:func:`xcleave` - cleave a polypeptide using a given rule of
enzymatic digestion.
:py:func:`num_sites` - count the number of cleavage sites in a sequence.
:py:func:`isoforms` - generate all unique modified peptide sequences
given the initial sequence and modifications.
Auxiliary commands
------------------
:py:func:`coverage` - calculate the sequence coverage of a protein by peptides.
:py:func:`length` - calculate the number of amino acid
residues in a polypeptide.
:py:func:`valid` - check if a sequence can be parsed successfully.
:py:func:`fast_valid` - check if a sequence contains of known one-letter
codes.
:py:func:`is_modX` - check if supplied code corresponds to a modX label.
:py:func:`is_term_group` - check if supplied code corresponds to a
terminal group.
Data
----
:py:data:`std_amino_acids` - a list of the 20 standard amino acid IUPAC codes.
:py:data:`std_nterm` - the standard N-terminal modification (the
unmodified group is a single atom of hydrogen).
:py:data:`std_cterm` - the standard C-terminal modification (the
unmodified group is hydroxyl).
:py:data:`std_labels` - a list of all standard sequence
elements, amino acid residues and terminal modifications.
:py:data:`expasy_rules` and :py:data:`psims_rules` - two dicts with the regular expressions of
cleavage rules for the most popular proteolytic enzymes.
-------------------------------------------------------------------------------
"""
# Copyright 2012 Anton Goloborodko, Lev Levitsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import deque
import itertools as it
import warnings
from .auxiliary import PyteomicsError, memoize, BasicComposition, cvstr, cvquery
std_amino_acids = ['Q', 'W', 'E', 'R', 'T', 'Y', 'I', 'P', 'A', 'S',
'D', 'F', 'G', 'H', 'K', 'L', 'C', 'V', 'N', 'M']
"""modX labels for the 20 standard amino acids."""
std_nterm = 'H-'
"""modX label for the unmodified N-terminus."""
std_cterm = '-OH'
"""modX label for the unmodified C-terminus."""
std_labels = std_amino_acids + [std_nterm, std_cterm]
"""modX labels for the standard amino acids and unmodified termini."""
_nterm_group = r'[^-]+-$'
_cterm_group = r'-[^-]+$'
def is_term_group(label):
    """Tell whether `label` denotes a terminal group: it must start or end
    with a single hyphen, with at least one non-hyphen character on the
    other side.

    Parameters
    ----------
    label : str

    Returns
    -------
    out : bool

    Examples
    --------
    >>> is_term_group('A')
    False
    >>> is_term_group('Ac-')
    True
    >>> is_term_group('-customGroup')
    True
    >>> is_term_group('this-group-')
    False
    >>> is_term_group('-')
    False
    """
    # Inlined equivalents of the module-level _nterm_group / _cterm_group
    # patterns: N-terminal groups look like '<no hyphens>-', C-terminal
    # groups like '-<no hyphens>'.
    matched = re.match(r'[^-]+-$', label) or re.match(r'-[^-]+$', label)
    return matched is not None
# Backward-compatible alias kept for callers using the historical name.
is_term_mod = is_term_group
def match_modX(label):
    """Match `label` against the modX grammar.

    Parameters
    ----------
    label : str

    Returns
    -------
    out : re.match or None
        A match with two groups -- the (possibly empty) modification prefix
        and the single upper-case residue letter -- or :py:const:`None` when
        `label` is not a single modX label.
    """
    # Inlined equivalent of the module-level _modX_single pattern: any run
    # of non-upper-case, non-hyphen characters followed by exactly one
    # upper-case residue letter.
    return re.match(r'^([^A-Z-]*)([A-Z])$', label)
def is_modX(label):
    """Tell whether `label` is a valid single 'modX' label.

    Parameters
    ----------
    label : str

    Returns
    -------
    out : bool

    Examples
    --------
    >>> is_modX('M')
    True
    >>> is_modX('oxM')
    True
    >>> is_modX('oxMet')
    False
    >>> is_modX('160C')
    True
    """
    return match_modX(label) is not None
def length(sequence, **kwargs):
    """Count the amino acid residues in a polypeptide written in modX
    notation; terminal groups do not count as residues.

    Parameters
    ----------
    sequence : str or list or dict
        A string with a polypeptide sequence, a list with a parsed sequence or
        a dict of amino acid composition.
    labels : list, optional
        A list of allowed labels for amino acids and terminal modifications.

    Returns
    -------
    out : int

    Examples
    --------
    >>> length('PEPTIDE')
    7
    >>> length('H-PEPTIDE-OH')
    7
    """
    if not sequence:
        return 0
    if isinstance(sequence, dict):
        # Composition dict: sum counts, skipping terminal-group keys.
        return sum(count for aa, count in sequence.items()
                   if not is_term_group(aa))
    if isinstance(sequence, (str, list)):
        parsed = parse(sequence, **kwargs) if isinstance(sequence, str) else sequence
        terminal = int(is_term_group(parsed[0])) + int(is_term_group(parsed[-1]))
        return len(parsed) - terminal
    raise PyteomicsError('Unsupported type of sequence.')
def _split_label(label):
    """Split a modX label into ``(mod, X)``, or ``(X,)`` when unmodified.

    Raises :py:exc:`PyteomicsError` if `label` is not a valid modX label.
    """
    matched = match_modX(label)
    if matched is None:
        raise PyteomicsError('Cannot split a non-modX label: %s' % label)
    mod, X = matched.groups()
    return (mod, X) if mod else (X,)
# modX grammar building blocks:
#   _modX_sequence -- a whole sequence: optional N-terminal group, the
#                     residue body, optional C-terminal group
#   _modX_group    -- one residue together with its (possibly empty)
#                     modification prefix
#   _modX_split    -- like _modX_group but captures (mod, residue) separately
#   _modX_single   -- a string that is exactly one modX label
_modX_sequence = re.compile(r'^([^-]+-)?((?:[^A-Z-]*[A-Z])+)(-[^-]+)?$')
_modX_group = re.compile(r'[^A-Z-]*[A-Z]')
_modX_split = re.compile(r'([^A-Z-]*)([A-Z])')
_modX_single = re.compile(r'^([^A-Z-]*)([A-Z])$')
def parse(sequence, show_unmodified_termini=False, split=False, allow_unknown_modifications=False, **kwargs):
    """Parse a sequence string written in modX notation into a list of
    labels or (if `split` argument is :py:const:`True`) into a list of
    tuples representing amino acid residues and their modifications.

    Parameters
    ----------
    sequence : str
        The sequence of a polypeptide.
    show_unmodified_termini : bool, optional
        If :py:const:`True` then the unmodified N- and C-termini are explicitly
        shown in the returned list. Default value is :py:const:`False`.
    split : bool, optional
        If :py:const:`True` then the result will be a list of tuples with 1 to 4
        elements: terminal modification, modification, residue. Default value is
        :py:const:`False`.
    allow_unknown_modifications : bool, optional
        If :py:const:`True` then do not raise an exception when an unknown
        modification of a known amino acid residue is found in the sequence.
        This also includes terminal groups.
        Default value is :py:const:`False`.

        .. note::
            Since version 2.5, this parameter has effect only if `labels`
            are provided.
    labels : container, optional
        A container of allowed labels for amino acids,
        modifications and terminal modifications.
        If not provided, no checks will be done.
        Separate labels for modifications (such as 'p' or 'ox')
        can be supplied, which means they are applicable to all residues.

        .. warning::
            If `show_unmodified_termini` is set to :py:const:`True`, standard
            terminal groups need to be present in `labels`.

        .. warning::
            Avoid using sequences with only one terminal group, as they are
            ambiguous. If you provide one, `labels` (or :py:const:`std_labels`)
            will be used to resolve the ambiguity.

    Returns
    -------
    out : list
        List of tuples with labels of modifications and amino acid residues.

    Examples
    --------
    >>> parse('PEPTIDE', split=True)
    [('P',), ('E',), ('P',), ('T',), ('I',), ('D',), ('E',)]
    >>> parse('H-PEPTIDE')
    ['P', 'E', 'P', 'T', 'I', 'D', 'E']
    >>> parse('PEPTIDE', show_unmodified_termini=True)
    ['H-', 'P', 'E', 'P', 'T', 'I', 'D', 'E', '-OH']
    >>> parse('TEpSToxM', labels=std_labels + ['pS', 'oxM'])
    ['T', 'E', 'pS', 'T', 'oxM']
    >>> parse('zPEPzTIDzE', True, True, labels=std_labels+['z'])
    [('H-', 'z', 'P'), ('E',), ('P',), ('z', 'T'), ('I',), ('D',), ('z', 'E', '-OH')]
    >>> parse('Pmod1EPTIDE')
    ['P', 'mod1E', 'P', 'T', 'I', 'D', 'E']
    """
    sequence = str(sequence)
    try:
        # _modX_sequence captures (N-terminal group, residue body, C-terminal
        # group); a failed match returns None, whose .groups() raises
        # AttributeError below.
        n, body, c = re.match(_modX_sequence, sequence).groups()
    except AttributeError:
        raise PyteomicsError('Not a valid modX sequence: ' + sequence)

    # Check for allowed labels, if they were explicitly given
    labels = kwargs.get('labels')
    # labels help save the day when only one terminal group is given
    if c is None and n is not None:
        if labels is None:
            labels = std_labels
        # we can try to resolve the ambiguity
        if n != std_nterm and n not in labels and valid(n[:-1], labels=labels):
            # n is the body then
            c = '-' + body
            body = n[:-1]
            n = None

    # Actual parsing
    if split:
        # In split mode each residue becomes a tuple; a bare residue with no
        # modification collapses to a 1-tuple.
        parsed_sequence = [g if g[0] else (g[1],) for g in re.findall(
            _modX_split, body)]
    else:
        parsed_sequence = re.findall(_modX_group, body)
    # Fall back to the standard termini when none were given explicitly.
    nterm, cterm = (n or std_nterm), (c or std_cterm)

    # Check against `labels` if given
    if labels is not None:
        labels = set(labels)
        for term, std_term in zip([n, c], [std_nterm, std_cterm]):
            if term and term not in labels and not allow_unknown_modifications:
                raise PyteomicsError('Unknown label: {}'.format(term))
        for group in parsed_sequence:
            if split:
                mod, X = group if len(group) == 2 else ('', group[0])
            else:
                mod, X = re.match(_modX_split, group).groups()
            # A group is acceptable when the full 'modX' label is known, or
            # the residue is known and the modification is either known or
            # explicitly allowed to be unknown.
            if ((not mod) and X not in labels) or not ((mod + X in labels) or (
                X in labels and (
                    mod in labels or allow_unknown_modifications))):
                raise PyteomicsError('Unknown label: {}'.format(group))

    # Append terminal labels
    if show_unmodified_termini or nterm != std_nterm:
        if split:
            parsed_sequence[0] = (nterm,) + parsed_sequence[0]
        else:
            parsed_sequence.insert(0, nterm)
    if show_unmodified_termini or cterm != std_cterm:
        if split:
            parsed_sequence[-1] = parsed_sequence[-1] + (cterm,)
        else:
            parsed_sequence.append(cterm)

    return parsed_sequence
def valid(*args, **kwargs):
    """Try to parse sequence and catch the exceptions.
    All parameters are passed to :py:func:`parse`.

    Returns
    -------
    out : bool
        :py:const:`True` if the sequence was parsed successfully, and
        :py:const:`False` otherwise.
    """
    try:
        parse(*args, **kwargs)
        return True
    except PyteomicsError:
        return False
def fast_valid(sequence, labels=frozenset(std_labels)):
    """Iterate over `sequence` and check if all items are in `labels`.
    With strings, this only works as expected on sequences without
    modifications or terminal groups.

    Parameters
    ----------
    sequence : iterable (expectedly, str)
        The sequence to check. A valid sequence would be a string of
        labels, all present in `labels`.
    labels : iterable, optional
        An iterable of known labels.

    Returns
    -------
    out : bool
    """
    # The default is a frozenset (evaluated once at import time); being
    # immutable, it cannot be accidentally mutated between calls the way
    # the previous `set(...)` default could (mutable-default pitfall).
    return set(sequence).issubset(labels)
def to_string(parsed_sequence, show_unmodified_termini=True):
    """Create a string from a parsed sequence.

    Parameters
    ----------
    parsed_sequence : iterable
        Expected to be in one of the formats returned by
        :py:func:`parse`, i.e. list of labels or list of tuples.
    show_unmodified_termini : bool, optional
        Defines the behavior towards standard terminal groups in the input.
        :py:const:`True` means that they will be preserved if present (default).
        :py:const:`False` means that they will be removed. Standard terminal
        groups will not be added if not shown in `parsed_sequence`,
        regardless of this setting.

    Returns
    -------
    sequence : str
    """
    parsed_sequence = list(parsed_sequence)
    labels = []
    nterm = parsed_sequence[0]
    cterm = parsed_sequence[-1]

    # First element being a str means non-split format (list of labels);
    # otherwise each element is a tuple from parse(..., split=True).
    if isinstance(nterm, str):
        if nterm != std_nterm or show_unmodified_termini:
            labels.append(nterm)
        labels.extend(parsed_sequence[1:-1])
        if len(parsed_sequence) > 1 and (cterm != std_cterm or show_unmodified_termini):
            labels.append(cterm)
    else:
        if len(parsed_sequence) == 1:
            # Single tuple may carry both terminal groups; strip the standard
            # ones when they are not to be shown.
            g = nterm
            if nterm[0] == std_nterm and not show_unmodified_termini:
                g = g[1:]
            if nterm[-1] == std_cterm and not show_unmodified_termini:
                g = g[:-1]
            return ''.join(g)

        # First tuple: drop a leading standard N-terminus unless shown.
        if nterm[0] != std_nterm or show_unmodified_termini:
            labels.append(''.join(nterm))
        else:
            labels.append(''.join(nterm[1:]))

        labels.extend(''.join(g) for g in parsed_sequence[1:-1])

        # Last tuple: drop a trailing standard C-terminus unless shown.
        if len(parsed_sequence) > 1:
            if cterm[-1] != std_cterm or show_unmodified_termini:
                labels.append(''.join(cterm))
            else:
                labels.append(''.join(cterm[:-1]))

    return ''.join(labels)
# Backward-compatible alias for to_string.
tostring = to_string
def to_proforma(sequence, **kwargs):
    """Converts a (parsed) *modX* sequence to a basic ProForma string.
    Modifications are represented as masses, if those are given in :arg:`aa_mass`,
    as chemical formulas (via :arg:`aa_comp`) or as names (using :arg:`mod_names`).

    Parameters
    ----------
    sequence : str or list
        A *modX* sequence, possibly in the parsed form.
    aa_mass : dict, keyword only, optional
        Used to render modifications as mass shifts.
    aa_comp : dict, keyword only, optional
        Used to render modifications as chemical formulas.
    mod_names : dict or callable, keyword only, optional
        Used to get the rendered name of modification from the mod label.
    prefix : str, keyword only, optional
        Prepend all modification names with the given prefix.

    Returns
    -------
    out : str
        A ProForma sequence.

    Examples
    --------
    >>> to_proforma('PEPTIDE')
    'PEPTIDE'
    >>> to_proforma('Ac-oxMYPEPTIDE-OH', aa_mass={'Ac-': 42.010565}, mod_names={'ox': 'Oxidation'}, prefix='U:')
    '[+42.0106]-M[U:Oxidation]YPEPTIDE'
    >>> to_proforma('oxidationMYPEPTIDE') # last fallback is to just capitalize the label
    'M[Oxidation]YPEPTIDE'
    """
    from . import proforma
    from .mass.mass import std_aa_mass, std_aa_comp

    # A string input is first parsed, then re-dispatched through this function.
    if isinstance(sequence, str):
        return to_proforma(parse(sequence), **kwargs)

    aa_mass = kwargs.get('aa_mass', std_aa_mass)
    aa_comp = kwargs.get('aa_comp', std_aa_comp)
    mod_names = kwargs.get('mod_names', {})
    prefix = kwargs.get('prefix', '')
    if isinstance(mod_names, dict):
        get_name = mod_names.get
    else:
        get_name = mod_names

    def get_tag(label):
        # Resolution order: mass shift, then chemical formula, then name
        # lookup, and finally a capitalized generic fallback (with warning).
        if label in aa_mass:
            return [proforma.MassModification(aa_mass[label])]
        if label in aa_comp:
            return [proforma.FormulaModification(''.join('{}{}'.format(k, v if v not in {0, 1} else '') for k, v in aa_comp[label].items()))]
        name = get_name(label)
        if not name:
            warnings.warn("Unable to resolve label `{}`. "
                "The ProForma string may be invalid. Specify `mod_names`, `aa_mass` or `aa_comp`.".format(label))
            name = label.capitalize()
        return [proforma.GenericModification(prefix + name)]

    # i/j delimit the residue span once terminal groups are peeled off.
    i, j = 0, len(sequence)
    nterm = cterm = None
    pro_sequence = []
    if isinstance(sequence[0], str):  # regular parsed sequence
        if is_term_group(sequence[0]) and sequence[0] != std_nterm:
            nterm = get_tag(sequence[0])
            i = 1
        if is_term_group(sequence[-1]) and sequence[-1] != std_cterm:
            cterm = get_tag(sequence[-1])
            j -= 1
        for label in sequence[i:j]:
            if len(label) == 1:
                pro_sequence.append((label, None))
            else:
                mod, aa = _split_label(label)
                pro_sequence.append((aa, get_tag(mod)))
    else:  # split sequence
        if is_term_group(sequence[0][0]) and sequence[0][0] != std_nterm:
            nterm = get_tag(sequence[0][0])
        if is_term_group(sequence[-1][-1]) and sequence[-1][-1] != std_cterm:
            cterm = get_tag(sequence[-1][-1])
        if len(sequence) == 1:
            # Single tuple may hold (nterm, mod, residue, cterm) in any
            # allowed subset; pick the residue and its optional mod by
            # position relative to the detected termini.
            pro_sequence = [(sequence[0][-2] if cterm else sequence[0][-1], get_tag(sequence[0][1]) if len(sequence[0]) == 4 else None)]
        else:
            pro_sequence.append((sequence[0][-1], get_tag(sequence[0][-2]) if len(sequence[0]) == 3 else None))
            for group in sequence[1:-1]:
                pro_sequence.append((group[-1], get_tag(group[0]) if len(group) == 2 else None))
            if len(sequence[-1]) == 1 or (len(sequence[-1]) == 2 and cterm):
                pro_sequence.append((sequence[-1][0], None))
            else:
                pro_sequence.append((sequence[-1][1], get_tag(sequence[-1][0])))

    return proforma.to_proforma(pro_sequence, n_term=nterm, c_term=cterm)
def amino_acid_composition(sequence, show_unmodified_termini=False, term_aa=False, allow_unknown_modifications=False, **kwargs):
    """Calculate amino acid composition of a polypeptide.

    Parameters
    ----------
    sequence : str or list
        The sequence of a polypeptide or a list with a parsed sequence.
    show_unmodified_termini : bool, optional
        If :py:const:`True` then the unmodified N- and C-terminus are explicitly
        shown in the returned dict. Default value is :py:const:`False`.
    term_aa : bool, optional
        If :py:const:`True` then the terminal amino acid residues are
        artificially modified with `nterm` or `cterm` modification.
        Default value is :py:const:`False`.
    allow_unknown_modifications : bool, optional
        If :py:const:`True` then do not raise an exception when an unknown
        modification of a known amino acid residue is found in the sequence.
        Default value is :py:const:`False`.
    labels : list, optional
        A list of allowed labels for amino acids and terminal modifications.

    Returns
    -------
    out : dict
        A dictionary of amino acid composition.

    Examples
    --------
    >>> amino_acid_composition('PEPTIDE') == \
    {'I': 1, 'P': 2, 'E': 2, 'T': 1, 'D': 1}
    True
    >>> amino_acid_composition('PEPTDE', term_aa=True) == \
    {'ctermE': 1, 'E': 1, 'D': 1, 'P': 1, 'T': 1, 'ntermP': 1}
    True
    >>> amino_acid_composition('PEPpTIDE', labels=std_labels+['pT']) == \
    {'I': 1, 'P': 2, 'E': 2, 'D': 1, 'pT': 1}
    True
    """
    labels = kwargs.get('labels')

    if isinstance(sequence, str):
        parsed_sequence = parse(sequence, show_unmodified_termini,
                                allow_unknown_modifications=allow_unknown_modifications,
                                labels=labels)
    elif isinstance(sequence, list):
        if sequence and isinstance(sequence[0], tuple):
            parsed_sequence = parse(tostring(sequence, True),
                                    show_unmodified_termini,
                                    allow_unknown_modifications=allow_unknown_modifications,
                                    labels=labels)
        else:
            # Work on a copy: the term_aa branch below pops elements, and the
            # previous code mutated the caller's list when it was passed in
            # directly.
            parsed_sequence = list(sequence)
    else:
        # Note the explicit space between the two sentences (the old
        # concatenated literals produced "a sequence.Must be ...").
        raise PyteomicsError('Unsupported type of a sequence. '
                             'Must be str or list, not %s' % type(sequence))

    aa_dict = BasicComposition()

    # Process terminal amino acids.
    if term_aa:
        nterm_aa_position = 1 if is_term_group(parsed_sequence[0]) else 0
        cterm_aa_position = (
            len(parsed_sequence) - 2 if is_term_group(parsed_sequence[-1])
            else len(parsed_sequence) - 1)
        if len(parsed_sequence) > 1:
            aa_dict['cterm' + parsed_sequence.pop(cterm_aa_position)] = 1
        aa_dict['nterm' + parsed_sequence.pop(nterm_aa_position)] = 1

    # Process core amino acids.
    for aa in parsed_sequence:
        aa_dict[aa] += 1

    return aa_dict
@memoize()
def cleave(*args, **kwargs):
    """Cleaves a polypeptide sequence using a given rule.

    .. seealso::
        :func:`icleave` and :func:`xcleave`, which produce both peptides and their indices.

    Parameters
    ----------
    sequence : str
        The sequence of a polypeptide.

        .. note::
            The sequence is expected to be in one-letter uppercase notation.
            Otherwise, some of the cleavage rules in :py:data:`expasy_rules`
            will not work as expected.

    rule : str or compiled regex
        A key present in :py:data:`expasy_rules`, :py:data:`psims_rules` (or an MS ontology accession) or a
        `regular expression <https://docs.python.org/library/re.html#regular-expression-syntax>`_
        describing the site of cleavage. It is recommended
        to design the regex so that it matches only the residue whose C-terminal
        bond is to be cleaved. All additional requirements should be specified
        using `lookaround assertions
        <http://www.regular-expressions.info/lookaround.html>`_.
        :py:data:`expasy_rules` contains cleavage rules for popular cleavage agents.

        .. seealso:: The `regex` argument.
    missed_cleavages : int, optional
        Maximum number of allowed missed cleavages. Defaults to 0.
    min_length : int or None, optional
        Minimum peptide length. Defaults to :py:const:`None`.

        .. note ::
            This checks for string length, which is only correct for one-letter
            notation and not for full *modX*. Use :py:func:`length` manually if
            you know what you are doing and apply :py:func:`cleave` to *modX*
            sequences.

    max_length : int or None, optional
        Maximum peptide length. Defaults to :py:const:`None`. See note above.
    semi : bool, optional
        Include products of semi-specific cleavage. Default is :py:const:`False`.
        This effectively cuts every peptide at every position and adds results to the output.
    exception : str or compiled RE or None, optional
        Exceptions to the cleavage rule. If specified, should be a key present in :py:const:`expasy_rules`
        or regular expression. Cleavage sites matching `rule` will be checked against `exception` and omitted
        if they match.
    regex : bool, optional
        If :py:const:`True`, the cleavage rule is always interpreted as a regex. Otherwise, a matching value
        is looked up in :py:data:`expasy_rules` and :py:data:`psims_rules`.

    Returns
    -------
    out : set
        A set of unique (!) peptides.

    Examples
    --------
    >>> cleave('AKAKBK', expasy_rules['trypsin'], 0) == {'AK', 'BK'}
    True
    >>> cleave('AKAKBK', 'trypsin', 0) == {'AK', 'BK'}
    True
    >>> cleave('AKAKBK', 'MS:1001251', 0) == {'AK', 'BK'}
    True
    >>> cleave('GKGKYKCK', 'Trypsin/P', 2) == \
    {'CK', 'GKYK', 'YKCK', 'GKGK', 'GKYKCK', 'GK', 'GKGKYK', 'YK'}
    True
    """
    # Delegate to icleave and discard the start indices, keeping only the
    # unique peptide strings.
    return {peptide for _, peptide in icleave(*args, **kwargs)}
def icleave(sequence, rule, missed_cleavages=0, min_length=None, max_length=None, semi=False, exception=None, regex=False):
    """Like :py:func:`cleave`, but the result is an iterator and includes peptide indices.
    Refer to :py:func:`cleave` for explanation of parameters.

    Returns
    -------
    out : iterator
        An iterator over (index, sequence) pairs.

    """
    # Resolve named rules (Expasy name, PSI-MS name, or MS accession) unless
    # the caller insists the rule is a raw regex.
    if not regex:
        if rule in expasy_rules:
            rule = expasy_rules[rule]
        elif rule in psims_rules:
            rule = psims_rules[rule]
        elif rule in _psims_index:
            rule = _psims_index[rule]
        elif re.search(r'[a-z]', rule):
            # Lower-case letters suggest a (possibly mistyped) enzyme name
            # rather than a regex.
            warnings.warn('Interpreting the rule as a regular expression: {}. Did you mistype the rule? '
                'Specify `regex=True` to silence this warning.'.format(rule))
    exception = expasy_rules.get(exception, exception)
    # Keep a sliding window of the last `missed_cleavages + 2` cleavage-site
    # positions; each new site closes peptides starting at every earlier
    # site still in the window.
    ml = missed_cleavages + 2
    trange = range(ml)
    cleavage_sites = deque([0], maxlen=ml)
    if min_length is None:
        min_length = 1
    if max_length is None:
        max_length = len(sequence)
    # cl tracks how many sites are currently in the window.
    cl = 1
    if exception is not None:
        exceptions = {x.end() for x in re.finditer(exception, sequence)}
    # The trailing None represents the end of the sequence (final cut).
    for end in it.chain([x.end() for x in re.finditer(rule, sequence)], [None]):
        if exception is not None and end in exceptions:
            continue
        cleavage_sites.append(end)
        if cl < ml:
            cl += 1
        for j in trange[:cl - 1]:
            seq = sequence[cleavage_sites[j]:cleavage_sites[-1]]
            lenseq = len(seq)
            if end is not None:
                start = end - lenseq
            else:
                start = len(sequence) - lenseq
            if seq and min_length <= lenseq <= max_length:
                yield (start, seq)
                if semi:
                    # Semi-specific products: all prefixes and suffixes of
                    # the fully-specific peptide within the length bounds.
                    for k in range(min_length, min(lenseq, max_length)):
                        yield (start, seq[:k])
                    for k in range(max(1, lenseq - max_length), lenseq - min_length + 1):
                        yield (start + k, seq[k:])
def xcleave(*args, **kwargs):
    """Like :py:func:`icleave`, but returns a list.

    Returns
    -------
    out : list
        A list of (index, sequence) pairs.

    Examples
    --------
    >>> xcleave('AKAKBK', 'trypsin', 1)
    [(0, 'AK'), (0, 'AKAK'), (2, 'AK'), (2, 'AKBK'), (4, 'BK')]
    """
    return [*icleave(*args, **kwargs)]
def num_sites(sequence, rule, **kwargs):
    """Count the number of sites where `sequence` can be cleaved using
    the given `rule` (e.g. number of miscleavages for a peptide).

    Parameters
    ----------
    sequence : str
        The sequence of a polypeptide.
    rule : str or compiled regex
        A regular expression describing the site of cleavage. It is recommended
        to design the regex so that it matches only the residue whose C-terminal
        bond is to be cleaved. All additional requirements should be specified
        using `lookaround assertions
        <http://www.regular-expressions.info/lookaround.html>`_.
    labels : list, optional
        A list of allowed labels for amino acids and terminal modifications.
    exception : str or compiled RE or None, optional
        Exceptions to the cleavage rule. If specified, should be a regular expression.
        Cleavage sites matching `rule` will be checked against `exception` and omitted
        if they match.

    Returns
    -------
    out : int
        Number of cleavage sites.
    """
    # icleave with default settings yields one peptide per cleavage interval,
    # so the number of sites is one less than the number of peptides.
    count = -1
    for _ in icleave(sequence, rule, **kwargs):
        count += 1
    return count
expasy_rules = {
'arg-c': r'R',
'asp-n': r'\w(?=D)',
'bnps-skatole' : r'W',
'caspase 1': r'(?<=[FWYL]\w[HAT])D(?=[^PEDQKR])',
'caspase 2': r'(?<=DVA)D(?=[^PEDQKR])',
'caspase 3': r'(?<=DMQ)D(?=[^PEDQKR])',
'caspase 4': r'(?<=LEV)D(?=[^PEDQKR])',
'caspase 5': r'(?<=[LW]EH)D',
'caspase 6': r'(?<=VE[HI])D(?=[^PEDQKR])',
'caspase 7': r'(?<=DEV)D(?=[^PEDQKR])',
'caspase 8': r'(?<=[IL]ET)D(?=[^PEDQKR])',
'caspase 9': r'(?<=LEH)D',
'caspase 10': r'(?<=IEA)D',
'chymotrypsin high specificity' : r'([FY](?=[^P]))|(W(?=[^MP]))',
'chymotrypsin low specificity':
r'([FLY](?=[^P]))|(W(?=[^MP]))|(M(?=[^PY]))|(H(?=[^DMPW]))',
'clostripain': r'R',
'cnbr': r'M',
'enterokinase': r'(?<=[DE]{3})K',
'factor xa': r'(?<=[AFGILTVM][DE]G)R',
'formic acid': r'D',
'glutamyl endopeptidase': r'E',
'granzyme b': r'(?<=IEP)D',
'hydroxylamine': r'N(?=G)',
'iodosobenzoic acid': r'W',
'lysc': r'K',
'ntcb': r'\w(?=C)',
'pepsin ph1.3': r'((?<=[^HKR][^P])[^R](?=[FL][^P]))|'
r'((?<=[^HKR][^P])[FL](?=\w[^P]))',
'pepsin ph2.0': r'((?<=[^HKR][^P])[^R](?=[FLWY][^P]))|'
r'((?<=[^HKR][^P])[FLWY](?=\w[^P]))',
'proline endopeptidase': r'(?<=[HKR])P(?=[^P])',
'proteinase k': r'[AEFILTVWY]',
'staphylococcal peptidase i': r'(?<=[^E])E',
'thermolysin': r'[^DE](?=[AFILMV][^P])',
'thrombin': r'((?<=G)R(?=G))|'
r'((?<=[AFGILTVM][AFGILTVWA]P)R(?=[^DE][^DE]))',
'trypsin': r'([KR](?=[^P]))|((?<=W)K(?=P))|((?<=M)R(?=P))',
'trypsin_exception': r'((?<=[CD])K(?=D))|((?<=C)K(?=[HY]))|((?<=C)R(?=K))|((?<=R)R(?=[HR]))',
}
"""
This dict contains regular expressions for cleavage rules of the most
popular proteolytic enzymes. The rules were taken from the
`PeptideCutter tool
<http://ca.expasy.org/tools/peptidecutter/peptidecutter_enzymes.html>`_
at Expasy.
.. note::
'trypsin_exception' can be used as `exception` argument when calling
:py:func:`cleave` with 'trypsin' `rule`::
>>> parser.cleave('PEPTIDKDE', parser.expasy_rules['trypsin'])
{'DE', 'PEPTIDK'}
>>> parser.cleave('PEPTIDKDE', parser.expasy_rules['trypsin'], \
exception=parser.expasy_rules['trypsin_exception'])
{'PEPTIDKDE'}
"""
psims_rules = {
cvstr('2-iodobenzoate', 'MS:1001918'): r'(?<=W)',
cvstr('Arg-C', 'MS:1001303'): r'(?<=R)(?!P)',
cvstr('Asp-N', 'MS:1001304'): r'(?=[BD])',
cvstr('Asp-N ambic', 'MS:1001305'): r'(?=[DE])',
cvstr('CNBr', 'MS:1001307'): r'(?<=M)',
cvstr('Chymotrypsin', 'MS:1001306'): r'(?<=[FYWL])(?!P)',
cvstr('Formic acid', 'MS:1001308'): r'((?<=D))|((?=D))',
cvstr('Lys-C', 'MS:1001309'): r'(?<=K)(?!P)',
cvstr('Lys-C/P', 'MS:1001310'): r'(?<=K)',
cvstr('PepsinA', 'MS:1001311'): r'(?<=[FL])',
cvstr('TrypChymo', 'MS:1001312'): r'(?<=[FYWLKR])(?!P)',
cvstr('Trypsin', 'MS:1001251'): r'(?<=[KR])(?!P)',
cvstr('Trypsin/P', 'MS:1001313'): r'(?<=[KR])',
cvstr('V8-DE', 'MS:1001314'): r'(?<=[BDEZ])(?!P)',
cvstr('V8-E', 'MS:1001315'): r'(?<=[EZ])(?!P)',
cvstr('glutamyl endopeptidase', 'MS:1001917'): r'(?<=[^E]E)',
cvstr('leukocyte elastase', 'MS:1001915'): r'(?<=[ALIV])(?!P)',
cvstr('proline endopeptidase', 'MS:1001916'): r'(?<=[HKR]P)(?!P)',
}
"""
This dict contains regular expressions for cleavage rules of the most
popular proteolytic enzymes. The rules were taken from the PSI `MS ontology
<http://purl.obolibrary.org/obo/MS_1001045>`_.
You can use names or accessions to access the rules.
Use :py:func:`pyteomics.auxiliary.cvquery` for accession access::
>>> from pyteomics.auxiliary import cvquery
>>> from pyteomics.parser import psims_rules
>>> cvquery(psims_rules, 'MS:1001918')
'(?<=W)'
"""
_psims_index = cvquery(psims_rules)
def isoforms(sequence, **kwargs):
"""
Apply variable and fixed modifications to the polypeptide and yield
the unique modified sequences.
Parameters
----------
sequence : str
Peptide sequence to modify.
variable_mods : dict, optional
A dict of variable modifications in the following format:
:py:const:`{'label1': ['X', 'Y', ...], 'label2': ['X', 'A', 'B', ...]}`
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | true |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/proforma.py | pyteomics/proforma.py | '''
proforma - Proteoform and Peptidoform Notation
==============================================
ProForma is a notation for defining modified amino acid sequences using
a set of controlled vocabularies, as well as encoding uncertain or partial
information about localization. See `ProForma specification <https://www.psidev.info/proforma>`_
for more up-to-date information.
For more details, see the :mod:`pyteomics.proforma` online.
'''
import itertools
import re
import warnings
from typing import Any, Callable, Dict, Iterable, List, Optional, ClassVar, Sequence, Tuple, Type, Union, Generic, TypeVar, NamedTuple
from collections import deque, namedtuple
from functools import partial
from itertools import chain
from array import array as _array
from enum import Enum
from numbers import Integral
from .mass import Composition, std_aa_mass, Unimod, nist_mass, calculate_mass, std_ion_comp, mass_charge_ratio, std_aa_comp
from .auxiliary import PyteomicsError, BasicComposition
from .auxiliary.utils import add_metaclass
from .auxiliary.psims_util import load_psimod, load_xlmod, load_gno, obo_cache, _has_psims
try:
    import numpy as np
except ImportError:
    # numpy is optional for this module; keep a None placeholder when absent.
    np = None

# Monoisotopic mass of water, subtracted/added when converting between
# residue and peptide masses.
_WATER_MASS = calculate_mass(formula="H2O")

# Local copy so this module can register 'X' (unknown residue, whose mass is
# carried entirely by its modification) without mutating pyteomics.mass's table.
std_aa_mass = std_aa_mass.copy()
std_aa_mass['X'] = 0

# All element symbols known to the NIST mass table; used to validate formulas.
element_symbols = set(nist_mass)

# Generic type variable used by typed helpers below.
T = TypeVar('T')
class ProFormaError(PyteomicsError):
    """Error raised while parsing or interpreting a ProForma string.

    Attributes
    ----------
    message : str
        Description of the problem.
    index : int or None
        Position in the input where the problem occurred, if known.
    parser_state : object or None
        Internal parser state captured at the time of the error.
    """

    def __init__(self, message, index=None, parser_state=None, **kwargs):
        # NOTE(review): `PyteomicsError` (the class itself) is passed as the
        # first positional argument to the base initializer — presumably to
        # satisfy PyteomicsError's expected argument layout; confirm before
        # changing.
        super(ProFormaError, self).__init__(PyteomicsError, message, index, parser_state)
        self.message = message
        self.index = index
        self.parser_state = parser_state
class PrefixSavingMeta(type):
    '''Metaclass that registers subclasses in a shared ``prefix_map``.

    Any class declaring a truthy ``prefix_name`` and/or ``short_prefix``
    attribute is recorded under the lower-cased label(s), enabling lookup
    of tag classes by the prefix text seen in a ProForma string.
    '''

    def __new__(mcs, name, parents, attrs):
        cls = super(PrefixSavingMeta, mcs).__new__(mcs, name, parents, attrs)
        for attr_name in ("prefix_name", "short_prefix"):
            label = attrs.get(attr_name)
            if label:
                cls.prefix_map[label.lower()] = cls
        return cls

    def find_by_tag(self, tag_name):
        '''Return the registered class for *tag_name* (case-insensitive).

        Raises :class:`ValueError` if *tag_name* is :const:`None` and
        :class:`KeyError` if no class is registered under it.
        '''
        if tag_name is None:
            raise ValueError("tag_name cannot be None!")
        return self.prefix_map[tag_name.lower()]
class TagTypeEnum(Enum):
    """Discriminates the kinds of tags that can appear in a ProForma string."""

    # Modification sources (controlled vocabularies and ad-hoc forms)
    unimod = 0
    psimod = 1
    massmod = 2
    generic = 3
    info = 4
    gnome = 5
    xlmod = 6
    formula = 7
    glycan = 8
    # Localization / grouping markers
    localization_marker = 9
    position_label = 10
    position_modifier = 11
    # Co-localization constraints
    comup = 12
    comkp = 13
    limit = 14
    custom = 15
    # Placeholder for tags whose group has not been resolved yet
    group_placeholder = 999
class ModificationTagStyle(Enum):
    """Preferred rendering style when a modification tag is serialized.

    Interpreted by ``ModificationBase._format_main``: the ``Long*`` styles
    use ``prefix_name``, the ``Short*`` styles use ``short_prefix``, combined
    with either the resolved ``id`` or ``name``.
    """

    Unset = 0      # no preference; the raw stored value is rendered
    ShortId = 1    # short prefix + numeric id
    LongId = 2     # long prefix + numeric id
    ShortName = 3  # short prefix + resolved name
    LongName = 4   # long prefix + resolved name
class ModificationSourceType(Enum):
    """
    Whether a tag was generated from explicit user input (``Explicit``), a constant
    modification rule (``Constant``), or from a variable expansion (``Generated``).

    Used to track sources in :class:`ProteoformCombinator` machinery.
    """

    Explicit = 0   # written directly in the input string
    Constant = 1   # injected by a constant (fixed) modification rule
    Generated = 2  # produced by variable-modification expansion
# Module-private sentinel distinguishing "argument omitted" from an explicit None.
_sentinel = object()


class ModificationMassNotFoundError(ProFormaError):
    """Raised when a modification's mass cannot be resolved from its CV entry."""
    pass


class CompositionNotFoundError(ProFormaError):
    """Raised when a chemical composition cannot be resolved for a modification."""
    pass


class MissingChargeStateError(ProFormaError):
    """Raised when an operation requires a charge state that was not provided."""
    pass


class UnknownMonosaccharideError(ProFormaError):
    """Raised when a glycan composition references an unrecognized monosaccharide."""
    pass
@add_metaclass(PrefixSavingMeta)
class TagBase(object):
    '''A base class for all tag types.

    Attributes
    ----------
    type: Enum
        An element of :class:`TagTypeEnum` saying what kind of tag this is.
    value: object
        The data stored in this tag, usually an externally controlled name
    extra: list
        Any extra tags that were nested within this tag. Usually limited to INFO
        tags but may be other synonymous controlled vocabulary terms.
    group_id: str or None
        A short label denoting which group, if any, this tag belongs to
    '''
    __slots__ = ("type", "value", "extra", "group_id", )

    type: TagTypeEnum
    value: Any
    extra: List["TagBase"]
    group_id: Optional[str]

    prefix_name: ClassVar[Optional[str]] = None
    short_prefix: ClassVar[Optional[str]] = None
    prefix_map: ClassVar[Dict[str, Type['TagBase']]] = {}

    def __init__(self, type, value, extra=None, group_id=None):
        self.type = type
        self.value = value
        self.extra = extra
        self.group_id = group_id

    def copy(self):
        '''Return a copy of this tag; ``extra`` tags are copied recursively.'''
        return self.__class__(self.value, [e.copy() for e in (self.extra or [])], self.group_id)

    def __str__(self):
        part = self._format_main()
        had_marker = False
        if self.extra:
            rest = []
            for e in self.extra:
                rest.append(str(e))
                # A group label among the extras already renders this tag's
                # group id, so it must not be appended a second time below.
                had_marker |= isinstance(e, GroupLabelBase) and e.group_id == self.group_id
            label = '|'.join([part] + rest)
        else:
            label = part
        if self.group_id and not had_marker:
            label = '%s%s' % (label, self.group_id)
        return '%s' % label

    def __repr__(self):
        template = "{self.__class__.__name__}({self.value!r}, {self.extra!r}, {self.group_id!r})"
        return template.format(self=self)

    def __eq__(self, other):
        if other is None:
            return False
        if isinstance(other, str):
            # Comparing against a string compares the serialized form.
            return str(self) == other
        return (self.type == other.type) and (self.value == other.value) and (self.extra == other.extra) \
            and (self.group_id == other.group_id)

    def __ne__(self, other):
        return not self == other

    def is_modification(self) -> bool:
        '''Whether this tag's type denotes a modification (as opposed to an
        informational or grouping tag).'''
        return self.type in (
            TagTypeEnum.formula,
            TagTypeEnum.generic,
            TagTypeEnum.glycan,
            TagTypeEnum.gnome,
            TagTypeEnum.unimod,
            TagTypeEnum.massmod,
            TagTypeEnum.psimod,
            TagTypeEnum.custom,
        )

    def find_modification(self) -> Optional["TagBase"]:
        '''Return this tag if it is a modification, otherwise the first
        modification tag among :attr:`extra`, or :const:`None`.'''
        if self.is_modification():
            return self
        # BUG FIX: the predicate must be *called*. The previous code tested
        # the bound method object (`tag.is_modification`), which is always
        # truthy, so the first extra tag was returned regardless of its type.
        # Also tolerate `extra` being None.
        for tag in (self.extra or ()):
            if tag.is_modification():
                return tag
        return None

    def find_tag_type(self, tag_type: TagTypeEnum) -> List['TagBase']:
        '''Search this tag or tag collection for elements with a particular
        tag type and return them.

        Parameters
        ----------
        tag_type : TagTypeEnum
            A label from :class:`TagTypeEnum`, or an equivalent type.

        Returns
        -------
        matches : list
            The list of all tags in this object which match the requested tag type.
        '''
        out = []
        if self.type == tag_type:
            out.append(self)
        if not self.extra:
            return out
        for e in self.extra:
            if e.type == tag_type:
                out.append(e)
        return out

    @classmethod
    def parse(cls, buffer) -> 'TagBase':
        '''Parse a tag from *buffer* via the module-level tokenizer.'''
        return process_tag_tokens(buffer)

    def has_mass(self) -> bool:
        """
        Check if this tag carries a mass value.

        Returns
        -------
        bool
        """
        return False

    def has_composition(self) -> bool:
        """Check if this tag carries a chemical composition."""
        return False
class GroupLabelBase(TagBase):
    '''Base class for tags that label or annotate a tag group.

    Serialization differs from :class:`TagBase` in that the group id suffix
    is never appended — the label itself already encodes the group.
    '''
    __slots__ = ()

    def __str__(self):
        pieces = [self._format_main()]
        if self.extra:
            pieces.extend(str(e) for e in self.extra)
        return '|'.join(pieces)
class PositionLabelTag(GroupLabelBase):
    '''A tag to mark that a position is involved in a group in some way, but does
    not imply any specific semantics.
    '''
    __slots__ = ()

    def __init__(self, value=None, extra=None, group_id=None):
        # The group id *is* the value for a bare position label; `value` is
        # accepted (and overwritten) only to keep the common tag signature.
        assert group_id is not None
        value = group_id
        super(PositionLabelTag, self).__init__(
            TagTypeEnum.position_label, value, extra, group_id)

    def _format_main(self):
        # Rendered purely as the group id, e.g. "#g1".
        return "{self.group_id}".format(self=self)
class LocalizationMarker(GroupLabelBase):
    '''A tag to mark a particular localization site.

    ``value`` is coerced to :class:`float` — presumably a localization
    score/probability for this site within its group (TODO confirm against
    the ProForma spec).
    '''
    __slots__ = ()

    def __init__(self, value, extra=None, group_id=None):
        assert group_id is not None
        super(LocalizationMarker, self).__init__(
            TagTypeEnum.localization_marker, float(value), extra, group_id)

    def _format_main(self):
        # Render the score with up to four significant digits, e.g. "#g1(0.75)".
        return "{self.group_id}({self.value:.4g})".format(self=self)
class InformationTag(TagBase):
    '''A tag carrying free text describing the location
    '''
    __slots__ = ()

    prefix_name = "INFO"

    def __init__(self, value, extra=None, group_id=None):
        # The stored value is always the string form of whatever was passed.
        super().__init__(TagTypeEnum.info, str(value), extra, group_id)

    def _format_main(self):
        return "INFO:%s" % (self.value, )
class PositionModifierTag(TagBase):
    '''A tag with the ``Position`` prefix; its value is stored verbatim.'''
    __slots__ = ()

    prefix_name = "Position"

    def __init__(self, value, extra=None, group_id=None):
        super(PositionModifierTag, self).__init__(
            TagTypeEnum.position_modifier, value, extra, group_id)

    def _format_main(self):
        return "%s:%s" % (self.prefix_name, self.value)
class LimitModifierTag(TagBase):
    '''A ``Limit`` tag; integer-like values are coerced to :class:`int`.'''
    __slots__ = ()

    prefix_name = "Limit"

    def __init__(self, value, extra=None, group_id=None):
        if not isinstance(value, (int, float)):
            try:
                value = int(value)
            except (ValueError, TypeError):
                # Deliberately best-effort: non-integer strings (e.g. "3.5")
                # are kept verbatim rather than raising.
                pass
        super().__init__(TagTypeEnum.limit, value, extra, group_id)

    def _format_main(self):
        return f"{self.prefix_name}:{self.value}"
class ColocaliseModificationsOfKnownPostionTag(TagBase):
    '''Marker tag rendered as ``CoMKP``
    (long form ``ColocaliseModificationsOfKnownPosition``).

    .. note:: the misspelling "Postion" in the class name is kept for
       backward compatibility with existing imports.
    '''
    __slots__ = ()

    prefix_name = "ColocaliseModificationsOfKnownPosition"
    short_prefix = "CoMKP"

    def __init__(self, extra=None, group_id=None):
        # This tag carries no value of its own; only group membership matters.
        super().__init__(TagTypeEnum.comkp, None, extra, group_id)

    def copy(self):
        # Override required: the base `copy` passes `value` positionally, but
        # this initializer has no value parameter.
        return self.__class__([e.copy() for e in (self.extra or [])], self.group_id)

    def _format_main(self):
        return self.short_prefix
class ColocaliseModificationsOfUnknownPostionTag(TagBase):
    '''Marker tag rendered as ``CoMUP``
    (long form ``ColocaliseModificationsOfUnknownPosition``).

    .. note:: the misspelling "Postion" in the class name is kept for
       backward compatibility with existing imports.
    '''
    __slots__ = ()

    prefix_name = "ColocaliseModificationsOfUnknownPosition"
    short_prefix = "CoMUP"

    def __init__(self, extra=None, group_id=None):
        # This tag carries no value of its own; only group membership matters.
        super().__init__(TagTypeEnum.comup, None, extra, group_id)

    def copy(self):
        # Override required: the base `copy` passes `value` positionally, but
        # this initializer has no value parameter.
        return self.__class__([e.copy() for e in self.extra or []], self.group_id)

    def _format_main(self):
        return self.short_prefix
class ModificationResolver(object):
    '''Base class for resolving modification names or ids against a controlled
    vocabulary, with optional memoization of resolved definitions.

    Attributes
    ----------
    name : str
        Lower-cased resolver/CV name (e.g. ``"unimod"``).
    symbol : str
        One-letter shorthand for the CV (first letter of :attr:`name`).
    '''

    name: str
    symbol: str
    _database: Optional[Any]
    _cache: Optional[Dict[Tuple[Optional[str], Optional[int], frozenset], Any]]

    def __init__(self, name, **kwargs):
        self.name = name.lower()
        self.symbol = self.name[0]
        self._database = None
        self._cache = {}

    def clear_cache(self):
        """Clear the modification definition cache"""
        self._cache.clear()

    def enable_caching(self, flag: bool=True):
        """
        Enable or disable caching of modification definitions.

        If `flag` is :const:`False`, this will also dispose of any
        existing cached values.

        Parameters
        ----------
        flag : :class:`bool`
            Whether or not to disable the cache
        """
        if flag:
            if not self._cache:
                self._cache = {}
        else:
            self._cache = None

    def load_database(self):
        # Subclasses return their backing CV database object here.
        raise NotImplementedError()

    @property
    def database(self):
        # Lazily load the database on first access.
        if not self._database:
            self._database = self.load_database()
        return self._database

    @database.setter
    def database(self, database):
        self._database = database

    def parse_identifier(self, identifier: str) -> Tuple[Optional[str], Optional[int]]:
        """Parse a string that is either a CV prefixed identifier or name.

        Parameters
        ----------
        identifier : str
            The identifier string to parse, removing CV prefix as needed.

        Returns
        -------
        name : str, optional
            A textual identifier embedded in the qualified identifier, if any, otherwise
            :const:`None`.
        id : int, optional
            An integer ID embedded in the qualified identifier, if any, otherwise
            :const:`None`.
        """
        tokens = identifier.split(":", 1)
        if len(tokens) > 1:
            prefix = tokens[0].lower()
            # Only strip the prefix when it names *this* CV (full or short form).
            if prefix == self.name or prefix == self.symbol:
                identifier = tokens[1]
        if identifier.isdigit():
            id = int(identifier)
            name = None
        else:
            name = identifier
            id = None
        return name, id

    def _resolve_impl(self, name: str=None, id: int=None, **kwargs) -> Dict[str, Any]:
        raise NotImplementedError()

    def _mass_shift_fallback(self, name: Optional[str]) -> Optional[Dict[str, Any]]:
        '''If *name* looks like a bare signed mass shift (e.g. ``"+42.0"``),
        build a synthetic definition for it; otherwise return :const:`None`.'''
        if name and name.startswith(("+", "-")):
            return {
                "composition": None,
                "mass": float(name),
                "name": name,
                "id": None,
                "provider": self.name,
                "source": self,
            }
        return None

    def resolve(self, name: str=None, id: int=None, **kwargs):
        '''Resolve a modification definition, consulting the cache if enabled.

        When the database lookup raises :class:`KeyError` and *name* is a
        signed numeric mass shift, a synthetic mass-only definition is
        returned instead.
        '''
        if self._cache is None:
            # Caching disabled. CONSISTENCY FIX: apply the same mass-shift
            # fallback as the cached path (previously only cached lookups did).
            try:
                return self._resolve_impl(name, id, **kwargs)
            except KeyError:
                value = self._mass_shift_fallback(name)
                if value is None:
                    raise
                return value
        cache_key = (name, id, frozenset(kwargs.items()))
        if cache_key in self._cache:
            # Return a copy so callers cannot mutate the cached definition.
            return self._cache[cache_key].copy()
        try:
            value = self._resolve_impl(name, id, **kwargs)
        except KeyError:
            value = self._mass_shift_fallback(name)
            if value is None:
                # BUG FIX: `name` may be None for id-only lookups; the old
                # code called `name.startswith(...)` unconditionally, raising
                # AttributeError and masking the original KeyError.
                raise
        self._cache[cache_key] = value
        return value.copy()

    def __call__(self, name=None, id=None, **kwargs):
        return self.resolve(name, id, **kwargs)

    def __eq__(self, other):
        return self.name == other.name

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.name)
class UnimodResolver(ModificationResolver):
    """Resolve modifications against the Unimod database."""

    def __init__(self, **kwargs):
        super(UnimodResolver, self).__init__("unimod", **kwargs)
        self._database = kwargs.get("database")
        # When strict, name lookups require exact matches before any
        # relaxed retry (see `_resolve_impl`).
        self.strict = kwargs.get("strict", True)

    def load_database(self):
        # Prefer the psims OBO cache when psims is installed; otherwise fall
        # back to pyteomics' own Unimod client.
        if _has_psims:
            return obo_cache.resolve("http://www.unimod.org/obo/unimod.obo")
        return Unimod()

    def _resolve_impl(self, name=None, id=None, **kwargs):
        """Look up *name* (by title, then by name, optionally retrying
        non-strictly) or *id*, and normalize the record to a plain dict.

        Raises ``KeyError`` when nothing matches and ``ValueError`` when
        neither argument is given.
        """
        strict = kwargs.get("strict", self.strict)
        # `exhaustive` permits a relaxed retry when the strict pass fails.
        exhaustive = kwargs.get("exhaustive", True)
        if name is not None:
            defn = self.database.by_title(name, strict=strict)
            if not defn:
                defn = self.database.by_name(name, strict=strict)
            if not defn and exhaustive and strict:
                defn = self.database.by_title(name, strict=False)
                if not defn:
                    defn = self.database.by_name(name, strict=False)
            if defn and isinstance(defn, list):
                # Relaxed matching may return several records; keep the first.
                warnings.warn(
                    "Multiple matches found for {!r} in Unimod, taking the first, {}.".format(
                        name, defn[0]['record_id']))
                defn = defn[0]
            if not defn:
                raise KeyError(name)
        elif id is not None:
            defn = self.database[id]
            if not defn:
                raise KeyError(id)
        else:
            raise ValueError("Must provide one of `name` or `id`")
        if isinstance(defn, dict):
            # pyteomics Unimod backend: the record is a plain dict.
            return {
                'composition': defn['composition'],
                'name': defn['title'],
                'id': defn['record_id'],
                'mass': defn['mono_mass'],
                'provider': self.name,
                "source": self
            }
        else:
            # psims backend: the record exposes attributes instead of keys.
            name = defn.ex_code_name
            if not name:
                name = defn.code_name
            return {
                "composition": defn.composition,
                "name": name,
                "id": defn.id,
                "mass": defn.monoisotopic_mass,
                "provider": self.name,
                "source": self
            }
class PSIModResolver(ModificationResolver):
    """Resolve modifications against the PSI-MOD ontology."""

    def __init__(self, **kwargs):
        super(PSIModResolver, self).__init__('psimod', **kwargs)
        self._database = kwargs.get("database")

    def load_database(self):
        return load_psimod()

    def _resolve_impl(self, name=None, id=None, **kwargs):
        """Look up *name* or the zero-padded ``MOD:<id>`` accession and build
        a definition dict with mass and (when available) composition.

        Raises :class:`ModificationMassNotFoundError` when no usable mass
        field is present on the term.
        """
        if name is not None:
            defn = self.database[name]
        elif id is not None:
            # PSI-MOD accessions are zero-padded to five digits.
            defn = self.database['MOD:{:05d}'.format(id)]
        else:
            raise ValueError("Must provide one of `name` or `id`")
        # Non-standard amino acids are listed with `DiffMono` = `none`
        # but have a valid `MassMono` definition. Normally, `MassMono` is
        # the full mass of the residue plus the modification so it'd double count the
        # amino acid to use that value. Non-standard amino acids are a special case
        # because they *should* only be used with the amino acid X
        mass = None
        # Prefer DiffMono, fall back to MassMono; the for/else raises only
        # when neither key yields a parseable float.
        for key in ["DiffMono", "MassMono"]:
            if key in defn:
                try:
                    mass = float(defn[key])
                    break
                except (KeyError, TypeError, ValueError):
                    continue
        else:
            raise ModificationMassNotFoundError(
                "Could not resolve the mass of %r from %r" % ((name, id), defn)
            )
        # As with `DiffMono` for non-standard amino acids, but for chemical formulas -> Compositions
        for key in ["DiffFormula", "Formula"]:
            if key in defn and defn[key] is not None:
                composition = Composition()
                # Formulas are space-separated alternating element/count
                # tokens, e.g. "C 2 H 3 N 1 O 1"; isotopes appear as "(13)C".
                diff_formula_tokens = defn[key].strip().split(" ")
                for i in range(0, len(diff_formula_tokens), 2):
                    element = diff_formula_tokens[i]
                    count = diff_formula_tokens[i + 1]
                    if count:
                        count = int(count)
                    if element.startswith("("):
                        # Rewrite "(13)C" into pyteomics' "C[13]" notation.
                        j = element.index(")")
                        isotope = element[1:j]
                        element = "%s[%s]" % (element[j + 1:], isotope)
                    composition[element] += count
                break
        else:
            composition = None
            warnings.warn("No formula was found for %r in PSI-MOD, composition will be missing" % ((name, id), ))
        return {
            'mass': mass,
            'composition': composition,
            'name': defn.name,
            'id': defn.id,
            'provider': self.name,
            "source": self
        }
class XLMODResolver(ModificationResolver):
    """Resolve cross-linker modifications against the XLMOD ontology."""

    def __init__(self, **kwargs):
        super(XLMODResolver, self).__init__('xlmod', **kwargs)
        self._database = kwargs.get("database")

    def load_database(self):
        return load_xlmod()

    def _resolve_impl(self, name=None, id=None, **kwargs):
        """Look up *name* or the zero-padded ``XLMOD:<id>`` accession and
        build a definition dict.

        Raises :class:`ModificationMassNotFoundError` when the term carries
        no usable ``monoIsotopicMass``.
        """
        if name is not None:
            defn = self.database[name]
        elif id is not None:
            defn = self.database['XLMOD:{:05d}'.format(id)]
        else:
            raise ValueError("Must provide one of `name` or `id`")
        try:
            mass = float(defn['monoIsotopicMass'])
        except (KeyError, TypeError, ValueError):
            raise ModificationMassNotFoundError("Could not resolve the mass of %r from %r" % ((name, id), defn))
        # "D" denotes deuterium in XLMOD formulas; rewrite it to "H[2]"
        # before handing the string to Composition.
        if 'deadEndFormula' in defn:
            composition = Composition(defn['deadEndFormula'].replace(" ", '').replace("D", "H[2]"))
        elif 'bridgeFormula' in defn:
            composition = Composition(
                defn['bridgeFormula'].replace(" ", '').replace("D", "H[2]"))
        else:
            # BUG FIX: `composition` was previously left unbound on this path,
            # raising UnboundLocalError for terms that define neither formula.
            composition = None
        return {
            'mass': mass,
            'composition': composition,
            'name': defn.name,
            'id': defn.id,
            'provider': self.name,
            "source": self
        }
# TODO: Implement resolve walking up the graph to get the mass. Can't really
# get any more information without glypy/glyspace interaction
class GNOResolver(ModificationResolver):
    """Resolve glycan modifications against the GNOme ontology."""

    # BUG FIX: the original pattern r"(\d+(:?\.\d+)) Da" contained a typo —
    # "(:?" (a group starting with an optional literal colon) instead of the
    # non-capturing group "(?:...)?" — which made the fractional part
    # mandatory, so integer labels such as "1234 Da" never matched. The
    # fractional part is now genuinely optional; group(1) is still the number.
    mass_pattern = re.compile(r"(\d+(?:\.\d+)?) Da")

    def __init__(self, **kwargs):
        super(GNOResolver, self).__init__('gnome', **kwargs)
        self._database = kwargs.get("database")

    def load_database(self):
        return load_gno()

    def get_mass_from_glycan_composition(self, term):
        '''Parse the Byonic-style glycan composition from property GNO:00000202
        to get the counts of each monosaccharide and use that to calculate mass.

        The mass computed here is exact and dehydrated, distinct from the rounded-off
        mass that :meth:`get_mass_from_term` will produce by walking up the CV term
        hierarchy. However, not all glycan compositions are representable in GNO:00000202
        format, so this may silently be absent or incomplete, hence the double-check in
        :meth:`get_mass_from_term`.

        Parameters
        ----------
        term : psims.controlled_vocabulary.Entity
            The CV entity being parsed.

        Returns
        -------
        mass : float or :const:`None`
            If a glycan composition is found on the term, the computed
            mass will be returned. Otherwise the :const:`None` is returned
        '''
        val = term.get('GNO:00000202')
        monosaccharides = BasicComposition()
        composition = Composition()
        if val:
            # Compositions look like "HexNAc(2)Hex(3)": symbol + "(count)".
            tokens = re.findall(r"([A-Za-z0-9]+)\((\d+)\)", val)
            mass = 0.0
            for symbol, count in tokens:
                count = int(count)
                try:
                    mono_mass, mono_comp, symbol = GlycanModification.valid_monosaccharides[symbol]
                    mass += mono_mass * count
                    composition += mono_comp * count
                    monosaccharides[symbol] += count
                except KeyError:
                    # Unknown monosaccharide symbols are skipped rather than
                    # failing the whole composition.
                    continue
            return mass, monosaccharides, composition
        return None, None, None

    def get_mass_from_term(self, term, raw_mass):
        '''Walk up the term hierarchy and find the mass group
        term near the root of the tree, and return the most accurate
        mass available for the provided term.

        The mass group term's mass is rounded to two decimal places, leading
        to relatively large errors.

        Parameters
        ----------
        term : psims.controlled_vocabulary.Entity
            The CV entity being parsed.
        raw_mass : float or :const:`None`
            The exact mass computed from the glycan composition, if available,
            used to sanity-check the rounded hierarchy mass.

        Returns
        -------
        mass : float or :const:`None`
            If a root node is found along the term's lineage, computed
            mass will be returned. Otherwise the :const:`None` is returned.
        '''
        root_id = 'GNO:00000001'
        parent = term.parent()
        if isinstance(parent, list):
            parent = parent[0]
        # Climb to the ancestor directly below the root: that term's name
        # carries the rounded "<mass> Da" label.
        while parent.id != root_id:
            next_parent = parent.parent()
            if isinstance(next_parent, list):
                next_parent = next_parent[0]
            if next_parent.id == root_id:
                break
            parent = next_parent
        match = self.mass_pattern.search(parent.name)
        if not match:
            return None
        # This will have a small mass error.
        rough_mass = float(match.group(1)) - _WATER_MASS
        # Prefer the exact composition-derived mass when it agrees within 1 Da.
        if raw_mass is not None and abs(rough_mass - raw_mass) < 1:
            return raw_mass
        warnings.warn(
            ("An accurate glycan composition could not be inferred from %s. "
             "Only a rough approximation is available.") % (term, ))
        return rough_mass

    def _resolve_impl(self, name=None, id=None, **kwargs):
        """Look up *name* or *id* in GNOme and assemble a definition dict,
        including the monosaccharide breakdown when available."""
        if name is not None:
            term = self.database[name]
        elif id is not None:
            term = self.database[id]
        else:
            raise ValueError("Must provide one of `name` or `id`")
        raw_mass, monosaccharides, composition = self.get_mass_from_glycan_composition(term)
        rec = {
            "name": term.name,
            "id": term.id,
            "provider": self.name,
            "composition": composition,
            "monosaccharides": monosaccharides,
            "mass": self.get_mass_from_term(term, raw_mass),
            "source": self
        }
        return rec
class GenericResolver(ModificationResolver):
    """Resolver that tries a sequence of other resolvers in order, returning
    the first successful definition."""

    def __init__(self, resolvers, **kwargs):
        super(GenericResolver, self).__init__('generic', **kwargs)
        self.resolvers = list(resolvers)

    def load_database(self):
        # No backing database of its own; delegates to `self.resolvers`.
        return None

    def parse_identifier(self, identifier):
        """Parse a string that is either a CV prefixed identifier or name.

        Does no parsing as a :class:`GenericModification` is never qualified.

        Parameters
        ----------
        identifier : str
            The identifier string to parse, removing CV prefix as needed.

        Returns
        -------
        name : str, optional
            A textual identifier embedded in the qualified identifier, if any, otherwise
            :const:`None`.
        id : int, optional
            An integer ID embedded in the qualified identifier, if any, otherwise
            :const:`None`.
        """
        return identifier, None

    def _resolve_impl(self, name=None, id=None, **kwargs):
        defn = None
        for resolver in self.resolvers:
            try:
                defn = resolver(name=name, id=id, **kwargs)
                break
            except KeyError:
                continue
            except ModificationMassNotFoundError:
                # A resolver recognized the term but lacked a mass; try the
                # next resolver rather than failing outright.
                warnings.warn("Could not resolve the mass for %r in %r" % ((name, id), resolver))
                continue
        if defn is None:
            if name is None:
                raise KeyError(id)
            elif id is None:
                raise KeyError(name)
            else:
                # NOTE(review): reached when *both* name and id were given and
                # every resolver failed — the ValueError message is misleading
                # for that case; confirm intended behavior before changing.
                raise ValueError("Must provide one of `name` or `id`")
        return defn
class CustomResolver(ModificationResolver):
    """Resolve modifications from a user-supplied in-memory mapping."""

    # Mapping from registered name to its definition dict.
    store: Dict[str, Dict[str, Any]]

    def __init__(self, store: Dict[str, Dict[str, Any]]=None, **kwargs):
        super().__init__("custom", **kwargs)
        self.store = {} if store is None else store

    def _resolve_impl(self, name = None, id = None, **kwargs):
        key = name if name is not None else id
        if key is None:
            raise ValueError("Must provide one of `name` or `id`")
        return self.store[key]

    def register(self, name, state: Dict[str, Any], **kwargs):
        """Register a definition for *name*; `kwargs` override keys in *state*.

        The definition must supply at least one of ``mass`` or ``composition``.
        """
        entry = dict(state)
        entry.update(kwargs)
        entry['id'] = name
        if "mass" not in entry and "composition" not in entry:
            raise ValueError("A custom modification definition *must* include at least one of `mass` or `composition`")
        self.store[name] = entry
class ModificationBase(TagBase):
    '''A base class for all modification tags with marked prefixes.

    While :class:`ModificationBase` is hashable, its equality testing
    brings in additional tag-related information. For pure modification
    identity comparison, use :attr:`key` to get a :class:`ModificationToken`
    free of these concerns.
    '''

    # Subclasses set this to the appropriate TagTypeEnum member.
    _tag_type = None
    __slots__ = ('_definition', 'style', '_generated')

    _generated: ModificationSourceType

    def __init__(self, value, extra=None, group_id=None, style=None):
        if style is None:
            style = ModificationTagStyle.Unset
        super(ModificationBase, self).__init__(
            self._tag_type, value, extra, group_id)
        # Resolved CV definition, filled lazily by `definition`/`resolve`.
        self._definition = None
        self._generated = ModificationSourceType.Explicit
        self.style = style

    def copy(self):
        # NOTE(review): unlike TagBase.copy, this assumes `extra` is not None;
        # confirm whether a None `extra` can reach this path.
        return self.__class__(self.value, [e.copy() for e in self.extra], self.group_id, self.style)

    def __reduce__(self):
        # Pickle as (class, init args, definition state).
        return self.__class__, (self.value, self.extra, self.group_id, self.style), self.__getstate__()

    def __getstate__(self):
        # The resolver object under 'source' is not portable; strip it.
        if self._definition is None:
            return None
        state = self._definition.copy()
        state['source'] = None
        return state

    def __setstate__(self, state):
        self._definition = state

    def __eq__(self, other):
        if isinstance(other, ModificationToken):
            # Delegate to the token's identity-based comparison.
            return other == self
        return super(ModificationBase, self).__eq__(other)

    def __hash__(self):
        # Hash by provider-scoped identity (triggers resolution if needed).
        return hash((self.id, self.provider))

    @property
    def key(self) -> 'ModificationToken':
        '''Get a safe-to-hash-and-compare :class:`ModificationToken`
        representing this modification without tag-like properties.

        Returns
        --------
        ModificationToken
        '''
        return ModificationToken(self.value, self.id, self.provider, self.__class__)

    @property
    def definition(self) -> Dict[str, Any]:
        '''A :class:`dict` of properties describing this modification, given
        by the providing controlled vocabulary. This value is cached, and
        should not be modified.

        Returns
        -------
        dict
        '''
        if self._definition is None:
            self._definition = self.resolve()
        return self._definition

    @property
    def mass(self):
        '''The monoisotopic mass shift this modification applies

        Returns
        -------
        float
        '''
        return self.definition['mass']

    def has_mass(self):
        """
        Check if this tag carries a mass value.

        Returns
        -------
        bool
        """
        return True

    def has_composition(self):
        """Modification tags are assumed to carry a composition."""
        return True

    @property
    def composition(self) -> Optional[Composition]:
        '''The chemical composition shift this modification applies'''
        return self.definition.get('composition')

    @property
    def charge(self) -> Optional[int]:
        # Charge carried by the modification, when the CV provides one.
        return self.definition.get('charge')

    @property
    def id(self) -> Optional[int]:
        '''The unique identifier given to this modification by its provider

        Returns
        -------
        str or int
        '''
        return self.definition.get('id')

    @property
    def name(self):
        '''The primary name of this modification from its provider.

        Returns
        -------
        str
        '''
        return self.definition.get('name')

    @property
    def provider(self):
        '''The name of the controlled vocabulary that provided this
        modification.

        Returns
        -------
        str
        '''
        return self.definition.get('provider')

    def _populate_from_definition(self, definition):
        # Install an externally resolved definition, bypassing `resolve`.
        self._definition = definition

    def _format_main(self) -> str:
        # Serialize according to the requested style (see ModificationTagStyle).
        if self.style == ModificationTagStyle.Unset or self.style is None:
            return "{self.prefix_name}:{self.value}".format(self=self)
        elif self.style == ModificationTagStyle.LongId:
            return "{self.prefix_name}:{self.id}".format(self=self)
        elif self.style == ModificationTagStyle.ShortId:
            return "{self.short_prefix}:{self.id}".format(self=self)
        elif self.style == ModificationTagStyle.LongName:
            return "{self.prefix_name}:{self.name}".format(self=self)
        elif self.style == ModificationTagStyle.ShortName:
            return "{self.short_prefix}:{self.name}".format(self=self)
        else:
            warnings.warn("Unknown formatting style {!r}".format(self.style))
            return "{self.prefix_name}:{self.value}".format(self=self)

    def resolve(self):
        '''Find the term and return its properties.

        Uses the class-level ``resolver`` attribute supplied by subclasses.
        '''
        keys = self.resolver.parse_identifier(self.value)
        return self.resolver(*keys)
class MassModification(TagBase):
'''A modification defined purely by a signed mass shift in Daltons.
The value of a :class:`MassModification` is always a :class:`float`
'''
__slots__ = ('_significant_figures', '_generated')
prefix_name = "Obs"
_generated: ModificationSourceType
def __init__(self, value, extra=None, group_id=None):
if isinstance(value, str):
sigfigs = len(value.split('.')[-1].rstrip('0'))
else:
sigfigs = 4
self._significant_figures = sigfigs
self._generated = ModificationSourceType.Explicit
super(MassModification, self).__init__(
TagTypeEnum.massmod, float(value), extra, group_id)
def copy(self):
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | true |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/mzxml.py | pyteomics/mzxml.py | """
mzxml - reader for mass spectrometry data in mzXML format
=========================================================
Summary
-------
**mzXML** is a (formerly) standard XML-format for raw mass spectrometry data storage,
intended to be replaced with **mzML**.
This module provides a minimalistic way to extract information from mzXML
files. You can use the old functional interface (:py:func:`read`) or the new
object-oriented interface (:py:class:`MzXML`)
to iterate over entries in ``<scan>`` elements.
:py:class:`MzXML` also supports direct indexing with scan IDs.
Data access
-----------
:py:class:`MzXML` - a class representing a single mzXML file.
Other data access functions use this class internally.
:py:func:`read` - iterate through spectra in mzXML file. Data from a
single scan are converted to a human-readable dict. Spectra themselves are
stored under 'm/z array' and 'intensity array' keys.
:py:func:`chain` - read multiple mzXML files at once.
:py:func:`chain.from_iterable` - read multiple files at once, using an
iterable of files.
Deprecated functions
--------------------
:py:func:`version_info` - get version information about the mzXML file.
You can just read the corresponding attribute of the :py:class:`MzXML` object.
:py:func:`iterfind` - iterate over elements in an mzXML file.
You can just call the corresponding method of the :py:class:`MzXML` object.
Dependencies
------------
This module requires :py:mod:`lxml` and :py:mod:`numpy`.
-------------------------------------------------------------------------------
"""
# Copyright 2016 Joshua Klein, Lev Levitsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import heapq
from . import xml, auxiliary as aux, _schema_defaults
import numpy as np
def _decode_peaks(info, peaks_data):
    """Decode the interleaved base 64 encoded, potentially
    compressed, raw data points.

    Parameters
    ----------
    info : dict
        The current context
    peaks_data : str
        The textually encoded peak data

    Returns
    -------
    tuple of np.array
        A pair of NumPy arrays containing
        m/z and intensity values.
    """
    compressed = (info.get('compressionType') == 'zlib')
    # Precision "32" selects float32; anything else decodes as float64.
    dt = np.float32 if info['precision'] == '32' else np.float64
    # Peaks are interleaved (m/z, intensity) pairs in big-endian
    # ("network") byte order.
    dtype = np.dtype([('m/z array', dt), ('intensity array', dt)]).newbyteorder('>')
    data = aux._decode_base64_data_array(peaks_data, dtype, compressed)
    return data
class IteratorQueue(object):
    """Re-orders scans from an mzXML stream into ascending scan-number order.

    Scans are buffered in a min-heap keyed on their ``num`` value. Each time
    an MS1 scan arrives, every buffered scan numbered below it is emitted;
    whatever remains is flushed, in order, once the source iterator ends.
    """

    def __init__(self, iterator):
        self.queue = []
        self.iterator = iterator
        self.last_index = -1
        self.producer = self.consume(iterator)

    def insert_item(self, scan):
        heapq.heappush(self.queue, (int(scan['num']), scan))

    def __iter__(self):
        return self.producer

    def consume(self, iterator):
        for scan in iterator:
            # Nested <scan> children are delivered separately; drop them here.
            scan.pop("scan", None)
            self.insert_item(scan)
            if scan['msLevel'] == 1:
                # An MS1 scan acts as a barrier: everything numbered strictly
                # below it can now be emitted in sorted order.
                barrier = int(scan['num'])
                while True:
                    index, entry = heapq.heappop(self.queue)
                    if index >= barrier:
                        self.insert_item(entry)
                        break
                    yield entry
        # Source exhausted: drain whatever is still buffered, in order.
        while self.queue:
            _, entry = heapq.heappop(self.queue)
            yield entry
class MzXML(aux.BinaryArrayConversionMixin, aux.TimeOrderedIndexedReaderMixin, xml.MultiProcessingXML, xml.IndexSavingXML):
    """Parser class for mzXML files."""

    _root_element = 'mzXML'
    _default_iter_tag = 'scan'
    _indexed_tags = {'scan'}
    # Scans are indexed by their 'num' attribute.
    _indexed_tag_keys = {'scan': 'num'}
    _default_version = None
    _default_schema = _schema_defaults._mzxml_schema_defaults
    _default_id_attr = 'num'

    def __init__(self, *args, **kwargs):
        # When False, peak arrays are wrapped in lazy records instead of
        # being decoded eagerly (see `_decode_peaks`).
        self.decode_binary = kwargs.pop('decode_binary', True)
        super(MzXML, self).__init__(*args, **kwargs)

    def __getstate__(self):
        # Preserve the decode_binary flag across pickling.
        state = super(MzXML, self).__getstate__()
        state['decode_binary'] = self.decode_binary
        return state

    def __setstate__(self, state):
        super(MzXML, self).__setstate__(state)
        self.decode_binary = state['decode_binary']

    def _get_info_smart(self, element, **kw):
        # Extract element info: non-recursively for the root <mzXML> element,
        # recursively otherwise, unless the caller set `recursive` explicitly.
        name = xml._local_name(element)
        kwargs = dict(kw)
        rec = kwargs.pop('recursive', None)
        if name in {'mzXML'}:
            info = self._get_info(element,
                                  recursive=(
                                      rec if rec is not None else False),
                                  **kwargs)
        else:
            info = self._get_info(element,
                                  recursive=(rec if rec is not None else True),
                                  **kwargs)
        if 'num' in info and isinstance(info, dict):
            # Mirror the scan number under the generic 'id' key.
            info['id'] = info['num']
        if 'peaks' in info and isinstance(info, dict):
            self._decode_peaks(info)
        return info

    def _determine_compression(self, info):
        # Map the mzXML compressionType attribute onto the mixin's vocabulary.
        if info.get('compressionType') == 'zlib':
            return 'zlib compression'
        return "no compression"

    def _determine_dtype(self, info):
        # Precision "32" -> float32, else float64; byteOrder 'network'/'big'
        # selects big-endian, anything else little-endian.
        dt = np.float32 if info['precision'] == '32' else np.float64
        endianess = ">" if info['byteOrder'] in ('network', "big") else "<"
        dtype = np.dtype(
            [('m/z array', dt), ('intensity array', dt)]).newbyteorder(endianess)
        return dtype

    def _finalize_record_conversion(self, array, record):
        key = record.key
        return self._convert_array(key, array[key])

    def _decode_peaks(self, info):
        # handle cases where peaks is the encoded binary data which must be
        # unpacked
        if not isinstance(info['peaks'], (dict, list)):
            compression_type = self._determine_compression(info)
            dtype = self._determine_dtype(info)
            binary = info.pop('peaks')
            if not self.decode_binary:
                # Defer decoding: store one lazy record per output array.
                for k in self._array_keys:
                    record = self._make_record(binary, compression_type, dtype, k)
                    info[k] = record
            else:
                peak_data = self.decode_data_array(binary, compression_type, dtype)
                for k in self._array_keys:
                    info[k] = self._convert_array(k, peak_data[k])
        # otherwise we've already decoded the arrays and we're just passing
        # them up the hierarchy
        else:
            if not self.decode_binary:
                arrays = info.pop('peaks')[0]
                for k in self._array_keys:
                    info[k] = arrays[k]
            else:
                peak_data = info.pop('peaks')[0]
                for k in self._array_keys:
                    info[k] = self._convert_array(k, peak_data.get(k, np.array([])))

    def iterfind(self, path, **kwargs):
        # Scans are re-ordered by scan number via IteratorQueue; any other
        # path is delegated unchanged.
        if path == 'scan':
            generator = super(MzXML, self).iterfind(path, **kwargs)
            for item in IteratorQueue(generator):
                yield item
        else:
            for item in super(MzXML, self).iterfind(path, **kwargs):
                yield item

    def _get_time(self, scan):
        # Time key used by TimeOrderedIndexedReaderMixin.
        return scan['retentionTime']
def read(source, read_schema=False, iterative=True, use_index=False, dtype=None,
         huge_tree=False, decode_binary=True):
    """Parse `source` and iterate through spectra.
    Parameters
    ----------
    source : str or file
        A path to a target mzXML file or the file object itself.
    read_schema : bool, optional
        If :py:const:`True`, attempt to extract information from the XML schema
        mentioned in the mzXML header. Otherwise, use default
        parameters. Not recommended without Internet connection or
        if you don't like to get the related warnings.
    iterative : bool, optional
        Defines whether iterative parsing should be used. It helps reduce
        memory usage at almost the same parsing speed. Default is
        :py:const:`True`.
    use_index : bool, optional
        Defines whether an index of byte offsets needs to be created for
        spectrum elements. Default is :py:const:`False`.
    dtype : type or str or dict, optional
        dtype argument to :py:mod:`numpy` array constructor, one for all
        arrays or one for each key.
    decode_binary : bool, optional
        Defines whether binary data should be decoded and included in the output
        (under "m/z array", "intensity array", etc.).
        Default is :py:const:`True`.
    huge_tree : bool, optional
        This option is passed to the `lxml` parser and defines whether
        security checks for XML tree depth and node size should be disabled.
        Default is :py:const:`False`.
        Enable this option for trusted files to avoid XMLSyntaxError exceptions
        (e.g. `XMLSyntaxError: xmlSAX2Characters: huge text node`).
    Returns
    -------
    out : iterator
        An iterator over the dicts with spectrum properties.
    """
    return MzXML(source, read_schema=read_schema, iterative=iterative,
                 use_index=use_index, dtype=dtype, huge_tree=huge_tree,
                 decode_binary=decode_binary)
def iterfind(source, path, **kwargs):
    """Parse `source` and yield info on elements with specified local
    name or by specified XPath.
    .. note:: This function is provided for backward compatibility only.
        If you do multiple :py:func:`iterfind` calls on one file, you should
        create an :py:class:`MzXML` object and use its
        :py:meth:`!iterfind` method.
    Parameters
    ----------
    source : str or file
        File name or file-like object.
    path : str
        Element name or XPath-like expression. Only local names separated
        with slashes are accepted. An asterisk (`*`) means any element.
        You can specify a single condition in the end, such as:
        ``"/path/to/element[some_value>1.5]"``
        Note: you can do much more powerful filtering using plain Python.
        The path can be absolute or "free". Please don't specify
        namespaces.
    recursive : bool, optional
        If :py:const:`False`, subelements will not be processed when
        extracting info from elements. Default is :py:const:`True`.
    iterative : bool, optional
        Specifies whether iterative XML parsing should be used. Iterative
        parsing significantly reduces memory usage and may be just a little
        slower. When `retrieve_refs` is :py:const:`True`, however, it is
        highly recommended to disable iterative parsing if possible.
        Default value is :py:const:`True`.
    read_schema : bool, optional
        If :py:const:`True`, attempt to extract information from the XML schema
        mentioned in the mzXML header (default). Otherwise, use default
        parameters. Disable this to avoid waiting on slow network connections or
        if you don't like to get the related warnings.
    decode_binary : bool, optional
        Defines whether binary data should be decoded and included in the output
        (under "m/z array", "intensity array", etc.).
        Default is :py:const:`True`.
    Returns
    -------
    out : iterator
    """
    # NOTE(review): `kwargs` is passed both to the MzXML constructor and to
    # `iterfind`; both accept arbitrary keyword arguments.
    return MzXML(source, **kwargs).iterfind(path, **kwargs)
# Module-level version helper built from the MzXML class definition.
version_info = xml._make_version_info(MzXML)
# chain = aux._make_chain(read, 'read')
# `chain` reads multiple sources as a single continuous iterator.
chain = aux.ChainBase._make_chain(MzXML)
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/ms2.py | pyteomics/ms2.py | """
ms2 - read and write MS/MS data in MS2 format
=============================================
Summary
-------
`MS2 <http://dx.doi.org/10.1002/rcm.1603>`_ is a simple
human-readable format for MS2 data. It allows storing MS2 peak lists and
experimental parameters.
This module provides minimalistic infrastructure for access to data stored in
MS2 files.
Two main classes are :py:class:`MS2`, which provides an iterative, text-mode parser,
and :py:class:`IndexedMS2`, which is a binary-mode parser that supports random access using scan IDs
and retention times.
The function :py:func:`read` helps dispatch between the two classes.
Also, common parameters can be read from MS2 file header with
:py:func:`read_header` function.
Classes
-------
:py:class:`MS2` - a text-mode MS2 parser. Suitable to read spectra from a file consecutively.
Needs a file opened in text mode (or will open it if given a file name).
:py:class:`IndexedMS2` - a binary-mode MS2 parser. When created, builds a byte offset index
for fast random access by spectrum ID. Sequential iteration is also supported.
Needs a seekable file opened in binary mode (if created from existing file object).
:py:class:`MS2Base` - abstract class, the common ancestor of the two classes above.
Can be used for type checking.
Functions
---------
:py:func:`read` - an alias for :py:class:`MS2` or :py:class:`IndexedMS2`.
:py:func:`chain` - read multiple files at once.
:py:func:`chain.from_iterable` - read multiple files at once, using an
iterable of files.
:py:func:`read_header` - get a dict with common parameters for all spectra
from the beginning of MS2 file.
-------------------------------------------------------------------------------
"""
# Copyright 2012 Anton Goloborodko, Lev Levitsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyteomics import auxiliary as aux
from pyteomics.ms1 import MS1, IndexedMS1, MS1Base
class MS2Base(aux.MaskedArrayConversionMixin, MS1Base):
    """Abstract class representing an MS2 file. Subclasses implement different approaches to parsing."""
    # Arrays produced for each spectrum; charge and resolution are optional columns.
    _array_keys = ['m/z array', 'intensity array', 'charge array', 'resolution array']
    # Spectrum-level parameters parsed as floats.
    _float_keys = ['RTime', 'RetTime', 'IonInjectionTime', 'PrecursorInt']
    def __init__(self, source=None, use_header=False, convert_arrays=2, dtype=None, read_charges=True, read_resolutions=True, encoding=None, **kwargs):
        """
        Create an instance of a :py:class:`MS2Base` parser.
        Parameters
        ----------
        source : str or file or None, optional
            A file object (or file name) with data in MS1 format. Default is
            :py:const:`None`, which means read standard input.
        use_header : bool, optional
            Add the info from file header to each dict. Spectrum-specific parameters
            override those from the header in case of conflict.
            Default is :py:const:`False`.
        convert_arrays : one of {0, 1, 2}, optional
            If `0`, m/z, intensities and (possibly) charges will be returned as regular lists.
            If `1`, they will be converted to regular :py:class:`numpy.ndarray`'s.
            If `2`, charges will be reported as a masked array (default).
            The default option is the slowest. `1` and `2` require :py:mod:`numpy`.
        read_charges : bool, optional
            If `True` (default), fragment charges are reported. Disabling it improves performance.
            Charge is expected to be the **third** number on the line, after peak *m/z* and intensity.
        read_resolutions : bool, optional
            If `True` (default), fragment peak resolutions are reported. Disabling it improves performance.
            Resolution is expected to be the **fourth** number on the line, after peak *m/z*, intensity, and charge.
        dtype : type or str or dict, optional
            dtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.
            Keys should be 'm/z array', 'intensity array', 'charge array', 'resolution array'.
        encoding : str, optional
            File encoding.
        """
        super(MS2Base, self).__init__(source=source, use_header=use_header, convert_arrays=convert_arrays, dtype=dtype,
                                      encoding=encoding, **kwargs)
        self._read_charges = read_charges
        self._read_resolutions = read_resolutions
    def _handle_peak(self, line, sline, info):
        """Parse one peak line, appending charge/resolution values when enabled.
        Peak lines are ``m/z intensity [charge [resolution]]``; missing
        optional trailing fields are recorded as 0.
        """
        super(MS2Base, self)._handle_peak(line, sline, info)
        if self._read_charges:
            # charge is the third whitespace-separated field, if present
            if len(sline) > 2:
                sline = line.strip().split()
                try:
                    info['charge array'].append(int(sline[2]))
                except ValueError:
                    raise aux.PyteomicsError("Error parsing fragment charge on line: " + line)
            else:
                info['charge array'].append(0)
        if self._read_resolutions:
            # Bug fix: resolution is the FOURTH field (sline[3]), so it is
            # only present when the line has more than 3 fields. The previous
            # `len(sline) > 2` check raised an uncaught IndexError on
            # three-field (m/z, intensity, charge) peak lines.
            if len(sline) > 3:
                sline = line.strip().split()
                try:
                    info['resolution array'].append(int(sline[3]))
                except ValueError:
                    raise aux.PyteomicsError("Error parsing fragment peak resolution on line: " + line)
            else:
                info['resolution array'].append(0)
    def _make_scan(self, info):
        """Drop disabled optional arrays before building the scan dict."""
        if not self._read_charges:
            del info['charge array']
        if not self._read_resolutions:
            del info['resolution array']
        return super(MS2Base, self)._make_scan(info)
    def __reduce_ex__(self, protocol):
        # Recreate the parser from its constructor arguments on unpickling
        # (use_header=False, dtype=None; the rest is taken from the instance).
        return (self.__class__,
                (self._source_init, False, self._convert_arrays, None, self._read_charges, self._read_resolutions, self.encoding),
                self.__getstate__())
class MS2(MS2Base, MS1):
    """
    A class representing an MS2 file. Supports the `with` syntax and direct iteration for sequential
    parsing.
    :py:class:`MS2` object behaves as an iterator, **yielding** spectra one by one.
    Each 'spectrum' is a :py:class:`dict` with the keys 'm/z array',
    'intensity array' and 'params' (plus 'charge array' and 'resolution array'
    when their parsing is enabled, which is the default). 'm/z array' and
    'intensity array' store :py:class:`numpy.ndarray`'s of floats,
    and 'params' stores a :py:class:`dict` of parameters.
    Attributes
    ----------
    header : dict
        The file header.
    """
    def __init__(self, *args, **kwargs):
        """
        Create an :py:class:`MS2` (text-mode) reader for a given MS2 file.
        Parameters
        ----------
        source : str or file or None, optional
            A file object (or file name) with data in MS2 format. Default is
            :py:const:`None`, which means read standard input.
            .. note :: If a file object is given, it must be opened in text mode.
        use_header : bool, optional
            Add the info from file header to each dict. Spectrum-specific parameters
            override those from the header in case of conflict.
            Default is :py:const:`False`.
        convert_arrays : one of {0, 1, 2}, optional
            If `0`, m/z, intensities and (possibly) charges will be returned as regular lists.
            If `1`, they will be converted to regular :py:class:`numpy.ndarray`'s.
            If `2`, charges will be reported as a masked array (default).
            The default option is the slowest. `1` and `2` require :py:mod:`numpy`.
        read_charges : bool, optional
            If `True` (default), fragment charges are reported. Disabling it improves performance.
            Charge is expected to be the **third** number on the line, after peak *m/z* and intensity.
        read_resolutions : bool, optional
            If `True` (default), fragment peak resolutions are reported. Disabling it improves performance.
            Resolution is expected to be the **fourth** number on the line, after peak *m/z*, intensity, and charge.
        dtype : type or str or dict, optional
            dtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.
            Keys should be 'm/z array', 'intensity array', 'charge array', 'resolution array'.
        encoding : str, optional
            File encoding.
        Returns
        -------
        out : MS2
            The reader object.
        """
        super(MS2, self).__init__(*args, **kwargs)
class IndexedMS2(IndexedMS1, MS2Base):
    """
    A class representing an MS2 file. Supports the `with` syntax and direct iteration for sequential
    parsing. Specific spectra can be accessed by title using the indexing syntax in constant time.
    If created using a file object, it needs to be opened in binary mode.
    When iterated, :py:class:`IndexedMS2` object yields spectra one by one.
    Each 'spectrum' is a :py:class:`dict` with four keys: 'm/z array',
    'intensity array', 'charge array' and 'params'. 'm/z array' and
    'intensity array' store :py:class:`numpy.ndarray`'s of floats,
    'charge array' is a masked array (:py:class:`numpy.ma.MaskedArray`) of ints,
    and 'params' stores a :py:class:`dict` of parameters (keys and values are
    :py:class:`str`, keys corresponding to MS2).
    .. warning ::
        Labels for scan objects are constructed as the first number in the S line, as follows:
        for a line ``S 0 1 123.4`` the label is `'0'`. If these labels are not unique
        for the scans in the file, the indexed parser will not work correctly. Consider using
        :py:class:`MS2` instead.
    Attributes
    ----------
    header : dict
        The file header.
    time : RTLocator
        A property used for accessing spectra by retention time.
    """
    def __init__(self, source=None, use_header=False, convert_arrays=2, dtype=None, read_charges=True, read_resolutions=True,
                 encoding='utf-8', _skip_index=False, **kwargs):
        """
        Create an :py:class:`IndexedMS2` (binary-mode) reader for a given MS2 file.
        Parameters
        ----------
        source : str or file or None, optional
            A file object (or file name) with data in MS2 format. Default is
            :py:const:`None`, which means read standard input.
            .. note :: If a file object is given, it must be opened in binary mode.
        use_header : bool, optional
            Add the info from file header to each dict. Spectrum-specific parameters
            override those from the header in case of conflict.
            Default is :py:const:`False`.
        convert_arrays : one of {0, 1, 2}, optional
            If `0`, m/z, intensities and (possibly) charges will be returned as regular lists.
            If `1`, they will be converted to regular :py:class:`numpy.ndarray`'s.
            If `2`, charges will be reported as a masked array (default).
            The default option is the slowest. `1` and `2` require :py:mod:`numpy`.
        read_charges : bool, optional
            If `True` (default), fragment charges are reported. Disabling it improves performance.
            Charge is expected to be the **third** number on the line, after peak *m/z* and intensity.
        read_resolutions : bool, optional
            If `True` (default), fragment peak resolutions are reported. Disabling it improves performance.
            Resolution is expected to be the **fourth** number on the line, after peak *m/z*, intensity, and charge.
        dtype : type or str or dict, optional
            dtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.
            Keys should be 'm/z array', 'intensity array', 'charge array', 'resolution array'.
        encoding : str, optional
            File encoding.
        block_size : int, optional
            Size of the chunk (in bytes) used to parse the file when creating the byte offset index.
        Returns
        -------
        out : IndexedMS2
            The reader object.
        """
        super(IndexedMS2, self).__init__(source, use_header=use_header, convert_arrays=convert_arrays, dtype=dtype,
                                         read_charges=read_charges, read_resolutions=read_resolutions, encoding=encoding, _skip_index=_skip_index, **kwargs)
    def __reduce_ex__(self, protocol):
        # Recreate the parser on unpickling, skipping index rebuilding
        # (the index is restored from the pickled state).
        return (self.__class__,
                (self._source_init, False, self._convert_arrays, None, self._read_charges, self._read_resolutions, self.encoding, True),
                self.__getstate__())
def read_header(source, *args, **kwargs):
    """Read the parameters specified in the header of an MS2 file.
    Parameters
    ----------
    source : str or file
        File name or file object representing a file in MS2 format.
    Returns
    -------
    header : dict
    """
    # Force header parsing regardless of what the caller passed.
    options = dict(kwargs)
    options['use_header'] = True
    return read(source, *args, **options).header
def read(*args, **kwargs):
    """Read an MS2 file and return entries iteratively.
    Read the specified MS2 file, **yield** spectra one by one.
    Each 'spectrum' is a :py:class:`dict` with three keys: 'm/z array',
    'intensity array', and 'params'. 'm/z array' and
    'intensity array' store :py:class:`numpy.ndarray`'s of floats,
    and 'params' stores a :py:class:`dict` of parameters.
    Parameters
    ----------
    source : str or file or None, optional
        A file object (or file name) with data in MS2 format. Default is
        :py:const:`None`, which means read standard input.
    use_header : bool, optional
        Add the info from file header to each dict. Spectrum-specific parameters
        override those from the header in case of conflict.
        Default is :py:const:`False`.
    convert_arrays : bool, optional
        If :py:const:`False`, m/z and intensities will be returned as regular lists.
        If :py:const:`True` (default), they will be converted to regular :py:class:`numpy.ndarray`'s.
        Conversion requires :py:mod:`numpy`.
    read_charges : bool, optional
        If `True` (default), fragment charges are reported. Disabling it improves performance.
        Charge is expected to be the **third** number on the line, after peak *m/z* and intensity.
    read_resolutions : bool, optional
        If `True` (default), fragment peak resolutions are reported. Disabling it improves performance.
        Resolution is expected to be the **fourth** number on the line, after peak *m/z*, intensity, and charge.
    dtype : type or str or dict, optional
        dtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.
        Keys should be 'm/z array' and/or 'intensity array'.
    encoding : str, optional
        File encoding.
    use_index : bool, optional
        Determines which parsing method to use. If :py:const:`True`, an instance of
        :py:class:`IndexedMS2` is created. This facilitates random access by scan titles.
        If an open file is passed as `source`, it needs to be open in binary mode.
        .. warning ::
            Labels for scan objects are constructed as the first number in the S line, as follows:
            for a line ``S 0 1 123.4`` the label is `'0'`. If these labels are not unique
            for the scans in the file, the indexed parser will not work correctly.
        If :py:const:`False` (default), an instance of :py:class:`MS2` is created. It reads
        `source` in text mode and is suitable for iterative parsing.
    block_size : int, optional
        Size of the chunk (in bytes) used to parse the file when creating the byte offset index.
        (Accepted only for :py:class:`IndexedMS2`.)
    Returns
    -------
    out :
        An instance of :py:class:`MS2` or :py:class:`IndexedMS2`, depending on `use_index` and `source`.
    """
    # The source may come positionally or as a keyword argument.
    source = args[0] if args else kwargs.get('source')
    # `use_index` is consumed here; the rest of `kwargs` goes to the reader.
    use_index = aux._check_use_index(source, kwargs.pop('use_index', None), False)
    reader_class = IndexedMS2 if use_index else MS2
    return reader_class(*args, **kwargs)
# `chain` reads several files as one continuous iterator (also provides
# `chain.from_iterable`).
chain = aux._make_chain(read, 'read')
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/version.py | pyteomics/version.py | """
version - Pyteomics version information
=======================================
This module is provided for convenience and captures information about the current version number of Pyteomics.
Classes
-------
:py:class:`VersionInfo` - a namedtuple for version numbers that supports comparisons and can be initialized
from a version string.
Constants
---------
:py:const:`version` - a string with the current version.
:py:const:`version_info` - a tuple with structured information about the current version.
"""
# The current Pyteomics version string.
__version__ = '5.0a10'
from collections import namedtuple
import re
class VersionInfo(namedtuple('VersionInfo', ('major', 'minor', 'micro', 'releaselevel', 'serial'))):
    """Tuple mimicking :py:const:`sys.version_info`.
    Instances can be created from a version string (e.g. ``'4.5.2'`` or
    ``'5.0a10'``) or from an iterable of five components. Fields are stored
    as strings (or :py:const:`None` when absent). Ordering comparisons use
    the numeric value of each field, with non-numeric or missing fields
    treated as 0; equality compares the string fields and accepts plain
    version strings as the other operand.
    """
    def __new__(cls, version_str):
        if isinstance(version_str, str):
            # major.minor[.micro][releaselevel][serial], e.g. '5.0a10'
            groups = re.match(r'(\d+)\.(\d+)(?:\.)?(\d+)?([a-zA-Z]+)?(\d+)?', version_str).groups()
            inst = super(VersionInfo, cls).__new__(cls, *groups)
        else:
            inst = super(VersionInfo, cls).__new__(cls, *(str(x) if x is not None else x for x in version_str))
        inst._version_str = version_str
        # Numeric view used for ordering; non-numeric parts count as 0.
        inst._version_ints = tuple(int(x) if isinstance(x, str) and x.isdigit() else 0 for x in inst)
        return inst
    def __str__(self):
        return 'Version {}'.format(self._version_str)
    def __lt__(self, other):
        if not isinstance(other, VersionInfo):
            other = VersionInfo(other)
        return self._version_ints < other._version_ints
    def __gt__(self, other):
        if not isinstance(other, VersionInfo):
            other = VersionInfo(other)
        return self._version_ints > other._version_ints
    def __le__(self, other):
        return self == other or self < other
    def __ge__(self, other):
        return self == other or self > other
    def __eq__(self, other):
        if not isinstance(other, VersionInfo):
            other = VersionInfo(other)
        return super(VersionInfo, self).__eq__(other)
    # Bug fix: defining __eq__ implicitly sets __hash__ to None in Python 3,
    # which made instances unhashable (TypeError when used as dict keys or
    # set members). Restore tuple hashing, consistent with the field-wise
    # equality used between VersionInfo instances.
    __hash__ = tuple.__hash__
# Structured (tuple) and plain-string forms of the current version.
version_info = VersionInfo(__version__)
version = __version__
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/xml.py | pyteomics/xml.py | """
xml - utilities for XML parsing
===============================
This module is not intended for end users. It implements the abstract classes
for all XML parsers, :py:class:`XML` and :py:class:`IndexedXML`, and some utility functions.
Dependencies
------------
This module requires :py:mod:`lxml` and :py:mod:`numpy`.
--------------------------------------------------------------------------------
"""
# Copyright 2012 Anton Goloborodko, Lev Levitsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import socket
from traceback import format_exc
import warnings
from collections import OrderedDict, namedtuple
from itertools import islice
from lxml import etree
import numpy as np
from urllib.request import urlopen, URLError
from .auxiliary import FileReader, PyteomicsError, _file_obj, HierarchicalOffsetIndex
from .auxiliary import unitint, unitfloat, unitstr, cvstr
from .auxiliary import _keepstate_method as _keepstate
from .auxiliary import TaskMappingMixin, IndexedReaderMixin, IndexSavingMixin
from .auxiliary.psims_util import load_psims, HasValueTypeRelationship, _has_psims
def _local_name(element):
"""Strip namespace from the XML element's name"""
tag = element.tag
if tag and tag[0] == '{':
return tag.rpartition('}')[2]
return tag
def xsd_parser(schema_url):
    """Parse an XSD file from the specified URL into a schema dictionary
    that can be used by :class:`XML` parsers to automatically cast data to
    the appropriate type.
    The result maps category names ('ints', 'floats', 'bools', 'intlists',
    'floatlists', 'charlists') to sets of ``(element name, attribute name)``
    pairs, plus 'lists' mapping to a set of element names that may repeat.
    Parameters
    ----------
    schema_url : str
        The URL to retrieve the schema from
    Returns
    -------
    dict
    """
    ret = {}
    # Local paths are turned into file:// URLs so urlopen can handle both.
    if not (schema_url.startswith('http://') or
            schema_url.startswith('https://') or
            schema_url.startswith('file://')):
        schema_url = 'file://' + schema_url
    schema_file = urlopen(schema_url)
    p = etree.XMLParser(remove_comments=True)
    schema_tree = etree.parse(schema_file, parser=p)
    # XSD base types grouped into the conversion categories used by XML parsers.
    types = {'ints': {'int', 'long', 'nonNegativeInteger', 'positiveInt',
                      'integer', 'unsignedInt'},
             'floats': {'float', 'double'},
             'bools': {'boolean'},
             'intlists': {'listOfIntegers'},
             'floatlists': {'listOfFloats'},
             'charlists': {'listOfChars', 'listOfCharsOrAny'}}
    for k, val in types.items():
        tuples = set()
        for elem in schema_tree.iter():
            if _local_name(elem) == 'attribute' and elem.attrib.get(
                    'type', '').split(':')[-1] in val:
                # Walk up to the named complexType or element that owns
                # this attribute declaration.
                anc = elem.getparent()
                anc_name = _local_name(anc)
                # NOTE(review): if getparent() ever returns None before a
                # match, _local_name(None) would raise AttributeError ahead
                # of the `anc is None` check — confirm schemas always
                # terminate the walk earlier.
                while not (
                        (anc_name == 'complexType' and 'name' in anc.attrib) or anc_name == 'element'):
                    anc = anc.getparent()
                    anc_name = _local_name(anc)
                    if anc is None:
                        break
                else:
                    # while/else: only record the pair when the loop exited
                    # normally (i.e. a named ancestor was found).
                    if anc_name == 'complexType':
                        # Resolve the complexType back to every element using it.
                        elnames = [x.attrib['name'] for x in
                                   schema_tree.iter()
                                   if x.attrib.get('type', '').split(':')[-1] == anc.attrib['name']]
                    else:
                        elnames = (anc.attrib['name'],)
                    for elname in elnames:
                        tuples.add(
                            (elname, elem.attrib['name']))
        ret[k] = tuples
    # Elements allowed to occur more than once (maxOccurs != 1).
    ret['lists'] = set(elem.attrib['name'] for elem in schema_tree.xpath(
        '//*[local-name()="element"]') if 'name' in elem.attrib and
        elem.attrib.get('maxOccurs', '1') != '1')
    return ret
class XMLValueConverter(object):
    """String-to-Python-value converters used by schema-driven XML parsing."""
    # Adapted from http://stackoverflow.com/questions/2764269/parsing-an-xsduration-datatype-into-a-python-datetime-timedelta-object
    _duration_parser = re.compile(
        (r'(?P<sign>-?)P(?:(?P<years>\d+\.?\d*)Y)?(?:(?P<months>\d+\.?\d*)M)?(?:(?P<days>\d+\.?\d*)D)?(?:T(?:(?P<hours>\d+\.?\d*)H)?(?:(?P<minutes>\d+\.?\d*)M)?(?:(?P<seconds>\d+\.?\d*)S)?)?'))
    @classmethod
    def duration_str_to_float(cls, s):
        """Convert an xsd:duration string to minutes (as a `unitfloat`).
        Strings not starting with 'P' are passed through as a
        unit-annotated float, or str if not numeric.
        NOTE(review): the years/months/days groups and the sign are captured
        but never used — only H/M/S contribute to the result; confirm
        this is intended.
        """
        # Not a duration, so pass along
        if not s.startswith('P'):
            try:
                return unitfloat(s, 'duration')
            except ValueError:
                return unitstr(s, 'duration')
        match = cls._duration_parser.search(s)
        if match:
            matchdict = match.groupdict()
            hours = float(matchdict.get('hours', 0) or 0)
            minutes = float(matchdict.get('minutes', 0) or 0)
            seconds = float(matchdict.get('seconds', 0) or 0)
            minutes += hours * 60.
            minutes += (seconds / 60.)
            return unitfloat(minutes, 'minute')
        else:
            return unitstr(s, 'duration')
    @classmethod
    def str_to_bool(cls, s):
        """Convert an XML boolean string to :py:class:`bool`; raise on anything else."""
        if s.lower() in {'true', '1', 'y'}:
            return True
        if s.lower() in {'false', '0', 'n'}:
            return False
        raise PyteomicsError('Cannot convert string to bool: ' + s)
    @classmethod
    def str_to_num(cls, s, numtype):
        """Convert `s` with `numtype`; empty strings map to :py:const:`None`."""
        return numtype(s) if s else None
    @classmethod
    def to(cls, t):
        """Return a one-argument converter function based on type `t`."""
        def convert_from(s):
            return cls.str_to_num(s, t)
        return convert_from
    @classmethod
    def converters(cls):
        """Return the mapping from schema category names to converter callables."""
        return {
            'ints': cls.to(unitint), 'floats': cls.to(unitfloat), 'bools': cls.str_to_bool,
            # NOTE(review): text-mode np.fromstring is deprecated in NumPy;
            # consider np.array(x.split(), dtype=...) when next touching this.
            'intlists': lambda x: np.fromstring(x.replace('\n', ' '), dtype=int, sep=' '),
            'floatlists': lambda x: np.fromstring(x.replace('\n', ' '), sep=' '),
            'charlists': list,
            'duration': cls.duration_str_to_float
        }
class _XMLParam(namedtuple("XMLParam", ("name", "value", "type"))):
'''A holder for semantic parameters used in several common XML formats
Attributes
----------
name: :class:`~.cvstr`
The name of the attribute, carrying the accession and unit information
value: :class:`~.unitfloat`, :class:`~.unitint` or :class:`~.unitstr`
The value of the parameter
type: :class:`str`
The parameter's local XML tag name.
'''
__slots__ = ()
def is_empty(self):
value = self.value
return value == "" or value is None
class XML(FileReader):
"""Base class for all format-specific XML parsers. The instances can be used
as context managers and as iterators.
"""
# Configurable data
file_format = 'XML'
_root_element = None
_default_schema = {}
_read_schema = False
_default_version = 0
_default_iter_tag = None
_default_iter_path = None
_structures_to_flatten = []
_schema_location_param = 'schemaLocation'
_default_id_attr = 'id'
_huge_tree = False
_retrieve_refs_enabled = None # only some subclasses implement this
_iterative = True
# these se attributes deal with parsing of UserParams
_element_handlers = {}
_param_elements = {'userParam', 'UserParam'}
_param_subelements = _param_elements
_param_types = {'int': int, 'float': float, 'string': str}
_default_param_type = float
_fallback_param_type = str
# Configurable plugin logic
_converters = XMLValueConverter.converters()
# Must be implemented by subclasses
def _get_info_smart(self, element, **kwargs):
raise NotImplementedError
def __init__(self, source, read_schema=None, iterative=None, build_id_cache=False, **kwargs):
"""Create an XML parser object.
Parameters
----------
source : str or file
File name or file-like object corresponding to an XML file.
read_schema : bool, optional
Defines whether schema file referenced in the file header
should be used to extract information about value conversion.
Default is :py:const:`False`.
iterative : bool, optional
Defines whether an :py:class:`ElementTree` object should be
constructed and stored on the instance or if iterative parsing
should be used instead. Iterative parsing keeps the memory usage
low for large XML files. Default is :py:const:`True`.
build_id_cache : bool, optional
Defines whether a dictionary mapping IDs to XML tree elements
should be built and stored on the instance. It is used in
:py:meth:`XML.get_by_id`, e.g. when using
:py:class:`pyteomics.mzid.MzIdentML` with ``retrieve_refs=True``.
huge_tree : bool, optional
This option is passed to the `lxml` parser and defines whether
security checks for XML tree depth and node size should be disabled.
Default is :py:const:`False`.
Enable this option for trusted files to avoid XMLSyntaxError exceptions
(e.g. `XMLSyntaxError: xmlSAX2Characters: huge text node`).
"""
super(XML, self).__init__(
source, mode='rb', parser_func=self.iterfind, pass_file=False,
args=(self._default_iter_path or self._default_iter_tag,), kwargs=kwargs)
if iterative is None:
iterative = self._iterative
if iterative:
self._tree = None
else:
self.build_tree()
if build_id_cache:
self.build_id_cache()
else:
self._id_dict = None
self.version_info = self._get_version_info()
if read_schema is not None:
self._read_schema = read_schema
self.schema_info = self._get_schema_info(read_schema)
self._converters_items = self._converters.items()
self._huge_tree = kwargs.get('huge_tree', self._huge_tree)
self._retrieve_refs_enabled = kwargs.get('retrieve_refs')
def __reduce_ex__(self, protocol):
return self.__class__, (
self._source_init, self._read_schema, self._tree is None,
False,
), self.__getstate__()
def __getstate__(self):
state = super(XML, self).__getstate__()
state['_huge_tree'] = self._huge_tree
state['_retrieve_refs_enabled'] = self._retrieve_refs_enabled
state['_id_dict'] = self._id_dict
return state
def __setstate__(self, state):
super(XML, self).__setstate__(state)
self._huge_tree = state['_huge_tree']
self._retrieve_refs_enabled = state['_retrieve_refs_enabled']
self._id_dict = state['_id_dict']
@_keepstate
def _get_version_info(self):
"""
Provide version information about the XML file.
Returns
-------
out : tuple
A (version, schema URL) tuple, both elements are strings or None.
"""
for _, elem in etree.iterparse(
self._source, events=('start',), remove_comments=True, huge_tree=self._huge_tree):
if _local_name(elem) == self._root_element:
return (
elem.attrib.get('version'),
elem.attrib.get(('{{{}}}'.format(elem.nsmap['xsi']) if 'xsi' in elem.nsmap else '') + self._schema_location_param)
)
@_keepstate
def _get_schema_info(self, read_schema=True):
"""Stores defaults for the schema, tries to retrieve the schema for
other versions. Keys are: 'floats', 'ints', 'bools', 'lists',
'intlists', 'floatlists', 'charlists'."""
if not read_schema:
return self._default_schema
version, schema = self.version_info
if version == self._default_version:
return self._default_schema
ret = {}
try:
if not schema:
schema_url = ''
raise PyteomicsError(
'Schema information not found in {}.'.format(self.name))
schema_url = schema.split()[-1]
ret = xsd_parser(schema_url)
except Exception as e:
if isinstance(e, (URLError, socket.error, socket.timeout)):
warnings.warn(
"Can't get the {0.file_format} schema for version `{1}` from <{2}> at the moment.\n"
"Using defaults for {0._default_version}.\nYou can disable reading the schema by specifying "
"`read_schema=False`.".format(self, version, schema_url))
else:
warnings.warn(
"Unknown {0.file_format} version `{1}`.\n"
"Attempt to use schema information from <{2}> failed.\nException information:\n{3}\n"
"Falling back to defaults for {0._default_version}\n"
"NOTE: This is just a warning, probably from a badly-generated XML file.\nYou will still most probably get "
"decent results.\nLook here for suppressing warnings:\n"
"http://docs.python.org/library/warnings.html#temporarily-suppressing-warnings\n"
"You can also disable reading the schema by specifying `read_schema=False`.\n"
"If you think this shouldn't have happened, please report this to\n"
"http://github.com/levitsky/pyteomics/issues\n"
"".format(self, version, schema_url, format_exc()))
ret = self._default_schema
return ret
def _convert_types(self, name, info):
try:
for k, v in info.items():
for t, a in self._converters_items:
if t in self.schema_info and (name, k) in self.schema_info[t]:
info[k] = a(v)
except ValueError as e:
message = 'Error when converting types: {}'.format(e.args)
if not self._read_schema:
message += '\nTry reading the file with read_schema=True'
raise PyteomicsError(message)
def _flatten(self, info):
for k, v in dict(info).items():
if k in self._structures_to_flatten:
if isinstance(v, list):
for vi in v:
info.update(vi)
else:
info.update(v)
del info[k]
# another simplification
for k, v in dict(info).items():
if isinstance(v, dict) and 'name' in v and len(v) == 1:
info[k] = v['name']
if len(info) == 2 and 'name' in info and (
'value' in info or 'values' in info):
name = info.pop('name')
info = {name: info.popitem()[1]}
return info
def _process_text(self, element, name, info):
if element.text:
stext = element.text.strip()
if stext:
if info:
info[name] = stext
else:
return stext
def _postprocess(self, element, name, info, **kwargs):
text = self._process_text(element, name, info)
if text:
return text
self._convert_types(name, info)
if kwargs.get('retrieve_refs', self._retrieve_refs_enabled):
self._retrieve_refs(info, **kwargs)
info = self._flatten(info)
return info
@_keepstate
def build_tree(self):
"""Build and store the :py:class:`ElementTree` instance
for the underlying file"""
p = etree.XMLParser(remove_comments=True, huge_tree=True)
self._tree = etree.parse(self._source, parser=p)
def clear_tree(self):
"""Remove the saved :py:class:`ElementTree`."""
self._tree = None
def _retrieve_refs(self, info, **kwargs):
"""Retrieves and embeds the data for each attribute in `info` that
ends in _ref. Removes the id attribute from `info`.
This implementation is a stub and must be implemented for each specific
subclass. It is only called if :attr:`retrieve_refs` """
raise NotImplementedError(
("_retrieve_refs is not implemented for {}. "
"Do not use `retrieve_refs=True`.").format(
self.__class__.__name__))
def iterfind(self, path, **kwargs):
"""Parse the XML and yield info on elements with specified local
name or by specified "XPath".
Parameters
----------
path : str
Element name or XPath-like expression. The path is very close to
full XPath syntax, but local names should be used for all elements in the path.
They will be substituted with local-name() checks, up to the (first) predicate.
The path can be absolute or "free". Please don't specify namespaces.
**kwargs : passed to :py:meth:`self._get_info_smart`.
Returns
-------
out : iterator
"""
return Iterfind(self, path, **kwargs)
@_keepstate
def _iterfind_impl(self, path, **kwargs):
"""Parse the XML and yield info on elements with specified local
name or by specified "XPath".
Parameters
----------
path : str
Element name or XPath-like expression. The path is very close to
full XPath syntax, but local names should be used for all elements in the path.
They will be substituted with local-name() checks, up to the (first) predicate.
The path can be absolute or "free". Please don't specify namespaces.
**kwargs : passed to :py:meth:`self._get_info_smart`.
Returns
-------
out : iterator
"""
try:
path, tail = re.match(pattern_path, path).groups()
except AttributeError:
raise PyteomicsError('Invalid path: ' + path)
if path[:2] == '//' or path[0] != '/':
absolute = False
if path[:2] == '//':
path = path[2:]
if path[0] == '/' or '//' in path:
raise PyteomicsError("Too many /'s in a row.")
else:
absolute = True
path = path[1:]
nodes = path.rstrip('/').split('/')
if not nodes:
raise PyteomicsError('Invalid path: ' + path)
if not self._tree:
if tail:
if tail[0] == '[':
tail = '(.)' + tail
else:
raise PyteomicsError('Cannot parse path tail: ' + tail)
xpath = etree.XPath(tail)
localname = nodes[0]
found = False
for ev, elem in etree.iterparse(self, events=('start', 'end'), remove_comments=True, huge_tree=self._huge_tree):
name_lc = _local_name(elem)
if ev == 'start':
if name_lc == localname or localname == '*':
found += 1
else:
if name_lc == localname or localname == '*':
if (absolute and elem.getparent() is None) or not absolute:
for child in get_rel_path(elem, nodes[1:]):
if tail:
for elem in xpath(child):
info = self._get_info_smart(elem, **kwargs)
yield info
else:
info = self._get_info_smart(child, **kwargs)
yield info
if not localname == '*':
found -= 1
if not found:
elem.clear()
else:
xpath = ('/' if absolute else '//') + '/'.join(
'*[local-name()="{}"]'.format(node) if node != '*' else '*' for node in nodes) + tail
for elem in self._tree.xpath(xpath):
info = self._get_info_smart(elem, **kwargs)
yield info
@_keepstate
def build_id_cache(self):
"""Construct a cache for each element in the document, indexed by id
attribute"""
stack = 0
id_dict = {}
for event, elem in etree.iterparse(self._source, events=('start', 'end'), remove_comments=True, huge_tree=self._huge_tree):
if event == 'start':
if 'id' in elem.attrib:
stack += 1
else:
if 'id' in elem.attrib:
stack -= 1
id_dict[elem.attrib['id']] = elem
elif stack == 0:
elem.clear()
self._id_dict = id_dict
def clear_id_cache(self):
"""Clear the element ID cache"""
self._id_dict = {}
def _find_by_id_no_reset(self, elem_id, id_key=None):
"""
An almost exact copy of :meth:`get_by_id` with the difference that it does
not reset the file reader's position before iterative parsing.
Parameters
----------
elem_id : str
The element id to query for
Returns
-------
lxml.Element
"""
found = False
if id_key is None:
id_key = self._default_id_attr
for event, elem in etree.iterparse(
self._source, events=('start', 'end'), remove_comments=True, huge_tree=self._huge_tree):
if event == 'start':
if elem.attrib.get(id_key) == elem_id:
found = True
else:
if elem.attrib.get(id_key) == elem_id:
return elem
if not found:
elem.clear()
raise KeyError(elem_id)
@_keepstate
def get_by_id(self, elem_id, **kwargs):
"""Parse the file and return the element with `id` attribute equal
to `elem_id`. Returns :py:const:`None` if no such element is found.
Parameters
----------
elem_id : str
The value of the `id` attribute to match.
Returns
-------
out : :py:class:`dict` or :py:const:`None`
"""
if not self._id_dict:
elem = self._find_by_id_no_reset(elem_id)
else:
elem = self._id_dict[elem_id]
return self._get_info_smart(elem, **kwargs)
# the following methods deal with parsing of UserParams
def _param_type(self, attribs):
if attribs.get('type') in self._param_types:
return self._param_types[attribs['type']]
return self._default_param_type
def _param_value(self, attribs):
value = attribs.get('value', '')
vtype = self._param_type(attribs)
try:
return vtype(value)
except ValueError:
return self._fallback_param_type(value)
def _param_name(self, attribs):
return attribs['name']
def _handle_param(self, element, **kwargs):
"""Unpacks cvParam and userParam tags into key-value pairs"""
attribs = element.attrib
return _XMLParam(self._param_name(attribs), self._param_value(attribs), _local_name(element))
def _handle_referenceable_param_group(self, param_group_ref, **kwargs):
raise NotImplementedError()
return []
def _find_immediate_params(self, element, **kwargs):
return element.xpath(
'./*[' + ' or '.join('local-name()="{}"'.format(name) for name in self._param_subelements) + ']')
def _insert_param(self, info_dict, param):
key = param.name
if key in info_dict:
if isinstance(info_dict[key], list):
info_dict[key].append(param.value)
else:
info_dict[key] = [info_dict[key], param.value]
else:
info_dict[key] = param.value
def _promote_empty_parameter_to_name(self, info, params):
empty_values = []
not_empty_values = []
for param in params:
if param.is_empty():
empty_values.append(param)
else:
not_empty_values.append(param)
if len(empty_values) == 1 and 'name' not in info:
info['name'] = empty_values[0].name
return info, not_empty_values
return info, params
def _get_info(self, element, **kwargs):
# this method currently does not call the superclass implementation, rather relies on XML._postprocess
try:
name = kwargs.pop('ename')
except KeyError:
name = _local_name(element)
if name in self._param_elements:
return self._handle_param(element, **kwargs)
elif name == "referenceableParamGroupRef":
return self._handle_referenceable_param_group(element, **kwargs)
info = dict(element.attrib)
# process subelements
params = []
if kwargs.get('recursive'):
for child in element.iterchildren():
cname = _local_name(child)
if cname in self._param_elements:
newinfo = self._handle_param(child, **kwargs)
params.append(newinfo)
elif cname == "referenceableParamGroupRef":
params.extend(self._handle_referenceable_param_group(child, **kwargs))
else:
if cname not in self.schema_info['lists']:
info[cname] = self._get_info_smart(child, ename=cname, **kwargs)
else:
info.setdefault(cname, []).append(
self._get_info_smart(child, ename=cname, **kwargs))
else:
# handle the case where we do not want to unpack all children, but
# *Param tags are considered part of the current entity, semantically
for child in self._find_immediate_params(element, **kwargs):
param_or_group = self._handle_param(child, **kwargs)
if isinstance(param_or_group, list):
params.extend(param_or_group)
else:
params.append(param_or_group)
handler = self._element_handlers.get(name)
if handler is not None:
info, params = handler(self, info, params)
for param in params:
self._insert_param(info, param)
info = self._postprocess(element, name, info, **kwargs)
return info
class CVParamParser(XML):
"""
A subclass of :py:class:`XML` that implements additional processing for `cvParam` elements.
These elements refer to the PSI-MS Controlled Vocabulary, and :py:class:`CVParamParser` uses a copy of it
for type checking.
This class requires :py:mod:`psims` to work.
Attributes
----------
cv : psims.controlled_vocabulary.controlled_vocabulary.ControlledVocabulary
"""
cv = None
_param_types = {'int': unitint, 'float': unitfloat, 'string': unitstr}
_default_param_type = unitfloat
_fallback_param_type = unitstr
_param_elements = XML._param_elements.copy()
_param_elements.add('cvParam')
_param_subelements = _param_elements.copy()
_param_subelements.add('referenceableParamGroupRef')
_cvparam_types = {
'int': {'xsd:integer', 'xsd:int', 'xsd:nonNegativeInteger', 'xsd:positiveInteger'},
'float': {'xsd:float', 'xsd:double', 'xsd:decimal'},
'string': {'xsd:string', 'xsd:anyURI', 'xsd:boolean', 'xsd:dateTime'} # better catch more to avoid auto-conversion to float
}
_cv_type_cache = {}
def _param_name(self, attribs):
unit_accesssion = None
if 'unitCvRef' in attribs or 'unitName' in attribs:
unit_accesssion = attribs.get('unitAccession')
accession = attribs.get('accession')
return cvstr(attribs['name'], accession, unit_accesssion)
def _param_unit_info(self, attribs):
unit_info = None
unit_accesssion = None
if 'unitCvRef' in attribs or 'unitName' in attribs:
unit_accesssion = attribs.get('unitAccession')
unit_name = attribs.get('unitName')
if self.cv is not None and unit_name is None and unit_accesssion is not None:
unit_name = self.cv[unit_accesssion].name
unit_info = unit_name or unit_accesssion
return unit_info
def _param_type(self, attribs):
if attribs.get('type') in self._param_types:
return self._param_types[attribs['type']]
param_accession = attribs.get('accession')
if param_accession in self._cv_type_cache:
return self._cv_type_cache[param_accession]
# check for type information in CV
if self.cv is not None and 'accession' in attribs and 'value' in attribs and attribs.get('cvRef') in {'PSI-MS', 'MS'}:
entity = self.cv[attribs['accession']]
for r in entity.relationship:
if isinstance(r, HasValueTypeRelationship):
for type_name, types in self._cvparam_types.items():
if r.value_type.id in types:
tp = self._param_types[type_name]
self._cv_type_cache[param_accession] = tp
return tp
tp = self._default_param_type
self._cv_type_cache[param_accession] = tp
return tp
def _param_value(self, attribs):
value = attribs.get('value', '')
vtype = self._param_type(attribs)
uinfo = self._param_unit_info(attribs)
try:
return vtype(value, uinfo)
except ValueError:
return self._fallback_param_type(value, uinfo)
def __init__(self, *args, **kwargs):
super(CVParamParser, self).__init__(*args, **kwargs)
if not _has_psims:
raise PyteomicsError('Parsing PSI formats requires `psims`.')
cv = kwargs.pop('cv', None)
if cv is None:
cv = load_psims()
self.cv = cv
# XPath emulator tools
pattern_path = re.compile(r'([\w/*]*)(.*)')
def get_rel_path(element, names):
if not names:
yield element
else:
for child in element.iterchildren():
if names[0] == '*' or _local_name(child) == names[0]:
if len(names) == 1:
yield child
else:
for gchild in get_rel_path(child, names[1:]):
yield gchild
def xpath(tree, path, ns=None):
"""Return the results of XPath query with added namespaces.
Assumes the ns declaration is on the root element or absent.
Parameters
----------
tree : ElementTree
path : str
ns : str or None, optional
"""
if hasattr(tree, 'getroot'):
root = tree.getroot()
else:
root = tree
while root.getparent() is not None:
root = root.getparent()
ns = root.nsmap.get(ns)
def repl(m):
s = m.group(1)
if not ns: return s
if not s: return 'd:'
return '/d:'
new_path = re.sub(r'(\/|^)(?![\*\/])', repl, path)
n_s = {'d': ns} if ns else None
return tree.xpath(new_path, namespaces=n_s)
def _make_version_info(cls):
def version_info(source):
return cls(source).version_info
version_info.__doc__ = """
Provide version information about the {0.file_format} file.
.. note:: This function is provided for backward compatibility only.
It simply creates an :py:class:`{0.__name__}` instance
and returns its :py:data:`!version_info` attribute.
Parameters
----------
source : str or file
File name or file-like object.
Returns
-------
out : tuple
A (version, schema URL) tuple, both elements are strings or None.
""".format(cls)
return version_info
class ByteCountingXMLScanner(_file_obj):
"""
Carry out the construction of a byte offset index for `source` XML file
for each type of tag in :attr:`indexed_tags`.
Inheris from :py:class:`pyteomics.auxiliary._file_obj` to support the object-oriented
:py:func:`_keep_state` interface.
"""
entities = {
'quot': '"',
'amp': '&',
'apos': "'",
'lt': '<',
'gt': '>',
}
xml_entity_pattern = re.compile(r"&({});".format('|'.join(entities.keys())))
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | true |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/mzid.py | pyteomics/mzid.py | """
mzid - mzIdentML file reader
============================
Summary
-------
`mzIdentML <http://www.psidev.info/mzidentml>`_ is one of the standards
developed by the Proteomics Informatics working group of the HUPO Proteomics
Standard Initiative.
This module provides a minimalistic way to extract information from mzIdentML
files. You can use the old functional interface (:py:func:`read`) or the new
object-oriented interface (:py:class:`MzIdentML`) to iterate over entries in
``<SpectrumIdentificationResult>`` elements, i.e. groups of identifications
for a certain spectrum. Note that each entry can contain more than one PSM
(peptide-spectrum match). They are accessible with "SpectrumIdentificationItem"
key.
:py:class:`MzIdentML` objects also support direct indexing by element ID.
Data access
-----------
:py:class:`MzIdentML` - a class representing a single MzIdentML file.
Other data access functions use this class internally.
:py:func:`read` - iterate through peptide-spectrum matches in an mzIdentML
file. Data from a single PSM group are converted to a human-readable dict.
Basically creates an :py:class:`MzIdentML` object and reads it.
:py:func:`chain` - read multiple files at once.
:py:func:`chain.from_iterable` - read multiple files at once, using an
iterable of files.
:py:func:`DataFrame` - read MzIdentML files into a :py:class:`pandas.DataFrame`.
Target-decoy approach
---------------------
:py:func:`filter` - read a chain of mzIdentML files and filter to a certain
FDR using TDA.
:py:func:`filter.chain` - chain a series of filters applied independently to
several files.
:py:func:`filter.chain.from_iterable` - chain a series of filters applied
independently to an iterable of files.
:py:func:`filter_df` - filter MzIdentML files and return a :py:class:`pandas.DataFrame`.
:py:func:`is_decoy` - determine if a "SpectrumIdentificationResult" should be
consiudered decoy.
:py:func:`fdr` - estimate the false discovery rate of a set of identifications
using the target-decoy approach.
:py:func:`qvalues` - get an array of scores and local FDR values for a PSM
set using the target-decoy approach.
Controlled Vocabularies and Caching
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
mzIdentML relies on controlled vocabularies to describe its contents extensibly.
Every :py:class:`MzIdentML` needs a copy of PSI-MS CV, which it handles using the :py:mod:`psims` library.
If you want to save time when creating instances of :py:class:`MzIdentML`, consider enabling the :py:mod:`psims` cache.
See `psims documentation <https://mobiusklein.github.io/psims/docs/build/html/controlled_vocabulary/controlled_vocabulary.html#caching>`_
on how to enable and configure the cache (alternatively, you can handle CV creation yourself and pass a pre-created instance
using the `cv` parameter to :py:class:`MzIdentML`).
See also
`Controlled Vocabulary Terms <../data.html#controlled-vocabulary-terms-in-structured-data>`_
for more details on how they are used.
Handling Time Units and Other Qualified Quantities
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
mzIdentML contains information which may be described as using a variety of different time units.
See `Unit Handling <../data.html#unit-handling>`_ for more information.
Deprecated functions
--------------------
:py:func:`version_info` - get information about mzIdentML version and schema.
You can just read the corresponding attribute of the :py:class:`MzIdentML`
object.
:py:func:`get_by_id` - get an element by its ID and extract the data from it.
You can just call the corresponding method of the :py:class:`MzIdentML`
object.
:py:func:`iterfind` - iterate over elements in an mzIdentML file.
You can just call the corresponding method of the :py:class:`MzIdentML`
object.
Dependencies
------------
This module requires :py:mod:`lxml`.
-------------------------------------------------------------------------------
"""
# Copyright 2012 Anton Goloborodko, Lev Levitsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from . import auxiliary as aux
from . import xml, _schema_defaults
class MzIdentML(xml.CVParamParser, xml.MultiProcessingXML, xml.IndexSavingXML):
"""Parser class for MzIdentML files."""
file_format = 'mzIdentML'
_root_element = 'MzIdentML'
_default_schema = _schema_defaults._mzid_schema_defaults
_default_version = '1.1.0'
_default_iter_tag = 'SpectrumIdentificationResult'
_structures_to_flatten = {'Fragmentation'}
_indexed_tags = {'SpectrumIdentificationResult', 'SpectrumIdentificationItem',
'SearchDatabase', 'SourceFile', 'SpectraData', 'Sample',
'DBSequence', 'Peptide', 'PeptideEvidence',
'Measure', 'TranslationTable', 'MassTable', 'Enzyme',
'Organization', 'AnalysisSoftware', 'BibliographicReference', 'Person', 'Provider',
'SpectrumIdentificationList', 'SpectrumIdentificationProtocol', 'SpectrumIdentification',
'ProteinDetectionList', 'ProteinDetectionProtocol', 'ProteinDetection',
'ProteinDetectionHypothesis', 'ProteinAmbiguityGroup'}
_element_handlers = xml.CVParamParser._element_handlers.copy()
_element_handlers.update({
"Modification": xml.CVParamParser._promote_empty_parameter_to_name,
"SpectrumIDFormat": xml.CVParamParser._promote_empty_parameter_to_name,
"FileFormat": xml.CVParamParser._promote_empty_parameter_to_name,
"Role": xml.CVParamParser._promote_empty_parameter_to_name
})
def __init__(self, *args, **kwargs):
kwargs.setdefault('retrieve_refs', True)
super(MzIdentML, self).__init__(*args, **kwargs)
def _get_info_smart(self, element, **kwargs):
"""Extract the info in a smart way depending on the element type"""
name = xml._local_name(element)
kwargs = dict(kwargs)
rec = kwargs.pop("recursive", None)
# Try not to recursively unpack the root element
# unless the user really wants to.
if rec is None:
rec = (name != self._root_element)
return self._get_info(element, recursive=rec, **kwargs)
def _retrieve_refs(self, info, **kwargs):
"""Retrieves and embeds the data for each attribute in `info` that
ends in _ref. Removes the id attribute from `info`"""
for k, v in dict(info).items():
if k.endswith('_ref'):
try:
by_id = self.get_by_id(v, retrieve_refs=True)
except KeyError:
warnings.warn('Ignoring unresolved reference: ' + v)
else:
info.update(by_id)
del info[k]
info.pop('id', None)
def read(source, **kwargs):
"""Parse `source` and iterate through peptide-spectrum matches.
.. note:: This function is provided for backward compatibility only.
It simply creates an :py:class:`MzIdentML` instance using
provided arguments and returns it.
Parameters
----------
source : str or file
A path to a target mzIdentML file or the file object itself.
recursive : bool, optional
If :py:const:`False`, subelements will not be processed when
extracting info from elements. Default is :py:const:`True`.
retrieve_refs : bool, optional
If :py:const:`True`, additional information from references will be
automatically added to the results. The file processing time will
increase. Default is :py:const:`True`.
iterative : bool, optional
Specifies whether iterative XML parsing should be used. Iterative
parsing significantly reduces memory usage and may be just a little
slower. When `retrieve_refs` is :py:const:`True`, however, it is
highly recommended to disable iterative parsing if possible.
Default value is :py:const:`True`.
read_schema : bool, optional
If :py:const:`True`, attempt to extract information from the XML schema
mentioned in the mzIdentML header (default). Otherwise, use default
parameters. Disable this to avoid waiting on slow network connections or
if you don't like to get the related warnings.
build_id_cache : bool, optional
Defines whether a cache of element IDs should be built and stored on the
created :py:class:`MzIdentML` instance. Default value is the value of
`retrieve_refs`.
.. note:: This parameter is ignored when ``use_index`` is ``True`` (default).
use_index : bool, optional
Defines whether an index of byte offsets needs to be created for
the indexed elements. If :py:const:`True` (default), `build_id_cache` is ignored.
indexed_tags : container of bytes, optional
Defines which elements need to be indexed. Empty set by default.
Returns
-------
out : MzIdentML
An iterator over the dicts with PSM properties.
"""
kwargs = kwargs.copy()
kwargs.setdefault('retrieve_refs', True)
kwargs['build_id_cache'] = kwargs.get('build_id_cache', kwargs.get('retrieve_refs'))
return MzIdentML(source, **kwargs)
def iterfind(source, path, **kwargs):
"""Parse `source` and yield info on elements with specified local
name or by specified "XPath".
.. note:: This function is provided for backward compatibility only.
If you do multiple :py:func:`iterfind` calls on one file, you should
create an :py:class:`MzIdentML` object and use its
:py:meth:`!iterfind` method.
Parameters
----------
source : str or file
File name or file-like object.
path : str
Element name or XPath-like expression. Only local names separated
with slashes are accepted. An asterisk (`*`) means any element.
You can specify a single condition in the end, such as:
``"/path/to/element[some_value>1.5]"``
Note: you can do much more powerful filtering using plain Python.
The path can be absolute or "free". Please don't specify
namespaces.
recursive : bool, optional
If :py:const:`False`, subelements will not be processed when
extracting info from elements. Default is :py:const:`True`.
retrieve_refs : bool, optional
If :py:const:`True`, additional information from references will be
automatically added to the results. The file processing time will
increase. Default is :py:const:`False`.
iterative : bool, optional
Specifies whether iterative XML parsing should be used. Iterative
parsing significantly reduces memory usage and may be just a little
slower. When `retrieve_refs` is :py:const:`True`, however, it is
highly recommended to disable iterative parsing if possible.
Default value is :py:const:`True`.
read_schema : bool, optional
If :py:const:`True`, attempt to extract information from the XML schema
mentioned in the mzIdentML header (default). Otherwise, use default
parameters. Disable this to avoid waiting on slow network connections or
if you don't like to get the related warnings.
build_id_cache : bool, optional
Defines whether a cache of element IDs should be built and stored on the
created :py:class:`MzIdentML` instance. Default value is the value of
`retrieve_refs`.
Returns
-------
out : iterator
"""
kwargs = kwargs.copy()
kwargs['build_id_cache'] = kwargs.get('build_id_cache',
kwargs.get('retrieve_refs'))
return MzIdentML(source, **kwargs).iterfind(path, **kwargs)
version_info = xml._make_version_info(MzIdentML)
def get_by_id(source, elem_id, **kwargs):
"""Parse `source` and return the element with `id` attribute equal
to `elem_id`. Returns :py:const:`None` if no such element is found.
.. note:: This function is provided for backward compatibility only.
If you do multiple :py:func:`get_by_id` calls on one file, you should
create an :py:class:`MzIdentML` object and use its
:py:meth:`!get_by_id` method.
Parameters
----------
source : str or file
A path to a target mzIdentML file of the file object itself.
elem_id : str
The value of the `id` attribute to match.
Returns
-------
out : :py:class:`dict` or :py:const:`None`
"""
return MzIdentML(source, **kwargs).get_by_id(elem_id, **kwargs)
# chain = aux._make_chain(read, 'read')
chain = aux.ChainBase._make_chain(MzIdentML)
def is_decoy(psm, prefix=None):
"""Given a PSM dict, return :py:const:`True` if all proteins in the dict
are marked as decoy, and :py:const:`False` otherwise.
Parameters
----------
psm : dict
A dict, as yielded by :py:func:`read`.
prefix : ignored
Returns
-------
out : bool
"""
return all(pe['isDecoy'] for sii in psm['SpectrumIdentificationItem']
for pe in sii['PeptideEvidenceRef'])
def DataFrame(*args, **kwargs):
"""Read MzIdentML files into a :py:class:`pandas.DataFrame`.
Requires :py:mod:`pandas`.
.. warning :: Only the first 'SpectrumIdentificationItem' element is considered in every
'SpectrumIdentificationResult'.
Parameters
----------
*args
Passed to :py:func:`chain`.
**kwargs
Passed to :py:func:`chain`.
sep : str or None, keyword only, optional
Some values related to PSMs (such as protein information) are variable-length
lists. If `sep` is a :py:class:`str`, they will be packed into single string using
this delimiter. If `sep` is :py:const:`None`, they are kept as lists. Default is
:py:const:`None`.
Returns
-------
out : pandas.DataFrame
"""
import pandas as pd
data = []
sep = kwargs.pop('sep', None)
with chain(*args, **kwargs) as f:
for item in f:
info = {}
for k, v in item.items():
if isinstance(v, (str, int, float)):
info[k] = v
sii = item.get('SpectrumIdentificationItem', [None])[0]
if sii is not None:
info.update((k, v) for k, v in sii.items() if isinstance(v, (str, int, float)))
evref = sii.get('PeptideEvidenceRef')
if evref:
prot_descr, accessions, isd, starts, ends, lengths = [], [], [], [], [], []
for d in evref:
prot_descr.append(d.get('protein description'))
accessions.append(d.get('accession'))
isd.append(d.get('isDecoy'))
starts.append(d.get('start'))
ends.append(d.get('end'))
lengths.append(d.get('length'))
isd = all(isd)
if sep is not None:
if all(isinstance(prd, str) for prd in prot_descr):
prot_descr = sep.join(prot_descr)
if all(isinstance(acc, str) for acc in accessions):
accessions = sep.join(accessions)
if all(prd is None for prd in prot_descr):
prot_descr = None
if all(acc is None for acc in accessions):
accessions = None
info.update((k, v) for k, v in evref[0].items() if isinstance(v, (str, int, float, list)))
info['protein description'] = prot_descr
info['accession'] = accessions
info['isDecoy'] = isd
info['start'] = starts
info['end'] = ends
info['length'] = lengths
data.append(info)
df = pd.DataFrame(data)
return df
def filter_df(*args, **kwargs):
    """Read MzIdentML files or DataFrames and return a :py:class:`DataFrame`
    containing only the PSMs that pass FDR filtering.

    Positional arguments can be MzIdentML files or DataFrames.
    Requires :py:mod:`pandas`.

    .. warning :: Only the first 'SpectrumIdentificationItem' element is considered in every
        'SpectrumIdentificationResult'.

    Parameters
    ----------
    key : str / iterable / callable, keyword only, optional
        Default is 'mascot:expectation value'.
    is_decoy : str / iterable / callable, keyword only, optional
        Default is 'isDecoy'.
    *args
        Passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.
    **kwargs
        Passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.

    Returns
    -------
    out : pandas.DataFrame
    """
    import pandas as pd
    kwargs.setdefault('key', 'mascot:expectation value')
    kwargs.setdefault('is_decoy', 'isDecoy')
    # Ready-made DataFrames are simply concatenated; anything else is parsed.
    frames_only = all(isinstance(a, pd.DataFrame) for a in args)
    psms = pd.concat(args) if frames_only else DataFrame(*args, **kwargs)
    return aux.filter(psms, **kwargs)
# Module-level FDR helpers assembled from the factory functions in the
# auxiliary module; they consume reader output produced by `chain`.
fdr = aux._make_fdr(is_decoy, None)
# Default ranking key: the best (lowest) Mascot expectation value among the
# identification items of a spectrum result.
_key = lambda x: min(
    sii['mascot:expectation value'] for sii in x['SpectrumIdentificationItem'])
qvalues = aux._make_qvalues(chain, is_decoy, None, _key)
# NOTE: deliberately shadows the builtin `filter` within this module.
filter = aux._make_filter(chain, is_decoy, None, _key, qvalues)
filter.chain = aux._make_chain(filter, 'filter', True)
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/_schema_defaults.py | pyteomics/_schema_defaults.py | _protxml_schema_defaults = {'bools': set(),
'charlists': set(),
'floatlists': set(),
'floats': {('ASAPRatio', 'heavy2light_ratio_mean'),
('ASAPRatio', 'heavy2light_ratio_standard_dev'),
('ASAPRatio', 'ratio_mean'),
('ASAPRatio', 'ratio_standard_dev'),
('ASAPRatio_pvalue', 'adj_ratio_mean'),
('ASAPRatio_pvalue', 'adj_ratio_standard_dev'),
('ASAPRatio_pvalue', 'decimal_pvalue'),
('ASAPRatio_pvalue', 'heavy2light_adj_ratio_mean'),
('ASAPRatio_pvalue', 'heavy2light_adj_ratio_standard_dev'),
('ASAPRatio_pvalue', 'pvalue'),
('ASAP_Peak', 'heavy2light_ratio_mean'),
('ASAP_Peak', 'heavy2light_ratio_standard_dev'),
('ASAP_Peak', 'ratio_mean'),
('ASAP_Peak', 'ratio_standard_dev'),
('ASAP_Peak', 'weight'),
('ASAP_Seq', 'heavy2light_ratio_mean'),
('ASAP_Seq', 'heavy2light_ratio_standard_dev'),
('ASAP_Seq', 'ratio_mean'),
('ASAP_Seq', 'ratio_standard_dev'),
('ASAP_Seq', 'weight'),
('ASAP_prot_analysis_summary', 'min_peptide_probability'),
('ASAP_prot_analysis_summary', 'min_peptide_weight'),
('ASAP_prot_analysis_summary', 'min_protein_probability'),
('ASAP_pvalue_analysis_summary', 'background_fitting_error'),
('ASAP_pvalue_analysis_summary', 'background_ratio_mean'),
('ASAP_pvalue_analysis_summary', 'background_ratio_stdev'),
('StPeterQuant', 'SIn'),
('StPeterQuant', 'ng'),
('StPeterQuant_peptide', 'spectralIndex'),
('StPeter_analysis_summary', 'FDR'),
('StPeter_analysis_summary', 'probability'),
('StPeter_analysis_summary', 'sampleLoad'),
('StPeter_analysis_summary', 'tolerance'),
('XPress_analysis_summary', 'min_peptide_probability'),
('XPress_analysis_summary', 'min_peptide_weight'),
('XPress_analysis_summary', 'min_protein_probability'),
('affected_channel', 'correction'),
('decoy_analysis_summary', 'decoy_ratio'),
('error_point', 'error'),
('error_point', 'min_prob'),
('fpkm_distribution', 'alt_pos_to_neg_ratio'),
('fpkm_distribution', 'fpkm_lower_bound_excl'),
('fpkm_distribution', 'fpkm_lower_bound_incl'),
('fpkm_distribution', 'neg_freq'),
('fpkm_distribution', 'pos_freq'),
('fpkm_distribution', 'pos_to_neg_ratio'),
('fragment_masses', 'mz'),
('indistinguishable_peptide', 'calc_neutral_pep_mass'),
('intensity', 'error'),
('intensity', 'mz'),
('intensity', 'ratio'),
('libra_summary', 'mass_tolerance'),
('libra_summary', 'min_pep_prob'),
('libra_summary', 'min_pep_wt'),
('libra_summary', 'min_prot_prob'),
('ni_distribution', 'alt_pos_to_neg_ratio'),
('ni_distribution', 'neg_freq'),
('ni_distribution', 'ni_lower_bound_excl'),
('ni_distribution', 'ni_lower_bound_incl'),
('ni_distribution', 'pos_freq'),
('ni_distribution', 'pos_to_neg_ratio'),
('nsp_distribution', 'alt_pos_to_neg_ratio'),
('nsp_distribution', 'neg_freq'),
('nsp_distribution', 'nsp_lower_bound_excl'),
('nsp_distribution', 'nsp_lower_bound_incl'),
('nsp_distribution', 'pos_freq'),
('nsp_distribution', 'pos_to_neg_ratio'),
('peptide', 'calc_neutral_pep_mass'),
('peptide', 'exp_sibling_ion_bin'),
('peptide', 'exp_sibling_ion_instances'),
('peptide', 'exp_tot_instances'),
('peptide', 'fpkm_adjusted_probability'),
('peptide', 'initial_probability'),
('peptide', 'max_fpkm'),
('peptide', 'n_sibling_peptides'),
('peptide', 'ni_adjusted_probability'),
('peptide', 'nsp_adjusted_probability'),
('peptide', 'weight'),
('point', 'fdr_pp'),
('point', 'fdr_pp_decoy'),
('point', 'logratio'),
('point', 'model_distr'),
('point', 'num_corr_pp'),
('point', 'num_corr_pp_decoy'),
('point', 'obs_distr'),
('point', 'pp_decoy_uncert'),
('point', 'pp_uncert'),
('point', 'prob_cutoff'),
('protein', 'confidence'),
('protein', 'percent_coverage'),
('protein', 'probability'),
('protein_group', 'probability'),
('protein_summary_data_filter', 'false_positive_error_rate'),
('protein_summary_data_filter', 'min_probability'),
('protein_summary_data_filter', 'predicted_num_correct'),
('protein_summary_data_filter', 'predicted_num_incorrect'),
('protein_summary_data_filter', 'sensitivity'),
('protein_summary_header', 'initial_min_peptide_prob'),
('protein_summary_header', 'min_peptide_probability'),
('protein_summary_header', 'min_peptide_weight'),
('protein_summary_header', 'num_predicted_correct_prots'),
('protein_summary_header', 'total_no_spectrum_ids')},
'intlists': set(),
'ints': {('ASAPRatio', 'ratio_number_peptides'),
('ASAP_Peak', 'datanum'),
('ASAP_Seq', 'datanum'),
('ASAP_pvalue_analysis_summary', 'asap_prot_id'),
('ASAP_pvalue_analysis_summary', 'asapratio_id'),
('StPeterQuant_peptide', 'charge'),
('affected_channel', 'channel'),
('analysis_result', 'id'),
('analysis_summary', 'id'),
('contributing_channel', 'channel'),
('error_point', 'num_corr'),
('error_point', 'num_incorr'),
('fpkm_distribution', 'bin_no'),
('fragment_masses', 'channel'),
('intensity', 'channel'),
('libra_result', 'number'),
('libra_summary', 'centroiding_preference'),
('libra_summary', 'normalization'),
('libra_summary', 'output_type'),
('ni_distribution', 'bin_no'),
('nsp_distribution', 'bin_no'),
('peptide', 'charge'),
('peptide', 'fpkm_bin'),
('peptide', 'n_enzymatic_termini'),
('peptide', 'n_instances'),
('peptide', 'n_sibling_peptides_bin'),
('protein', 'n_indistinguishable_proteins'),
('protein', 'total_number_distinct_peptides'),
('protein', 'total_number_peptides'),
('protein_summary_header', 'num_input_1_spectra'),
('protein_summary_header', 'num_input_2_spectra'),
('protein_summary_header', 'num_input_3_spectra'),
('protein_summary_header', 'num_input_4_spectra'),
('protein_summary_header', 'num_input_5_spectra')},
'lists': {'ASAP_Dta',
'ASAP_Peak',
'ASAP_Seq',
'StPeterQuant_peptide',
'affected_channel',
'analysis_result',
'analysis_summary',
'contributing_channel',
'error_point',
'fpkm_distribution',
'fpkm_information',
'fragment_masses',
'indistinguishable_peptide',
'indistinguishable_protein',
'intensity',
'mod_aminoacid_mass',
'modification_info',
'ni_distribution',
'ni_information',
'nsp_distribution',
'parameter',
'peptide',
'peptide_parent_protein',
'point',
'protein',
'protein_group',
'protein_summary_data_filter'}}
# Fallback typing rules for mzIdentML parsing: (element, attribute) pairs to
# coerce to each type, plus repeatable element names under 'lists'.
# Presumably used when XSD schema info is unavailable -- TODO confirm against
# the mzid reader module.
_mzid_schema_defaults = {
    'bools': {
        ('Enzyme', 'semiSpecific'),
        ('Enzymes', 'independent'),
        ('PeptideEvidence', 'isDecoy'),
        ('ProteinDetectionHypothesis', 'passThreshold'),
        ('SearchModification', 'fixedMod'),
        ('SpectrumIdentificationItem', 'passThreshold')},
    'charlists': {
        ('Modification', 'residues'),
        ('SearchModification', 'residues')},
    'floatlists': {('FragmentArray', 'values')},
    'floats': {
        ('Modification', 'avgMassDelta'),
        ('Modification', 'monoisotopicMassDelta'),
        ('Residue', 'mass'),
        ('SearchModification', 'massDelta'),
        ('SpectrumIdentificationItem', 'calculatedMassToCharge'),
        ('SpectrumIdentificationItem', 'calculatedPI'),
        ('SpectrumIdentificationItem', 'experimentalMassToCharge'),
        ('SubstitutionModification', 'avgMassDelta'),
        ('SubstitutionModification', 'monoisotopicMassDelta')},
    'intlists': {('IonType', 'index'), ('MassTable', 'msLevel')},
    'ints': {
        ('BibliographicReference', 'year'),
        ('DBSequence', 'length'),
        ('Enzyme', 'missedCleavages'),
        ('IonType', 'charge'),
        ('Modification', 'location'),
        ('PeptideEvidence', 'end'),
        ('PeptideEvidence', 'start'),
        ('SearchDatabase', 'numDatabaseSequences'),
        ('SearchDatabase', 'numResidues'),
        ('SpectrumIdentificationItem', 'chargeState'),
        ('SpectrumIdentificationItem', 'rank'),
        ('SpectrumIdentificationList', 'numSequencesSearched'),
        ('SubstitutionModification', 'location')},
    'lists': {
        'Affiliation',
        'AmbiguousResidue',
        'AnalysisSoftware',
        'BibliographicReference',
        'ContactRole',
        'DBSequence',
        'Enzyme',
        'Filter',
        'FragmentArray',
        'InputSpectra',
        'InputSpectrumIdentifications',
        'IonType',
        'MassTable',
        'Measure',
        'Modification',
        'Peptide',
        'PeptideEvidence',
        'PeptideEvidenceRef',
        'PeptideHypothesis',
        'ProteinAmbiguityGroup',
        'ProteinDetectionHypothesis',
        'Residue',
        'Sample',
        'SearchDatabase',
        'SearchDatabaseRef',
        'SearchModification',
        'SourceFile',
        'SpecificityRules',
        'SpectraData',
        'SpectrumIdentification',
        'SpectrumIdentificationItem',
        'SpectrumIdentificationItemRef',
        'SpectrumIdentificationList',
        'SpectrumIdentificationProtocol',
        'SpectrumIdentificationResult',
        'SubSample',
        'SubstitutionModification',
        'TranslationTable',
        'cv',
        'cvParam'}}
# trafoXML: same category layout as the other *_schema_defaults tables in
# this module (type-coercion pairs plus repeatable element names).
_trafoxml_schema_defaults = {
    'bools': set(),
    'charlists': set(),
    'floatlists': set(),
    'floats': {('Pair', 'from'), ('Pair', 'to'), ('TrafoXML', 'version')},
    'intlists': set(),
    'ints': {('Pairs', 'count')},
    'lists': {'Pair', 'Param'}}
# featureXML: same category layout as the other *_schema_defaults tables in
# this module (type-coercion pairs plus repeatable element names).
_featurexml_schema_defaults = {
    'ints': {('PeptideHit', 'charge'),
             # ('PeptideIdentification', 'spectrum_reference'),
             ('SearchParameters', 'missed_cleavages'),
             # ('UnassignedPeptideIdentification', 'spectrum_reference'),
             ('featureList', 'count'),
             ('quality', 'dim'),
             ('position', 'dim'),
             ('feature', 'charge'),
             ('convexhull', 'nr'),
             },
    'floats': {('PeptideHit', 'score'),
               ('PeptideIdentification', 'MZ'),
               ('PeptideIdentification', 'RT'),
               ('PeptideIdentification', 'significance_threshold'),
               ('ProteinHit', 'coverage'),
               ('ProteinHit', 'score'),
               ('ProteinIdentification', 'significance_threshold'),
               ('SearchParameters', 'peak_mass_tolerance'),
               ('SearchParameters', 'precursor_peak_tolerance'),
               ('UnassignedPeptideIdentification', 'MZ'),
               ('UnassignedPeptideIdentification', 'RT'),
               ('UnassignedPeptideIdentification', 'significance_threshold'),
               ('featureMap', 'version'),
               ('pt', 'x'),
               ('pt', 'y'),
               ('quality', 'quality'),
               ('position', 'position'),
               ('feature', 'overallquality'),
               ('feature', 'intensity'),
               },
    'bools': {('PeptideIdentification', 'higher_score_better'),
              ('ProteinIdentification', 'higher_score_better'),
              ('SearchParameters', 'peak_mass_tolerance_ppm'),
              ('SearchParameters', 'precursor_peak_tolerance_ppm'),
              ('UnassignedPeptideIdentification', 'higher_score_better')},
    'intlists': set(),
    'floatlists': set(),
    'charlists': set(),
    'lists': {'FixedModification',
              'IdentificationRun',
              'PeptideHit',
              'PeptideIdentification',
              'ProteinHit',
              'ProteinIdentification',
              'SearchParameters',
              'UnassignedPeptideIdentification',
              'UserParam',
              'VariableModification',
              'convexhull',
              'dataProcessing',
              'feature',
              'hposition',
              'hullpoint',
              'param',
              'position',
              'processingAction',
              'pt',
              'quality'}}
# X!Tandem output: same category layout as the other *_schema_defaults tables
# in this module; built with set unions and comprehensions over the repeated
# 'domain'/'group' attribute names. Also defines a 'duration' category for
# the retention-time attribute.
_tandem_schema_defaults = {
    'ints': {
        ('group', 'z'), ('aa', 'at')} | {('domain', k) for k in [
            'missed_cleavages', 'start', 'end', 'y_ions', 'b_ions',
            'a_ions', 'x_ions', 'c_ions', 'z_ions']},
    'floats': {('group', k) for k in [
        'fI', 'sumI', 'maxI', 'mh', 'expect']} | {
        ('domain', k) for k in [
            'expect', 'hyperscore', 'b_score', 'y_score',
            'a_score', 'x_score', 'c_score', 'z_score',
            'nextscore', 'delta', 'mh']} | {
        ('protein', 'expect'), ('protein', 'sumI'),
        ('aa', 'modified')},
    'bools': set(),
    'lists': {'group', 'trace', 'attribute', 'protein', 'aa', 'note'},
    'floatlists': {('values', 'values')},
    'intlists': set(), 'charlists': set(), 'duration': {('group', 'rt')}}
# mzXML: same category layout as the other *_schema_defaults tables in this
# module; includes a 'duration' category for the retention-time attribute.
_mzxml_schema_defaults = {
    'bools': {('dataProcessing', 'centroided'),
              ('dataProcessing', 'chargeDeconvoluted'),
              ('dataProcessing', 'deisotoped'),
              ('dataProcessing', 'spotIntegration'),
              ('maldi', 'collisionGas'),
              ('scan', 'centroided'),
              ('scan', 'chargeDeconvoluted'),
              ('scan', 'deisotoped')},
    'charlists': set(),
    'floatlists': set(),
    'floats': {('dataProcessing', 'intensityCutoff'),
               ('precursorMz', 'precursorIntensity'),
               ('precursorMz', 'windowWideness'),
               ('precursorMz', 'precursorMz'),
               ('scan', 'basePeakIntensity'),
               ('scan', 'basePeakMz'),
               ('scan', 'cidGasPressure'),
               ('scan', 'collisionEnergy'),
               ('scan', 'compensationVoltage'),
               ('scan', 'endMz'),
               ('scan', 'highMz'),
               ('scan', 'ionisationEnergy'),
               ('scan', 'lowMz'),
               ('scan', 'startMz'),
               ('scan', 'totIonCurrent')},
    'duration': {("scan", "retentionTime")
                 },
    'intlists': set(),
    'ints': {('msInstrument', 'msInstrumentID'),
             ('peaks', 'compressedLen'),
             ('precursorMz', 'precursorCharge'),
             ('robot', 'deadVolume'),
             ('scan', 'msInstrumentID'),
             ('scan', 'peaksCount'),
             ('scanOrigin', 'num'),
             ('scan', 'msLevel')},
    'lists': {'dataProcessing',
              'msInstrument',
              'parentFile',
              'peaks',
              'plate',
              'precursorMz',
              'scanOrigin',
              'spot'}}
_mzml_schema_defaults = {'ints': {
('spectrum', 'index'),
('instrumentConfigurationList', 'count'),
('binaryDataArray', 'encodedLength'),
('cvList', 'count'),
('binaryDataArray', 'arrayLength'),
('scanWindowList', 'count'),
('componentList', 'count'),
('sourceFileList', 'count'),
('productList', 'count'),
('referenceableParamGroupList', 'count'),
('scanList', 'count'),
('spectrum', 'defaultArrayLength'),
('dataProcessingList', 'count'),
('sourceFileRefList', 'count'),
('scanSettingsList', 'count'),
('selectedIonList', 'count'),
('chromatogram', 'defaultArrayLength'),
('precursorList', 'count'),
('chromatogram', 'index'),
('processingMethod', 'order'),
('targetList', 'count'),
('sampleList', 'count'),
('softwareList', 'count'),
('binaryDataArrayList', 'count'),
('spectrumList', 'count'),
('chromatogramList', 'count'),
('selectedIon', 'charge state')},
'floats': {},
'bools': {},
'lists': {'scan', 'spectrum', 'sample', 'cv', 'dataProcessing',
'cvParam', 'source', 'userParam', 'detector', 'product',
'referenceableParamGroupRef', 'selectedIon', 'sourceFileRef',
'binaryDataArray', 'analyzer', 'scanSettings',
'instrumentConfiguration', 'chromatogram', 'target',
'processingMethod', 'precursor', 'sourceFile',
'referenceableParamGroup', 'contact', 'scanWindow', 'software'},
'intlists': {},
'floatlists': {},
'charlists': {}}
# pepXML: same category layout as the other *_schema_defaults tables in this
# module (type-coercion pairs plus repeatable element names).
_pepxml_schema_defaults = {
    'ints':
        {('xpressratio_summary', 'xpress_light'),
         ('distribution_point', 'obs_5_distr'),
         ('distribution_point', 'obs_2_distr'),
         ('enzymatic_search_constraint', 'max_num_internal_cleavages'),
         ('asapratio_lc_heavypeak', 'right_valley'),
         ('libra_summary', 'output_type'),
         ('distribution_point', 'obs_7_distr'),
         ('spectrum_query', 'index'),
         ('data_filter', 'number'),
         ('roc_data_point', 'num_incorr'),
         ('search_hit', 'num_tol_term'),
         ('search_hit', 'num_missed_cleavages'),
         ('asapratio_lc_lightpeak', 'right_valley'),
         ('libra_summary', 'normalization'),
         ('specificity', 'min_spacing'),
         ('database_refresh_timestamp', 'min_num_enz_term'),
         ('enzymatic_search_constraint', 'min_number_termini'),
         ('xpressratio_result', 'light_lastscan'),
         ('distribution_point', 'obs_3_distr'),
         ('spectrum_query', 'end_scan'),
         ('analysis_result', 'id'),
         ('search_database', 'size_in_db_entries'),
         ('search_hit', 'hit_rank'),
         ('alternative_protein', 'num_tol_term'),
         ('search_hit', 'num_tot_proteins'),
         ('asapratio_summary', 'elution'),
         ('search_hit', 'tot_num_ions'),
         ('error_point', 'num_incorr'),
         ('mixture_model', 'precursor_ion_charge'),
         ('roc_data_point', 'num_corr'),
         ('search_hit', 'num_matched_ions'),
         ('dataset_derivation', 'generation_no'),
         ('xpressratio_result', 'heavy_firstscan'),
         ('xpressratio_result', 'heavy_lastscan'),
         ('error_point', 'num_corr'),
         ('spectrum_query', 'assumed_charge'),
         ('analysis_timestamp', 'id'),
         ('xpressratio_result', 'light_firstscan'),
         ('distribution_point', 'obs_4_distr'),
         ('asapratio_lc_heavypeak', 'left_valley'),
         ('fragment_masses', 'channel'),
         ('distribution_point', 'obs_6_distr'),
         ('affected_channel', 'channel'),
         ('search_result', 'search_id'),
         ('contributing_channel', 'channel'),
         ('asapratio_lc_lightpeak', 'left_valley'),
         ('asapratio_peptide_data', 'area_flag'),
         ('search_database', 'size_of_residues'),
         ('asapratio_peptide_data', 'cidIndex'),
         ('mixture_model', 'num_iterations'),
         ('mod_aminoacid_mass', 'position'),
         ('spectrum_query', 'start_scan'),
         ('asapratio_summary', 'area_flag'),
         ('mixture_model', 'tot_num_spectra'),
         ('search_summary', 'search_id'),
         ('xpressratio_timestamp', 'xpress_light'),
         ('distribution_point', 'obs_1_distr'),
         ('intensity', 'channel'),
         ('asapratio_contribution', 'charge'),
         ('libra_summary', 'centroiding_preference')},
    'floats':
        {('asapratio_contribution', 'error'),
         ('asapratio_lc_heavypeak', 'area_error'),
         ('modification_info', 'mod_nterm_mass'),
         ('distribution_point', 'model_4_neg_distr'),
         ('distribution_point', 'model_5_pos_distr'),
         ('spectrum_query', 'precursor_neutral_mass'),
         ('asapratio_lc_heavypeak', 'time_width'),
         ('xpressratio_summary', 'masstol'),
         ('affected_channel', 'correction'),
         ('distribution_point', 'model_7_neg_distr'),
         ('error_point', 'error'),
         ('intensity', 'target_mass'),
         ('roc_data_point', 'sensitivity'),
         ('distribution_point', 'model_4_pos_distr'),
         ('distribution_point', 'model_2_neg_distr'),
         ('distribution_point', 'model_3_pos_distr'),
         ('mixture_model', 'prior_probability'),
         ('roc_data_point', 'error'),
         ('intensity', 'normalized'),
         ('modification_info', 'mod_cterm_mass'),
         ('asapratio_lc_lightpeak', 'area_error'),
         ('distribution_point', 'fvalue'),
         ('distribution_point', 'model_1_neg_distr'),
         ('peptideprophet_summary', 'min_prob'),
         ('asapratio_result', 'mean'),
         ('point', 'pos_dens'),
         ('fragment_masses', 'mz'),
         ('mod_aminoacid_mass', 'mass'),
         ('distribution_point', 'model_6_neg_distr'),
         ('asapratio_lc_lightpeak', 'time_width'),
         ('asapratio_result', 'heavy2light_error'),
         ('peptideprophet_result', 'probability'),
         ('error_point', 'min_prob'),
         ('peptideprophet_summary', 'est_tot_num_correct'),
         ('roc_data_point', 'min_prob'),
         ('asapratio_result', 'heavy2light_mean'),
         ('distribution_point', 'model_5_neg_distr'),
         ('mixturemodel', 'neg_bandwidth'),
         ('asapratio_result', 'error'),
         ('xpressratio_result', 'light_mass'),
         ('point', 'neg_dens'),
         ('asapratio_lc_lightpeak', 'area'),
         ('distribution_point', 'model_1_pos_distr'),
         ('xpressratio_result', 'mass_tol'),
         ('mixturemodel', 'pos_bandwidth'),
         ('xpressratio_result', 'light_area'),
         ('asapratio_peptide_data', 'heavy_mass'),
         ('distribution_point', 'model_2_pos_distr'),
         ('search_hit', 'calc_neutral_pep_mass'),
         ('intensity', 'absolute'),
         ('asapratio_peptide_data', 'light_mass'),
         ('distribution_point', 'model_3_neg_distr'),
         ('aminoacid_modification', 'mass'),
         ('asapratio_lc_heavypeak', 'time'),
         ('asapratio_lc_lightpeak', 'time'),
         ('asapratio_lc_lightpeak', 'background'),
         ('mixture_model', 'est_tot_correct'),
         ('point', 'value'),
         ('asapratio_lc_heavypeak', 'background'),
         ('terminal_modification', 'mass'),
         ('fragment_masses', 'offset'),
         ('xpressratio_result', 'heavy_mass'),
         ('search_hit', 'protein_mw'),
         ('libra_summary', 'mass_tolerance'),
         ('spectrum_query', 'retention_time_sec'),
         ('distribution_point', 'model_7_pos_distr'),
         ('asapratio_lc_heavypeak', 'area'),
         ('alternative_protein', 'protein_mw'),
         ('asapratio_contribution', 'ratio'),
         ('xpressratio_result', 'heavy_area'),
         ('distribution_point', 'model_6_pos_distr')},
    'bools':
        {('sample_enzyme', 'independent'),
         ('intensity', 'reject'),
         ('libra_result', 'is_rejected')},
    'intlists': set(),
    'floatlists': set(),
    'charlists': set(),
    'lists': {'point', 'aminoacid_modification', 'msms_run_summary',
              'mixturemodel', 'search_hit', 'mixturemodel_distribution',
              'sequence_search_constraint', 'specificity', 'alternative_protein',
              'analysis_result', 'data_filter', 'fragment_masses', 'error_point',
              'parameter', 'spectrum_query', 'search_result', 'affected_channel',
              'analysis_summary', 'roc_data_point', 'distribution_point',
              'search_summary', 'mod_aminoacid_mass', 'search_score', 'intensity',
              'analysis_timestamp', 'mixture_model', 'terminal_modification',
              'contributing_channel', 'inputfile'}}
# TraML: same category layout as the other *_schema_defaults tables in this
# module (type-coercion pairs plus repeatable element names).
_traml_schema_defaults = {
    'bools': set(),
    'charlists': set(),
    'floatlists': set(),
    'floats': {('Modification', 'averageMassDelta'),
               ('Modification', 'monoisotopicMassDelta')},
    'intlists': set(),
    'ints': {('Modification', 'location')},
    'lists': {'Compound',
              'Configuration',
              'Contact',
              'Instrument',
              'IntermediateProduct',
              'Interpretation',
              'Modification',
              'Peptide',
              'Protein',
              'ProteinRef',
              'Publication',
              'RetentionTime',
              'RetentionTimeList',
              'Software',
              'SourceFile',
              'Target',
              'Transition',
              'ValidationStatus',
              'cv',
              'cvParam',
              'userParam'}}
# idXML: same category layout as the other *_schema_defaults tables in this
# module (type-coercion pairs plus repeatable element names).
_idxml_schema_defaults = {
    'ints': {('PeptideHit', 'charge'), ('SearchParameters', 'missed_cleavages'),
             ('PeptideHit', 'NumMatchedMainIons'), ('PeptideHit', 'IsotopeError')},
    'floats': {('IdXML', 'version'),
               ('PeptideHit', 'score'),
               ('PeptideIdentification', 'MZ'),
               ('PeptideIdentification', 'RT'),
               ('PeptideIdentification', 'significance_threshold'),
               ('PeptideHit', 'MS2IonCurrent'),
               ('PeptideHit', 'MeanErrorAll'),
               ('PeptideHit', 'MeanErrorTop7'),
               ('PeptideHit', 'MeanRelErrorAll'),
               ('PeptideHit', 'MeanRelErrorTop7'),
               ('PeptideHit', 'NTermIonCurrentRatio'),
               ('PeptideHit', 'CTermIonCurrentRatio'),
               ('PeptideHit', 'StdevErrorAll'),
               ('PeptideHit', 'StdevErrorTop7'),
               ('PeptideHit', 'StdevRelErrorAll'),
               ('PeptideHit', 'StdevRelErrorTop7'),
               ('PeptideHit', 'ExplainedIonCurrentRatio'),
               ('ProteinHit', 'coverage'),
               ('ProteinHit', 'score'),
               ('ProteinIdentification', 'significance_threshold'),
               ('SearchParameters', 'peak_mass_tolerance'),
               ('SearchParameters', 'precursor_peak_tolerance')},
    'bools': {('PeptideIdentification', 'higher_score_better'),
              ('ProteinIdentification', 'higher_score_better'),
              ('SearchParameters', 'peak_mass_tolerance_ppm'),
              ('SearchParameters', 'precursor_peak_tolerance_ppm')},
    'intlists': set(),
    'floatlists': set(),
    'charlists': set(),
    'lists': {'FixedModification',
              'IdentificationRun',
              'PeptideHit',
              'PeptideIdentification',
              'ProteinHit',
              'ProteinIdentification',
              'SearchParameters',
              'UserParam',
              'VariableModification'}}
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/peff.py | pyteomics/peff.py | """
peff - PSI Extended FASTA Format
================================
PEFF is a forthcoming standard from PSI-HUPO formalizing and extending the
encoding of protein features and annotations for building search spaces for
proteomics. See `The PEFF specification <http://www.psidev.info/peff>`_ for
more up-to-date information on the standard.
Data manipulation
-----------------
Classes
.......
The PEFF parser inherits several properties from implementation in the :mod:`~.fasta` module,
building on top of the :class:`~.TwoLayerIndexedFASTA` reader.
Available classes:
:py:class:`IndexedPEFF` - Parse a PEFF format file in binary-mode, supporting
direct indexing by header string or by tag.
"""
# Copyright 2018 Joshua Klein, Lev Levitsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
try:
from collections.abc import Sequence as SequenceABC, Mapping
except ImportError:
from collections import Sequence as SequenceABC, Mapping
from collections import OrderedDict, defaultdict
from .fasta import TwoLayerIndexedFASTA
class Header(Mapping):
    """Parsed key-value properties of a sequence's definition line.

    Implements the read-only :class:`Mapping` interface; in addition, every
    key is reachable through attribute access (``header.key``).
    """

    def __init__(self, mapping, original=None):
        # `original` (raw defline text) is accepted but not retained.
        self._mapping = mapping

    def __getitem__(self, key):
        return self._mapping[key]

    def __iter__(self):
        return iter(self._mapping)

    def __len__(self):
        return len(self._mapping)

    def __contains__(self, key):
        return key in self._mapping

    def items(self):
        return self._mapping.items()

    def keys(self):
        return self._mapping.keys()

    def values(self):
        return self._mapping.values()

    def __getattr__(self, key):
        # Guard against recursion when `_mapping` itself is not yet set.
        if key == "_mapping":
            raise AttributeError(key)
        mapping = self._mapping
        if key in mapping:
            return mapping[key]
        raise AttributeError(key)

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, dict(self._mapping))

    def __hash__(self):
        # Hash by the definition line (requires a 'defline' entry).
        return hash(self.defline)

    def __eq__(self, other):
        try:
            other_mapping = other._mapping
        except AttributeError:
            return str(self) == str(other)
        return self._mapping == other_mapping

    def __ne__(self, other):
        return not (self == other)

    def __dir__(self):
        names = set(dir(super(Header, self)))
        names.update(self._mapping.keys())
        return list(names)
class IndexedPEFF(TwoLayerIndexedFASTA):
    """Creates an :py:class:`IndexedPEFF` object.

    Parameters
    ----------
    source : str or file
        The file to read. If a file object, it needs to be in *rb* mode.
    parse : bool, optional
        Defines whether the descriptions should be parsed in the produced tuples.
        Default is :py:const:`True`.
    kwargs : passed to the :py:class:`TwoLayerIndexedFASTA` constructor.
    """

    # One "\key=value" pair on a definition line: keys are introduced by a
    # backslash, a value runs until the next "\"-key or end of line.
    kv_pattern = re.compile(r"\\(?P<key>\S+)=(?P<value>.+?)(?:\s(?=\\)|$)")
    # "prefix:tag" at the start of a (possibly ">"-prefixed) header line.
    header_pattern = re.compile(r"^>?(\S+):(\S+)")
    # Leading "<digits>:" marker inside a feature value, e.g. "(1:...".
    has_feature_index = re.compile(r"^\(?(\d+):")
    # The second group of `header_pattern` (the tag) serves as the index key.
    header_group = 2

    class _PEFFFeature(SequenceABC):
        """An immutable pipe-separated feature tuple with optional id/type."""

        def __init__(self, *fields, **kwargs):
            self.fields = tuple(fields)
            self.id = kwargs.get('id')
            self.feature_type = kwargs.get("feature_type")

        def __eq__(self, other):
            return tuple(self) == tuple(other)

        def __ne__(self, other):
            return not (self == other)

        def __getitem__(self, i):
            return self.fields[i]

        def __len__(self):
            return len(self.fields)

        def __repr__(self):
            return repr(tuple(self))

        def __str__(self):
            # Renders as "(id:a|b|c)", or "(a|b|c)" when no id is present.
            return "(%s%s)" % (
                '%r:' % self.id if self.id is not None else '',
                '|'.join(map(str, self)), )

    def __init__(self, source, ignore_comments=False, **kwargs):
        super(IndexedPEFF, self).__init__(
            source, ignore_comments=ignore_comments, parser=self.parser,
            header_pattern=self.header_pattern, **kwargs)
        self.header_blocks = []
        self.comments = []
        self.version = None
        self.number_of_entries = 0
        self._parse_header()

    def _parse_header(self):
        """Read the leading "# ..." section of the file, populating
        :attr:`version`, :attr:`comments`, :attr:`header_blocks` and
        :attr:`number_of_entries`.

        Raises
        ------
        ValueError
            If the file does not start with a "# PEFF" line.
        """
        self.seek(0)
        line = self.readline().decode("ascii")
        if not line.startswith("# PEFF"):
            raise ValueError("Not a PEFF File")
        # First line looks like "# PEFF N.N" -- extract the dotted version.
        self.version = tuple(map(int, line.strip()[7:].split(".")))
        current_block = defaultdict(list)
        in_header = True
        while in_header:
            line = self.readline().decode("ascii")
            if not line.startswith("#"):
                in_header = False
            # Strip the leading "# "; for the final non-comment line this
            # also slices off its first two characters (it is then ignored).
            line = line.strip()[2:]
            if '=' in line:
                key, value = line.split("=", 1)
                if key == "GeneralComment":
                    self.comments.append(value)
                else:
                    current_block[key].append(value)
            # "# //" separates header blocks.
            if line.startswith("//"):
                if current_block:
                    # Collapse single-element value lists to scalars.
                    self.header_blocks.append(
                        Header(OrderedDict((k, v if len(v) > 1 else v[0])
                                           for k, v in current_block.items())))
                    current_block = defaultdict(list)
        number_of_entries = 0
        for block in self.header_blocks:
            try:
                number_of_entries += int(block['NumberOfEntries'])
            except KeyError:
                pass
        self.number_of_entries = number_of_entries

    def _extract_parenthesis_list(self, text):
        """Split "(...)(...)" into a list of top-level parenthesized chunks,
        keeping any nested parentheses inside each chunk."""
        chunks = []
        chunk = []
        paren_level = 0
        i = 0
        n = len(text)
        while i < n:
            c = text[i]
            i += 1
            if c == "(":
                # Outermost "(" is a delimiter; nested ones are content.
                if paren_level > 0:
                    chunk.append(c)
                paren_level += 1
            elif c == ")":
                if paren_level > 1:
                    chunk.append(c)
                paren_level -= 1
                if paren_level == 0:
                    if chunk:
                        chunks.append(chunk)
                    chunk = []
            else:
                chunk.append(c)
        chunks = list(map(''.join, chunks))
        return chunks

    def _split_pipe_separated_tuple(self, text):
        # Feature tuples use "|" as the field separator.
        parts = text.split("|")
        return parts

    def _coerce_types(self, key, value):
        """Convert a raw feature value into int/float/str, or into a
        :class:`_PEFFFeature` when the value is a pipe-separated tuple
        (optionally prefixed with an "<id>:" feature index)."""
        value = value.strip()
        feature_id_match = self.has_feature_index.search(value)
        if feature_id_match:
            feature_id = int(feature_id_match.group(1))
            value = self.has_feature_index.sub('', value)
        else:
            feature_id = None
        if "|" in value:
            value = self._split_pipe_separated_tuple(value)
            result = []
            for i, v in enumerate(value):
                result.append(self._coerce_value(key, v, i))
            return self._PEFFFeature(*result, feature_type=key, id=feature_id)
        else:
            return self._coerce_value(key, value, 0)

    def _coerce_value(self, key, value, index):
        # `key` and `index` are currently unused; kept as subclass hooks.
        try:
            return int(value)
        except ValueError:
            pass
        try:
            return float(value)
        except ValueError:
            pass
        return str(value)

    def parser(self, line):
        """Parse one PEFF definition line into a :class:`Header`.

        Raises
        ------
        ValueError
            If the line does not match :attr:`header_pattern`.
        """
        match = self.header_pattern.match(line)
        if not match:
            raise ValueError(
                "Failed to parse {!r} using {!r}".format(
                    line, self))
        storage = OrderedDict()
        prefix = None
        db_uid = None
        if line.startswith(">"):
            line = line[1:]
        prefix, line = line.split(":", 1)
        db_uid, line = line.split(" ", 1)
        storage['Prefix'] = prefix
        storage['Tag'] = db_uid
        # Use the precompiled class-level `kv_pattern`; compiling an
        # identical regex on every call here was redundant.
        for key, value in self.kv_pattern.findall(line):
            if not (value.startswith("(") or " (" in value):
                storage[key] = self._coerce_types(key, value)
            else:
                # multi-value
                storage[key] = [self._coerce_types(key, v) for v in self._extract_parenthesis_list(value)]
        return Header(storage)
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/mgf.py | pyteomics/mgf.py | """
mgf - read and write MS/MS data in Mascot Generic Format
========================================================
Summary
-------
`MGF <http://www.matrixscience.com/help/data_file_help.html>`_ is a simple
human-readable format for MS/MS data. It allows storing MS/MS peak lists and
experimental parameters.
This module provides classes and functions for access to data stored in
MGF files.
Parsing is done using :py:class:`MGF` and :py:class:`IndexedMGF` classes.
The :py:func:`read` function can be used as an entry point.
MGF spectra are converted to dictionaries. MS/MS data points are
(optionally) represented as :py:mod:`numpy` arrays.
Also, common parameters can be read from MGF file header with
:py:func:`read_header` function.
:py:func:`write` allows creation of MGF files.
Classes
-------
:py:class:`MGF` - a text-mode MGF parser. Suitable to read spectra from a file consecutively.
Needs a file opened in text mode (or will open it if given a file name).
:py:class:`IndexedMGF` - a binary-mode MGF parser. When created, builds a byte offset index
for fast random access by spectrum titles. Sequential iteration is also supported.
Needs a seekable file opened in binary mode (if created from existing file object).
:py:class:`MGFBase` - abstract class, the common ancestor of the two classes above.
Can be used for type checking.
Functions
---------
:py:func:`read` - an alias for :py:class:`MGF` or :py:class:`IndexedMGF`.
:py:func:`get_spectrum` - read a single spectrum with given title from a file.
:py:func:`chain` - read multiple files at once.
:py:func:`chain.from_iterable` - read multiple files at once, using an
iterable of files.
:py:func:`read_header` - get a dict with common parameters for all spectra
from the beginning of MGF file.
:py:func:`write` - write an MGF file.
-------------------------------------------------------------------------------
"""
# Copyright 2012 Anton Goloborodko, Lev Levitsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import numpy as np
except ImportError:
np = None
import itertools as it
import sys
import warnings
from . import auxiliary as aux
class MGFBase(aux.MaskedArrayConversionMixin):
    """Abstract mixin class representing an MGF file. Subclasses implement different approaches to parsing."""
    # Characters that may start a comment line in an MGF file
    _comments = set('#;!/')
    # Keys under which peak data is stored in the output dicts
    _array_keys = ['m/z array', 'intensity array', 'charge array', 'ion array']
    _array_keys_unicode = [u'm/z array', u'intensity array', u'charge array', u'ion array']
    encoding = None

    def __init__(self, source=None, **kwargs):
        """Create an MGF file object, set MGF-specific parameters.

        Parameters
        ----------
        source : str or file or None, optional
            A file object (or file name) with data in MGF format. Default is
            :py:const:`None`, which means read standard input.
        use_header : bool, optional, keyword only
            Add the info from file header to each dict. Spectrum-specific parameters
            override those from the header in case of conflict.
            Default is :py:const:`True`.
        convert_arrays : one of {0, 1, 2}, optional, keyword only
            If `0`, m/z, intensities and (possibly) charges or (possibly) ions will be returned as regular lists.
            If `1`, they will be converted to regular :py:class:`numpy.ndarray`'s.
            If `2`, charges will be reported as a masked array (default).
            The default option is the slowest. `1` and `2` require :py:mod:`numpy`.
        read_charges : bool, optional, keyword only
            If `True` (default), fragment charges are reported. Disabling it improves performance.
        read_ions : bool, optional
            If `True` (default: False), fragment ions are reported. Disabling it improves performance.
            Note that right now, only one of (read_charges, read_ions) may be True.
        dtype : type or str or dict, optional, keyword only
            dtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.
            Keys should be 'm/z array', 'intensity array', 'charge array' and/or 'ion array'.
        encoding : str, optional, keyword only
            File encoding.
        """
        # kwargs is forwarded to the superclass chain before the MGF-specific
        # options are popped; the mixins are expected to tolerate the extras.
        super(MGFBase, self).__init__(source, **kwargs)
        self._use_header = kwargs.pop('use_header', True)
        self._read_charges = kwargs.pop('read_charges', True)
        self._read_ions = kwargs.pop('read_ions', False)
        # Make sure no charges are read if ions are read
        if self._read_ions:
            self._read_charges = False
        if self._use_header:
            self._read_header()
        else:
            self._header = None

    def __reduce_ex__(self, protocol):
        # Pickle support: recreate the reader from the original source argument.
        return (self.__class__, (self._source_init,), self.__getstate__())

    def __getstate__(self):
        state = super(MGFBase, self).__getstate__()
        state['use_header'] = self._use_header
        state['header'] = self._header
        return state

    def __setstate__(self, state):
        super(MGFBase, self).__setstate__(state)
        self._header = state['header']
        self._use_header = state['use_header']

    @staticmethod
    def parse_precursor_charge(charge_text, list_only=False):
        """Parse a precursor CHARGE value (possibly a list of charges)."""
        return aux._parse_charge(charge_text, list_only=list_only)

    @staticmethod
    def parse_pepmass_charge(pepmass_str):
        """Parse a PEPMASS value into a (mass, intensity) tuple and an optional charge.

        Parameters
        ----------
        pepmass_str : str
            The raw PEPMASS value (1-3 whitespace-separated fields).

        Returns
        -------
        pepmass : tuple
            Always two elements; missing values are :py:const:`None`.
        charge : str or None
            The raw third field, if present.

        Raises
        ------
        aux.PyteomicsError
            If there are more than three fields or the numeric fields do not parse.
        """
        split = pepmass_str.split()
        if len(split) > 3:
            raise aux.PyteomicsError('MGF format error: cannot parse '
                'PEPMASS = {}'.format(pepmass_str))
        elif len(split) == 3:
            charge = split[2]
            try:
                pepmass = tuple(map(float, split[:2]))
            except ValueError:
                raise aux.PyteomicsError('MGF format error: cannot parse '
                    'PEPMASS = {}'.format(pepmass_str))
        else:
            pepmass = tuple(map(float, split[:2]))
            # pad so the tuple always has two elements
            pepmass = pepmass + (None,) * (2-len(pepmass))
            charge = None
        return pepmass, charge

    @staticmethod
    def parse_peak_charge(charge_text, list_only=False):
        """Parse the charge annotation of a single fragment peak."""
        # Fixed: `list_only` was previously hard-coded to False here,
        # silently ignoring the caller's argument (cf. parse_precursor_charge).
        return aux._parse_charge(charge_text, list_only=list_only)

    @staticmethod
    def parse_peak_ion(ion_text):
        """Parse the ion annotation of a single fragment peak."""
        return aux._parse_ion(ion_text)

    @property
    def header(self):
        # Lazily read the header on first access if it was not read in __init__.
        if self._header is None:
            self._read_header()
        return self._header

    def _read_header_lines(self, header_lines):
        # Collect key=value pairs appearing before the first 'BEGIN IONS'.
        header = {}
        for line in header_lines:
            if line.strip() == 'BEGIN IONS':
                break
            # NOTE(review): split without maxsplit silently drops parameters
            # whose value itself contains '='; spectrum-level parsing below
            # uses split('=', 1) — confirm whether this asymmetry is intended.
            l = line.split('=')
            if len(l) == 2:
                key = l[0].lower()
                val = l[1].strip()
                header[key] = val
        if 'charge' in header:
            header['charge'] = self.parse_precursor_charge(header['charge'], True)
        self._header = header

    def _read_spectrum_lines(self, lines):
        """Read a single spectrum from ``self._source``.

        Returns
        -------
        out : dict
            The parsed spectrum, or :py:const:`None` if `lines` is exhausted
            before an 'END IONS' line is seen.
        """
        masses = []
        intensities = []
        charges = []
        ions = []
        params = self.header.copy() if self._use_header else {}
        for i, line in enumerate(lines):
            sline = line.strip()
            if sline == 'BEGIN IONS':
                if i == 0:
                    continue
                else:
                    raise aux.PyteomicsError('Error when parsing MGF: unexpected start of spectrum.')
            if not sline or sline[0] in self._comments:
                pass
            elif sline == 'END IONS':
                # finalize accumulated parameters and arrays
                if 'pepmass' in params:
                    params['pepmass'], charge = self.parse_pepmass_charge(params['pepmass'])
                    if charge is not None:
                        params['charge'] = charge
                if isinstance(params.get('charge'), (str, bytes)):
                    params['charge'] = self.parse_precursor_charge(params['charge'], True)
                if 'rtinseconds' in params:
                    params['rtinseconds'] = aux.unitfloat(params['rtinseconds'], 'second')
                out = {'params': params, 'm/z array': masses, 'intensity array': intensities}
                if self._read_charges:
                    out['charge array'] = charges
                if self._read_ions:
                    out['ion array'] = ions
                self._build_all_arrays(out)
                # On Python 2, re-key the output with unicode keys when an encoding is set
                if self.encoding and sys.version_info.major == 2:
                    for key, ukey in zip(self._array_keys + ['params'], self._array_keys_unicode + [u'params']):
                        if key in out:
                            out[ukey] = out.pop(key)
                return out
            else:
                if '=' in sline:  # spectrum-specific parameters!
                    l = sline.split('=', 1)
                    params[l[0].lower()] = l[1].strip()
                else:  # this must be a peak list
                    l = sline.split()
                    try:
                        masses.append(float(l[0]))
                        intensities.append(float(l[1]))
                        if self._read_charges:
                            charges.append(self.parse_peak_charge(l[2]) if len(l) > 2 else 0)
                        if self._read_ions:
                            ions.append(self.parse_peak_ion(l[2]) if len(l) > 2 else "")
                    except ValueError:
                        raise aux.PyteomicsError(
                            'Error when parsing %s. Line:\n%s' % (getattr(self._source, 'name', 'MGF file'), line))
                    except IndexError:
                        # a peak line with fewer than two columns: ignore it
                        pass

    def get_spectrum(self, title):
        raise NotImplementedError()

    @staticmethod
    def _get_time(spectrum):
        # Used by TimeOrderedIndexedReaderMixin for access by retention time.
        try:
            return spectrum['params']['rtinseconds']
        except KeyError:
            raise aux.PyteomicsError('RT information not found.')
class IndexedMGF(MGFBase, aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.IndexSavingTextReader):
    """
    A class representing an MGF file. Supports the `with` syntax and direct iteration for sequential
    parsing. Specific spectra can be accessed by title using the indexing syntax in constant time.
    If created using a file object, it needs to be opened in binary mode.

    When iterated, :py:class:`IndexedMGF` object yields spectra one by one.
    Each 'spectrum' is a :py:class:`dict` with five keys: 'm/z array',
    'intensity array', 'charge array', 'ion array' and 'params'. 'm/z array' and
    'intensity array' store :py:class:`numpy.ndarray`'s of floats,
    'charge array' is a masked array (:py:class:`numpy.ma.MaskedArray`) of ints,
    'ion_array' is an array of Ions (str)
    and 'params' stores a :py:class:`dict` of parameters (keys and values are
    :py:class:`str`, keys corresponding to MGF, lowercased).

    Attributes
    ----------
    header : dict
        The file header.
    time : RTLocator
        A property used for accessing spectra by retention time.
    """
    # Line that marks the start of each spectrum; used when building the byte offset index.
    delimiter = 'BEGIN IONS'

    def __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True,
                 dtype=None, encoding='utf-8', index_by_scans=False, read_ions=False, _skip_index=False, **kwargs):
        """
        Create an :py:class:`IndexedMGF` (binary-mode) reader for a given MGF file.

        Parameters
        ----------
        source : str or file or None, optional
            A file object (or file name) with data in MGF format. Default is
            :py:const:`None`, which means read standard input.

            .. note :: If a file object is given, it must be opened in binary mode.
        use_header : bool, optional
            Add the info from file header to each dict. Spectrum-specific parameters
            override those from the header in case of conflict.
            Default is :py:const:`True`.
        convert_arrays : one of {0, 1, 2}, optional
            If `0`, m/z, intensities and (possibly) charges will be returned as regular lists.
            If `1`, they will be converted to regular :py:class:`numpy.ndarray`'s.
            If `2`, charges will be reported as a masked array (default).
            The default option is the slowest. `1` and `2` require :py:mod:`numpy`.
        read_charges : bool, optional
            If `True` (default), fragment charges are reported. Disabling it improves performance.
        read_ions : bool, optional
            If `True` (default: False), fragment ion types are reported. Disabling it improves performance.
            Note that right now, only one of (read_charges, read_ions) may be True.
        dtype : type or str or dict, optional
            dtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.
            Keys should be 'm/z array', 'intensity array', 'charge array' and/or 'ion array'.
        encoding : str, optional
            File encoding.
        block_size : int, optional
            Size of the chunk (in bytes) used to parse the file when creating the byte offset index.

        Returns
        -------
        out : IndexedMGF
            The reader object.
        """
        self._index_by_scans = index_by_scans
        self._read_ions = read_ions
        # Regex that extracts each spectrum's index key: its scan number or its title.
        self.label = r'SCANS=(\d+)\s*' if index_by_scans else r'TITLE=([^\n]*\S)\s*'
        super(IndexedMGF, self).__init__(source, parser_func=self._read, pass_file=False, args=(), kwargs={},
                                         use_header=use_header, convert_arrays=convert_arrays,
                                         read_charges=read_charges,
                                         dtype=dtype, encoding=encoding, read_ions=read_ions, _skip_index=_skip_index,
                                         **kwargs)

    def __reduce_ex__(self, protocol):
        # Pickle support. The positional args mirror the __init__ signature;
        # use_header=False and _skip_index=True avoid re-reading the file,
        # since header and index are restored from __getstate__.
        return (self.__class__,
                (self._source_init, False, self._convert_arrays, self._read_charges,
                 None, self.encoding, self._index_by_scans, self._read_ions, True),
                self.__getstate__())

    @aux._keepstate_method
    def _read_header(self):
        # The header is everything up to the first indexed spectrum's offset.
        try:
            first = next(v for v in self._offset_index.values())[0]
        except StopIteration:  # the index is empty, no spectra in file
            first = -1
        header_lines = self.read(first).decode(self.encoding).split('\n')
        return self._read_header_lines(header_lines)

    def _item_from_offsets(self, offsets):
        # Materialize one spectrum from its (start, end) byte offsets.
        start, end = offsets
        lines = self._read_lines_from_offsets(start, end)
        return self._read_spectrum_lines(lines)

    def _read(self, **kwargs):
        # Sequential iteration, in offset-index order.
        for _, offsets in self._offset_index.items():
            spectrum = self._item_from_offsets(offsets)
            yield spectrum

    def get_spectrum(self, key):
        """Return the spectrum with the given title (or scan number, if indexed by scans)."""
        return self.get_by_id(key)

    def _warn_empty(self):
        # Called when the offset index matched no spectra; a wrong `label`
        # pattern (title vs. scan indexing) is the most common cause.
        text = ("{} object has an empty index for file {}. If this is unexpected, consider adjusting `label` or "
                "setting `index_by_scans={}`.".format(
                    self.__class__.__name__, getattr(self._source, 'name', self._source_init), not self._index_by_scans))
        warnings.warn(text)
class MGF(MGFBase, aux.FileReader):
    """
    Text-mode MGF reader supporting the `with` statement and sequential iteration.

    Iterating over an :py:class:`MGF` object yields one spectrum at a time.
    Every spectrum is a :py:class:`dict` with five keys: 'm/z array',
    'intensity array', 'charge array', 'ion array' and 'params'.
    'm/z array' and 'intensity array' hold :py:class:`numpy.ndarray`'s of
    floats, 'charge array' is a masked array
    (:py:class:`numpy.ma.MaskedArray`) of ints, 'ion_array' is a masked array
    of Ions (str), and 'params' is a :py:class:`dict` of parameters (string
    keys and values, keys matching MGF, lowercased).

    Spectra can also be fetched by title via the indexing syntax (when the
    file is seekable), but that is a linear scan through the file; use
    :py:class:`IndexedMGF` for constant-time access instead.

    Attributes
    ----------
    header : dict
        The file header.
    """

    def __init__(self, source=None, use_header=True, convert_arrays=2, read_charges=True,
                 read_ions=False, dtype=None, encoding=None):
        """
        Create an :py:class:`MGF` (text-mode) reader for a given MGF file.

        Parameters
        ----------
        source : str or file or None, optional
            File name or file object with data in MGF format. Default is
            :py:const:`None`, meaning standard input.

            ..note :: A given file object must be opened in text mode.
        use_header : bool, optional
            If :py:const:`True` (default), merge the file header parameters
            into each spectrum dict; spectrum-level values win on conflict.
        convert_arrays : one of {0, 1, 2}, optional
            `0`: return plain lists; `1`: regular :py:class:`numpy.ndarray`'s;
            `2` (default): charges become a masked array. The default is the
            slowest; `1` and `2` require :py:mod:`numpy`.
        read_charges : bool, optional
            Report fragment charges if :py:const:`True` (default).
            Disabling it improves performance.
        read_ions : bool, optional
            Report fragment ion types if :py:const:`True` (default:
            :py:const:`False`). Disabling it improves performance. Only one of
            (read_charges, read_ions) may be True at the moment.
        dtype : type or str or dict, optional
            dtype for the :py:mod:`numpy` array constructor, either one for
            all arrays or one per key ('m/z array', 'intensity array',
            'charge array' and/or 'ion array').
        encoding : str, optional
            File encoding.

        Returns
        -------
        out : MGF
            The reader object.
        """
        super(MGF, self).__init__(source, mode='r', parser_func=self._read, pass_file=False, args=(), kwargs={},
            encoding=encoding, use_header=use_header, convert_arrays=convert_arrays, read_charges=read_charges,
            read_ions=read_ions, dtype=dtype)

    @aux._keepstate_method
    def _read_header(self):
        # Hand the file object over; header parsing stops at 'BEGIN IONS'.
        return self._read_header_lines(self._source)

    def _read_spectrum(self):
        return self._read_spectrum_lines(self._source)

    def _read(self):
        # Emit one spectrum for each 'BEGIN IONS' marker encountered.
        for raw_line in self._source:
            if raw_line.strip() == 'BEGIN IONS':
                yield self._read_spectrum()

    @aux._keepstate_method
    def get_spectrum(self, title):
        """Linearly scan the file for a spectrum with the given title; return it or :py:const:`None`."""
        for raw_line in self._source:
            stripped = raw_line.strip()
            if stripped.startswith('TITLE') and stripped.split('=', 1)[1].strip() == title:
                spectrum = self._read_spectrum()
                spectrum['params']['title'] = title
                return spectrum

    def __getitem__(self, key):
        return self.get_spectrum(key)
def read(*args, **kwargs):
    """Return a reader for the given MGF file. The parameters largely mirror
    the constructors of :py:class:`MGF` and :py:class:`IndexedMGF`; the extra
    `use_index` parameter selects which of the two classes is instantiated.

    Parameters
    ----------
    source : str or file or None, optional
        A file object (or file name) with data in MGF format. Default is
        :py:const:`None`, which means read standard input.
    use_header : bool, optional
        Add the info from file header to each dict. Spectrum-specific parameters
        override those from the header in case of conflict.
        Default is :py:const:`True`.
    convert_arrays : one of {0, 1, 2}, optional
        If `0`, m/z, intensities and (possibly) charges will be returned as regular lists.
        If `1`, they will be converted to regular :py:class:`numpy.ndarray`'s.
        If `2`, charges will be reported as a masked array (default).
        The default option is the slowest. `1` and `2` require :py:mod:`numpy`.
    read_charges : bool, optional
        If `True` (default), fragment charges are reported. Disabling it improves performance.
    read_ions : bool, optional
        If `True` (default: False), fragment ion types are reported. Disabling it improves performance.
        Note that right now, only one of (read_charges, read_ions) may be True.
    dtype : type or str or dict, optional
        dtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.
        Keys should be 'm/z array', 'intensity array', 'charge array' and/or 'ion array'.
    encoding : str, optional
        File encoding.
    use_index : bool, optional
        If :py:const:`True` (default), create an :py:class:`IndexedMGF` for
        constant-time access by spectrum title; an open `source` file must
        then be in binary mode. If :py:const:`False`, create an
        :py:class:`MGF`, which reads `source` in text mode and is suited for
        sequential parsing (access by title is a linear search).
    block_size : int, optional
        Size of the chunk (in bytes) used to parse the file when creating the byte offset index.
        (Accepted only for :py:class:`IndexedMGF`.)

    Returns
    -------
    out : MGFBase
        Instance of :py:class:`MGF` or :py:class:`IndexedMGF`.
    """
    source = args[0] if args else kwargs.get('source')
    requested_index = kwargs.pop('use_index', None)
    if aux._check_use_index(source, requested_index, True):
        reader_class = IndexedMGF
    else:
        reader_class = MGF
    return reader_class(*args, **kwargs)
def get_spectrum(source, title, *args, **kwargs):
    """Read a single spectrum with the given `title` from `source`.

    See :py:func:`read` for the meaning of the additional arguments.

    .. note :: Only the key-value pairs after the "TITLE =" line will be included in the output.

    Parameters
    ----------
    source : str or file or None
        File to read from.
    title : str
        Spectrum title.
    *args
        Passed through to :py:func:`read`.
    **kwargs
        Passed through to :py:func:`read`.

    Returns
    -------
    out : dict or None
        The matching spectrum, or :py:const:`None` if no spectrum has this title.
    """
    with read(source, *args, **kwargs) as reader:
        return reader[title]
@aux._keepstate
def read_header(source):
    """
    Read the header of an MGF file and return the search parameters found
    there as a :py:class:`dict` (keys corresponding to MGF format, lowercased).

    Parameters
    ----------
    source : str or file
        File name or file object representing an file in MGF format.

    Returns
    -------
    header : dict
    """
    with aux._file_obj(source, 'r') as source:
        header = {}
        # Everything before the first spectrum belongs to the header.
        for line in source:
            if line.strip() == 'BEGIN IONS':
                break
            parts = line.split('=')
            if len(parts) == 2:
                header[parts[0].lower()] = parts[1].strip()
        if 'charge' in header:
            header['charge'] = aux._parse_charge(header['charge'], True)
        return header
# Order in which well-known parameters are written in each spectrum's header by write().
_default_key_order = ['title', 'pepmass', 'rtinseconds', 'charge']
def _pepmass_repr(k, pepmass):
outstr = k.upper() + '='
if not isinstance(pepmass, (str, int, float)): # assume iterable
try:
outstr += ' '.join(str(x) for x in pepmass if x is not None)
except TypeError:
raise aux.PyteomicsError('Cannot handle parameter: PEPMASS = {}'.format(pepmass))
else:
outstr += str(pepmass)
return outstr
def _charge_repr(k, charge):
    """Format a CHARGE-style parameter line, normalizing the value through
    :py:class:`aux.Charge` (single charge) or :py:class:`aux.ChargeList`."""
    try:
        normalized = aux.Charge(charge)
    except (TypeError, aux.PyteomicsError):
        normalized = aux.ChargeList(charge)
    return k.upper() + '=' + str(normalized)
def _default_repr(key, val):
return '{}={}'.format(key.upper(), val)
# Parameters that need special string formatting when written by write().
_default_value_formatters = {'pepmass': _pepmass_repr, 'charge': _charge_repr}
@aux._file_writer()
def write(spectra, output=None, header='', key_order=_default_key_order, fragment_format=None,
write_charges=True, write_ions=False, use_numpy=None, param_formatters=_default_value_formatters):
"""
Create a file in MGF format.
Parameters
----------
spectra : iterable
A **sequence** of dictionaries with keys 'm/z array', 'intensity array',
and 'params'. 'm/z array' and 'intensity array' should be sequences of
:py:class:`int`, :py:class:`float`, or :py:class:`str`. Strings will
be written 'as is'. The sequences should be of equal length, otherwise
excessive values will be ignored.
'params' should be a :py:class:`dict` with keys corresponding to MGF
format. Keys must be strings, they will be uppercased and used as is,
without any format consistency tests. Values can be of any type allowing
string representation.
'charge array' or 'ion array' can also be specified.
.. note ::
Passing a single spectrum will work, but will trigger a warning. This usage pattern is discouraged.
To ensure correct output when writing multiple spectra,
it is recommended to construct a sequence of spectra first and then call :py:func:`write` once.
.. seealso ::
This discussion of usage patterns of :py:func:`write`: https://github.com/levitsky/pyteomics/discussions/109
output : str or file or None, optional
Path or a file-like object open for writing. If an existing file is
specified by file name, it will be opened for writing.
Default value is :py:const:`None`, which means using standard output.
.. note::
The default mode for output files specified by name has been changed
from `a` to `w` in *pyteomics 4.6*. See `file_mode` to override the mode.
header : dict or (multiline) str or list of str, optional
In case of a single string or a list of strings, the header will be
written 'as is'. In case of dict, the keys (must be strings) will be
uppercased.
write_charges : bool, optional
If :py:const:`False`, fragment charges from 'charge array' will not be written.
Default is :py:const:`True`.
write_ions : bool, optional
If :py:const:`False`, fragment ions from 'ion array' will not be written.
If :py:const:`True`, then `write_charges` is set to :py:const:`False`.
Default is :py:const:`False`.
fragment_format : str, optional
Format string for m/z, intensity and charge (or ion annotation) of a fragment. Useful to set
the number of decimal places, e.g.:
``fragment_format='%.4f %.0f'``. Default is ``'{} {} {}'``.
.. note::
The supported format syntax differs depending on other parameters.
If `use_numpy` is :py:const:`True` and :py:mod:`numpy` is available,
fragment peaks will be written using :py:func:`numpy.savetxt`. Then,
`fragment_format` must be recognized by that function.
Otherwise, plain Python string formatting is done.
See `the docs
<https://docs.python.org/library/string.html#format-specification-mini-language>`_
for details on writing the format string.
If some or all charges are missing, an empty string is substituted
instead, so formatting as :py:class:`!float` or :py:class:`!int` will raise an exception.
Hence it is safer to just use ``{}`` for charges.
key_order : list, optional
A list of strings specifying the order in which params will be written in
the spectrum header. Unlisted keys will be in arbitrary order.
Default is :py:data:`_default_key_order`.
.. note:: This does not affect the order of lines in the global header.
param_formatters : dict, optional
A dict mapping parameter names to functions. Each function must accept
two arguments (key and value) and return a string.
Default is :py:data:`_default_value_formatters`.
use_numpy : bool, optional
Controls whether fragment peak arrays are written using :py:func:`numpy.savetxt`.
Using :py:func:`numpy.savetxt` is faster, but cannot handle sparse arrays of fragment charges.
You may want to disable this if you need to save spectra with 'charge arrays' with missing values.
If not specified, will be set to the opposite of `write_chrages`.
If :py:mod:`numpy` is not available, this parameter has no effect.
file_mode : str, keyword only, optional
If `output` is a file name, defines the mode the file will be opened in.
Otherwise will be ignored. Default is `'w'`.
.. note ::
The default changed from `'a'` in *pyteomics 4.6*.
encoding : str, keyword only, optional
Output file encoding (if `output` is specified by name).
Returns
-------
output : file
"""
def key_value_line(key, val):
return param_formatters.get(key, _default_repr)(key, val) + '\n'
nones = (None, np.nan, np.ma.masked) if np is not None else (None,)
if fragment_format is None:
fragment_format = '{} {} {}'
np_format_2 = '%.5f %.1f'
np_format_3 = '%.5f %.1f %d'
np_format_i = '%.5f %.1f %s'
else:
np_format_2 = np_format_3 = np_format_i = fragment_format
format_str = fragment_format + '\n'
if write_ions:
write_charges = False
if use_numpy is None:
use_numpy = not write_charges
if isinstance(header, dict):
head_dict = header.copy()
head_lines = [key_value_line(k, v) for k, v in header.items()]
head_str = '\n'.join(head_lines)
else:
if isinstance(header, str):
head_str = header
head_lines = header.split('\n')
else:
head_lines = list(header)
head_str = '\n'.join(header)
head_dict = {}
for line in head_lines:
if not line.strip() or any(line.startswith(c) for c in MGF._comments):
continue
l = line.split('=')
if len(l) == 2:
head_dict[l[0].lower()] = l[1].strip()
if head_str:
output.write(head_str + '\n\n')
if isinstance(spectra, dict) and 'm/z array' in spectra:
spectra = (spectra, )
warnings.warn("Passing a single spectrum to `write()` is discouraged. "
"To write a set of spectra, pass them to `write()` all at once. "
"For more info, see: https://github.com/levitsky/pyteomics/discussions/109.")
for spectrum in spectra:
output.write('BEGIN IONS\n')
found = set()
for key in it.chain(key_order, spectrum['params']):
if key not in found and key in spectrum['params']:
found.add(key)
val = spectrum['params'][key]
if val != head_dict.get(key):
output.write(key_value_line(key, val))
try:
success = True
if np is not None and use_numpy:
if (not write_charges or 'charge array' not in spectrum) and (not write_ions or 'ion array' not in spectrum):
X = np.empty((len(spectrum['m/z array']), 2))
X[:, 0] = spectrum['m/z array']
X[:, 1] = spectrum['intensity array']
np.savetxt(output, X, fmt=np_format_2)
elif isinstance(spectrum.get('charge array'), np.ndarray):
X = np.empty((len(spectrum['m/z array']), 3))
X[:, 0] = spectrum['m/z array']
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | true |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/mzml.py | pyteomics/mzml.py | """
mzml - reader for mass spectrometry data in mzML format
=======================================================
Summary
-------
mzML is a standard rich XML-format for raw mass spectrometry data storage.
Please refer to `psidev.info <http://www.psidev.info/index.php?q=node/257>`_
for the detailed specification of the format and structure of mzML files.
This module provides a minimalistic way to extract information from mzML
files. You can use the old functional interface (:py:func:`read`) or the new
object-oriented interface (:py:class:`MzML` or :py:class:`PreIndexedMzML`)
to iterate over entries in ``<spectrum>`` elements.
:py:class:`MzML` and :py:class:`PreIndexedMzML` also support direct indexing
with spectrum IDs.
Data access
-----------
:py:class:`MzML` - a class representing a single mzML file.
Other data access functions use this class internally.
:py:class:`PreIndexedMzML` - a class representing a single mzML file.
Uses byte offsets listed at the end of the file for quick access to spectrum elements.
:py:func:`read` - iterate through spectra in mzML file. Data from a
single spectrum are converted to a human-readable dict. Spectra themselves are
stored under 'm/z array' and 'intensity array' keys.
:py:func:`chain` - read multiple mzML files at once.
:py:func:`chain.from_iterable` - read multiple files at once, using an
iterable of files.
Controlled Vocabularies and Caching
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
mzML relies on controlled vocabularies to describe its contents extensibly.
Every :py:class:`MzML` needs a copy of PSI-MS CV, which it handles using the :py:mod:`psims` library.
If you want to save time when creating instances of :py:class:`MzML`, consider enabling the :py:mod:`psims` cache.
See `psims documentation <https://mobiusklein.github.io/psims/docs/build/html/controlled_vocabulary/controlled_vocabulary.html#caching>`_
on how to enable and configure the cache (alternatively, you can handle CV creation yourself and pass a pre-created instance
using the `cv` parameter to :py:class:`MzML`).
See also
`Controlled Vocabulary Terms <../data.html#controlled-vocabulary-terms-in-structured-data>`_
for more details on how they are used.
Handling Time Units and Other Qualified Quantities
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
mzML contains information which may be described as using a variety of different time units.
See `Unit Handling <../data.html#unit-handling>`_ for more information.
Deprecated functions
--------------------
:py:func:`version_info` - get version information about the mzML file.
You can just read the corresponding attribute of the :py:class:`MzML` object.
:py:func:`iterfind` - iterate over elements in an mzML file.
You can just call the corresponding method of the :py:class:`MzML` object.
Dependencies
------------
This module requires :py:mod:`lxml`, :py:mod:`numpy` and :py:mod:`psims`.
-------------------------------------------------------------------------------
"""
# Copyright 2012 Anton Goloborodko, Lev Levitsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import warnings
import numpy as np
from . import xml, auxiliary as aux, _schema_defaults
from .xml import etree
# CV term marking a binary data array that is not one of the standard kinds.
NON_STANDARD_DATA_ARRAY = 'non-standard data array'
# Standard binary-data-array names from the PSI-MS controlled vocabulary;
# used to disambiguate array naming in MzML._detect_array_name.
STANDARD_ARRAYS = set([
    'm/z array',
    'intensity array',
    'charge array',
    'signal to noise array',
    'time array',
    'wavelength array',
    'flow rate array',
    'pressure array',
    'temperature array',
    'mean charge array',
    'resolution array',
    'baseline array',
    'noise array',
    'sampled noise m/z array',
    'sampled noise intensity array',
    'sampled noise baseline array',
    'ion mobility array',
    'deconvoluted ion mobility drift time array',
    'deconvoluted inverse reduced ion mobility array',
    'deconvoluted ion mobility array',
    'raw ion mobility drift time array',
    'raw inverse reduced ion mobility array',
    'raw ion mobility array',
    'mean inverse reduced ion mobility array',
    'mean ion mobility array',
    'mean ion mobility drift time array',
    'mass array',
    'scanning quadrupole position lower bound m/z array',
    'scanning quadrupole position upper bound m/z array',
])
class MzML(aux.BinaryArrayConversionMixin, xml.CVParamParser, aux.TimeOrderedIndexedReaderMixin, xml.MultiProcessingXML, xml.IndexSavingXML):
    """Parser class for mzML files."""
    file_format = 'mzML'
    _root_element = 'mzML'
    # Fallback schema info used when the file's own schema cannot be retrieved.
    _default_schema = _schema_defaults._mzml_schema_defaults
    _default_version = '1.1.0'
    # Element yielded by default iteration.
    _default_iter_tag = 'spectrum'
    # Containers whose children are merged into the parent dict when parsing.
    _structures_to_flatten = {'binaryDataArrayList', 'referenceableParamGroupRef'}
    # Elements indexed by id for random access.
    _indexed_tags = {'spectrum', 'chromatogram'}
def __init__(self, *args, **kwargs):
self.decode_binary = kwargs.pop('decode_binary', True)
self._referenceable_param_groups = {}
super(MzML, self).__init__(*args, **kwargs)
def __getstate__(self):
state = super(MzML, self).__getstate__()
state['decode_binary'] = self.decode_binary
return state
    def __setstate__(self, state):
        # Restore pickled state, including the `decode_binary` flag saved by __getstate__.
        super(MzML, self).__setstate__(state)
        self.decode_binary = state['decode_binary']
def _handle_referenceable_param_group(self, param_group_ref, **kwargs):
ref_name = param_group_ref.attrib['ref']
if ref_name not in self._referenceable_param_groups:
params = self._referenceable_param_groups[ref_name] = self._retrieve_param_group(ref_name)
return params
return self._referenceable_param_groups[ref_name]
@xml._keepstate
def _retrieve_param_group(self, ref_name):
group = self.get_by_id(ref_name)
group.pop("id", None)
return [xml._XMLParam(k, v, None) for k, v in group.items()]
    def _detect_array_name(self, info):
        """Determine what the appropriate name for this
        array is by inspecting the available param-based
        keys.

        Parameters
        ----------
        info : dict
            The collapsed binary tag plus
            associated *Param data

        Returns
        -------
        out : str
            The name for this array entry
        """
        # If this is a non-standard array, we hope the userParams
        # will conform to the same array suffix pattern.
        is_non_standard = False
        # Accumulate possible name candidates
        candidates = []
        # Only value-less keys ending in ' array' are taken as name candidates.
        for k in info:
            if k.endswith(' array') and not info[k]:
                if NON_STANDARD_DATA_ARRAY == k:
                    is_non_standard = True
                else:
                    candidates.append(k)
        # A non-standard data array term key might have the name for the data array
        # as the value.
        nonstandard_name = info.get(NON_STANDARD_DATA_ARRAY)
        if nonstandard_name:
            return nonstandard_name
        # Candidates may also appear as entries in a 'name' list.
        if isinstance(info.get('name'), list):
            for val in info['name']:
                if val.endswith(' array'):
                    if NON_STANDARD_DATA_ARRAY == val:
                        is_non_standard = True
                    else:
                        candidates.append(val)
        # Name candidate resolution
        n_candidates = len(candidates)
        # Easy case, exactly one name given
        if n_candidates == 1:
            return candidates[0]
        # We are missing information, but at least
        # if we know the array is non-standard we
        # can report it as such. Otherwise fall back
        # to "binary". This fallback signals special
        # behavior elsewhere.
        if n_candidates == 0:
            # No ' array' terms at all: fall back to any other informative key.
            invalid = {"encodedLength", "dataProcessingRef", "arrayLength",
                       "binary"}
            for k in info:
                if k in invalid:
                    continue
                candidates.append(k)
            if len(candidates) == 0:
                if is_non_standard:
                    return NON_STANDARD_DATA_ARRAY
                warnings.warn("No options for non-standard data array")
                return "binary"
            else:
                warnings.warn(
                    "Multiple options for naming binary array after no valid name found: %r" % candidates)
                # Prefer the longest (most specific) remaining key.
                return max(candidates, key=len)
        # Multiple choices means we need to make a decision which could
        # mask data from the user. This should never happen but stay safe.
        # There are multiple options to choose from. There is no way to
        # make a good choice here. We first prefer the standardized
        # arrays before falling back to just guessing.
        else:
            candidates = set(candidates)
            # Maybe we just have a repeated term?
            if len(candidates) == 1:
                return next(iter(candidates))
            warnings.warn(
                "Multiple options for naming binary array: %r" % candidates)
            standard_options = candidates & STANDARD_ARRAYS
            if standard_options:
                return max(standard_options, key=len)
            return max(candidates, key=len)
def _determine_array_dtype(self, info):
dtype = None
types = {'32-bit float': np.float32, '64-bit float': np.float64,
'32-bit integer': np.int32, '64-bit integer': np.int64,
'null-terminated ASCII string': np.uint8}
for t, code in types.items():
if t in info:
dtype = code
del info[t]
break
# sometimes it's under 'name'
else:
if 'name' in info:
for t, code in types.items():
if t in info['name']:
dtype = code
info['name'].remove(t)
break
return dtype
def _determine_compression(self, info):
known_compression_types = set(self.compression_type_map)
found_compression_types = known_compression_types & set(info)
if found_compression_types:
found_compression_types = tuple(found_compression_types)
if len(found_compression_types) == 1:
del info[found_compression_types[0]]
return found_compression_types[0]
warnings.warn("Multiple options for binary array compression: %r" % (
found_compression_types,))
return found_compression_types[0]
elif "name" in info:
found_compression_types = known_compression_types & set(info['name'])
if found_compression_types:
found_compression_types = tuple(found_compression_types)
if len(found_compression_types) == 1:
del info['name'][found_compression_types[0]]
return found_compression_types[0]
else:
warnings.warn("Multiple options for binary array compression: %r" % (
found_compression_types,))
return found_compression_types[0]
else:
return 'no compression'
    def _handle_binary(self, info, **kwargs):
        """Special handling when processing and flattening
        a <binary> tag and its sibling *Param tags.

        Parameters
        ----------
        info : dict
            Unprocessed binary array data and metadata

        Returns
        -------
        out : dict
            The processed and flattened data array and metadata
        """
        # Strip dtype/compression terms from `info` first, so that the
        # remaining keys can be used to deduce the array's name.
        dtype = self._determine_array_dtype(info)
        compressed = self._determine_compression(info)
        name = self._detect_array_name(info)
        binary = info.pop('binary')
        if not self.decode_binary:
            # Deferred decoding: store an opaque record to be decoded on demand.
            info[name] = self._make_record(binary, compressed, dtype, name)
            return info
        if binary:
            array = self.decode_data_array(binary, compressed, dtype)
        else:
            # Empty <binary> element: yield an empty array of the right dtype.
            array = np.array([], dtype=dtype)
        if name == 'binary':
            # Fallback name (array type could not be determined): keep the
            # remaining metadata alongside the decoded array.
            info[name] = self._convert_array(None, array)
        else:
            # Recognized array name: the decoded array replaces all metadata.
            info = {name: self._convert_array(name, array)}
        return info
def _get_info_smart(self, element, **kw):
name = xml._local_name(element)
kwargs = dict(kw)
default_rec = (name not in {'indexedmzML', 'mzML'})
rec = kwargs.pop('recursive', None)
if rec is None:
rec = default_rec
info = self._get_info(element, recursive=rec, **kwargs)
if 'binary' in info and isinstance(info, dict):
info = self._handle_binary(info, **kwargs)
if 'binaryDataArray' in info and isinstance(info, dict):
for array in info.pop('binaryDataArray'):
info.update(array)
intkeys = {'ms level'}
for k in intkeys:
if k in info:
try:
info[k] = int(info[k])
except (ValueError, TypeError):
pass
return info
    def _retrieve_refs(self, info, **kwargs):
        """Retrieves and embeds the data for each attribute in `info` that
        ends in _ref. Removes the id attribute from `info`"""
        # NOTE(review): despite the docstring, only the exact key 'ref' is
        # handled here (mzML uses a plain 'ref' attribute) -- confirm intended.
        # Iterate over a copy of the items because `info` is mutated below.
        for k, v in dict(info).items():
            if k == 'ref':
                by_id = self.get_by_id(v, retrieve_refs=True)
                if by_id is None:
                    warnings.warn('Ignoring unresolved reference: ' + v)
                else:
                    info.update(by_id)
                # The 'ref' key is dropped even when the reference could not
                # be resolved (only the warning above is issued).
                del info[k]
        # The embedded element's own 'id' is not useful in the flattened dict.
        info.pop('id', None)
@staticmethod
def _get_time(scan):
return scan['scanList']['scan'][0]['scan start time']
def read(source, read_schema=False, iterative=True, use_index=False, dtype=None, huge_tree=False, decode_binary=True, cv=None):
    """Parse `source` and iterate through spectra.

    Parameters
    ----------
    source : str or file
        A path to a target mzML file or the file object itself.
    read_schema : bool, optional
        If :py:const:`True`, attempt to extract information from the XML schema
        mentioned in the mzML header. Otherwise, use default parameters.
        Not recommended without Internet connection or
        if you don't like to get the related warnings.
    iterative : bool, optional
        Defines whether iterative parsing should be used. It helps reduce
        memory usage at almost the same parsing speed. Default is
        :py:const:`True`.
    use_index : bool, optional
        Defines whether an index of byte offsets needs to be created for
        spectrum elements. Default is :py:const:`False`.
    dtype : type or dict, optional
        dtype to convert arrays to, one for both m/z and intensity arrays or one for each key.
        If :py:class:`dict`, keys should be 'm/z array' and 'intensity array'.
    decode_binary : bool, optional
        Defines whether binary data should be decoded and included in the output
        (under "m/z array", "intensity array", etc.).
        Default is :py:const:`True`.
    huge_tree : bool, optional
        This option is passed to the `lxml` parser and defines whether
        security checks for XML tree depth and node size should be disabled.
        Default is :py:const:`False`.
        Enable this option for trusted files to avoid XMLSyntaxError exceptions
        (e.g. `XMLSyntaxError: xmlSAX2Characters: huge text node`).
    cv : psims.controlled_vocabulary.controlled_vocabulary.ControlledVocabulary, optional
        An instance of PSI-MS CV. If provided, the parser will use it for type checking.
        Otherwise, a CV will be loaded from the Internet or from cache, if it is configured.

        .. seealso ::
            See `psims documentation <https://mobiusklein.github.io/psims/docs/build/html/controlled_vocabulary/controlled_vocabulary.html#caching>`_
            about cache configuration.

    Returns
    -------
    out : iterator
        An iterator over the dicts with spectrum properties.
    """
    kwargs = dict(read_schema=read_schema, iterative=iterative,
                  use_index=use_index, dtype=dtype, huge_tree=huge_tree,
                  decode_binary=decode_binary)
    # Bug fix: `cv` was documented and accepted but never forwarded to the
    # parser, so a user-supplied CV was silently ignored. Forward it only
    # when given, keeping default behavior byte-for-byte unchanged.
    if cv is not None:
        kwargs['cv'] = cv
    return MzML(source, **kwargs)
def iterfind(source, path, **kwargs):
    """Parse `source` and yield info on elements with specified local
    name or by specified "XPath".

    .. note:: This function is provided for backward compatibility only.
        If you do multiple :py:func:`iterfind` calls on one file, you should
        create an :py:class:`MzML` object and use its
        :py:meth:`!iterfind` method.

    Parameters
    ----------
    source : str or file
        File name or file-like object.
    path : str
        Element name or XPath-like expression. Only local names separated
        with slashes are accepted. An asterisk (`*`) means any element.
        You can specify a single condition in the end, such as:
        ``"/path/to/element[some_value>1.5]"``
        Note: you can do much more powerful filtering using plain Python.
        The path can be absolute or "free". Please don't specify
        namespaces.
    recursive : bool, optional
        If :py:const:`False`, subelements will not be processed when
        extracting info from elements. Default is :py:const:`True`.
    iterative : bool, optional
        Specifies whether iterative XML parsing should be used. Iterative
        parsing significantly reduces memory usage and may be just a little
        slower. When `retrieve_refs` is :py:const:`True`, however, it is
        highly recommended to disable iterative parsing if possible.
        Default value is :py:const:`True`.
    read_schema : bool, optional
        If :py:const:`True`, attempt to extract information from the XML schema
        mentioned in the mzIdentML header. Otherwise, use default
        parameters. Not recommended without Internet connection or
        if you don't like to get the related warnings.
    decode_binary : bool, optional
        Defines whether binary data should be decoded and included in the output
        (under "m/z array", "intensity array", etc.).
        Default is :py:const:`True`.

    Returns
    -------
    out : iterator
    """
    # The same kwargs are shared between the constructor and iterfind;
    # each consumes the options it understands.
    parser = MzML(source, **kwargs)
    return parser.iterfind(path, **kwargs)
# Module-level conveniences: schema version info extracted from MzML, and a
# `chain` callable for reading several mzML files as one sequential iterator.
version_info = xml._make_version_info(MzML)
# chain = aux._make_chain(read, 'read')
chain = aux.ChainBase._make_chain(MzML)
class PreIndexedMzML(MzML):
    """Parser class for mzML files, subclass of :py:class:`MzML`.
    Uses byte offsets listed at the end of the file for quick access to spectrum elements.
    """
    def build_byte_index(self):
        """
        Build up a :class:`HierarchicalOffsetIndex` of offsets for elements. Calls :meth:`_find_index_list` or
        falls back on regular :class:`MzML` indexing.

        Returns
        -------
        out : HierarchicalOffsetIndex
        """
        index = self._find_index_list()
        if index:
            return index
        else:
            # No usable embedded <indexList>: scan the file like plain MzML does.
            warnings.warn('Could not extract the embedded offset index. Falling back to default indexing procedure.')
            return super(PreIndexedMzML, self).build_byte_index()
    @xml._keepstate
    def _iterparse_index_list(self, offset):
        # Parse the trailing <indexList> XML fragment starting at `offset`
        # into a HierarchicalOffsetIndex: {index name -> {idRef -> byte offset}}.
        index_map = xml.HierarchicalOffsetIndex()
        index = index_map._inner_type()
        self._source.seek(offset)
        try:
            for event, elem in etree.iterparse(self._source, events=('start', 'end'), remove_comments=True):
                if event == 'start':
                    if elem.tag == 'index':
                        # A new named index begins (e.g. 'spectrum', 'chromatogram').
                        index = {}
                        index_map[elem.attrib['name']] = index
                else:
                    if elem.tag == 'offset':
                        index[elem.attrib['idRef']] = int(elem.text)
                    elem.clear()
        except etree.XMLSyntaxError:
            # The iteration has reached the end of the indexList tag and the parser
            # encounters the later elements in the document.
            pass
        return index_map
    @xml._keepstate
    def _find_index_list_offset(self):
        """
        Search relative to the bottom of the file upwards to find the offsets
        of the index lists.

        Returns
        -------
        list of int
            A list of byte offsets for `<indexList>` elements
        """
        # <indexListOffset> sits near the end of an indexed mzML document;
        # the last 1 KiB is assumed to be enough to contain it.
        self._source.seek(-1024, 2)
        text = self._source.read(1024)
        index_offsets = list(map(int, re.findall(br'<indexListOffset>(\d+)</indexListOffset>', text)))
        return index_offsets
    @xml._keepstate
    def _find_index_list(self):
        """
        Extract lists of index offsets from the end of the file.

        Returns
        -------
        dict of str -> dict of str -> int
        """
        offsets = self._find_index_list_offset()
        index_list = xml.HierarchicalOffsetIndex()
        for offset in offsets:
            # Sometimes the offset is at the very beginning of the file,
            # due to a bug in an older version of ProteoWizard. If this crude
            # check fails, don't bother searching the entire file, and fall back
            # on the base class's mechanisms.
            #
            # Alternative behavior here would be to start searching for the start
            # of the index from the bottom of the file, but this version of Proteowizard
            # also emits invalid offsets which do not improve retrieval time.
            if offset < 1024:
                continue
            index_list = self._iterparse_index_list(offset)
        return index_list
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/ms1.py | pyteomics/ms1.py | """
ms1 - read and write MS/MS data in MS1 format
=============================================
Summary
-------
`MS1 <http://dx.doi.org/10.1002/rcm.1603>`_ is a simple
human-readable format for MS1 data. It allows storing MS1 peak lists and experimental parameters.
This module provides minimalistic infrastructure for access to data stored in MS1 files.
Two main classes are :py:class:`MS1`, which provides an iterative, text-mode parser,
and :py:class:`IndexedMS1`, which is a binary-mode parser that supports random access using scan IDs
and retention times.
The function :py:func:`read` helps dispatch between the two classes.
Also, common parameters can be read from MS1 file header with :py:func:`read_header` function.
Classes
-------
:py:class:`MS1` - a text-mode MS1 parser. Suitable to read spectra from a file consecutively.
Needs a file opened in text mode (or will open it if given a file name).
:py:class:`IndexedMS1` - a binary-mode MS1 parser. When created, builds a byte offset index
for fast random access by spectrum ID. Sequential iteration is also supported.
Needs a seekable file opened in binary mode (if created from existing file object).
:py:class:`MS1Base` - abstract class, the common ancestor of the two classes above.
Can be used for type checking.
Functions
---------
:py:func:`read` - an alias for :py:class:`MS1` or :py:class:`IndexedMS1`.
:py:func:`chain` - read multiple files at once.
:py:func:`chain.from_iterable` - read multiple files at once, using an
iterable of files.
:py:func:`read_header` - get a dict with common parameters for all spectra
from the beginning of MS1 file.
-------------------------------------------------------------------------------
"""
# Copyright 2012 Anton Goloborodko, Lev Levitsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import auxiliary as aux
try:
import numpy as np
except ImportError:
np = None
class MS1Base(aux.ArrayConversionMixin):
    """Abstract class representing an MS1 file. Subclasses implement different approaches to parsing."""
    # Spectrum keys that are accumulated as lists and converted to arrays
    # by :py:class:`aux.ArrayConversionMixin`.
    _array_keys = ['m/z array', 'intensity array']
    # Per-spectrum parameter names whose string values are parsed into floats.
    _float_keys = ['RTime', 'RetTime']
    def __init__(self, source=None, use_header=False, convert_arrays=True, dtype=None, encoding=None, **kwargs):
        """
        Create an instance of a :py:class:`MS1Base` parser.

        Parameters
        ----------
        source : str or file or None, optional
            A file object (or file name) with data in MS1 format. Default is
            :py:const:`None`, which means read standard input.
        use_header : bool, optional
            Add the info from file header to each dict. Spectrum-specific parameters
            override those from the header in case of conflict.
            Default is :py:const:`False`.
        convert_arrays : one of {0, 1, 2}, optional
            If `0`, m/z, intensities and (possibly) charges will be returned as regular lists.
            If `1`, they will be converted to regular :py:class:`numpy.ndarray`'s.
            If `2`, charges will be reported as a masked array (default).
            The default option is the slowest. `1` and `2` require :py:mod:`numpy`.
        dtype : type or str or dict, optional
            dtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.
            Keys should be 'm/z array', 'intensity array', 'charge array'.
        encoding : str, optional
            File encoding.
        """
        super(MS1Base, self).__init__(source, use_header=use_header, convert_arrays=convert_arrays, dtype=dtype, encoding=encoding, **kwargs)
        if convert_arrays and np is None:
            raise aux.PyteomicsError('numpy is required for array conversion')
        self._use_header = use_header
        if use_header:
            self._header = self._read_header()
        else:
            self._header = None
        # Used in error messages; falls back to the str() of `source`.
        self._source_name = getattr(source, 'name', str(source))
    def reset(self):
        # Also drop any 'S' line buffered from a previously read spectrum.
        super(MS1Base, self).reset()
        self._pending_line = None
    @property
    def header(self):
        # Parameters from the file header (dict), or None when use_header=False.
        return self._header
    def _read_header_lines(self, lines):
        """Parse leading 'H' lines into a dict of header parameters."""
        header = {}
        for line in lines:
            if line[0] != 'H':
                break
            # Prefer tab-separated 'H<TAB>key<TAB>value'; fall back to splitting
            # on arbitrary whitespace when there are not enough tab tokens.
            tokens = line.split('\t', 2)
            if len(tokens) < 3:
                tokens = line.split(None, 2)
            key = tokens[1]
            val = tokens[2].strip()
            header[key] = val
        return header
    def _make_scan(self, info):
        """Finalize a spectrum dict: convert float parameters, build arrays."""
        for key in self._float_keys:
            if key in info['params']:
                info['params'][key] = float(info['params'][key])
        self._build_all_arrays(info)
        return info
    def _handle_S(self, line, sline, params):
        # NOTE: `sline` is ignored and the line is re-split with maxsplit=3,
        # because the caller's split may have merged trailing columns.
        sline = line.strip().split(None, 3)
        params['scan'] = tuple(sline[1:3])
        if len(sline) == 4:  # in MS2 the S line contains the precursor m/z as a 4th column
            params['precursor m/z'] = float(sline[3])
    def _handle_I(self, line, sline, params):
        # 'I' lines carry spectrum-specific key-value parameters.
        params[sline[1]] = sline[2] if len(sline) > 2 else ''
    def _handle_Z(self, line, sline, params):
        # 'Z' lines (MS2): charge state and the corresponding neutral mass.
        params.setdefault('charge', []).append(float(sline[1]))
        params.setdefault('neutral mass', []).append(float(sline[2]))
    def _handle_D(self, line, sline, params):
        # 'D' lines (MS2): analyzer-dependent annotations, kept as raw tokens.
        params.setdefault('analyzer', []).append(sline[1:])
    def _handle_peak(self, line, sline, info):
        """Append one 'm/z intensity' peak line to the growing arrays."""
        try:
            info['m/z array'].append(float(sline[0]))  # this may cause
            info['intensity array'].append(float(sline[1]))  # exceptions...
        except ValueError:
            raise aux.PyteomicsError(
                'Error when parsing %s. Line: %s' % (self._source_name, line))
        except IndexError:
            # A line with fewer than two columns is silently skipped.
            pass
    def _read_spectrum_lines(self, lines):
        """Consume lines until one complete spectrum has been read.

        Returns the spectrum dict, or :py:const:`None` when the input is
        exhausted. An 'S' line that starts the *next* spectrum is buffered
        in ``self._pending_line`` for the following call.
        """
        params = {}
        info = {'params': params}
        for k in self._array_keys:
            info[k] = []
        if self._use_header:
            params.update(self.header)
        if self._pending_line:
            # The buffered 'S' line from the previous call starts this spectrum.
            reading_spectrum = True
            self._handle_S(self._pending_line, None, params)
        else:
            reading_spectrum = False
        line_count = 0
        for i, line in enumerate(lines):
            line_count = i
            sline = line.strip().split(None, 2)
            if not sline:
                continue
            if not reading_spectrum:
                if sline[0] == 'S':
                    reading_spectrum = True
                    self._handle_S(line, sline, params)
                # otherwise we are not interested; do nothing, just move along
            else:
                if not sline:
                    pass
                elif sline[0] == 'S':
                    # The next spectrum begins: buffer its 'S' line, emit this one.
                    self._pending_line = line
                    return self._make_scan(info)
                else:
                    if sline[0] == 'I':  # spectrum-specific parameters!
                        self._handle_I(line, sline, params)
                    elif sline[0] == 'Z':  # MS2-specific charge state guess
                        self._handle_Z(line, sline, params)
                    elif sline[0] == 'D':  # MS2-specific analyzer annotation
                        self._handle_D(line, sline, params)
                    else:  # this must be a peak list
                        self._handle_peak(line, sline, info)
        self._pending_line = None
        # NOTE(review): `line_count` is the last enumerate index, so this also
        # returns None when exactly one line was consumed -- confirm intended.
        if line_count == 0:
            return
        return self._make_scan(info)
    def __getstate__(self):
        state = super(MS1Base, self).__getstate__()
        state['use_header'] = self._use_header
        state['header'] = self._header
        return state
    def __setstate__(self, state):
        super(MS1Base, self).__setstate__(state)
        self._use_header = state['use_header']
        self._header = state['header']
    def __reduce_ex__(self, protocol):
        # Recreate the reader without re-reading the header (use_header=False);
        # the saved header is restored afterwards via __setstate__.
        return (self.__class__,
                (self._source_init, False, self._convert_arrays, None, self.encoding),
                self.__getstate__())
class MS1(MS1Base, aux.FileReader):
    """
    Text-mode, sequential parser for MS1 files. Supports the `with` syntax
    and direct iteration.

    Iterating over an :py:class:`MS1` instance **yields** spectra one by one.
    Each 'spectrum' is a :py:class:`dict` with three keys: 'm/z array',
    'intensity array', and 'params'. 'm/z array' and 'intensity array' store
    :py:class:`numpy.ndarray`'s of floats, and 'params' stores a
    :py:class:`dict` of parameters.

    Attributes
    ----------
    header : dict
        The file header.
    """
    def __init__(self, source=None, use_header=False, convert_arrays=True, dtype=None, encoding=None, **kwargs):
        """
        Create an :py:class:`MS1` (text-mode) reader for a given MS1 file.

        Parameters
        ----------
        source : str or file or None, optional
            A file object (or file name) with data in MS1 format. Default is
            :py:const:`None`, which means read standard input.

            .. note :: If a file object is given, it must be opened in text mode.
        use_header : bool, optional
            Add the info from file header to each dict. Spectrum-specific
            parameters override those from the header in case of conflict.
            Default is :py:const:`False`.
        convert_arrays : one of {0, 1, 2}, optional
            If `0`, m/z, intensities and (possibly) charges will be returned
            as regular lists. If `1`, they will be converted to regular
            :py:class:`numpy.ndarray`'s. If `2`, charges will be reported as
            a masked array (default). The default option is the slowest.
            `1` and `2` require :py:mod:`numpy`.
        dtype : type or str or dict, optional
            dtype argument to :py:mod:`numpy` array constructor, one for all
            arrays or one for each key. Keys should be 'm/z array',
            'intensity array', 'charge array'.
        encoding : str, optional
            File encoding.

        Returns
        -------
        out : MS1
            The reader object.
        """
        super(MS1, self).__init__(
            source, use_header=use_header, convert_arrays=convert_arrays,
            dtype=dtype, encoding=encoding, mode='r', parser_func=self._read,
            pass_file=False, args=(), kwargs={})
    @aux._keepstate_method
    def _read_header(self):
        # The decorator restores the file position after the header is read.
        return self._read_header_lines(self._source)
    def _read(self):
        # Yield spectra until _read_spectrum_lines signals exhaustion (None).
        while True:
            spectrum = self._read_spectrum_lines(self._source)
            if spectrum is None:
                return
            yield spectrum
class IndexedMS1(MS1Base, aux.TaskMappingMixin, aux.TimeOrderedIndexedReaderMixin, aux.IndexedTextReader):
    """
    A class representing an MS1 file. Supports the `with` syntax and direct iteration for sequential
    parsing. Specific spectra can be accessed by title using the indexing syntax in constant time.
    If created using a file object, it needs to be opened in binary mode.

    When iterated, :py:class:`IndexedMS1` object yields spectra one by one.
    Each 'spectrum' is a :py:class:`dict` with three keys: 'm/z array', 'intensity array' and 'params'.
    'm/z array' and 'intensity array' store :py:class:`numpy.ndarray`'s of floats,
    and 'params' stores a :py:class:`dict` of parameters (keys and values are
    :py:class:`str`, keys corresponding to MS1).

    .. warning ::
        Labels for scan objects are constructed as the first number in the S line, as follows:
        for a line ``S 0 1`` the label is `'0'`. If these labels are not unique
        for the scans in the file, the indexed parser will not work correctly. Consider using
        :py:class:`MS1` instead.

    Attributes
    ----------
    header : dict
        The file header.
    time : RTLocator
        A property used for accessing spectra by retention time.
    """
    # Spectra begin at lines starting with 'S'; these two attributes drive the
    # byte-offset index built by aux.IndexedTextReader.
    delimiter = '\nS'
    label = r'^[\n]?S\s+(\S+)'
    def __init__(self, source=None, use_header=False, convert_arrays=True, dtype=None, encoding='utf-8', _skip_index=False, **kwargs):
        """
        Create an :py:class:`IndexedMS1` (binary-mode) reader for a given MS1 file.

        Parameters
        ----------
        source : str or file or None, optional
            A file object (or file name) with data in MS1 format. Default is
            :py:const:`None`, which means read standard input.

            .. note :: If a file object is given, it must be opened in binary mode.
        use_header : bool, optional
            Add the info from file header to each dict. Spectrum-specific parameters
            override those from the header in case of conflict.
            Default is :py:const:`False`.
        convert_arrays : one of {0, 1, 2}, optional
            If `0`, m/z, intensities and (possibly) charges will be returned as regular lists.
            If `1`, they will be converted to regular :py:class:`numpy.ndarray`'s.
            If `2`, charges will be reported as a masked array (default).
            The default option is the slowest. `1` and `2` require :py:mod:`numpy`.
        dtype : type or str or dict, optional
            dtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.
            Keys should be 'm/z array', 'intensity array', 'charge array'.
        encoding : str, optional
            File encoding.
        block_size : int, optional
            Size of the chunk (in bytes) used to parse the file when creating the byte offset index.

        Returns
        -------
        out : IndexedMS1
            The reader object.
        """
        super(IndexedMS1, self).__init__(source, use_header=use_header, convert_arrays=convert_arrays, dtype=dtype, encoding=encoding,
                                         parser_func=self._read, pass_file=False, args=(), kwargs={}, _skip_index=_skip_index, **kwargs)
    def __reduce_ex__(self, protocol):
        # Recreate with _skip_index=True: the offset index is restored from the
        # pickled state instead of being rebuilt from the file.
        return (self.__class__,
                (self._source_init, False, self._convert_arrays, None, self.encoding, True),
                self.__getstate__())
    @aux._keepstate_method
    def _read_header(self):
        # Read everything up to the first spectrum offset (or the whole file
        # when the index is empty) and parse the 'H' lines from it.
        try:
            first = next(v for v in self._offset_index.values())[0]
        except StopIteration:  # the index is empty, no spectra in file
            first = -1
        header_lines = self.read(first).decode(self.encoding).split('\n')
        return self._read_header_lines(header_lines)
    def _item_from_offsets(self, offsets):
        # Parse one spectrum from its (start, end) byte range.
        start, end = offsets
        lines = self._read_lines_from_offsets(start, end)
        return self._read_spectrum_lines(lines)
    def _read(self, **kwargs):
        # Sequential iteration is driven by the byte-offset index.
        for _, offsets in self._offset_index.items():
            spectrum = self._item_from_offsets(offsets)
            yield spectrum
    def get_spectrum(self, key):
        """Return the spectrum whose label (first number of its S line) equals `key`."""
        return self.get_by_id(key)
    def _get_time(self, spectrum):
        # Used by TimeOrderedIndexedReaderMixin for retention-time access.
        try:
            return spectrum['params']['RTime']
        except KeyError:
            raise aux.PyteomicsError('RT information not found.')
def read_header(source, *args, **kwargs):
    """
    Read the specified MS1 file, get the parameters specified in the header
    as a :py:class:`dict`.

    Parameters
    ----------
    source : str or file
        File name or file object representing an file in MS1 format.

    Returns
    -------
    header : dict
    """
    # Force header parsing, then expose the parsed header of the reader.
    kwargs['use_header'] = True
    reader = read(source, *args, **kwargs)
    return reader.header
def read(*args, **kwargs):
    """Read an MS1 file and return entries iteratively.

    Read the specified MS1 file, **yield** spectra one by one.
    Each 'spectrum' is a :py:class:`dict` with three keys: 'm/z array',
    'intensity array', and 'params'. 'm/z array' and
    'intensity array' store :py:class:`numpy.ndarray`'s of floats,
    and 'params' stores a :py:class:`dict` of parameters.

    Parameters
    ----------
    source : str or file or None, optional
        A file object (or file name) with data in MS1 format. Default is
        :py:const:`None`, which means read standard input.
    use_header : bool, optional
        Add the info from file header to each dict. Spectrum-specific parameters
        override those from the header in case of conflict.
        Default is :py:const:`False`.
    convert_arrays : one of {0, 1, 2}, optional
        If `0`, m/z, intensities and (possibly) charges will be returned as regular lists.
        If `1`, they will be converted to regular :py:class:`numpy.ndarray`'s.
        If `2`, charges will be reported as a masked array (default).
        The default option is the slowest. `1` and `2` require :py:mod:`numpy`.
    dtype : type or str or dict, optional
        dtype argument to :py:mod:`numpy` array constructor, one for all arrays or one for each key.
        Keys should be 'm/z array' and/or 'intensity array'.
    encoding : str, optional
        File encoding.
    use_index : bool, optional
        Determines which parsing method to use. If :py:const:`True`, an instance of
        :py:class:`IndexedMS1` is created. This facilitates random access by scan titles.
        If an open file is passed as `source`, it needs to be open in binary mode.
        If :py:const:`False` (default), an instance of :py:class:`MS1` is created. It reads
        `source` in text mode and is suitable for iterative parsing.

        .. warning ::
            Labels for scan objects are constructed as the first number in the S line, as follows:
            for a line ``S 0 1`` the label is `'0'`. If these labels are not unique
            for the scans in the file, the indexed parser will not work correctly.
    block_size : int, optional
        Size of the chunk (in bytes) used to parse the file when creating the byte offset index.
        (Accepted only for :py:class:`IndexedMS1`.)

    Returns
    -------
    out : :py:class:`MS1Base`
        An instance of :py:class:`MS1` or :py:class:`IndexedMS1`, depending on `use_index` and `source`.
    """
    # `source` may come positionally or as a keyword argument.
    source = args[0] if args else kwargs.get('source')
    requested_index = kwargs.pop('use_index', None)
    indexed = aux._check_use_index(source, requested_index, False)
    reader_class = IndexedMS1 if indexed else MS1
    return reader_class(*args, **kwargs)
# Convenience for reading several MS1 files in sequence: `chain(f1, f2, ...)`
# iterates spectra from each file in turn (see aux._make_chain).
chain = aux._make_chain(read, 'read')
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/achrom.py | pyteomics/achrom.py | """
achrom - additive model of polypeptide chromatography
=====================================================
Summary
-------
The additive model of polypeptide chromatography, or achrom, is the most basic
model for peptide retention time prediction. The main equation behind
achrom has the following form:
.. math::
RT = (1 + m\\,ln N) \\sum_{i=1}^{i=N}{RC_i n_i} + RT_0
Here, :math:`RC_i` is the retention coefficient of the amino acid
residues of the i-th type, :math:`n_i` corresponds to the number of amino acid
residues of type :math:`i` in the peptide sequence, N is the total number of
different *types* of amino acid residues present,
and :math:`RT_0` is a constant retention time shift.
In order to use achrom, one needs to find the retention
coefficients, using experimentally determined retention times for a training set
of peptide retention times, i.e. to *calibrate* the model.
Calibration
-----------
:py:func:`get_RCs` - find a set of retention coefficients using a
given set of peptides with known retention times and a fixed value of
length correction parameter.
:py:func:`get_RCs_vary_lcp` - find the best length correction parameter
and a set of retention coefficients for a given peptide sample.
Retention time calculation
--------------------------
:py:func:`calculate_RT` - calculate the retention time of a peptide
using a given set of retention coefficients.
Data
----
:py:data:`RCs_guo_ph2_0` - a set of retention coefficients (RCs)
from [#Guo1]_. Conditions: Synchropak RP-P C18 column (250 x 4.1 mm
I.D.), gradient (A = 0.1% aq. TFA, pH 2.0; B = 0.1% TFA in acetonitrile) at
1% B/min, flow rate 1 ml/min, 26 centigrades.
:py:data:`RCs_guo_ph7_0` - a set of retention coefficients (RCs)
from [#Guo1]_. Conditions: Synchropak RP-P C18 column (250 x 4.1 mm
I.D.), gradient (A = aq. 10 mM (NH4)2HPO4 - 0.1 M NaClO4, pH 7.0; B
= 0.1 M NaClO4 in 60% aq. acetonitrile) at 1.67% B/min, flow rate 1
ml/min, 26 centigrades.
:py:data:`RCs_meek_ph2_1` - a set of RCs from [#Meek]_. Conditions: Bio-Rad
"ODS" column, gradient (A = 0.1 M NaClO4, 0.1% phosphoric acid in
water; B = 0.1 M NaClO4, 0.1% phosphoric acid in 60%
aq. acetonitrile) at 1.25% B/min, room temperature.
:py:data:`RCs_meek_ph7_4` - a set of RCs from [#Meek]_. Conditions: Bio-Rad
"ODS" column, gradient (A = 0.1 M NaClO4, 5 mM phosphate buffer in
water; B = 0.1 M NaClO4, 5 mM phosphate buffer in 60%
aq. acetonitrile) at 1.25% B/min, room temperature.
:py:data:`RCs_browne_tfa` - a set of RCs found in
[#Browne]_. Conditions: Waters µBondapak C18 column, gradient (A =
0.1% aq. TFA, B = 0.1% TFA in acetonitrile) at 0.33% B/min, flow
rate 1.5 ml/min.
:py:data:`RCs_browne_hfba` - a set of RCs found in
[#Browne]_. Conditions: Waters µBondapak C18 column, gradient (A =
0.13% aq. HFBA, B = 0.13% HFBA in acetonitrile) at 0.33% B/min, flow
rate 1.5 ml/min.
:py:data:`RCs_palmblad` - a set of RCs from
[#Palmblad]_. Conditions: a fused silica column (80-100 x 0.200 mm
I.D.) packed in-house with C18 ODS-AQ; solvent A = 0.5% aq. HAc,
B = 0.5% HAc in acetonitrile.
:py:data:`RCs_yoshida` - a set of RCs for normal phase chromatography
from [#Yoshida]_. Conditions:
TSK gel Amide-80 column (250 x 4.6 mm I.D.), gradient (A = 0.1% TFA
in ACN-water (90:10); B = 0.1% TFA in ACN-water (55:45)) at 0.6%
water/min, flow rate 1.0 ml/min, 40 centigrades.
:py:data:`RCs_yoshida_lc` - a set of length-corrected RCs for normal phase
chromatography. The set was calculated in [#Moskovets]_ for the data from
[#Yoshida]_.
Conditions:
TSK gel Amide-80 column (250 x 4.6 mm I.D.), gradient (A = 0.1% TFA
in ACN-water (90:10); B = 0.1% TFA in ACN-water (55:45)) at 0.6%
water/min, flow rate 1.0 ml/min, 40 centigrades.
:py:data:`RCs_zubarev` - a set of length-corrected RCs calculated
on a dataset used in [#Goloborodko]_.
Conditions: Reprosil-Pur C18-AQ column (150 x 0.075 mm I.D.), gradient (A =
0.5% AA in water; B = 0.5% AA in ACN-water (90:10)) at
0.5% water/min, flow rate 200.0 nl/min, room temperature.
:py:data:`RCs_gilar_atlantis_ph3_0` - a set of retention coefficients obtained
in [#Gilar]_.
Conditions: Atlantis HILIC silica column, (150 x 2.1 mm I.D.), 3 um, 100 A,
gradient (A = water, B = ACN, C = 200 mM ammonium formate):
0 min, 5% A, 90% B, 5% C; 62.5 min, 55% A, 40% B, 5% C
at 0.2 ml/min, temperature 40 C, pH 3.0
:py:data:`RCs_gilar_atlantis_ph4_5` - a set of retention coefficients obtained
in [#Gilar]_.
Conditions: Atlantis HILIC silica column, (150 x 2.1 mm I.D.), 3 um, 100 A,
gradient (A = water, B = ACN, C = 200 mM ammonium formate):
0 min, 5% A, 90% B, 5% C; 62.5 min, 55% A, 40% B, 5% C
at 0.2 ml/min, temperature 40 C, pH 4.5
:py:data:`RCs_gilar_atlantis_ph10_0` - a set of retention coefficients
obtained in [#Gilar]_.
Conditions: Atlantis HILIC silica column, (150 x 2.1 mm I.D.), 3 um, 100 A,
gradient (A = water, B = ACN, C = 200 mM ammonium formate):
0 min, 5% A, 90% B, 5% C; 62.5 min, 55% A, 40% B, 5% C
at 0.2 ml/min, temperature 40 C, pH 10.0
:py:data:`RCs_gilar_beh` - a set of retention coefficients obtained in
[#Gilar]_.
Conditions: ACQUITY UPLC BEH HILIC column (150 x 2.1 mm I.D.), 1.7 um, 130 A,
Mobile phase A: 10 mM ammonium formate buffer, pH 4.5 prepared by
titrating 10 mM solution of FA with ammonium hydroxide. Mobile phase B:
90% ACN, 10% mobile phase A (v:v).
Gradient: 90-60% B in 50 min.
:py:data:`RCs_gilar_beh_amide` - a set of retention coefficients obtained in
[#Gilar]_.
Conditions: ACQUITY UPLC BEH glycan column (150 x 2.1 mm I.D.), 1.7 um, 130 A,
Mobile phase A: 10 mM ammonium formate buffer, pH 4.5 prepared by
titrating 10 mM solution of FA with ammonium hydroxide. Mobile phase B:
90% ACN, 10% mobile phase A (v:v).
Gradient: 90-60% B in 50 min.
:py:data:`RCs_gilar_rp` - a set of retention coefficients obtained in
[#Gilar]_.
Conditions: ACQUITY UPLC BEH C18 column (100 mm x 2.1 mm I.D.), 1.7 um, 130 A.
Mobile phase A: 0.02% TFA in water, mobile phase B: 0.018% TFA in ACN.
Gradient: 0 to 50% B in 50 min, flow rate 0.2 ml/min, temperature 40 C.,
pH 2.6.
:py:data:`RCs_krokhin_100A_fa` - a set of retention coefficients obtained in
[#Krokhin]_.
Conditions: 300 um x 150mm PepMap100 (Dionex, 0.1% FA), packed with
5-um Luna C18(2) (Phenomenex, Torrance, CA), pH=2.0.
Both eluents A (2% ACN in water) and B (98% ACN) contained
0.1% FA as ion-pairing modifier. 0.33% ACN/min
linear gradient (0-30% B).
:py:data:`RCs_krokhin_100A_tfa` - a set of retention coefficients obtained in
[#Krokhin]_.
Conditions: 300 um x 150mm PepMap100 (Dionex, 0.1% TFA), packed with
5-um Luna C18(2) (Phenomenex, Torrance, CA), pH=2.0.
Both eluents A (2% ACN in water) and B (98% ACN) contained
0.1% TFA as ion-pairing modifier. 0.33% ACN/min
linear gradient (0-30% B).
Theory
------
The additive model of polypeptide chromatography, or the model of
retention coefficients was the earliest attempt to describe the dependence of
retention time of a polypeptide in liquid chromatography on its sequence
[#Meek]_, [#Guo1]_. In this model, each amino acid is assigned a number, or
a *retention coefficient* (RC) describing its retention properties. The
retention time (RT) during a gradient elution is then calculated as:
.. math::
RT = \\sum_{i=1}^{i=N}{RC_i \\cdot n_i} + RT_0,
which is the sum of retention coefficients of all amino acid residues in a
polypeptide. This equation can also be expressed in terms of linear
algebra:
.. math::
RT = \\bar{aa} \\cdot \\bar{RC} + RT_0,
where :math:`\\bar{aa}` is a vector of amino acid composition,
i.e. :math:`\\bar{aa}_i` is the number of amino acid residues of i-th
type in a polypeptide; :math:`\\bar{RC}` is a vector of respective
retention coefficients.
In this formulation, it is clear that additive model gives the same results for
any two peptides with different sequences but the same amino acid
composition. In other words, **additive model is not sequence-specific**.
The additive model has two advantages over all other models of chromatography:
it is easy to understand and easy to use. The rule behind the additive model is as
simple as it could be: **each amino acid residue shifts retention time by a
fixed value, depending only on its type**. This rule allows geometrical
interpretation. Each peptide may be represented by a point in 21-dimensional
space, with first 20 coordinates equal to the amounts of corresponding amino
acid residues in the peptide and 21-st coordinate equal to RT. The additive
model assumes that a line may be drawn through these points. Of course, this
assumption is valid only partially, and most points would not lie on the
line. But the line would describe the main trend and could be used to estimate
retention time for peptides with known amino acid composition.
This best fit line is described by retention coefficients and :math:`RT_0`.
The procedure of finding these coefficients is called *calibration*. There is
`an analytical solution to calibration of linear models
<http://en.wikipedia.org/wiki/Linear_regression>`_, which makes them
especially useful in real applications.
Several attempts were made in order to improve the accuracy of prediction by
the additive model (for a review of the field we suggest to read [#Baczek]_
and [#Babushok]_). The two implemented in this module are the logarithmic
length correction term described in [#MantLogLen]_ and additional sets of
retention coefficients for terminal amino acid residues [#Tripet]_.
Logarithmic length correction
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This enhancement was firstly described in [#MantLogLen]_. Briefly, it was
found that the following equation better describes the dependence of RT on the
peptide sequence:
.. math::
RT = \\sum_{i=1}^{i=N}{RC_i} + m\\,ln N \\sum_{i=1}^{i=N}{RC_i} + RT_0
We would call the second term :math:`m\\,ln N \\sum_{i=1}^{i=N}{RC_i}` *the
length correction term* and m - *the length correction parameter*. The
simplified and vectorized form of this equation would be:
.. math::
RT = (1 + m\\,ln N) \\, \\bar{RC} \\cdot \\bar{aa} + RT_0
This equation may be reduced to a linear form and solved by the standard
methods.
Terminal retention coefficients
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Another significant improvement may be obtained through introduction of
separate sets of retention coefficients for terminal amino acid residues
[#Tripet]_.
References
----------
.. [#Meek] Meek, J. L. `Prediction of peptide retention times in high-pressure
liquid chromatography on the basis of amino acid composition.
<http://www.ncbi.nlm.nih.gov/pubmed/6929513>`_
PNAS, 1980, 77 (3), 1632-1636.
.. [#Guo1] Guo, D.; Mant, C. T.; Taneja, A. K.; Parker, J. M. R.; Hodges,
R. S. `Prediction of peptide retention times in reversed-phase
high-performance liquid chromatography I. Determination of retention
coefficients of amino acid residues of model synthetic peptides.
<http://dx.doi.org/10.1016/0021-9673(86)80102-9>`_
Journal of Chromatography A, 1986, 359, 499-518.
.. [#Baczek] Baczek, T.; Kaliszan, R. `Predictions of peptides' retention times
in reversed-phase liquid chromatography as a new supportive tool to improve
protein identification in proteomics.
<http://dx.doi.org/10.1002/pmic.200800544>`_
Proteomics, 2009, 9 (4), 835-47.
.. [#Babushok] Babushok, V. I.; Zenkevich, I. G. `Retention Characteristics of
Peptides in RP-LC: Peptide Retention Prediction.
<http://dx.doi.org/10.1365/s10337-010-1721-8>`_
Chromatographia, 2010, 72 (9-10), 781-797.
.. [#MantLogLen] Mant, C. T.; Zhou, N. E.; Hodges, R. S. `Correlation of
protein retention times in reversed-phase chromatography with polypeptide
chain length and hydrophobicity.
<http://dx.doi.org/10.1016/S0021-9673(01)93882-8>`_
Journal of Chromatography A, 1989, 476, 363-375.
.. [#Tripet] Tripet, B.; Cepeniene, D.; Kovacs, J. M.; Mant, C. T.; Krokhin,
O. V.; Hodges, R. S. `Requirements for prediction of peptide retention time
in reversed-phase high-performance liquid chromatography:
hydrophilicity/hydrophobicity of side-chains at the N- and C-termini of
peptides are dramatically affected by the end-groups and location.
<http://dx.doi.org/10.1016/j.chroma.2006.12.024>`_
Journal of chromatography A, 2007, 1141 (2), 212-25.
.. [#Browne] Browne, C. A.; Bennett, H. P. J.; Solomon, S. `The
isolation of peptides by high-performance liquid chromatography
using predicted elution positions
<http://www.sciencedirect.com/science/article/pii/000326978290238X>`_.
Analytical Biochemistry, 1982, 124 (1), 201-208.
.. [#Palmblad] Palmblad, M.; Ramstrom, M.; Markides, K. E.; Hakansson,
P.; Bergquist, J. `Prediction of Chromatographic Retention and
Protein Identification in Liquid Chromatography/Mass
Spectrometry
<http://pubs.acs.org/doi/abs/10.1021/ac0256890>`_.
Analytical Chemistry, 2002, 74 (22), 5826-5830.
.. [#Yoshida] Yoshida, T. Calculation of peptide retention
coefficients in normal-phase liquid chromatography. Journal of
Chromatography A, 1998, 808 (1-2), 105-112.
.. [#Moskovets] Moskovets, E.; Goloborodko A. A.; Gorshkov A. V.; Gorshkov M.V.
`Limitation of predictive 2-D liquid chromatography in reducing the database
search space in shotgun proteomics: In silico studies.
<http://dx.doi.org/10.1002/jssc.201100798>`_
Journal of Separation Science, 2012, 35 (14), 1771-1778.
.. [#Goloborodko] Goloborodko A. A.; Mayerhofer C.; Zubarev A. R.;
Tarasova I. A.; Gorshkov A. V.; Zubarev, R. A.; Gorshkov, M. V.
`Empirical approach to false discovery rate
estimation in shotgun proteomics. <http://dx.doi.org/10.1002/rcm.4417>`_
Rapid communications in mass spectrometry, 2010, 24(4), 454-62.
.. [#Gilar] Gilar, M., & Jaworski, A. (2011). `Retention behavior of peptides in
hydrophilic-interaction chromatography.
<http://dx.doi.org/10.1016/j.chroma.2011.04.005>`_
Journal of chromatography A, 1218(49), 8890-6.
.. [#Krokhin] Dwivedi, R. C.; Spicer, V.; Harder, M.; Antonovici, M.; Ens, W.;
Standing, K. G.; Wilkins, J. A.; Krokhin, O. V. (2008). `Practical
implementation of 2D HPLC scheme with accurate peptide retention prediction
in both dimensions for high-throughput bottom-up proteomics
<http://pubs.acs.org/doi/abs/10.1021/ac800984n>`_.
Analytical Chemistry, 80(18), 7036-42.
Dependencies
------------
This module requires :py:mod:`numpy` and, optionally, :py:mod:`scikit-learn`
(for MAE regression).
--------------------------------------------------------------------------------
"""
# Copyright 2012 Anton Goloborodko, Lev Levitsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .auxiliary import linear_regression, PyteomicsError
try:
from sklearn.linear_model import QuantileRegressor
except ImportError:
QuantileRegressor = None
from . import parser
def get_RCs(sequences, RTs, lcp=-0.21, term_aa=False, metric='mse', **kwargs):
    """Calculate the retention coefficients of amino acids using
    retention times of a peptide sample and a fixed value of length
    correction parameter.

    Parameters
    ----------
    sequences : list of str
        List of peptide sequences.
    RTs : list of float
        List of corresponding retention times.
    lcp : float, optional
        A multiplier before ln(L) term in the equation for the retention
        time of a peptide. Set to -0.21 by default.
    term_aa : bool, optional
        If :py:const:`True`, terminal amino acids are treated as being
        modified with 'ntermX'/'ctermX' modifications. :py:const:`False`
        by default.
    metric : str, optional
        Metric for the regression problem. Set to "mse" (mean squared
        error) by default. Alternative: "mae" (mean absolute error),
        which uses quantile regression.

        .. note ::
            `"mae"` requires :py:mod:`scikit-learn` for
            `quantile regression <https://scikit-learn.org/stable/auto_examples/linear_model/plot_quantile_regression.html>`_.

    labels : list of str, optional
        List of all possible amino acids and terminal groups.
        If not given, any modX labels are allowed.

    Returns
    -------
    RC_dict : dict
        Dictionary with the calculated retention coefficients.

        - RC_dict['aa'] -- amino acid retention coefficients.
        - RC_dict['const'] -- constant retention time shift.
        - RC_dict['lcp'] -- length correction parameter.

    Raises
    ------
    PyteomicsError
        If `metric` is not "mse" or "mae", or if "mae" is requested
        without scikit-learn installed.

    Examples
    --------
    >>> RCs = get_RCs(['A','AA'], [1.0, 2.0], 0.0, labels=['A'])
    >>> abs(RCs['aa']['A'] - 1) < 1e-6 and abs(RCs['const']) < 1e-6
    True
    >>> RCs = get_RCs(['A','AA','B'], [1.0, 2.0, 2.0], 0.0, labels=['A','B'])
    >>> abs(RCs['aa']['A'] - 1) + abs(RCs['aa']['B'] - 2) + abs(RCs['const']) < 1e-6
    True
    """
    labels = kwargs.get('labels')

    # Parse sequences into composition dicts; already-parsed dicts pass through.
    peptide_dicts = [
        parser.amino_acid_composition(peptide, False, term_aa, allow_unknown_modifications=True, labels=labels)
        if not isinstance(peptide, dict) else peptide
        for peptide in sequences]

    # Fix the iteration order of the detected labels once, so that the
    # columns of the design matrix stay aligned with the coefficient vector.
    detected_amino_acids = list(
        {aa for peptide_dict in peptide_dicts for aa in peptide_dict})

    # Build the design matrix for the multidimensional linear regression:
    # each row is the length-corrected amino acid composition of one
    # peptide plus a trailing 1 for the constant term.
    composition_array = []
    for pdict in peptide_dicts:
        loglen = np.log(parser.length(pdict))
        composition_array.append(
            [pdict.get(aa, 0.) * (1. + lcp * loglen) for aa in detected_amino_acids] + [1.])

    # Work on a local copy of RTs: the normalizing rows added below need
    # matching response values, and the caller's list must never be mutated
    # (the previous append-then-pop approach left it modified whenever an
    # exception was raised in between).
    RTs = list(RTs)

    # Add normalizing conditions for terminal retention coefficients. The
    # condition we are using here is quite arbitrary. It implies that the sum
    # of N- or C-terminal RCs minus the sum of corresponding internal RCs must
    # be equal to zero.
    if term_aa:
        for term_label in ['nterm', 'cterm']:
            normalizing_peptide = []
            for aa in detected_amino_acids:
                if aa.startswith(term_label):
                    normalizing_peptide.append(1.0)
                elif (term_label + aa) in detected_amino_acids:
                    normalizing_peptide.append(-1.0)
                else:
                    normalizing_peptide.append(0.0)
            normalizing_peptide.append(0.0)
            composition_array.append(normalizing_peptide)
            RTs.append(0.0)

    if metric == 'mse':
        # Ordinary least squares.
        RCs, _, _, _ = np.linalg.lstsq(np.array(composition_array), np.array(RTs), rcond=None)
    elif metric == 'mae':
        if QuantileRegressor is None:
            raise PyteomicsError("`metric='mae'` requires scikit-learn.")
        # Median (quantile) regression minimizes the mean absolute error.
        QR = QuantileRegressor(fit_intercept=False, alpha=0, solver='highs')
        QR.fit(np.array(composition_array), np.array(RTs))
        RCs = QR.coef_
    else:
        raise PyteomicsError('Invalid metric "{}". Must be "mse" or "mae".'.format(metric))

    # Form output.
    RC_dict = {}
    RC_dict['aa'] = dict(zip(
        detected_amino_acids,
        [rc.item() for rc in RCs[:len(detected_amino_acids)]]))
    RC_dict['aa'][parser.std_nterm] = 0.0
    RC_dict['aa'][parser.std_cterm] = 0.0
    RC_dict['const'] = RCs[len(detected_amino_acids)].item()
    RC_dict['lcp'] = lcp

    # Find remaining terminal RCs.
    if term_aa:
        for term_label in ['nterm', 'cterm']:
            # Check if there are terminal RCs remaining undefined.
            undefined_term_RCs = [
                aa for aa in RC_dict['aa']
                if aa[1:5] != 'term' and term_label + aa not in RC_dict['aa']]
            if not undefined_term_RCs:
                continue
            # Find a linear relationship between internal and terminal RCs.
            defined_term_RCs = [
                aa for aa in RC_dict['aa']
                if aa[1:5] != 'term' and term_label + aa in RC_dict['aa']]
            a, b, r, stderr = linear_regression(
                [RC_dict['aa'][aa] for aa in defined_term_RCs],
                [RC_dict['aa'][term_label + aa] for aa in defined_term_RCs])
            # Define missing terminal RCs using this linear equation.
            for aa in undefined_term_RCs:
                RC_dict['aa'][term_label + aa] = a * RC_dict['aa'][aa] + b

    return RC_dict
def get_RCs_vary_lcp(sequences, RTs, term_aa=False, lcp_range=(-1.0, 1.0), metric='mse', **kwargs):
    """Find the best combination of a length correction parameter and
    retention coefficients for a given peptide sample.

    The length correction parameter (lcp) is located by iterative grid
    refinement: a 10-point grid over `lcp_range` is scanned, the grid is
    re-centered on the best value found, and the process repeats until
    the grid step drops below `lcp_accuracy`. Goodness of fit is measured
    by the correlation between observed and predicted retention times.

    Parameters
    ----------
    sequences : list of str
        List of peptide sequences.
    RTs : list of float
        List of corresponding retention times.
    term_aa : bool, optional
        If True, terminal amino acids are treated as being
        modified with 'ntermX'/'ctermX' modifications. False by default.
    metric : str, optional
        Metric for the regression problem: "mse" (default) or "mae".

        .. note ::
            `"mae"` requires :py:mod:`scikit-learn` for
            `quantile regression <https://scikit-learn.org/stable/auto_examples/linear_model/plot_quantile_regression.html>`_.

    lcp_range : 2-tuple of float, optional
        Range of possible values of the length correction parameter.
    labels : list of str, optional
        List of labels for all possible amino acids and terminal groups.
        If not given, any modX labels are allowed.
    lcp_accuracy : float, optional
        The accuracy of the length correction parameter calculation.

    Returns
    -------
    RC_dict : dict
        Dictionary with keys 'aa' (retention coefficients),
        'const' (constant retention time shift) and
        'lcp' (length correction parameter).

    Examples
    --------
    >>> RCs = get_RCs_vary_lcp(['A', 'AA', 'AAA'], [1.0, 2.0, 3.0], labels=['A'])
    >>> abs(RCs['aa']['A'] - 1) + abs(RCs['lcp']) + abs(RCs['const']) < 1e-6
    True
    """
    labels = kwargs.get('labels')
    lcp_accuracy = kwargs.get('lcp_accuracy', 0.1)

    # Parse each sequence once up front; dicts are used as-is.
    compositions = [
        peptide if isinstance(peptide, dict)
        else parser.amino_acid_composition(peptide, False, term_aa, allow_unknown_modifications=True, labels=labels)
        for peptide in sequences]

    best_corr = -1.1  # below any possible correlation coefficient
    best_fit = {}
    low, high = lcp_range
    step = (high - low) / 10.0

    while step > lcp_accuracy:
        for lcp in np.arange(low, high, (high - low) / 10.0):
            candidate = get_RCs(compositions, RTs, lcp.item(), term_aa, labels=labels, metric=metric)
            predicted = [calculate_RT(comp, candidate) for comp in compositions]
            corr = linear_regression(RTs, predicted)[2]
            if corr > best_corr:
                best_corr = corr
                best_fit = dict(candidate)
        # Zoom in around the best lcp found so far.
        low = best_fit['lcp'] - step
        high = best_fit['lcp'] + step
        step = (high - low) / 10.0

    return best_fit
def calculate_RT(peptide, RC_dict, raise_no_mod=True):
    """Calculate the retention time of a peptide using a given set
    of retention coefficients.

    Parameters
    ----------
    peptide : str or dict
        A peptide sequence or amino acid composition.
    RC_dict : dict
        A set of retention coefficients, length correction parameter and
        a fixed retention time shift. Keys are: 'aa', 'lcp' and 'const'.
    raise_no_mod : bool, optional
        If :py:const:`True` then an exception is raised when a modified amino
        acid from `peptide` is not found in `RC_dict`. If :py:const:`False`,
        then the retention coefficient for the non-modified amino acid residue
        is used instead. :py:const:`True` by default.

    Returns
    -------
    RT : float
        Calculated retention time.

    Raises
    ------
    PyteomicsError
        If a residue has no retention coefficient in `RC_dict` and no
        fallback is available.

    Examples
    --------
    >>> RT = calculate_RT('AA', {'aa': {'A': 1.1}, 'lcp':0.0, 'const': 0.1})
    >>> abs(RT - 2.3) < 1e-6 # Float comparison
    True
    >>> RT = calculate_RT('AAA', {'aa': {'ntermA': 1.0, 'A': 1.1, 'ctermA': 1.2},\
        'lcp': 0.0, 'const':0.1})
    >>> abs(RT - 3.4) < 1e-6 # Float comparison
    True
    >>> RT = calculate_RT({'A': 3}, {'aa': {'ntermA': 1.0, 'A': 1.1, 'ctermA': 1.2},\
        'lcp': 0.0, 'const':0.1})
    >>> abs(RT - 3.4) < 1e-6 # Float comparison
    True
    """
    # Internal (non-terminal) labels, used when parsing string input.
    amino_acids = [aa for aa in RC_dict['aa']
                   if not (aa[:5] == 'nterm' or aa[:5] == 'cterm')]

    # Terminal residues are tracked separately only if this RC set
    # defines any terminal coefficients.
    term_aa = False
    for aa in RC_dict['aa']:
        if aa[:5] == 'nterm' or aa[:5] == 'cterm':
            term_aa = True
            break

    # Calculate retention time.
    if isinstance(peptide, dict):
        peptide_dict = peptide
    else:
        peptide_dict = parser.amino_acid_composition(peptide, False, term_aa, allow_unknown_modifications=True, labels=amino_acids)
    RT = 0.0
    for aa in peptide_dict:
        if aa not in RC_dict['aa']:
            if len(aa) == 1:
                raise PyteomicsError('No RC for residue "{}".'.format(aa))
            if (not raise_no_mod) and aa[-1] in RC_dict['aa']:
                # Fall back to the RC of the unmodified residue.
                RT += peptide_dict[aa] * RC_dict['aa'][aa[-1]]
            else:
                # Fixed a stray extra quote in this message ('"{0}""' -> '"{0}"').
                raise PyteomicsError(
                    'Residue "{0}" not found in RC_dict. '.format(aa) +
                    'Set raise_no_mod=False to ignore this error ' +
                    'and use the RC for "{0}" instead.'.format(aa[-1]))
        else:
            RT += peptide_dict[aa] * RC_dict['aa'][aa]

    # Apply the logarithmic length correction: RT *= (1 + lcp * ln(L)).
    length_correction_term = (
        1.0 + RC_dict.get('lcp', 0) * np.log(parser.length(peptide_dict)))
    RT *= length_correction_term
    RT += RC_dict.get('const', 0)
    # np.log above makes RT a numpy scalar; .item() converts to plain float.
    return RT.item()
RCs_guo_ph2_0 = {'aa': {'K': -2.1,
'G': -0.2,
'L': 8.1,
'A': 2.0,
'C': 2.6,
'E': 1.1,
'D': 0.2,
'F': 8.1,
'I': 7.4,
'H': -2.1,
'M': 5.5,
'N': -0.6,
'Q': 0.0,
'P': 2.0,
'S': -0.2,
'R': -0.6,
'T': 0.6,
'W': 8.8,
'V': 5.0,
'Y': 4.5,
'H-': 0.0,
'-OH': 0.0},
'lcp': 0.0,
'const': 0.0}
"""A set of retention coefficients from Guo, D.; Mant, C. T.; Taneja,
A. K.; Parker, J. M. R.; Hodges, R. S. Prediction of peptide
retention times in reversed-phase high-performance liquid
chromatography I. Determination of retention coefficients of amino
acid residues of model synthetic peptides. Journal of Chromatography
A, 1986, 359, 499-518.
Conditions: Synchropak RP-P C18 column (250 x 4.1 mm I.D.), gradient
(A = 0.1% aq. TFA, pH 2.0; B = 0.1% TFA in acetonitrile) at 1% B/min,
flow rate 1 ml/min, 26 centigrades.
"""
RCs_guo_ph7_0 = {'aa': {'K': -0.2,
'G': -0.2,
'L': 9.0,
'A': 2.2,
'C': 2.6,
'E': -1.3,
'D': -2.6,
'F': 9.0,
'I': 8.3,
'H': 2.2,
'M': 6.0,
'N': -0.8,
'Q': 0.0,
'P': 2.2,
'S': -0.5,
'R': 0.9,
'T': 0.3,
'W': 9.5,
'V': 5.7,
'Y': 4.6,
'H-': 0.0,
'-OH': 0.0},
'lcp': 0.0,
'const': 0.0}
"""A set of retention coefficients from Guo, D.; Mant, C. T.; Taneja,
A. K.; Parker, J. M. R.; Hodges, R. S. Prediction of peptide
retention times in reversed-phase high-performance liquid
chromatography I. Determination of retention coefficients of amino
acid residues of model synthetic peptides. Journal of Chromatography
A, 1986, 359, 499-518.
Conditions: Synchropak RP-P C18 column (250 x 4.1 mm I.D.), gradient
(A = aq. 10 mM (NH4)2HPO4 - 0.1 M NaClO4, pH 7.0; B = 0.1 M NaClO4 in
60% aq. acetonitrile) at 1.67% B/min, flow rate 1 ml/min, 26
centigrades.
"""
RCs_meek_ph2_1 = {'aa': {'K': -3.2,
'G': -0.5,
'L': 10.0,
'A': -0.1,
'C': -2.2,
'E': -7.5,
'D': -2.8,
'F': 13.9,
'I': 11.8,
'H': 0.8,
'M': 7.1,
'N': -1.6,
'Q': -2.5,
'P': 8.0,
'S': -3.7,
'R': -4.5,
'T': 1.5,
'W': 18.1,
'V': 3.3,
'Y': 8.2,
'H-': 0.0,
'-OH': 0.0},
'lcp': 0.0,
'const': 0.0}
"""A set of retention coefficients determined in Meek,
J. L. Prediction of peptide retention times in high-pressure liquid
chromatography on the basis of amino acid composition. PNAS, 1980, 77
(3), 1632-1636.
.. note :: C stands for Cystine.
Conditions: Bio-Rad "ODS" column, gradient (A = 0.1 M NaClO4,
0.1% phosphoric acid in water; B = 0.1 M NaClO4, 0.1% phosphoric acid
in 60% aq. acetonitrile) at 1.25% B/min, room temperature.
"""
RCs_meek_ph7_4 = {'aa': {'K': 0.1,
'G': 0.0,
'L': 8.8,
'A': 0.5,
'C': -6.8,
'E': -16.9,
'D': -8.2,
'F': 13.2,
'I': 13.9,
'H': -3.5,
'M': 4.8,
'N': 0.8,
'Q': -4.8,
'P': 6.1,
'S': 1.2,
'R': 0.8,
'T': 2.7,
'W': 14.9,
'V': 2.7,
'Y': 6.1,
'H-': 0.0,
'-OH': 0.0},
'lcp': 0.0,
'const': 0.0}
"""A set of retention coefficients determined in Meek,
J. L. Prediction of peptide retention times in high-pressure liquid
chromatography on the basis of amino acid composition. PNAS, 1980, 77
(3), 1632-1636.
.. note :: C stands for Cystine.
Conditions: Bio-Rad "ODS" column, gradient (A = 0.1 M NaClO4,
5 mM phosphate buffer in water; B = 0.1 M NaClO4, 5 mM phosphate buffer
in 60% aq. acetonitrile) at 1.25% B/min, room temperature.
"""
RCs_browne_tfa = {'aa': {'K': -3.7,
'G': -1.2,
'L': 20.0,
'A': 7.3,
'C': -9.2,
'E': -7.1,
'D': -2.9,
'F': 19.2,
'I': 6.6,
'H': -2.1,
'M': 5.6,
'N': -5.7,
'Q': -0.3,
'P': 5.1,
'S': -4.1,
'pS': -6.5,
'R': -3.6,
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | true |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/traml.py | pyteomics/traml.py | """
traml - targeted MS transition data in TraML format
===================================================
Summary
-------
TraML is a standard rich XML-format for targeted mass spectrometry method definitions.
Please refer to `psidev.info <http://www.psidev.info/traml>`_
for the detailed specification of the format and structure of TraML files.
This module provides a minimalistic way to extract information from TraML
files. You can use the object-oriented interface (:class:`TraML` instances) to
access target definitions and transitions. :class:`TraML` objects also support
indexing with entity IDs directly.
Data access
-----------
:py:class:`TraML` - a class representing a single TraML file.
Other data access functions use this class internally.
:py:func:`read` - iterate through transitions in TraML format.
:py:func:`chain` - read multiple TraML files at once.
:py:func:`chain.from_iterable` - read multiple files at once, using an
iterable of files.
Controlled Vocabularies and Caching
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
TraML relies on controlled vocabularies to describe its contents extensibly.
Every :py:class:`TraML` needs a copy of PSI-MS CV, which it handles using the :py:mod:`psims` library.
If you want to save time when creating instances of :py:class:`TraML`, consider enabling the :py:mod:`psims` cache.
See `psims documentation <https://mobiusklein.github.io/psims/docs/build/html/controlled_vocabulary/controlled_vocabulary.html#caching>`_
on how to enable and configure the cache (alternatively, you can handle CV creation yourself and pass a pre-created instance
using the `cv` parameter to :py:class:`TraML`).
See also
`Controlled Vocabulary Terms <../data.html#controlled-vocabulary-terms-in-structured-data>`_
for more details on how they are used.
Handling Time Units and Other Qualified Quantities
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
TraML contains information which may be described as using a variety of different time units.
See `Unit Handling <../data.html#unit-handling>`_ for more information.
Deprecated functions
--------------------
:py:func:`version_info` - get version information about the TraML file.
You can just read the corresponding attribute of the :py:class:`TraML` object.
:py:func:`iterfind` - iterate over elements in an TraML file.
You can just call the corresponding method of the :py:class:`TraML` object.
Dependencies
------------
This module requires :py:mod:`lxml`
-------------------------------------------------------------------------------
"""
# Copyright 2018 Joshua Klein, Lev Levitsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from . import xml, _schema_defaults, auxiliary as aux
class TraML(xml.CVParamParser, xml.MultiProcessingXML, xml.IndexSavingXML):
    """Parser class for TraML files.

    Iterates over ``Transition`` elements by default and supports random
    access by ID for the element types listed in ``_indexed_tags``.
    """
    file_format = 'TraML'
    _root_element = 'TraML'
    _default_schema = _schema_defaults._traml_schema_defaults
    _default_version = '1.0.0'
    _default_iter_tag = 'Transition'
    # Element types that are indexed by ID for random access.
    # ('Compound' was listed twice in the original set literal; the
    # duplicate is removed — set contents are unchanged.)
    _indexed_tags = {
        'Transition',
        'Peptide',
        'Compound',
        'Target',
        'Protein',
    }
    _element_handlers = xml.CVParamParser._element_handlers.copy()
    _element_handlers.update({
        'Modification': xml.CVParamParser._promote_empty_parameter_to_name,
        'Interpretation': xml.CVParamParser._promote_empty_parameter_to_name,
        'Software': xml.CVParamParser._promote_empty_parameter_to_name,
    })

    def __init__(self, *args, **kwargs):
        # Resolve references by default; callers can pass
        # retrieve_refs=False to disable.
        kwargs.setdefault('retrieve_refs', True)
        super(TraML, self).__init__(*args, **kwargs)

    def _get_info_smart(self, element, **kw):
        """Extract element info, recursing into subelements by default."""
        kwargs = dict(kw)
        rec = kwargs.pop('recursive', None)
        info = self._get_info(element, recursive=(rec if rec is not None else True), **kwargs)
        return info

    def _retrieve_refs(self, info, **kwargs):
        """Retrieves and embeds the data for each attribute in `info` that
        ends in `Ref`. Removes the id attribute from `info`."""
        # Iterate over a copy because `info` is mutated in the loop.
        for k, v in dict(info).items():
            if k[-3:] in {'Ref', 'ref'}:
                if isinstance(v, str):
                    key = v
                elif isinstance(v, dict):
                    key = v['ref']
                else:
                    # Not a resolvable reference value; just strip the
                    # 'Ref' suffix from the key (unless the key is 'ref').
                    if k != 'ref':
                        info[k[:-3]] = info.pop(k)
                    continue
                try:
                    by_id = self.get_by_id(key, retrieve_refs=True)
                except KeyError:
                    # Unresolvable references are kept as-is with a warning.
                    warnings.warn('Ignoring unresolved reference: ' + key)
                else:
                    if k == 'ref':
                        # A bare 'ref' merges the target's fields in place.
                        info.update(by_id)
                    else:
                        # by_id.pop('id', None)
                        info[k[:-3]] = by_id
                    del info[k]
def read(source, retrieve_refs=True, read_schema=False, iterative=True, use_index=False, huge_tree=False):
    """Parse `source` and iterate through transitions.

    Parameters
    ----------
    source : str or file
        A path to a target TraML file or the file object itself.
    retrieve_refs : bool, optional
        If :py:const:`True`, additional information from references will be
        automatically added to the results. The file processing time will
        increase. Default is :py:const:`True`.
    read_schema : bool, optional
        If :py:const:`True`, attempt to extract information from the XML schema
        mentioned in the TraML header. Otherwise, use default parameters.
        Not recommended without Internet connection or
        if you don't like to get the related warnings.
    iterative : bool, optional
        Defines whether iterative parsing should be used. It helps reduce
        memory usage at almost the same parsing speed. Default is
        :py:const:`True`.
    use_index : bool, optional
        Defines whether an index of byte offsets needs to be created for
        spectrum elements. Default is :py:const:`False`.
    huge_tree : bool, optional
        This option is passed to the `lxml` parser and defines whether
        security checks for XML tree depth and node size should be disabled.
        Default is :py:const:`False`.
        Enable this option for trusted files to avoid XMLSyntaxError exceptions
        (e.g. `XMLSyntaxError: xmlSAX2Characters: huge text node`).

    Returns
    -------
    out : TraML
        A :py:class:`TraML` object, suitable for iteration and possibly random access.
    """
    # Thin factory wrapper: collect the options and hand them to TraML.
    options = dict(
        retrieve_refs=retrieve_refs,
        read_schema=read_schema,
        iterative=iterative,
        use_index=use_index,
        huge_tree=huge_tree,
    )
    return TraML(source, **options)
def iterfind(source, path, **kwargs):
    """Parse `source` and yield info on elements with specified local
    name or by specified "XPath".

    .. note:: This function is provided for backward compatibility only.
        If you do multiple :py:func:`iterfind` calls on one file, you should
        create an :py:class:`TraML` object and use its
        :py:meth:`!iterfind` method.

    Parameters
    ----------
    source : str or file
        File name or file-like object.
    path : str
        Element name or XPath-like expression. Only local names separated
        with slashes are accepted. An asterisk (`*`) means any element.
        You can specify a single condition in the end, such as:
        ``"/path/to/element[some_value>1.5]"``
        Note: you can do much more powerful filtering using plain Python.
        The path can be absolute or "free". Please don't specify
        namespaces.
    recursive : bool, optional
        If :py:const:`False`, subelements will not be processed when
        extracting info from elements. Default is :py:const:`True`.
    iterative : bool, optional
        Specifies whether iterative XML parsing should be used. Iterative
        parsing significantly reduces memory usage and may be just a little
        slower. When `retrieve_refs` is :py:const:`True`, however, it is
        highly recommended to disable iterative parsing if possible.
        Default value is :py:const:`True`.
    read_schema : bool, optional
        If :py:const:`True`, attempt to extract information from the XML schema
        mentioned in the mzIdentML header. Otherwise, use default
        parameters. Not recommended without Internet connection or
        if you don't like to get the related warnings.

    Returns
    -------
    out : iterator
    """
    # Build a throwaway reader and delegate; kwargs are shared between
    # the constructor and the iterfind call, as before.
    reader = TraML(source, **kwargs)
    return reader.iterfind(path, **kwargs)
# Deprecated module-level helper: reports the TraML file's version info.
version_info = xml._make_version_info(TraML)
# chain/chain.from_iterable: read several TraML files as one iterator.
chain = aux.ChainBase._make_chain(TraML)
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/electrochem.py | pyteomics/electrochem.py | """
electrochem - electrochemical properties of polypeptides
========================================================
Summary
-------
This module is used to calculate the
electrochemical properties of polypeptide molecules.
The theory behind most of this module is based on the Henderson-Hasselbalch
equation and was thoroughly described in a number of sources [#Aronson]_,
[#Moore]_.
Briefly, the formula for the charge of a polypeptide in given pH is the following:
.. math::
    Q_{peptide} = \\sum{\\frac{Q_i}{1+10^{Q_i(pH-pK_i)}}},
where the sum is taken over all ionizable groups of the polypeptide, and
:math:`Q_i` is -1 and +1 for acidic and basic functional groups,
respectively.
Charge and pI functions
-----------------------
:py:func:`charge` - calculate the charge of a polypeptide
:py:func:`pI` - calculate the isoelectric point of a polypeptide
GRand AVerage of hYdropathicity (GRAVY)
---------------------------------------
:py:func:`gravy` - calculate the GRAVY index of a polypeptide
Data
----
:py:data:`pK_lehninger` - a set of pK from [#Lehninger]_.
:py:data:`pK_sillero` - a set of pK from [#Sillero]_.
:py:data:`pK_dawson` - a set of pK from [#Dawson]_, the pK values for NH2-
and -OH are taken from [#Sillero]_.
:py:data:`pK_rodwell` - a set of pK from [#Rodwell]_.
:py:data:`pK_bjellqvist` - a set of pK from [#Bjellqvist]_.
:py:data:`pK_nterm_bjellqvist` - a set of N-terminal pK from [#Bjellqvist]_.
:py:data:`pK_cterm_bjellqvist` - a set of C-terminal pK from [#Bjellqvist]_.
:py:data:`hydropathicity_KD` - a set of hydropathicity indexes from [#Kyte]_.
References
----------
.. [#Aronson] Aronson, J. N. The Henderson-Hasselbalch equation
revisited. Biochemical Education, 1983, 11 (2), 68.
`Link. <http://dx.doi.org/10.1016/0307-4412(83)90046-8>`_
.. [#Moore] Moore, D. S.. Amino acid and peptide net charges: A
simple calculational procedure. Biochemical Education, 1986, 13 (1), 10-12.
`Link. <http://dx.doi.org/10.1016/0307-4412(85)90114-1>`_
.. [#Lehninger] Nelson, D. L.; Cox, M. M. Lehninger Principles of
Biochemistry, Fourth Edition; W. H. Freeman, 2004; p. 1100.
.. [#Sillero] Sillero, A.; Ribeiro, J. Isoelectric points of proteins:
Theoretical determination. Analytical Biochemistry, 1989, 179 (2), 319-325.
`Link. <http://dx.doi.org/10.1016/0003-2697(89)90136-X>`_
.. [#Dawson] Dawson, R. M. C.; Elliot, D. C.; Elliot, W. H.; Jones, K. M.
Data for biochemical research. Oxford University Press, 1989; p. 592.
.. [#Rodwell] Rodwell, J. Heterogeneity of component bands in isoelectric
focusing patterns. Analytical Biochemistry, 1982, 119 (2), 440-449.
`Link. <http://dx.doi.org/10.1016/0003-2697(82)90611-X>`_
.. [#Bjellqvist] Bjellqvist, B., Basse, B., Olsen, E. and Celis, J.E.
Reference points for comparisons of two-dimensional maps of proteins from
different human cell types defined in a pH scale where isoelectric points
correlate with polypeptide compositions. Electrophoresis 1994, 15, 529-539.
`Link. <http://dx.doi.org/10.1002/elps.1150150171>`_
.. [#Kyte] Kyte, J.; Doolittle, R. F..
A simple method for displaying the hydropathic character of a protein.
Journal of molecular biology 1982, 157 (1), 105-32.
`Link. <https://doi.org/10.1016/0022-2836(82)90515-0>`_
-------------------------------------------------------------------------------
"""
# Copyright 2012 Anton Goloborodko, Lev Levitsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import parser
from .auxiliary import PyteomicsError
from collections import Counter
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
def charge(sequence, pH, **kwargs):
    """Calculate the charge of a polypeptide at the given pH (or at each pH
    in an iterable of pH values) using a table of amino acid electrochemical
    properties.

    .. warning::

        Be careful when supplying a list with a parsed sequence or a dict with
        amino acid composition as `sequence`. Such values must be obtained
        with enabled `show_unmodified_termini` option.

    .. warning::

        If you provide `pK_nterm` or `pK_cterm` and provide `sequence` as a dict,
        it is assumed that it was obtained with ``term_aa=True`` (see
        :py:func:`pyteomics.parser.amino_acid_composition` for details).

    Parameters
    ----------
    sequence : str or list or dict
        A string with a polypeptide sequence, a list with a parsed
        sequence or a dict of amino acid composition.
    pH : float or iterable of floats
        pH or iterable of pHs for which the charge is calculated.
    pK : dict {str: [(float, int), ...]}, optional
        A set of pK of amino acids' ionizable groups. Keys are amino acid
        labels and values are lists of (pK, charge_in_ionized_state) tuples,
        one tuple per ionizable group. The default value is `pK_lehninger`.
    pK_nterm : dict {str: [(float, int),]}, optional
    pK_cterm : dict {str: [(float, int),]}, optional
        Sets of pK of N-terminal and C-terminal (respectively) amino acids'
        ionizable groups, with the same structure as ``pK``. These values
        (if present) are used for the terminal residues. If given, `sequence`
        must be a :py:class:`str` or a :py:class:`list`. The default value
        is an empty dict.

    Returns
    -------
    out : float or list of floats
        A single charge when `pH` is a single value, otherwise a list of
        charges, one per pH.
    """
    composition, pK = _prepare_charge_dict(sequence, **kwargs)
    # A scalar pH is wrapped in a one-element list for the calculation and
    # unwrapped again before returning.
    scalar_input = not isinstance(pH, Iterable)
    pH_values = [pH] if scalar_input else pH
    charges = _charge_for_dict(composition, pH_values, pK)
    return charges[0] if scalar_input else charges
def _prepare_charge_dict(sequence, **kwargs):
nterm = cterm = n_aa = c_aa = None
pK = kwargs.get('pK', pK_lehninger).copy()
pK_nterm = kwargs.get('pK_nterm', {})
pK_cterm = kwargs.get('pK_cterm', {})
if isinstance(sequence, dict):
peptide_dict = sequence.copy()
for k, v in sequence.items():
if k[-1] == '-':
if v > 1 or nterm:
raise PyteomicsError(
'More that one N-terminal group in {}'.format(
sequence))
nterm = k
if k[0] == '-':
if v > 1 or cterm:
raise PyteomicsError(
'More that one C-terminal group in {}'.format(
sequence))
cterm = k
if k[:5] == 'nterm':
if v > 1 or n_aa:
raise PyteomicsError(
'More that one N-terminal residue in {}'.format(
sequence))
n_aa = k[5:]
peptide_dict[n_aa] = peptide_dict.get(n_aa, 0) + 1
if k[:5] == 'cterm':
if v > 1 or c_aa:
raise PyteomicsError(
'More that one C-terminal residue in {}'.format(
sequence))
c_aa = k[5:]
peptide_dict[c_aa] = peptide_dict.get(c_aa, 0) + 1
if nterm is None or cterm is None:
raise PyteomicsError('Peptide must have two explicit terminal groups')
if (n_aa is None or c_aa is None) and (pK_nterm or pK_cterm):
raise PyteomicsError('Two terminal residues must be present in '
'peptide (designated as "ntermX" and "ctermX", where "X" is '
'the one-letter residue label). Use '
'``term_aa=True`` when calling '
'`parser.amino_acid_composition`.')
elif isinstance(sequence, (str, list)):
if isinstance(sequence, str):
if sequence.isupper() and sequence.isalpha():
parsed_sequence = [parser.std_nterm] + list(sequence) + [parser.std_cterm]
else:
parsed_sequence = parser.parse(sequence, show_unmodified_termini=True)
elif isinstance(sequence, list):
if sequence[0][-1] != '-' or sequence[-1][0] != '-':
raise PyteomicsError('Parsed sequences must contain terminal '
'groups at 0-th and last positions.')
parsed_sequence = sequence
n_aa = parsed_sequence[1]
c_aa = parsed_sequence[-2]
nterm = parsed_sequence[0]
cterm = parsed_sequence[-1]
peptide_dict = Counter(parsed_sequence)
else:
raise PyteomicsError('Unsupported type of sequence: %s' % type(sequence))
if nterm in pK_nterm:
if n_aa in pK_nterm[nterm]:
pK[nterm] = pK_nterm[nterm][n_aa]
if cterm in pK_cterm:
if c_aa in pK_cterm[cterm]:
pK[cterm] = pK_cterm[cterm][c_aa]
return peptide_dict, pK
def _charge_for_dict(peptide_dict, pH_list, pK):
# Calculate the charge for each value of pH.
charge_list = []
for pH_value in pH_list:
charge = 0
for aa in peptide_dict:
for ionizable_group in pK.get(aa, []):
charge += peptide_dict[aa] * ionizable_group[1] * (
1. / (1. + 10 ** (ionizable_group[1] * (pH_value - ionizable_group[0]))))
charge_list.append(charge)
return charge_list
def pI(sequence, pI_range=(0.0, 14.0), precision_pI=0.01, **kwargs):
    """Calculate the isoelectric point of a polypeptide using a given set
    of amino acids' electrochemical properties.

    .. warning::

        Be careful when supplying a list with a parsed sequence or a dict with
        amino acid composition as `sequence`. Such values must be obtained
        with enabled `show_unmodified_termini` option.

    Parameters
    ----------
    sequence : str or list or dict
        A string with a polypeptide sequence, a list with a parsed
        sequence or a dict of amino acid composition.
    pI_range : tuple (float, float)
        The range of allowable pI values. Default is (0.0, 14.0).
    precision_pI : float
        The precision of the calculated pI. Default is 0.01.
    pK : dict {str: [(float, int), ...]}, optional
        A set of pK of amino acids' ionizable groups. Keys are amino acid
        labels and values are lists of (pK, charge_in_ionized_state) tuples,
        one per ionizable group. The default value is `pK_lehninger`.
    pK_nterm : dict {str: [(float, int),]}, optional
    pK_cterm : dict {str: [(float, int),]}, optional
        Sets of pK of N-terminal and C-terminal (respectively) amino acids'
        ionizable groups, with the same structure as ``pK``. If given,
        `sequence` must be a :py:class:`str` or a :py:class:`list`.
        The default value is an empty dict.

    Returns
    -------
    out : float
    """
    pK = kwargs.get('pK', pK_lehninger.copy())
    # Terminal-specific pK tables make sense only when the terminal residues
    # can be identified, i.e. for str/list input.
    if isinstance(sequence, (str, list)):
        pK_nterm = kwargs.get('pK_nterm', {})
        pK_cterm = kwargs.get('pK_cterm', {})
    else:
        pK_nterm = {}
        pK_cterm = {}
        if isinstance(sequence, dict) and (('pK_nterm' in kwargs) or ('pK_cterm' in kwargs)):
            raise PyteomicsError('Can not use terminal features for %s' % type(sequence))
    peptide_dict, pK = _prepare_charge_dict(sequence, pK=pK, pK_cterm=pK_cterm, pK_nterm=pK_nterm)

    # Bisection: charge(pH) is monotonic, so home in on the zero crossing.
    lo, hi = pI_range
    charge_lo = _charge_for_dict(peptide_dict, [lo], pK)[0]
    charge_hi = _charge_for_dict(peptide_dict, [hi], pK)[0]
    while (hi - lo) > precision_pI:
        if charge_lo * charge_hi > 0:
            # No sign change inside the interval: return the endpoint whose
            # charge is closest to zero.
            return lo if abs(charge_lo) < abs(charge_hi) else hi
        mid = 0.5 * (lo + hi)
        charge_mid = _charge_for_dict(peptide_dict, [mid], pK)[0]
        if charge_mid * charge_lo < 0:
            hi, charge_hi = mid, charge_mid
        else:
            lo, charge_lo = mid, charge_mid
    return 0.5 * (lo + hi)
# Each pK table maps a residue or terminal-group label to a list of
# (pK, charge_in_ionized_state) tuples, one per ionizable group.
# 'H-' denotes the free N-terminal amino group and '-OH' the free
# C-terminal carboxyl group.
pK_lehninger = {
    'E': [(4.25, -1)],
    'R': [(12.48, 1)],
    'Y': [(10.07, -1)],
    'D': [(3.65, -1)],
    'H': [(6.00, +1)],
    'K': [(10.53, +1)],
    'C': [(8.18, -1)],
    'H-': [(9.69, +1)],
    '-OH': [(2.34, -1)],
}
"""A set of pK from Nelson, D. L.; Cox, M. M. Lehninger Principles of
Biochemistry, Fourth Edition; W. H. Freeman, 2004; p. 1100.
"""

pK_sillero = {
    'E': [(4.5, -1)],
    'R': [(12.0, +1)],
    'Y': [(10.0, -1)],
    'D': [(4.0, -1)],
    'H': [(6.4, +1)],
    'K': [(10.4, +1)],
    'C': [(9.0, -1)],
    'H-': [(8.2, +1)],
    '-OH': [(3.2, -1)],
}
"""A set of pK from Sillero, A.; Ribeiro, J. Isoelectric points of proteins:
Theoretical determination. Analytical Biochemistry, vol. 179 (2), pp. 319-325,
1989.
"""

pK_dawson = {
    'E': [(4.3, -1)],
    'R': [(12.0, +1)],
    'Y': [(10.1, -1)],
    'D': [(3.9, -1)],
    'H': [(6.0, +1)],
    'K': [(10.5, +1)],
    'C': [(8.3, -1)],
    'H-': [(8.2, +1)],
    '-OH': [(3.2, -1)],
}
"""A set of pK from Dawson, R. M. C.; Elliot, D. C.; Elliot, W. H.; Jones,
K. M. Data for biochemical research. Oxford University Press, 1989; p. 592.
pKs for NH2- and -OH are taken from `pK_sillero`.
"""

pK_rodwell = {
    'E': [(4.25, -1)],
    'R': [(11.5, +1)],
    'Y': [(10.7, -1)],
    'D': [(3.86, -1)],
    'H': [(6.0, +1)],
    'K': [(11.5, +1)],
    'C': [(8.33, -1)],
    'H-': [(8.0, +1)],
    '-OH': [(3.1, -1)],
}
"""A set of pK from Rodwell, J. Heterogeneity of component bands in
isoelectric focusing patterns. Analytical Biochemistry, vol. 119 (2),
pp. 440-449, 1982.
"""

pK_bjellqvist = {
    'E': [(4.45, -1)],
    'R': [(12.0, +1)],
    'Y': [(10.0, -1)],
    'D': [(4.05, -1)],
    'H': [(5.98, +1)],
    'K': [(10.0, +1)],
    'C': [(9.0, -1)],
    'H-': [(7.5, +1)],
    '-OH': [(3.55, -1)],
}
"""
A set of pK from Bjellqvist, B., Basse, B., Olsen, E. and Celis, J.E.
Reference points for comparisons of two-dimensional maps of proteins from
different human cell types defined in a pH scale where isoelectric points
correlate with polypeptide compositions. Electrophoresis 1994, 15, 529-539.
"""

# The terminal-specific tables below are nested one level deeper:
# {terminal group label: {adjacent residue label: [(pK, charge), ...]}}.
# They are consumed via the ``pK_nterm``/``pK_cterm`` keyword arguments of
# :py:func:`charge` and :py:func:`pI`.
pK_nterm_bjellqvist = {
    'H-': {
        'A': [(7.59, +1)],
        'M': [(7.0, +1)],
        'S': [(6.93, +1)],
        'P': [(8.36, +1)],
        'T': [(6.82, +1)],
        'V': [(7.44, +1)],
        'E': [(7.7, +1)]
    }
}
"""
A set of N-terminal pK from Bjellqvist, B., Basse, B., Olsen, E. and Celis, J.E.
Reference points for comparisons of two-dimensional maps of proteins from
different human cell types defined in a pH scale where isoelectric points
correlate with polypeptide compositions. Electrophoresis 1994, 15, 529-539.
"""

pK_cterm_bjellqvist = {
    '-OH': {
        'D': [(4.55, -1)],
        'E': [(4.75, -1)]
    }
}
"""
A set of C-terminal pK from Bjellqvist, B., Basse, B., Olsen, E. and Celis, J.E.
Reference points for comparisons of two-dimensional maps of proteins from
different human cell types defined in a pH scale where isoelectric points
correlate with polypeptide compositions. Electrophoresis 1994, 15, 529-539.
"""
hydropathicity_KD = {
    "A": 1.800,
    "R": -4.500,
    "N": -3.500,
    "D": -3.500,
    "C": 2.500,
    "Q": -3.500,
    "E": -3.500,
    "G": -0.400,
    "H": -3.200,
    "I": 4.500,
    "L": 3.800,
    "K": -3.900,
    "M": 1.900,
    "F": 2.800,
    "P": -1.600,
    "S": -0.800,
    "T": -0.700,
    "W": -0.900,
    "Y": -1.300,
    "V": 4.200,
}
"""
A set of hydropathicity indexes obtained from Kyte J., Doolittle F. J. Mol. Biol. 157:105-132 (1982).
"""


def gravy(sequence, hydropathicity=hydropathicity_KD):
    """
    Calculate GRand AVerage of hYdropathicity (GRAVY) index for amino acid sequence.

    Parameters
    ----------
    sequence : str
        Polypeptide sequence in one-letter format.
    hydropathicity : dict, optional
        Hydropathicity indexes of amino acids. Default is :py:data:`hydropathicity_KD`.

    Returns
    -------
    out : float
        GRand AVerage of hYdropathicity (GRAVY) index.

    Raises
    ------
    PyteomicsError
        If `sequence` contains a residue missing from `hydropathicity`.
        (An empty sequence raises :py:exc:`ZeroDivisionError`.)

    Examples
    --------
    >>> round(gravy('PEPTIDE'), 4)
    -1.4143
    """
    # NOTE: the previous doctest claimed gravy('PEPTIDE') == -1.4375, which
    # contradicts the table above (sum is -9.9 over 7 residues, ~= -1.4143).
    try:
        return sum(hydropathicity[aa] for aa in sequence) / len(sequence)
    except KeyError as e:
        raise PyteomicsError("Hydropathicity for amino acid {} not provided.".format(e.args[0]))
# Running the module directly executes the doctests embedded in the
# docstrings above.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/usi.py | pyteomics/usi.py | """
usi - Universal Spectrum Identifier (USI) parser and minimal PROXI client
=========================================================================
Summary
-------
`USI <http://www.psidev.info/usi>`_ is a standardized method of referencing a specific
spectrum in a dataset, possibly attached to an interpretation. This module includes a
:class:`USI` type which can represent these constructs, :meth:`~USI.parse` them and
reconstruct them.
One use-case for USI is to request spectrum information from a `PROXI <http://www.psidev.info/proxi>`_
service host. PROXI services are available from several of the major national proteomics data hosts,
including MassIVE, PeptideAtlas, PRIDE, and jPOST.
.. seealso::
LeDuc, Richard D., Eric W. Deutsch, Pierre-Alain Binz, Ryan T. Fellers, Anthony J. Cesnik,
Joshua A. Klein, Tim Van Den Bossche, et al.
"Proteomics Standards Initiative's ProForma 2.0: Unifying the Encoding of Proteoforms and Peptidoforms."
ArXiv:2109.11352 [q-Bio], September 23, 2021. http://arxiv.org/abs/2109.11352.
Data access
-----------
:py:class:`USI` for representing Universal Spectrum Identifiers. Call :meth:`USI.parse` to parse a USI
string.
:py:func:`proxi` to request a USI from a remote service. Provides access to the PeptideAtlas, MassIVE,
PRIDE and jPOST hosts.
"""
import json
import warnings
import threading
import multiprocessing
from collections import namedtuple, defaultdict
# Optional thread pool used by PROXIAggregator for concurrent requests;
# when unavailable, requests are made serially.
try:
    from multiprocessing.dummy import Pool as ThreadPool
except ImportError:
    ThreadPool = None
# Python 2/3 compatibility for HTTP requests.
try:
    from urllib2 import Request, urlopen
except ImportError:
    from urllib.request import Request, urlopen
# Peak arrays are numpy arrays when numpy is installed, plain lists otherwise.
try:
    import numpy as np
    def coerce_array(array_data):
        # Convert an iterable of (string) numbers to a float numpy array.
        return np.array([float(v) for v in array_data])
except ImportError:
    def coerce_array(array_data):
        # numpy-free fallback: a plain list of floats.
        return [float(v) for v in array_data]
from .auxiliary import PyteomicsError
class USI(namedtuple("USI", ['protocol', 'dataset', 'datafile', 'scan_identifier_type', 'scan_identifier', 'interpretation'])):
    '''A Universal Spectrum Identifier (USI).

    .. note::
        This implementation captures the interpretation component but does not
        interpret it at this time.

    Attributes
    ----------
    protocol: str
        The protocol to use to access the data (usually mzspec)
    dataset: str
        The name or accession number for the dataset the spectrum resides in
    datafile: str
        The basename of the data file from :attr:`dataset` to retrieve the spectrum from
    scan_identifier_type: str
        The format of the scan identifier, one of (scan, index, nativeId, trace)
    scan_identifier: str
        A usually numerical but potentially comma separated value encoded as a string to uniquely
        identify the spectrum to be recovered from :attr:`datafile` in :attr:`dataset`.
    interpretation: str
        The trailing material of the USI, such as the ProForma peptide sequence and charge
    '''

    def __str__(self):
        # Omit trailing components that are None (e.g. a missing interpretation).
        return ':'.join(part for part in self if part is not None)

    @classmethod
    def parse(cls, usi):
        '''Parse a USI string into a :class:`USI` object.

        Parameters
        ----------
        usi: str
            The USI string to parse

        Returns
        -------
        USI
        '''
        fields = _usi_parser(str(usi))
        return cls(*fields)
def cast_numeric(value):
    """Convert `value` to :class:`int` if possible, otherwise to
    :class:`float`; return it unchanged when neither conversion succeeds."""
    for converter in (int, float):
        try:
            return converter(value)
        except ValueError:
            pass
    return value
def _usi_parser(usi):
tokens = usi.split(":", 5)
protocol = tokens[0]
dataset = tokens[1]
datafile = tokens[2]
scan_identifier_type = tokens[3]
scan_identifier = tokens[4]
try:
interpretation = tokens[5]
except IndexError:
interpretation = None
return (protocol, dataset, datafile, scan_identifier_type, scan_identifier, interpretation)
class _PROXIBackend(object):
'''A base class for all PROXI backends to implement the gory details of HTTP requests
and protocol parsing.
If special processing needs to be done to interpret the spectrum returned from the service
provider, override the :meth:`_coerce` method.
If extra information needs to be provided to the service provider for them to fulfill the
request not passed through the URL, override the :meth:`_request` method.
Attributes
----------
name: str
The name of the backend service
url_template: str
The URL with {} fields to populate with the USI and any other relevant options, like protocol version
or the like.
options: dict
Additional options to be used when preparing the request URL.
'''
def __init__(self, name, url_template, **kwargs):
kwargs.setdefault('version', '0.1')
self.name = name
self.url_template = url_template
self.options = kwargs
def __repr__(self):
return "{self.__class__.__name__}({self.options})".format(self=self)
def _request(self, usi):
url = self.url_template.format(usi=usi, **self.options)
req = Request(url)
response = urlopen(req)
if response.getcode() != 200:
raise ValueError("PROXI Service Response Code %r" % (response.getcode()))
data = response.read().decode("utf-8")
data = json.loads(data)
return data
def get(self, usi):
'''Retrieve a ``USI`` from the host PROXI service over the network.
Parameters
----------
usi : str or :class:`USI`
The universal spectrum identifier to retrieve.
Returns
-------
dict:
The spectrum as represented by the requested PROXI host.
'''
data = self._request(usi)
result = self._coerce(data)
return result
def _coerce(self, data):
'''Override and extend this method to change how the spectrum information is refined.
This implementation just deals with properly formatting the peak arrays and doing minor
cosmetic name normalization.
Parameters
----------
data: dict
The raw mzSpecML representation parsed from JSON
Returns
-------
dict:
The coerced spectrum data of appropriate types
'''
if isinstance(data, list):
data_collection = data
data = data_collection[0]
result = {}
result['attributes'] = data.pop('attributes', [])
for attrib in result['attributes']:
if 'value' in attrib and isinstance(attrib['value'], str) and attrib['value'][0].isdigit():
try:
attrib['value'] = cast_numeric(attrib['value'])
except TypeError:
continue
result['m/z array'] = coerce_array(data.pop('mzs', []))
result['intensity array'] = coerce_array(data.pop('intensities', []))
for key, value in data.items():
if key in result:
raise ValueError(
"Attempting to set explicit value for {key!r}".format(key=key))
result[key] = value
return result
def __call__(self, usi):
return self.get(usi)
class PeptideAtlasBackend(_PROXIBackend):
    '''PROXI backend for the PeptideAtlas service.'''

    _url_template = "http://www.peptideatlas.org/api/proxi/v{version}/spectra?resultType=full&usi={usi!s}"

    def __init__(self, **kwargs):
        super(PeptideAtlasBackend, self).__init__('PeptideAtlas', self._url_template, **kwargs)
class MassIVEBackend(_PROXIBackend):
    '''PROXI backend for the MassIVE service.'''

    _url_template = "http://massive.ucsd.edu/ProteoSAFe/proxi/v{version}/spectra?resultType=full&usi={usi}"

    def __init__(self, **kwargs):
        super(MassIVEBackend, self).__init__('MassIVE', self._url_template, **kwargs)
class PRIDEBackend(_PROXIBackend):
    '''PROXI backend for the PRIDE archive.'''

    _url_template = "http://www.ebi.ac.uk/pride/proxi/archive/v{version}/spectra?resultType=full&usi={usi}"

    def __init__(self, **kwargs):
        super(PRIDEBackend, self).__init__('PRIDE', self._url_template, **kwargs)
class JPOSTBackend(_PROXIBackend):
    '''PROXI backend for jPOST. The jPOST endpoint URL is not versioned, so
    the ``version`` option stored by the base class is simply unused here.'''

    _url_template = 'https://repository.jpostdb.org/proxi/spectra?resultType=full&usi={usi}'

    def __init__(self, **kwargs):
        super(JPOSTBackend, self).__init__('jPOST', self._url_template, **kwargs)
        # A previous `kwargs.pop("version", None)` here was dead code: the base
        # class stores the kwargs dict it received, so popping from this local
        # `kwargs` after construction had no effect. Removed.
class ProteomeExchangeBackend(_PROXIBackend):
    '''PROXI backend for ProteomeCentral/ProteomeXchange.'''

    _url_template = 'http://proteomecentral.proteomexchange.org/api/proxi/v{version}/spectra?resultType=full&usi={usi!s}'

    def __init__(self, **kwargs):
        super(ProteomeExchangeBackend, self).__init__('ProteomeExchange', self._url_template, **kwargs)
class PROXIAggregator(object):
    '''Aggregate requests across multiple PROXI servers.

    Will attempt to coalesce responses from responding servers into a single spectrum
    representation.

    Attributes
    ----------
    backends : :class:`dict` mapping :class:`str` to :class:`_PROXIBackend`
        The backend servers to query. Defaults to the set of all available backends.
    n_threads : int
        The number of threads to run concurrently while making requests. Defaults
        to the number of servers to query.
    timeout : float
        The number of seconds to wait for a response.
    merge : bool
        Whether :meth:`get` coalesces responses into one spectrum (:const:`True`)
        or returns one tagged response per host.
    ephemeral_pool : bool
        Whether or not to tear down the thread pool between requests.
    '''

    # Currently the only supported coalescence strategy.
    _coalesce_resolution_methods = ("first", )

    def __init__(self, backends=None, n_threads=None, timeout=15, merge=True, ephemeral_pool=True, **kwargs):
        # NOTE(review): extra **kwargs are accepted but ignored here.
        if backends is None:
            backends = {k: v() for k, v in _proxies.items()}
        if n_threads is None:
            n_threads = len(backends)
        self.lock = threading.RLock()
        self.timeout = timeout
        self.backends = backends
        self.n_threads = n_threads
        self.ephemeral_pool = ephemeral_pool
        self.pool = None
        self.merge = merge

    def _init_pool(self):
        '''Lazily create the thread pool; return whether a pool is usable.'''
        if ThreadPool is None:
            return False
        if self.pool is not None:
            return True
        with self.lock:
            if self.pool is None:
                self.pool = ThreadPool(self.n_threads)
        return True

    def _clean_up_pool(self):
        if self.pool:
            self.pool.close()
            self.pool.terminate()
            self.pool = None

    def _fetch_usi(self, usi):
        '''Query every backend for `usi`; return a list of (backend, result)
        pairs where result is either a response dict or an Exception.'''
        use_pool = self._init_pool()
        agg = []
        if use_pool:
            with self.lock:
                # Dispatch all requests concurrently, then collect each result
                # with a per-request timeout.
                for backend in self.backends.values():
                    result = self.pool.apply_async(backend.get, (usi, ))
                    agg.append((backend, result))
                tmp = []
                for backend, res in agg:
                    try:
                        res = res.get(self.timeout)
                        tmp.append((backend, res))
                    except (multiprocessing.TimeoutError, Exception) as err:
                        # Keep the error in place of a response; downstream
                        # consumers check for Exception instances.
                        tmp.append((backend, err))
                agg = tmp
                if self.ephemeral_pool:
                    self._clean_up_pool()
        else:
            # Serial fallback when no thread pool is available.
            for backend in self.backends.values():
                try:
                    # BUG FIX: this previously called
                    # ``agg.append(backend, backend.get(usi))``, passing two
                    # arguments to list.append and raising TypeError.
                    agg.append((backend, backend.get(usi)))
                except Exception as err:
                    agg.append((backend, err))
                    continue
        return agg

    def coalesce(self, responses, method='first'):
        '''Merge responses from disparate servers into a single spectrum representation.

        The merging process will use the first of every array encountered, and all unique
        attributes.

        Parameters
        ----------
        responses : list
            A list of response values, pairs (:class:`_PROXIBackend` and either
            :class:`dict` or :class:`Exception`).
        method : str
            The name of the coalescence technique to use. Currently only "first" is
            supported.

        Returns
        -------
        result : :class:`dict`
            The coalesced spectrum

        Raises
        ------
        ValueError
            If `method` is unknown, or no server produced a valid response.
        '''
        if method not in self._coalesce_resolution_methods:
            raise ValueError("Coalescence method %r not recognized" % (method, ))

        def collapse_attribute(values):
            # Deduplicate attribute values, falling back to a linear scan
            # when values are unhashable (e.g. lists).
            try:
                acc = list({v.get('value', '') for v in values})
            except TypeError:
                acc = []
                for v in values:
                    if v['value'] not in acc:
                        acc.append(v['value'])
            result = []
            template = values[0].copy()
            for v in acc:
                t = template.copy()
                t['value'] = v
                result.append(t)
            return result

        arrays = {}
        attributes = defaultdict(list)
        found = []
        error = []
        for backend, response in responses:
            if isinstance(response, Exception):
                error.append((backend.name, response))
                continue
            else:
                found.append(backend.name)
                for array_name in ('m/z array', 'intensity array'):
                    if array_name not in arrays:
                        # "first" strategy: keep the first array seen.
                        arrays[array_name] = response[array_name]
                    else:
                        array = response[array_name]
                        if len(array) != len(arrays[array_name]):
                            warnings.warn("Length mismatch from %s for %s" %
                                          (backend.name, array_name))
                            arrays[array_name] = max((array, arrays[array_name]), key=len)
                        elif not np.allclose(array, arrays[array_name]):
                            warnings.warn("Value mismatch from %s for %s" %
                                          (backend.name, array_name))
                for attr in response['attributes']:
                    attributes[attr.get('accession', attr.get('name'))].append(attr)
        finalized_attributes = []
        for k, v in attributes.items():
            finalized_attributes.extend(collapse_attribute(v))
        result = {"responders": found, 'errors': error, 'attributes': finalized_attributes}
        result.update(arrays)
        if 'm/z array' not in result:
            raise ValueError("No valid responses found")
        return result

    def tag_with_source(self, responses):
        '''Mark each response with its source.

        Parameters
        ----------
        responses : list
            A list of response values, pairs (:class:`_PROXIBackend` and either
            :class:`dict` or :class:`Exception`).

        Returns
        -------
        result : list[dict]
            The tagged :class:`dict` for each response.
        '''
        output = []
        for backend, response in responses:
            if isinstance(response, dict):
                response['source'] = backend
            else:
                response = {
                    "source": backend,
                    "error": response
                }
            output.append(response)
        return output

    def get(self, usi):
        '''Retrieve a ``USI`` from each PROXI service over the network.

        Parameters
        ----------
        usi : str or :class:`USI`
            The universal spectrum identifier to retrieve.

        Returns
        -------
        result : dict or list[dict]
            The spectrum coalesced from all responding PROXI hosts if :attr:`merge` is :const:`True`,
            or a list of responses marked by host.
        '''
        agg = self._fetch_usi(usi)
        if self.merge:
            return self.coalesce(agg)
        else:
            return self.tag_with_source(agg)

    def __call__(self, usi):
        return self.get(usi)

    def __del__(self):
        self._clean_up_pool()
# Registry of available PROXI backends by short name; used by :func:`proxi`.
_proxies = {
    "peptide_atlas": PeptideAtlasBackend,
    "massive": MassIVEBackend,
    "pride": PRIDEBackend,
    "jpost": JPOSTBackend,
    'proteome_exchange': ProteomeExchangeBackend,
}

# Backend used by :func:`proxi` when none is specified.
default_backend = 'peptide_atlas'

# Special backend name routing requests through the shared aggregator below.
AGGREGATOR_KEY = "aggregator"
# Shared aggregator instance, created at import time (no network traffic
# happens until it is actually used).
AGGREGATOR = PROXIAggregator()
def proxi(usi, backend=default_backend, **kwargs):
    '''Retrieve a ``USI`` from a `PROXI <http://www.psidev.info/proxi>`.

    Parameters
    ----------
    usi : str or :class:`USI`
        The universal spectrum identifier to request.
    backend : str or :class:`Callable`
        Either the name of a PROXI host (peptide_atlas, massive, pride, jpost, or aggregator),
        or a callable object (which :class:`_PROXIBackend` instances are) which will be used
        to resolve the USI. The "aggregator" backend will use a :class:`PROXIAggregator` instance
        which will request the same USI from all the registered servers and attempt to merge their
        responses into a single whole. See :meth:`PROXIAggregator.coalesce` for more details on the
        merging process.
    **kwargs:
        extra arguments passed when constructing the backend by name.

    Returns
    -------
    dict :
        The spectrum as represented by the requested PROXI host.
    '''
    # Resolve `backend` into a callable: by registry name, by backend class,
    # or by taking an already-callable object as-is.
    if isinstance(backend, str):
        if backend == AGGREGATOR_KEY:
            resolver = AGGREGATOR
        elif backend in _proxies:
            resolver = _proxies[backend](**kwargs)
        else:
            raise PyteomicsError("Unknown PROXI backend name: {}.".format(backend))
    elif isinstance(backend, type) and issubclass(backend, (_PROXIBackend, PROXIAggregator)):
        resolver = backend(**kwargs)
    elif callable(backend):
        resolver = backend
    else:
        raise TypeError("Unrecognized backend type: {0.__name__}".format(type(backend)))
    return resolver(usi)
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/openms/idxml.py | pyteomics/openms/idxml.py | """
idxml - idXML file reader
=========================
Summary
-------
**idXML** is a format specified in the
`OpenMS <http://open-ms.sourceforge.net/about/>`_ project.
It defines a list of peptide identifications.
This module provides a minimalistic way to extract information from idXML
files. You can use the old functional interface (:py:func:`read`) or the new
object-oriented interface (:py:class:`IDXML`) to iterate over entries in
``<PeptideIdentification>`` elements. Note that each entry can contain more than one PSM
(peptide-spectrum match). They are accessible with ``'PeptideHit'`` key.
:py:class:`IDXML` objects also support direct indexing by element ID.
Data access
-----------
:py:class:`IDXML` - a class representing a single idXML file.
Other data access functions use this class internally.
:py:func:`read` - iterate through peptide-spectrum matches in an idXML
file. Data from a single PSM group are converted to a human-readable dict.
Basically creates an :py:class:`IDXML` object and reads it.
:py:func:`chain` - read multiple files at once.
:py:func:`chain.from_iterable` - read multiple files at once, using an
iterable of files.
:py:func:`DataFrame` - read idXML files into a :py:class:`pandas.DataFrame`.
Target-decoy approach
---------------------
:py:func:`filter` - read a chain of idXML files and filter to a certain
FDR using TDA.
:py:func:`filter.chain` - chain a series of filters applied independently to
several files.
:py:func:`filter.chain.from_iterable` - chain a series of filters applied
independently to an iterable of files.
:py:func:`filter_df` - filter idXML files and return a :py:class:`pandas.DataFrame`.
:py:func:`is_decoy` - determine if a "SpectrumIdentificationResult" should be
considered decoy.
:py:func:`fdr` - estimate the false discovery rate of a set of identifications
using the target-decoy approach.
:py:func:`qvalues` - get an array of scores and local FDR values for a PSM
set using the target-decoy approach.
Deprecated functions
--------------------
:py:func:`version_info` - get information about idXML version and schema.
You can just read the corresponding attribute of the :py:class:`IDXML`
object.
:py:func:`get_by_id` - get an element by its ID and extract the data from it.
You can just call the corresponding method of the :py:class:`IDXML`
object.
:py:func:`iterfind` - iterate over elements in an idXML file.
You can just call the corresponding method of the :py:class:`IDXML`
object.
Dependencies
------------
This module requires :py:mod:`lxml`.
-------------------------------------------------------------------------------
"""
# Copyright 2020 Lev Levitsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from .. import auxiliary as aux
from .. import xml, _schema_defaults
class IDXML(xml.IndexedXML):
    """Parser class for idXML files."""
    # Format metadata consumed by the generic XML machinery in pyteomics.xml.
    file_format = 'idXML'
    _root_element = 'IdXML'
    _default_schema = _schema_defaults._idxml_schema_defaults
    _default_version = '1.5'
    _default_iter_tag = 'PeptideIdentification'
    _structures_to_flatten = {}
    _indexed_tags = {'ProteinHit'}
    _schema_location_param = 'noNamespaceSchemaLocation'

    def __init__(self, *args, **kwargs):
        # Unlike the base class, references are resolved by default for idXML.
        kwargs.setdefault('retrieve_refs', True)
        super(IDXML, self).__init__(*args, **kwargs)

    def _get_info_smart(self, element, **kwargs):
        """Extract the info in a smart way depending on the element type"""
        name = xml._local_name(element)
        kwargs = dict(kwargs)
        rec = kwargs.pop("recursive", None)
        # Try not to recursively unpack the root element
        # unless the user really wants to.
        if name == self._root_element:
            info = self._get_info(element, recursive=(rec if rec is not None else False), **kwargs)
        else:
            info = self._get_info(element, recursive=(rec if rec is not None else True), **kwargs)
        # Convert two-element 'start'/'end' values whose first item is a
        # space-separated string into lists of ints.
        for k in ['start', 'end']:
            v = info.get(k)
            if isinstance(v, list) and len(v) == 2:
                info[k] = [int(x) for x in v[0].split()]
        # 'aa_before'/'aa_after' are space-separated strings; split into lists.
        for k in ['aa_before', 'aa_after']:
            if k in info:
                info[k] = info[k].split()
        return info

    def _retrieve_refs(self, info, **kwargs):
        """Retrieves and embeds the data for each attribute in `info` that
        ends in _refs. Removes the id attribute from `info`"""
        for k, v in dict(info).items():
            if k[-5:] == '_refs':
                try:
                    by_id = [self.get_by_id(x, retrieve_refs=True) for x in v.split()]
                except KeyError:
                    # NOTE(review): on an unresolved reference the '_refs' key
                    # is still deleted below without a replacement, so the
                    # reference data is silently dropped after the warning.
                    warnings.warn('Ignoring unresolved reference: ' + v)
                else:
                    for x in by_id:
                        x.pop('id', None)
                    # Store resolved elements under the key without '_refs'.
                    info[k[:-5]] = by_id
                del info[k]
def read(source, **kwargs):
    """Parse `source` and iterate through peptide-spectrum matches.

    .. note:: This function is provided for backward compatibility only.
              It simply creates an :py:class:`IDXML` instance using
              provided arguments and returns it.

    Parameters
    ----------
    source : str or file
        A path to a target IDXML file or the file object itself.
    recursive : bool, optional
        If :py:const:`False`, subelements will not be processed when
        extracting info from elements. Default is :py:const:`True`.
    retrieve_refs : bool, optional
        If :py:const:`True`, additional information from references will be
        automatically added to the results. The file processing time will
        increase. Default is :py:const:`True`.
    iterative : bool, optional
        Specifies whether iterative XML parsing should be used. Iterative
        parsing significantly reduces memory usage and may be just a little
        slower. When `retrieve_refs` is :py:const:`True`, however, it is
        highly recommended to disable iterative parsing if possible.
        Default value is :py:const:`True`.
    read_schema : bool, optional
        If :py:const:`True`, attempt to extract information from the XML schema
        mentioned in the IDXML header (default). Otherwise, use default
        parameters. Disable this to avoid waiting on slow network connections or
        if you don't like to get the related warnings.
    build_id_cache : bool, optional
        Defines whether a cache of element IDs should be built and stored on the
        created :py:class:`IDXML` instance. Default value is the value of
        `retrieve_refs`.

        .. note:: This parameter is ignored when ``use_index`` is ``True`` (default).
    use_index : bool, optional
        Defines whether an index of byte offsets needs to be created for
        the indexed elements. If :py:const:`True` (default), `build_id_cache` is ignored.
    indexed_tags : container of bytes, optional
        Defines which elements need to be indexed. Empty set by default.

    Returns
    -------
    out : IDXML
        An iterator over the dicts with PSM properties.
    """
    # Work on a copy so the caller's kwargs dict is never mutated.
    options = kwargs.copy()
    options.setdefault('retrieve_refs', True)
    # build_id_cache defaults to whatever retrieve_refs resolved to.
    options['build_id_cache'] = options.get('build_id_cache', options.get('retrieve_refs'))
    return IDXML(source, **options)
def iterfind(source, path, **kwargs):
    """Yield info dicts for elements of `source` that match `path`
    (a local name or an XPath-like expression).

    .. note:: Kept for backward compatibility only. For repeated
        :py:func:`iterfind` calls on one file, create an
        :py:class:`IDXML` object and use its :py:meth:`!iterfind` method.

    Parameters
    ----------
    source : str or file
        File name or file-like object.
    path : str
        Element name or XPath-like expression. Only local names separated
        with slashes are accepted. An asterisk (`*`) means any element.
        You can specify a single condition in the end, such as:
        ``"/path/to/element[some_value>1.5]"``
        Note: you can do much more powerful filtering using plain Python.
        The path can be absolute or "free". Please don't specify
        namespaces.
    recursive : bool, optional
        If :py:const:`False`, subelements will not be processed when
        extracting info from elements. Default is :py:const:`True`.
    retrieve_refs : bool, optional
        If :py:const:`True`, additional information from references will be
        automatically added to the results. The file processing time will
        increase. Default is :py:const:`False`.
    iterative : bool, optional
        Specifies whether iterative XML parsing should be used. Iterative
        parsing significantly reduces memory usage and may be just a little
        slower. When `retrieve_refs` is :py:const:`True`, however, it is
        highly recommended to disable iterative parsing if possible.
        Default value is :py:const:`True`.
    read_schema : bool, optional
        If :py:const:`True`, attempt to extract information from the XML
        schema mentioned in the IDXML header (default). Otherwise, use
        default parameters. Disable this to avoid waiting on slow network
        connections or if you don't like to get the related warnings.
    build_id_cache : bool, optional
        Defines whether a cache of element IDs should be built and stored on
        the created :py:class:`IDXML` instance. Default value is the value
        of `retrieve_refs`.

    Returns
    -------
    out : iterator
    """
    opts = dict(kwargs)
    # Unless explicitly given, build_id_cache follows retrieve_refs.
    if 'build_id_cache' not in opts:
        opts['build_id_cache'] = opts.get('retrieve_refs')
    parser = IDXML(source, **opts)
    return parser.iterfind(path, **opts)
# Module-level helper built for IDXML; presumably extracts (version, schema)
# info from a file header -- see xml._make_version_info for the exact contract.
version_info = xml._make_version_info(IDXML)
def get_by_id(source, elem_id, **kwargs):
    """Parse `source` and return the element whose `id` attribute equals
    `elem_id`. Returns :py:const:`None` when no such element is found.

    .. note:: Kept for backward compatibility only. For repeated
        :py:func:`get_by_id` calls on one file, create an
        :py:class:`IDXML` object and use its :py:meth:`!get_by_id` method.

    Parameters
    ----------
    source : str or file
        A path to a target idXML file or the file object itself.
    elem_id : str
        The value of the `id` attribute to match.

    Returns
    -------
    out : :py:class:`dict` or :py:const:`None`
    """
    parser = IDXML(source, **kwargs)
    return parser.get_by_id(elem_id, **kwargs)
# Reader that concatenates several idXML files into one iterator
# (generated from the IDXML class; see aux.ChainBase._make_chain).
chain = aux.ChainBase._make_chain(IDXML)
def is_decoy(psm, prefix=None):
    """Report whether a PSM is marked as a decoy match.

    Only the first 'PeptideHit' of the PSM is inspected.

    Parameters
    ----------
    psm : dict
        A dict, as yielded by :py:func:`read`.
    prefix : ignored

    Returns
    -------
    out : bool
        :py:const:`True` for decoy PSMs, :py:const:`False` otherwise.
    """
    first_hit = psm['PeptideHit'][0]
    return first_hit['target_decoy'] == 'decoy'
def DataFrame(*args, **kwargs):
    """Read idXML files into a :py:class:`pandas.DataFrame`.
    Requires :py:mod:`pandas`.

    .. warning :: Only the first 'PeptideHit' element is considered in every 'PeptideIdentification'.

    Parameters
    ----------
    *args
        Passed to :py:func:`chain`
    **kwargs
        Passed to :py:func:`chain`
    sep : str or None, keyword only, optional
        Some values related to PSMs (such as protein information) are variable-length
        lists. If `sep` is a :py:class:`str`, they will be packed into single string using
        this delimiter. If `sep` is :py:const:`None`, they are kept as lists. Default is
        :py:const:`None`.

    Returns
    -------
    out : pandas.DataFrame
        One row per 'PeptideIdentification' element.
    """
    import pandas as pd
    data = []
    sep = kwargs.pop('sep', None)
    with chain(*args, **kwargs) as f:
        for item in f:
            info = {}
            # Copy scalar fields of the identification element itself.
            for k, v in item.items():
                if isinstance(v, (str, int, float)):
                    info[k] = v
            # Only the first PeptideHit is used (see warning above).
            peptide_hit = item.get('PeptideHit', [None])[0]
            if peptide_hit is not None:
                info.update((k, v) for k, v in peptide_hit.items() if isinstance(v, (str, int, float)))
                protein = peptide_hit.get('protein')
                if protein:
                    # Gather per-protein values into parallel lists.
                    accessions, isd, starts, ends, scores, aa_bs, aa_as = [], [], [], [], [], [], []
                    for d, start, end, aab, aaa in zip(protein, peptide_hit['start'], peptide_hit['end'], peptide_hit['aa_before'], peptide_hit['aa_after']):
                        accessions.append(d.get('accession'))
                        isd.append(d.get('target_decoy'))
                        # NOTE(review): `scores` is collected but never written
                        # into `info` below -- confirm whether that is intended.
                        scores.append(d.get('score'))
                        starts.append(start)
                        ends.append(end)
                        aa_bs.append(aab)
                        aa_as.append(aaa)
                    # The PSM counts as decoy only if ALL matched proteins are decoys.
                    isd = all(x == 'decoy' for x in isd)
                    if sep is not None:
                        # Pack all-string lists into one delimited string.
                        if all(isinstance(acc, str) for acc in accessions):
                            accessions = sep.join(accessions)
                        if all(isinstance(aaa, str) for aaa in aa_as):
                            aa_as = sep.join(aa_as)
                        if all(isinstance(aab, str) for aab in aa_bs):
                            aa_bs = sep.join(aa_bs)
                    if all(acc is None for acc in accessions):
                        accessions = None
                    # Scalar/list fields of the FIRST protein only.
                    info.update((k, v) for k, v in protein[0].items() if isinstance(v, (str, int, float, list)))
                    info['accession'] = accessions
                    info['is decoy'] = isd
                    info['start'] = starts
                    info['end'] = ends
                    info['aa_before'] = aa_bs
                    info['aa_after'] = aa_as
            data.append(info)
    df = pd.DataFrame(data)
    return df
def filter_df(*args, **kwargs):
    """Read idXML files or DataFrames and return a :py:class:`DataFrame` with filtered PSMs.
    Positional arguments can be idXML files or DataFrames.
    Requires :py:mod:`pandas`.

    .. warning :: Only the first 'PeptideHit' element is considered in every 'PeptideIdentification'.

    Parameters
    ----------
    key : str / iterable / callable, keyword only, optional
        Peptide identification score. Default is 'score'. You will probably need to change it.
    is_decoy : str / iterable / callable, keyword only, optional
        Default is 'is decoy'.
    *args
        Passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.
    **kwargs
        Passed to :py:func:`auxiliary.filter` and/or :py:func:`DataFrame`.

    Returns
    -------
    out : pandas.DataFrame
    """
    import pandas as pd
    kwargs.setdefault('key', 'score')
    # If every positional argument is already a DataFrame, concatenate them;
    # otherwise treat them as idXML sources and parse with DataFrame() first.
    # NOTE(review): empty `args` takes the concat branch and pd.concat([])
    # raises ValueError -- confirm whether that edge case matters to callers.
    if all(isinstance(arg, pd.DataFrame) for arg in args):
        df = pd.concat(args)
    else:
        df = DataFrame(*args, **kwargs)
    # Default decoy indicator column matches the one produced by DataFrame().
    if 'is_decoy' not in kwargs:
        kwargs['is_decoy'] = 'is decoy'
    return aux.filter(df, **kwargs)
# Module-level target-decoy helpers generated from is_decoy / chain.
fdr = aux._make_fdr(is_decoy, None)
# Ranking key for q-value computation: the first PeptideHit's score.
_key = lambda x: x['PeptideHit'][0]['score']
qvalues = aux._make_qvalues(chain, is_decoy, None, _key)
# `filter` deliberately shadows the builtin; it is part of this module's API.
filter = aux._make_filter(chain, is_decoy, None, _key, qvalues)
filter.chain = aux._make_chain(filter, 'filter', True)
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
levitsky/pyteomics | https://github.com/levitsky/pyteomics/blob/17e7f6bad7d273846de0a398a3870afe10c74be6/pyteomics/openms/featurexml.py | pyteomics/openms/featurexml.py | """
featurexml - reader for featureXML files
========================================
Summary
-------
**featureXML** is a format specified in the
`OpenMS <http://open-ms.sourceforge.net/about/>`_ project.
It defines a list of LC-MS features observed in an experiment.
This module provides a minimalistic way to extract information from **featureXML**
files. You can use the old functional interface (:py:func:`read`) or the new
object-oriented interface (:py:class:`FeatureXML`)
to iterate over entries in ``<feature>`` elements.
:py:class:`FeatureXML` also supports direct indexing with feature IDs.
Data access
-----------
:py:class:`FeatureXML` - a class representing a single featureXML file.
Other data access functions use this class internally.
:py:func:`read` - iterate through features in a featureXML file. Data from a
single feature are converted to a human-readable dict.
:py:func:`chain` - read multiple featureXML files at once.
:py:func:`chain.from_iterable` - read multiple files at once, using an
iterable of files.
Dependencies
------------
This module requres :py:mod:`lxml`.
--------------------------------------------------------------------------------
"""
from .. import xml, auxiliary as aux, _schema_defaults, version
class FeatureXML(xml.MultiProcessingXML):
    """Parser class for featureXML files.

    Iterates over ``<feature>`` elements and supports direct indexing by
    feature ID (``feature`` is an indexed tag).
    """
    file_format = 'featureXML'
    _root_element = 'featureMap'
    _default_schema = _schema_defaults._featurexml_schema_defaults
    _default_version = '1.9'
    _default_iter_tag = 'feature'
    _structures_to_flatten = {}
    _indexed_tags = {'feature'}
    _schema_location_param = 'noNamespaceSchemaLocation'

    # (element, attribute) pairs removed from the schema type map for files
    # older than _default_version; presumably mistyped in older schemas --
    # see _get_schema_info below.
    _offending_keys = {'ints': {
        ('PeptideIdentification', 'spectrum_reference'),
        ('UnassignedPeptideIdentification', 'spectrum_reference'),
        ('quality', 'quality')
    }}
    # Pairs force-added to the schema type map for those older files.
    _missing_keys = {'floats': {('quality', 'quality')}}

    def _get_info_smart(self, element, **kw):
        # Process subelements recursively unless the caller opts out.
        kw['recursive'] = kw.get('recursive', True)
        info = self._get_info(element, **kw)
        return info

    @xml._keepstate
    def _get_schema_info(self, read_schema=True):
        """Return schema info, patching the type map for pre-1.9 files.

        For file versions older than `_default_version`, entries listed in
        `_offending_keys` are dropped and entries from `_missing_keys` are
        added to the info returned by the base class.
        """
        schema_info = super(FeatureXML, self)._get_schema_info(read_schema)
        if not read_schema:
            return schema_info
        file_version, schema = self.version_info
        if version.VersionInfo(file_version) < version.VersionInfo(self._default_version):
            for k, s in self._offending_keys.items():
                if k in schema_info:
                    for elem in s:
                        # Entry may be absent from this particular schema.
                        try:
                            schema_info[k].remove(elem)
                        except KeyError:
                            pass
            for t, s in self._missing_keys.items():
                schema_info.setdefault(t, set()).update(s)
        return schema_info
def read(source, read_schema=True, iterative=True, use_index=False):
    """Parse `source` and iterate through features.

    Parameters
    ----------
    source : str or file
        A path to a target featureXML file or the file object itself.
    read_schema : bool, optional
        If :py:const:`True` (default), attempt to extract information from
        the XML schema mentioned in the file header; otherwise use default
        parameters. Disable this to avoid waiting on slow network
        connections or if you don't like to get the related warnings.
    iterative : bool, optional
        Defines whether iterative parsing should be used. It helps reduce
        memory usage at almost the same parsing speed. Default is
        :py:const:`True`.
    use_index : bool, optional
        Defines whether an index of byte offsets needs to be created for
        spectrum elements. Default is :py:const:`False`.

    Returns
    -------
    out : iterator
        An iterator over the dicts with feature properties.
    """
    options = {
        'read_schema': read_schema,
        'iterative': iterative,
        'use_index': use_index,
    }
    return FeatureXML(source, **options)
# Reader that concatenates several featureXML files into one iterator.
chain = aux._make_chain(read, 'read')
| python | Apache-2.0 | 17e7f6bad7d273846de0a398a3870afe10c74be6 | 2026-01-05T07:13:10.794267Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.