func_code_string stringlengths 52 1.94M | func_documentation_string stringlengths 1 47.2k |
|---|---|
def __clientDefer(self, c):
def handle_headers(r):
self.gotHeaders(c.response_headers)
return r
return c.deferred.addBoth(handle_headers) | Return a deferred for a HTTP client, after handling incoming headers |
def __doDownloadPage(self, *args, **kwargs):
logger.debug("download page: %r, %r", args, kwargs)
return self.__clientDefer(downloadPage(*args, **kwargs)) | Works like client.downloadPage(), but handle incoming headers |
def verify_credentials(self, delegate=None):
"Verify a user's credentials."
parser = txml.Users(delegate)
return self.__downloadPage('/account/verify_credentials.xml', parser) | Verify a user's credentials. |
def update(self, status, source=None, params={}):
"Update your status. Returns the ID of the new post."
params = params.copy()
params['status'] = status
if source:
params['source'] = source
return self.__parsed_post(self.__post('/statuses/update.xml', params),
... | Update your status. Returns the ID of the new post. |
def retweet(self, id, delegate):
parser = txml.Statuses(delegate)
return self.__postPage('/statuses/retweet/%s.xml' % (id), parser) | Retweet a post
Returns the retweet status info back to the given delegate |
def friends(self, delegate, params={}, extra_args=None):
return self.__get('/statuses/friends_timeline.xml', delegate, params,
txml.Statuses, extra_args=extra_args) | Get updates from friends.
Calls the delgate once for each status object received. |
def home_timeline(self, delegate, params={}, extra_args=None):
return self.__get('/statuses/home_timeline.xml', delegate, params,
txml.Statuses, extra_args=extra_args) | Get updates from friends.
Calls the delgate once for each status object received. |
def user_timeline(self, delegate, user=None, params={}, extra_args=None):
if user:
params['id'] = user
return self.__get('/statuses/user_timeline.xml', delegate, params,
txml.Statuses, extra_args=extra_args) | Get the most recent updates for a user.
If no user is specified, the statuses for the authenticating user are
returned.
See search for example of how results are returned. |
def public_timeline(self, delegate, params={}, extra_args=None):
"Get the most recent public timeline."
return self.__get('/statuses/public_timeline.atom', delegate, params,
extra_args=extra_args) | Get the most recent public timeline. |
def direct_messages(self, delegate, params={}, extra_args=None):
return self.__get('/direct_messages.xml', delegate, params,
txml.Direct, extra_args=extra_args) | Get direct messages for the authenticating user.
Search results are returned one message at a time a DirectMessage
objects |
def send_direct_message(self, text, user=None, delegate=None, screen_name=None, user_id=None, params={}):
params = params.copy()
if user is not None:
params['user'] = user
if user_id is not None:
params['user_id'] = user_id
if screen_name is not None:
... | Send a direct message |
def replies(self, delegate, params={}, extra_args=None):
return self.__get('/statuses/replies.atom', delegate, params,
extra_args=extra_args) | Get the most recent replies for the authenticating user.
See search for example of how results are returned. |
def follow_user(self, user, delegate):
parser = txml.Users(delegate)
return self.__postPage('/friendships/create/%s.xml' % (user), parser) | Follow the given user.
Returns the user info back to the given delegate |
def unfollow_user(self, user, delegate):
parser = txml.Users(delegate)
return self.__postPage('/friendships/destroy/%s.xml' % (user), parser) | Unfollow the given user.
Returns the user info back to the given delegate |
def list_friends(self, delegate, user=None, params={}, extra_args=None, page_delegate=None):
if user:
url = '/statuses/friends/' + user + '.xml'
else:
url = '/statuses/friends.xml'
return self.__get_maybe_paging(url, delegate, params, txml.PagedUserList, extra_ar... | Get the list of friends for a user.
Calls the delegate with each user object found. |
def show_user(self, user):
url = '/users/show/%s.xml' % (user)
d = defer.Deferred()
self.__downloadPage(url, txml.Users(lambda u: d.callback(u))) \
.addErrback(lambda e: d.errback(e))
return d | Get the info for a specific user.
Returns a delegate that will receive the user in a callback. |
def search(self, query, delegate, args=None, extra_args=None):
if args is None:
args = {}
args['q'] = query
return self.__doDownloadPage(self.search_url + '?' + self._urlencode(args),
txml.Feed(delegate, extra_args), agent=self.agent) | Perform a search query.
Results are given one at a time to the delegate. An example delegate
may look like this:
def exampleDelegate(entry):
print entry.title |
def startService(self):
service.Service.startService(self)
self._toState('idle')
try:
self.connect()
except NoConsumerError:
pass | Start the service.
This causes a transition to the C{'idle'} state, and then calls
L{connect} to attempt an initial conection. |
def connect(self, forceReconnect=False):
if self._state == 'stopped':
raise Error("This service is not running. Not connecting.")
if self._state == 'connected':
if forceReconnect:
self._toState('disconnecting')
return True
else... | Check current conditions and initiate connection if possible.
This is called to check preconditions for starting a new connection,
and initating the connection itself.
If the service is not running, this will do nothing.
@param forceReconnect: Drop an existing connection to reconnnect... |
def makeConnection(self, protocol):
self._errorState = None
def cb(result):
self.protocol = None
if self._state == 'stopped':
# Don't transition to any other state. We are stopped.
pass
else:
if isinstance(resul... | Called when the connection has been established.
This method is called when an HTTP 200 response has been received,
with the protocol that decodes the individual Twitter stream elements.
That protocol will call the consumer for all Twitter entries received.
The protocol, stored in L{pr... |
def _reconnect(self, errorState):
def connect():
if self.noisy:
log.msg("Reconnecting now.")
self.connect()
backOff = self.backOffs[errorState]
if self._errorState != errorState or self._delay is None:
self._errorState = errorState
... | Attempt to reconnect.
If the current back-off delay is 0, L{connect} is called. Otherwise,
it will cause a transition to the C{'waiting'} state, ultimately
causing a call to L{connect} when the delay expires. |
def _toState(self, state, *args, **kwargs):
try:
method = getattr(self, '_state_%s' % state)
except AttributeError:
raise ValueError("No such state %r" % state)
log.msg("%s: to state %r" % (self.__class__.__name__, state))
self._state = state
meth... | Transition to the next state.
@param state: Name of the next state. |
def _state_stopped(self):
if self._reconnectDelayedCall:
self._reconnectDelayedCall.cancel()
self._reconnectDelayedCall = None
self.loseConnection() | The service is not running.
This is the initial state, and the state after L{stopService} was
called. To get out of this state, call L{startService}. If there is a
current connection, we disconnect. |
def _state_connecting(self):
def responseReceived(protocol):
self.makeConnection(protocol)
if self._state == 'aborting':
self._toState('disconnecting')
else:
self._toState('connected')
def trapError(failure):
self._... | A connection is being started.
A succesful attempt results in the state C{'connected'} when the
first response from Twitter has been received. Transitioning
to the state C{'aborting'} will cause an immediate disconnect instead,
by transitioning to C{'disconnecting'}.
Errors wil... |
def _state_error(self, reason):
log.err(reason)
def matchException(failure):
for errorState, backOff in self.backOffs.iteritems():
if 'errorTypes' not in backOff:
continue
if failure.check(*backOff['errorTypes']):
... | The connection attempt resulted in an error.
Attempt a reconnect with a back-off algorithm. |
def lineReceived(self, line):
if line and line.isdigit():
self._expectedLength = int(line)
self._rawBuffer = []
self._rawBufferLength = 0
self.setRawMode()
else:
self.keepAliveReceived() | Called when a line is received.
We expect a length in bytes or an empty line for keep-alive. If
we got a length, switch to raw mode to receive that amount of bytes. |
def rawDataReceived(self, data):
self._rawBuffer.append(data)
self._rawBufferLength += len(data)
if self._rawBufferLength >= self._expectedLength:
receivedData = ''.join(self._rawBuffer)
expectedData = receivedData[:self._expectedLength]
extraData = r... | Called when raw data is received.
Fill the raw buffer C{_rawBuffer} until we have received at least
C{_expectedLength} bytes. Call C{datagramReceived} with the received
byte string of the expected size. Then switch back to line mode with
the remainder of the buffer. |
def fromDict(cls, data):
obj = cls()
obj.raw = data
for name, value in data.iteritems():
if cls.SIMPLE_PROPS and name in cls.SIMPLE_PROPS:
setattr(obj, name, value)
elif cls.COMPLEX_PROPS and name in cls.COMPLEX_PROPS:
value = cls.... | Fill this objects attributes from a dict for known properties. |
def datagramReceived(self, data):
try:
obj = json.loads(data)
except ValueError, e:
log.err(e, 'Invalid JSON in stream: %r' % data)
return
if u'text' in obj:
obj = Status.fromDict(obj)
else:
log.msg('Unsupported object ... | Decode the JSON-encoded datagram and call the callback. |
def connectionLost(self, reason):
self.setTimeout(None)
if reason.check(ResponseDone, PotentialDataLoss):
self.deferred.callback(None)
else:
self.deferred.errback(reason) | Called when the body is complete or the connection was lost.
@note: As the body length is usually not known at the beginning of the
response we expect a L{PotentialDataLoss} when Twitter closes the
stream, instead of L{ResponseDone}. Other exceptions are treated
as error conditions. |
def simpleListFactory(list_type):
def create(delegate, extra_args=None):
return listParser(list_type, delegate, extra_args)
return create | Used for simple parsers that support only one type of object |
def setSubDelegates(self, namelist, before=None, after=None):
if len(namelist) > 1:
def set_sub(i):
i.setSubDelegates(namelist[1:], before, after)
self.setBeforeDelegate(namelist[0], set_sub)
elif len(namelist) == 1:
self.setDelegate(namelist[... | Set a delegate for a sub-sub-item, according to a list of names |
def _split_path(path):
path = path.strip('/')
list_path = path.split('/')
sentinel = list_path.pop(0)
return sentinel, list_path, path | split a path return by the api
return
- the sentinel:
- the rest of the path as a list.
- the original path stripped of / for normalisation. |
def path_dispatch_rename(rename_like_method):
def _wrapper_method(self, old_path, new_path):
old_path, _old_path, old_sentinel = _split_path(old_path);
new_path, _new_path, new_sentinel = _split_path(new_path);
if old_sentinel != new_sentinel:
raise... | decorator for rename-like function, that need dispatch on 2 arguments |
def deactivate(profile='default'):
with jconfig(profile) as config:
deact = True;
if not getattr(config.NotebookApp.contents_manager_class, 'startswith',lambda x:False)('jupyterdrive'):
deact=False
if 'gdrive' not in getattr(config.NotebookApp.tornado_settings,'get', lambda ... | should be a matter of just unsetting the above keys |
def sequence_type(seq):
if isinstance(seq, coral.DNA):
material = 'dna'
elif isinstance(seq, coral.RNA):
material = 'rna'
elif isinstance(seq, coral.Peptide):
material = 'peptide'
else:
raise ValueError('Input was not a recognized coral.sequence object.')
return ... | Validates a coral.sequence data type.
:param sequence_in: input DNA sequence.
:type sequence_in: any
:returns: The material - 'dna', 'rna', or 'peptide'.
:rtype: str
:raises: ValueError |
def pcr(template, primer1, primer2, min_tm=50.0, min_primer_len=14):
# Find match in top or bottom strands for each primer
p1_matches = coral.analysis.anneal(template, primer1, min_tm=min_tm,
min_len=min_primer_len)
p2_matches = coral.analysis.anneal(template, pri... | Simulate a PCR.
:param template: DNA template from which to PCR.
:type template: coral.DNA
:param primer1: First PCR primer.
:type primer1: coral.Primer
:param primer2: First PCR primer.
:type primer2: coral.Primer
:param min_tm: Minimum melting temperature (Tm) at which primers must bind
... |
def digest(dna, restriction_enzyme):
pattern = restriction_enzyme.recognition_site
located = dna.locate(pattern)
if not located[0] and not located[1]:
return [dna]
# Bottom strand indices are relative to the bottom strand 5' end.
# Convert to same type as top strand
pattern_len = le... | Restriction endonuclease reaction.
:param dna: DNA template to digest.
:type dna: coral.DNA
:param restriction_site: Restriction site to use.
:type restriction_site: RestrictionSite
:returns: list of digested DNA fragments.
:rtype: coral.DNA list |
def _cut(dna, index, restriction_enzyme):
# TODO: handle case where cut site is outside of recognition sequence,
# for both circular and linear cases where site is at index 0
# Find absolute indices at which to cut
cut_site = restriction_enzyme.cut_site
top_cut = index + cut_site[0]
bottom_... | Cuts template once at the specified index.
:param dna: DNA to cut
:type dna: coral.DNA
:param index: index at which to cut
:type index: int
:param restriction_enzyme: Enzyme with which to cut
:type restriction_enzyme: coral.RestrictionSite
:returns: 2-element list of digested sequence, incl... |
def ipynb_to_rst(directory, filename):
print(filename)
os.chdir(directory)
subprocess.Popen(["ipython", "nbconvert", "--to", "rst",
filename],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=directory) | Converts a given file in a directory to an rst in the same directory. |
def convert_ipynbs(directory):
# The ipython_examples dir has to be in the same dir as this script
for root, subfolders, files in os.walk(os.path.abspath(directory)):
for f in files:
if ".ipynb_checkpoints" not in root:
if f.endswith("ipynb"):
ipynb_t... | Recursively converts all ipynb files in a directory into rst files in
the same directory. |
def _context_walk(dna, window_size, context_len, step):
# Generate window indices
window_start_ceiling = len(dna) - context_len - window_size
window_starts = range(context_len - 1, window_start_ceiling, step)
window_ends = [start + window_size for start in window_starts]
# Generate left and rig... | Generate context-dependent 'non-boundedness' scores for a DNA sequence.
:param dna: Sequence to score.
:type dna: coral.DNA
:param window_size: Window size in base pairs.
:type window_size: int
:param context_len: The number of bases of context to use when analyzing
each win... |
def windows(self, window_size=60, context_len=90, step=10):
self.walked = _context_walk(self.template, window_size, context_len,
step)
self.core_starts, self.core_ends, self.scores = zip(*self.walked)
return self.walked | Walk through the sequence of interest in windows of window_size,
evaluate free (unbound) pair probabilities.
:param window_size: Window size in base pairs.
:type window_size: int
:param context_len: The number of bases of context to use when
analyzing each wi... |
def plot(self):
try:
from matplotlib import pylab
except ImportError:
raise ImportError('Optional dependency matplotlib not installed.')
if self.walked:
fig = pylab.figure()
ax1 = fig.add_subplot(111)
ax1.plot(self.core_starts,... | Plot the results of the run method. |
def anneal(template, primer, min_tm=50.0, min_len=10):
# TODO: add possibility for primer basepair mismatch
if len(primer) < min_len:
msg = 'Primer length is shorter than min_len argument.'
raise PrimerLengthError(msg)
if len(template) < min_len:
msg = 'Template is shorter than ... | Simulates a primer binding event. Will find the maximum subset
of bases in the primer that binds to the template, including overhang
sequences. **Note**: Primer binding locations indicate the 3' end of the
primer, not the begining of the annealing sequence.
:param template: DNA template for which to bi... |
def primer(dna, tm=65, min_len=10, tm_undershoot=1, tm_overshoot=3,
end_gc=False, tm_parameters='cloning', overhang=None,
structure=False):
# Check Tm of input sequence to see if it's already too low
seq_tm = coral.analysis.tm(dna, parameters=tm_parameters)
if seq_tm < (tm - tm_un... | Design primer to a nearest-neighbor Tm setpoint.
:param dna: Sequence for which to design a primer.
:type dna: coral.DNA
:param tm: Ideal primer Tm in degrees C.
:type tm: float
:param min_len: Minimum primer length.
:type min_len: int
:param tm_undershoot: Allowed Tm undershoot.
:type ... |
def primers(dna, tm=65, min_len=10, tm_undershoot=1, tm_overshoot=3,
end_gc=False, tm_parameters='cloning', overhangs=None,
structure=False):
if not overhangs:
overhangs = [None, None]
templates = [dna, dna.reverse_complement()]
primer_list = []
for template, overhan... | Design primers for PCR amplifying any arbitrary sequence.
:param dna: Input sequence.
:type dna: coral.DNA
:param tm: Ideal primer Tm in degrees C.
:type tm: float
:param min_len: Minimum primer length.
:type min_len: int
:param tm_undershoot: Allowed Tm undershoot.
:type tm_undershoot:... |
def assemble_oligos(dna_list, reference=None):
# FIXME: this protocol currently only supports 5' ends on the assembly
# Find all matches for every oligo. If more than 2 per side, error.
# Self-oligo is included in case the 3' end is self-complementary.
# 1) Find all unique 3' binders (and non-binde... | Given a list of DNA sequences, assemble into a single construct.
:param dna_list: List of DNA sequences - they must be single-stranded.
:type dna_list: coral.DNA list
:param reference: Expected sequence - once assembly completed, this will
be used to reorient the DNA (assembly could potentially occur fr... |
def bind_unique(reference, query_list, min_overlap=12, right=True):
size = min_overlap
found = []
# Reverse complementing here provides massive speedup?
rev_query = [seq.reverse_complement() for seq in query_list]
while not found and not size > len(reference):
for i, seq in enumerate(re... | (5' or 3' region on reference sequence that uniquely matches the reverse
complement of the associated (5' or 3') region of one sequence in a list of
query sequences.
:param reference: Reference sequence.
:type reference: coral.DNA
:param query_list: List of query sequences.
:type query_list: co... |
def nonmatches(self):
# For every result, keep a dictionary of mismatches, insertions, and
# deletions
report = []
for result in self.aligned_results:
report.append(self._analyze_single(self.aligned_reference, result))
return report | Report mismatches, indels, and coverage. |
def plot(self):
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# Constants to use throughout drawing
n = len(self.results)
nbases = len(self.aligned_reference)
barheight = 0.4
# Vary height of figure based on number of results
... | Make a summary plot of the alignment and highlight nonmatches. |
def _analyze_single(self, reference, result):
# TODO: Recalculate coverage based on reference (e.g. sequencing result
# longer than template
reference_str = str(reference)
result_str = str(result)
report = {'mismatches': [], 'insertions': [], 'deletions': []}
for... | Report mistmatches and indels for a single (aligned) reference and
result. |
def _remove_n(self):
for i, result in enumerate(self.results):
largest = max(str(result).split('N'), key=len)
start = result.locate(largest)[0][0]
stop = start + len(largest)
if start != stop:
self.results[i] = self.results[i][start:stop] | Remove terminal Ns from sequencing results. |
def random_dna(n):
return coral.DNA(''.join([random.choice('ATGC') for i in range(n)])) | Generate a random DNA sequence.
:param n: Output sequence length.
:type n: int
:returns: Random DNA sequence of length n.
:rtype: coral.DNA |
def random_codons(peptide, frequency_cutoff=0.0, weighted=False, table=None):
if table is None:
table = CODON_FREQ_BY_AA['sc']
# Process codon table using frequency_cutoff
new_table = _cutoff(table, frequency_cutoff)
# Select codons randomly or using weighted distribution
rna = ''
f... | Generate randomized codons given a peptide sequence.
:param peptide: Peptide sequence for which to generate randomized
codons.
:type peptide: coral.Peptide
:param frequency_cutoff: Relative codon usage cutoff - codons that
are rarer will not be used. Frequen... |
def _cutoff(table, frequency_cutoff):
new_table = {}
# IDEA: cutoff should be relative to most-frequent codon, not average?
for amino_acid, codons in table.iteritems():
average_cutoff = frequency_cutoff * sum(codons.values()) / len(codons)
new_table[amino_acid] = {}
for codon, f... | Generate new codon frequency table given a mean cutoff.
:param table: codon frequency table of form {amino acid: codon: frequency}
:type table: dict
:param frequency_cutoff: value between 0 and 1.0 for mean frequency cutoff
:type frequency_cutoff: float
:returns: A codon frequency table with some c... |
def fetch_genome(genome_id):
# TODO: Can strandedness by found in fetched genome attributes?
# TODO: skip read/write step?
# Using a dummy email for now - does this violate NCBI guidelines?
email = 'loremipsum@gmail.com'
Entrez.email = email
print 'Downloading Genome...'
handle = Entrez... | Acquire a genome from Entrez |
def cofold(self, strand1, strand2, temp=37.0, dangles=2, nolp=False,
nogu=False, noclosinggu=False, constraints=None,
canonicalbponly=False, partition=-1, pfscale=None, gquad=False):
cmd_args = []
cmd_kwargs = {'--temp=': str(temp)}
cmd_kwargs['--dangles=']... | Run the RNAcofold command and retrieve the result in a dictionary.
:param strand1: Strand 1 for running RNAcofold.
:type strand1: coral.DNA or coral.RNA
:param strand1: Strand 2 for running RNAcofold.
:type strand2: coral.DNA or coral.RNA
:param temp: Temperature at which to run... |
def fold(self, strand, temp=37.0, dangles=2, nolp=False, nogu=False,
noclosinggu=False, constraints=None, canonicalbponly=False,
partition=False, pfscale=None, imfeelinglucky=False, gquad=False):
cmd_args = []
cmd_kwargs = {'--temp=': str(temp)}
cmd_kwargs['--d... | Run the RNAfold command and retrieve the result in a dictionary.
:param strand: The DNA or RNA sequence on which to run RNAfold.
:type strand: coral.DNA or coral.RNA
:param temp: Temperature at which to run the calculations.
:type temp: float
:param dangles: How to treat danglin... |
def dimers(primer1, primer2, concentrations=[5e-7, 3e-11]):
# It is not reasonable (yet) to use a long template for doing these
# computations directly, as NUPACK does an exhaustive calculation and
# would take too long without a cluster.
# Instead, this function compares primer-primer binding to
... | Calculate expected fraction of primer dimers.
:param primer1: Forward primer.
:type primer1: coral.DNA
:param primer2: Reverse primer.
:type primer2: coral.DNA
:param template: DNA template.
:type template: coral.DNA
:param concentrations: list of concentrations for primers and the
... |
def read_dna(path):
filename, ext = os.path.splitext(os.path.split(path)[-1])
genbank_exts = ['.gb', '.ape']
fasta_exts = ['.fasta', '.fa', '.fsa', '.seq']
abi_exts = ['.abi', '.ab1']
if any([ext == extension for extension in genbank_exts]):
file_format = 'genbank'
elif any([ext == ... | Read DNA from file. Uses BioPython and coerces to coral format.
:param path: Full path to input file.
:type path: str
:returns: DNA sequence.
:rtype: coral.DNA |
def read_sequencing(directory):
dirfiles = os.listdir(directory)
seq_exts = ['.seq', '.abi', '.ab1']
# Exclude files that aren't sequencing results
seq_paths = [x for x in dirfiles if os.path.splitext(x)[1] in seq_exts]
paths = [os.path.join(directory, x) for x in seq_paths]
sequences = [re... | Read .seq and .abi/.ab1 results files from a dir.
:param directory: Path to directory containing sequencing files.
:type directory: str
:returns: A list of DNA sequences.
:rtype: coral.DNA list |
def write_dna(dna, path):
# Check if path filetype is valid, remember for later
ext = os.path.splitext(path)[1]
if ext == '.gb' or ext == '.ape':
filetype = 'genbank'
elif ext == '.fa' or ext == '.fasta':
filetype = 'fasta'
else:
raise ValueError('Only genbank or fasta f... | Write DNA to a file (genbank or fasta).
:param dna: DNA sequence to write to file
:type dna: coral.DNA
:param path: file path to write. Has to be genbank or fasta file.
:type path: str |
def write_primers(primer_list, path, names=None, notes=None):
# Check for notes and names having the right length, apply them to primers
if names is not None:
if len(names) != len(primer_list):
names_msg = 'Mismatch in number of notes and primers.'
raise PrimerAnnotationErro... | Write a list of primers out to a csv file. The first three columns are
compatible with the current IDT order form (name, sequence, notes). By
default there are no notes, which is an optional parameter.
:param primer_list: A list of primers.
:type primer_list: coral.Primer list
:param path: A path t... |
def _process_feature_type(feature_type, bio_to_coral=True):
err_msg = 'Unrecognized feature type: {}'.format(feature_type)
if bio_to_coral:
try:
name = coral.constants.genbank.TO_CORAL[feature_type]
except KeyError:
raise ValueError(err_msg)
else:
try:
... | Translate genbank feature types into usable ones (currently identical).
The feature table is derived from the official genbank spec (gbrel.txt)
available at http://www.insdc.org/documents/feature-table
:param feature_type: feature to convert
:type feature_type: str
:param bio_to_coral: from coral t... |
def _seqfeature_to_coral(feature):
# Some genomic sequences don't have a label attribute
# TODO: handle genomic cases differently than others. Some features lack
# a label but should still be incorporated somehow.
qualifiers = feature.qualifiers
if 'label' in qualifiers:
feature_name = ... | Convert a Biopython SeqFeature to a coral.Feature.
:param feature: Biopython SeqFeature
:type feature: Bio.SeqFeature |
def _coral_to_seqfeature(feature):
bio_strand = 1 if feature.strand == 1 else -1
ftype = _process_feature_type(feature.feature_type, bio_to_coral=False)
sublocations = []
if feature.gaps:
# There are gaps. Have to define location_operator and add subfeatures
location_operator = 'jo... | Convert a coral.Feature to a Biopython SeqFeature.
:param feature: coral Feature.
:type feature: coral.Feature |
def as_ord_matrix(matrix, alphabet):
ords = [ord(c) for c in alphabet]
ord_matrix = np.zeros((max(ords) + 1, max(ords) + 1), dtype=np.integer)
for i, row_ord in enumerate(ords):
for j, col_ord in enumerate(ords):
ord_matrix[row_ord, col_ord] = matrix[i, j]
return ord_matrix | Given the SubstitutionMatrix input, generate an equivalent matrix that
is indexed by the ASCII number of each residue (e.g. A -> 65). |
def aligner(seqj, seqi, method='global', gap_open=-7, gap_extend=-7,
gap_double=-7, matrix=submat.DNA_SIMPLE.matrix,
alphabet=submat.DNA_SIMPLE.alphabet):
amatrix = as_ord_matrix(matrix, alphabet)
NONE, LEFT, UP, DIAG = range(4) # NONE is 0
max_j = len(seqj)
max_i = len(seq... | Calculates the alignment of two sequences. The global method uses
a global Needleman-Wunsh algorithm, local does a a local
Smith-Waterman alignment, global_cfe does a global alignment with
cost-free ends and glocal does an alignment which is global only with
respect to the shorter sequence, also known a... |
def score_alignment(a, b, gap_open, gap_extend, matrix):
al = a
bl = b
l = len(al)
score = 0
assert len(bl) == l, 'Alignment lengths must be the same'
mat = as_ord_matrix(matrix)
gap_started = 0
for i in range(l):
if al[i] == '-' or bl[i] == '-':
score += gap_ext... | Calculate the alignment score from two aligned sequences.
:param a: The first aligned sequence.
:type a: str
:param b: The second aligned sequence.
:type b: str
:param gap_open: The cost of opening a gap (negative number).
:type gap_open: int
:param gap_extend: The cost of extending an open... |
def build_docs(directory):
os.chdir(directory)
process = subprocess.Popen(["make", "html"], cwd=directory)
process.communicate() | Builds sphinx docs from a given directory. |
def gibson_primers(dna1, dna2, overlap='mixed', maxlen=80, overlap_tm=65.0,
insert=None, primer_kwargs=None):
if primer_kwargs is None:
primer_kwargs = {}
# Annealing sequences
# DNA 2 primer is a forward primer
fwd_anneal = coral.design.primer(dna2, **primer_kwargs)
... | Design Gibson primers given two DNA sequences (connect left to right)
:param dna1: First piece of DNA for which to design primers. Once Gibsoned,
would be connected at its right side to dna2.
:type dna1: coral.DNA
:param dna2: First piece of DNA for which to design primers. Once Gibsoned,
... |
def gibson(seq_list, circular=True, overlaps='mixed', overlap_tm=65,
maxlen=80, terminal_primers=True, primer_kwargs=None):
# Input checking
if circular:
n_overlaps = len(seq_list)
else:
n_overlaps = len(seq_list) - 1
if type(overlaps) is str:
overlaps = [overlaps... | Design Gibson primers given a set of sequences
:param seq_list: List of DNA sequences to stitch together
:type seq_list: list containing coral.DNA
:param circular: If true, designs primers for making a circular construct.
If false, designs primers for a linear construct.
:type circ... |
def _decompose(string, n):
binary = [int(x) for x in bin(n)[2:]]
new_string = string
counter = 1
while counter <= len(binary):
if binary[-counter]:
yield new_string
new_string += new_string
counter += 1 | Given string and multiplier n, find m**2 decomposition.
:param string: input string
:type string: str
:param n: multiplier
:type n: int
:returns: generator that produces m**2 * string if m**2 is a factor of n
:rtype: generator of 0 or 1 |
def reverse_complement(sequence, material):
code = dict(COMPLEMENTS[material])
reverse_sequence = sequence[::-1]
return ''.join([code[str(base)] for base in reverse_sequence]) | Reverse complement a sequence.
:param sequence: Sequence to reverse complement
:type sequence: str
:param material: dna, rna, or peptide.
:type material: str |
def check_alphabet(seq, material):
errs = {'dna': 'DNA', 'rna': 'RNA', 'peptide': 'peptide'}
if material == 'dna' or material == 'rna' or material == 'peptide':
alphabet = ALPHABETS[material]
err_msg = errs[material]
else:
msg = 'Input material must be \'dna\', \'rna\', or \'pep... | Verify that a given string is valid DNA, RNA, or peptide characters.
:param seq: DNA, RNA, or peptide sequence.
:type seq: str
:param material: Input material - 'dna', 'rna', or 'pepide'.
:type sequence: str
:returns: Whether the `seq` is a valid string of `material`.
:rtype: bool
:raises: ... |
def process_seq(seq, material):
check_alphabet(seq, material)
seq = seq.upper()
return seq | Validate and process sequence inputs.
:param seq: input sequence
:type seq: str
:param material: DNA, RNA, or peptide
:type: str
:returns: Uppercase version of `seq` with the alphabet checked by
check_alphabet().
:rtype: str |
def palindrome(seq):
seq_len = len(seq)
if seq_len % 2 == 0:
# Sequence has even number of bases, can test non-overlapping seqs
wing = seq_len / 2
l_wing = seq[0: wing]
r_wing = seq[wing:]
if l_wing == r_wing.reverse_complement():
return True
else... | Test whether a sequence is palindrome.
:param seq: Sequence to analyze (DNA or RNA).
:type seq: coral.DNA or coral.RNA
:returns: Whether a sequence is a palindrome.
:rtype: bool |
def copy(self):
# Significant performance improvements by skipping alphabet check
return type(self)(self.seq, self.material, run_checks=False) | Create a copy of the current instance.
:returns: A safely editable copy of the current sequence. |
def locate(self, pattern):
if len(pattern) > len(self):
raise ValueError('Search pattern longer than searchable ' +
'sequence.')
seq = self.seq
pattern = str(pattern).upper()
re_pattern = '(?=' + pattern + ')'
matches = [index.sta... | Find sequences matching a pattern.
:param pattern: Sequence for which to find matches.
:type pattern: str
:returns: Indices of pattern matches.
:rtype: list of ints |
def copy(self):
    """Return a safely editable duplicate of this feature.

    :returns: A new Feature with the same location, type, and metadata.
    :rtype: coral.Feature
    """
    cls = type(self)
    return cls(self.name, self.start, self.stop, self.feature_type,
               gene=self.gene, locus_tag=self.locus_tag,
               qualifiers=self.qualifiers, strand=self.strand)
:returns: A safely editable copy of the current feature.
:rtype: coral.Feature |
def nupack_multi(seqs, material, cmd, arguments, report=True):
nupack_pool = multiprocessing.Pool()
try:
args = [{'seq': seq,
'cmd': cmd,
'material': material,
'arguments': arguments} for seq in seqs]
nupack_iterator = nupack_pool.imap(run_... | Split Nupack commands over processors.
:param inputs: List of sequences, same format as for coral.analysis.Nupack.
:type inputs: list
:param material: Input material: 'dna' or 'rna'.
:type material: str
:param cmd: Command: 'mfe', 'pairs', 'complexes', or 'concentrations'.
:type cmd: str
:pa... |
def run_nupack(kwargs):
    """Run a picklable NUPACK command (worker entry point for multiprocessing).

    :param kwargs: dict with 'seq', 'cmd', and 'arguments' keys; 'cmd' names
                   a NUPACK method and 'arguments' holds its keyword args.
    :returns: Variable - whatever the named NUPACK command returns.
    """
    job = NUPACK(kwargs['seq'])
    # Dispatch by name so a single picklable function can run any command.
    command = getattr(job, kwargs['cmd'])
    return command(**kwargs['arguments'])
:param kwargs: keyword arguments to pass to Nupack as well as 'cmd'.
:returns: Variable - whatever `cmd` returns. |
def pfunc_multi(self, strands, permutation=None, temp=37.0, pseudo=False,
material=None, dangles='some', sodium=1.0, magnesium=0.0):
# Set the material (will be used to set command material flag)
material = self._set_material(strands, material, multi=True)
# Set up c... | Compute the partition function for an ordered complex of strands.
Runs the \'pfunc\' command.
:param strands: List of strands to use as inputs to pfunc -multi.
:type strands: list
:param permutation: The circular permutation of strands to test in
complex. e.g... |
def pairs(self, strand, cutoff=0.001, temp=37.0, pseudo=False,
material=None, dangles='some', sodium=1.0, magnesium=0.0):
# Set the material (will be used to set command material flag)
material = self._set_material(strand, material)
# Set up command flags
cmd_args ... | Compute the pair probabilities for an ordered complex of strands.
Runs the \'pairs\' command.
:param strand: Strand on which to run pairs. Strands must be either
coral.DNA or coral.RNA).
:type strand: list
:param cutoff: Only probabilities above this cutoff appear... |
def pairs_multi(self, strands, cutoff=0.001, permutation=None, temp=37.0,
pseudo=False, material=None, dangles='some', sodium=1.0,
magnesium=0.0):
# Set the material (will be used to set command material flag)
material = self._set_material(strands, materi... | Compute the pair probabilities for an ordered complex of strands.
Runs the \'pairs\' command.
:param strands: List of strands to use as inputs to pairs -multi.
:type strands: list
:param permutation: The circular permutation of strands to test in
complex. e.g... |
def mfe(self, strand, degenerate=False, temp=37.0, pseudo=False,
material=None, dangles='some', sodium=1.0, magnesium=0.0):
# Set the material (will be used to set command material flag)
material = self._set_material(strand, material)
# Set up command flags
cmd_args ... | Compute the MFE for an ordered complex of strands. Runs the \'mfe\'
command.
:param strand: Strand on which to run mfe. Strands must be either
coral.DNA or coral.RNA).
:type strand: coral.DNA or coral.RNA
:param degenerate: Setting to True will result in returning... |
def mfe_multi(self, strands, permutation=None, degenerate=False, temp=37.0,
pseudo=False, material=None, dangles='some', sodium=1.0,
magnesium=0.0):
# Set the material (will be used to set command material flag)
material = self._set_material(strands, material... | Compute the MFE for an ordered complex of strands. Runs the \'mfe\'
command.
:param strands: Strands on which to run mfe. Strands must be either
coral.DNA or coral.RNA).
:type strands: list
:param permutation: The circular permutation of strands to test in
... |
def subopt(self, strand, gap, temp=37.0, pseudo=False, material=None,
dangles='some', sodium=1.0, magnesium=0.0):
# Set the material (will be used to set command material flag)
material = self._set_material(strand, material)
# Set up command flags
cmd_args = self.... | Compute the suboptimal structures within a defined energy gap of the
MFE. Runs the \'subopt\' command.
:param strand: Strand on which to run subopt. Strands must be either
coral.DNA or coral.RNA).
:type strand: coral.DNA or coral.RNA
:param gap: Energy gap within ... |
def count(self, strand, pseudo=False):
# Set up command flags
if pseudo:
cmd_args = ['-pseudo']
else:
cmd_args = []
# Set up the input file and run the command
stdout = self._run('count', cmd_args, [str(strand)]).split('\n')
# Return the c... | Enumerates the total number of secondary structures over the
structural ensemble Ω(π). Runs the \'count\' command.
:param strand: Strand on which to run count. Strands must be either
coral.DNA or coral.RNA).
:type strand: list
:param pseudo: Enable pseudoknots.
... |
def count_multi(self, strands, permutation=None, pseudo=False):
# Set up command flags
cmd_args = ['-multi']
if pseudo:
cmd_args.append('-pseudo')
# Set up the input file and run the command
if permutation is None:
permutation = range(1, len(stran... | Enumerates the total number of secondary structures over the
structural ensemble Ω(π) with an ordered permutation of strands. Runs
the \'count\' command.
:param strands: List of strands to use as inputs to count -multi.
:type strands: list
:param permutation: The circular permut... |
def energy(self, strand, dotparens, temp=37.0, pseudo=False, material=None,
dangles='some', sodium=1.0, magnesium=0.0):
# Set the material (will be used to set command material flag)
material = self._set_material(strand, material)
# Set up command flags
cmd_args =... | Calculate the free energy of a given sequence structure. Runs the
\'energy\' command.
:param strand: Strand on which to run energy. Strands must be either
coral.DNA or coral.RNA).
:type strand: coral.DNA or coral.RNA
:param dotparens: The structure in dotparens no... |
def complexes(self, strands, max_size, ordered=False, pairs=False,
mfe=False, cutoff=0.001, degenerate=False, temp=37.0,
pseudo=False, material=None, dangles='some', sodium=1.0,
magnesium=0.0):
# TODO: Consider returning a pandas dataframe in this (... | :param strands: Strands on which to run energy. Strands must be either
coral.DNA or coral.RNA).
:type strands: list of coral.DNA or coral.RNA
:param max_size: Maximum complex size to consider (maximum number of
strand species in complex).
:type max... |
def complexes_timeonly(self, strands, max_size):
    """Estimate how long a full 'complexes' calculation would take.

    Runs NUPACK's 'complexes' command with -quiet -timeonly, which only
    estimates the time required to compute all the partition functions.

    :param strands: Strands on which to run complexes (coral.DNA or
                    coral.RNA).
    :type strands: list
    :param max_size: Maximum complex size to consider.
    :type max_size: int
    :returns: Estimated calculation time in seconds.
    :rtype: float
    """
    cmd_args = ['-quiet', '-timeonly']
    lines = self._multi_lines(strands, [max_size])
    stdout = self._run('complexes', cmd_args, lines)
    # Raw string fixes the original 'calculation\:' pattern, whose '\:' is
    # an invalid escape sequence (SyntaxWarning in modern Python); ':'
    # needs no escaping in a regex.
    match = re.search(r'calculation: (.*) seconds', stdout)
    return float(match.group(1))
partition functions for each circular permutation - estimate the time
the actual \'complexes\' command will take to run.
:param strands: Strands on which to run energy. Strands must be either
coral.DNA or coral... |
def concentrations(self, complexes, concs, ordered=False, pairs=False,
cutoff=0.001, temp=37.0):
# Check inputs
nstrands = len(complexes[0]['strands'])
try:
if len(concs) != nstrands:
raise ValueError('concs argument not same length as ... | :param complexes: A list of the type returned by the complexes()
method.
:type complexes: list
:param concs: The concentration(s) of each strand species in the
initial complex. If they are all the same, a single
float can be used here... |
def distributions(self, complexes, counts, volume, maxstates=1e7,
ordered=False, temp=37.0):
# Check inputs
nstrands = len(complexes[0]['strands'])
if len(counts) != nstrands:
raise ValueError('counts argument not same length as strands.')
# Set... | Runs the \'distributions\' NUPACK command. Note: this is intended
for a relatively small number of species (on the order of ~20
total strands for complex size ~14).
:param complexes: A list of the type returned by the complexes()
method.
:type complexes: list
... |
def _multi_lines(self, strands, permutation):
lines = []
# Write the total number of distinct strands
lines.append(str(len(strands)))
# Write the distinct strands
lines += [str(strand) for strand in strands]
# Write the permutation
lines.append(' '.join(s... | Prepares lines to write to file for pfunc command input.
:param strand: Strand input (cr.DNA or cr.RNA).
:type strand: cr.DNA or cr.RNA
:param permutation: Permutation (e.g. [1, 2, 3, 4]) of the type used
by pfunc_multi.
:type permutation: list |
def _read_tempfile(self, filename):
    """Return the contents of a file inside this run's temp directory.

    :param filename: Name of the file to read (relative to self._tempdir).
    :type filename: str
    :returns: The file's full text content.
    :rtype: str
    """
    path = os.path.join(self._tempdir, filename)
    with open(path) as handle:
        return handle.read()
:param filename: Name of the file to read.
:type filename: str |
def _pairs_to_np(self, pairlist, dim):
    """Build a pair-probability matrix from NUPACK pair-probability triples.

    :param pairlist: a list of (i, j, probability) triples (i and j are
                     1-indexed base positions).
    :type pairlist: list
    :param dim: Number of bases; the matrix gets one extra column that
                represents the unpaired probability.
    :type dim: int
    :returns: (dim x dim + 1) matrix of pair probabilities.
    :rtype: numpy.ndarray
    """
    probabilities = np.zeros((dim, dim + 1))
    for entry in pairlist:
        # NUPACK positions are 1-based; shift to 0-based matrix indices.
        row = int(entry[0]) - 1
        col = int(entry[1]) - 1
        probabilities[row, col] = float(entry[2])
    return probabilities
:param pairlist: a list of pair probability triples
:type pairlist: list
:returns: An upper triangular matrix of pair probabilities augmented
with one extra column that represents the unpaired
pr... |
def _flip_feature(self, feature, parent_len):
copy = feature.copy()
# Put on the other strand
if copy.strand == 0:
copy.strand = 1
else:
copy.strand = 0
# Adjust locations - guarantee that start is always less than end
copy.start = parent_len - copy.start
copy.stop = par... | Adjust a feature's location when flipping DNA.
:param feature: The feature to flip.
:type feature: coral.Feature
:param parent_len: The length of the sequence to which the feature belongs.
:type parent_len: int |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.