_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def get_siblings_content(self, current_sibling, baselinescore_siblings_para):
    """
    adds any siblings that may have a decent score to this node

    If *current_sibling* is itself a non-empty <p>, it is returned as a
    one-element list (deep-copied first when it has a tail, so the source
    tree is not mutated).  Otherwise each <p> descendant is scored by its
    stopword count and kept when it beats 30% of the sibling baseline and
    is not link-heavy.

    :param current_sibling: candidate sibling node of the top node
    :param baselinescore_siblings_para: baseline stopword score used to
        gate descendant paragraphs
    :returns: list of <p> elements (possibly empty), or ``None`` when the
        sibling has no <p> descendants
    """
    if current_sibling.tag == 'p' and self.parser.getText(current_sibling):
        tmp = current_sibling
        if tmp.tail:
            # copy before clearing the tail so the original document is untouched
            tmp = deepcopy(tmp)
            tmp.tail = ''
        return [tmp]
    else:
        potential_paragraphs = self.parser.getElementsByTag(current_sibling, tag='p')
        if potential_paragraphs is None:
            return None
        paragraphs = list()
        for first_paragraph in potential_paragraphs:
            text = self.parser.getText(first_paragraph)
            if text:  # no len(text) > 0
                # score by stopword count in the article's language
                word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(text)
                paragraph_score = word_stats.get_stopword_count()
                sibling_baseline_score = float(.30)
                high_link_density = self.is_highlink_density(first_paragraph)
                # keep only paragraphs beating 30% of the baseline that are not link-heavy
                score = float(baselinescore_siblings_para * sibling_baseline_score)
                if score < paragraph_score and not high_link_density:
                    para = self.parser.createElement(tag='p', text=text, tail=None)
                    paragraphs.append(para)
        return paragraphs
"resource": ""
} |
def is_highlink_density(self, element):
    """
    Decide whether *element* is dominated by link text.

    Compares the number of words inside <a> descendants with the total
    word count, weighted by the number of links; a weighted ratio of 1.0
    or more marks the node as link-heavy ("linky") content.
    """
    links = self.parser.getElementsByTag(element, tag='a')
    if not links:
        return False
    total_words = float(len(self.parser.getText(element).split(' ')))
    link_text = ''.join(self.parser.getText(link) for link in links)
    link_word_count = float(len(link_text.split(' ')))
    link_count = float(len(links))
    # ratio of link words to all words, scaled by how many links there are
    score = (link_word_count / total_words) * link_count
    return score >= 1.0
"resource": ""
} |
def nodes_to_check(self, docs):
    """\
    returns a list of nodes we want to search
    on like paragraphs and tables

    Collects every <p>, <pre> and <td> element from each document in
    *docs* into a single flat list.
    """
    candidates = []
    for doc in docs:
        for tag_name in ('p', 'pre', 'td'):
            candidates.extend(self.parser.getElementsByTag(doc, tag=tag_name))
    return candidates
"resource": ""
} |
def post_cleanup(self):
    """\
    remove any divs that looks like non-content,
    clusters of links, or paras with no gusto

    Walks the children of the top node (extended with scored siblings)
    and removes any child whose tag is not whitelisted AND that is
    link-heavy, a table without paragraphs, or below the node-score
    threshold.

    :returns: the cleaned top node
    """
    # tags that are always kept; lists/headers only when enabled in config
    parse_tags = ['p']
    if self.config.parse_lists:
        parse_tags.extend(['ul', 'ol'])
    if self.config.parse_headers:
        parse_tags.extend(['h1', 'h2', 'h3', 'h4', 'h5', 'h6'])
    target_node = self.article.top_node
    node = self.add_siblings(target_node)
    for elm in self.parser.getChildren(node):
        e_tag = self.parser.getTag(elm)
        if e_tag not in parse_tags:
            # NOTE(review): the threshold check receives the parent *node*
            # (not elm alone) -- presumably intentional; confirm upstream.
            if (self.is_highlink_density(elm) or self.is_table_and_no_para_exist(elm) or
                    not self.is_nodescore_threshold_met(node, elm)):
                self.parser.remove(elm)
    return node
"resource": ""
} |
def get_title(self):
    """\
    Fetch the article title and analyze it

    Preference order: opengraph title, schema.org headline, a
    ``<meta name="headline">`` tag, then the document <title>.  Every
    hit is passed through :meth:`clean_title`; the empty string is
    returned when nothing matches.
    """
    # opengraph data wins when present
    if "title" in list(self.article.opengraph.keys()):
        return self.clean_title(self.article.opengraph['title'])
    if self.article.schema and "headline" in self.article.schema:
        return self.clean_title(self.article.schema['headline'])
    # next, an explicit headline meta tag
    meta_headline = self.parser.getElementsByTag(self.article.doc,
                                                 tag="meta",
                                                 attr="name",
                                                 value="headline")
    if meta_headline:
        return self.clean_title(self.parser.getAttribute(meta_headline[0], 'content'))
    # finally fall back to the <title> element
    title_elements = self.parser.getElementsByTag(self.article.doc, tag='title')
    if title_elements:
        return self.clean_title(self.parser.getText(title_elements[0]))
    return ''
"resource": ""
} |
def get_canonical_link(self):
    """
    if the article has meta canonical link set in the url

    Looks for ``<link rel="canonical">``; a relative href is resolved
    against the article's final URL.  Falls back to the final URL when
    no canonical link is present.
    """
    if not self.article.final_url:
        return self.article.final_url
    meta = self.parser.getElementsByTag(self.article.doc,
                                        tag='link', attr='rel', value='canonical')
    if meta:
        href = self.parser.getAttribute(meta[0], 'href')
        if href:
            href = href.strip()
            # absolutise relative canonical URLs against the final URL
            if not urlparse(href).hostname:
                parts = urlparse(self.article.final_url)
                base = '%s://%s' % (parts.scheme, parts.hostname)
                href = urljoin(base, href)
            return href
    return self.article.final_url
"resource": ""
} |
def close(self):
    '''Close the network connection and perform any other required cleanup
    Note:
        Auto closed when using goose as a context manager or when garbage collected '''
    fetcher_active = self.fetcher is not None
    if fetcher_active:
        self.shutdown_network()
    # the finalizer no longer needs to fire at interpreter exit
    self.finalizer.atexit = False
"resource": ""
} |
def extract(self, url=None, raw_html=None):
    '''Extract the most likely article content from the html page.

    Args:
        url (str): URL to pull and parse
        raw_html (str): String representation of the HTML page
    Returns:
        Article: Representation of the article contents
        including other parsed and extracted metadata
    '''
    candidate = CrawlCandidate(self.config, url, raw_html)
    return self.__crawl(candidate)
"resource": ""
} |
def __crawl(self, crawl_candidate):
    ''' wrap the crawling functionality

    Crawls with the configured parser first; on a parse failure
    (UnicodeDecodeError/ValueError) it retries recursively with each
    remaining available parser until one succeeds or the list is
    exhausted (then the last error is re-raised).
    '''
    def crawler_wrapper(parser, parsers_lst, crawl_candidate):
        # NOTE(review): `parser` is accepted but never passed to Crawler;
        # presumably the crawler picks the parser up from self.config.
        try:
            crawler = Crawler(self.config, self.fetcher)
            article = crawler.crawl(crawl_candidate)
        except (UnicodeDecodeError, ValueError) as ex:
            if parsers_lst:
                parser = parsers_lst.pop(0)  # remove it also!
                return crawler_wrapper(parser, parsers_lst, crawl_candidate)
            else:
                raise ex
        return article
    # use the wrapper with the fallback parsers (primary parser excluded)
    parsers = list(self.config.available_parsers)
    parsers.remove(self.config.parser_class)
    return crawler_wrapper(self.config.parser_class, parsers, crawl_candidate)
"resource": ""
} |
def smart_unicode(string, encoding='utf-8', strings_only=False, errors='strict'):
    """Return a text (unicode) version of *string*.

    Bytestrings are decoded with *encoding*; when *strings_only* is
    true, (some) non-string-like objects are passed through unchanged.
    Thin convenience wrapper around :func:`force_unicode`.
    """
    return force_unicode(string, encoding=encoding,
                         strings_only=strings_only, errors=errors)
"resource": ""
} |
def force_unicode(string, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_unicode, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.
    If strings_only is True, don't convert (some) non-string-like objects.

    :raises DjangoUnicodeDecodeError: when byte data cannot be decoded
        with *encoding*/*errors* (re-wrapped ``UnicodeDecodeError``)
    """
    # Handle the common case first, saves 30-40% in performance when s
    # is an instance of unicode. This function gets called often in that
    # setting.
    if isinstance(string, str):
        return string
    if strings_only and is_protected_type(string):
        return string
    try:
        if not isinstance(string, str):
            if hasattr(string, '__unicode__'):
                # Python-2 style objects that know how to render themselves
                string = string.__unicode__()
            else:
                try:
                    string = str(string, encoding, errors)
                except UnicodeEncodeError:
                    if not isinstance(string, Exception):
                        raise
                    # If we get to here, the caller has passed in an Exception
                    # subclass populated with non-ASCII data without special
                    # handling to display as a string. We need to handle this
                    # without raising a further exception. We do an
                    # approximation to what the Exception's standard str()
                    # output should be.
                    string = ' '.join([force_unicode(arg, encoding,
                                                     strings_only,
                                                     errors) for arg in string])
        elif not isinstance(string, str):
            # NOTE(review): unreachable -- this `elif` repeats the exact
            # condition of the `if` above, so it can never be true when the
            # `if` was false.  Kept byte-for-byte for fidelity.
            # Note: We use .decode() here, instead of unicode(s, encoding,
            # errors), so that if s is a SafeString, it ends up being a
            # SafeUnicode at the end.
            string = string.decode(encoding, errors)
    except UnicodeDecodeError as ex:
        if not isinstance(string, Exception):
            raise DjangoUnicodeDecodeError(string, *ex.args)
        else:
            # If we get to here, the caller has passed in an Exception
            # subclass populated with non-ASCII bytestring data without a
            # working unicode method. Try to handle this without raising a
            # further exception by individually forcing the exception args
            # to unicode.
            string = ' '.join([force_unicode(arg, encoding, strings_only,
                                             errors) for arg in string])
    return string
"resource": ""
} |
def smart_str(string, encoding='utf-8', strings_only=False, errors='strict'):
    """Return a bytestring version of *string*, encoded with *encoding*.

    When *strings_only* is true, ``None`` and integers are returned
    unchanged.  Existing bytes objects pass through as-is.
    """
    if strings_only and isinstance(string, (type(None), int)):
        return string
    if isinstance(string, str):
        try:
            return string.encode(encoding, errors)
        except UnicodeEncodeError:
            # UTF-8 can represent any str
            return string.encode('utf-8', errors)
    if isinstance(string, bytes):
        return string
    try:
        return str(string).encode(encoding, errors)
    except UnicodeEncodeError:
        if isinstance(string, Exception):
            # An Exception subclass containing non-ASCII data that doesn't
            # know how to print itself properly. We shouldn't raise a
            # further exception.
            return ' '.join([smart_str(arg, encoding, strings_only,
                             errors) for arg in string])
        return str(string).encode(encoding, errors)
"resource": ""
} |
def get_urls(self):
    """Add URLs needed to handle image uploads.

    Prepends the ``quill-file-upload`` endpoint (wrapped in
    ``admin_site.admin_view`` so the usual admin auth applies) to the
    standard ModelAdmin URLs.

    NOTE(review): ``patterns()`` was removed in Django 1.10, so this
    targets an old Django; a plain list of ``url()``/``path()`` entries
    is the modern equivalent.
    """
    urls = patterns(
        '',
        url(r'^upload/$', self.admin_site.admin_view(self.handle_upload), name='quill-file-upload'),
    )
    return urls + super(QuillAdmin, self).get_urls()
"resource": ""
} |
def handle_upload(self, request):
    """Handle file uploads from WYSIWYG.

    Accepts only POST.  AJAX uploads send the raw request body as the
    file, with the filename in the ``quillUploadFile`` query parameter;
    regular form uploads must contain exactly one file under
    ``request.FILES['quillUploadFile']``.  The stored file's URL is
    returned as JSON.

    :raises Http404: for non-POST requests
    """
    if request.method != 'POST':
        raise Http404
    if request.is_ajax():
        try:
            # raw upload: body is the file, name comes from the query string
            filename = request.GET['quillUploadFile']
            data = request
            is_raw = True
        except KeyError:
            return HttpResponseBadRequest("Invalid file upload.")
    else:
        if len(request.FILES) != 1:
            return HttpResponseBadRequest("Can only upload 1 file at a time.")
        try:
            data = request.FILES['quillUploadFile']
            filename = data.name
            is_raw = False
        except KeyError:
            return HttpResponseBadRequest('Missing image `quillUploadFile`.')
    url = save_file(data, filename, is_raw, default_storage)
    response_data = {}
    response_data['url'] = url
    # Response content type needs to be text/html here or else
    # IE will try to download the file.
    return HttpResponse(json.dumps(response_data), content_type="text/html; charset=utf-8")
"resource": ""
} |
def render(self, name, value, attrs=None):
    """Render the Quill WYSIWYG widget.

    :param name: form field name
    :param value: current field value; ``None`` is rendered as ''
    :param attrs: extra HTML attributes merged into the widget;
        ``None`` (default) means no extras.  The previous mutable
        ``attrs={}`` default was shared between calls, so attributes
        set by one render could leak into later renders.
    :returns: safe HTML string for the editor
    """
    if value is None:
        value = ''
    if attrs is None:
        attrs = {}
    final_attrs = self.build_attrs(attrs, name=name)
    quill_app = apps.get_app_config('quill')
    quill_config = getattr(quill_app, self.config)
    return mark_safe(render_to_string(quill_config['template'], {
        'final_attrs': flatatt(final_attrs),
        'value': value,
        'id': final_attrs['id'],
        'config': self.config,
    }))
"resource": ""
} |
def formfield(self, **kwargs):
    """Build the form field for this model field.

    Defaults to :class:`RichTextFormField` configured with this
    field's Quill config; caller-supplied *kwargs* take precedence.
    """
    options = {'form_class': RichTextFormField, 'config': self.config}
    options.update(kwargs)
    return super(RichTextField, self).formfield(**options)
"resource": ""
} |
def render_toolbar(context, config):
    """Render the toolbar for the named Quill *config* using its
    configured toolbar template."""
    quill_config = getattr(quill_app, config)
    toolbar_template = template.loader.get_template(quill_config['toolbar_template'])
    return toolbar_template.render(context)
"resource": ""
} |
def get_meta_image_url(request, image):
    """
    Resize an image for metadata tags, and return an absolute URL to it.

    Uses the image's 'original' rendition and resolves its URL against
    the current request.
    """
    return request.build_absolute_uri(
        image.get_rendition(filter='original').url)
"resource": ""
} |
def check_mdrun_success(logfile):
    """Check if ``mdrun`` finished successfully.

    Analyses the output from ``mdrun`` in *logfile*: looks for a line
    starting with "Finished mdrun on" within the last 1kb of the file
    (the file must be seekable).

    :Arguments:
       *logfile* : filename
           Logfile produced by ``mdrun``.
    :Returns: ``True`` if all ok, ``False`` if not finished, and
              ``None`` if the *logfile* cannot be opened
    """
    if not os.path.exists(logfile):
        return None
    with open(logfile, 'rb') as log:
        # Only inspect the final 1kb.  Clamp the offset: seeking before the
        # start of a file raises OSError, which broke logs shorter than 1kb.
        log.seek(-min(1024, os.path.getsize(logfile)), 2)
        for line in log:
            line = line.decode('ASCII')
            if line.startswith("Finished mdrun on"):
                return True
    return False
"resource": ""
} |
def get_double_or_single_prec_mdrun():
    """Return double precision ``mdrun`` or fall back to single precision.

    This convenience function tries :func:`gromacs.mdrun_d` first and
    if it cannot run it, falls back to :func:`gromacs.mdrun` (without
    further checking).

    .. versionadded:: 0.5.1
    """
    try:
        # invoking the help screen is a cheap way to test the binary works
        gromacs.mdrun_d(h=True, stdout=False, stderr=False)
        logger.debug("using double precision gromacs.mdrun_d")
        return gromacs.mdrun_d
    except (AttributeError, GromacsError, OSError):
        # fall back to mdrun if no double precision binary
        wmsg = "No 'mdrun_d' binary found so trying 'mdrun' instead.\n"\
               "(Note that energy minimization runs better with mdrun_d.)"
        # Logger.warn() has been deprecated since Python 3.3; use warning()
        logger.warning(wmsg)
        warnings.warn(wmsg, category=AutoCorrectionWarning)
        return gromacs.mdrun
"resource": ""
} |
def commandline(self, **mpiargs):
    """Return the command line (as a list) used to invoke mdrun.

    When :attr:`mpiexec` is set, the MPI launcher list produced by
    :meth:`mpicommand` (built from *mpiargs*) is prepended to the
    plain ``mdrun`` invocation configured at construction time.
    Override :meth:`mpicommand` when the simple default OpenMP
    launcher does not fit your system.
    """
    mdrun_cmd = self.MDRUN.commandline()
    if not self.mpiexec:
        return mdrun_cmd
    return self.mpicommand(**mpiargs) + mdrun_cmd
"resource": ""
} |
def mpicommand(self, *args, **kwargs):
    """Return the MPI launcher portion of the command line as a list.

    Only the primitive form ``mpiexec -n <ncores>`` is implemented;
    *ncores* is taken from *kwargs* (default 8).  Override this method
    for more complicated MPI setups.

    :raises NotImplementedError: when :attr:`mpiexec` is unset
    """
    if self.mpiexec is None:
        raise NotImplementedError("Override mpiexec to enable the simple OpenMP launcher")
    # simple OpenMP-style example implementation
    n_cores = kwargs.pop('ncores', 8)
    return [self.mpiexec, '-n', str(n_cores)]
"resource": ""
} |
def prehook(self, **kwargs):
    """Launch the local smpd daemon and return its exit code."""
    cmd = ['smpd', '-s']
    logger.info("Starting smpd: " + " ".join(cmd))
    return subprocess.call(cmd)
"resource": ""
} |
def glob_parts(prefix, ext):
    """Find files from a continuation run.

    Matches ``<prefix>.<ext>`` together with the numbered continuation
    parts ``<prefix>.partNNNN.<ext>`` and returns them roughly sorted.
    A leading dot on *ext* is tolerated.
    """
    suffix = ext[1:] if ext.startswith('.') else ext
    found = glob.glob('{0}.{1}'.format(prefix, suffix))
    found += glob.glob('{0}.part[0-9][0-9][0-9][0-9].{1}'.format(prefix, suffix))
    found.sort()  # at least some rough sorting...
    return found
} |
def grompp_qtot(*args, **kwargs):
    r"""Run ``gromacs.grompp`` and return the total charge of the system.
    :Arguments:
       The arguments are the ones one would pass to :func:`gromacs.grompp`.
    :Returns:
       The total charge as reported (0 when no "non-zero total charge"
       line is found in the output)
    Some things to keep in mind:
    * The stdout output of grompp is only shown when an error occurs. For
      debugging, look at the log file or screen output and try running the
      normal :func:`gromacs.grompp` command and analyze the output if the
      debugging messages are not sufficient.
    * Check that ``qtot`` is correct. Because the function is based on pattern
      matching of the informative output of :program:`grompp` it can break when
      the output format changes. This version recognizes lines like ::
         ' System has non-zero total charge: -4.000001e+00'
      using the regular expression
      :regexp:`System has non-zero total charge: *(?P<qtot>[-+]?\d*\.\d+([eE][-+]\d+)?)`.
    """
    qtot_pattern = re.compile('System has non-zero total charge: *(?P<qtot>[-+]?\d*\.\d+([eE][-+]\d+)?)')
    # make sure to capture ALL output
    kwargs['stdout'] = False
    kwargs['stderr'] = False
    rc, output, error = grompp_warnonly(*args, **kwargs)
    gmxoutput = "\n".join([x for x in [output, error] if x is not None])
    if rc != 0:
        # error occured and we want to see the whole output for debugging
        msg = "grompp_qtot() failed. See warning and screen output for clues."
        logger.error(msg)
        import sys
        sys.stderr.write("=========== grompp (stdout/stderr) ============\n")
        sys.stderr.write(gmxoutput)
        sys.stderr.write("===============================================\n")
        sys.stderr.flush()
        raise GromacsError(rc, msg)
    # scan the combined output for the charge report; first match wins
    qtot = 0
    for line in gmxoutput.split('\n'):
        m = qtot_pattern.search(line)
        if m:
            qtot = float(m.group('qtot'))
            break
    logger.info("system total charge qtot = {qtot!r}".format(**vars()))
    return qtot
"resource": ""
} |
q257725 | _mdp_include_string | validation | def _mdp_include_string(dirs):
"""Generate a string that can be added to a mdp 'include = ' line."""
include_paths = [os.path.expanduser(p) for p in dirs]
return ' -I'.join([''] + include_paths) | python | {
"resource": ""
} |
def create_portable_topology(topol, struct, **kwargs):
    """Create a processed topology.
    The processed (or portable) topology file does not contain any
    ``#include`` statements and hence can be easily copied around. It
    also makes it possible to re-grompp without having any special itp
    files available.
    :Arguments:
      *topol*
          topology file
      *struct*
          coordinat (structure) file
    :Keywords:
      *processed*
          name of the new topology file; if not set then it is named like
          *topol* but with ``pp_`` prepended
      *includes*
          path or list of paths of directories in which itp files are
          searched for
      *grompp_kwargs**
          other options for :program:`grompp` such as ``maxwarn=2`` can
          also be supplied
    :Returns: full path to the processed topology
    """
    _topoldir, _topol = os.path.split(topol)
    processed = kwargs.pop('processed', os.path.join(_topoldir, 'pp_'+_topol))
    grompp_kwargs, mdp_kwargs = filter_grompp_options(**kwargs)
    mdp_kwargs = add_mdp_includes(topol, mdp_kwargs)
    # mode='w' is required: NamedTemporaryFile defaults to binary mode, and
    # writing a str to a binary handle raises TypeError on Python 3.
    with tempfile.NamedTemporaryFile(suffix='.mdp', mode='w') as mdp:
        mdp.write('; empty mdp file\ninclude = {include!s}\n'.format(**mdp_kwargs))
        mdp.flush()
        grompp_kwargs['p'] = topol
        grompp_kwargs['pp'] = processed
        grompp_kwargs['f'] = mdp.name
        grompp_kwargs['c'] = struct
        grompp_kwargs['v'] = False
        try:
            gromacs.grompp(**grompp_kwargs)
        finally:
            # grompp drops these in the cwd; remove them regardless of success
            utilities.unlink_gmx('topol.tpr', 'mdout.mdp')
    return utilities.realpath(processed)
"resource": ""
} |
def edit_txt(filename, substitutions, newname=None):
    """Primitive text file stream editor.
    This function can be used to edit free-form text files such as the
    topology file. By default it does an **in-place edit** of
    *filename*. If *newname* is supplied then the edited
    file is written to *newname*.
    :Arguments:
       *filename*
           input text file
       *substitutions*
           substitution commands (see below for format)
       *newname*
           output filename; if ``None`` then *filename* is changed in
           place [``None``]
    *substitutions* is a list of triplets; the first two elements are regular
    expression strings, the last is the substitution value. It mimics
    ``sed`` search and replace. The rules for *substitutions*:
    .. productionlist::
       substitutions: "[" search_replace_tuple, ... "]"
       search_replace_tuple: "(" line_match_RE "," search_RE "," replacement ")"
       line_match_RE: regular expression that selects the line (uses match)
       search_RE: regular expression that is searched in the line
       replacement: replacement string for search_RE
    Running :func:`edit_txt` does pretty much what a simple ::
       sed /line_match_RE/s/search_RE/replacement/
    with repeated substitution commands does.
    Special replacement values:
    - ``None``: the rule is ignored
    - ``False``: the line is deleted (even if other rules match)
    .. note::
       * No sanity checks are performed and the substitutions must be supplied
         exactly as shown.
       * All substitutions are applied to a line; thus the order of the substitution
         commands may matter when one substitution generates a match for a subsequent rule.
       * If replacement is set to ``None`` then the whole expression is ignored and
         whatever is in the template is used. To unset values you must provided an
         empty string or similar.
       * Delete a matching line if replacement=``False``.
    """
    if newname is None:
        newname = filename
    # No sanity checks (figure out later how to give decent diagnostics).
    # Filter out any rules that have None in replacement.
    _substitutions = [{'lRE': re.compile(str(lRE)),
                       'sRE': re.compile(str(sRE)),
                       'repl': repl}
                      for lRE,sRE,repl in substitutions if repl is not None]
    # edit into a scratch file first so the input is never half-written
    with tempfile.TemporaryFile() as target:
        with open(filename, 'rb') as src:
            logger.info("editing txt = {0!r} ({1:d} substitutions)".format(filename, len(substitutions)))
            for line in src:
                line = line.decode("utf-8")
                keep_line = True
                # every rule is applied to the line, in order
                for subst in _substitutions:
                    m = subst['lRE'].match(line)
                    if m:  # apply substition to this line?
                        logger.debug('match: '+line.rstrip())
                        if subst['repl'] is False:  # special rule: delete line
                            keep_line = False
                        else:  # standard replacement
                            line = subst['sRE'].sub(str(subst['repl']), line)
                            logger.debug('replaced: '+line.rstrip())
                if keep_line:
                    target.write(line.encode('utf-8'))
                else:
                    logger.debug("Deleting line %r", line)
        # rewind the scratch file and copy it over the destination
        target.seek(0)
        with open(newname, 'wb') as final:
            shutil.copyfileobj(target, final)
    logger.info("edited txt = {newname!r}".format(**vars()))
"resource": ""
} |
def make_ndx_captured(**kwargs):
    """make_ndx that captures all output

    Standard :func:`~gromacs.make_ndx` command with the input and
    output pre-set so the group listing can be fed to
    :func:`parse_ndxlist`: ``stdout`` is forced off, any user-supplied
    ``q`` commands are stripped, and the ``'', 'q'`` sequence that
    prints the group list and quits is appended.

    :Arguments:
       keywords are passed on to :func:`~gromacs.make_ndx`
    :Returns:
       (*returncode*, *output*, ``None``)
    """
    kwargs['stdout'] = False  # required for proper output as described in doc
    commands = [cmd for cmd in kwargs.pop('input', []) if cmd != 'q']
    kwargs['input'] = commands + ['', 'q']  # necessary closing commands
    return gromacs.make_ndx(**kwargs)
"resource": ""
} |
def parse_groups(output):
    """Parse ``make_ndx`` output and return groups as a list of dicts."""
    groups = []
    for line in output.split('\n'):
        match = NDXGROUP.match(line)
        if not match:
            continue
        fields = match.groupdict()
        groups.append({'name': fields['GROUPNAME'],
                       'nr': int(fields['GROUPNUMBER']),
                       'natoms': int(fields['NATOMS'])})
    return groups
"resource": ""
} |
def delete_frames(self):
    """Remove every frame file matching :attr:`frameglob`."""
    for frame_path in glob.glob(self.frameglob):
        os.unlink(frame_path)
"resource": ""
} |
def gmx_resid(self, resid):
    """Map *resid* to the Gromacs index resid using :attr:`offset`.

    ``offset`` may be a mapping (direct lookup of the gmx resid) or a
    number that is added to *resid*; a mapping without the key raises
    :exc:`KeyError`.
    """
    try:
        return int(self.offset[resid])
    except (TypeError, IndexError):
        # numeric offset: simple shift
        return resid + self.offset
    except KeyError:
        raise KeyError("offset must be a dict that contains the gmx resid for {0:d}".format(resid))
"resource": ""
} |
def combine(self, name_all=None, out_ndx=None, operation='|', defaultgroups=False):
    """Combine individual groups into a single one and write output.
    :Keywords:
       name_all : string
          Name of the combined group, ``None`` generates a name.  [``None``]
       out_ndx : filename
          Name of the output file that will contain the individual groups
          and the combined group. If ``None`` then default from the class
          constructor is used. [``None``]
       operation : character
          Logical operation that is used to generate the combined group from
          the individual groups: "|" (OR) or "&" (AND); if set to ``False``
          then no combined group is created and only the individual groups
          are written. ["|"]
       defaultgroups : bool
          ``True``: append everything to the default groups produced by
          :program:`make_ndx` (or rather, the groups provided in the ndx file on
          initialization --- if this was ``None`` then these are truly default groups);
          ``False``: only use the generated groups
    :Returns:
       ``(combinedgroup_name, output_ndx)``, a tuple showing the
       actual group name and the name of the file; useful when all names are autogenerated.
    .. Warning:: The order of the atom numbers in the combined group is
       *not* guaranteed to be the same as the selections on input because
       ``make_ndx`` sorts them ascending. Thus you should be careful when
       using these index files for calculations of angles and dihedrals.
       Use :class:`gromacs.formats.NDX` in these cases.
    .. SeeAlso:: :meth:`IndexBuilder.write`.
    """
    if not operation in ('|', '&', False):
        raise ValueError("Illegal operation {0!r}, only '|' (OR) and '&' (AND) or False allowed.".format(
            operation))
    # auto-generate a combined-group name from the input file names
    if name_all is None and operation:
        name_all = self.name_all or operation.join(self.indexfiles)
    if out_ndx is None:
        out_ndx = self.output
    if defaultgroups:
        # make a default file (using the original ndx where provided!!)
        fd, default_ndx = tempfile.mkstemp(suffix='.ndx', prefix='default__')
        try:
            self.make_ndx(o=default_ndx, input=['q'])
        except:
            utilities.unlink_gmx(default_ndx)
            raise
        ndxfiles = [default_ndx]
    else:
        ndxfiles = []
    ndxfiles.extend(self.indexfiles.values())
    if operation:
        # combine multiple selections and name them
        try:
            fd, tmp_ndx = tempfile.mkstemp(suffix='.ndx', prefix='combined__')
            # combine all selections by loading ALL temporary index files
            operation = ' '+operation.strip()+' '
            cmd = [operation.join(['"{0!s}"'.format(gname) for gname in self.indexfiles]),
                   '', 'q']
            rc,out,err = self.make_ndx(n=ndxfiles, o=tmp_ndx, input=cmd)
            if self._is_empty_group(out):
                warnings.warn("No atoms found for {cmd!r}".format(**vars()),
                              category=BadParameterWarning)
            # second pass for naming, sigh (or: use NDX ?)
            groups = parse_ndxlist(out)
            last = groups[-1]
            # name this group (the combined group is always the last listed)
            name_cmd = ["name {0:d} {1!s}".format(last['nr'], name_all), 'q']
            rc,out,err = self.make_ndx(n=tmp_ndx, o=out_ndx, input=name_cmd)
            # For debugging, look at out and err or set stdout=True, stderr=True
            # TODO: check out if at least 1 atom selected
            ##print "DEBUG: combine()"
            ##print out
        finally:
            utilities.unlink_gmx(tmp_ndx)
            if defaultgroups:
                utilities.unlink_gmx(default_ndx)
    else:
        # just write individual groups in one file (name_all --> None)
        rc,out,err = self.make_ndx(n=ndxfiles, o=out_ndx, input=['','q'])
    return name_all, out_ndx
"resource": ""
} |
def cat(self, out_ndx=None):
    """Concatenate input index files.

    Generates a new index file containing the default Gromacs index
    groups (if a structure file was defined) and all groups from the
    input index files.

    :Arguments:
       out_ndx : filename
          Name of the output index file; ``None`` uses the default
          provided to the constructor. [``None``]
    :Returns: the output index file name
    """
    target = self.output if out_ndx is None else out_ndx
    self.make_ndx(o=target, input=['q'])
    return target
"resource": ""
} |
def _process_command(self, command, name=None):
    """Process a ``make_ndx`` *command* and return ``(name, ndx_file)``.

    Runs the command against the current index, then re-runs
    ``make_ndx`` to keep only the resulting group and rename it to
    *name* (auto-generated ``CMDnnn`` when omitted).  The group is
    written to a temporary index file the caller must clean up.
    """
    self._command_counter += 1
    if name is None:
        name = "CMD{0:03d}".format(self._command_counter)
    # Need to build it with two make_ndx calls because I cannot reliably
    # name the new group without knowing its number.
    try:
        fd, tmp_ndx = tempfile.mkstemp(suffix='.ndx', prefix='tmp_'+name+'__')
        cmd = [command, '', 'q']  # empty command '' necessary to get list
        # This sometimes fails with 'OSError: Broken Pipe' --- hard to debug
        rc,out,err = self.make_ndx(o=tmp_ndx, input=cmd)
        self.check_output(out, "No atoms found for selection {command!r}.".format(**vars()), err=err)
        # For debugging, look at out and err or set stdout=True, stderr=True
        # TODO: check ' 0 r_300_&_ALA_&_O : 1 atoms' has at least 1 atom
        ##print "DEBUG: _process_command()"
        ##print out
        groups = parse_ndxlist(out)
        last = groups[-1]
        # reduce and name this group (new group is always the last listed)
        fd, ndx = tempfile.mkstemp(suffix='.ndx', prefix=name+'__')
        name_cmd = ["keep {0:d}".format(last['nr']),
                    "name 0 {0!s}".format(name), 'q']
        rc,out,err = self.make_ndx(n=tmp_ndx, o=ndx, input=name_cmd)
    finally:
        utilities.unlink_gmx(tmp_ndx)
    return name, ndx
"resource": ""
} |
def _process_range(self, selection, name=None):
    """Process a range selection.
    ("S234", "A300", "CA") --> selected all CA in this range
    ("S234", "A300") --> selected all atoms in this range
    .. Note:: Ignores residue type, only cares about the resid (but still required)

    :returns: ``(name, ndx_file)`` for the temporary index file created
    """
    try:
        first, last, gmx_atomname = selection
    except ValueError:
        # two-element form: select all atoms in the range
        try:
            first, last = selection
            gmx_atomname = '*'
        except:
            logger.error("%r is not a valid range selection", selection)
            raise
    if name is None:
        name = "{first!s}-{last!s}_{gmx_atomname!s}".format(**vars())
    # translate endpoints to gmx resids (applies the user-supplied offset)
    _first = self._translate_residue(first, default_atomname=gmx_atomname)
    _last = self._translate_residue(last, default_atomname=gmx_atomname)
    # NOTE(review): the doubled '& &' looks odd -- confirm it is intended
    # make_ndx syntax and not a typo for a single '&'.
    _selection = 'r {0:d} - {1:d} & & a {2!s}'.format(_first['resid'], _last['resid'], gmx_atomname)
    cmd = ['keep 0', 'del 0',
           _selection,
           'name 0 {name!s}'.format(**vars()),
           'q']
    fd, ndx = tempfile.mkstemp(suffix='.ndx', prefix=name+'__')
    rc,out,err = self.make_ndx(n=self.ndx, o=ndx, input=cmd)
    self.check_output(out, "No atoms found for "
                      "%(selection)r --> %(_selection)r" % vars())
    # For debugging, look at out and err or set stdout=True, stderr=True
    ##print "DEBUG: _process_residue()"
    ##print out
    return name, ndx
"resource": ""
} |
def _translate_residue(self, selection, default_atomname='CA'):
    """Translate selection for a single res to make_ndx syntax.

    Parses *selection* with the :attr:`RESIDUE` pattern, applies the
    resid offset via :meth:`gmx_resid`, and expands one-letter
    amino-acid codes to three-letter names.

    :returns: dict with keys ``resname``, ``resid``, ``atomname``
    :raises ValueError: when the selection does not match the pattern
    """
    m = self.RESIDUE.match(selection)
    if not m:
        errmsg = "Selection {selection!r} is not valid.".format(**vars())
        logger.error(errmsg)
        raise ValueError(errmsg)
    gmx_resid = self.gmx_resid(int(m.group('resid')))  # magic offset correction
    residue = m.group('aa')
    if len(residue) == 1:
        gmx_resname = utilities.convert_aa_code(residue)  # only works for AA
    else:
        gmx_resname = residue  # use 3-letter for any resname
    gmx_atomname = m.group('atom')
    if gmx_atomname is None:
        gmx_atomname = default_atomname
    return {'resname':gmx_resname, 'resid':gmx_resid, 'atomname':gmx_atomname}
"resource": ""
} |
def check_output(self, make_ndx_output, message=None, err=None):
    """Run simple diagnostics on ``make_ndx`` output.

    Warns (and returns ``False``) on an empty selection; raises
    :exc:`GromacsError` on a syntax error or on completely empty
    output; returns ``True`` when the output looks fine.
    """
    message = '' if message is None else '\n' + message

    def fenced(output, w=60):
        # frame the raw make_ndx output so it stands out in tracebacks
        hrule = "====[ GromacsError (diagnostic output) ]".ljust(w, "=")
        return hrule + '\n' + str(output) + hrule

    ok = True
    if self._is_empty_group(make_ndx_output):
        warnings.warn("Selection produced empty group.{message!s}".format(**vars()),
                      category=GromacsValueWarning)
        ok = False
    if self._has_syntax_error(make_ndx_output):
        out_formatted = fenced(make_ndx_output)
        raise GromacsError("make_ndx encountered a Syntax Error, "
                           "%(message)s\noutput:\n%(out_formatted)s" % vars())
    if make_ndx_output.strip() == "":
        out_formatted = fenced(err)
        raise GromacsError("make_ndx produced no output, "
                           "%(message)s\nerror output:\n%(out_formatted)s" % vars())
    return ok
"resource": ""
} |
def outfile(self, p):
    """Return the path for an output file.

    With :attr:`outdir` set the result is ``outdir/basename(p)``,
    otherwise *p* is returned unchanged.
    """
    if self.outdir is None:
        return p
    return os.path.join(self.outdir, os.path.basename(p))
def center_fit(self, **kwargs):
    """Write a compact xtc that is centered and fitted to the tpr reference structure.

    See :func:`gromacs.cbook.trj_fitandcenter` for details and a description
    of *kwargs* (including *input*, *input1*, *n* and *n1* for custom index
    groups).  The most important keywords:

    :Keywords:
       *s*
          input structure (typically the default tpr file but can be set to
          some other file with a different conformation for fitting)
       *n*
          alternative index file
       *o*
          name of the output trajectory
       *xy* : boolean
          if ``True`` then only fit in the xy-plane (useful for a membrane
          normal to z); default ``False``
       *force*
          ``True``: overwrite existing trajectories; ``False``: raise
          :exc:`IOError`; ``None``: skip existing files and log a warning
          [default]

    :Returns: dictionary with keys *tpr*, *xtc* (names of the new files)
    """
    kwargs.setdefault('s', self.tpr)
    kwargs.setdefault('n', self.ndx)
    kwargs['f'] = self.xtc
    kwargs.setdefault('o', self.outfile(self.infix_filename(None, self.xtc, '_centfit', 'xtc')))
    # 'force' is our own policy keyword, not a trj_fitandcenter argument
    force = kwargs.pop('force', self.force)
    logger.info("Centering and fitting trajectory {f!r}...".format(**kwargs))
    with utilities.in_dir(self.dirname):
        # only (re)build the trajectory when allowed by the force policy
        if not self.check_file_exists(kwargs['o'], resolve="indicate", force=force):
            trj_fitandcenter(**kwargs)
        logger.info("Centered and fit trajectory: {o!r}.".format(**kwargs))
    # report paths relative to the transformer's reference directory
    return {'tpr': self.rp(kwargs['s']), 'xtc': self.rp(kwargs['o'])}
def fit(self, xy=False, **kwargs):
    """Write an xtc that is fitted to the tpr reference structure.

    Runs :class:`gromacs.tools.trjconv` with appropriate arguments for
    fitting.  The defaults do *not* include centering or periodic boundary
    treatment because these often interact badly with fitting; do that as a
    separate step (see :meth:`center_fit` or
    :func:`gromacs.cbook.trj_fitandcenter`).

    :Keywords:
       *s*
           reference structure for the fit [default: the tpr file]
       *n*
           alternative index file
       *o*
           name of the output trajectory; a default name is generated and,
           if e.g. *dt* = 100 is supplied, includes "_dt100ps"
       *xy* : boolean
           only do a rot+trans fit in the xy plane (good for membrane
           simulations); default ``False``
       *force*
           ``True``: overwrite; ``False``: raise :exc:`IOError`;
           ``None``: skip existing files and log a warning [default]
       *fitgroup*
           index group to fit on ["backbone"]; an explicit *input* keyword
           ``[fitgroup, outgroup]`` overrides it

    :Returns: dictionary with keys *tpr*, *xtc* (names of the new files)
    :Raises: :exc:`ValueError` if centering is requested without the three
        required index groups (center, fit, output)
    """
    kwargs.setdefault('s', self.tpr)
    kwargs.setdefault('n', self.ndx)
    kwargs['f'] = self.xtc
    force = kwargs.pop('force', self.force)
    if xy:
        fitmode = 'rotxy+transxy'
        kwargs.pop('fit', None)   # xy fitting overrides any explicit fit mode
        infix_default = '_fitxy'
    else:
        fitmode = kwargs.pop('fit', 'rot+trans')  # user can use 'progressive', too
        infix_default = '_fit'
    dt = kwargs.get('dt')
    if dt:
        infix_default += '_dt{0:d}ps'.format(int(dt))  # dt in ps
    kwargs.setdefault('o', self.outfile(self.infix_filename(None, self.xtc, infix_default, 'xtc')))
    fitgroup = kwargs.pop('fitgroup', 'backbone')
    kwargs.setdefault('input', [fitgroup, "system"])
    if kwargs.get('center', False):
        logger.warn("Transformer.fit(): center=%(center)r used: centering should not be combined with fitting.", kwargs)
        # BUG FIX: the index groups live under 'input' (set above); the old
        # code looked up the non-existent key 'inputs' and raised KeyError.
        if len(kwargs['input']) != 3:
            logger.error("If you insist on centering you must provide three groups in the 'input' kwarg: (center, fit, output)")
            # BUG FIX: 'ValuError' was a NameError at runtime.
            raise ValueError("Insufficient index groups for centering,fitting,output")
    logger.info("Fitting trajectory %r to with xy=%r...", kwargs['f'], xy)
    logger.info("Fitting on index group %(fitgroup)r", vars())
    with utilities.in_dir(self.dirname):
        if self.check_file_exists(kwargs['o'], resolve="indicate", force=force):
            logger.warn("File %r exists; force regenerating it with force=True.", kwargs['o'])
        else:
            gromacs.trjconv(fit=fitmode, **kwargs)
            logger.info("Fitted trajectory (fitmode=%s): %r.", fitmode, kwargs['o'])
    return {'tpr': self.rp(kwargs['s']), 'xtc': self.rp(kwargs['o'])}
def strip_fit(self, **kwargs):
    """Strip water molecules and then fit the remaining system.

    Convenience wrapper that first runs :meth:`strip_water` and then
    :meth:`fit` on the stripped trajectory.

    - *strip_input* becomes the *input* of :meth:`strip_water` (only needed
      in special cases, e.g. ``['Other']`` when no Protein group exists)
      [default: ``['Protein']``]
    - *input*, *fitgroup*, *xy* and *fit* are handed through to :meth:`fit`;
      *fit* defaults to ``"rot+trans"``

    .. warning:: *fitgroup* can only be a Gromacs default group, not a
       custom one, because the indices change after stripping.
    """
    kwargs.setdefault('fit', 'rot+trans')
    # split off the keywords that belong to fit()
    fit_kwargs = {key: kwargs.pop(key)
                  for key in ('xy', 'fit', 'fitgroup', 'input')
                  if key in kwargs}
    kwargs['input'] = kwargs.pop('strip_input', ['Protein'])
    kwargs['force'] = fit_kwargs['force'] = kwargs.pop('force', self.force)
    stripped = self.strip_water(**kwargs)   # also updates self.nowater
    # use the transformer belonging to the trajectory we just produced
    return self.nowater[stripped['xtc']].fit(**fit_kwargs)
def create(logger_name, logfile='gromacs.log'):
    """Create a top level logger that writes to *logfile* and the console.

    The file handler records everything (including DEBUG) while the console
    handler only shows INFO and above.  See
    http://docs.python.org/library/logging.html?#logging-to-multiple-destinations

    Note: if the root logger also logs to the console there will be two
    console lines per message; we live with that for simplicity.
    """
    new_logger = logging.getLogger(logger_name)
    new_logger.setLevel(logging.DEBUG)
    # file handler: verbose format with timestamps, captures everything
    file_handler = logging.FileHandler(logfile)
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s'))
    new_logger.addHandler(file_handler)
    # console handler: INFO and above, simpler format
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(
        logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s'))
    new_logger.addHandler(console_handler)
    return new_logger
def get_configuration(filename=CONFIGNAME):
    """Reads and parses the configuration file.

    Default values are loaded and then replaced with the values from
    ``~/.gromacswrapper.cfg`` if that file exists. The global
    configuration instance :data:`gromacswrapper.config.cfg` is updated
    as are a number of global variables such as :data:`configdir`,
    :data:`qscriptdir`, :data:`templatesdir`, :data:`logfilename`, ...

    Normally, the configuration is only loaded when the :mod:`gromacs`
    package is imported but a re-reading of the configuration can be forced
    anytime by calling :func:`get_configuration`.

    :param filename: path of the configuration file to parse
    :Returns: the :class:`GMXConfigParser` instance (also stored in
        the module-level :data:`cfg`)
    """
    global cfg, configuration  # very iffy --- most of the whole config mod should be a class
    #: :data:`cfg` is the instance of :class:`GMXConfigParser` that makes all
    #: global configuration data accessible
    cfg = GMXConfigParser(filename=filename)  # update module-level cfg
    globals().update(cfg.configuration)       # update configdir, templatesdir ...
    configuration = cfg.configuration         # update module-level configuration
    return cfg
def setup(filename=CONFIGNAME):
    """Prepare a default GromacsWrapper global environment.

    1) Create the global config file.
    2) Create the directories in which the user can store template and
       config files.

    This function can be run repeatedly without harm.
    """
    # setup() must be separate and NOT run automatically when config
    # is loaded so that easy_install installations work
    # (otherwise we get a sandbox violation)
    # populate cfg with defaults (or existing data)
    get_configuration()
    if not os.path.exists(filename):
        # write the default file so that the user can edit it
        with open(filename, 'w') as configfile:
            cfg.write(configfile)
        print("NOTE: GromacsWrapper created the configuration file \n\t%r\n"
              " for you. Edit the file to customize the package." % filename)
    # make sure all configuration directories exist
    for d in config_directories:
        utilities.mkdir_p(d)
def check_setup():
    """Check whether the template directories exist; print help if not.

    Setting the environment variable
    :envvar:`GROMACSWRAPPER_SUPPRESS_SETUP_CHECK` skips the check and makes
    this always return ``True`` (useful for scripts run on a server).

    :return: ``True`` if all directories were found, ``False`` otherwise
    """
    if "GROMACSWRAPPER_SUPPRESS_SETUP_CHECK" in os.environ:
        return True
    missing = [d for d in config_directories if not os.path.exists(d)]
    if not missing:
        return True
    print("NOTE: Some configuration directories are not set up yet: ")
    print("\t{0!s}".format('\n\t'.join(missing)))
    print("NOTE: You can create the configuration file and directories with:")
    print("\t>>> import gromacs")
    print("\t>>> gromacs.config.setup()")
    return False
def get_tool_names():
    """Collect the tool names from every configured Gromacs tool group.

    :return: list of tool names
    """
    return [name
            for group in cfg.get('Gromacs', 'groups').split()
            for name in cfg.get('Gromacs', group).split()]
def configuration(self):
    """Dict of variables that the module exposes as globals.

    Can be used as ::

        globals().update(GMXConfigParser.configuration)  # update configdir, templatesdir ...
    """
    cfg_vars = {
        'configfilename': self.filename,
        'logfilename': self.getpath('Logging', 'logfilename'),
        'loglevel_console': self.getLogLevel('Logging', 'loglevel_console'),
        'loglevel_file': self.getLogLevel('Logging', 'loglevel_file'),
        'configdir': self.getpath('DEFAULT', 'configdir'),
        'qscriptdir': self.getpath('DEFAULT', 'qscriptdir'),
        'templatesdir': self.getpath('DEFAULT', 'templatesdir'),
    }
    # search path: current dir first, then the user's script/template dirs
    cfg_vars['path'] = [os.path.curdir,
                        cfg_vars['qscriptdir'],
                        cfg_vars['templatesdir']]
    return cfg_vars
def getpath(self, section, option):
    """Return the option value as a path with ``~`` and env vars expanded."""
    raw = self.get(section, option)
    return os.path.expanduser(os.path.expandvars(raw))
def getLogLevel(self, section, option):
    """Return the logging level for *option* (name or number).

    The stored value is always interpreted as an UPPERCASE level name, so
    integer log levels in the file are not recognized.

    .. SeeAlso: :mod:`logging` and :func:`logging.getLevelName`
    """
    level_name = self.get(section, option).upper()
    return logging.getLevelName(level_name)
q257750 | Collection._canonicalize | validation | def _canonicalize(self, filename):
"""Use .collection as extension unless provided"""
path, ext = os.path.splitext(filename)
if not ext:
ext = ".collection"
return path + ext | python | {
"resource": ""
} |
def scale_dihedrals(mol, dihedrals, scale, banned_lines=None):
    """Scale the force constants of the proper dihedrals of *mol* by *scale*.

    :param mol: molecule object whose ``dihedrals`` list is rewritten in place
    :param dihedrals: dict mapping ``"a1-a2-a3-a4-func"`` type keys (with
        ``X`` wildcards) to lists of force-field dihedral-type entries
    :param scale: multiplicative factor applied to the force constant ``kchi``
    :param banned_lines: force-field line numbers whose parameters must NOT
        be scaled (checked against the first entry of each matching list)
    :returns: *mol* with its ``dihedrals`` replaced by the scaled copies
    """
    if banned_lines is None:
        banned_lines = []
    new_dihedrals = []
    for dh in mol.dihedrals:
        atypes = dh.atom1.get_atomtype(), dh.atom2.get_atomtype(), dh.atom3.get_atomtype(), dh.atom4.get_atomtype()
        atypes = [a.replace("_", "").replace("=", "") for a in atypes]
        # special-case: this is a [ dihedral ] override in the molecule block;
        # scale it directly and don't match against the force-field types
        if dh.gromacs['param'] != []:
            for p in dh.gromacs['param']:
                # BUG FIX: the force-constant key is 'kchi' (as used for the
                # matched types below); 'kch' raised KeyError on overrides
                p['kchi'] *= scale
            new_dihedrals.append(dh)
            continue
        # try all 32 combinations of (atom order reversed) x (wildcard per position)
        for iswitch in range(32):
            if iswitch % 2 == 0:
                a1 = atypes[0]; a2 = atypes[1]; a3 = atypes[2]; a4 = atypes[3]
            else:
                a1 = atypes[3]; a2 = atypes[2]; a3 = atypes[1]; a4 = atypes[0]
            if (iswitch // 2) % 2 == 1: a1 = "X"
            if (iswitch // 4) % 2 == 1: a2 = "X"
            if (iswitch // 8) % 2 == 1: a3 = "X"
            if (iswitch // 16) % 2 == 1: a4 = "X"
            key = "{0}-{1}-{2}-{3}-{4}".format(a1, a2, a3, a4, dh.gromacs['func'])
            if key in dihedrals:
                for i, dt in enumerate(dihedrals[key]):
                    dhA = copy.deepcopy(dh)
                    param = copy.deepcopy(dt.gromacs['param'])
                    # Only check the first dihedral in a list
                    if not dihedrals[key][0].line in banned_lines:
                        for p in param:
                            p['kchi'] *= scale
                    dhA.gromacs['param'] = param
                    # record provenance on the first copy only
                    if i == 0:
                        dhA.comment = "; banned lines {0} found={1}\n".format(
                            " ".join(map(str, banned_lines)),
                            1 if dt.line in banned_lines else 0)
                        dhA.comment += "; parameters for types {}-{}-{}-{}-9 at LINE({})\n".format(
                            dhA.atom1.atomtype, dhA.atom2.atomtype, dhA.atom3.atomtype,
                            dhA.atom4.atomtype, dt.line).replace("_", "")
                    new_dihedrals.append(dhA)
                break
    mol.dihedrals = new_dihedrals
    return mol
def scale_impropers(mol, impropers, scale, banned_lines=None):
    """Scale the force constants of the improper dihedrals of *mol* by *scale*.

    :param mol: molecule object whose ``impropers`` list is rewritten in place
    :param impropers: dict mapping ``"a1-a2-a3-a4-func"`` type keys (with
        ``X`` wildcards) to lists of force-field improper-type entries
    :param scale: multiplicative factor applied to the force constant ``kpsi``
    :param banned_lines: force-field line numbers whose parameters must NOT
        be scaled (checked against the first entry of each matching list)
    :returns: *mol* with its ``impropers`` replaced by the scaled copies
    """
    if banned_lines is None:
        banned_lines = []
    new_impropers = []
    for im in mol.impropers:
        atypes = (im.atom1.get_atomtype(), im.atom2.get_atomtype(),
                  im.atom3.get_atomtype(), im.atom4.get_atomtype())
        atypes = [a.replace("_", "").replace("=", "") for a in atypes]
        # special-case: this is a [ dihedral ] override in molecule block, continue and don't match
        if im.gromacs['param'] != []:
            for p in im.gromacs['param']:
                p['kpsi'] *= scale
            new_impropers.append(im)
            continue
        # try all 32 combinations of (atom order reversed) x (wildcard per position)
        for iswitch in range(32):
            if (iswitch%2==0):
                a1=atypes[0]; a2=atypes[1]; a3=atypes[2]; a4=atypes[3];
            else:
                a1=atypes[3]; a2=atypes[2]; a3=atypes[1]; a4=atypes[0];
            if((iswitch//2)%2==1): a1="X";
            if((iswitch//4)%2==1): a2="X";
            if((iswitch//8)%2==1): a3="X";
            if((iswitch//16)%2==1): a4="X";
            key = "{0}-{1}-{2}-{3}-{4}".format(a1, a2, a3, a4, im.gromacs['func'])
            if (key in impropers):
                for i, imt in enumerate(impropers[key]):
                    imA = copy.deepcopy(im)
                    param = copy.deepcopy(imt.gromacs['param'])
                    # Only check the first dihedral in a list
                    if not impropers[key][0].line in banned_lines:
                        for p in param: p['kpsi'] *= scale
                    imA.gromacs['param'] = param
                    # record provenance on the first copy only
                    if i == 0:
                        imA.comment = "; banned lines {0} found={1}\n ; parameters for types {2}-{3}-{4}-{5}-9 at LINE({6})\n".format(
                            " ".join(map(str, banned_lines)),
                            1 if imt.line in banned_lines else 0,
                            imt.atype1, imt.atype2, imt.atype3, imt.atype4, imt.line)
                    new_impropers.append(imA)
                break
    #assert(len(mol.impropers) == new_impropers)
    mol.impropers = new_impropers
    return mol
def besttype(x):
    """Convert string *x* to the most specific useful type.

    Tries int, then float, then falls back to a Unicode string.  A quoted
    string (single or double quotes) is returned with the quotes stripped.

    .. Note:: Strings are returned as Unicode strings (via
       :func:`to_unicode`).
    """
    x = to_unicode(x)  # make unicode as soon as possible
    try:
        x = x.strip()
    except AttributeError:
        pass
    quoted = re.match(r"""['"](?P<value>.*)["']$""", x)
    if quoted is not None:
        # quoted string: strip the quotes and return the contents
        return to_unicode(quoted.group('value'))
    # not quoted: try converters in increasing order of lenience
    for converter in (int, float, to_unicode):
        try:
            return converter(x)
        except ValueError:
            pass
    return x
def to_int64(a):
    """Return the recarray with every int32 field promoted to int64."""
    def _promote(typestr):
        # '<i4' -> '<i8': keep the byte-order prefix, widen the integer
        return typestr[0] + 'i8' if typestr[1:] == 'i4' else typestr

    widened = [(name, _promote(typestr)) for name, typestr in a.dtype.descr]
    return a.astype(widened)
def irecarray_to_py(a):
    """Slow conversion of a recarray into records with plain Python types.

    Field names are available from :attr:`a.dtype.names`.

    :Returns: an iterator of tuples, so that big input arrays can be
        processed lazily
    """
    converters = [pyify(typestr) for name, typestr in a.dtype.descr]

    def _convert(record):
        return tuple(conv(value) for conv, value in zip(converters, record))

    return (_convert(record) for record in a)
def col(self, c):
    """Parse an XPM colour specification and return ``(symbol, value)``."""
    match = self.COLOUR.search(c)
    if match is None:
        self.logger.fatal("Cannot parse colour specification %r.", c)
        raise ParseError("XPM reader: Cannot parse colour specification {0!r}.".format(c))
    symbol = match.group('symbol')
    value = match.group('value')
    self.logger.debug("%s: %s %s\n", c.strip(), symbol, value)
    return symbol, value
def transform_args(self, *args, **kwargs):
    """Convert *kwargs* to command-line options and return them plus *args*.

    Heuristic: single-letter keys become POSIX options (``-x value``),
    longer keys become GNU options (``--xyz=value``); ``True`` makes a bare
    switch and ``False`` raises :exc:`ValueError` because its meaning would
    be ambiguous.
    """
    options = []
    for flag, value in kwargs.items():
        if not flag.startswith('-'):
            # heuristic for turning key=val pairs into options
            # (fails for commands such as 'find' -- then just use args)
            flag = ('-' if len(flag) == 1 else '--') + flag
        if value is True:
            options.append(flag)       # switch without an argument
            continue
        if value is False:
            raise ValueError('A False value is ambiguous for option {0!r}'.format(flag))
        if flag.startswith('--'):
            options.append('{0}={1}'.format(flag, value))   # GNU option
        else:
            options.extend((flag, str(value)))              # POSIX style
    return options + list(args)
def help(self, long=False):
    """Print help; same as using ``?`` in ``ipython``. long=True also gives call signature."""
    print("\ncommand: {0!s}\n\n".format(self.command_name))
    print(self.__doc__)
    if not long:
        return
    # also show the documentation of the call method
    print("\ncall method: command():\n")
    print(self.__call__.__doc__)
q257759 | GromacsCommand._combineargs | validation | def _combineargs(self, *args, **kwargs):
"""Add switches as 'options' with value True to the options dict."""
d = {arg: True for arg in args} # switches are kwargs with value True
d.update(kwargs)
return d | python | {
"resource": ""
} |
q257760 | GromacsCommand._build_arg_list | validation | def _build_arg_list(self, **kwargs):
"""Build list of arguments from the dict; keys must be valid gromacs flags."""
arglist = []
for flag, value in kwargs.items():
# XXX: check flag against allowed values
flag = str(flag)
if flag.startswith('_'):
flag = flag[1:] # python-illegal keywords are '_'-quoted
if not flag.startswith('-'):
flag = '-' + flag # now flag is guaranteed to start with '-'
if value is True:
arglist.append(flag) # simple command line flag
elif value is False:
if flag.startswith('-no'):
# negate a negated flag ('noX=False' --> X=True --> -X ... but who uses that?)
arglist.append('-' + flag[3:])
else:
arglist.append('-no' + flag[1:]) # gromacs switches booleans by prefixing 'no'
elif value is None:
pass # ignore flag = None
else:
try:
arglist.extend([flag] + value) # option with value list
except TypeError:
arglist.extend([flag, value]) # option with single value
return list(map(str, arglist)) | python | {
"resource": ""
} |
def transform_args(self, *args, **kwargs):
    """Combine switches and kwargs, then render them as gromacs flags."""
    combined = self._combineargs(*args, **kwargs)
    return self._build_arg_list(**combined)
def _get_gmx_docs(self):
    """Extract the standard gromacs documentation for this tool.

    Runs the program with ``-h`` and keeps only the text from 'DESCRIPTION'
    onwards (via :attr:`doc_pattern`).  The result is cached in
    :attr:`_doc_cache`; on any failure a placeholder string is cached.
    """
    if self._doc_cache is not None:
        return self._doc_cache
    try:
        # temporarily silence logging while probing the executable
        logging.disable(logging.CRITICAL)
        rc, header, docs = self.run('h', stdout=PIPE, stderr=PIPE, use_input=False)
    # BUG FIX: was a bare 'except:' which also swallowed
    # KeyboardInterrupt/SystemExit; only trap real errors.
    except Exception:
        logging.critical("Invoking command {0} failed when determining its doc string. Proceed with caution".format(self.command_name))
        self._doc_cache = "(No Gromacs documentation available)"
        return self._doc_cache
    finally:
        # ALWAYS restore logging...
        logging.disable(logging.NOTSET)
    # The header is on STDOUT and is ignored. The docs are read from STDERR in GMX 4.
    m = re.match(self.doc_pattern, docs, re.DOTALL)
    if m is None:
        # In GMX 5, the opposite is true (Grrr)
        m = re.match(self.doc_pattern, header, re.DOTALL)
        if m is None:
            self._doc_cache = "(No Gromacs documentation available)"
            return self._doc_cache
    self._doc_cache = m.group('DOCS')
    return self._doc_cache
def autoconvert(s):
    """Convert input to a numerical type if possible.

    1. A non-string object is returned unchanged.
    2. A string is whitespace-split and conversion to int, float, then str
       is attempted; one token yields a scalar, several yield a
       :class:`numpy.ndarray`.

    :Raises: :exc:`ValueError` if no conversion succeeds
    """
    if type(s) is not str:
        return s
    # try converters in increasing order of lenience
    for converter in (int, float, str):
        try:
            values = [converter(token) for token in s.split()]
        except (ValueError, AttributeError):
            continue
        return values[0] if len(values) == 1 else numpy.array(values)
    raise ValueError("Failed to autoconvert {0!r}".format(s))
def isstream(obj):
    """Detect if `obj` is a stream.

    We consider anything a stream that has the method

    - ``close()``

    and either set of the following

    - ``read()``, ``readline()``, ``readlines()``
    - ``write()``, ``writeline()``, ``writelines()``

    :Arguments:
       *obj*
           stream or str
    :Returns:
       *bool*, ``True`` if `obj` is a stream, ``False`` otherwise

    .. SeeAlso:: :mod:`io`

    .. versionadded:: 0.7.1
    """
    signature_methods = ("close",)
    alternative_methods = (
        ("read", "readline", "readlines"),
        ("write", "writeline", "writelines"))
    # Must have ALL of the signature methods.
    # (builtin all/any replace the former numpy.all/numpy.any: no numpy
    # dependency for pure boolean logic and a plain bool is returned)
    if not all(hasmethod(obj, m) for m in signature_methods):
        return False
    # ... and at least one complete set of read or write methods.
    return any(all(hasmethod(obj, m) for m in alternatives)
               for alternatives in alternative_methods)
def convert_aa_code(x):
    """Convert between 3-letter and 1-letter amino acid codes."""
    code = x.upper()
    if len(x) == 1:
        return amino_acid_codes[code]
    if len(x) == 3:
        return inverse_aa_codes[code]
    raise ValueError("Can only convert 1-letter or 3-letter amino acid codes, "
                     "not %r" % x)
def in_dir(directory, create=True):
    """Context manager to execute a code block in a directory.

    * The directory is created if it does not exist (unless
      create=False is set)
    * At the end or after an exception code always returns to
      the directory that was the current directory before entering
      the block.

    .. NOTE(review): this generator is meant to be used with
       :func:`contextlib.contextmanager` (the decorator is applied where the
       function is defined) -- confirm at the definition site.
    """
    startdir = os.getcwd()  # remember where we came from
    try:
        try:
            os.chdir(directory)
            logger.debug("Working in {directory!r}...".format(**vars()))
        except OSError as err:
            # only create the directory when it is missing and creation is allowed
            if create and err.errno == errno.ENOENT:
                os.makedirs(directory)
                os.chdir(directory)
                logger.info("Working in {directory!r} (newly created)...".format(**vars()))
            else:
                logger.exception("Failed to start working in {directory!r}.".format(**vars()))
                raise
        yield os.getcwd()
    finally:
        # always restore the original working directory, even on exception
        os.chdir(startdir)
def unlink_f(path):
    """Remove *path*, silently ignoring a missing file."""
    try:
        os.unlink(path)
    except OSError as err:
        if err.errno == errno.ENOENT:
            return          # file already gone: nothing to do
        raise
def remove_legend(ax=None):
    """Remove the legend from *ax* (or from the current axes).

    See http://osdir.com/ml/python.matplotlib.general/2005-07/msg00285.html
    """
    from pylab import gca, draw
    target = gca() if ax is None else ax
    target.legend_ = None
    draw()
def filename(self,filename=None,ext=None,set_default=False,use_my_ext=False):
    """Supply a file name for the class object.

    Typical uses::

       fn = filename()             ---> <default_filename>
       fn = filename('name.ext')   ---> 'name'
       fn = filename(ext='pickle') ---> <default_filename>'.pickle'
       fn = filename('name.inp','pdf') --> 'name.pdf'
       fn = filename('foo.pdf',ext='png',use_my_ext=True) --> 'foo.pdf'

    The returned filename is stripped of the extension
    (``use_my_ext=False``) and if provided, another extension is
    appended. Chooses a default if no filename is given.

    Raises a ``ValueError`` exception if no default file name is known.

    If ``set_default=True`` then the default filename is also set.

    ``use_my_ext=True`` lets the suffix of a provided filename take
    priority over a default ``ext`` tension.

    .. versionchanged:: 0.3.1
       An empty string as *ext* = "" will suppress appending an extension.
    """
    if filename is None:
        # no name given: fall back to the stored default
        if not hasattr(self,'_filename'):
            self._filename = None # add attribute to class
        if self._filename:
            filename = self._filename
        else:
            raise ValueError("A file name is required because no default file name was defined.")
        my_ext = None
    else:
        # split off the caller-provided extension
        filename, my_ext = os.path.splitext(filename)
        if set_default: # replaces existing default file name
            self._filename = filename
    if my_ext and use_my_ext:
        # a provided suffix takes priority over the 'ext' argument
        ext = my_ext
    if ext is not None:
        if ext.startswith(os.extsep):
            ext = ext[1:] # strip a dot to avoid annoying mistakes
        if ext != "":
            # ext == "" deliberately suppresses appending an extension
            filename = filename + os.extsep + ext
    return filename
def check_file_exists(self, filename, resolve='exception', force=None):
    """If *filename* exists, act according to *resolve*.

    ``resolve`` must be one of

    "ignore"
        always return ``False``
    "indicate"
        return ``True`` if it exists
    "warn"
        indicate and issue a :exc:`UserWarning`
    "exception"
        raise :exc:`IOError` if it exists

    Alternatively *force* overrides *resolve*: ``True`` behaves like
    "ignore" (allow overwriting), ``False`` like "exception" (prevent
    overwriting), and ``None`` defers to *resolve*.
    """
    def _warn(x):
        msg = "File {0!r} already exists.".format(x)
        logger.warn(msg)
        warnings.warn(msg)
        return True

    def _raise(x):
        msg = "File {0!r} already exists.".format(x)
        logger.error(msg)
        raise IOError(errno.EEXIST, x, msg)

    handlers = {
        'ignore': lambda x: False,   # file exists, but we pretend that it doesn't
        'indicate': lambda x: True,  # yes, file exists
        'warn': _warn,
        'warning': _warn,
        'exception': _raise,
        'raise': _raise,
    }
    # 'force' takes precedence over 'resolve'
    if force is True:
        resolve = 'ignore'
    elif force is False:
        resolve = 'exception'
    if os.path.isfile(filename):
        return handlers[resolve](filename)
    return False
def strftime(self, fmt="%d:%H:%M:%S"):
    """Primitive string formatter.

    The only directives understood are the following:

    ============ ==========================
    Directive    meaning
    ============ ==========================
    %d           day as integer
    %H           hour  [00-23]
    %h           hours including days
    %M           minute as integer [00-59]
    %S           second as integer [00-59]
    ============ ==========================
    """
    rendered = fmt
    rendered = rendered.replace("%d", str(self.days))
    rendered = rendered.replace("%H", "{0:02d}".format(self.dhours))
    rendered = rendered.replace("%h", str(24 * self.days + self.dhours))
    rendered = rendered.replace("%M", "{0:02d}".format(self.dminutes))
    rendered = rendered.replace("%S", "{0:02d}".format(self.dseconds))
    return rendered
def start_logging(logfile="gromacs.log"):
    """Start logging of messages to file and console.

    The default logfile is named ``gromacs.log`` and messages are
    logged under the *gromacs* tag.
    """
    from . import log
    log.create("gromacs", logfile=logfile)
    pkg_logger = logging.getLogger("gromacs")
    pkg_logger.info("GromacsWrapper %s STARTED logging to %r", __version__, logfile)
def stop_logging():
    """Stop logging to the log file and the console."""
    from . import log
    pkg_logger = logging.getLogger("gromacs")
    pkg_logger.info("GromacsWrapper %s STOPPED logging", get_version())
    log.clear_handlers(pkg_logger)
def tool_factory(clsname, name, driver, base=GromacsCommand):
    """Create a :class:`GromacsCommand`-derived class for tool *name*."""
    attrs = {
        'command_name': name,
        'driver': driver,
        # the Gromacs help text becomes the class docstring, fetched lazily
        '__doc__': property(base._get_gmx_docs),
    }
    return type(clsname, (base,), attrs)
def find_executables(path):
    """Return the executables found in *path*.

    Skips directories and a few known commands that are unusable with
    GromacsWrapper (GMXRC shell scripts and helper perl scripts).

    :param path: dirname to search
    :return: list of executable names
    """
    skip = {'GMXRC', 'GMXRC.bash', 'GMXRC.csh', 'GMXRC.zsh',
            'demux.pl', 'xplor2gmx.pl'}
    found = []
    for entry in os.listdir(path):
        full = os.path.join(path, entry)
        if entry in skip or os.path.isdir(full):
            continue
        if os.access(full, os.X_OK):
            found.append(entry)
    return found
def load_v4_tools():
    """Load Gromacs 4.x tools automatically using some heuristic.

    Tries (1) the configured tool groups, (2) automatic detection from
    ``GMXBIN``, (3) a prefilled default list; extra tools from
    ``~/.gromacswrapper.cfg`` are always added.

    :return: dict mapping tool names to GromacsCommand classes
    :raises GromacsToolLoadingError: if no tool could be loaded
    """
    logger.debug("Loading v4 tools...")
    names = config.get_tool_names()
    if not names and 'GMXBIN' in os.environ:
        names = find_executables(os.environ['GMXBIN'])
    # an implausible number of hits means the heuristic failed: use defaults
    if not names or len(names) > len(V4TOOLS) * 4:
        names = list(V4TOOLS)
    names.extend(config.get_extra_tool_names())
    tools = {}
    for name in names:
        identifier = make_valid_identifier(name)
        tools[identifier] = tool_factory(identifier, name, None)
    if not tools:
        errmsg = "Failed to load v4 tools"
        logger.debug(errmsg)
        raise GromacsToolLoadingError(errmsg)
    logger.debug("Loaded {0} v4 tools successfully!".format(len(tools)))
    return tools
def merge_ndx(*args):
    """Merge one or more index files (and optionally one structure file).

    :param args: index (``.ndx``) files and zero or one structure file
    :return: path of a new merged index file (removed automatically at exit)
    :raises ValueError: if more than one structure file is given
    """
    index_files = [fname for fname in args if fname.endswith('.ndx')]
    structures = [fname for fname in args if not fname.endswith('.ndx')]
    if len(structures) > 1:
        raise ValueError("only one structure file supported")
    struct = structures[0] if structures else None
    fd, multi_ndx = tempfile.mkstemp(suffix='.ndx', prefix='multi_')
    os.close(fd)
    # clean up the temporary file when the interpreter exits
    atexit.register(os.unlink, multi_ndx)
    if struct:
        make_ndx = registry['Make_ndx'](f=struct, n=index_files, o=multi_ndx)
    else:
        make_ndx = registry['Make_ndx'](n=index_files, o=multi_ndx)
    make_ndx(input=['q'], stdout=False, stderr=False)
    return multi_ndx
q257778 | break_array | validation | def break_array(a, threshold=numpy.pi, other=None):
"""Create a array which masks jumps >= threshold.
Extra points are inserted between two subsequent values whose
absolute difference differs by more than threshold (default is
pi).
Other can be a secondary array which is also masked according to
*a*.
Returns (*a_masked*, *other_masked*) (where *other_masked* can be
``None``)
"""
assert len(a.shape) == 1, "Only 1D arrays supported"
if other is not None and a.shape != other.shape:
raise ValueError("arrays must be of identical shape")
# jump occurs after the index in break
breaks = numpy.where(numpy.abs(numpy.diff(a)) >= threshold)[0]
# insert a blank after
breaks += 1
# is this needed?? -- no, but leave it here as a reminder
#f2 = numpy.diff(a, 2)
#up = (f2[breaks - 1] >= 0) # >0: up, <0: down
# sort into up and down breaks:
#breaks_up = breaks[up]
#breaks_down = breaks[~up]
# new array b including insertions for all the breaks
m = len(breaks)
b = numpy.empty((len(a) + m))
# calculate new indices for breaks in b, taking previous insertions into account
b_breaks = breaks + numpy.arange(m)
mask = numpy.zeros_like(b, dtype=numpy.bool)
mask[b_breaks] = True
b[~mask] = a
b[mask] = numpy.NAN
if other is not None:
c = numpy.empty_like(b)
c[~mask] = other
c[mask] = numpy.NAN
ma_c = numpy.ma.array(c, mask=mask)
else:
ma_c = None
return numpy.ma.array(b, mask=mask), ma_c | python | {
"resource": ""
} |
q257779 | XVG.ma | validation | def ma(self):
"""Represent data as a masked array.
The array is returned with column-first indexing, i.e. for a data file with
columns X Y1 Y2 Y3 ... the array a will be a[0] = X, a[1] = Y1, ... .
inf and nan are filtered via :func:`numpy.isfinite`.
"""
a = self.array
return numpy.ma.MaskedArray(a, mask=numpy.logical_not(numpy.isfinite(a))) | python | {
"resource": ""
} |
q257780 | XVG._tcorrel | validation | def _tcorrel(self, nstep=100, **kwargs):
    """Correlation "time" of data.
    The 0-th column of the data is interpreted as a time and the
    decay of the data is computed from the autocorrelation
    function (using FFT).
    .. SeeAlso:: :func:`numkit.timeseries.tcorrel`
    """
    # decimate the time axis by *nstep* before computing the ACF
    t = self.array[0,::nstep]
    # one tcorrel result per data column (columns 1..N), gathered in a
    # Collection so attribute access fans out over all columns
    r = gromacs.collections.Collection([numkit.timeseries.tcorrel(t, Y, nstep=1, **kwargs) for Y in self.array[1:,::nstep]])
    return r | python | {
    "resource": ""
} |
q257781 | XVG.set_correlparameters | validation | def set_correlparameters(self, **kwargs):
    """Set and change the parameters for calculations with correlation functions.
    The parameters persist until explicitly changed.
    :Keywords:
       *nstep*
           only process every *nstep* data point to speed up the FFT; if
           left empty a default is chosen that produces roughly 25,000 data
           points (or whatever is set in *ncorrel*)
       *ncorrel*
           If no *nstep* is supplied, aim at using *ncorrel* data points for
           the FFT; sets :attr:`XVG.ncorrel` [25000]
       *force*
           force recalculating correlation data even if cached values are
           available
       *kwargs*
           see :func:`numkit.timeseries.tcorrel` for other options
    .. SeeAlso: :attr:`XVG.error` for details and references.
    """
    # "or 25000": an explicit ncorrel of 0/None also falls back to the default
    self.ncorrel = kwargs.pop('ncorrel', self.ncorrel) or 25000
    nstep = kwargs.pop('nstep', None)
    if nstep is None:
        # good step size leads to ~25,000 data points
        nstep = len(self.array[0])/float(self.ncorrel)
        nstep = int(numpy.ceil(nstep))  # catch small data sets
    kwargs['nstep'] = nstep
    # name-mangled per-instance cache of keyword arguments for tcorrel()
    self.__correlkwargs.update(kwargs)  # only contains legal kw for numkit.timeseries.tcorrel or force
    return self.__correlkwargs | python | {
    "resource": ""
} |
q257782 | XVG.parse | validation | def parse(self, stride=None):
    """Read and cache the file as a numpy array.
    Store every *stride* line of data; if ``None`` then the class default is used.
    The array is returned with column-first indexing, i.e. for a data file with
    columns X Y1 Y2 Y3 ... the array a will be a[0] = X, a[1] = Y1, ... .
    Corrupted lines are either skipped (when :attr:`permissive` is set, their
    1-based line numbers are recorded in :attr:`corrupted_lineno`) or raise.
    """
    if stride is None:
        stride = self.stride
    self.corrupted_lineno = []
    irow = 0  # count rows of data
    # cannot use numpy.loadtxt() because xvg can have two types of 'comment' lines
    with utilities.openany(self.real_filename) as xvg:
        rows = []
        ncol = None
        for lineno,line in enumerate(xvg):
            line = line.strip()
            if len(line) == 0:
                continue
            # harvest Grace metadata (axis labels, legends, column names)
            # from "@"-directives before discarding them below
            if "label" in line and "xaxis" in line:
                self.xaxis = line.split('"')[-2]
            if "label" in line and "yaxis" in line:
                self.yaxis = line.split('"')[-2]
            if line.startswith("@ legend"):
                if not "legend" in self.metadata: self.metadata["legend"] = []
                self.metadata["legend"].append(line.split("legend ")[-1])
            if line.startswith("@ s") and "subtitle" not in line:
                name = line.split("legend ")[-1].replace('"','').strip()
                self.names.append(name)
            if line.startswith(('#', '@')) :
                continue
            if line.startswith('&'):
                raise NotImplementedError('{0!s}: Multi-data not supported, only simple NXY format.'.format(self.real_filename))
            # parse line as floats
            try:
                row = [float(el) for el in line.split()]
            except:
                if self.permissive:
                    self.logger.warn("%s: SKIPPING unparsable line %d: %r",
                                     self.real_filename, lineno+1, line)
                    self.corrupted_lineno.append(lineno+1)
                    continue
                self.logger.error("%s: Cannot parse line %d: %r",
                                  self.real_filename, lineno+1, line)
                raise
            # check for same number of columns as in previous step
            if ncol is not None and len(row) != ncol:
                if self.permissive:
                    self.logger.warn("%s: SKIPPING line %d with wrong number of columns: %r",
                                     self.real_filename, lineno+1, line)
                    self.corrupted_lineno.append(lineno+1)
                    continue
                errmsg = "{0!s}: Wrong number of columns in line {1:d}: {2!r}".format(self.real_filename, lineno+1, line)
                self.logger.error(errmsg)
                raise IOError(errno.ENODATA, errmsg, self.real_filename)
            # finally: a good line
            if irow % stride == 0:
                ncol = len(row)
                rows.append(row)
            irow += 1
    try:
        self.__array = numpy.array(rows).transpose()  # cache result
    except:
        self.logger.error("%s: Failed reading XVG file, possibly data corrupted. "
                          "Check the last line of the file...", self.real_filename)
        raise
    finally:
        # free the intermediate row list regardless of success
        del rows | python | {
    "resource": ""
} |
q257783 | XVG.plot | validation | def plot(self, **kwargs):
    """Plot xvg file data.
    The first column of the data is always taken as the abscissa
    X. Additional columns are plotted as ordinates Y1, Y2, ...
    In the special case that there is only a single column then this column
    is plotted against the index, i.e. (N, Y).
    :Keywords:
       *columns* : list
            Select the columns of the data to be plotted; the list
            is used as a numpy.array extended slice. The default is
            to use all columns. Columns are selected *after* a transform.
       *transform* : function
            function ``transform(array) -> array`` which transforms
            the original array; must return a 2D numpy array of
            shape [X, Y1, Y2, ...] where X, Y1, ... are column
            vectors. By default the transformation is the
            identity [``lambda x: x``].
       *maxpoints* : int
            limit the total number of data points; matplotlib has issues processing
            png files with >100,000 points and pdfs take forever to display. Set to
            ``None`` if really all data should be displayed. At the moment we simply
            decimate the data at regular intervals. [10000]
       *method*
            method to decimate the data to *maxpoints*, see :meth:`XVG.decimate`
            for details
       *color*
            single color (used for all plots); sequence of colors
            (will be repeated as necessary); or a matplotlib
            colormap (e.g. "jet", see :mod:`matplotlib.cm`). The
            default is to use the :attr:`XVG.default_color_cycle`.
       *ax*
            plot into given axes or create new one if ``None`` [``None``]
       *kwargs*
            All other keyword arguments are passed on to :func:`matplotlib.pyplot.plot`.
    :Returns:
       *ax*
            axes instance
    """
    columns = kwargs.pop('columns', Ellipsis)          # slice for everything
    maxpoints = kwargs.pop('maxpoints', self.maxpoints_default)
    transform = kwargs.pop('transform', lambda x: x)   # default is identity transformation
    method = kwargs.pop('method', "mean")
    ax = kwargs.pop('ax', None)
    if columns is Ellipsis or columns is None:
        columns = numpy.arange(self.array.shape[0])
    if len(columns) == 0:
        raise MissingDataError("plot() needs at least one column of data")
    if len(self.array.shape) == 1 or self.array.shape[0] == 1:
        # special case: plot against index; plot would do this automatically but
        # we'll just produce our own xdata and pretend that this was X all along
        a = numpy.ravel(self.array)
        X = numpy.arange(len(a))
        a = numpy.vstack((X, a))
        # shift user column selection by one because of the inserted X column
        columns = [0] + [c+1 for c in columns]
    else:
        a = self.array
    color = kwargs.pop('color', self.default_color_cycle)
    try:
        # a colormap name: sample one color per Y column
        cmap = matplotlib.cm.get_cmap(color)
        colors = cmap(matplotlib.colors.Normalize()(numpy.arange(len(columns[1:]), dtype=float)))
    except TypeError:
        # single color or color sequence: repeat as necessary
        colors = cycle(utilities.asiterable(color))
    if ax is None:
        ax = plt.gca()
    # (decimate/smooth o slice o transform)(array)
    a = self.decimate(method, numpy.asarray(transform(a))[columns], maxpoints=maxpoints)
    # now deal with infs, nans etc AFTER all transformations (needed for plotting across inf/nan)
    ma = numpy.ma.MaskedArray(a, mask=numpy.logical_not(numpy.isfinite(a)))
    # finally plot (each column separately to catch empty sets)
    for column, color in zip(range(1,len(columns)), colors):
        if len(ma[column]) == 0:
            warnings.warn("No data to plot for column {column:d}".format(**vars()), category=MissingDataWarning)
        kwargs['color'] = color
        ax.plot(ma[0], ma[column], **kwargs)  # plot all other columns in parallel
    return ax | python | {
    "resource": ""
} |
q257784 | topology | validation | def topology(struct=None, protein='protein',
             top='system.top', dirname='top',
             posres="posres.itp",
             ff="oplsaa", water="tip4p",
             **pdb2gmx_args):
    """Build Gromacs topology files from pdb.
    :Keywords:
       *struct*
           input structure (**required**)
       *protein*
           name of the output files
       *top*
           name of the topology file
       *dirname*
           directory in which the new topology will be stored
       *ff*
           force field (string understood by ``pdb2gmx``); default
           "oplsaa"
       *water*
           water model (string), default "tip4p"
       *pdb2gmxargs*
           other arguments for ``pdb2gmx``
    :Returns: dict with paths to 'top', 'struct', and 'posres' files
    .. note::
       At the moment this function simply runs ``pdb2gmx`` and uses
       the resulting topology file directly. If you want to create
       more complicated topologies and maybe also use additional itp
       files or make a protein itp file then you will have to do this
       manually.
    """
    structure = realpath(struct)
    new_struct = protein + '.pdb'
    # NOTE(review): *posres* defaults to "posres.itp", so this branch only
    # fires when the caller explicitly passes posres=None
    if posres is None:
        posres = protein + '_posres.itp'
    pdb2gmx_args.update({'f': structure, 'o': new_struct, 'p': top, 'i': posres,
                         'ff': ff, 'water': water})
    with in_dir(dirname):
        logger.info("[{dirname!s}] Building topology {top!r} from struct = {struct!r}".format(**vars()))
        # perhaps parse output from pdb2gmx 4.5.x to get the names of the chain itp files?
        gromacs.pdb2gmx(**pdb2gmx_args)
    # return absolute paths so downstream stages can run from other directories
    return { \
        'top': realpath(dirname, top), \
        'struct': realpath(dirname, new_struct), \
        'posres' : realpath(dirname, posres) } | python | {
    "resource": ""
} |
q257785 | make_main_index | validation | def make_main_index(struct, selection='"Protein"', ndx='main.ndx', oldndx=None):
"""Make index file with the special groups.
This routine adds the group __main__ and the group __environment__
to the end of the index file. __main__ contains what the user
defines as the *central* and *most important* parts of the
system. __environment__ is everything else.
The template mdp file, for instance, uses these two groups for T-coupling.
These groups are mainly useful if the default groups "Protein" and "Non-Protein"
are not appropriate. By using symbolic names such as __main__ one
can keep scripts more general.
:Returns:
*groups* is a list of dictionaries that describe the index groups. See
:func:`gromacs.cbook.parse_ndxlist` for details.
:Arguments:
*struct* : filename
structure (tpr, pdb, gro)
*selection* : string
is a ``make_ndx`` command such as ``"Protein"`` or ``r DRG`` which
determines what is considered the main group for centering etc. It is
passed directly to ``make_ndx``.
*ndx* : string
name of the final index file
*oldndx* : string
name of index file that should be used as a basis; if None
then the ``make_ndx`` default groups are used.
This routine is very dumb at the moment; maybe some heuristics will be
added later as could be other symbolic groups such as __membrane__.
"""
logger.info("Building the main index file {ndx!r}...".format(**vars()))
# pass 1: select
# get a list of groups
# need the first "" to get make_ndx to spit out the group list.
_,out,_ = gromacs.make_ndx(f=struct, n=oldndx, o=ndx, stdout=False,
input=("", "q"))
groups = cbook.parse_ndxlist(out)
# find the matching groups,
# there is a nasty bug in GROMACS where make_ndx may have multiple
# groups, which caused the previous approach to fail big time.
# this is a work around the make_ndx bug.
# striping the "" allows compatibility with existing make_ndx selection commands.
selection = selection.strip("\"")
selected_groups = [g for g in groups if g['name'].lower() == selection.lower()]
if len(selected_groups) > 1:
logging.warn("make_ndx created duplicated groups, performing work around")
if len(selected_groups) <= 0:
msg = "no groups found for selection {0}, available groups are {1}".format(selection, groups)
logging.error(msg)
raise ValueError(msg)
# Found at least one matching group, we're OK
# index of last group
last = len(groups) - 1
assert last == groups[-1]['nr']
group = selected_groups[0]
# pass 2:
# 1) last group is __main__
# 2) __environment__ is everything else (eg SOL, ions, ...)
_,out,_ = gromacs.make_ndx(f=struct, n=ndx, o=ndx,
stdout=False,
# make copy selected group, this now has index last + 1
input=("{0}".format(group['nr']),
# rename this to __main__
"name {0} __main__".format(last+1),
# make a complement to this group, it get index last + 2
"! \"__main__\"",
# rename this to __environment__
"name {0} __environment__".format(last+2),
# list the groups
"",
# quit
"q"))
return cbook.parse_ndxlist(out) | python | {
"resource": ""
} |
q257786 | get_lipid_vdwradii | validation | def get_lipid_vdwradii(outdir=os.path.curdir, libdir=None):
"""Find vdwradii.dat and add special entries for lipids.
See :data:`gromacs.setup.vdw_lipid_resnames` for lipid
resnames. Add more if necessary.
"""
vdwradii_dat = os.path.join(outdir, "vdwradii.dat")
if libdir is not None:
filename = os.path.join(libdir, 'vdwradii.dat') # canonical name
if not os.path.exists(filename):
msg = 'No VDW database file found in {filename!r}.'.format(**vars())
logger.exception(msg)
raise OSError(msg, errno.ENOENT)
else:
try:
filename = os.path.join(os.environ['GMXLIB'], 'vdwradii.dat')
except KeyError:
try:
filename = os.path.join(os.environ['GMXDATA'], 'top', 'vdwradii.dat')
except KeyError:
msg = "Cannot find vdwradii.dat. Set GMXLIB (point to 'top') or GMXDATA ('share/gromacs')."
logger.exception(msg)
raise OSError(msg, errno.ENOENT)
if not os.path.exists(filename):
msg = "Cannot find {filename!r}; something is wrong with the Gromacs installation.".format(**vars())
logger.exception(msg, errno.ENOENT)
raise OSError(msg)
# make sure to catch 3 and 4 letter resnames
patterns = vdw_lipid_resnames + list({x[:3] for x in vdw_lipid_resnames})
# TODO: should do a tempfile...
with open(vdwradii_dat, 'w') as outfile:
# write lipid stuff before general
outfile.write('; Special larger vdw radii for solvating lipid membranes\n')
for resname in patterns:
for atom,radius in vdw_lipid_atom_radii.items():
outfile.write('{resname:4!s} {atom:<5!s} {radius:5.3f}\n'.format(**vars()))
with open(filename, 'r') as infile:
for line in infile:
outfile.write(line)
logger.debug('Created lipid vdW radii file {vdwradii_dat!r}.'.format(**vars()))
return realpath(vdwradii_dat) | python | {
"resource": ""
} |
q257787 | solvate | validation | def solvate(struct='top/protein.pdb', top='top/system.top',
            distance=0.9, boxtype='dodecahedron',
            concentration=0, cation='NA', anion='CL',
            water='tip4p', solvent_name='SOL', with_membrane=False,
            ndx = 'main.ndx', mainselection = '"Protein"',
            dirname='solvate',
            **kwargs):
    """Put protein into box, add water, add counter-ions.
    Currently this really only supports solutes in water. If you need
    to embedd a protein in a membrane then you will require more
    sophisticated approaches.
    However, you *can* supply a protein already inserted in a
    bilayer. In this case you will probably want to set *distance* =
    ``None`` and also enable *with_membrane* = ``True`` (using extra
    big vdw radii for typical lipids).
    .. Note:: The defaults are suitable for solvating a globular
       protein in a fairly tight (increase *distance*!) dodecahedral
       box.
    :Arguments:
      *struct* : filename
          pdb or gro input structure
      *top* : filename
          Gromacs topology
      *distance* : float
          When solvating with water, make the box big enough so that
          at least *distance* nm water are between the solute *struct*
          and the box boundary.
          Set *boxtype*  to ``None`` in order to use a box size in the input
          file (gro or pdb).
      *boxtype* or *bt*: string
          Any of the box types supported by :class:`~gromacs.tools.Editconf`
          (triclinic, cubic, dodecahedron, octahedron). Set the box dimensions
          either with *distance* or the *box* and *angle* keywords.
          If set to ``None`` it will ignore *distance* and use the box
          inside the *struct* file.
          *bt* overrides the value of *boxtype*.
      *box*
          List of three box lengths [A,B,C] that are used by :class:`~gromacs.tools.Editconf`
          in combination with *boxtype* (``bt`` in :program:`editconf`) and *angles*.
          Setting *box* overrides *distance*.
      *angles*
          List of three angles (only necessary for triclinic boxes).
      *concentration* : float
          Concentration of the free ions in mol/l. Note that counter
          ions are added in excess of this concentration.
      *cation* and *anion* : string
          Molecule names of the ions. This depends on the chosen force field.
      *water* : string
          Name of the water model; one of "spc", "spce", "tip3p",
          "tip4p". This should be appropriate for the chosen force
          field. If an alternative solvent is required, simply supply the path to a box
          with solvent molecules (used by :func:`~gromacs.genbox`'s  *cs* argument)
          and also supply the molecule name via *solvent_name*.
      *solvent_name*
          Name of the molecules that make up the solvent (as set in the itp/top).
          Typically needs to be changed when using non-standard/non-water solvents.
          ["SOL"]
      *with_membrane* : bool
           ``True``: use special ``vdwradii.dat`` with 0.1 nm-increased radii on
           lipids. Default is ``False``.
      *ndx* : filename
          How to name the index file that is produced by this function.
      *mainselection* : string
          A string that is fed to :class:`~gromacs.tools.Make_ndx` and
          which should select the solute.
      *dirname* : directory name
          Name of the directory in which all files for the solvation stage are stored.
      *includes*
          List of additional directories to add to the mdp include path
      *kwargs*
          Additional arguments are passed on to
          :class:`~gromacs.tools.Editconf` or are interpreted as parameters to be
          changed in the mdp file.
    :Returns: dict from the ion stage (see :func:`solvate_ion`)
    """
    # two-stage pipeline: (1) box + water, (2) counter-ions; the structure
    # produced by the first stage feeds the second
    sol = solvate_sol(struct=struct, top=top,
                      distance=distance, boxtype=boxtype,
                      water=water, solvent_name=solvent_name,
                      with_membrane=with_membrane,
                      dirname=dirname, **kwargs)
    ion = solvate_ion(struct=sol['struct'], top=top,
                      concentration=concentration, cation=cation, anion=anion,
                      solvent_name=solvent_name, ndx=ndx,
                      mainselection=mainselection, dirname=dirname,
                      **kwargs)
    return ion | python | {
    "resource": ""
} |
q257788 | energy_minimize | validation | def energy_minimize(dirname='em', mdp=config.templates['em.mdp'],
                    struct='solvate/ionized.gro', top='top/system.top',
                    output='em.pdb', deffnm="em",
                    mdrunner=None, mdrun_args=None,
                    **kwargs):
    """Energy minimize the system.
    This sets up the system (creates run input files) and also runs
    ``mdrun_d``. Thus it can take a while.
    Additional itp files should be in the same directory as the top file.
    Many of the keyword arguments below already have sensible values.
    :Keywords:
       *dirname*
          set up under directory dirname [em]
       *struct*
          input structure (gro, pdb, ...) [solvate/ionized.gro]
       *output*
          output structure (will be put under dirname) [em.pdb]
       *deffnm*
          default name for mdrun-related files [em]
       *top*
          topology file [top/system.top]
       *mdp*
          mdp file (or use the template) [templates/em.mdp]
       *includes*
          additional directories to search for itp files
       *mdrunner*
          :class:`gromacs.run.MDrunner` instance; by default we
          just try :func:`gromacs.mdrun_d` and :func:`gromacs.mdrun` but a
          MDrunner instance gives the user the ability to run mpi jobs
          etc. [None]
       *mdrun_args*
          arguments for *mdrunner* (as a dict), e.g. ``{'nt': 2}``;
          empty by default
          .. versionaddedd:: 0.7.0
       *kwargs*
          remaining key/value pairs that should be changed in the
          template mdp file, eg ``nstxtcout=250, nstfout=250``.
    :Raises: :exc:`GromacsError` if no energy-minimized structure was produced.
    .. note:: If :func:`~gromacs.mdrun_d` is not found, the function
              falls back to :func:`~gromacs.mdrun` instead.
    """
    structure = realpath(struct)
    topology = realpath(top)
    mdp_template = config.get_template(mdp)
    deffnm = deffnm.strip()
    mdrun_args = {} if mdrun_args is None else mdrun_args
    # write the processed topology to the default output
    kwargs.setdefault('pp', 'processed.top')
    # filter some kwargs that might come through when feeding output
    # from previous stages such as solvate(); necessary because *all*
    # **kwargs must be *either* substitutions in the mdp file *or* valid
    # command line parameters for ``grompp``.
    kwargs.pop('ndx', None)
    # mainselection is not used but only passed through; right now we
    # set it to the default that is being used in all argument lists
    # but that is not pretty. TODO.
    mainselection = kwargs.pop('mainselection', '"Protein"')
    # only interesting when passed from solvate()
    qtot = kwargs.pop('qtot', 0)
    # mdp is now the *output* MDP that will be generated from mdp_template
    mdp = deffnm+'.mdp'
    tpr = deffnm+'.tpr'
    logger.info("[{dirname!s}] Energy minimization of struct={struct!r}, top={top!r}, mdp={mdp!r} ...".format(**vars()))
    cbook.add_mdp_includes(topology, kwargs)
    if qtot != 0:
        # At the moment this is purely user-reported and really only here because
        # it might get fed into the function when using the keyword-expansion pipeline
        # usage paradigm.
        wmsg = "Total charge was reported as qtot = {qtot:g} <> 0; probably a problem.".format(**vars())
        logger.warn(wmsg)
        warnings.warn(wmsg, category=BadParameterWarning)
    with in_dir(dirname):
        unprocessed = cbook.edit_mdp(mdp_template, new_mdp=mdp, **kwargs)
        check_mdpargs(unprocessed)
        gromacs.grompp(f=mdp, o=tpr, c=structure, r=structure, p=topology, **unprocessed)
        mdrun_args.update(v=True, stepout=10, deffnm=deffnm, c=output)
        if mdrunner is None:
            # prefer the double-precision mdrun when available
            mdrun = run.get_double_or_single_prec_mdrun()
            mdrun(**mdrun_args)
        else:
            if type(mdrunner) is type:
                # class
                # user wants full control and provides simulation.MDrunner **class**
                # NO CHECKING --- in principle user can supply any callback they like
                mdrun = mdrunner(**mdrun_args)
                mdrun.run()
            else:
                # anything with a run() method that takes mdrun arguments...
                try:
                    mdrunner.run(mdrunargs=mdrun_args)
                except AttributeError:
                    logger.error("mdrunner: Provide a gromacs.run.MDrunner class or instance or a callback with a run() method")
                    raise TypeError("mdrunner: Provide a gromacs.run.MDrunner class or instance or a callback with a run() method")
        # em.gro --> gives 'Bad box in file em.gro' warning --- why??
        # --> use em.pdb instead.
        if not os.path.exists(output):
            errmsg = "Energy minimized system NOT produced."
            logger.error(errmsg)
            raise GromacsError(errmsg)
        final_struct = realpath(output)
    logger.info("[{dirname!s}] energy minimized structure {final_struct!r}".format(**vars()))
    return {'struct': final_struct,
            'top': topology,
            'mainselection': mainselection,
            } | python | {
    "resource": ""
} |
q257789 | em_schedule | validation | def em_schedule(**kwargs):
    """Run multiple energy minimizations one after each other.
    :Keywords:
       *integrators*
           list of integrators (from 'l-bfgs', 'cg', 'steep')
           ['l-bfgs', 'steep']
       *nsteps*
           list of maximum number of steps; one for each integrator in
           in the *integrators* list [100, 1000]
       *kwargs*
           mostly passed to :func:`gromacs.setup.energy_minimize`
    :Returns: dictionary with paths to final structure ('struct') and
              other files
    :Example:
       Conduct three minimizations:
        1. low memory Broyden-Goldfarb-Fletcher-Shannon (BFGS) for 30 steps
        2. steepest descent for 200 steps
        3. finish with BFGS for another 30 steps
       We also do a multi-processor minimization when possible (i.e. for steep
       (and conjugate gradient) by using a :class:`gromacs.run.MDrunner` class
       for a :program:`mdrun` executable compiled for OpenMP in 64 bit (see
       :mod:`gromacs.run` for details)::
          import gromacs.run
          gromacs.setup.em_schedule(struct='solvate/ionized.gro',
                    mdrunner=gromacs.run.MDrunnerOpenMP64,
                    integrators=['l-bfgs', 'steep', 'l-bfgs'],
                    nsteps=[50,200, 50])
    .. Note:: You might have to prepare the mdp file carefully because at the
              moment one can only modify the *nsteps* parameter on a
              per-minimizer basis.
    """
    mdrunner = kwargs.pop('mdrunner', None)
    integrators = kwargs.pop('integrators', ['l-bfgs', 'steep'])
    kwargs.pop('integrator', None)  # clean input; we set integrator from integrators
    nsteps = kwargs.pop('nsteps', [100, 1000])
    # intermediate output files are numbered per stage; the last one gets
    # the user-requested (or default) output name
    outputs = ['em{0:03d}_{1!s}.pdb'.format(i, integrator) for i,integrator in enumerate(integrators)]
    outputs[-1] = kwargs.pop('output', 'em.pdb')
    files = {'struct': kwargs.pop('struct', None)}  # fake output from energy_minimize()
    for i, integrator in enumerate(integrators):
        # each stage starts from the structure produced by the previous one
        struct = files['struct']
        logger.info("[em %d] energy minimize with %s for maximum %d steps", i, integrator, nsteps[i])
        kwargs.update({'struct':struct, 'output':outputs[i],
                       'integrator':integrator, 'nsteps': nsteps[i]})
        if not integrator == 'l-bfgs':
            kwargs['mdrunner'] = mdrunner
        else:
            kwargs['mdrunner'] = None
            logger.warning("[em %d] Not using mdrunner for L-BFGS because it cannot "
                           "do parallel runs.", i)
        files = energy_minimize(**kwargs)
    return files | python | {
    "resource": ""
} |
q257790 | MD_restrained | validation | def MD_restrained(dirname='MD_POSRES', **kwargs):
    """Set up MD with position restraints.
    Additional itp files should be in the same directory as the top file.
    Many of the keyword arguments below already have sensible values. Note that
    setting *mainselection* = ``None`` will disable many of the automated
    choices and is often recommended when using your own mdp file.
    :Keywords:
       *dirname*
          set up under directory dirname [MD_POSRES]
       *struct*
          input structure (gro, pdb, ...) [em/em.pdb]
       *top*
          topology file [top/system.top]
       *mdp*
          mdp file (or use the template) [templates/md.mdp]
       *ndx*
          index file (supply when using a custom mdp)
       *includes*
          additional directories to search for itp files
       *mainselection*
          :program:`make_ndx` selection to select main group ["Protein"]
          (If ``None`` then no canonical index file is generated and
          it is the user's responsibility to set *tc_grps*,
          *tau_t*, and *ref_t* as keyword arguments, or provide the mdp template
          with all parameter pre-set in *mdp* and probably also your own *ndx*
          index file.)
       *deffnm*
          default filename for Gromacs run [md]
       *runtime*
          total length of the simulation in ps [1000]
       *dt*
          integration time step in ps [0.002]
       *qscript*
          script to submit to the queuing system; by default
          uses the template :data:`gromacs.config.qscript_template`, which can
          be manually set to another template from :data:`gromacs.config.templates`;
          can also be a list of template names.
       *qname*
          name to be used for the job in the queuing system [PR_GMX]
       *mdrun_opts*
          option flags for the :program:`mdrun` command in the queuing system
          scripts such as "-stepout 100". [""]
       *kwargs*
          remaining key/value pairs that should be changed in the template mdp
          file, eg ``nstxtcout=250, nstfout=250`` or command line options for
          ``grompp` such as ``maxwarn=1``.
          In particular one can also set **define** and activate
          whichever position restraints have been coded into the itp
          and top file. For instance one could have
             *define* = "-DPOSRES_MainChain -DPOSRES_LIGAND"
          if these preprocessor constructs exist. Note that there
          **must not be any space between "-D" and the value.**
          By default *define* is set to "-DPOSRES".
    :Returns: a dict that can be fed into :func:`gromacs.setup.MD`
              (but check, just in case, especially if you want to
              change the ``define`` parameter in the mdp file)
    .. Note:: The output frequency is drastically reduced for position
              restraint runs by default. Set the corresponding ``nst*``
              variables if you require more output. The `pressure coupling`_
              option *refcoord_scaling* is set to "com" by default (but can
              be changed via *kwargs*) and the pressure coupling
              algorithm itself is set to *Pcoupl* = "Berendsen" to
              run a stable simulation.
    .. _`pressure coupling`: http://manual.gromacs.org/online/mdp_opt.html#pc
    """
    logger.info("[{dirname!s}] Setting up MD with position restraints...".format(**vars()))
    kwargs.setdefault('struct', 'em/em.pdb')
    kwargs.setdefault('qname', 'PR_GMX')
    kwargs.setdefault('define', '-DPOSRES')
    # reduce size of output files
    kwargs.setdefault('nstxout', '50000')   # trr pos
    kwargs.setdefault('nstvout', '50000')   # trr veloc
    kwargs.setdefault('nstfout', '0')       # trr forces
    kwargs.setdefault('nstlog', '500')      # log file
    kwargs.setdefault('nstenergy', '2500')  # edr energy
    kwargs.setdefault('nstxtcout', '5000')  # xtc pos
    # try to get good pressure equilibration
    kwargs.setdefault('refcoord_scaling', 'com')
    kwargs.setdefault('Pcoupl', "Berendsen")
    new_kwargs = _setup_MD(dirname, **kwargs)
    # clean up output kwargs
    new_kwargs.pop('define', None)  # but make sure that -DPOSRES does not stay...
    new_kwargs.pop('refcoord_scaling', None)
    new_kwargs.pop('Pcoupl', None)
    return new_kwargs | python | {
    "resource": ""
} |
q257791 | MD | validation | def MD(dirname='MD', **kwargs):
    """Set up equilibrium MD.
    Additional itp files should be in the same directory as the top file.
    Many of the keyword arguments below already have sensible values. Note that
    setting *mainselection* = ``None`` will disable many of the automated
    choices and is often recommended when using your own mdp file.
    :Keywords:
       *dirname*
          set up under directory dirname [MD]
       *struct*
          input structure (gro, pdb, ...) [MD_POSRES/md.gro]
       *top*
          topology file [top/system.top]
       *mdp*
          mdp file (or use the template) [templates/md.mdp]
       *ndx*
          index file (supply when using a custom mdp)
       *includes*
          additional directories to search for itp files
       *mainselection*
          ``make_ndx`` selection to select main group ["Protein"]
          (If ``None`` then no canonical index file is generated and
          it is the user's responsibility to set *tc_grps*,
          *tau_t*, and *ref_t* as keyword arguments, or provide the mdp template
          with all parameter pre-set in *mdp* and probably also your own *ndx*
          index file.)
       *deffnm*
          default filename for Gromacs run [md]
       *runtime*
          total length of the simulation in ps [1000]
       *dt*
          integration time step in ps [0.002]
       *qscript*
          script to submit to the queuing system; by default
          uses the template :data:`gromacs.config.qscript_template`, which can
          be manually set to another template from :data:`gromacs.config.templates`;
          can also be a list of template names.
       *qname*
          name to be used for the job in the queuing system [MD_GMX]
       *mdrun_opts*
          option flags for the :program:`mdrun` command in the queuing system
          scripts such as "-stepout 100 -dgdl". [""]
       *kwargs*
          remaining key/value pairs that should be changed in the template mdp
          file, e.g. ``nstxtcout=250, nstfout=250`` or command line options for
          :program`grompp` such as ``maxwarn=1``.
    :Returns: a dict that can be fed into :func:`gromacs.setup.MD`
              (but check, just in case, especially if you want to
              change the *define* parameter in the mdp file)
    """
    logger.info("[{dirname!s}] Setting up MD...".format(**vars()))
    # default input structure is the final frame of the restrained run
    kwargs.setdefault('struct', 'MD_POSRES/md.gro')
    kwargs.setdefault('qname', 'MD_GMX')
    # all real work (mdp editing, grompp, queuing scripts) happens here
    return _setup_MD(dirname, **kwargs) | python | {
    "resource": ""
} |
def generate_submit_scripts(templates, prefix=None, deffnm='md', jobname='MD', budget=None,
                            mdrun_opts=None, walltime=1.0, jobarray_string=None, startdir=None,
                            npme=None, **kwargs):
    """Write scripts for queuing systems.

    This sets up queuing system run scripts with a simple search and replace in
    templates. See :func:`gromacs.cbook.edit_txt` for details. Shell scripts
    are made executable.

    :Arguments:
      *templates*
          Template file or list of template files. The "files" can also be names
          or symbolic names for templates in the templates directory. See
          :mod:`gromacs.config` for details and rules for writing templates.
      *prefix*
          Prefix for the final run script filename; by default the filename will be
          the same as the template. [None]
      *dirname*
          Directory in which to place the submit scripts. [.]
      *deffnm*
          Default filename prefix for :program:`mdrun` ``-deffnm`` [md]
      *jobname*
          Name of the job in the queuing system. [MD]
      *budget*
          Which budget to book the runtime on [None]
      *startdir*
          Explicit path on the remote system (for run scripts that need to `cd`
          into this directory at the beginning of execution) [None]
      *mdrun_opts*
          String of additional options for :program:`mdrun`.
      *walltime*
          Maximum runtime of the job in hours. [1]
      *npme*
          number of PME nodes
      *jobarray_string*
          Multi-line string that is spliced in for job array functionality
          (see :func:`gromacs.qsub.generate_submit_array`; do not use manually)
      *kwargs*
          all other kwargs are ignored

    :Returns: list of generated run scripts
    """
    # Queuing systems generally require job names to start with a letter; also
    # guard against an empty jobname (previously raised IndexError).
    if not jobname or not jobname[0].isalpha():
        jobname = 'MD_' + jobname
        wmsg = "To make the jobname legal it must start with a letter: changed to {0!r}".format(jobname)
        logger.warning(wmsg)  # logger.warn() is a deprecated alias
        warnings.warn(wmsg, category=AutoCorrectionWarning)
    if prefix is None:
        prefix = ""
    if mdrun_opts is not None:
        mdrun_opts = '"' + str(mdrun_opts) + '"'  # TODO: could test if quotes already present

    dirname = kwargs.pop('dirname', os.path.curdir)

    wt = Timedelta(hours=walltime)
    walltime = wt.strftime("%h:%M:%S")
    wall_hours = wt.ashours

    def write_script(template):
        # Copy one template to *dirname*, substituting the placeholders that
        # are documented for the user in the module doc string.
        submitscript = os.path.join(dirname, prefix + os.path.basename(template))
        logger.info("Setting up queuing system script {submitscript!r}...".format(**vars()))
        qsystem = detect_queuing_system(template)
        # NOTE: regex patterns are raw strings; the original non-raw '\s'/'\d'/'\w'
        # escapes trigger invalid-escape-sequence warnings on modern Python.
        if qsystem is not None and (qsystem.name == 'Slurm'):
            # Slurm uses -J for the job name and -t for the walltime.
            cbook.edit_txt(template,
                           [(r'^ *DEFFNM=', r'(?<==)(.*)', deffnm),
                            (r'^#.*(-J)', r'((?<=-J\s))\s*\w+', jobname),
                            (r'^#.*(-A|account_no)', r'((?<=-A\s)|(?<=account_no\s))\s*\w+', budget),
                            (r'^#.*(-t)', r'(?<=-t\s)(\d+:\d+:\d+)', walltime),
                            (r'^ *WALL_HOURS=', r'(?<==)(.*)', wall_hours),
                            (r'^ *STARTDIR=', r'(?<==)(.*)', startdir),
                            (r'^ *NPME=', r'(?<==)(.*)', npme),
                            (r'^ *MDRUN_OPTS=', r'(?<==)("")', mdrun_opts),  # only replace literal ""
                            (r'^# JOB_ARRAY_PLACEHOLDER', r'^.*$', jobarray_string),
                            ],
                           newname=submitscript)
        else:
            # PBS / SGE / LoadLeveler style directives.
            cbook.edit_txt(template,
                           [(r'^ *DEFFNM=', r'(?<==)(.*)', deffnm),
                            (r'^#.*(-N|job_name)', r'((?<=-N\s)|(?<=job_name\s))\s*\w+', jobname),
                            (r'^#.*(-A|account_no)', r'((?<=-A\s)|(?<=account_no\s))\s*\w+', budget),
                            (r'^#.*(-l walltime|wall_clock_limit)', r'(?<==)(\d+:\d+:\d+)', walltime),
                            (r'^ *WALL_HOURS=', r'(?<==)(.*)', wall_hours),
                            (r'^ *STARTDIR=', r'(?<==)(.*)', startdir),
                            (r'^ *NPME=', r'(?<==)(.*)', npme),
                            (r'^ *MDRUN_OPTS=', r'(?<==)("")', mdrun_opts),  # only replace literal ""
                            (r'^# JOB_ARRAY_PLACEHOLDER', r'^.*$', jobarray_string),
                            ],
                           newname=submitscript)
        # Hoisted out of both branches (was duplicated in each).
        ext = os.path.splitext(submitscript)[1]
        if ext in ('.sh', '.csh', '.bash'):
            os.chmod(submitscript, 0o755)  # make shell scripts executable
        return submitscript

    return [write_script(template) for template in config.get_templates(templates)]
"resource": ""
} |
def generate_submit_array(templates, directories, **kwargs):
    """Generate an array job.

    For each ``work_dir`` in *directories*, the array job will

    1. cd into ``work_dir``
    2. run the job as detailed in the template

    It will use all the queuing system directives found in the
    template. If more complicated set ups are required, then this
    function cannot be used.

    :Arguments:
       *templates*
          Basic template for a single job; the job array logic is spliced into
          the position of the line ::

             # JOB_ARRAY_PLACEHOLDER

          The appropriate commands for common queuing systems (Sun Gridengine, PBS)
          are hard coded here. The queuing system is detected from the suffix of
          the template.
       *directories*
          List of directories under *dirname*. One task is set up for each
          directory.
       *dirname*
          The array script will be placed in this directory. The *directories*
          **must** be located under *dirname*.
       *kwargs*
          See :func:`gromacs.setup.generate_submit_script` for details.
    """
    dirname = kwargs.setdefault('dirname', os.path.curdir)
    reldirs = [relpath(p, start=dirname) for p in asiterable(directories)]
    missing = [p for p in (os.path.join(dirname, subdir) for subdir in reldirs)
               if not os.path.exists(p)]
    if missing:
        # BUG FIX: the debug call used %(template)r, but no 'template' variable
        # exists in this scope (only 'templates'), so the logging call failed
        # whenever directories were missing.
        logger.debug("templates=%(templates)r: dirname=%(dirname)r reldirs=%(reldirs)r", vars())
        logger.error("Some directories are not accessible from the array script: "
                     "%(missing)r", vars())
    def write_script(template):
        # Splice the job-array lines into a copy of the template; skip templates
        # whose queuing system does not support arrays.
        qsystem = detect_queuing_system(template)
        if qsystem is None or not qsystem.has_arrays():
            logger.warning("Not known how to make a job array for %(template)r; skipping...", vars())
            return None
        kwargs['jobarray_string'] = qsystem.array(reldirs)
        return generate_submit_scripts(template, **kwargs)[0]  # returns list of length 1
    # must use config.get_templates() because we need to access the file for detecting
    # the queuing system
    return [write_script(template) for template in config.get_templates(templates)]
"resource": ""
} |
def isMine(self, scriptname):
    """Primitive queuing system detection; only looks at the filename suffix at the moment."""
    # Compare the script's extension, lower-cased and without the leading dot,
    # against this queuing system's registered suffix.
    _, ext = os.path.splitext(scriptname)
    ext = ext.lower()
    if ext.startswith('.'):
        ext = ext[1:]
    return ext == self.suffix
"resource": ""
} |
def anumb_to_atom(self, anumb):
    """Return the atom object corresponding to an atom number.

    Lazily builds the number->atom lookup table on first use. Logs a message
    and returns ``False`` when the molecule has no atoms or when *anumb* is
    not a known atom number.
    """
    assert isinstance(anumb, int), "anumb must be integer"
    if not self._anumb_to_atom:  # empty dictionary: build the lookup table once
        if not self.atoms:
            self.logger("no atoms in the molecule")
            return False
        for atom in self.atoms:
            self._anumb_to_atom[atom.number] = atom
    # BUG FIX: previously an unknown atom number raised KeyError on the first
    # call (unchecked lookup right after building the dict) but logged and
    # returned False on later calls; both paths now behave consistently.
    if anumb in self._anumb_to_atom:
        return self._anumb_to_atom[anumb]
    self.logger("no such atom number ({0:d}) in the molecule".format(anumb))
    return False
"resource": ""
} |
def total_regular_pixels_from_mask(mask):
    """Compute the total number of unmasked regular pixels in a mask.

    A pixel is counted when its mask entry is falsy (i.e. not masked).
    """
    rows, cols = mask.shape[0], mask.shape[1]
    return sum(1 for y in range(rows) for x in range(cols) if not mask[y, x])
"resource": ""
} |
def mask_circular_annular_from_shape_pixel_scale_and_radii(shape, pixel_scale, inner_radius_arcsec, outer_radius_arcsec,
                                                           centre=(0.0, 0.0)):
    """Compute an annular mask from an input inner and outer mask radius and regular shape.

    A pixel is unmasked (``False``) when its distance from the centre lies
    between the inner and outer radii (inclusive).
    """
    mask = np.full(shape, True)
    centres_arcsec = mask_centres_from_shape_pixel_scale_and_centre(shape=mask.shape, pixel_scale=pixel_scale, centre=centre)
    for row in range(mask.shape[0]):
        for col in range(mask.shape[1]):
            # Convert pixel coordinates to arc-second offsets from the centre.
            offset_y = (row - centres_arcsec[0]) * pixel_scale
            offset_x = (col - centres_arcsec[1]) * pixel_scale
            radius = np.sqrt(offset_x ** 2 + offset_y ** 2)
            mask[row, col] = not (inner_radius_arcsec <= radius <= outer_radius_arcsec)
    return mask
"resource": ""
} |
def mask_blurring_from_mask_and_psf_shape(mask, psf_shape):
    """Compute a blurring mask from an input mask and PSF shape.

    The blurring mask corresponds to all pixels which are outside of the mask
    but will have a fraction of their light blurred into the masked region due
    to PSF convolution. Raises when the PSF footprint of an unmasked pixel
    extends beyond the mask edge.
    """
    blurring_mask = np.full(mask.shape, True)
    # Kernel offsets covering the PSF footprint, centred on each pixel.
    y_offsets = range((-psf_shape[0] + 1) // 2, (psf_shape[0] + 1) // 2)
    x_offsets = range((-psf_shape[1] + 1) // 2, (psf_shape[1] + 1) // 2)
    for y in range(mask.shape[0]):
        for x in range(mask.shape[1]):
            if mask[y, x]:
                continue  # only unmasked pixels seed blurring
            for dy in y_offsets:
                for dx in x_offsets:
                    yy, xx = y + dy, x + dx
                    inside = (0 <= xx <= mask.shape[1] - 1) and (0 <= yy <= mask.shape[0] - 1)
                    if not inside:
                        raise exc.MaskException(
                            "setup_blurring_mask extends beyond the sub_grid_size of the masks - pad the "
                            "datas array before masking")
                    if mask[yy, xx]:
                        blurring_mask[yy, xx] = False
    return blurring_mask
"resource": ""
} |
def edge_pixels_from_mask(mask):
    """Compute a 1D array listing all edge pixel indexes in the mask.

    An edge pixel is an unmasked pixel that is not fully surrounded by
    unmasked neighbours, i.e. at least one of its 8 neighbours is masked.
    """
    edge_pixels = np.zeros(total_edge_pixels_from_mask(mask))
    edge_index = 0
    regular_index = 0
    for y in range(mask.shape[0]):
        for x in range(mask.shape[1]):
            if mask[y, x]:
                continue
            # True when any of the 8 surrounding pixels is masked.
            touches_masked = (
                mask[y + 1, x] or mask[y - 1, x] or mask[y, x + 1] or mask[y, x - 1] or
                mask[y + 1, x + 1] or mask[y + 1, x - 1] or mask[y - 1, x + 1] or mask[y - 1, x - 1]
            )
            if touches_masked:
                edge_pixels[edge_index] = regular_index
                edge_index += 1
            regular_index += 1
    return edge_pixels
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.