text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def handle(app):
    # TODO: for this to work properly we need a generator registry
    # (generator lifecycle etc.) and a customized prompt style.
    # Static menu entries shown below the per-generator choices.
    default_choices = [
        {'name': 'Install a generator', 'value': 'install'},
        {'name': 'Find some help', 'value': 'help'},
        {'name': 'Get me out of here!', 'value': 'exit'},
    ]
    # NOTE: a block of not-yet-ported JavaScript (clear-config entry,
    # update hints, sorting by run count) used to sit here as an inert
    # string literal; it has been removed as a no-op.
    # app.insight.track('yoyo', 'home');
    # One selectable entry per registered generator.
    generator_list = [
        {'name': gen.title(), 'value': {'name': gen, 'method': 'run'}}
        for gen in app.generators
    ]
    choices = _flatten([
        whaaaaat.Separator('Run a generator'),
        generator_list,
        whaaaaat.Separator(),
        default_choices,
        whaaaaat.Separator(),
    ])
    # var allo = name ? '\'Allo ' + name.split(' ')[0] + '! ' : '\'Allo! ';
    allo = 'MoinMoin! '
    answer = whaaaaat.prompt([
        {
            'type': 'list',
            'name': 'what_next',
            'message': allo + 'What would you like to do?',
            'choices': choices,
        }
    ])
    selection = answer['what_next']
    # Generator entries carry a dict payload; menu entries are plain strings.
    if isinstance(selection, dict) and selection['method'] == 'run':
        app.navigate('run', selection['name'])
    elif selection != 'exit':
        app.navigate(selection)
"def",
"handle",
"(",
"app",
")",
":",
"# TODO: for this to work properly we need a generator registry",
"# generator, lifecycle etc.",
"# list of tuples (label, value)",
"# TODO customize & use own style",
"default_choices",
"=",
"[",
"{",
"'name'",
":",
"'Install a generator'",
",",
"'value'",
":",
"'install'",
"}",
",",
"{",
"'name'",
":",
"'Find some help'",
",",
"'value'",
":",
"'help'",
"}",
",",
"{",
"'name'",
":",
"'Get me out of here!'",
",",
"'value'",
":",
"'exit'",
"}",
"]",
"# app.insight.track('yoyo', 'home');",
"generator_list",
"=",
"[",
"{",
"'name'",
":",
"g",
".",
"title",
"(",
")",
",",
"'value'",
":",
"{",
"'name'",
":",
"g",
",",
"'method'",
":",
"'run'",
"}",
"}",
"for",
"g",
"in",
"app",
".",
"generators",
"]",
"choices",
"=",
"_flatten",
"(",
"[",
"whaaaaat",
".",
"Separator",
"(",
"'Run a generator'",
")",
",",
"generator_list",
",",
"whaaaaat",
".",
"Separator",
"(",
")",
",",
"default_choices",
",",
"whaaaaat",
".",
"Separator",
"(",
")",
",",
"]",
")",
"# var allo = name ? '\\'Allo ' + name.split(' ')[0] + '! ' : '\\'Allo! ';",
"allo",
"=",
"'MoinMoin! '",
"questions",
"=",
"[",
"{",
"'type'",
":",
"'list'",
",",
"'name'",
":",
"'what_next'",
",",
"'message'",
":",
"allo",
"+",
"'What would you like to do?'",
",",
"'choices'",
":",
"choices",
",",
"}",
"]",
"answer",
"=",
"whaaaaat",
".",
"prompt",
"(",
"questions",
")",
"if",
"isinstance",
"(",
"answer",
"[",
"'what_next'",
"]",
",",
"dict",
")",
"and",
"answer",
"[",
"'what_next'",
"]",
"[",
"'method'",
"]",
"==",
"'run'",
":",
"app",
".",
"navigate",
"(",
"'run'",
",",
"answer",
"[",
"'what_next'",
"]",
"[",
"'name'",
"]",
")",
"return",
"elif",
"answer",
"[",
"'what_next'",
"]",
"==",
"'exit'",
":",
"return",
"app",
".",
"navigate",
"(",
"answer",
"[",
"'what_next'",
"]",
")"
] | 25.177778 | 22.2 |
def get_remote_file_size(self, url):
    """Return the size in bytes of the remote file at *url*.

    Reads the ``Content-Length`` response header.  On an HTTP error the
    session is re-established via ``connect_earthexplorer`` and the
    request is retried.

    :param url: URL of the remote file.
    :return: file size in bytes (int).
    """
    try:
        req = urllib.request.urlopen(url)
        return int(req.getheader('Content-Length').strip())
    except urllib.error.HTTPError as error:
        logger.error('Error retrieving size of the remote file %s' % error)
        print('Error retrieving size of the remote file %s' % error)
        self.connect_earthexplorer()
        # BUG FIX: the retry result was previously discarded, so the method
        # returned None whenever the retry path was taken.
        # NOTE(review): there is still no retry limit -- persistent HTTP
        # errors recurse indefinitely; consider capping attempts.
        return self.get_remote_file_size(url)
"def",
"get_remote_file_size",
"(",
"self",
",",
"url",
")",
":",
"try",
":",
"req",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"url",
")",
"return",
"int",
"(",
"req",
".",
"getheader",
"(",
"'Content-Length'",
")",
".",
"strip",
"(",
")",
")",
"except",
"urllib",
".",
"error",
".",
"HTTPError",
"as",
"error",
":",
"logger",
".",
"error",
"(",
"'Error retrieving size of the remote file %s'",
"%",
"error",
")",
"print",
"(",
"'Error retrieving size of the remote file %s'",
"%",
"error",
")",
"self",
".",
"connect_earthexplorer",
"(",
")",
"self",
".",
"get_remote_file_size",
"(",
"url",
")"
] | 48.5 | 14 |
def editLogSettings(self, logLocation, logLevel="WARNING", maxLogFileAge=90):
    """Edit the log settings for the portal site.

    Inputs:
       logLocation - file path to where you want the log files saved
           on disk
       logLevel - level of detail saved in the log files; one of
           OFF, SEVERE, WARNING, INFO, FINE, VERBOSE, DEBUG
       maxLogFileAge - the number of days to keep a single log file
    """
    edit_url = self._url + "/settings/edit"
    payload = {
        "f": "json",
        "logDir": logLocation,
        "logLevel": logLevel,
        "maxLogFileAge": maxLogFileAge,
    }
    # Delegate the actual HTTP POST (and auth/proxy plumbing) to _post.
    return self._post(url=edit_url,
                      param_dict=payload,
                      securityHandler=self._securityHandler,
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port)
"def",
"editLogSettings",
"(",
"self",
",",
"logLocation",
",",
"logLevel",
"=",
"\"WARNING\"",
",",
"maxLogFileAge",
"=",
"90",
")",
":",
"url",
"=",
"self",
".",
"_url",
"+",
"\"/settings/edit\"",
"params",
"=",
"{",
"\"f\"",
":",
"\"json\"",
",",
"\"logDir\"",
":",
"logLocation",
",",
"\"logLevel\"",
":",
"logLevel",
",",
"\"maxLogFileAge\"",
":",
"maxLogFileAge",
"}",
"return",
"self",
".",
"_post",
"(",
"url",
"=",
"url",
",",
"param_dict",
"=",
"params",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
")"
] | 40.833333 | 18.416667 |
def stop(self):
    """Stop this animated progress, and block until it is finished."""
    super().stop()
    # Block until the worker reports it has stopped, so anything printed
    # after stop() returns is not interleaved with the animation.
    while not self.stopped:
        sleep(0.001)
    # Propagate the latest exception captured by the worker, if any.
    latest_error = self.exception
    if latest_error is not None:
        raise latest_error
"def",
"stop",
"(",
"self",
")",
":",
"super",
"(",
")",
".",
"stop",
"(",
")",
"while",
"not",
"self",
".",
"stopped",
":",
"# stop() should block, so printing afterwards isn't interrupted.",
"sleep",
"(",
"0.001",
")",
"# Retrieve the latest exception, if any.",
"exc",
"=",
"self",
".",
"exception",
"if",
"exc",
"is",
"not",
"None",
":",
"raise",
"exc"
] | 36.8 | 15.6 |
def t_ID(self, t):
    r'`[^`]*`|[a-zA-Z_][a-zA-Z_0-9:@]*'
    # NOTE: the raw docstring above is the PLY token regex -- it is part of
    # the lexer's behavior and must not be edited casually.
    reserved = self.oper.get(t.value, None)  # reserved-word lookup
    if reserved is not None:
        # Known operator/function name: canonicalise value, tag as FUNCTION.
        t.value = reserved
        t.type = 'FUNCTION'
        return t
    upper = t.value.upper()
    if upper == 'FALSE':
        t.type = 'BOOL'
        t.value = False
    elif upper == 'TRUE':
        t.type = 'BOOL'
        t.value = True
    else:
        t.type = 'ID'
    return t
"def",
"t_ID",
"(",
"self",
",",
"t",
")",
":",
"res",
"=",
"self",
".",
"oper",
".",
"get",
"(",
"t",
".",
"value",
",",
"None",
")",
"# Check for reserved words\r",
"if",
"res",
"is",
"None",
":",
"res",
"=",
"t",
".",
"value",
".",
"upper",
"(",
")",
"if",
"res",
"==",
"'FALSE'",
":",
"t",
".",
"type",
"=",
"'BOOL'",
"t",
".",
"value",
"=",
"False",
"elif",
"res",
"==",
"'TRUE'",
":",
"t",
".",
"type",
"=",
"'BOOL'",
"t",
".",
"value",
"=",
"True",
"else",
":",
"t",
".",
"type",
"=",
"'ID'",
"else",
":",
"t",
".",
"value",
"=",
"res",
"t",
".",
"type",
"=",
"'FUNCTION'",
"return",
"t"
] | 30.470588 | 13.647059 |
def pileup(self, locus):
    '''
    Given a 1-base locus, return the Pileup at that locus.

    Raises a ValueError if the locus spans more than a single base, and a
    KeyError if this PileupCollection does not have a Pileup at the
    specified locus.
    '''
    normalized = to_locus(locus)
    if len(normalized.positions) != 1:
        raise ValueError("Not a single-base locus: %s" % normalized)
    return self.pileups[normalized]
"def",
"pileup",
"(",
"self",
",",
"locus",
")",
":",
"locus",
"=",
"to_locus",
"(",
"locus",
")",
"if",
"len",
"(",
"locus",
".",
"positions",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Not a single-base locus: %s\"",
"%",
"locus",
")",
"return",
"self",
".",
"pileups",
"[",
"locus",
"]"
] | 34.636364 | 20.818182 |
def __attr_name(self, name):
    """ Return a suitable and valid attribute name.  Underscores are mapped
    to dashes and the result lower-cased when the given spelling is not
    already a known attribute.  Raises ValueError for an invalid name.

    :param name: cookie attribute name
    :return: str
    """
    known = self.cookie_attr_value_compliance.keys()
    if name in known:
        return name
    candidate = name.replace('_', '-').lower()
    if candidate not in known:
        raise ValueError('Invalid attribute name is specified')
    return candidate
"def",
"__attr_name",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"cookie_attr_value_compliance",
".",
"keys",
"(",
")",
":",
"suggested_name",
"=",
"name",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
".",
"lower",
"(",
")",
"if",
"suggested_name",
"not",
"in",
"self",
".",
"cookie_attr_value_compliance",
".",
"keys",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'Invalid attribute name is specified'",
")",
"name",
"=",
"suggested_name",
"return",
"name"
] | 38.461538 | 15.692308 |
def cleanup_sweep_threads():
    '''
    Not used.  Kept in case we decide not to use daemonized threads and it
    becomes necessary to clean up the running sweep threads upon exit.
    '''
    for name, candidate in globals().items():
        if not isinstance(candidate, (TimedDict,)):
            continue
        logging.info(
            'Stopping thread for TimedDict {dict_name}'.format(
                dict_name=name))
        candidate.stop_sweep()
"def",
"cleanup_sweep_threads",
"(",
")",
":",
"for",
"dict_name",
",",
"obj",
"in",
"globals",
"(",
")",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"TimedDict",
",",
")",
")",
":",
"logging",
".",
"info",
"(",
"'Stopping thread for TimedDict {dict_name}'",
".",
"format",
"(",
"dict_name",
"=",
"dict_name",
")",
")",
"obj",
".",
"stop_sweep",
"(",
")"
] | 34.230769 | 18.076923 |
def get_source(self, doc):
    """
    Grab the full text contents of 'doc' and return it.

    :param doc: The active document
    :return: the document text
    """
    begin = doc.get_start_iter()
    end = doc.get_end_iter()
    # False: exclude invisible/hidden characters from the returned text.
    return doc.get_text(begin, end, False)
"def",
"get_source",
"(",
"self",
",",
"doc",
")",
":",
"start_iter",
"=",
"doc",
".",
"get_start_iter",
"(",
")",
"end_iter",
"=",
"doc",
".",
"get_end_iter",
"(",
")",
"source",
"=",
"doc",
".",
"get_text",
"(",
"start_iter",
",",
"end_iter",
",",
"False",
")",
"return",
"source"
] | 27.636364 | 11.272727 |
def do_add_auth(self, params):
    """
    \x1b[1mNAME\x1b[0m
    add_auth - Authenticates the session
    \x1b[1mSYNOPSIS\x1b[0m
    add_auth <scheme> <credential>
    \x1b[1mEXAMPLES\x1b[0m
    > add_auth digest super:s3cr3t
    """
    # The docstring above doubles as shell help text; keep it intact.
    # Forward scheme and credential straight to the ZooKeeper client.
    scheme, credential = params.scheme, params.credential
    self._zk.add_auth(scheme, credential)
"def",
"do_add_auth",
"(",
"self",
",",
"params",
")",
":",
"self",
".",
"_zk",
".",
"add_auth",
"(",
"params",
".",
"scheme",
",",
"params",
".",
"credential",
")"
] | 22.538462 | 16.538462 |
def disconnect(self, abandon_session=False):
    """ Disconnects from the Responsys soap service.

    Calls the service logout method (when the current session is expired,
    or when *abandon_session* is set) and destroys the client's session
    information.

    :param abandon_session: force a logout even for a live session.
    :return: True on success.
    """
    self.connected = False
    if (self.session and self.session.is_expired) or abandon_session:
        try:
            self.logout()
        except Exception:
            # Best-effort logout.  FIX: narrowed the former bare `except:`
            # so KeyboardInterrupt/SystemExit are no longer swallowed.
            log.warning(
                'Logout call to responsys failed, session may have not been terminated',
                exc_info=True
            )
    # Session info is always destroyed, even when no logout was attempted.
    del self.session
    return True
"def",
"disconnect",
"(",
"self",
",",
"abandon_session",
"=",
"False",
")",
":",
"self",
".",
"connected",
"=",
"False",
"if",
"(",
"self",
".",
"session",
"and",
"self",
".",
"session",
".",
"is_expired",
")",
"or",
"abandon_session",
":",
"try",
":",
"self",
".",
"logout",
"(",
")",
"except",
":",
"log",
".",
"warning",
"(",
"'Logout call to responsys failed, session may have not been terminated'",
",",
"exc_info",
"=",
"True",
")",
"del",
"self",
".",
"session",
"return",
"True"
] | 37 | 19.117647 |
def add_caption(self, image, caption, colour=None):
    """ Add a caption to the image.

    :param image: PIL image to draw on (modified in place).
    :param caption: text to draw.
    :param colour: fill colour; defaults to "white".
    :return: the same image object.
    """
    if colour is None:
        colour = "white"
    width, height = image.size
    draw = ImageDraw.Draw(image)
    # FIX: this assignment was duplicated in the original.
    draw.font = self.font
    # Anchor the caption at 10% of the width and 5% of the height.
    draw.text((width // 10, height // 20), caption,
              fill=colour)
    return image
"def",
"add_caption",
"(",
"self",
",",
"image",
",",
"caption",
",",
"colour",
"=",
"None",
")",
":",
"if",
"colour",
"is",
"None",
":",
"colour",
"=",
"\"white\"",
"width",
",",
"height",
"=",
"image",
".",
"size",
"draw",
"=",
"ImageDraw",
".",
"Draw",
"(",
"image",
")",
"draw",
".",
"font",
"=",
"self",
".",
"font",
"draw",
".",
"font",
"=",
"self",
".",
"font",
"draw",
".",
"text",
"(",
"(",
"width",
"//",
"10",
",",
"height",
"//",
"20",
")",
",",
"caption",
",",
"fill",
"=",
"colour",
")",
"return",
"image"
] | 23.875 | 19.25 |
def make_filter_string(cls, filter_specification):
    """
    Converts the given filter specification to a CQL filter expression.
    """
    # Look up the CQL-flavoured visitor class in the component registry.
    registry = get_current_registry()
    visitor_factory = registry.getUtility(IFilterSpecificationVisitor,
                                          name=EXPRESSION_KINDS.CQL)
    cql_visitor = visitor_factory()
    filter_specification.accept(cql_visitor)
    return str(cql_visitor.expression)
"def",
"make_filter_string",
"(",
"cls",
",",
"filter_specification",
")",
":",
"registry",
"=",
"get_current_registry",
"(",
")",
"visitor_cls",
"=",
"registry",
".",
"getUtility",
"(",
"IFilterSpecificationVisitor",
",",
"name",
"=",
"EXPRESSION_KINDS",
".",
"CQL",
")",
"visitor",
"=",
"visitor_cls",
"(",
")",
"filter_specification",
".",
"accept",
"(",
"visitor",
")",
"return",
"str",
"(",
"visitor",
".",
"expression",
")"
] | 43.9 | 11.9 |
def get_tickers_from_file(self, filename):
    """Load a ticker list from a text file (one ticker per line).

    :param filename: path of the ticker list file.
    :return: list of ticker strings; empty when the file does not exist.
    """
    if not os.path.exists(filename):
        log.error("Ticker List file does not exist: %s", filename)
        # FIX: previously execution fell through and io.open raised right
        # after logging the error; a missing file now returns an empty list.
        return []
    with io.open(filename, 'r') as fd:
        return [line.rstrip() for line in fd]
"def",
"get_tickers_from_file",
"(",
"self",
",",
"filename",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"log",
".",
"error",
"(",
"\"Ticker List file does not exist: %s\"",
",",
"filename",
")",
"tickers",
"=",
"[",
"]",
"with",
"io",
".",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"fd",
":",
"for",
"ticker",
"in",
"fd",
":",
"tickers",
".",
"append",
"(",
"ticker",
".",
"rstrip",
"(",
")",
")",
"return",
"tickers"
] | 35.6 | 13 |
def contains(self, obj):
    """Return whether this Wikicode object contains *obj*.

    If *obj* is a :class:`.Node` or :class:`.Wikicode` object, then we
    search for it exactly among all of our children, recursively.
    Otherwise, this method just uses :meth:`.__contains__` on the string.
    """
    if not isinstance(obj, (Node, Wikicode)):
        return obj in self
    # Exact identity search; _do_strong_search raises ValueError on a miss.
    try:
        self._do_strong_search(obj, recursive=True)
    except ValueError:
        return False
    else:
        return True
"def",
"contains",
"(",
"self",
",",
"obj",
")",
":",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"(",
"Node",
",",
"Wikicode",
")",
")",
":",
"return",
"obj",
"in",
"self",
"try",
":",
"self",
".",
"_do_strong_search",
"(",
"obj",
",",
"recursive",
"=",
"True",
")",
"except",
"ValueError",
":",
"return",
"False",
"return",
"True"
] | 38 | 19.214286 |
def cat_trials(x3d):
    """Concatenate trials along time axis.

    Parameters
    ----------
    x3d : array, shape (t, m, n)
        Segmented input data with t trials, m signals, and n samples.

    Returns
    -------
    x2d : array, shape (m, t * n)
        Trials are concatenated along the second axis.

    See also
    --------
    cut_segments : Cut segments from continuous data.

    Examples
    --------
    >>> x = np.random.randn(6, 4, 150)
    >>> y = cat_trials(x)
    >>> y.shape
    (4, 900)
    """
    trials = atleast_3d(x3d)
    # Iterating a (t, m, n) array yields t arrays of shape (m, n);
    # joining them along axis 1 produces the (m, t * n) result.
    return np.concatenate(list(trials), axis=1)
"def",
"cat_trials",
"(",
"x3d",
")",
":",
"x3d",
"=",
"atleast_3d",
"(",
"x3d",
")",
"t",
"=",
"x3d",
".",
"shape",
"[",
"0",
"]",
"return",
"np",
".",
"concatenate",
"(",
"np",
".",
"split",
"(",
"x3d",
",",
"t",
",",
"0",
")",
",",
"axis",
"=",
"2",
")",
".",
"squeeze",
"(",
"0",
")"
] | 22.518519 | 22.333333 |
def run(self, *args):
    """Merge unique identities using a matching algorithm."""
    params = self.parser.parse_args(args)
    # Hand every parsed option straight to the unify engine; its return
    # value is the command's exit code.
    return self.unify(params.matching, params.sources,
                      params.fast_matching, params.no_strict,
                      params.interactive, params.recovery)
"def",
"run",
"(",
"self",
",",
"*",
"args",
")",
":",
"params",
"=",
"self",
".",
"parser",
".",
"parse_args",
"(",
"args",
")",
"code",
"=",
"self",
".",
"unify",
"(",
"params",
".",
"matching",
",",
"params",
".",
"sources",
",",
"params",
".",
"fast_matching",
",",
"params",
".",
"no_strict",
",",
"params",
".",
"interactive",
",",
"params",
".",
"recovery",
")",
"return",
"code"
] | 33.5 | 23 |
def errors(self):
    """
    Get the errors of the tag.

    If invalid then the list will consist of errors containing each a code and message explaining the error.
    Each error also refers to the respective (sub)tag(s).

    :return: list of errors of the tag. If the tag is valid, it returns an empty list.
    """
    errors = []
    data = self.data
    error = self.error
    # Check if the tag is grandfathered and if the grandfathered tag is deprecated (e.g. no-nyn).
    if 'record' in data:
        if 'Deprecated' in data['record']:
            errors.append(error(self.ERR_DEPRECATED))
        # Only check every subtag if the tag is not explicitly listed as grandfathered or redundant.
        return errors
    # Check that all subtag codes are meaningful.
    # NOTE(review): `index` appears to be a module-level registry of known
    # subtag codes -- confirm it is always populated before this runs.
    codes = data['tag'].split('-')
    for i, code in enumerate(codes):
        # Ignore anything after a singleton (break)
        if len(code) < 2:
            # Check that each private-use subtag is within the maximum allowed length.
            for code in codes[i + 1:]:
                if len(code) > 8:
                    errors.append(error(self.ERR_TOO_LONG, code))
            break
        if code not in index:
            errors.append(error(self.ERR_UNKNOWN, code))
            # Continue to the next item.
            continue
    # Check that first tag is a language tag.
    subtags = self.subtags
    if not len(subtags):
        errors.append(error(self.ERR_NO_LANGUAGE))
        return errors
    elif subtags[0].type != 'language':
        errors.append(error(self.ERR_NO_LANGUAGE))
        return errors
    # Check for more than one of some types and for deprecation.
    found = dict(language=[], extlang=[], variant=[], script=[], region=[])
    for subtag in subtags:
        type = subtag.type
        if subtag.deprecated:
            errors.append(error(self.ERR_SUBTAG_DEPRECATED, subtag))
        if type in found:
            found[type].append(subtag)
        if 'language' == type:
            if len(found['language']) > 1:
                errors.append(error(self.ERR_EXTRA_LANGUAGE, subtag))
        elif 'region' == type:
            if len(found['region']) > 1:
                errors.append(error(self.ERR_EXTRA_REGION, subtag))
        elif 'extlang' == type:
            if len(found['extlang']) > 1:
                errors.append(error(self.ERR_EXTRA_EXTLANG, subtag))
        elif 'script' == type:
            if len(found['script']) > 1:
                errors.append(error(self.ERR_EXTRA_SCRIPT, subtag))
            # Check if script is same as language suppress-script.
            else:
                script = subtags[0].script
                if script:
                    if script.format == subtag.format:
                        errors.append(error(self.ERR_SUPPRESS_SCRIPT, subtag))
        elif 'variant' == type:
            if len(found['variant']) > 1:
                # Only the first duplicate spelling is reported per variant.
                for variant in found['variant']:
                    if variant.format == subtag.format:
                        errors.append(error(self.ERR_DUPLICATE_VARIANT, subtag))
                        break
    # Check for correct order.
    if len(subtags) > 1:
        priority = dict(language=4, extlang=5, script=6, region=7, variant=8)
        # Compare each adjacent pair; a higher-priority type before a
        # lower one means the subtags are out of order.
        for i, subtag in enumerate(subtags[0:len(subtags)-1]):
            next = subtags[i + 1]
            if next:
                if priority[subtag.type] > priority[next.type]:
                    errors.append(error(self.ERR_WRONG_ORDER, [subtag, next]))
    return errors
"def",
"errors",
"(",
"self",
")",
":",
"errors",
"=",
"[",
"]",
"data",
"=",
"self",
".",
"data",
"error",
"=",
"self",
".",
"error",
"# Check if the tag is grandfathered and if the grandfathered tag is deprecated (e.g. no-nyn).",
"if",
"'record'",
"in",
"data",
":",
"if",
"'Deprecated'",
"in",
"data",
"[",
"'record'",
"]",
":",
"errors",
".",
"append",
"(",
"error",
"(",
"self",
".",
"ERR_DEPRECATED",
")",
")",
"# Only check every subtag if the tag is not explicitly listed as grandfathered or redundant.",
"return",
"errors",
"# Check that all subtag codes are meaningful.",
"codes",
"=",
"data",
"[",
"'tag'",
"]",
".",
"split",
"(",
"'-'",
")",
"for",
"i",
",",
"code",
"in",
"enumerate",
"(",
"codes",
")",
":",
"# Ignore anything after a singleton (break)",
"if",
"len",
"(",
"code",
")",
"<",
"2",
":",
"# Check that each private-use subtag is within the maximum allowed length.",
"for",
"code",
"in",
"codes",
"[",
"i",
"+",
"1",
":",
"]",
":",
"if",
"len",
"(",
"code",
")",
">",
"8",
":",
"errors",
".",
"append",
"(",
"error",
"(",
"self",
".",
"ERR_TOO_LONG",
",",
"code",
")",
")",
"break",
"if",
"code",
"not",
"in",
"index",
":",
"errors",
".",
"append",
"(",
"error",
"(",
"self",
".",
"ERR_UNKNOWN",
",",
"code",
")",
")",
"# Continue to the next item.",
"continue",
"# Check that first tag is a language tag.",
"subtags",
"=",
"self",
".",
"subtags",
"if",
"not",
"len",
"(",
"subtags",
")",
":",
"errors",
".",
"append",
"(",
"error",
"(",
"self",
".",
"ERR_NO_LANGUAGE",
")",
")",
"return",
"errors",
"elif",
"subtags",
"[",
"0",
"]",
".",
"type",
"!=",
"'language'",
":",
"errors",
".",
"append",
"(",
"error",
"(",
"self",
".",
"ERR_NO_LANGUAGE",
")",
")",
"return",
"errors",
"# Check for more than one of some types and for deprecation.",
"found",
"=",
"dict",
"(",
"language",
"=",
"[",
"]",
",",
"extlang",
"=",
"[",
"]",
",",
"variant",
"=",
"[",
"]",
",",
"script",
"=",
"[",
"]",
",",
"region",
"=",
"[",
"]",
")",
"for",
"subtag",
"in",
"subtags",
":",
"type",
"=",
"subtag",
".",
"type",
"if",
"subtag",
".",
"deprecated",
":",
"errors",
".",
"append",
"(",
"error",
"(",
"self",
".",
"ERR_SUBTAG_DEPRECATED",
",",
"subtag",
")",
")",
"if",
"type",
"in",
"found",
":",
"found",
"[",
"type",
"]",
".",
"append",
"(",
"subtag",
")",
"if",
"'language'",
"==",
"type",
":",
"if",
"len",
"(",
"found",
"[",
"'language'",
"]",
")",
">",
"1",
":",
"errors",
".",
"append",
"(",
"error",
"(",
"self",
".",
"ERR_EXTRA_LANGUAGE",
",",
"subtag",
")",
")",
"elif",
"'region'",
"==",
"type",
":",
"if",
"len",
"(",
"found",
"[",
"'region'",
"]",
")",
">",
"1",
":",
"errors",
".",
"append",
"(",
"error",
"(",
"self",
".",
"ERR_EXTRA_REGION",
",",
"subtag",
")",
")",
"elif",
"'extlang'",
"==",
"type",
":",
"if",
"len",
"(",
"found",
"[",
"'extlang'",
"]",
")",
">",
"1",
":",
"errors",
".",
"append",
"(",
"error",
"(",
"self",
".",
"ERR_EXTRA_EXTLANG",
",",
"subtag",
")",
")",
"elif",
"'script'",
"==",
"type",
":",
"if",
"len",
"(",
"found",
"[",
"'script'",
"]",
")",
">",
"1",
":",
"errors",
".",
"append",
"(",
"error",
"(",
"self",
".",
"ERR_EXTRA_SCRIPT",
",",
"subtag",
")",
")",
"# Check if script is same as language suppress-script.",
"else",
":",
"script",
"=",
"subtags",
"[",
"0",
"]",
".",
"script",
"if",
"script",
":",
"if",
"script",
".",
"format",
"==",
"subtag",
".",
"format",
":",
"errors",
".",
"append",
"(",
"error",
"(",
"self",
".",
"ERR_SUPPRESS_SCRIPT",
",",
"subtag",
")",
")",
"elif",
"'variant'",
"==",
"type",
":",
"if",
"len",
"(",
"found",
"[",
"'variant'",
"]",
")",
">",
"1",
":",
"for",
"variant",
"in",
"found",
"[",
"'variant'",
"]",
":",
"if",
"variant",
".",
"format",
"==",
"subtag",
".",
"format",
":",
"errors",
".",
"append",
"(",
"error",
"(",
"self",
".",
"ERR_DUPLICATE_VARIANT",
",",
"subtag",
")",
")",
"break",
"# Check for correct order.",
"if",
"len",
"(",
"subtags",
")",
">",
"1",
":",
"priority",
"=",
"dict",
"(",
"language",
"=",
"4",
",",
"extlang",
"=",
"5",
",",
"script",
"=",
"6",
",",
"region",
"=",
"7",
",",
"variant",
"=",
"8",
")",
"for",
"i",
",",
"subtag",
"in",
"enumerate",
"(",
"subtags",
"[",
"0",
":",
"len",
"(",
"subtags",
")",
"-",
"1",
"]",
")",
":",
"next",
"=",
"subtags",
"[",
"i",
"+",
"1",
"]",
"if",
"next",
":",
"if",
"priority",
"[",
"subtag",
".",
"type",
"]",
">",
"priority",
"[",
"next",
".",
"type",
"]",
":",
"errors",
".",
"append",
"(",
"error",
"(",
"self",
".",
"ERR_WRONG_ORDER",
",",
"[",
"subtag",
",",
"next",
"]",
")",
")",
"return",
"errors"
] | 41.666667 | 20.422222 |
def compute_fitness_cdf(chromosomes, ga):
    """
    Return a list of fitness-weighted cumulative probabilities for a set of chromosomes.

    chromosomes: chromosomes to use for fitness-based calculations
        (sorted in place via ``ga.sort``)
    ga: ``algorithms.BaseGeneticAlgorithm`` used to obtain fitness values using its ``eval_fitness`` method
    return: non-decreasing list of cumulative probabilities in [0, 1],
        ending at 1
    """
    ga.sort(chromosomes)
    fitness = [ga.eval_fitness(c) for c in chromosomes]
    min_fit = min(fitness)
    fit_range = max(fitness) - min_fit
    if fit_range == 0:
        # all chromosomes have equal chance of being chosen
        n = len(chromosomes)
        return [i / n for i in range(1, n + 1)]
    # BUG FIX: the original returned per-chromosome normalized fitness,
    # which is neither cumulative nor guaranteed to end at 1.  Accumulate
    # the min-shifted weights and normalize by their total instead.
    weights = [fit - min_fit for fit in fitness]
    total = float(sum(weights))
    cdf = []
    running = 0.0
    for w in weights:
        running += w
        cdf.append(running / total)
    return cdf
"def",
"compute_fitness_cdf",
"(",
"chromosomes",
",",
"ga",
")",
":",
"ga",
".",
"sort",
"(",
"chromosomes",
")",
"fitness",
"=",
"[",
"ga",
".",
"eval_fitness",
"(",
"c",
")",
"for",
"c",
"in",
"chromosomes",
"]",
"min_fit",
"=",
"min",
"(",
"fitness",
")",
"fit_range",
"=",
"max",
"(",
"fitness",
")",
"-",
"min_fit",
"if",
"fit_range",
"==",
"0",
":",
"# all chromosomes have equal chance of being chosen",
"n",
"=",
"len",
"(",
"chromosomes",
")",
"return",
"[",
"i",
"/",
"n",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"n",
"+",
"1",
")",
"]",
"return",
"[",
"(",
"fit",
"-",
"min_fit",
")",
"/",
"fit_range",
"for",
"fit",
"in",
"fitness",
"]"
] | 36.761905 | 22.571429 |
def merge(self, b, a=DEFAULT):
    """Merges b into a recursively; if a is not given, merges into self.

    Dicts are merged key-by-key; on a collision, scalars and lists are
    combined into lists:
    * merge({a:a},{a:b}) = {a:[a,b]}
    * merge({a:[a]},{a:b}) = {a:[a,b]}
    * merge({a:a},{a:[b]}) = {a:[a,b]}
    * merge({a:[a]},{a:[b]}) = {a:[a,b]}
    """
    if a is DEFAULT:
        a = self
    for key in b:
        incoming = b[key]
        if key not in a:
            a[key] = incoming
            continue
        current = a[key]
        if isinstance(current, dict) and isinstance(incoming, dict):
            # Both sides are dicts: merge the nested mappings.
            self.merge(incoming, current)
        elif type(current) is list and type(incoming) is list:
            current += incoming
        elif type(current) is list:
            current += [incoming]
        elif type(incoming) is list:
            a[key] = [current] + incoming
        else:
            a[key] = [current, incoming]
    return a
"def",
"merge",
"(",
"self",
",",
"b",
",",
"a",
"=",
"DEFAULT",
")",
":",
"if",
"a",
"is",
"DEFAULT",
":",
"a",
"=",
"self",
"for",
"key",
"in",
"b",
":",
"if",
"key",
"in",
"a",
":",
"if",
"isinstance",
"(",
"a",
"[",
"key",
"]",
",",
"dict",
")",
"and",
"isinstance",
"(",
"b",
"[",
"key",
"]",
",",
"dict",
")",
":",
"self",
".",
"merge",
"(",
"b",
"[",
"key",
"]",
",",
"a",
"[",
"key",
"]",
")",
"else",
":",
"if",
"type",
"(",
"a",
"[",
"key",
"]",
")",
"is",
"list",
"and",
"type",
"(",
"b",
"[",
"key",
"]",
")",
"is",
"list",
":",
"a",
"[",
"key",
"]",
"+=",
"b",
"[",
"key",
"]",
"elif",
"type",
"(",
"a",
"[",
"key",
"]",
")",
"is",
"list",
"and",
"type",
"(",
"b",
"[",
"key",
"]",
")",
"is",
"not",
"list",
":",
"a",
"[",
"key",
"]",
"+=",
"[",
"b",
"[",
"key",
"]",
"]",
"elif",
"type",
"(",
"a",
"[",
"key",
"]",
")",
"is",
"not",
"list",
"and",
"type",
"(",
"b",
"[",
"key",
"]",
")",
"is",
"list",
":",
"a",
"[",
"key",
"]",
"=",
"[",
"a",
"[",
"key",
"]",
"]",
"+",
"b",
"[",
"key",
"]",
"elif",
"type",
"(",
"a",
"[",
"key",
"]",
")",
"is",
"not",
"list",
"and",
"type",
"(",
"b",
"[",
"key",
"]",
")",
"is",
"not",
"list",
":",
"a",
"[",
"key",
"]",
"=",
"[",
"a",
"[",
"key",
"]",
"]",
"+",
"[",
"b",
"[",
"key",
"]",
"]",
"else",
":",
"a",
"[",
"key",
"]",
"=",
"b",
"[",
"key",
"]",
"return",
"a"
] | 40.962963 | 15.962963 |
def get_best(self):
    """Finds the optimal number of features.

    Uses recursive feature elimination with cross-validation (RFECV)
    over a linear-kernel SVM.

    :return: optimal number of features and ranking
    """
    svc = SVC(kernel="linear")
    # NOTE(review): StratifiedKFold(self.y_train, 2) is the pre-0.18
    # scikit-learn signature, and scoring="log_loss" was renamed to
    # "neg_log_loss" in 0.18 -- confirm the pinned sklearn version.
    rfecv = RFECV(
        estimator=svc,
        step=1,
        cv=StratifiedKFold(self.y_train, 2),
        scoring="log_loss"
    )
    rfecv.fit(self.x_train, self.y_train)
    return rfecv.n_features_, rfecv.ranking_
"def",
"get_best",
"(",
"self",
")",
":",
"svc",
"=",
"SVC",
"(",
"kernel",
"=",
"\"linear\"",
")",
"rfecv",
"=",
"RFECV",
"(",
"estimator",
"=",
"svc",
",",
"step",
"=",
"1",
",",
"cv",
"=",
"StratifiedKFold",
"(",
"self",
".",
"y_train",
",",
"2",
")",
",",
"scoring",
"=",
"\"log_loss\"",
")",
"rfecv",
".",
"fit",
"(",
"self",
".",
"x_train",
",",
"self",
".",
"y_train",
")",
"return",
"rfecv",
".",
"n_features_",
",",
"rfecv",
".",
"ranking_"
] | 31.769231 | 12.076923 |
def file_to_base64(path_or_obj, max_mb=None):
    """converts contents of a file to base64 encoding

    :param str_or_object path_or_obj: full pathname string for a file or a file like object that supports read
    :param int max_mb: maximum number in MegaBytes to accept
    :raises ErrorFileTooBig: if file contents > max_mb (see :class:`ErrorFileTooBig`)
    :raises IOError: if file path can't be found (Also possible other exceptions depending on file_object)
    :return: base64-encoded bytes
    """
    if hasattr(path_or_obj, 'read'):
        contents = path_or_obj.read()
    else:
        contents = read_file(path_or_obj)
    if max_mb:
        # FIX: divisor was 10024.0 * 1000 (typo); one megabyte is
        # 1024 * 1024 bytes.
        len_mb = len(contents) / (1024.0 * 1024)
        if len_mb > max_mb:
            # FIX: the message string was being *called* instead of
            # formatted, which raised TypeError rather than ErrorFileTooBig.
            raise ErrorFileTooBig(
                "File is too big ({:.2f} MBytes)".format(len_mb))
    return b64encode(contents)
"def",
"file_to_base64",
"(",
"path_or_obj",
",",
"max_mb",
"=",
"None",
")",
":",
"if",
"not",
"hasattr",
"(",
"path_or_obj",
",",
"'read'",
")",
":",
"rt",
"=",
"read_file",
"(",
"path_or_obj",
")",
"else",
":",
"rt",
"=",
"path_or_obj",
".",
"read",
"(",
")",
"if",
"max_mb",
":",
"len_mb",
"=",
"len",
"(",
"rt",
")",
"/",
"(",
"10024.0",
"*",
"1000",
")",
"if",
"len_mb",
">",
"max_mb",
":",
"raise",
"ErrorFileTooBig",
"(",
"\"File is too big ({.2f} MBytes)\"",
"(",
"len_mb",
")",
")",
"return",
"b64encode",
"(",
"rt",
")"
] | 43.736842 | 23.736842 |
def prepare(bedfile):
    """
    Remove prepended tags in gene names.

    Writes two outputs next to *bedfile*: ``<pf>.a.bed`` with one row per
    de-duplicated accession, and ``<pf>.b.bed`` with the cleaned,
    ';'-joined accession list per original row.
    """
    pf = bedfile.rsplit(".", 1)[0]
    abedfile = pf + ".a.bed"
    bbedfile = pf + ".b.bed"
    bed = Bed(bedfile)
    seen = set()
    # FIX: output files are now closed via context managers even if an
    # exception occurs mid-way (they were previously left open on error).
    with open(abedfile, "w") as fwa, open(bbedfile, "w") as fwb:
        for b in bed:
            accns = b.accn.split(";")
            new_accns = []
            for accn in accns:
                if ":" in accn:
                    method, a = accn.split(":", 1)
                    # Strip known lift-over/mapping tool prefixes.
                    if method in ("liftOver", "GMAP", ""):
                        accn = a
                if accn in seen:
                    logging.error("Duplicate id {0} found. Ignored.".format(accn))
                    continue
                new_accns.append(accn)
                b.accn = accn
                print(b, file=fwa)
                seen.add(accn)
            b.accn = ";".join(new_accns)
            print(b, file=fwb)
"def",
"prepare",
"(",
"bedfile",
")",
":",
"pf",
"=",
"bedfile",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"[",
"0",
"]",
"abedfile",
"=",
"pf",
"+",
"\".a.bed\"",
"bbedfile",
"=",
"pf",
"+",
"\".b.bed\"",
"fwa",
"=",
"open",
"(",
"abedfile",
",",
"\"w\"",
")",
"fwb",
"=",
"open",
"(",
"bbedfile",
",",
"\"w\"",
")",
"bed",
"=",
"Bed",
"(",
"bedfile",
")",
"seen",
"=",
"set",
"(",
")",
"for",
"b",
"in",
"bed",
":",
"accns",
"=",
"b",
".",
"accn",
".",
"split",
"(",
"\";\"",
")",
"new_accns",
"=",
"[",
"]",
"for",
"accn",
"in",
"accns",
":",
"if",
"\":\"",
"in",
"accn",
":",
"method",
",",
"a",
"=",
"accn",
".",
"split",
"(",
"\":\"",
",",
"1",
")",
"if",
"method",
"in",
"(",
"\"liftOver\"",
",",
"\"GMAP\"",
",",
"\"\"",
")",
":",
"accn",
"=",
"a",
"if",
"accn",
"in",
"seen",
":",
"logging",
".",
"error",
"(",
"\"Duplicate id {0} found. Ignored.\"",
".",
"format",
"(",
"accn",
")",
")",
"continue",
"new_accns",
".",
"append",
"(",
"accn",
")",
"b",
".",
"accn",
"=",
"accn",
"print",
"(",
"b",
",",
"file",
"=",
"fwa",
")",
"seen",
".",
"add",
"(",
"accn",
")",
"b",
".",
"accn",
"=",
"\";\"",
".",
"join",
"(",
"new_accns",
")",
"print",
"(",
"b",
",",
"file",
"=",
"fwb",
")",
"fwa",
".",
"close",
"(",
")",
"fwb",
".",
"close",
"(",
")"
] | 25.787879 | 15.727273 |
def make_repr(*args, **kwargs):
"""Returns __repr__ method which returns ASCII
representaion of the object with given fields.
Without arguments, ``make_repr`` generates a method
which outputs all object's non-protected (non-undercored)
arguments which are not callables.
Accepts ``*args``, which should be a names of object's
attributes to be included in the output::
__repr__ = make_repr('foo', 'bar')
If you want to generate attribute's content on the fly,
then you should use keyword arguments and pass a callable
of one argument::
__repr__ = make_repr(foo=lambda obj: obj.blah + 100500)
"""
def method(self):
cls_name = self.__class__.__name__
if args:
field_names = args
else:
def undercored(name): return name.startswith('_')
def is_method(name): return callable(getattr(self, name))
def good_name(name):
return not undercored(name) and not is_method(name)
field_names = filter(good_name, dir(self))
field_names = sorted(field_names)
# on this stage, we make from field_names an
# attribute getters
field_getters = zip(field_names,
map(attrgetter, field_names))
# now process keyword args, they must
# contain callables of one argument
# and callable should return a field's value
field_getters = chain(
field_getters,
kwargs.items())
fields = ((name, format_value(getter(self)))
for name, getter in field_getters)
# prepare key strings
fields = ((u'{0}='.format(name), value)
for name, value in fields)
# join values with they respective keys
fields = list(starmap(serialize_text, fields))
beginning = u'<{cls_name} '.format(
cls_name=cls_name,
)
result = serialize_list(
beginning,
fields)
# append closing braket
result += u'>'
if ON_PYTHON2:
# on python 2.x repr returns bytes, but on python3 - unicode strings
result = result.encode('utf-8')
return result
return method | [
"def",
"make_repr",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"method",
"(",
"self",
")",
":",
"cls_name",
"=",
"self",
".",
"__class__",
".",
"__name__",
"if",
"args",
":",
"field_names",
"=",
"args",
"else",
":",
"def",
"undercored",
"(",
"name",
")",
":",
"return",
"name",
".",
"startswith",
"(",
"'_'",
")",
"def",
"is_method",
"(",
"name",
")",
":",
"return",
"callable",
"(",
"getattr",
"(",
"self",
",",
"name",
")",
")",
"def",
"good_name",
"(",
"name",
")",
":",
"return",
"not",
"undercored",
"(",
"name",
")",
"and",
"not",
"is_method",
"(",
"name",
")",
"field_names",
"=",
"filter",
"(",
"good_name",
",",
"dir",
"(",
"self",
")",
")",
"field_names",
"=",
"sorted",
"(",
"field_names",
")",
"# on this stage, we make from field_names an",
"# attribute getters",
"field_getters",
"=",
"zip",
"(",
"field_names",
",",
"map",
"(",
"attrgetter",
",",
"field_names",
")",
")",
"# now process keyword args, they must",
"# contain callables of one argument",
"# and callable should return a field's value",
"field_getters",
"=",
"chain",
"(",
"field_getters",
",",
"kwargs",
".",
"items",
"(",
")",
")",
"fields",
"=",
"(",
"(",
"name",
",",
"format_value",
"(",
"getter",
"(",
"self",
")",
")",
")",
"for",
"name",
",",
"getter",
"in",
"field_getters",
")",
"# prepare key strings",
"fields",
"=",
"(",
"(",
"u'{0}='",
".",
"format",
"(",
"name",
")",
",",
"value",
")",
"for",
"name",
",",
"value",
"in",
"fields",
")",
"# join values with they respective keys",
"fields",
"=",
"list",
"(",
"starmap",
"(",
"serialize_text",
",",
"fields",
")",
")",
"beginning",
"=",
"u'<{cls_name} '",
".",
"format",
"(",
"cls_name",
"=",
"cls_name",
",",
")",
"result",
"=",
"serialize_list",
"(",
"beginning",
",",
"fields",
")",
"# append closing braket",
"result",
"+=",
"u'>'",
"if",
"ON_PYTHON2",
":",
"# on python 2.x repr returns bytes, but on python3 - unicode strings",
"result",
"=",
"result",
".",
"encode",
"(",
"'utf-8'",
")",
"return",
"result",
"return",
"method"
] | 28.973684 | 20.697368 |
def ensure_annotations(f):
"""
Decorator to be used on functions with annotations. Runs type checks to enforce annotations. Raises
:class:`EnsureError` if any argument passed to *f* is not of the type specified by the annotation. Also raises
:class:`EnsureError` if the return value of *f* is not of the type specified by the annotation. Examples:
.. code-block:: python
from ensure import ensure_annotations
@ensure_annotations
def f(x: int, y: float) -> float:
return x+y
print(f(1, y=2.2))
>>> 3.2
print(f(1, y=2))
>>> ensure.EnsureError: Argument y of type <class 'int'> to
<function f at 0x109b7c710> does not match annotation type <class 'float'>
"""
if f.__defaults__:
for rpos, value in enumerate(f.__defaults__):
pos = f.__code__.co_argcount - len(f.__defaults__) + rpos
arg = f.__code__.co_varnames[pos]
_check_default_argument(f, arg, value)
if f.__kwdefaults__:
for arg, value in f.__kwdefaults__.items():
_check_default_argument(f, arg, value)
arg_properties = []
for pos, arg in enumerate(f.__code__.co_varnames):
if pos >= f.__code__.co_argcount + f.__code__.co_kwonlyargcount:
break
elif arg in f.__annotations__:
templ = f.__annotations__[arg]
if pos >= f.__code__.co_argcount:
arg_properties.append((arg, templ, None))
else:
arg_properties.append((arg, templ, pos))
if 'return' in f.__annotations__:
return_templ = f.__annotations__['return']
return WrappedFunctionReturn(arg_properties, f, return_templ)
else:
return WrappedFunction(arg_properties, f) | [
"def",
"ensure_annotations",
"(",
"f",
")",
":",
"if",
"f",
".",
"__defaults__",
":",
"for",
"rpos",
",",
"value",
"in",
"enumerate",
"(",
"f",
".",
"__defaults__",
")",
":",
"pos",
"=",
"f",
".",
"__code__",
".",
"co_argcount",
"-",
"len",
"(",
"f",
".",
"__defaults__",
")",
"+",
"rpos",
"arg",
"=",
"f",
".",
"__code__",
".",
"co_varnames",
"[",
"pos",
"]",
"_check_default_argument",
"(",
"f",
",",
"arg",
",",
"value",
")",
"if",
"f",
".",
"__kwdefaults__",
":",
"for",
"arg",
",",
"value",
"in",
"f",
".",
"__kwdefaults__",
".",
"items",
"(",
")",
":",
"_check_default_argument",
"(",
"f",
",",
"arg",
",",
"value",
")",
"arg_properties",
"=",
"[",
"]",
"for",
"pos",
",",
"arg",
"in",
"enumerate",
"(",
"f",
".",
"__code__",
".",
"co_varnames",
")",
":",
"if",
"pos",
">=",
"f",
".",
"__code__",
".",
"co_argcount",
"+",
"f",
".",
"__code__",
".",
"co_kwonlyargcount",
":",
"break",
"elif",
"arg",
"in",
"f",
".",
"__annotations__",
":",
"templ",
"=",
"f",
".",
"__annotations__",
"[",
"arg",
"]",
"if",
"pos",
">=",
"f",
".",
"__code__",
".",
"co_argcount",
":",
"arg_properties",
".",
"append",
"(",
"(",
"arg",
",",
"templ",
",",
"None",
")",
")",
"else",
":",
"arg_properties",
".",
"append",
"(",
"(",
"arg",
",",
"templ",
",",
"pos",
")",
")",
"if",
"'return'",
"in",
"f",
".",
"__annotations__",
":",
"return_templ",
"=",
"f",
".",
"__annotations__",
"[",
"'return'",
"]",
"return",
"WrappedFunctionReturn",
"(",
"arg_properties",
",",
"f",
",",
"return_templ",
")",
"else",
":",
"return",
"WrappedFunction",
"(",
"arg_properties",
",",
"f",
")"
] | 35.510204 | 23.265306 |
def this_year(self):
""" Get AnnouncementRequests from this school year only. """
start_date, end_date = get_date_range_this_year()
return Announcement.objects.filter(added__gte=start_date, added__lte=end_date) | [
"def",
"this_year",
"(",
"self",
")",
":",
"start_date",
",",
"end_date",
"=",
"get_date_range_this_year",
"(",
")",
"return",
"Announcement",
".",
"objects",
".",
"filter",
"(",
"added__gte",
"=",
"start_date",
",",
"added__lte",
"=",
"end_date",
")"
] | 57.75 | 20.75 |
def func_span(func, tags=None, require_active_trace=False):
"""
Creates a new local span for execution of the given `func`.
The returned span is best used as a context manager, e.g.
.. code-block:: python
with func_span('my_function'):
return my_function(...)
At this time the func should be a string name. In the future this code
can be enhanced to accept a real function and derive its qualified name.
:param func: name of the function or method
:param tags: optional tags to add to the child span
:param require_active_trace: controls what to do when there is no active
trace. If require_active_trace=True, then no span is created.
If require_active_trace=False, a new trace is started.
:return: new child span, or a dummy context manager if there is no
active/current parent span
"""
current_span = get_current_span()
if current_span is None and require_active_trace:
@contextlib2.contextmanager
def empty_ctx_mgr():
yield None
return empty_ctx_mgr()
# TODO convert func to a proper name: module:class.func
operation_name = str(func)
return utils.start_child_span(
operation_name=operation_name, parent=current_span, tags=tags) | [
"def",
"func_span",
"(",
"func",
",",
"tags",
"=",
"None",
",",
"require_active_trace",
"=",
"False",
")",
":",
"current_span",
"=",
"get_current_span",
"(",
")",
"if",
"current_span",
"is",
"None",
"and",
"require_active_trace",
":",
"@",
"contextlib2",
".",
"contextmanager",
"def",
"empty_ctx_mgr",
"(",
")",
":",
"yield",
"None",
"return",
"empty_ctx_mgr",
"(",
")",
"# TODO convert func to a proper name: module:class.func",
"operation_name",
"=",
"str",
"(",
"func",
")",
"return",
"utils",
".",
"start_child_span",
"(",
"operation_name",
"=",
"operation_name",
",",
"parent",
"=",
"current_span",
",",
"tags",
"=",
"tags",
")"
] | 36.970588 | 20.735294 |
def dict_matches_params_deep(params_dct, dct):
"""
Filters deeply by comparing dct to filter_dct's value at each depth. Whenever a mismatch occurs the whole
thing returns false
:param params_dct: dict matching any portion of dct. E.g. filter_dct = {foo: {bar: 1}} would allow
{foo: {bar: 1, car: 2}} to pass, {foo: {bar: 2}} would fail, {goo: ...} would fail
:param dct: Dict for deep processing
:return: True if all pass else false
"""
def recurse_if_param_exists(params, key, value):
"""
If a param[key] exists, recurse. Otherwise return True since there is no param to contest value
:param params:
:param key:
:param value:
:return:
"""
return dict_matches_params_deep(
prop(key, params),
value
) if has(key, params) else True
def recurse_if_array_param_exists(params, index, value):
"""
If a param[key] exists, recurse. Otherwise return True since there is no param to contest value
:param params:
:param index:
:param value:
:return:
"""
return dict_matches_params_deep(
params[index],
value
) if isinstance((list, tuple), params_dct) and index < length(params_dct) else True
if isinstance(dict, dct):
# Filter out keys and then recurse on each value
return all_pass_dict(
# Recurse on each value if there is a corresponding filter_dct[key]. If not we pass
lambda key, value: recurse_if_param_exists(params_dct, key, value),
# We shallow merge, giving dct priority with (hopefully) unmatchable values
merge(map_with_obj(lambda k, v: 1 / (-e * pi), params_dct), dct)
)
if isinstance((list, tuple), dct):
if isinstance((list, tuple), params_dct) and length(dct) < length(params_dct):
# if there are more param items then dct items fail
return False
# run map_deep on each value
return all(map(
lambda ivalue: recurse_if_array_param_exists(params_dct, *ivalue),
enumerate(dct)
))
# scalar. Not that anything not truthy, False, None, 0, are considered equal
return params_dct == dct | [
"def",
"dict_matches_params_deep",
"(",
"params_dct",
",",
"dct",
")",
":",
"def",
"recurse_if_param_exists",
"(",
"params",
",",
"key",
",",
"value",
")",
":",
"\"\"\"\n If a param[key] exists, recurse. Otherwise return True since there is no param to contest value\n :param params:\n :param key:\n :param value:\n :return:\n \"\"\"",
"return",
"dict_matches_params_deep",
"(",
"prop",
"(",
"key",
",",
"params",
")",
",",
"value",
")",
"if",
"has",
"(",
"key",
",",
"params",
")",
"else",
"True",
"def",
"recurse_if_array_param_exists",
"(",
"params",
",",
"index",
",",
"value",
")",
":",
"\"\"\"\n If a param[key] exists, recurse. Otherwise return True since there is no param to contest value\n :param params:\n :param index:\n :param value:\n :return:\n \"\"\"",
"return",
"dict_matches_params_deep",
"(",
"params",
"[",
"index",
"]",
",",
"value",
")",
"if",
"isinstance",
"(",
"(",
"list",
",",
"tuple",
")",
",",
"params_dct",
")",
"and",
"index",
"<",
"length",
"(",
"params_dct",
")",
"else",
"True",
"if",
"isinstance",
"(",
"dict",
",",
"dct",
")",
":",
"# Filter out keys and then recurse on each value",
"return",
"all_pass_dict",
"(",
"# Recurse on each value if there is a corresponding filter_dct[key]. If not we pass",
"lambda",
"key",
",",
"value",
":",
"recurse_if_param_exists",
"(",
"params_dct",
",",
"key",
",",
"value",
")",
",",
"# We shallow merge, giving dct priority with (hopefully) unmatchable values",
"merge",
"(",
"map_with_obj",
"(",
"lambda",
"k",
",",
"v",
":",
"1",
"/",
"(",
"-",
"e",
"*",
"pi",
")",
",",
"params_dct",
")",
",",
"dct",
")",
")",
"if",
"isinstance",
"(",
"(",
"list",
",",
"tuple",
")",
",",
"dct",
")",
":",
"if",
"isinstance",
"(",
"(",
"list",
",",
"tuple",
")",
",",
"params_dct",
")",
"and",
"length",
"(",
"dct",
")",
"<",
"length",
"(",
"params_dct",
")",
":",
"# if there are more param items then dct items fail",
"return",
"False",
"# run map_deep on each value",
"return",
"all",
"(",
"map",
"(",
"lambda",
"ivalue",
":",
"recurse_if_array_param_exists",
"(",
"params_dct",
",",
"*",
"ivalue",
")",
",",
"enumerate",
"(",
"dct",
")",
")",
")",
"# scalar. Not that anything not truthy, False, None, 0, are considered equal",
"return",
"params_dct",
"==",
"dct"
] | 40 | 23.178571 |
def make_sparse(arr, kind='block', fill_value=None, dtype=None, copy=False):
"""
Convert ndarray to sparse format
Parameters
----------
arr : ndarray
kind : {'block', 'integer'}
fill_value : NaN or another value
dtype : np.dtype, optional
copy : bool, default False
Returns
-------
(sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar)
"""
arr = _sanitize_values(arr)
if arr.ndim > 1:
raise TypeError("expected dimension <= 1 data")
if fill_value is None:
fill_value = na_value_for_dtype(arr.dtype)
if isna(fill_value):
mask = notna(arr)
else:
# cast to object comparison to be safe
if is_string_dtype(arr):
arr = arr.astype(object)
if is_object_dtype(arr.dtype):
# element-wise equality check method in numpy doesn't treat
# each element type, eg. 0, 0.0, and False are treated as
# same. So we have to check the both of its type and value.
mask = splib.make_mask_object_ndarray(arr, fill_value)
else:
mask = arr != fill_value
length = len(arr)
if length != len(mask):
# the arr is a SparseArray
indices = mask.sp_index.indices
else:
indices = mask.nonzero()[0].astype(np.int32)
index = _make_index(length, indices, kind)
sparsified_values = arr[mask]
if dtype is not None:
sparsified_values = astype_nansafe(sparsified_values, dtype=dtype)
# TODO: copy
return sparsified_values, index, fill_value | [
"def",
"make_sparse",
"(",
"arr",
",",
"kind",
"=",
"'block'",
",",
"fill_value",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"copy",
"=",
"False",
")",
":",
"arr",
"=",
"_sanitize_values",
"(",
"arr",
")",
"if",
"arr",
".",
"ndim",
">",
"1",
":",
"raise",
"TypeError",
"(",
"\"expected dimension <= 1 data\"",
")",
"if",
"fill_value",
"is",
"None",
":",
"fill_value",
"=",
"na_value_for_dtype",
"(",
"arr",
".",
"dtype",
")",
"if",
"isna",
"(",
"fill_value",
")",
":",
"mask",
"=",
"notna",
"(",
"arr",
")",
"else",
":",
"# cast to object comparison to be safe",
"if",
"is_string_dtype",
"(",
"arr",
")",
":",
"arr",
"=",
"arr",
".",
"astype",
"(",
"object",
")",
"if",
"is_object_dtype",
"(",
"arr",
".",
"dtype",
")",
":",
"# element-wise equality check method in numpy doesn't treat",
"# each element type, eg. 0, 0.0, and False are treated as",
"# same. So we have to check the both of its type and value.",
"mask",
"=",
"splib",
".",
"make_mask_object_ndarray",
"(",
"arr",
",",
"fill_value",
")",
"else",
":",
"mask",
"=",
"arr",
"!=",
"fill_value",
"length",
"=",
"len",
"(",
"arr",
")",
"if",
"length",
"!=",
"len",
"(",
"mask",
")",
":",
"# the arr is a SparseArray",
"indices",
"=",
"mask",
".",
"sp_index",
".",
"indices",
"else",
":",
"indices",
"=",
"mask",
".",
"nonzero",
"(",
")",
"[",
"0",
"]",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"index",
"=",
"_make_index",
"(",
"length",
",",
"indices",
",",
"kind",
")",
"sparsified_values",
"=",
"arr",
"[",
"mask",
"]",
"if",
"dtype",
"is",
"not",
"None",
":",
"sparsified_values",
"=",
"astype_nansafe",
"(",
"sparsified_values",
",",
"dtype",
"=",
"dtype",
")",
"# TODO: copy",
"return",
"sparsified_values",
",",
"index",
",",
"fill_value"
] | 28.924528 | 20.169811 |
def get_color_func(self, word):
"""Returns a single_color_func associated with the word"""
try:
color_func = next(
color_func for (color_func, words) in self.color_func_to_words
if word in words)
except StopIteration:
color_func = self.default_color_func
return color_func | [
"def",
"get_color_func",
"(",
"self",
",",
"word",
")",
":",
"try",
":",
"color_func",
"=",
"next",
"(",
"color_func",
"for",
"(",
"color_func",
",",
"words",
")",
"in",
"self",
".",
"color_func_to_words",
"if",
"word",
"in",
"words",
")",
"except",
"StopIteration",
":",
"color_func",
"=",
"self",
".",
"default_color_func",
"return",
"color_func"
] | 35.2 | 16.6 |
def color_normalize(src, mean, std=None):
"""Normalize src with mean and std.
Parameters
----------
src : NDArray
Input image
mean : NDArray
RGB mean to be subtracted
std : NDArray
RGB standard deviation to be divided
Returns
-------
NDArray
An `NDArray` containing the normalized image.
"""
if mean is not None:
src -= mean
if std is not None:
src /= std
return src | [
"def",
"color_normalize",
"(",
"src",
",",
"mean",
",",
"std",
"=",
"None",
")",
":",
"if",
"mean",
"is",
"not",
"None",
":",
"src",
"-=",
"mean",
"if",
"std",
"is",
"not",
"None",
":",
"src",
"/=",
"std",
"return",
"src"
] | 20.318182 | 19.772727 |
def _clean_tmp_dirs(self):
"""
Remove temporary dir associated with this backend instance.
:return: None
"""
def onerror(fnc, path, excinfo):
# we might not have rights to do this, the files could be owned by root
self.logger.info("we were not able to remove temporary file %s: %s", path, excinfo[1])
shutil.rmtree(self.tmpdir, onerror=onerror)
self.tmpdir = None
global _backend_tmpdir
_backend_tmpdir = None | [
"def",
"_clean_tmp_dirs",
"(",
"self",
")",
":",
"def",
"onerror",
"(",
"fnc",
",",
"path",
",",
"excinfo",
")",
":",
"# we might not have rights to do this, the files could be owned by root",
"self",
".",
"logger",
".",
"info",
"(",
"\"we were not able to remove temporary file %s: %s\"",
",",
"path",
",",
"excinfo",
"[",
"1",
"]",
")",
"shutil",
".",
"rmtree",
"(",
"self",
".",
"tmpdir",
",",
"onerror",
"=",
"onerror",
")",
"self",
".",
"tmpdir",
"=",
"None",
"global",
"_backend_tmpdir",
"_backend_tmpdir",
"=",
"None"
] | 32.933333 | 21.733333 |
def decode_payload(cls, request):
"""Decode task payload.
HugeTask controls its own payload entirely including urlencoding.
It doesn't depend on any particular web framework.
Args:
request: a webapp Request instance.
Returns:
A dict of str to str. The same as the params argument to __init__.
Raises:
DeprecationWarning: When task payload constructed from an older
incompatible version of mapreduce.
"""
# TODO(user): Pass mr_id into headers. Otherwise when payload decoding
# failed, we can't abort a mr.
if request.headers.get(cls.PAYLOAD_VERSION_HEADER) != cls.PAYLOAD_VERSION:
raise DeprecationWarning(
"Task is generated by an older incompatible version of mapreduce. "
"Please kill this job manually")
return cls._decode_payload(request.body) | [
"def",
"decode_payload",
"(",
"cls",
",",
"request",
")",
":",
"# TODO(user): Pass mr_id into headers. Otherwise when payload decoding",
"# failed, we can't abort a mr.",
"if",
"request",
".",
"headers",
".",
"get",
"(",
"cls",
".",
"PAYLOAD_VERSION_HEADER",
")",
"!=",
"cls",
".",
"PAYLOAD_VERSION",
":",
"raise",
"DeprecationWarning",
"(",
"\"Task is generated by an older incompatible version of mapreduce. \"",
"\"Please kill this job manually\"",
")",
"return",
"cls",
".",
"_decode_payload",
"(",
"request",
".",
"body",
")"
] | 35.913043 | 21.391304 |
def _reset_state(self):
"""! @brief Clear all state variables. """
self._builders = {}
self._total_data_size = 0
self._progress_offset = 0
self._current_progress_fraction = 0 | [
"def",
"_reset_state",
"(",
"self",
")",
":",
"self",
".",
"_builders",
"=",
"{",
"}",
"self",
".",
"_total_data_size",
"=",
"0",
"self",
".",
"_progress_offset",
"=",
"0",
"self",
".",
"_current_progress_fraction",
"=",
"0"
] | 34.833333 | 7.833333 |
def zinb_ll(data, P, R, Z):
"""
Returns the zero-inflated negative binomial log-likelihood of the data.
"""
lls = nb_ll(data, P, R)
clusters = P.shape[1]
for c in range(clusters):
pass
return lls | [
"def",
"zinb_ll",
"(",
"data",
",",
"P",
",",
"R",
",",
"Z",
")",
":",
"lls",
"=",
"nb_ll",
"(",
"data",
",",
"P",
",",
"R",
")",
"clusters",
"=",
"P",
".",
"shape",
"[",
"1",
"]",
"for",
"c",
"in",
"range",
"(",
"clusters",
")",
":",
"pass",
"return",
"lls"
] | 24.777778 | 15.666667 |
def _build_extra_predicate(self, extra_predicate):
""" This method is a good one to extend if you want to create a queue which always applies an extra predicate. """
if extra_predicate is None:
return ''
# if they don't have a supported format seq, wrap it for them
if not isinstance(extra_predicate[1], (list, dict, tuple)):
extra_predicate = [extra_predicate[0], (extra_predicate[1], )]
extra_predicate = database.escape_query(*extra_predicate)
return 'AND (' + extra_predicate + ')' | [
"def",
"_build_extra_predicate",
"(",
"self",
",",
"extra_predicate",
")",
":",
"if",
"extra_predicate",
"is",
"None",
":",
"return",
"''",
"# if they don't have a supported format seq, wrap it for them",
"if",
"not",
"isinstance",
"(",
"extra_predicate",
"[",
"1",
"]",
",",
"(",
"list",
",",
"dict",
",",
"tuple",
")",
")",
":",
"extra_predicate",
"=",
"[",
"extra_predicate",
"[",
"0",
"]",
",",
"(",
"extra_predicate",
"[",
"1",
"]",
",",
")",
"]",
"extra_predicate",
"=",
"database",
".",
"escape_query",
"(",
"*",
"extra_predicate",
")",
"return",
"'AND ('",
"+",
"extra_predicate",
"+",
"')'"
] | 45.75 | 22.916667 |
def _collect_dirty_tabs(self, skip=None, tab_range=None):
"""
Collects the list of dirty tabs
:param skip: Tab to skip (used for close_others).
"""
widgets = []
filenames = []
if tab_range is None:
tab_range = range(self.count())
for i in tab_range:
widget = self.widget(i)
try:
if widget.dirty and widget != skip:
widgets.append(widget)
if widget.file.path:
filenames.append(widget.file.path)
else:
filenames.append(widget.documentTitle())
except AttributeError:
pass
return widgets, filenames | [
"def",
"_collect_dirty_tabs",
"(",
"self",
",",
"skip",
"=",
"None",
",",
"tab_range",
"=",
"None",
")",
":",
"widgets",
"=",
"[",
"]",
"filenames",
"=",
"[",
"]",
"if",
"tab_range",
"is",
"None",
":",
"tab_range",
"=",
"range",
"(",
"self",
".",
"count",
"(",
")",
")",
"for",
"i",
"in",
"tab_range",
":",
"widget",
"=",
"self",
".",
"widget",
"(",
"i",
")",
"try",
":",
"if",
"widget",
".",
"dirty",
"and",
"widget",
"!=",
"skip",
":",
"widgets",
".",
"append",
"(",
"widget",
")",
"if",
"widget",
".",
"file",
".",
"path",
":",
"filenames",
".",
"append",
"(",
"widget",
".",
"file",
".",
"path",
")",
"else",
":",
"filenames",
".",
"append",
"(",
"widget",
".",
"documentTitle",
"(",
")",
")",
"except",
"AttributeError",
":",
"pass",
"return",
"widgets",
",",
"filenames"
] | 33.363636 | 12.363636 |
def book_hotel(intent_request):
"""
Performs dialog management and fulfillment for booking a hotel.
Beyond fulfillment, the implementation for this intent demonstrates the following:
1) Use of elicitSlot in slot validation and re-prompting
2) Use of sessionAttributes to pass information that can be used to guide conversation
"""
location = try_ex(lambda: intent_request['currentIntent']['slots']['Location'])
checkin_date = try_ex(lambda: intent_request['currentIntent']['slots']['CheckInDate'])
nights = safe_int(try_ex(lambda: intent_request['currentIntent']['slots']['Nights']))
room_type = try_ex(lambda: intent_request['currentIntent']['slots']['RoomType'])
session_attributes = intent_request['sessionAttributes']
# Load confirmation history and track the current reservation.
reservation = json.dumps({
'ReservationType': 'Hotel',
'Location': location,
'RoomType': room_type,
'CheckInDate': checkin_date,
'Nights': nights
})
session_attributes['currentReservation'] = reservation
if intent_request['invocationSource'] == 'DialogCodeHook':
# Validate any slots which have been specified. If any are invalid, re-elicit for their value
validation_result = validate_hotel(intent_request['currentIntent']['slots'])
if not validation_result['isValid']:
slots = intent_request['currentIntent']['slots']
slots[validation_result['violatedSlot']] = None
return elicit_slot(
session_attributes,
intent_request['currentIntent']['name'],
slots,
validation_result['violatedSlot'],
validation_result['message']
)
# Otherwise, let native DM rules determine how to elicit for slots and prompt for confirmation. Pass price
# back in sessionAttributes once it can be calculated; otherwise clear any setting from sessionAttributes.
if location and checkin_date and nights and room_type:
# The price of the hotel has yet to be confirmed.
price = generate_hotel_price(location, nights, room_type)
session_attributes['currentReservationPrice'] = price
else:
try_ex(lambda: session_attributes.pop('currentReservationPrice'))
session_attributes['currentReservation'] = reservation
return delegate(session_attributes, intent_request['currentIntent']['slots'])
# Booking the hotel. In a real application, this would likely involve a call to a backend service.
logger.debug('bookHotel under={}'.format(reservation))
try_ex(lambda: session_attributes.pop('currentReservationPrice'))
try_ex(lambda: session_attributes.pop('currentReservation'))
session_attributes['lastConfirmedReservation'] = reservation
return close(
session_attributes,
'Fulfilled',
{
'contentType': 'PlainText',
'content': 'Thanks, I have placed your reservation. Please let me know if you would like to book a car '
'rental, or another hotel.'
}
) | [
"def",
"book_hotel",
"(",
"intent_request",
")",
":",
"location",
"=",
"try_ex",
"(",
"lambda",
":",
"intent_request",
"[",
"'currentIntent'",
"]",
"[",
"'slots'",
"]",
"[",
"'Location'",
"]",
")",
"checkin_date",
"=",
"try_ex",
"(",
"lambda",
":",
"intent_request",
"[",
"'currentIntent'",
"]",
"[",
"'slots'",
"]",
"[",
"'CheckInDate'",
"]",
")",
"nights",
"=",
"safe_int",
"(",
"try_ex",
"(",
"lambda",
":",
"intent_request",
"[",
"'currentIntent'",
"]",
"[",
"'slots'",
"]",
"[",
"'Nights'",
"]",
")",
")",
"room_type",
"=",
"try_ex",
"(",
"lambda",
":",
"intent_request",
"[",
"'currentIntent'",
"]",
"[",
"'slots'",
"]",
"[",
"'RoomType'",
"]",
")",
"session_attributes",
"=",
"intent_request",
"[",
"'sessionAttributes'",
"]",
"# Load confirmation history and track the current reservation.",
"reservation",
"=",
"json",
".",
"dumps",
"(",
"{",
"'ReservationType'",
":",
"'Hotel'",
",",
"'Location'",
":",
"location",
",",
"'RoomType'",
":",
"room_type",
",",
"'CheckInDate'",
":",
"checkin_date",
",",
"'Nights'",
":",
"nights",
"}",
")",
"session_attributes",
"[",
"'currentReservation'",
"]",
"=",
"reservation",
"if",
"intent_request",
"[",
"'invocationSource'",
"]",
"==",
"'DialogCodeHook'",
":",
"# Validate any slots which have been specified. If any are invalid, re-elicit for their value",
"validation_result",
"=",
"validate_hotel",
"(",
"intent_request",
"[",
"'currentIntent'",
"]",
"[",
"'slots'",
"]",
")",
"if",
"not",
"validation_result",
"[",
"'isValid'",
"]",
":",
"slots",
"=",
"intent_request",
"[",
"'currentIntent'",
"]",
"[",
"'slots'",
"]",
"slots",
"[",
"validation_result",
"[",
"'violatedSlot'",
"]",
"]",
"=",
"None",
"return",
"elicit_slot",
"(",
"session_attributes",
",",
"intent_request",
"[",
"'currentIntent'",
"]",
"[",
"'name'",
"]",
",",
"slots",
",",
"validation_result",
"[",
"'violatedSlot'",
"]",
",",
"validation_result",
"[",
"'message'",
"]",
")",
"# Otherwise, let native DM rules determine how to elicit for slots and prompt for confirmation. Pass price",
"# back in sessionAttributes once it can be calculated; otherwise clear any setting from sessionAttributes.",
"if",
"location",
"and",
"checkin_date",
"and",
"nights",
"and",
"room_type",
":",
"# The price of the hotel has yet to be confirmed.",
"price",
"=",
"generate_hotel_price",
"(",
"location",
",",
"nights",
",",
"room_type",
")",
"session_attributes",
"[",
"'currentReservationPrice'",
"]",
"=",
"price",
"else",
":",
"try_ex",
"(",
"lambda",
":",
"session_attributes",
".",
"pop",
"(",
"'currentReservationPrice'",
")",
")",
"session_attributes",
"[",
"'currentReservation'",
"]",
"=",
"reservation",
"return",
"delegate",
"(",
"session_attributes",
",",
"intent_request",
"[",
"'currentIntent'",
"]",
"[",
"'slots'",
"]",
")",
"# Booking the hotel. In a real application, this would likely involve a call to a backend service.",
"logger",
".",
"debug",
"(",
"'bookHotel under={}'",
".",
"format",
"(",
"reservation",
")",
")",
"try_ex",
"(",
"lambda",
":",
"session_attributes",
".",
"pop",
"(",
"'currentReservationPrice'",
")",
")",
"try_ex",
"(",
"lambda",
":",
"session_attributes",
".",
"pop",
"(",
"'currentReservation'",
")",
")",
"session_attributes",
"[",
"'lastConfirmedReservation'",
"]",
"=",
"reservation",
"return",
"close",
"(",
"session_attributes",
",",
"'Fulfilled'",
",",
"{",
"'contentType'",
":",
"'PlainText'",
",",
"'content'",
":",
"'Thanks, I have placed your reservation. Please let me know if you would like to book a car '",
"'rental, or another hotel.'",
"}",
")"
] | 44.357143 | 28.7 |
def lharmonicmean (inlist):
"""
Calculates the harmonic mean of the values in the passed list.
That is: n / (1/x1 + 1/x2 + ... + 1/xn). Assumes a '1D' list.
Usage: lharmonicmean(inlist)
"""
sum = 0
for item in inlist:
sum = sum + 1.0/item
return len(inlist) / sum | [
"def",
"lharmonicmean",
"(",
"inlist",
")",
":",
"sum",
"=",
"0",
"for",
"item",
"in",
"inlist",
":",
"sum",
"=",
"sum",
"+",
"1.0",
"/",
"item",
"return",
"len",
"(",
"inlist",
")",
"/",
"sum"
] | 25.636364 | 16.181818 |
def execute(self, name, command):
"""
execute the command on the named host
:param name: the name of the host in config
:param command: the command to be executed
:return:
"""
if name in ["localhost"]:
r = '\n'.join(Shell.sh("-c", command).split()[-1:])
else:
r = '\n'.join(Shell.ssh(name, command).split()[-1:])
return r | [
"def",
"execute",
"(",
"self",
",",
"name",
",",
"command",
")",
":",
"if",
"name",
"in",
"[",
"\"localhost\"",
"]",
":",
"r",
"=",
"'\\n'",
".",
"join",
"(",
"Shell",
".",
"sh",
"(",
"\"-c\"",
",",
"command",
")",
".",
"split",
"(",
")",
"[",
"-",
"1",
":",
"]",
")",
"else",
":",
"r",
"=",
"'\\n'",
".",
"join",
"(",
"Shell",
".",
"ssh",
"(",
"name",
",",
"command",
")",
".",
"split",
"(",
")",
"[",
"-",
"1",
":",
"]",
")",
"return",
"r"
] | 33.916667 | 13.416667 |
def delete(self):
"""Submits a deletion request for this `Resource` instance as
a ``DELETE`` request to its URL."""
response = self.http_request(self._url, 'DELETE')
if response.status != 204:
self.raise_http_error(response) | [
"def",
"delete",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"http_request",
"(",
"self",
".",
"_url",
",",
"'DELETE'",
")",
"if",
"response",
".",
"status",
"!=",
"204",
":",
"self",
".",
"raise_http_error",
"(",
"response",
")"
] | 43.833333 | 8.166667 |
def delete(self, template_id):
"""
Delete a specific template.
:param template_id: The unique id for the template.
:type template_id: :py:class:`str`
"""
self.template_id = template_id
return self._mc_client._delete(url=self._build_path(template_id)) | [
"def",
"delete",
"(",
"self",
",",
"template_id",
")",
":",
"self",
".",
"template_id",
"=",
"template_id",
"return",
"self",
".",
"_mc_client",
".",
"_delete",
"(",
"url",
"=",
"self",
".",
"_build_path",
"(",
"template_id",
")",
")"
] | 33.222222 | 12.333333 |
def channels_open(self, room_id, **kwargs):
"""Adds the channel back to the user’s list of channels."""
return self.__call_api_post('channels.open', roomId=room_id, kwargs=kwargs) | [
"def",
"channels_open",
"(",
"self",
",",
"room_id",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"__call_api_post",
"(",
"'channels.open'",
",",
"roomId",
"=",
"room_id",
",",
"kwargs",
"=",
"kwargs",
")"
] | 64.333333 | 15.333333 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'title') and self.title is not None:
_dict['title'] = self.title
if hasattr(self, 'hash') and self.hash is not None:
_dict['hash'] = self.hash
return _dict | [
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'title'",
")",
"and",
"self",
".",
"title",
"is",
"not",
"None",
":",
"_dict",
"[",
"'title'",
"]",
"=",
"self",
".",
"title",
"if",
"hasattr",
"(",
"self",
",",
"'hash'",
")",
"and",
"self",
".",
"hash",
"is",
"not",
"None",
":",
"_dict",
"[",
"'hash'",
"]",
"=",
"self",
".",
"hash",
"return",
"_dict"
] | 39.5 | 13.375 |
def get_macs(vm_):
'''
Return a list off MAC addresses from the named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_macs <vm name>
'''
macs = []
nics = get_nics(vm_)
if nics is None:
return None
for nic in nics:
macs.append(nic)
return macs | [
"def",
"get_macs",
"(",
"vm_",
")",
":",
"macs",
"=",
"[",
"]",
"nics",
"=",
"get_nics",
"(",
"vm_",
")",
"if",
"nics",
"is",
"None",
":",
"return",
"None",
"for",
"nic",
"in",
"nics",
":",
"macs",
".",
"append",
"(",
"nic",
")",
"return",
"macs"
] | 16.666667 | 24.777778 |
def run_parallel(workflow, n_threads):
"""Run a workflow in parallel threads.
:param workflow: Workflow or PromisedObject to evaluate.
:param n_threads: number of threads to use (in addition to the scheduler).
:returns: evaluated workflow.
"""
scheduler = Scheduler()
threaded_worker = Queue() >> thread_pool(
*repeat(worker, n_threads))
return scheduler.run(threaded_worker, get_workflow(workflow)) | [
"def",
"run_parallel",
"(",
"workflow",
",",
"n_threads",
")",
":",
"scheduler",
"=",
"Scheduler",
"(",
")",
"threaded_worker",
"=",
"Queue",
"(",
")",
">>",
"thread_pool",
"(",
"*",
"repeat",
"(",
"worker",
",",
"n_threads",
")",
")",
"return",
"scheduler",
".",
"run",
"(",
"threaded_worker",
",",
"get_workflow",
"(",
"workflow",
")",
")"
] | 35.833333 | 16.25 |
def get_input(self, more=False):
"""Prompt for code input."""
received = None
try:
received = self.prompt.input(more)
except KeyboardInterrupt:
print()
printerr("KeyboardInterrupt")
except EOFError:
print()
self.exit_runner()
else:
if received.startswith(exit_chars):
self.exit_runner()
received = None
return received | [
"def",
"get_input",
"(",
"self",
",",
"more",
"=",
"False",
")",
":",
"received",
"=",
"None",
"try",
":",
"received",
"=",
"self",
".",
"prompt",
".",
"input",
"(",
"more",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"(",
")",
"printerr",
"(",
"\"KeyboardInterrupt\"",
")",
"except",
"EOFError",
":",
"print",
"(",
")",
"self",
".",
"exit_runner",
"(",
")",
"else",
":",
"if",
"received",
".",
"startswith",
"(",
"exit_chars",
")",
":",
"self",
".",
"exit_runner",
"(",
")",
"received",
"=",
"None",
"return",
"received"
] | 28.9375 | 12.5625 |
def _get_file_paths(self, tax_ids, file_type):
"""
Assemble file paths from tax ids
Args:
:param tax_ids (list) list of taxa
Returns:
:return file dict
"""
file_paths = dict()
if file_type not in self.files:
raise KeyError("file type {} not configured".format(file_type))
for taxon in tax_ids:
file_paths[taxon] = {
'file': "{}.{}".format(taxon, self.files[file_type]['pattern']),
'url': "{}{}.{}".format(
self.files[file_type]['path'], taxon,
self.files[file_type]['pattern']),
'headers': {'User-Agent': USER_AGENT}
}
return file_paths | [
"def",
"_get_file_paths",
"(",
"self",
",",
"tax_ids",
",",
"file_type",
")",
":",
"file_paths",
"=",
"dict",
"(",
")",
"if",
"file_type",
"not",
"in",
"self",
".",
"files",
":",
"raise",
"KeyError",
"(",
"\"file type {} not configured\"",
".",
"format",
"(",
"file_type",
")",
")",
"for",
"taxon",
"in",
"tax_ids",
":",
"file_paths",
"[",
"taxon",
"]",
"=",
"{",
"'file'",
":",
"\"{}.{}\"",
".",
"format",
"(",
"taxon",
",",
"self",
".",
"files",
"[",
"file_type",
"]",
"[",
"'pattern'",
"]",
")",
",",
"'url'",
":",
"\"{}{}.{}\"",
".",
"format",
"(",
"self",
".",
"files",
"[",
"file_type",
"]",
"[",
"'path'",
"]",
",",
"taxon",
",",
"self",
".",
"files",
"[",
"file_type",
"]",
"[",
"'pattern'",
"]",
")",
",",
"'headers'",
":",
"{",
"'User-Agent'",
":",
"USER_AGENT",
"}",
"}",
"return",
"file_paths"
] | 36.85 | 13.35 |
def cal_k_vinet(p, k):
"""
calculate bulk modulus in GPa
:param p: pressure in GPa
:param k: [v0, k0, k0p]
:return: bulk modulus at high pressure in GPa
"""
v = cal_v_vinet(p, k)
return cal_k_vinet_from_v(v, k[0], k[1], k[2]) | [
"def",
"cal_k_vinet",
"(",
"p",
",",
"k",
")",
":",
"v",
"=",
"cal_v_vinet",
"(",
"p",
",",
"k",
")",
"return",
"cal_k_vinet_from_v",
"(",
"v",
",",
"k",
"[",
"0",
"]",
",",
"k",
"[",
"1",
"]",
",",
"k",
"[",
"2",
"]",
")"
] | 24.9 | 12.3 |
def has_overflow(self, params):
""" detect inf and nan """
is_not_finite = 0
for param in params:
if param.grad_req != 'null':
grad = param.list_grad()[0]
is_not_finite += mx.nd.contrib.isnan(grad).sum()
is_not_finite += mx.nd.contrib.isinf(grad).sum()
# NDArray is implicitly converted to bool
if is_not_finite == 0:
return False
else:
return True | [
"def",
"has_overflow",
"(",
"self",
",",
"params",
")",
":",
"is_not_finite",
"=",
"0",
"for",
"param",
"in",
"params",
":",
"if",
"param",
".",
"grad_req",
"!=",
"'null'",
":",
"grad",
"=",
"param",
".",
"list_grad",
"(",
")",
"[",
"0",
"]",
"is_not_finite",
"+=",
"mx",
".",
"nd",
".",
"contrib",
".",
"isnan",
"(",
"grad",
")",
".",
"sum",
"(",
")",
"is_not_finite",
"+=",
"mx",
".",
"nd",
".",
"contrib",
".",
"isinf",
"(",
"grad",
")",
".",
"sum",
"(",
")",
"# NDArray is implicitly converted to bool",
"if",
"is_not_finite",
"==",
"0",
":",
"return",
"False",
"else",
":",
"return",
"True"
] | 36 | 12.769231 |
def create_commit(self, message, tree, parents, author={}, committer={}):
"""Create a commit on this repository.
:param str message: (required), commit message
:param str tree: (required), SHA of the tree object this
commit points to
:param list parents: (required), SHAs of the commits that were parents
of this commit. If empty, the commit will be written as the root
commit. Even if there is only one parent, this should be an
array.
:param dict author: (optional), if omitted, GitHub will
use the authenticated user's credentials and the current
time. Format: {'name': 'Committer Name', 'email':
'name@example.com', 'date': 'YYYY-MM-DDTHH:MM:SS+HH:00'}
:param dict committer: (optional), if ommitted, GitHub will use the
author parameters. Should be the same format as the author
parameter.
:returns: :class:`Commit <github3.git.Commit>` if successful, else
None
"""
json = None
if message and tree and isinstance(parents, list):
url = self._build_url('git', 'commits', base_url=self._api)
data = {'message': message, 'tree': tree, 'parents': parents,
'author': author, 'committer': committer}
json = self._json(self._post(url, data=data), 201)
return Commit(json, self) if json else None | [
"def",
"create_commit",
"(",
"self",
",",
"message",
",",
"tree",
",",
"parents",
",",
"author",
"=",
"{",
"}",
",",
"committer",
"=",
"{",
"}",
")",
":",
"json",
"=",
"None",
"if",
"message",
"and",
"tree",
"and",
"isinstance",
"(",
"parents",
",",
"list",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"'git'",
",",
"'commits'",
",",
"base_url",
"=",
"self",
".",
"_api",
")",
"data",
"=",
"{",
"'message'",
":",
"message",
",",
"'tree'",
":",
"tree",
",",
"'parents'",
":",
"parents",
",",
"'author'",
":",
"author",
",",
"'committer'",
":",
"committer",
"}",
"json",
"=",
"self",
".",
"_json",
"(",
"self",
".",
"_post",
"(",
"url",
",",
"data",
"=",
"data",
")",
",",
"201",
")",
"return",
"Commit",
"(",
"json",
",",
"self",
")",
"if",
"json",
"else",
"None"
] | 53.037037 | 24.037037 |
def syllabify(self, word):
"""Splits input Latin word into a list of syllables, based on
the language syllables loaded for the Syllabifier instance"""
prefixes = self.language['single_syllable_prefixes']
prefixes.sort(key=len, reverse=True)
# Check if word is in exception dictionary
if word in self.language['exceptions']:
syllables = self.language['exceptions'][word]
# Else, breakdown syllables for word
else:
syllables = []
# Remove prefixes
for prefix in prefixes:
if word.startswith(prefix):
syllables.append(prefix)
word = re.sub('^%s' % prefix, '', word)
break
# Initialize syllable to build by iterating through over characters
syllable = ''
# Get word length for determining character position in word
word_len = len(word)
# Iterate over characters to build syllables
for i, char in enumerate(word):
# Build syllable
syllable = syllable + char
syllable_complete = False
# Checks to process syllable logic
char_is_vowel = self._is_vowel(char)
has_next_char = i < word_len - 1
has_prev_char = i > 0
# If it's the end of the word, the syllable is complete
if not has_next_char:
syllable_complete = True
else:
next_char = word[i + 1]
if has_prev_char:
prev_char = word[i - 1]
# 'i' is a special case for a vowel. when i is at the
# beginning of the word (Iesu) or i is between
# vowels (alleluia), then the i is treated as a
# consonant (y) Note: what about compounds like 'adiungere'
if char == 'i' and has_next_char and self._is_vowel(next_char):
if i == 0:
char_is_vowel = False
elif self._is_vowel(prev_char):
char_is_vowel = False
# Determine if the syllable is complete
if char_is_vowel:
if (
( # If the next character's a vowel
self._is_vowel(
next_char) # And it doesn't compose a dipthong with the current character
and not self._is_diphthong(char,
next_char) # And the current character isn't preceded by a q, unless followed by a u
and not (
has_prev_char
and prev_char == "q"
and char == "u"
and next_char != "u"
)
)
or (
# If the next character's a consonant but not a double consonant, unless it's a mute consonant followed by a liquid consonant
i < word_len - 2
and (
(
(
has_prev_char
and prev_char != "q"
and char == "u"
and self._is_vowel(word[i + 2])
)
or (
not has_prev_char
and char == "u"
and self._is_vowel(word[i + 2])
)
)
or (
char != "u"
and self._is_vowel(word[i + 2])
and not self._is_diphthong(char, next_char)
)
or (
self._is_mute_consonant_or_f(next_char)
and self._is_liquid_consonant(word[i + 2])
)
)
)
):
syllable_complete = True
# Otherwise, it's a consonant
else:
if ( # If the next character's also a consonant (but it's not the last in the word)
(
not self._is_vowel(next_char)
and i < word_len - 2
) # If the char's not a mute consonant followed by a liquid consonant
and not (
self._is_mute_consonant_or_f(char)
and self._is_liquid_consonant(next_char)
) # If the char's not a c, p, or t followed by an h
and not (
(
has_prev_char
and not self._is_vowel(prev_char)
and char in ['c', 'p', 't'] and next_char == 'h'
)
or (
not has_prev_char
and char in ['c', 'p', 't'] and next_char == 'h'
)
) # And it's not the only letter in the syllable
and not len(syllable) == 1
):
syllable_complete = True
# If it's a complete syllable, append it to syllables list and reset syllable
if syllable_complete:
syllables.append(syllable)
syllable = ''
return syllables | [
"def",
"syllabify",
"(",
"self",
",",
"word",
")",
":",
"prefixes",
"=",
"self",
".",
"language",
"[",
"'single_syllable_prefixes'",
"]",
"prefixes",
".",
"sort",
"(",
"key",
"=",
"len",
",",
"reverse",
"=",
"True",
")",
"# Check if word is in exception dictionary",
"if",
"word",
"in",
"self",
".",
"language",
"[",
"'exceptions'",
"]",
":",
"syllables",
"=",
"self",
".",
"language",
"[",
"'exceptions'",
"]",
"[",
"word",
"]",
"# Else, breakdown syllables for word",
"else",
":",
"syllables",
"=",
"[",
"]",
"# Remove prefixes",
"for",
"prefix",
"in",
"prefixes",
":",
"if",
"word",
".",
"startswith",
"(",
"prefix",
")",
":",
"syllables",
".",
"append",
"(",
"prefix",
")",
"word",
"=",
"re",
".",
"sub",
"(",
"'^%s'",
"%",
"prefix",
",",
"''",
",",
"word",
")",
"break",
"# Initialize syllable to build by iterating through over characters",
"syllable",
"=",
"''",
"# Get word length for determining character position in word",
"word_len",
"=",
"len",
"(",
"word",
")",
"# Iterate over characters to build syllables",
"for",
"i",
",",
"char",
"in",
"enumerate",
"(",
"word",
")",
":",
"# Build syllable",
"syllable",
"=",
"syllable",
"+",
"char",
"syllable_complete",
"=",
"False",
"# Checks to process syllable logic",
"char_is_vowel",
"=",
"self",
".",
"_is_vowel",
"(",
"char",
")",
"has_next_char",
"=",
"i",
"<",
"word_len",
"-",
"1",
"has_prev_char",
"=",
"i",
">",
"0",
"# If it's the end of the word, the syllable is complete",
"if",
"not",
"has_next_char",
":",
"syllable_complete",
"=",
"True",
"else",
":",
"next_char",
"=",
"word",
"[",
"i",
"+",
"1",
"]",
"if",
"has_prev_char",
":",
"prev_char",
"=",
"word",
"[",
"i",
"-",
"1",
"]",
"# 'i' is a special case for a vowel. when i is at the ",
"# beginning of the word (Iesu) or i is between ",
"# vowels (alleluia), then the i is treated as a ",
"# consonant (y) Note: what about compounds like 'adiungere'",
"if",
"char",
"==",
"'i'",
"and",
"has_next_char",
"and",
"self",
".",
"_is_vowel",
"(",
"next_char",
")",
":",
"if",
"i",
"==",
"0",
":",
"char_is_vowel",
"=",
"False",
"elif",
"self",
".",
"_is_vowel",
"(",
"prev_char",
")",
":",
"char_is_vowel",
"=",
"False",
"# Determine if the syllable is complete",
"if",
"char_is_vowel",
":",
"if",
"(",
"(",
"# If the next character's a vowel",
"self",
".",
"_is_vowel",
"(",
"next_char",
")",
"# And it doesn't compose a dipthong with the current character",
"and",
"not",
"self",
".",
"_is_diphthong",
"(",
"char",
",",
"next_char",
")",
"# And the current character isn't preceded by a q, unless followed by a u",
"and",
"not",
"(",
"has_prev_char",
"and",
"prev_char",
"==",
"\"q\"",
"and",
"char",
"==",
"\"u\"",
"and",
"next_char",
"!=",
"\"u\"",
")",
")",
"or",
"(",
"# If the next character's a consonant but not a double consonant, unless it's a mute consonant followed by a liquid consonant",
"i",
"<",
"word_len",
"-",
"2",
"and",
"(",
"(",
"(",
"has_prev_char",
"and",
"prev_char",
"!=",
"\"q\"",
"and",
"char",
"==",
"\"u\"",
"and",
"self",
".",
"_is_vowel",
"(",
"word",
"[",
"i",
"+",
"2",
"]",
")",
")",
"or",
"(",
"not",
"has_prev_char",
"and",
"char",
"==",
"\"u\"",
"and",
"self",
".",
"_is_vowel",
"(",
"word",
"[",
"i",
"+",
"2",
"]",
")",
")",
")",
"or",
"(",
"char",
"!=",
"\"u\"",
"and",
"self",
".",
"_is_vowel",
"(",
"word",
"[",
"i",
"+",
"2",
"]",
")",
"and",
"not",
"self",
".",
"_is_diphthong",
"(",
"char",
",",
"next_char",
")",
")",
"or",
"(",
"self",
".",
"_is_mute_consonant_or_f",
"(",
"next_char",
")",
"and",
"self",
".",
"_is_liquid_consonant",
"(",
"word",
"[",
"i",
"+",
"2",
"]",
")",
")",
")",
")",
")",
":",
"syllable_complete",
"=",
"True",
"# Otherwise, it's a consonant",
"else",
":",
"if",
"(",
"# If the next character's also a consonant (but it's not the last in the word)",
"(",
"not",
"self",
".",
"_is_vowel",
"(",
"next_char",
")",
"and",
"i",
"<",
"word_len",
"-",
"2",
")",
"# If the char's not a mute consonant followed by a liquid consonant",
"and",
"not",
"(",
"self",
".",
"_is_mute_consonant_or_f",
"(",
"char",
")",
"and",
"self",
".",
"_is_liquid_consonant",
"(",
"next_char",
")",
")",
"# If the char's not a c, p, or t followed by an h",
"and",
"not",
"(",
"(",
"has_prev_char",
"and",
"not",
"self",
".",
"_is_vowel",
"(",
"prev_char",
")",
"and",
"char",
"in",
"[",
"'c'",
",",
"'p'",
",",
"'t'",
"]",
"and",
"next_char",
"==",
"'h'",
")",
"or",
"(",
"not",
"has_prev_char",
"and",
"char",
"in",
"[",
"'c'",
",",
"'p'",
",",
"'t'",
"]",
"and",
"next_char",
"==",
"'h'",
")",
")",
"# And it's not the only letter in the syllable",
"and",
"not",
"len",
"(",
"syllable",
")",
"==",
"1",
")",
":",
"syllable_complete",
"=",
"True",
"# If it's a complete syllable, append it to syllables list and reset syllable",
"if",
"syllable_complete",
":",
"syllables",
".",
"append",
"(",
"syllable",
")",
"syllable",
"=",
"''",
"return",
"syllables"
] | 50.678322 | 26.097902 |
def edmcompletion(A, reordered = True, **kwargs):
"""
Euclidean distance matrix completion. The routine takes an EDM-completable
cspmatrix :math:`A` and returns a dense EDM :math:`X`
that satisfies
.. math::
P( X ) = A
:param A: :py:class:`cspmatrix`
:param reordered: boolean
"""
assert isinstance(A, cspmatrix) and A.is_factor is False, "A must be a cspmatrix"
tol = kwargs.get('tol',1e-15)
X = matrix(A.spmatrix(reordered = True, symmetric = True))
symb = A.symb
n = symb.n
snptr = symb.snptr
sncolptr = symb.sncolptr
snrowidx = symb.snrowidx
# visit supernodes in reverse (descending) order
for k in range(symb.Nsn-1,-1,-1):
nn = snptr[k+1]-snptr[k]
beta = snrowidx[sncolptr[k]:sncolptr[k+1]]
nj = len(beta)
if nj-nn == 0: continue
alpha = beta[nn:]
nu = beta[:nn]
eta = matrix([matrix(range(beta[kk]+1,beta[kk+1])) for kk in range(nj-1)] + [matrix(range(beta[-1]+1,n))])
ne = len(eta)
# Compute Yaa, Yan, Yea, Ynn, Yee
Yaa = -0.5*X[alpha,alpha] - 0.5*X[alpha[0],alpha[0]]
blas.syr2(X[alpha,alpha[0]], matrix(1.0,(nj-nn,1)), Yaa, alpha = 0.5)
Ynn = -0.5*X[nu,nu] - 0.5*X[alpha[0],alpha[0]]
blas.syr2(X[nu,alpha[0]], matrix(1.0,(nn,1)), Ynn, alpha = 0.5)
Yee = -0.5*X[eta,eta] - 0.5*X[alpha[0],alpha[0]]
blas.syr2(X[eta,alpha[0]], matrix(1.0,(ne,1)), Yee, alpha = 0.5)
Yan = -0.5*X[alpha,nu] - 0.5*X[alpha[0],alpha[0]]
Yan += 0.5*matrix(1.0,(nj-nn,1))*X[alpha[0],nu]
Yan += 0.5*X[alpha,alpha[0]]*matrix(1.0,(1,nn))
Yea = -0.5*X[eta,alpha] - 0.5*X[alpha[0],alpha[0]]
Yea += 0.5*matrix(1.0,(ne,1))*X[alpha[0],alpha]
Yea += 0.5*X[eta,alpha[0]]*matrix(1.0,(1,nj-nn))
# EVD: Yaa = Z*diag(w)*Z.T
w = matrix(0.0,(Yaa.size[0],1))
Z = matrix(0.0,Yaa.size)
lapack.syevr(Yaa, w, jobz='V', range='A', uplo='L', Z=Z)
# Pseudo-inverse: Yp = pinv(Yaa)
lambda_max = max(w)
Yp = Z*spmatrix([1.0/wi if wi > lambda_max*tol else 0.0 for wi in w],range(len(w)),range(len(w)))*Z.T
# Compute update
tmp = -2.0*Yea*Yp*Yan + matrix(1.0,(ne,1))*Ynn[::nn+1].T + Yee[::ne+1]*matrix(1.0,(1,nn))
X[eta,nu] = tmp
X[nu,eta] = tmp.T
if reordered:
return X
else:
return X[symb.ip,symb.ip] | [
"def",
"edmcompletion",
"(",
"A",
",",
"reordered",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"isinstance",
"(",
"A",
",",
"cspmatrix",
")",
"and",
"A",
".",
"is_factor",
"is",
"False",
",",
"\"A must be a cspmatrix\"",
"tol",
"=",
"kwargs",
".",
"get",
"(",
"'tol'",
",",
"1e-15",
")",
"X",
"=",
"matrix",
"(",
"A",
".",
"spmatrix",
"(",
"reordered",
"=",
"True",
",",
"symmetric",
"=",
"True",
")",
")",
"symb",
"=",
"A",
".",
"symb",
"n",
"=",
"symb",
".",
"n",
"snptr",
"=",
"symb",
".",
"snptr",
"sncolptr",
"=",
"symb",
".",
"sncolptr",
"snrowidx",
"=",
"symb",
".",
"snrowidx",
"# visit supernodes in reverse (descending) order",
"for",
"k",
"in",
"range",
"(",
"symb",
".",
"Nsn",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"nn",
"=",
"snptr",
"[",
"k",
"+",
"1",
"]",
"-",
"snptr",
"[",
"k",
"]",
"beta",
"=",
"snrowidx",
"[",
"sncolptr",
"[",
"k",
"]",
":",
"sncolptr",
"[",
"k",
"+",
"1",
"]",
"]",
"nj",
"=",
"len",
"(",
"beta",
")",
"if",
"nj",
"-",
"nn",
"==",
"0",
":",
"continue",
"alpha",
"=",
"beta",
"[",
"nn",
":",
"]",
"nu",
"=",
"beta",
"[",
":",
"nn",
"]",
"eta",
"=",
"matrix",
"(",
"[",
"matrix",
"(",
"range",
"(",
"beta",
"[",
"kk",
"]",
"+",
"1",
",",
"beta",
"[",
"kk",
"+",
"1",
"]",
")",
")",
"for",
"kk",
"in",
"range",
"(",
"nj",
"-",
"1",
")",
"]",
"+",
"[",
"matrix",
"(",
"range",
"(",
"beta",
"[",
"-",
"1",
"]",
"+",
"1",
",",
"n",
")",
")",
"]",
")",
"ne",
"=",
"len",
"(",
"eta",
")",
"# Compute Yaa, Yan, Yea, Ynn, Yee",
"Yaa",
"=",
"-",
"0.5",
"*",
"X",
"[",
"alpha",
",",
"alpha",
"]",
"-",
"0.5",
"*",
"X",
"[",
"alpha",
"[",
"0",
"]",
",",
"alpha",
"[",
"0",
"]",
"]",
"blas",
".",
"syr2",
"(",
"X",
"[",
"alpha",
",",
"alpha",
"[",
"0",
"]",
"]",
",",
"matrix",
"(",
"1.0",
",",
"(",
"nj",
"-",
"nn",
",",
"1",
")",
")",
",",
"Yaa",
",",
"alpha",
"=",
"0.5",
")",
"Ynn",
"=",
"-",
"0.5",
"*",
"X",
"[",
"nu",
",",
"nu",
"]",
"-",
"0.5",
"*",
"X",
"[",
"alpha",
"[",
"0",
"]",
",",
"alpha",
"[",
"0",
"]",
"]",
"blas",
".",
"syr2",
"(",
"X",
"[",
"nu",
",",
"alpha",
"[",
"0",
"]",
"]",
",",
"matrix",
"(",
"1.0",
",",
"(",
"nn",
",",
"1",
")",
")",
",",
"Ynn",
",",
"alpha",
"=",
"0.5",
")",
"Yee",
"=",
"-",
"0.5",
"*",
"X",
"[",
"eta",
",",
"eta",
"]",
"-",
"0.5",
"*",
"X",
"[",
"alpha",
"[",
"0",
"]",
",",
"alpha",
"[",
"0",
"]",
"]",
"blas",
".",
"syr2",
"(",
"X",
"[",
"eta",
",",
"alpha",
"[",
"0",
"]",
"]",
",",
"matrix",
"(",
"1.0",
",",
"(",
"ne",
",",
"1",
")",
")",
",",
"Yee",
",",
"alpha",
"=",
"0.5",
")",
"Yan",
"=",
"-",
"0.5",
"*",
"X",
"[",
"alpha",
",",
"nu",
"]",
"-",
"0.5",
"*",
"X",
"[",
"alpha",
"[",
"0",
"]",
",",
"alpha",
"[",
"0",
"]",
"]",
"Yan",
"+=",
"0.5",
"*",
"matrix",
"(",
"1.0",
",",
"(",
"nj",
"-",
"nn",
",",
"1",
")",
")",
"*",
"X",
"[",
"alpha",
"[",
"0",
"]",
",",
"nu",
"]",
"Yan",
"+=",
"0.5",
"*",
"X",
"[",
"alpha",
",",
"alpha",
"[",
"0",
"]",
"]",
"*",
"matrix",
"(",
"1.0",
",",
"(",
"1",
",",
"nn",
")",
")",
"Yea",
"=",
"-",
"0.5",
"*",
"X",
"[",
"eta",
",",
"alpha",
"]",
"-",
"0.5",
"*",
"X",
"[",
"alpha",
"[",
"0",
"]",
",",
"alpha",
"[",
"0",
"]",
"]",
"Yea",
"+=",
"0.5",
"*",
"matrix",
"(",
"1.0",
",",
"(",
"ne",
",",
"1",
")",
")",
"*",
"X",
"[",
"alpha",
"[",
"0",
"]",
",",
"alpha",
"]",
"Yea",
"+=",
"0.5",
"*",
"X",
"[",
"eta",
",",
"alpha",
"[",
"0",
"]",
"]",
"*",
"matrix",
"(",
"1.0",
",",
"(",
"1",
",",
"nj",
"-",
"nn",
")",
")",
"# EVD: Yaa = Z*diag(w)*Z.T ",
"w",
"=",
"matrix",
"(",
"0.0",
",",
"(",
"Yaa",
".",
"size",
"[",
"0",
"]",
",",
"1",
")",
")",
"Z",
"=",
"matrix",
"(",
"0.0",
",",
"Yaa",
".",
"size",
")",
"lapack",
".",
"syevr",
"(",
"Yaa",
",",
"w",
",",
"jobz",
"=",
"'V'",
",",
"range",
"=",
"'A'",
",",
"uplo",
"=",
"'L'",
",",
"Z",
"=",
"Z",
")",
"# Pseudo-inverse: Yp = pinv(Yaa)",
"lambda_max",
"=",
"max",
"(",
"w",
")",
"Yp",
"=",
"Z",
"*",
"spmatrix",
"(",
"[",
"1.0",
"/",
"wi",
"if",
"wi",
">",
"lambda_max",
"*",
"tol",
"else",
"0.0",
"for",
"wi",
"in",
"w",
"]",
",",
"range",
"(",
"len",
"(",
"w",
")",
")",
",",
"range",
"(",
"len",
"(",
"w",
")",
")",
")",
"*",
"Z",
".",
"T",
"# Compute update",
"tmp",
"=",
"-",
"2.0",
"*",
"Yea",
"*",
"Yp",
"*",
"Yan",
"+",
"matrix",
"(",
"1.0",
",",
"(",
"ne",
",",
"1",
")",
")",
"*",
"Ynn",
"[",
":",
":",
"nn",
"+",
"1",
"]",
".",
"T",
"+",
"Yee",
"[",
":",
":",
"ne",
"+",
"1",
"]",
"*",
"matrix",
"(",
"1.0",
",",
"(",
"1",
",",
"nn",
")",
")",
"X",
"[",
"eta",
",",
"nu",
"]",
"=",
"tmp",
"X",
"[",
"nu",
",",
"eta",
"]",
"=",
"tmp",
".",
"T",
"if",
"reordered",
":",
"return",
"X",
"else",
":",
"return",
"X",
"[",
"symb",
".",
"ip",
",",
"symb",
".",
"ip",
"]"
] | 34.859155 | 22.943662 |
def rollback(self, transaction = None):
"""Roll back a transaction."""
if not self.in_transaction:
raise NotInTransaction
for collection, store in self.stores.items():
store.rollback()
indexes = self.indexes[collection]
indexes_to_rebuild = []
for key, index in indexes.items():
try:
index.rollback()
except NotInTransaction:
# this index is "dirty" and needs to be rebuilt
# (probably it has been created within a transaction)
indexes_to_rebuild.append(key)
if indexes_to_rebuild:
self.rebuild_indexes(collection, indexes_to_rebuild)
self.in_transaction = False | [
"def",
"rollback",
"(",
"self",
",",
"transaction",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"in_transaction",
":",
"raise",
"NotInTransaction",
"for",
"collection",
",",
"store",
"in",
"self",
".",
"stores",
".",
"items",
"(",
")",
":",
"store",
".",
"rollback",
"(",
")",
"indexes",
"=",
"self",
".",
"indexes",
"[",
"collection",
"]",
"indexes_to_rebuild",
"=",
"[",
"]",
"for",
"key",
",",
"index",
"in",
"indexes",
".",
"items",
"(",
")",
":",
"try",
":",
"index",
".",
"rollback",
"(",
")",
"except",
"NotInTransaction",
":",
"# this index is \"dirty\" and needs to be rebuilt",
"# (probably it has been created within a transaction)",
"indexes_to_rebuild",
".",
"append",
"(",
"key",
")",
"if",
"indexes_to_rebuild",
":",
"self",
".",
"rebuild_indexes",
"(",
"collection",
",",
"indexes_to_rebuild",
")",
"self",
".",
"in_transaction",
"=",
"False"
] | 43.166667 | 10.388889 |
def cors_allow_any(request, response):
"""
Add headers to permit CORS requests from any origin, with or without credentials,
with any headers.
"""
origin = request.META.get('HTTP_ORIGIN')
if not origin:
return response
# From the CORS spec: The string "*" cannot be used for a resource that supports credentials.
response['Access-Control-Allow-Origin'] = origin
patch_vary_headers(response, ['Origin'])
response['Access-Control-Allow-Credentials'] = 'true'
if request.method == 'OPTIONS':
if 'HTTP_ACCESS_CONTROL_REQUEST_HEADERS' in request.META:
response['Access-Control-Allow-Headers'] \
= request.META['HTTP_ACCESS_CONTROL_REQUEST_HEADERS']
response['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'
return response | [
"def",
"cors_allow_any",
"(",
"request",
",",
"response",
")",
":",
"origin",
"=",
"request",
".",
"META",
".",
"get",
"(",
"'HTTP_ORIGIN'",
")",
"if",
"not",
"origin",
":",
"return",
"response",
"# From the CORS spec: The string \"*\" cannot be used for a resource that supports credentials.",
"response",
"[",
"'Access-Control-Allow-Origin'",
"]",
"=",
"origin",
"patch_vary_headers",
"(",
"response",
",",
"[",
"'Origin'",
"]",
")",
"response",
"[",
"'Access-Control-Allow-Credentials'",
"]",
"=",
"'true'",
"if",
"request",
".",
"method",
"==",
"'OPTIONS'",
":",
"if",
"'HTTP_ACCESS_CONTROL_REQUEST_HEADERS'",
"in",
"request",
".",
"META",
":",
"response",
"[",
"'Access-Control-Allow-Headers'",
"]",
"=",
"request",
".",
"META",
"[",
"'HTTP_ACCESS_CONTROL_REQUEST_HEADERS'",
"]",
"response",
"[",
"'Access-Control-Allow-Methods'",
"]",
"=",
"'GET, POST, OPTIONS'",
"return",
"response"
] | 38.380952 | 21.142857 |
def get_Callable_args_res(clb):
"""Python version independent function to obtain the parameters
of a typing.Callable object. Returns as tuple: args, result.
Tested with CPython 2.7, 3.5, 3.6 and Jython 2.7.1.
"""
try:
return clb.__args__, clb.__result__
except AttributeError:
# Python 3.6
return clb.__args__[:-1], clb.__args__[-1] | [
"def",
"get_Callable_args_res",
"(",
"clb",
")",
":",
"try",
":",
"return",
"clb",
".",
"__args__",
",",
"clb",
".",
"__result__",
"except",
"AttributeError",
":",
"# Python 3.6",
"return",
"clb",
".",
"__args__",
"[",
":",
"-",
"1",
"]",
",",
"clb",
".",
"__args__",
"[",
"-",
"1",
"]"
] | 37.1 | 12.7 |
def load_model(itos_filename, classifier_filename, num_classes):
"""Load the classifier and int to string mapping
Args:
itos_filename (str): The filename of the int to string mapping file (usually called itos.pkl)
classifier_filename (str): The filename of the trained classifier
Returns:
string to int mapping, trained classifer model
"""
# load the int to string mapping file
itos = pickle.load(Path(itos_filename).open('rb'))
# turn it into a string to int mapping (which is what we need)
stoi = collections.defaultdict(lambda:0, {str(v):int(k) for k,v in enumerate(itos)})
# these parameters aren't used, but this is the easiest way to get a model
bptt,em_sz,nh,nl = 70,400,1150,3
dps = np.array([0.4,0.5,0.05,0.3,0.4])*0.5
vs = len(itos)
model = get_rnn_classifer(bptt, 20*70, num_classes, vs, emb_sz=em_sz, n_hid=nh, n_layers=nl, pad_token=1,
layers=[em_sz*3, 50, num_classes], drops=[dps[4], 0.1],
dropouti=dps[0], wdrop=dps[1], dropoute=dps[2], dropouth=dps[3])
# load the trained classifier
model.load_state_dict(torch.load(classifier_filename, map_location=lambda storage, loc: storage))
# put the classifier into evaluation mode
model.reset()
model.eval()
return stoi, model | [
"def",
"load_model",
"(",
"itos_filename",
",",
"classifier_filename",
",",
"num_classes",
")",
":",
"# load the int to string mapping file",
"itos",
"=",
"pickle",
".",
"load",
"(",
"Path",
"(",
"itos_filename",
")",
".",
"open",
"(",
"'rb'",
")",
")",
"# turn it into a string to int mapping (which is what we need)",
"stoi",
"=",
"collections",
".",
"defaultdict",
"(",
"lambda",
":",
"0",
",",
"{",
"str",
"(",
"v",
")",
":",
"int",
"(",
"k",
")",
"for",
"k",
",",
"v",
"in",
"enumerate",
"(",
"itos",
")",
"}",
")",
"# these parameters aren't used, but this is the easiest way to get a model",
"bptt",
",",
"em_sz",
",",
"nh",
",",
"nl",
"=",
"70",
",",
"400",
",",
"1150",
",",
"3",
"dps",
"=",
"np",
".",
"array",
"(",
"[",
"0.4",
",",
"0.5",
",",
"0.05",
",",
"0.3",
",",
"0.4",
"]",
")",
"*",
"0.5",
"vs",
"=",
"len",
"(",
"itos",
")",
"model",
"=",
"get_rnn_classifer",
"(",
"bptt",
",",
"20",
"*",
"70",
",",
"num_classes",
",",
"vs",
",",
"emb_sz",
"=",
"em_sz",
",",
"n_hid",
"=",
"nh",
",",
"n_layers",
"=",
"nl",
",",
"pad_token",
"=",
"1",
",",
"layers",
"=",
"[",
"em_sz",
"*",
"3",
",",
"50",
",",
"num_classes",
"]",
",",
"drops",
"=",
"[",
"dps",
"[",
"4",
"]",
",",
"0.1",
"]",
",",
"dropouti",
"=",
"dps",
"[",
"0",
"]",
",",
"wdrop",
"=",
"dps",
"[",
"1",
"]",
",",
"dropoute",
"=",
"dps",
"[",
"2",
"]",
",",
"dropouth",
"=",
"dps",
"[",
"3",
"]",
")",
"# load the trained classifier",
"model",
".",
"load_state_dict",
"(",
"torch",
".",
"load",
"(",
"classifier_filename",
",",
"map_location",
"=",
"lambda",
"storage",
",",
"loc",
":",
"storage",
")",
")",
"# put the classifier into evaluation mode",
"model",
".",
"reset",
"(",
")",
"model",
".",
"eval",
"(",
")",
"return",
"stoi",
",",
"model"
] | 38.939394 | 28.484848 |
def with_wrapper(self, wrapper=None, name=None):
""" Copy this BarSet, and return a new BarSet with the specified
name and wrapper.
If no name is given, `{self.name}_custom_wrapper` is used.
If no wrapper is given, the new BarSet will have no wrapper.
"""
name = name or '{}_custom_wrapper'.format(self.name)
return self.__class__(self.data, name=name, wrapper=wrapper) | [
"def",
"with_wrapper",
"(",
"self",
",",
"wrapper",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"name",
"=",
"name",
"or",
"'{}_custom_wrapper'",
".",
"format",
"(",
"self",
".",
"name",
")",
"return",
"self",
".",
"__class__",
"(",
"self",
".",
"data",
",",
"name",
"=",
"name",
",",
"wrapper",
"=",
"wrapper",
")"
] | 53.75 | 16.125 |
def resolve_inputs(self, layers):
'''Resolve the names of inputs for this layer into shape tuples.
Parameters
----------
layers : list of :class:`Layer`
A list of the layers that are available for resolving inputs.
Raises
------
theanets.util.ConfigurationError :
If an input cannot be resolved.
'''
resolved = {}
for name, shape in self._input_shapes.items():
if shape is None:
name, shape = self._resolve_shape(name, layers)
resolved[name] = shape
self._input_shapes = resolved | [
"def",
"resolve_inputs",
"(",
"self",
",",
"layers",
")",
":",
"resolved",
"=",
"{",
"}",
"for",
"name",
",",
"shape",
"in",
"self",
".",
"_input_shapes",
".",
"items",
"(",
")",
":",
"if",
"shape",
"is",
"None",
":",
"name",
",",
"shape",
"=",
"self",
".",
"_resolve_shape",
"(",
"name",
",",
"layers",
")",
"resolved",
"[",
"name",
"]",
"=",
"shape",
"self",
".",
"_input_shapes",
"=",
"resolved"
] | 32.368421 | 18.894737 |
def p_UnionMemberType_anyType(p):
"""UnionMemberType : any "[" "]" TypeSuffix"""
p[0] = helper.unwrapTypeSuffix(model.Array(t=model.SimpleType(
type=model.SimpleType.ANY)), p[4]) | [
"def",
"p_UnionMemberType_anyType",
"(",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"helper",
".",
"unwrapTypeSuffix",
"(",
"model",
".",
"Array",
"(",
"t",
"=",
"model",
".",
"SimpleType",
"(",
"type",
"=",
"model",
".",
"SimpleType",
".",
"ANY",
")",
")",
",",
"p",
"[",
"4",
"]",
")"
] | 45.75 | 8.25 |
def compatcallback(f):
""" Compatibility callback decorator for older click version.
Click 1.0 does not have a version string stored, so we need to
use getattr here to be safe.
"""
if getattr(click, '__version__', '0.0') >= '2.0':
return f
return update_wrapper(lambda ctx, value: f(ctx, None, value), f) | [
"def",
"compatcallback",
"(",
"f",
")",
":",
"if",
"getattr",
"(",
"click",
",",
"'__version__'",
",",
"'0.0'",
")",
">=",
"'2.0'",
":",
"return",
"f",
"return",
"update_wrapper",
"(",
"lambda",
"ctx",
",",
"value",
":",
"f",
"(",
"ctx",
",",
"None",
",",
"value",
")",
",",
"f",
")"
] | 36.555556 | 17.444444 |
def run(self):
"""
Performs the actual FEFF run
Returns:
(subprocess.Popen) Used for monitoring.
"""
with open(self.output_file, "w") as f_std, \
open(self.stderr_file, "w", buffering=1) as f_err:
# Use line buffering for stderr
# On TSCC, need to run shell command
p = subprocess.Popen(self.feff_cmd, stdout=f_std, stderr=f_err, shell=True)
return p | [
"def",
"run",
"(",
"self",
")",
":",
"with",
"open",
"(",
"self",
".",
"output_file",
",",
"\"w\"",
")",
"as",
"f_std",
",",
"open",
"(",
"self",
".",
"stderr_file",
",",
"\"w\"",
",",
"buffering",
"=",
"1",
")",
"as",
"f_err",
":",
"# Use line buffering for stderr",
"# On TSCC, need to run shell command",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"self",
".",
"feff_cmd",
",",
"stdout",
"=",
"f_std",
",",
"stderr",
"=",
"f_err",
",",
"shell",
"=",
"True",
")",
"return",
"p"
] | 32.214286 | 18.928571 |
def NgramScorer(frequency_map):
"""Compute the score of a text by using the frequencies of ngrams.
Example:
>>> fitness = NgramScorer(english.unigrams)
>>> fitness("ABC")
-4.3622319742618245
Args:
frequency_map (dict): ngram to frequency mapping
"""
# Calculate the log probability
length = len(next(iter(frequency_map)))
# TODO: 0.01 is a magic number. Needs to be better than that.
floor = math.log10(0.01 / sum(frequency_map.values()))
ngrams = frequency.frequency_to_probability(frequency_map, decorator=math.log10)
def inner(text):
# I dont like this, it is only for the .upper() to work,
# But I feel as though this can be removed in later refactoring
text = ''.join(text)
text = remove(text.upper(), string.whitespace + string.punctuation)
return sum(ngrams.get(ngram, floor) for ngram in iterate_ngrams(text, length))
return inner | [
"def",
"NgramScorer",
"(",
"frequency_map",
")",
":",
"# Calculate the log probability",
"length",
"=",
"len",
"(",
"next",
"(",
"iter",
"(",
"frequency_map",
")",
")",
")",
"# TODO: 0.01 is a magic number. Needs to be better than that.",
"floor",
"=",
"math",
".",
"log10",
"(",
"0.01",
"/",
"sum",
"(",
"frequency_map",
".",
"values",
"(",
")",
")",
")",
"ngrams",
"=",
"frequency",
".",
"frequency_to_probability",
"(",
"frequency_map",
",",
"decorator",
"=",
"math",
".",
"log10",
")",
"def",
"inner",
"(",
"text",
")",
":",
"# I dont like this, it is only for the .upper() to work,",
"# But I feel as though this can be removed in later refactoring",
"text",
"=",
"''",
".",
"join",
"(",
"text",
")",
"text",
"=",
"remove",
"(",
"text",
".",
"upper",
"(",
")",
",",
"string",
".",
"whitespace",
"+",
"string",
".",
"punctuation",
")",
"return",
"sum",
"(",
"ngrams",
".",
"get",
"(",
"ngram",
",",
"floor",
")",
"for",
"ngram",
"in",
"iterate_ngrams",
"(",
"text",
",",
"length",
")",
")",
"return",
"inner"
] | 37.36 | 22.76 |
def from_spec(spec):
"""
Creates an exploration object from a specification dict.
"""
exploration = util.get_object(
obj=spec,
predefined_objects=tensorforce.core.explorations.explorations
)
assert isinstance(exploration, Exploration)
return exploration | [
"def",
"from_spec",
"(",
"spec",
")",
":",
"exploration",
"=",
"util",
".",
"get_object",
"(",
"obj",
"=",
"spec",
",",
"predefined_objects",
"=",
"tensorforce",
".",
"core",
".",
"explorations",
".",
"explorations",
")",
"assert",
"isinstance",
"(",
"exploration",
",",
"Exploration",
")",
"return",
"exploration"
] | 32.4 | 15.4 |
def view_structure(self, only_chains=None, opacity=1.0, recolor=False, gui=False):
"""Use NGLviewer to display a structure in a Jupyter notebook
Args:
only_chains (str, list): Chain ID or IDs to display
opacity (float): Opacity of the structure
recolor (bool): If structure should be cleaned and recolored to silver
gui (bool): If the NGLview GUI should show up
Returns:
NGLviewer object
"""
# TODO: show_structure_file does not work for MMTF files - need to check for that and load accordingly
if ssbio.utils.is_ipynb():
import nglview as nv
else:
raise EnvironmentError('Unable to display structure - not running in a Jupyter notebook environment')
if not self.structure_file:
raise ValueError("Structure file not loaded")
only_chains = ssbio.utils.force_list(only_chains)
to_show_chains = '( '
for c in only_chains:
to_show_chains += ':{} or'.format(c)
to_show_chains = to_show_chains.strip(' or ')
to_show_chains += ' )'
if self.file_type == 'mmtf' or self.file_type == 'mmtf.gz':
view = nv.NGLWidget()
view.add_component(self.structure_path)
else:
view = nv.show_structure_file(self.structure_path, gui=gui)
if recolor:
view.clear_representations()
if only_chains:
view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity)
else:
view.add_cartoon(selection='protein', color='silver', opacity=opacity)
elif only_chains:
view.clear_representations()
view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity)
return view | [
"def",
"view_structure",
"(",
"self",
",",
"only_chains",
"=",
"None",
",",
"opacity",
"=",
"1.0",
",",
"recolor",
"=",
"False",
",",
"gui",
"=",
"False",
")",
":",
"# TODO: show_structure_file does not work for MMTF files - need to check for that and load accordingly",
"if",
"ssbio",
".",
"utils",
".",
"is_ipynb",
"(",
")",
":",
"import",
"nglview",
"as",
"nv",
"else",
":",
"raise",
"EnvironmentError",
"(",
"'Unable to display structure - not running in a Jupyter notebook environment'",
")",
"if",
"not",
"self",
".",
"structure_file",
":",
"raise",
"ValueError",
"(",
"\"Structure file not loaded\"",
")",
"only_chains",
"=",
"ssbio",
".",
"utils",
".",
"force_list",
"(",
"only_chains",
")",
"to_show_chains",
"=",
"'( '",
"for",
"c",
"in",
"only_chains",
":",
"to_show_chains",
"+=",
"':{} or'",
".",
"format",
"(",
"c",
")",
"to_show_chains",
"=",
"to_show_chains",
".",
"strip",
"(",
"' or '",
")",
"to_show_chains",
"+=",
"' )'",
"if",
"self",
".",
"file_type",
"==",
"'mmtf'",
"or",
"self",
".",
"file_type",
"==",
"'mmtf.gz'",
":",
"view",
"=",
"nv",
".",
"NGLWidget",
"(",
")",
"view",
".",
"add_component",
"(",
"self",
".",
"structure_path",
")",
"else",
":",
"view",
"=",
"nv",
".",
"show_structure_file",
"(",
"self",
".",
"structure_path",
",",
"gui",
"=",
"gui",
")",
"if",
"recolor",
":",
"view",
".",
"clear_representations",
"(",
")",
"if",
"only_chains",
":",
"view",
".",
"add_cartoon",
"(",
"selection",
"=",
"'{} and (not hydrogen)'",
".",
"format",
"(",
"to_show_chains",
")",
",",
"color",
"=",
"'silver'",
",",
"opacity",
"=",
"opacity",
")",
"else",
":",
"view",
".",
"add_cartoon",
"(",
"selection",
"=",
"'protein'",
",",
"color",
"=",
"'silver'",
",",
"opacity",
"=",
"opacity",
")",
"elif",
"only_chains",
":",
"view",
".",
"clear_representations",
"(",
")",
"view",
".",
"add_cartoon",
"(",
"selection",
"=",
"'{} and (not hydrogen)'",
".",
"format",
"(",
"to_show_chains",
")",
",",
"color",
"=",
"'silver'",
",",
"opacity",
"=",
"opacity",
")",
"return",
"view"
] | 39.659574 | 26.382979 |
def dskgtl(keywrd):
"""
Retrieve the value of a specified DSK tolerance or margin parameter.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskgtl_c.html
:param keywrd: Code specifying parameter to retrieve.
:type keywrd: int
:return: Value of parameter.
:rtype: float
"""
keywrd = ctypes.c_int(keywrd)
dpval = ctypes.c_double(0)
libspice.dskgtl_c(keywrd, ctypes.byref(dpval))
return dpval.value | [
"def",
"dskgtl",
"(",
"keywrd",
")",
":",
"keywrd",
"=",
"ctypes",
".",
"c_int",
"(",
"keywrd",
")",
"dpval",
"=",
"ctypes",
".",
"c_double",
"(",
"0",
")",
"libspice",
".",
"dskgtl_c",
"(",
"keywrd",
",",
"ctypes",
".",
"byref",
"(",
"dpval",
")",
")",
"return",
"dpval",
".",
"value"
] | 30.066667 | 18.066667 |
def list_subscriptions(self, service):
"""Asks for a list of all subscribed accounts and devices, along with their statuses."""
data = {
'service': service,
}
return self._perform_post_request(self.list_subscriptions_endpoint, data, self.token_header) | [
"def",
"list_subscriptions",
"(",
"self",
",",
"service",
")",
":",
"data",
"=",
"{",
"'service'",
":",
"service",
",",
"}",
"return",
"self",
".",
"_perform_post_request",
"(",
"self",
".",
"list_subscriptions_endpoint",
",",
"data",
",",
"self",
".",
"token_header",
")"
] | 48.333333 | 21 |
def _construct_version(self, function, intrinsics_resolver):
"""Constructs a Lambda Version resource that will be auto-published when CodeUri of the function changes.
Old versions will not be deleted without a direct reference from the CloudFormation template.
:param model.lambda_.LambdaFunction function: Lambda function object that is being connected to a version
:param model.intrinsics.resolver.IntrinsicsResolver intrinsics_resolver: Class that can help resolve
references to parameters present in CodeUri. It is a common usecase to set S3Key of Code to be a
template parameter. Need to resolve the values otherwise we will never detect a change in Code dict
:return: Lambda function Version resource
"""
code_dict = function.Code
if not code_dict:
raise ValueError("Lambda function code must be a valid non-empty dictionary")
if not intrinsics_resolver:
raise ValueError("intrinsics_resolver is required for versions creation")
# Resolve references to template parameters before creating hash. This will *not* resolve all intrinsics
# because we cannot resolve runtime values like Arn of a resource. For purposes of detecting changes, this
# is good enough. Here is why:
#
# When using intrinsic functions there are two cases when has must change:
# - Value of the template parameter changes
# - (or) LogicalId of a referenced resource changes ie. !GetAtt NewResource.Arn
#
# Later case will already change the hash because some value in the Code dictionary changes. We handle the
# first case by resolving references to template parameters. It is okay even if these references are
# present inside another intrinsic such as !Join. The resolver will replace the reference with the parameter's
# value and keep all other parts of !Join identical. This will still trigger a change in the hash.
code_dict = intrinsics_resolver.resolve_parameter_refs(code_dict)
# Construct the LogicalID of Lambda version by appending 10 characters of SHA of CodeUri. This is necessary
# to trigger creation of a new version every time code location changes. Since logicalId changes, CloudFormation
# will drop the old version and create a new one for us. We set a DeletionPolicy on the version resource to
# prevent CloudFormation from actually deleting the underlying version resource
#
# SHA Collisions: For purposes of triggering a new update, we are concerned about just the difference previous
# and next hashes. The chances that two subsequent hashes collide is fairly low.
prefix = "{id}Version".format(id=self.logical_id)
logical_id = logical_id_generator.LogicalIdGenerator(prefix, code_dict).gen()
attributes = self.get_passthrough_resource_attributes()
if attributes is None:
attributes = {}
attributes["DeletionPolicy"] = "Retain"
lambda_version = LambdaVersion(logical_id=logical_id, attributes=attributes)
lambda_version.FunctionName = function.get_runtime_attr('name')
lambda_version.Description = self.VersionDescription
return lambda_version | [
"def",
"_construct_version",
"(",
"self",
",",
"function",
",",
"intrinsics_resolver",
")",
":",
"code_dict",
"=",
"function",
".",
"Code",
"if",
"not",
"code_dict",
":",
"raise",
"ValueError",
"(",
"\"Lambda function code must be a valid non-empty dictionary\"",
")",
"if",
"not",
"intrinsics_resolver",
":",
"raise",
"ValueError",
"(",
"\"intrinsics_resolver is required for versions creation\"",
")",
"# Resolve references to template parameters before creating hash. This will *not* resolve all intrinsics",
"# because we cannot resolve runtime values like Arn of a resource. For purposes of detecting changes, this",
"# is good enough. Here is why:",
"#",
"# When using intrinsic functions there are two cases when has must change:",
"# - Value of the template parameter changes",
"# - (or) LogicalId of a referenced resource changes ie. !GetAtt NewResource.Arn",
"#",
"# Later case will already change the hash because some value in the Code dictionary changes. We handle the",
"# first case by resolving references to template parameters. It is okay even if these references are",
"# present inside another intrinsic such as !Join. The resolver will replace the reference with the parameter's",
"# value and keep all other parts of !Join identical. This will still trigger a change in the hash.",
"code_dict",
"=",
"intrinsics_resolver",
".",
"resolve_parameter_refs",
"(",
"code_dict",
")",
"# Construct the LogicalID of Lambda version by appending 10 characters of SHA of CodeUri. This is necessary",
"# to trigger creation of a new version every time code location changes. Since logicalId changes, CloudFormation",
"# will drop the old version and create a new one for us. We set a DeletionPolicy on the version resource to",
"# prevent CloudFormation from actually deleting the underlying version resource",
"#",
"# SHA Collisions: For purposes of triggering a new update, we are concerned about just the difference previous",
"# and next hashes. The chances that two subsequent hashes collide is fairly low.",
"prefix",
"=",
"\"{id}Version\"",
".",
"format",
"(",
"id",
"=",
"self",
".",
"logical_id",
")",
"logical_id",
"=",
"logical_id_generator",
".",
"LogicalIdGenerator",
"(",
"prefix",
",",
"code_dict",
")",
".",
"gen",
"(",
")",
"attributes",
"=",
"self",
".",
"get_passthrough_resource_attributes",
"(",
")",
"if",
"attributes",
"is",
"None",
":",
"attributes",
"=",
"{",
"}",
"attributes",
"[",
"\"DeletionPolicy\"",
"]",
"=",
"\"Retain\"",
"lambda_version",
"=",
"LambdaVersion",
"(",
"logical_id",
"=",
"logical_id",
",",
"attributes",
"=",
"attributes",
")",
"lambda_version",
".",
"FunctionName",
"=",
"function",
".",
"get_runtime_attr",
"(",
"'name'",
")",
"lambda_version",
".",
"Description",
"=",
"self",
".",
"VersionDescription",
"return",
"lambda_version"
] | 64.45098 | 40.686275 |
async def set_state(self, parameter):
"""Set switch to desired state."""
command_send = CommandSend(pyvlx=self.pyvlx, node_id=self.node_id, parameter=parameter)
await command_send.do_api_call()
if not command_send.success:
raise PyVLXException("Unable to send command")
self.parameter = parameter
await self.after_update() | [
"async",
"def",
"set_state",
"(",
"self",
",",
"parameter",
")",
":",
"command_send",
"=",
"CommandSend",
"(",
"pyvlx",
"=",
"self",
".",
"pyvlx",
",",
"node_id",
"=",
"self",
".",
"node_id",
",",
"parameter",
"=",
"parameter",
")",
"await",
"command_send",
".",
"do_api_call",
"(",
")",
"if",
"not",
"command_send",
".",
"success",
":",
"raise",
"PyVLXException",
"(",
"\"Unable to send command\"",
")",
"self",
".",
"parameter",
"=",
"parameter",
"await",
"self",
".",
"after_update",
"(",
")"
] | 46.875 | 11.625 |
def get_users(self, search=None, page=1, per_page=20, **kwargs):
"""
Returns a list of users from the Gitlab server
:param search: Optional search query
:param page: Page number (default: 1)
:param per_page: Number of items to list per page (default: 20, max: 100)
:return: List of Dictionaries containing users
:raise: HttpError if invalid response returned
"""
if search:
return self.get('/users', page=page, per_page=per_page, search=search, **kwargs)
return self.get('/users', page=page, per_page=per_page, **kwargs) | [
"def",
"get_users",
"(",
"self",
",",
"search",
"=",
"None",
",",
"page",
"=",
"1",
",",
"per_page",
"=",
"20",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"search",
":",
"return",
"self",
".",
"get",
"(",
"'/users'",
",",
"page",
"=",
"page",
",",
"per_page",
"=",
"per_page",
",",
"search",
"=",
"search",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"get",
"(",
"'/users'",
",",
"page",
"=",
"page",
",",
"per_page",
"=",
"per_page",
",",
"*",
"*",
"kwargs",
")"
] | 42.928571 | 21.642857 |
def delete_milestone_request(session, milestone_request_id):
"""
Delete a milestone request
"""
params_data = {
'action': 'delete',
}
# POST /api/projects/0.1/milestone_requests/{milestone_request_id}/?action=
# delete
endpoint = 'milestone_requests/{}'.format(milestone_request_id)
response = make_put_request(session, endpoint, params_data=params_data)
json_data = response.json()
if response.status_code == 200:
return json_data['status']
else:
raise MilestoneRequestNotDeletedException(
message=json_data['message'],
error_code=json_data['error_code'],
request_id=json_data['request_id']) | [
"def",
"delete_milestone_request",
"(",
"session",
",",
"milestone_request_id",
")",
":",
"params_data",
"=",
"{",
"'action'",
":",
"'delete'",
",",
"}",
"# POST /api/projects/0.1/milestone_requests/{milestone_request_id}/?action=",
"# delete",
"endpoint",
"=",
"'milestone_requests/{}'",
".",
"format",
"(",
"milestone_request_id",
")",
"response",
"=",
"make_put_request",
"(",
"session",
",",
"endpoint",
",",
"params_data",
"=",
"params_data",
")",
"json_data",
"=",
"response",
".",
"json",
"(",
")",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"return",
"json_data",
"[",
"'status'",
"]",
"else",
":",
"raise",
"MilestoneRequestNotDeletedException",
"(",
"message",
"=",
"json_data",
"[",
"'message'",
"]",
",",
"error_code",
"=",
"json_data",
"[",
"'error_code'",
"]",
",",
"request_id",
"=",
"json_data",
"[",
"'request_id'",
"]",
")"
] | 35.894737 | 16 |
def dms_to_degrees(v):
"""Convert degree/minute/second to decimal degrees."""
d = float(v[0][0]) / float(v[0][1])
m = float(v[1][0]) / float(v[1][1])
s = float(v[2][0]) / float(v[2][1])
return d + (m / 60.0) + (s / 3600.0) | [
"def",
"dms_to_degrees",
"(",
"v",
")",
":",
"d",
"=",
"float",
"(",
"v",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"/",
"float",
"(",
"v",
"[",
"0",
"]",
"[",
"1",
"]",
")",
"m",
"=",
"float",
"(",
"v",
"[",
"1",
"]",
"[",
"0",
"]",
")",
"/",
"float",
"(",
"v",
"[",
"1",
"]",
"[",
"1",
"]",
")",
"s",
"=",
"float",
"(",
"v",
"[",
"2",
"]",
"[",
"0",
"]",
")",
"/",
"float",
"(",
"v",
"[",
"2",
"]",
"[",
"1",
"]",
")",
"return",
"d",
"+",
"(",
"m",
"/",
"60.0",
")",
"+",
"(",
"s",
"/",
"3600.0",
")"
] | 33.857143 | 8.714286 |
def lastElementChild(self):
"""Finds the last child node of that element which is a
Element node Note the handling of entities references is
different than in the W3C DOM element traversal spec since
we don't have back reference from entities content to
entities references. """
ret = libxml2mod.xmlLastElementChild(self._o)
if ret is None:return None
__tmp = xmlNode(_obj=ret)
return __tmp | [
"def",
"lastElementChild",
"(",
"self",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlLastElementChild",
"(",
"self",
".",
"_o",
")",
"if",
"ret",
"is",
"None",
":",
"return",
"None",
"__tmp",
"=",
"xmlNode",
"(",
"_obj",
"=",
"ret",
")",
"return",
"__tmp"
] | 46.2 | 13.6 |
def get_client(provider, token=''):
"Return the API client for the given provider."
cls = OAuth2Client
if provider.request_token_url:
cls = OAuthClient
return cls(provider, token) | [
"def",
"get_client",
"(",
"provider",
",",
"token",
"=",
"''",
")",
":",
"cls",
"=",
"OAuth2Client",
"if",
"provider",
".",
"request_token_url",
":",
"cls",
"=",
"OAuthClient",
"return",
"cls",
"(",
"provider",
",",
"token",
")"
] | 33 | 10.666667 |
def _build_type(type_, value, property_path=None):
""" Builds the schema definition based on the given type for the given value.
:param type_: The type of the value
:param value: The value to build the schema definition for
:param List[str] property_path: The property path of the current type,
defaults to None, optional
:return: The built schema definition
:rtype: Dict[str, Any]
"""
if not property_path:
property_path = []
for (type_check, builder) in (
(is_enum_type, _build_enum_type),
(is_null_type, _build_null_type),
(is_bool_type, _build_bool_type),
(is_string_type, _build_string_type),
(is_integer_type, _build_integer_type),
(is_number_type, _build_number_type),
(is_array_type, _build_array_type),
(is_object_type, _build_object_type),
):
if type_check(type_):
return builder(value, property_path=property_path)
# NOTE: warning ignores type None (as that is the config var default)
if type_:
warnings.warn(f"unhandled translation for type {type_!r} with value {value!r}")
return {} | [
"def",
"_build_type",
"(",
"type_",
",",
"value",
",",
"property_path",
"=",
"None",
")",
":",
"if",
"not",
"property_path",
":",
"property_path",
"=",
"[",
"]",
"for",
"(",
"type_check",
",",
"builder",
")",
"in",
"(",
"(",
"is_enum_type",
",",
"_build_enum_type",
")",
",",
"(",
"is_null_type",
",",
"_build_null_type",
")",
",",
"(",
"is_bool_type",
",",
"_build_bool_type",
")",
",",
"(",
"is_string_type",
",",
"_build_string_type",
")",
",",
"(",
"is_integer_type",
",",
"_build_integer_type",
")",
",",
"(",
"is_number_type",
",",
"_build_number_type",
")",
",",
"(",
"is_array_type",
",",
"_build_array_type",
")",
",",
"(",
"is_object_type",
",",
"_build_object_type",
")",
",",
")",
":",
"if",
"type_check",
"(",
"type_",
")",
":",
"return",
"builder",
"(",
"value",
",",
"property_path",
"=",
"property_path",
")",
"# NOTE: warning ignores type None (as that is the config var default)",
"if",
"type_",
":",
"warnings",
".",
"warn",
"(",
"f\"unhandled translation for type {type_!r} with value {value!r}\"",
")",
"return",
"{",
"}"
] | 36.419355 | 16.483871 |
def is_article(self, response, url):
"""
Tests if the given response is an article by calling and checking
the heuristics set in config.cfg and sitelist.json
:param obj response: The response of the site.
:param str url: The base_url (needed to get the site-specific config
from the JSON-file)
:return bool: true if the heuristics match the site as an article
"""
site = self.__sites_object[url]
heuristics = self.__get_enabled_heuristics(url)
self.log.info("Checking site: %s", response.url)
statement = self.__get_condition(url)
self.log.debug("Condition (original): %s", statement)
for heuristic, condition in heuristics.items():
heuristic_func = getattr(self, heuristic)
result = heuristic_func(response, site)
check = self.__evaluate_result(result, condition)
statement = re.sub(r"\b%s\b" % heuristic, str(check), statement)
self.log.debug("Checking heuristic (%s)"
" result (%s) on condition (%s): %s",
heuristic, result, condition, check)
self.log.debug("Condition (evaluated): %s", statement)
is_article = eval(statement)
self.log.debug("Article accepted: %s", is_article)
return is_article | [
"def",
"is_article",
"(",
"self",
",",
"response",
",",
"url",
")",
":",
"site",
"=",
"self",
".",
"__sites_object",
"[",
"url",
"]",
"heuristics",
"=",
"self",
".",
"__get_enabled_heuristics",
"(",
"url",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Checking site: %s\"",
",",
"response",
".",
"url",
")",
"statement",
"=",
"self",
".",
"__get_condition",
"(",
"url",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Condition (original): %s\"",
",",
"statement",
")",
"for",
"heuristic",
",",
"condition",
"in",
"heuristics",
".",
"items",
"(",
")",
":",
"heuristic_func",
"=",
"getattr",
"(",
"self",
",",
"heuristic",
")",
"result",
"=",
"heuristic_func",
"(",
"response",
",",
"site",
")",
"check",
"=",
"self",
".",
"__evaluate_result",
"(",
"result",
",",
"condition",
")",
"statement",
"=",
"re",
".",
"sub",
"(",
"r\"\\b%s\\b\"",
"%",
"heuristic",
",",
"str",
"(",
"check",
")",
",",
"statement",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Checking heuristic (%s)\"",
"\" result (%s) on condition (%s): %s\"",
",",
"heuristic",
",",
"result",
",",
"condition",
",",
"check",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Condition (evaluated): %s\"",
",",
"statement",
")",
"is_article",
"=",
"eval",
"(",
"statement",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Article accepted: %s\"",
",",
"is_article",
")",
"return",
"is_article"
] | 42.09375 | 20.40625 |
def ethernet_adapters(self, ethernet_adapters):
"""
Sets the number of Ethernet adapters for this IOU VM.
:param ethernet_adapters: number of adapters
"""
self._ethernet_adapters.clear()
for _ in range(0, ethernet_adapters):
self._ethernet_adapters.append(EthernetAdapter(interfaces=4))
log.info('IOU "{name}" [{id}]: number of Ethernet adapters changed to {adapters}'.format(name=self._name,
id=self._id,
adapters=len(self._ethernet_adapters)))
self._adapters = self._ethernet_adapters + self._serial_adapters | [
"def",
"ethernet_adapters",
"(",
"self",
",",
"ethernet_adapters",
")",
":",
"self",
".",
"_ethernet_adapters",
".",
"clear",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"0",
",",
"ethernet_adapters",
")",
":",
"self",
".",
"_ethernet_adapters",
".",
"append",
"(",
"EthernetAdapter",
"(",
"interfaces",
"=",
"4",
")",
")",
"log",
".",
"info",
"(",
"'IOU \"{name}\" [{id}]: number of Ethernet adapters changed to {adapters}'",
".",
"format",
"(",
"name",
"=",
"self",
".",
"_name",
",",
"id",
"=",
"self",
".",
"_id",
",",
"adapters",
"=",
"len",
"(",
"self",
".",
"_ethernet_adapters",
")",
")",
")",
"self",
".",
"_adapters",
"=",
"self",
".",
"_ethernet_adapters",
"+",
"self",
".",
"_serial_adapters"
] | 48.0625 | 31.8125 |
def _normalize_stack(graphobjs):
"""Convert runs of qQ's in the stack into single graphobjs"""
for operands, operator in graphobjs:
operator = str(operator)
if re.match(r'Q*q+$', operator): # Zero or more Q, one or more q
for char in operator: # Split into individual
yield ([], char) # Yield individual
else:
yield (operands, operator) | [
"def",
"_normalize_stack",
"(",
"graphobjs",
")",
":",
"for",
"operands",
",",
"operator",
"in",
"graphobjs",
":",
"operator",
"=",
"str",
"(",
"operator",
")",
"if",
"re",
".",
"match",
"(",
"r'Q*q+$'",
",",
"operator",
")",
":",
"# Zero or more Q, one or more q",
"for",
"char",
"in",
"operator",
":",
"# Split into individual",
"yield",
"(",
"[",
"]",
",",
"char",
")",
"# Yield individual",
"else",
":",
"yield",
"(",
"operands",
",",
"operator",
")"
] | 44.777778 | 12 |
def get_id2config_mapping(self):
"""
returns a dict where the keys are the config_ids and the values
are the actual configurations
"""
new_dict = {}
for k, v in self.data.items():
new_dict[k] = {}
new_dict[k]['config'] = copy.deepcopy(v.config)
try:
new_dict[k]['config_info'] = copy.deepcopy(v.config_info)
except:
pass
return(new_dict) | [
"def",
"get_id2config_mapping",
"(",
"self",
")",
":",
"new_dict",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"self",
".",
"data",
".",
"items",
"(",
")",
":",
"new_dict",
"[",
"k",
"]",
"=",
"{",
"}",
"new_dict",
"[",
"k",
"]",
"[",
"'config'",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"v",
".",
"config",
")",
"try",
":",
"new_dict",
"[",
"k",
"]",
"[",
"'config_info'",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"v",
".",
"config_info",
")",
"except",
":",
"pass",
"return",
"(",
"new_dict",
")"
] | 25.571429 | 17.428571 |
def irafcrop(self, irafcropstring):
"""
This is a wrapper around crop(), similar to iraf imcopy,
using iraf conventions (100:199 will be 100 pixels, not 99).
"""
irafcropstring = irafcropstring[1:-1] # removing the [ ]
ranges = irafcropstring.split(",")
xr = ranges[0].split(":")
yr = ranges[1].split(":")
xmin = int(xr[0])
xmax = int(xr[1])+1
ymin = int(yr[0])
ymax = int(yr[1])+1
self.crop(xmin, xmax, ymin, ymax) | [
"def",
"irafcrop",
"(",
"self",
",",
"irafcropstring",
")",
":",
"irafcropstring",
"=",
"irafcropstring",
"[",
"1",
":",
"-",
"1",
"]",
"# removing the [ ]",
"ranges",
"=",
"irafcropstring",
".",
"split",
"(",
"\",\"",
")",
"xr",
"=",
"ranges",
"[",
"0",
"]",
".",
"split",
"(",
"\":\"",
")",
"yr",
"=",
"ranges",
"[",
"1",
"]",
".",
"split",
"(",
"\":\"",
")",
"xmin",
"=",
"int",
"(",
"xr",
"[",
"0",
"]",
")",
"xmax",
"=",
"int",
"(",
"xr",
"[",
"1",
"]",
")",
"+",
"1",
"ymin",
"=",
"int",
"(",
"yr",
"[",
"0",
"]",
")",
"ymax",
"=",
"int",
"(",
"yr",
"[",
"1",
"]",
")",
"+",
"1",
"self",
".",
"crop",
"(",
"xmin",
",",
"xmax",
",",
"ymin",
",",
"ymax",
")"
] | 36.142857 | 11 |
def autochisq_from_precomputed(sn, corr_sn, hautocorr, indices,
                               stride=1, num_points=None, oneside=None,
                               twophase=True, maxvalued=False):
    """
    Compute correlation (two sided) between template and data
    and compares with autocorrelation of the template: C(t) = IFFT(A*A/S(f))
    Parameters
    ----------
    sn: Array[complex]
        normalized (!) array of complex snr for the template that produced the
        trigger(s) being tested
    corr_sn : Array[complex]
        normalized (!) array of complex snr for the template that you want to
        produce a correlation chisq test for. In the [common] case that sn and
        corr_sn are the same, you are computing auto-correlation chisq.
    hautocorr: Array[complex]
        time domain autocorrelation for the template
    indices: Array[int]
        compute correlation chisquare at the points specified in this array,
    num_points: [int, optional; default=None]
        Number of points used for autochisq on each side, if None all points
        are used.
    stride: [int, optional; default = 1]
        stride for points selection for autochisq
        total length <= 2*num_points*stride
    oneside: [str, optional; default=None]
        whether to use one or two sided autochisquare. If None (or not
        provided) twosided chi-squared will be used. If given, options are
        'left' or 'right', to do one-sided chi-squared on the left or right.
    twophase: Boolean, optional; default=True
        If True calculate the auto-chisq using both phases of the filter.
        If False only use the phase of the obtained trigger(s).
    maxvalued: Boolean, optional; default=False
        Return the largest auto-chisq at any of the points tested if True.
        If False, return the sum of auto-chisq at all points tested.
    Returns
    -------
    autochisq: [tuple]
        returns autochisq values and snr corresponding to the instances
        of time defined by indices
    """
    Nsnr = len(sn)
    achisq = np.zeros(len(indices))
    # Maximum number of sample points available per side at this stride;
    # clamp the requested num_points to what the data can supply.
    num_points_all = int(Nsnr/stride)
    if num_points is None:
        num_points = num_points_all
    if (num_points > num_points_all):
        num_points = num_points_all
    # Unit phase (cos, sin) of the complex SNR at each trigger index.
    snrabs = np.abs(sn[indices])
    cphi_array = (sn[indices]).real / snrabs
    sphi_array = (sn[indices]).imag / snrabs
    start_point = - stride*num_points
    end_point = stride*num_points+1
    # Relative sample offsets (excluding 0, the trigger itself) at which
    # the chi-squared is evaluated.
    if oneside == 'left':
        achisq_idx_list = np.arange(start_point, 0, stride)
    elif oneside == 'right':
        achisq_idx_list = np.arange(stride, end_point, stride)
    else:
        achisq_idx_list_pt1 = np.arange(start_point, 0, stride)
        achisq_idx_list_pt2 = np.arange(stride, end_point, stride)
        achisq_idx_list = np.append(achisq_idx_list_pt1,
                                    achisq_idx_list_pt2)
    hauto_corr_vec = hautocorr[achisq_idx_list]
    hauto_norm = hauto_corr_vec.real*hauto_corr_vec.real
    # REMOVE THIS LINE TO REPRODUCE OLD RESULTS
    hauto_norm += hauto_corr_vec.imag*hauto_corr_vec.imag
    # Normalization: residual variance after subtracting the predicted
    # autocorrelation contribution of the trigger.
    chisq_norm = 1.0 - hauto_norm
    for ip,ind in enumerate(indices):
        curr_achisq_idx_list = achisq_idx_list + ind
        cphi = cphi_array[ip]
        sphi = sphi_array[ip]
        # By construction, the other "phase" of the SNR is 0
        snr_ind = sn[ind].real*cphi + sn[ind].imag*sphi
        # Wrap index if needed (maybe should fail in this case?)
        if curr_achisq_idx_list[0] < 0:
            curr_achisq_idx_list[curr_achisq_idx_list < 0] += Nsnr
        if curr_achisq_idx_list[-1] > (Nsnr - 1):
            curr_achisq_idx_list[curr_achisq_idx_list > (Nsnr-1)] -= Nsnr
        # SNR time series rotated into the trigger's phase; subtract the
        # part predicted by the template autocorrelation.
        z = corr_sn[curr_achisq_idx_list].real*cphi + \
            corr_sn[curr_achisq_idx_list].imag*sphi
        dz = z - hauto_corr_vec.real*snr_ind
        curr_achisq_list = dz*dz/chisq_norm
        if twophase:
            chisq_norm = 1.0 - hauto_norm
            # Contribution of the orthogonal phase.
            z = -corr_sn[curr_achisq_idx_list].real*sphi + \
                corr_sn[curr_achisq_idx_list].imag*cphi
            dz = z - hauto_corr_vec.imag*snr_ind
            curr_achisq_list += dz*dz/chisq_norm
        if maxvalued:
            achisq[ip] = curr_achisq_list.max()
        else:
            achisq[ip] = curr_achisq_list.sum()
    # Degrees of freedom: points per side, doubled for two-sided tests and
    # doubled again when both phases are used.
    dof = num_points
    if oneside is None:
        dof = dof * 2
    if twophase:
        dof = dof * 2
    return dof, achisq, indices
"def",
"autochisq_from_precomputed",
"(",
"sn",
",",
"corr_sn",
",",
"hautocorr",
",",
"indices",
",",
"stride",
"=",
"1",
",",
"num_points",
"=",
"None",
",",
"oneside",
"=",
"None",
",",
"twophase",
"=",
"True",
",",
"maxvalued",
"=",
"False",
")",
":",
"Nsnr",
"=",
"len",
"(",
"sn",
")",
"achisq",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"indices",
")",
")",
"num_points_all",
"=",
"int",
"(",
"Nsnr",
"/",
"stride",
")",
"if",
"num_points",
"is",
"None",
":",
"num_points",
"=",
"num_points_all",
"if",
"(",
"num_points",
">",
"num_points_all",
")",
":",
"num_points",
"=",
"num_points_all",
"snrabs",
"=",
"np",
".",
"abs",
"(",
"sn",
"[",
"indices",
"]",
")",
"cphi_array",
"=",
"(",
"sn",
"[",
"indices",
"]",
")",
".",
"real",
"/",
"snrabs",
"sphi_array",
"=",
"(",
"sn",
"[",
"indices",
"]",
")",
".",
"imag",
"/",
"snrabs",
"start_point",
"=",
"-",
"stride",
"*",
"num_points",
"end_point",
"=",
"stride",
"*",
"num_points",
"+",
"1",
"if",
"oneside",
"==",
"'left'",
":",
"achisq_idx_list",
"=",
"np",
".",
"arange",
"(",
"start_point",
",",
"0",
",",
"stride",
")",
"elif",
"oneside",
"==",
"'right'",
":",
"achisq_idx_list",
"=",
"np",
".",
"arange",
"(",
"stride",
",",
"end_point",
",",
"stride",
")",
"else",
":",
"achisq_idx_list_pt1",
"=",
"np",
".",
"arange",
"(",
"start_point",
",",
"0",
",",
"stride",
")",
"achisq_idx_list_pt2",
"=",
"np",
".",
"arange",
"(",
"stride",
",",
"end_point",
",",
"stride",
")",
"achisq_idx_list",
"=",
"np",
".",
"append",
"(",
"achisq_idx_list_pt1",
",",
"achisq_idx_list_pt2",
")",
"hauto_corr_vec",
"=",
"hautocorr",
"[",
"achisq_idx_list",
"]",
"hauto_norm",
"=",
"hauto_corr_vec",
".",
"real",
"*",
"hauto_corr_vec",
".",
"real",
"# REMOVE THIS LINE TO REPRODUCE OLD RESULTS",
"hauto_norm",
"+=",
"hauto_corr_vec",
".",
"imag",
"*",
"hauto_corr_vec",
".",
"imag",
"chisq_norm",
"=",
"1.0",
"-",
"hauto_norm",
"for",
"ip",
",",
"ind",
"in",
"enumerate",
"(",
"indices",
")",
":",
"curr_achisq_idx_list",
"=",
"achisq_idx_list",
"+",
"ind",
"cphi",
"=",
"cphi_array",
"[",
"ip",
"]",
"sphi",
"=",
"sphi_array",
"[",
"ip",
"]",
"# By construction, the other \"phase\" of the SNR is 0",
"snr_ind",
"=",
"sn",
"[",
"ind",
"]",
".",
"real",
"*",
"cphi",
"+",
"sn",
"[",
"ind",
"]",
".",
"imag",
"*",
"sphi",
"# Wrap index if needed (maybe should fail in this case?)",
"if",
"curr_achisq_idx_list",
"[",
"0",
"]",
"<",
"0",
":",
"curr_achisq_idx_list",
"[",
"curr_achisq_idx_list",
"<",
"0",
"]",
"+=",
"Nsnr",
"if",
"curr_achisq_idx_list",
"[",
"-",
"1",
"]",
">",
"(",
"Nsnr",
"-",
"1",
")",
":",
"curr_achisq_idx_list",
"[",
"curr_achisq_idx_list",
">",
"(",
"Nsnr",
"-",
"1",
")",
"]",
"-=",
"Nsnr",
"z",
"=",
"corr_sn",
"[",
"curr_achisq_idx_list",
"]",
".",
"real",
"*",
"cphi",
"+",
"corr_sn",
"[",
"curr_achisq_idx_list",
"]",
".",
"imag",
"*",
"sphi",
"dz",
"=",
"z",
"-",
"hauto_corr_vec",
".",
"real",
"*",
"snr_ind",
"curr_achisq_list",
"=",
"dz",
"*",
"dz",
"/",
"chisq_norm",
"if",
"twophase",
":",
"chisq_norm",
"=",
"1.0",
"-",
"hauto_norm",
"z",
"=",
"-",
"corr_sn",
"[",
"curr_achisq_idx_list",
"]",
".",
"real",
"*",
"sphi",
"+",
"corr_sn",
"[",
"curr_achisq_idx_list",
"]",
".",
"imag",
"*",
"cphi",
"dz",
"=",
"z",
"-",
"hauto_corr_vec",
".",
"imag",
"*",
"snr_ind",
"curr_achisq_list",
"+=",
"dz",
"*",
"dz",
"/",
"chisq_norm",
"if",
"maxvalued",
":",
"achisq",
"[",
"ip",
"]",
"=",
"curr_achisq_list",
".",
"max",
"(",
")",
"else",
":",
"achisq",
"[",
"ip",
"]",
"=",
"curr_achisq_list",
".",
"sum",
"(",
")",
"dof",
"=",
"num_points",
"if",
"oneside",
"is",
"None",
":",
"dof",
"=",
"dof",
"*",
"2",
"if",
"twophase",
":",
"dof",
"=",
"dof",
"*",
"2",
"return",
"dof",
",",
"achisq",
",",
"indices"
] | 38.714286 | 19.339286 |
def win_encode(s):
    """Encode unicodes for process arguments on Windows.

    Parameters
    ----------
    s : text, bytes or None
        Text is encoded with the preferred (ANSI) locale encoding,
        bytes pass through unchanged, and ``None`` yields ``None``.

    Returns
    -------
    bytes or None

    Raises
    ------
    TypeError
        If *s* is neither text, bytes nor ``None``.
    """
    # BUG FIX: the `unicode` builtin does not exist on Python 3, so the
    # original raised NameError on every call there. Resolve the text
    # type in a way that works on both Python 2 and 3.
    try:
        text_type = unicode  # noqa: F821 - Python 2 only
    except NameError:
        text_type = str
    if isinstance(s, text_type):
        return s.encode(locale.getpreferredencoding(False))
    elif isinstance(s, bytes):
        return s
    elif s is not None:
        raise TypeError('Expected bytes or text, but got %r' % (s,))
"def",
"win_encode",
"(",
"s",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"unicode",
")",
":",
"return",
"s",
".",
"encode",
"(",
"locale",
".",
"getpreferredencoding",
"(",
"False",
")",
")",
"elif",
"isinstance",
"(",
"s",
",",
"bytes",
")",
":",
"return",
"s",
"elif",
"s",
"is",
"not",
"None",
":",
"raise",
"TypeError",
"(",
"'Expected bytes or text, but got %r'",
"%",
"(",
"s",
",",
")",
")"
] | 37.875 | 16.25 |
def from_detections_assignment(detections_1, detections_2, assignments):
    """
    Creates traces out of given assignment and cell data.
    """
    n1 = len(detections_1)
    n2 = len(detections_2)
    # Indices pointing past either detection list are dummy assignments
    # (padding), not real cells, so they produce no trace.
    return [Trace(detections_1[i], detections_2[j])
            for i, j in six.iteritems(assignments)
            if i < n1 and j < n2]
"def",
"from_detections_assignment",
"(",
"detections_1",
",",
"detections_2",
",",
"assignments",
")",
":",
"traces",
"=",
"[",
"]",
"for",
"d1n",
",",
"d2n",
"in",
"six",
".",
"iteritems",
"(",
"assignments",
")",
":",
"# check if the match is between existing cells",
"if",
"d1n",
"<",
"len",
"(",
"detections_1",
")",
"and",
"d2n",
"<",
"len",
"(",
"detections_2",
")",
":",
"traces",
".",
"append",
"(",
"Trace",
"(",
"detections_1",
"[",
"d1n",
"]",
",",
"detections_2",
"[",
"d2n",
"]",
")",
")",
"return",
"traces"
] | 37.083333 | 21.916667 |
def require_email_confirmation(self):
    """ Mark email as unconfirmed"""
    # Invalidate the current confirmation state and issue a fresh
    # confirmation token valid for the next 24 hours.
    self.email_confirmed = False
    self.email_link = self.generate_hash(50)
    self.email_link_expires = (
        datetime.datetime.utcnow() + datetime.timedelta(hours=24)
    )
"def",
"require_email_confirmation",
"(",
"self",
")",
":",
"self",
".",
"email_confirmed",
"=",
"False",
"self",
".",
"email_link",
"=",
"self",
".",
"generate_hash",
"(",
"50",
")",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"self",
".",
"email_link_expires",
"=",
"now",
"+",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"24",
")"
] | 45 | 7.166667 |
def purge_duplicates(list_in):
    """Remove duplicates from list while preserving order.
    Parameters
    ----------
    list_in: Iterable
    Returns
    -------
    list
        List of first occurences in order
    """
    # Membership is checked against the output list itself, so elements
    # only need to support equality -- hashability is not required.
    unique = []
    for element in list_in:
        if element in unique:
            continue
        unique.append(element)
    return unique
"def",
"purge_duplicates",
"(",
"list_in",
")",
":",
"_list",
"=",
"[",
"]",
"for",
"item",
"in",
"list_in",
":",
"if",
"item",
"not",
"in",
"_list",
":",
"_list",
".",
"append",
"(",
"item",
")",
"return",
"_list"
] | 19.294118 | 19.941176 |
def _n_parameters(self):
    """Return the number of free parameters in the model.

    The count is the sum of the covariance parameters (which depend on
    ``self.covariance_type``), the mean parameters, and the mixture
    weights (one fewer than the number of components, since they sum
    to one).

    Returns
    -------
    int
        Number of free parameters.

    Raises
    ------
    ValueError
        If ``self.covariance_type`` is not one of 'full', 'diag',
        'tied' or 'spherical'.
    """
    ndim = self.means_.shape[1]
    if self.covariance_type == 'full':
        # One symmetric ndim x ndim matrix per component.
        cov_params = self.n_components * ndim * (ndim + 1) / 2.
    elif self.covariance_type == 'diag':
        cov_params = self.n_components * ndim
    elif self.covariance_type == 'tied':
        # A single symmetric matrix shared by all components.
        cov_params = ndim * (ndim + 1) / 2.
    elif self.covariance_type == 'spherical':
        cov_params = self.n_components
    else:
        # BUG FIX: an unrecognised type previously left `cov_params`
        # unbound and crashed below with NameError; fail fast instead.
        raise ValueError(
            "Invalid covariance_type: %r" % (self.covariance_type,))
    mean_params = ndim * self.n_components
    return int(cov_params + mean_params + self.n_components - 1)
"def",
"_n_parameters",
"(",
"self",
")",
":",
"ndim",
"=",
"self",
".",
"means_",
".",
"shape",
"[",
"1",
"]",
"if",
"self",
".",
"covariance_type",
"==",
"'full'",
":",
"cov_params",
"=",
"self",
".",
"n_components",
"*",
"ndim",
"*",
"(",
"ndim",
"+",
"1",
")",
"/",
"2.",
"elif",
"self",
".",
"covariance_type",
"==",
"'diag'",
":",
"cov_params",
"=",
"self",
".",
"n_components",
"*",
"ndim",
"elif",
"self",
".",
"covariance_type",
"==",
"'tied'",
":",
"cov_params",
"=",
"ndim",
"*",
"(",
"ndim",
"+",
"1",
")",
"/",
"2.",
"elif",
"self",
".",
"covariance_type",
"==",
"'spherical'",
":",
"cov_params",
"=",
"self",
".",
"n_components",
"mean_params",
"=",
"ndim",
"*",
"self",
".",
"n_components",
"return",
"int",
"(",
"cov_params",
"+",
"mean_params",
"+",
"self",
".",
"n_components",
"-",
"1",
")"
] | 47.769231 | 9.153846 |
def notification_factory(code, subcode):
    """Returns a `Notification` message corresponding to given codes.
    Parameters:
    - `code`: (int) BGP error code
    - `subcode`: (int) BGP error sub-code
    """
    message = BGPNotification(code, subcode)
    # A missing reason means the code/sub-code pair is not a recognised
    # BGP error combination.
    if message.reason:
        return message
    raise ValueError('Invalid code/sub-code.')
"def",
"notification_factory",
"(",
"code",
",",
"subcode",
")",
":",
"notification",
"=",
"BGPNotification",
"(",
"code",
",",
"subcode",
")",
"if",
"not",
"notification",
".",
"reason",
":",
"raise",
"ValueError",
"(",
"'Invalid code/sub-code.'",
")",
"return",
"notification"
] | 29.916667 | 13.083333 |
def keyPressEvent(self, event):
    """
    Qt override.
    """
    # The base-class handler runs first on every key, as in the original
    # if/else (both branches began with this call).
    QTableWidget.keyPressEvent(self, event)
    if event.key() in (Qt.Key_Enter, Qt.Key_Return):
        # To avoid having to enter one final tab
        self.setDisabled(True)
        self.setDisabled(False)
        self._parent.keyPressEvent(event)
"def",
"keyPressEvent",
"(",
"self",
",",
"event",
")",
":",
"if",
"event",
".",
"key",
"(",
")",
"in",
"[",
"Qt",
".",
"Key_Enter",
",",
"Qt",
".",
"Key_Return",
"]",
":",
"QTableWidget",
".",
"keyPressEvent",
"(",
"self",
",",
"event",
")",
"# To avoid having to enter one final tab\r",
"self",
".",
"setDisabled",
"(",
"True",
")",
"self",
".",
"setDisabled",
"(",
"False",
")",
"self",
".",
"_parent",
".",
"keyPressEvent",
"(",
"event",
")",
"else",
":",
"QTableWidget",
".",
"keyPressEvent",
"(",
"self",
",",
"event",
")"
] | 35.083333 | 10.083333 |
def read_array(path, mmap_mode=None):
    """Read a .npy array."""
    # Guard clause: anything but .npy is unsupported for now.
    ext = op.splitext(path)[1]
    if ext != '.npy':
        raise NotImplementedError(
            "The file extension `{}` is not currently supported.".format(ext))
    return np.load(path, mmap_mode=mmap_mode)
"def",
"read_array",
"(",
"path",
",",
"mmap_mode",
"=",
"None",
")",
":",
"file_ext",
"=",
"op",
".",
"splitext",
"(",
"path",
")",
"[",
"1",
"]",
"if",
"file_ext",
"==",
"'.npy'",
":",
"return",
"np",
".",
"load",
"(",
"path",
",",
"mmap_mode",
"=",
"mmap_mode",
")",
"raise",
"NotImplementedError",
"(",
"\"The file extension `{}` \"",
".",
"format",
"(",
"file_ext",
")",
"+",
"\"is not currently supported.\"",
")"
] | 44.285714 | 12.285714 |
def t2T(self, seg, t):
    """returns the path parameter T which corresponds to the segment
    parameter t. In other words, for any Path object, path, and any
    segment in path, seg, T(t) = path.t2T(seg, t) is the unique
    reparameterization such that path.point(T(t)) == seg.point(t) for all
    0 <= t <= 1.
    Input Note: seg can be a segment in the Path object or its
    corresponding index."""
    self._calc_lengths()
    # `seg` may be given either as an index or as the segment object.
    if isinstance(seg, int):
        seg_idx = seg
    else:
        try:
            seg_idx = self.index(seg)
        except ValueError:
            assert is_path_segment(seg) or isinstance(seg, int)
            raise
    # Map t in [0, 1] linearly onto this segment's share of the path's
    # (normalized) length interval [T0, T1].
    T0 = sum(self._lengths[:seg_idx])
    T1 = T0 + self._lengths[seg_idx]
    return (T1 - T0) * t + T0
"def",
"t2T",
"(",
"self",
",",
"seg",
",",
"t",
")",
":",
"self",
".",
"_calc_lengths",
"(",
")",
"# Accept an index or a segment for seg",
"if",
"isinstance",
"(",
"seg",
",",
"int",
")",
":",
"seg_idx",
"=",
"seg",
"else",
":",
"try",
":",
"seg_idx",
"=",
"self",
".",
"index",
"(",
"seg",
")",
"except",
"ValueError",
":",
"assert",
"is_path_segment",
"(",
"seg",
")",
"or",
"isinstance",
"(",
"seg",
",",
"int",
")",
"raise",
"segment_start",
"=",
"sum",
"(",
"self",
".",
"_lengths",
"[",
":",
"seg_idx",
"]",
")",
"segment_end",
"=",
"segment_start",
"+",
"self",
".",
"_lengths",
"[",
"seg_idx",
"]",
"T",
"=",
"(",
"segment_end",
"-",
"segment_start",
")",
"*",
"t",
"+",
"segment_start",
"return",
"T"
] | 40.608696 | 18.478261 |
def get_orthology_matrix(self, pid_cutoff=None, bitscore_cutoff=None, evalue_cutoff=None, filter_condition='OR',
                         remove_strains_with_no_orthology=True,
                         remove_strains_with_no_differences=False,
                         remove_genes_not_in_base_model=True):
    """Create the orthology matrix by finding best bidirectional BLAST hits. Genes = rows, strains = columns
    Runs run_makeblastdb, run_bidirectional_blast, and calculate_bbh for protein sequences.
    Args:
        pid_cutoff (float): Minimum percent identity between BLAST hits to filter for in the range [0, 100]
        bitscore_cutoff (float): Minimum bitscore allowed between BLAST hits
        evalue_cutoff (float): Maximum E-value allowed between BLAST hits
        filter_condition (str): 'OR' or 'AND', how to combine cutoff filters. 'OR' gives more results since it
            is less stringent, as you will be filtering for hits with (>80% PID or >30 bitscore or <0.0001 evalue).
        remove_strains_with_no_orthology (bool): Remove strains which have no orthologous genes found
        remove_strains_with_no_differences (bool): Remove strains which have all the same genes as the base model.
            Default is False because since orthology is found using a PID cutoff, all genes may be present but
            differences may be on the sequence level.
        remove_genes_not_in_base_model (bool): Remove genes from the orthology matrix which are not present in our
            base model. This happens if we use a genome file for our model that has other genes in it.
    Returns:
        DataFrame: Orthology matrix calculated from best bidirectional BLAST hits.
    """
    # TODO: document and test other cutoffs
    # Get the path to the reference genome
    r_file = self.reference_gempro.genome_path
    # Maps strain id -> path of its BBH result file, consumed below when
    # building the matrix.
    bbh_files = {}
    log.info('Running bidirectional BLAST and finding best bidirectional hits (BBH)...')
    for strain_gempro in tqdm(self.strains):
        g_file = strain_gempro.genome_path
        # Run bidirectional BLAST
        log.debug('{} vs {}: Running bidirectional BLAST'.format(self.reference_gempro.id, strain_gempro.id))
        r_vs_g, g_vs_r = ssbio.protein.sequence.utils.blast.run_bidirectional_blast(reference=r_file,
                                                                                   other_genome=g_file,
                                                                                   dbtype='prot',
                                                                                   outdir=self.sequences_by_organism_dir)
        # Using the BLAST files, find the BBH
        log.debug('{} vs {}: Finding BBHs'.format(self.reference_gempro.id, strain_gempro.id))
        bbh = ssbio.protein.sequence.utils.blast.calculate_bbh(blast_results_1=r_vs_g, blast_results_2=g_vs_r,
                                                               outdir=self.sequences_by_organism_dir)
        bbh_files[strain_gempro.id] = bbh
    # Make the orthologous genes matrix
    log.info('Creating orthology matrix from BBHs...')
    ortho_matrix = ssbio.protein.sequence.utils.blast.create_orthology_matrix(r_name=self.reference_gempro.id,
                                                                              genome_to_bbh_files=bbh_files,
                                                                              pid_cutoff=pid_cutoff,
                                                                              bitscore_cutoff=bitscore_cutoff,
                                                                              evalue_cutoff=evalue_cutoff,
                                                                              filter_condition=filter_condition,
                                                                              outname='{}_{}_orthology.csv'.format(self.reference_gempro.id, 'prot'),
                                                                              outdir=self.data_dir)
    log.info('Saved orthology matrix at {}. See the "df_orthology_matrix" attribute.'.format(ortho_matrix))
    # create_orthology_matrix returns a file path; load it into the
    # DataFrame attribute callers actually use.
    self.df_orthology_matrix = pd.read_csv(ortho_matrix, index_col=0)
    # Filter the matrix to genes only in our analysis, and also check for strains with no differences or no orthologous genes
    self._filter_orthology_matrix(remove_strains_with_no_orthology=remove_strains_with_no_orthology,
                                  remove_strains_with_no_differences=remove_strains_with_no_differences,
                                  remove_genes_not_in_base_model=remove_genes_not_in_base_model)
"def",
"get_orthology_matrix",
"(",
"self",
",",
"pid_cutoff",
"=",
"None",
",",
"bitscore_cutoff",
"=",
"None",
",",
"evalue_cutoff",
"=",
"None",
",",
"filter_condition",
"=",
"'OR'",
",",
"remove_strains_with_no_orthology",
"=",
"True",
",",
"remove_strains_with_no_differences",
"=",
"False",
",",
"remove_genes_not_in_base_model",
"=",
"True",
")",
":",
"# TODO: document and test other cutoffs",
"# Get the path to the reference genome",
"r_file",
"=",
"self",
".",
"reference_gempro",
".",
"genome_path",
"bbh_files",
"=",
"{",
"}",
"log",
".",
"info",
"(",
"'Running bidirectional BLAST and finding best bidirectional hits (BBH)...'",
")",
"for",
"strain_gempro",
"in",
"tqdm",
"(",
"self",
".",
"strains",
")",
":",
"g_file",
"=",
"strain_gempro",
".",
"genome_path",
"# Run bidirectional BLAST",
"log",
".",
"debug",
"(",
"'{} vs {}: Running bidirectional BLAST'",
".",
"format",
"(",
"self",
".",
"reference_gempro",
".",
"id",
",",
"strain_gempro",
".",
"id",
")",
")",
"r_vs_g",
",",
"g_vs_r",
"=",
"ssbio",
".",
"protein",
".",
"sequence",
".",
"utils",
".",
"blast",
".",
"run_bidirectional_blast",
"(",
"reference",
"=",
"r_file",
",",
"other_genome",
"=",
"g_file",
",",
"dbtype",
"=",
"'prot'",
",",
"outdir",
"=",
"self",
".",
"sequences_by_organism_dir",
")",
"# Using the BLAST files, find the BBH",
"log",
".",
"debug",
"(",
"'{} vs {}: Finding BBHs'",
".",
"format",
"(",
"self",
".",
"reference_gempro",
".",
"id",
",",
"strain_gempro",
".",
"id",
")",
")",
"bbh",
"=",
"ssbio",
".",
"protein",
".",
"sequence",
".",
"utils",
".",
"blast",
".",
"calculate_bbh",
"(",
"blast_results_1",
"=",
"r_vs_g",
",",
"blast_results_2",
"=",
"g_vs_r",
",",
"outdir",
"=",
"self",
".",
"sequences_by_organism_dir",
")",
"bbh_files",
"[",
"strain_gempro",
".",
"id",
"]",
"=",
"bbh",
"# Make the orthologous genes matrix",
"log",
".",
"info",
"(",
"'Creating orthology matrix from BBHs...'",
")",
"ortho_matrix",
"=",
"ssbio",
".",
"protein",
".",
"sequence",
".",
"utils",
".",
"blast",
".",
"create_orthology_matrix",
"(",
"r_name",
"=",
"self",
".",
"reference_gempro",
".",
"id",
",",
"genome_to_bbh_files",
"=",
"bbh_files",
",",
"pid_cutoff",
"=",
"pid_cutoff",
",",
"bitscore_cutoff",
"=",
"bitscore_cutoff",
",",
"evalue_cutoff",
"=",
"evalue_cutoff",
",",
"filter_condition",
"=",
"filter_condition",
",",
"outname",
"=",
"'{}_{}_orthology.csv'",
".",
"format",
"(",
"self",
".",
"reference_gempro",
".",
"id",
",",
"'prot'",
")",
",",
"outdir",
"=",
"self",
".",
"data_dir",
")",
"log",
".",
"info",
"(",
"'Saved orthology matrix at {}. See the \"df_orthology_matrix\" attribute.'",
".",
"format",
"(",
"ortho_matrix",
")",
")",
"self",
".",
"df_orthology_matrix",
"=",
"pd",
".",
"read_csv",
"(",
"ortho_matrix",
",",
"index_col",
"=",
"0",
")",
"# Filter the matrix to genes only in our analysis, and also check for strains with no differences or no orthologous genes",
"self",
".",
"_filter_orthology_matrix",
"(",
"remove_strains_with_no_orthology",
"=",
"remove_strains_with_no_orthology",
",",
"remove_strains_with_no_differences",
"=",
"remove_strains_with_no_differences",
",",
"remove_genes_not_in_base_model",
"=",
"remove_genes_not_in_base_model",
")"
] | 71.313433 | 47.149254 |
def password_length_needed(entropybits: Union[int, float], chars: str) -> int:
    """Calculate the length of a password for a given entropy and chars."""
    # Validate arguments in the same order the errors are documented.
    if not isinstance(entropybits, (int, float)):
        raise TypeError('entropybits can only be int or float')
    if entropybits < 0:
        raise ValueError('entropybits should be greater than 0')
    if not isinstance(chars, str):
        raise TypeError('chars can only be string')
    if not chars:
        raise ValueError("chars can't be null")
    # Each character contributes entropy_bits(...) bits of entropy, so
    # divide and round up to guarantee at least the requested entropy.
    bits_per_char = entropy_bits(list(chars))
    return ceil(entropybits / bits_per_char)
"def",
"password_length_needed",
"(",
"entropybits",
":",
"Union",
"[",
"int",
",",
"float",
"]",
",",
"chars",
":",
"str",
")",
"->",
"int",
":",
"if",
"not",
"isinstance",
"(",
"entropybits",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"raise",
"TypeError",
"(",
"'entropybits can only be int or float'",
")",
"if",
"entropybits",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'entropybits should be greater than 0'",
")",
"if",
"not",
"isinstance",
"(",
"chars",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"'chars can only be string'",
")",
"if",
"not",
"chars",
":",
"raise",
"ValueError",
"(",
"\"chars can't be null\"",
")",
"# entropy_bits(list(characters)) = 6.554588",
"entropy_c",
"=",
"entropy_bits",
"(",
"list",
"(",
"chars",
")",
")",
"return",
"ceil",
"(",
"entropybits",
"/",
"entropy_c",
")"
] | 44.928571 | 14.714286 |
def enable_gui(gui=None, app=None):
    """Switch amongst GUI input hooks by name.
    """
    # Imported lazily so pydev_ipython is only loaded when a GUI hook is
    # actually requested.
    from pydev_ipython.inputhook import enable_gui as real_enable_gui
    try:
        return real_enable_gui(gui, app)
    except ValueError as exc:
        # Surface unknown GUI names as a usage error for the caller.
        raise UsageError("%s" % exc)
"def",
"enable_gui",
"(",
"gui",
"=",
"None",
",",
"app",
"=",
"None",
")",
":",
"# Deferred import",
"from",
"pydev_ipython",
".",
"inputhook",
"import",
"enable_gui",
"as",
"real_enable_gui",
"try",
":",
"return",
"real_enable_gui",
"(",
"gui",
",",
"app",
")",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"UsageError",
"(",
"\"%s\"",
"%",
"e",
")"
] | 35.444444 | 10.666667 |
def update(self, *args, **kwargs):
    """Update ConfigMap from mapping/iterable.
    If the key exists the entry is updated else it is added.
    Args:
        *args: variable length argument list. A valid argument is a two item
            tuple/list. The first item is the key and the second is the value.
        **kwargs: Arbitrary keyword arguments representing the config.
    """
    # Positional pairs are applied first, then keyword arguments, so a
    # keyword entry overrides a positional one with the same key.
    for pair in list(args) + list(kwargs.items()):
        key, value = pair
        self[key] = value
"def",
"update",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"k",
",",
"v",
"in",
"args",
":",
"self",
"[",
"k",
"]",
"=",
"v",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"self",
"[",
"k",
"]",
"=",
"v"
] | 36.857143 | 22.071429 |
def add_pooling_with_padding_types(builder, name, height, width, stride_height, stride_width,
                                   layer_type, padding_type, input_name, output_name,
                                   padding_top = 0, padding_bottom = 0, padding_left = 0, padding_right = 0,
                                   same_padding_asymmetry_mode = 'BOTTOM_RIGHT_HEAVY',
                                   exclude_pad_area = True, is_global = False):
    """
    Add a pooling layer to the model.
    This is our own implementation of add_pooling since current CoreML's version (0.5.0) of builder
    doesn't provide support for padding types apart from valid. This support will be added in the
    next release of coremltools. When that happens, this can be removed.
    Parameters
    ----------
    builder: NeuralNetworkBuilder
        A neural network builder object.
    name: str
        The name of this layer.
    height: int
        Height of pooling region.
    width: int
        Number of elements to be padded on the right side of the input blob.
    stride_height: int
        Stride along the height direction.
    stride_width: int
        Stride along the height direction.
    layer_type: str
        Type of pooling performed. Can either be 'MAX', 'AVERAGE' or 'L2'.
    padding_type: str
        Option for the output blob shape. Can be either 'VALID' , 'SAME' or 'INCLUDE_LAST_PIXEL'. Kindly look at NeuralNetwork.proto for details.
    input_name: str
        The input blob name of this layer.
    output_name: str
        The output blob name of this layer.
    padding_top, padding_bottom, padding_left, padding_right: int
        values of height (top, bottom) and width (left, right) padding to be used if padding type is "VALID" or "INCLUDE_LAST_PIXEL"
    same_padding_asymmetry_mode : str.
        Type of asymmetric padding to be used when padding_type = 'SAME'. Kindly look at NeuralNetwork.proto for details. Can be either 'BOTTOM_RIGHT_HEAVY' or 'TOP_LEFT_HEAVY'.
    exclude_pad_area: boolean
        Whether to exclude padded area in the pooling operation. Defaults to True.
        - If True, the value of the padded area will be excluded.
        - If False, the padded area will be included.
        This flag is only used with average pooling.
    is_global: boolean
        Whether the pooling operation is global. Defaults to False.
        - If True, the pooling operation is global -- the pooling region is of the same size of the input blob.
        Parameters height, width, stride_height, stride_width will be ignored.
        - If False, the pooling operation is not global.

    Raises
    ------
    ValueError
        If padding_type is 'SAME' with an unknown asymmetry mode, or if
        'INCLUDE_LAST_PIXEL' padding is asymmetric.

    See Also
    --------
    add_convolution, add_pooling, add_activation
    """
    spec = builder.spec
    nn_spec = builder.nn_spec
    # Add a new layer
    spec_layer = nn_spec.layers.add()
    spec_layer.name = name
    spec_layer.input.append(input_name)
    spec_layer.output.append(output_name)
    spec_layer_params = spec_layer.pooling
    # Set the parameters
    spec_layer_params.type = \
        _NeuralNetwork_pb2.PoolingLayerParams.PoolingType.Value(layer_type)
    if padding_type == 'VALID':
        # Explicit border amounts: first entry is the height (top/bottom)
        # border, second entry is the width (left/right) border.
        height_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()
        height_border.startEdgeSize = padding_top
        height_border.endEdgeSize = padding_bottom
        width_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()
        width_border.startEdgeSize = padding_left
        width_border.endEdgeSize = padding_right
    elif padding_type == 'SAME':
        if same_padding_asymmetry_mode not in ('BOTTOM_RIGHT_HEAVY', 'TOP_LEFT_HEAVY'):
            # BUG FIX: the original message used the '%d' conversion on a
            # string argument, which raises TypeError and masks the
            # intended ValueError; '%s' formats the value correctly.
            raise ValueError("Invalid value %s of same_padding_asymmetry_mode parameter" % same_padding_asymmetry_mode)
        spec_layer_params.same.asymmetryMode = _NeuralNetwork_pb2.SamePadding.SamePaddingMode.Value(same_padding_asymmetry_mode)
    elif padding_type == 'INCLUDE_LAST_PIXEL':
        if padding_top != padding_bottom or padding_left != padding_right:
            raise ValueError("Only symmetric padding is supported with the INCLUDE_LAST_PIXEL padding type")
        spec_layer_params.includeLastPixel.paddingAmounts.append(padding_top)
        spec_layer_params.includeLastPixel.paddingAmounts.append(padding_left)
    spec_layer_params.kernelSize.append(height)
    spec_layer_params.kernelSize.append(width)
    spec_layer_params.stride.append(stride_height)
    spec_layer_params.stride.append(stride_width)
    spec_layer_params.avgPoolExcludePadding = exclude_pad_area
    spec_layer_params.globalPooling = is_global
"def",
"add_pooling_with_padding_types",
"(",
"builder",
",",
"name",
",",
"height",
",",
"width",
",",
"stride_height",
",",
"stride_width",
",",
"layer_type",
",",
"padding_type",
",",
"input_name",
",",
"output_name",
",",
"padding_top",
"=",
"0",
",",
"padding_bottom",
"=",
"0",
",",
"padding_left",
"=",
"0",
",",
"padding_right",
"=",
"0",
",",
"same_padding_asymmetry_mode",
"=",
"'BOTTOM_RIGHT_HEAVY'",
",",
"exclude_pad_area",
"=",
"True",
",",
"is_global",
"=",
"False",
")",
":",
"spec",
"=",
"builder",
".",
"spec",
"nn_spec",
"=",
"builder",
".",
"nn_spec",
"# Add a new layer\r",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"pooling",
"# Set the parameters\r",
"spec_layer_params",
".",
"type",
"=",
"_NeuralNetwork_pb2",
".",
"PoolingLayerParams",
".",
"PoolingType",
".",
"Value",
"(",
"layer_type",
")",
"if",
"padding_type",
"==",
"'VALID'",
":",
"height_border",
"=",
"spec_layer_params",
".",
"valid",
".",
"paddingAmounts",
".",
"borderAmounts",
".",
"add",
"(",
")",
"height_border",
".",
"startEdgeSize",
"=",
"padding_top",
"height_border",
".",
"endEdgeSize",
"=",
"padding_bottom",
"width_border",
"=",
"spec_layer_params",
".",
"valid",
".",
"paddingAmounts",
".",
"borderAmounts",
".",
"add",
"(",
")",
"width_border",
".",
"startEdgeSize",
"=",
"padding_left",
"width_border",
".",
"endEdgeSize",
"=",
"padding_right",
"elif",
"padding_type",
"==",
"'SAME'",
":",
"if",
"not",
"(",
"same_padding_asymmetry_mode",
"==",
"'BOTTOM_RIGHT_HEAVY'",
"or",
"same_padding_asymmetry_mode",
"==",
"'TOP_LEFT_HEAVY'",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid value %d of same_padding_asymmetry_mode parameter\"",
"%",
"same_padding_asymmetry_mode",
")",
"spec_layer_params",
".",
"same",
".",
"asymmetryMode",
"=",
"_NeuralNetwork_pb2",
".",
"SamePadding",
".",
"SamePaddingMode",
".",
"Value",
"(",
"same_padding_asymmetry_mode",
")",
"elif",
"padding_type",
"==",
"'INCLUDE_LAST_PIXEL'",
":",
"if",
"padding_top",
"!=",
"padding_bottom",
"or",
"padding_left",
"!=",
"padding_right",
":",
"raise",
"ValueError",
"(",
"\"Only symmetric padding is supported with the INCLUDE_LAST_PIXEL padding type\"",
")",
"spec_layer_params",
".",
"includeLastPixel",
".",
"paddingAmounts",
".",
"append",
"(",
"padding_top",
")",
"spec_layer_params",
".",
"includeLastPixel",
".",
"paddingAmounts",
".",
"append",
"(",
"padding_left",
")",
"spec_layer_params",
".",
"kernelSize",
".",
"append",
"(",
"height",
")",
"spec_layer_params",
".",
"kernelSize",
".",
"append",
"(",
"width",
")",
"spec_layer_params",
".",
"stride",
".",
"append",
"(",
"stride_height",
")",
"spec_layer_params",
".",
"stride",
".",
"append",
"(",
"stride_width",
")",
"spec_layer_params",
".",
"avgPoolExcludePadding",
"=",
"exclude_pad_area",
"spec_layer_params",
".",
"globalPooling",
"=",
"is_global"
] | 46.142857 | 27.693878 |
def spkssb(targ, et, ref):
    """
    Compute the state (position and velocity) of a target body with
    respect to the solar system barycenter at a given epoch.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkssb_c.html

    :param targ: Target body.
    :type targ: int
    :param et: Target epoch.
    :type et: float
    :param ref: Target reference frame.
    :type ref: str
    :return: State of target.
    :rtype: 6-Element Array of floats
    """
    # Marshal the Python arguments into the C types CSPICE expects.
    c_targ = ctypes.c_int(targ)
    c_et = ctypes.c_double(et)
    c_ref = stypes.stringToCharP(ref)
    # Output buffer: 6-element state vector (3 position + 3 velocity).
    c_state = stypes.emptyDoubleVector(6)
    libspice.spkssb_c(c_targ, c_et, c_ref, c_state)
    return stypes.cVectorToPython(c_state)
"def",
"spkssb",
"(",
"targ",
",",
"et",
",",
"ref",
")",
":",
"targ",
"=",
"ctypes",
".",
"c_int",
"(",
"targ",
")",
"et",
"=",
"ctypes",
".",
"c_double",
"(",
"et",
")",
"ref",
"=",
"stypes",
".",
"stringToCharP",
"(",
"ref",
")",
"starg",
"=",
"stypes",
".",
"emptyDoubleVector",
"(",
"6",
")",
"libspice",
".",
"spkssb_c",
"(",
"targ",
",",
"et",
",",
"ref",
",",
"starg",
")",
"return",
"stypes",
".",
"cVectorToPython",
"(",
"starg",
")"
] | 29.545455 | 13 |
def _run_cheroot(app, config, mode):
    """Run WsgiDAV using cheroot.server if Cheroot is installed.

    :param app: WSGI application to serve.
    :param config: dict providing "host" and "port", plus optional
        "ssl_certificate" / "ssl_private_key" / "ssl_certificate_chain",
        "ssl_adapter", "server_args", and "startup_event" entries.
    :param mode: must be "cheroot" (selected by the caller's dispatch).
    :raises RuntimeError: if only one of the two SSL options is given.
    """
    assert mode == "cheroot"
    try:
        from cheroot import server, wsgi

        # from cheroot.ssl.builtin import BuiltinSSLAdapter
        # import cheroot.ssl.pyopenssl
    except ImportError:
        _logger.error("*" * 78)
        _logger.error("ERROR: Could not import Cheroot.")
        _logger.error(
            "Try `pip install cheroot` or specify another server using the --server option."
        )
        _logger.error("*" * 78)
        raise

    server_name = "WsgiDAV/{} {} Python/{}".format(
        __version__, wsgi.Server.version, util.PYTHON_VERSION
    )
    wsgi.Server.version = server_name

    # Support SSL
    ssl_certificate = _get_checked_path(config.get("ssl_certificate"), config)
    ssl_private_key = _get_checked_path(config.get("ssl_private_key"), config)
    ssl_certificate_chain = _get_checked_path(
        config.get("ssl_certificate_chain"), config
    )
    ssl_adapter = config.get("ssl_adapter", "builtin")
    protocol = "http"
    if ssl_certificate and ssl_private_key:
        ssl_adapter = server.get_ssl_adapter_class(ssl_adapter)
        wsgi.Server.ssl_adapter = ssl_adapter(
            ssl_certificate, ssl_private_key, ssl_certificate_chain
        )
        protocol = "https"
        _logger.info("SSL / HTTPS enabled. Adapter: {}".format(ssl_adapter))
    elif ssl_certificate or ssl_private_key:
        raise RuntimeError(
            "Option 'ssl_certificate' and 'ssl_private_key' must be used together."
        )
    # elif ssl_adapter:
    #     print("WARNING: Ignored option 'ssl_adapter' (requires 'ssl_certificate').")

    _logger.info("Running {}".format(server_name))
    _logger.info(
        "Serving on {}://{}:{} ...".format(protocol, config["host"], config["port"])
    )
    server_args = {
        "bind_addr": (config["host"], config["port"]),
        "wsgi_app": app,
        "server_name": server_name,
    }
    # Override or add custom args
    server_args.update(config.get("server_args", {}))
    # Bind the instance to a distinct name: the original code assigned it to
    # `server`, which shadowed the imported `cheroot.server` module and made
    # the module unreachable for any code added below this point.
    http_server = wsgi.Server(**server_args)

    # If the caller passed a startup event, monkey patch the server to set it
    # when the request handler loop is entered
    startup_event = config.get("startup_event")
    if startup_event:

        def _patched_tick():
            http_server.tick = org_tick  # undo the monkey patch
            _logger.info("wsgi.Server is ready")
            startup_event.set()
            org_tick()

        org_tick = http_server.tick
        http_server.tick = _patched_tick

    try:
        http_server.start()
    except KeyboardInterrupt:
        _logger.warning("Caught Ctrl-C, shutting down...")
    finally:
        # Always release the listening socket, even on error.
        http_server.stop()
    return
"def",
"_run_cheroot",
"(",
"app",
",",
"config",
",",
"mode",
")",
":",
"assert",
"mode",
"==",
"\"cheroot\"",
"try",
":",
"from",
"cheroot",
"import",
"server",
",",
"wsgi",
"# from cheroot.ssl.builtin import BuiltinSSLAdapter",
"# import cheroot.ssl.pyopenssl",
"except",
"ImportError",
":",
"_logger",
".",
"error",
"(",
"\"*\"",
"*",
"78",
")",
"_logger",
".",
"error",
"(",
"\"ERROR: Could not import Cheroot.\"",
")",
"_logger",
".",
"error",
"(",
"\"Try `pip install cheroot` or specify another server using the --server option.\"",
")",
"_logger",
".",
"error",
"(",
"\"*\"",
"*",
"78",
")",
"raise",
"server_name",
"=",
"\"WsgiDAV/{} {} Python/{}\"",
".",
"format",
"(",
"__version__",
",",
"wsgi",
".",
"Server",
".",
"version",
",",
"util",
".",
"PYTHON_VERSION",
")",
"wsgi",
".",
"Server",
".",
"version",
"=",
"server_name",
"# Support SSL",
"ssl_certificate",
"=",
"_get_checked_path",
"(",
"config",
".",
"get",
"(",
"\"ssl_certificate\"",
")",
",",
"config",
")",
"ssl_private_key",
"=",
"_get_checked_path",
"(",
"config",
".",
"get",
"(",
"\"ssl_private_key\"",
")",
",",
"config",
")",
"ssl_certificate_chain",
"=",
"_get_checked_path",
"(",
"config",
".",
"get",
"(",
"\"ssl_certificate_chain\"",
")",
",",
"config",
")",
"ssl_adapter",
"=",
"config",
".",
"get",
"(",
"\"ssl_adapter\"",
",",
"\"builtin\"",
")",
"protocol",
"=",
"\"http\"",
"if",
"ssl_certificate",
"and",
"ssl_private_key",
":",
"ssl_adapter",
"=",
"server",
".",
"get_ssl_adapter_class",
"(",
"ssl_adapter",
")",
"wsgi",
".",
"Server",
".",
"ssl_adapter",
"=",
"ssl_adapter",
"(",
"ssl_certificate",
",",
"ssl_private_key",
",",
"ssl_certificate_chain",
")",
"protocol",
"=",
"\"https\"",
"_logger",
".",
"info",
"(",
"\"SSL / HTTPS enabled. Adapter: {}\"",
".",
"format",
"(",
"ssl_adapter",
")",
")",
"elif",
"ssl_certificate",
"or",
"ssl_private_key",
":",
"raise",
"RuntimeError",
"(",
"\"Option 'ssl_certificate' and 'ssl_private_key' must be used together.\"",
")",
"# elif ssl_adapter:",
"# print(\"WARNING: Ignored option 'ssl_adapter' (requires 'ssl_certificate').\")",
"_logger",
".",
"info",
"(",
"\"Running {}\"",
".",
"format",
"(",
"server_name",
")",
")",
"_logger",
".",
"info",
"(",
"\"Serving on {}://{}:{} ...\"",
".",
"format",
"(",
"protocol",
",",
"config",
"[",
"\"host\"",
"]",
",",
"config",
"[",
"\"port\"",
"]",
")",
")",
"server_args",
"=",
"{",
"\"bind_addr\"",
":",
"(",
"config",
"[",
"\"host\"",
"]",
",",
"config",
"[",
"\"port\"",
"]",
")",
",",
"\"wsgi_app\"",
":",
"app",
",",
"\"server_name\"",
":",
"server_name",
",",
"}",
"# Override or add custom args",
"server_args",
".",
"update",
"(",
"config",
".",
"get",
"(",
"\"server_args\"",
",",
"{",
"}",
")",
")",
"server",
"=",
"wsgi",
".",
"Server",
"(",
"*",
"*",
"server_args",
")",
"# If the caller passed a startup event, monkey patch the server to set it",
"# when the request handler loop is entered",
"startup_event",
"=",
"config",
".",
"get",
"(",
"\"startup_event\"",
")",
"if",
"startup_event",
":",
"def",
"_patched_tick",
"(",
")",
":",
"server",
".",
"tick",
"=",
"org_tick",
"# undo the monkey patch",
"_logger",
".",
"info",
"(",
"\"wsgi.Server is ready\"",
")",
"startup_event",
".",
"set",
"(",
")",
"org_tick",
"(",
")",
"org_tick",
"=",
"server",
".",
"tick",
"server",
".",
"tick",
"=",
"_patched_tick",
"try",
":",
"server",
".",
"start",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"_logger",
".",
"warning",
"(",
"\"Caught Ctrl-C, shutting down...\"",
")",
"finally",
":",
"server",
".",
"stop",
"(",
")",
"return"
] | 33.725 | 21.6375 |
def get_built_image_info(self):
    """
    Query the Docker daemon for metadata about the built image.

    :return: dict with the image information
    :raises RuntimeError: if the image is missing or matches multiple entries
    """
    logger.info("getting information about built image '%s'", self.image)
    matches = self.tasker.get_image_info_by_image_name(self.image)
    count = len(matches)
    # Guard clauses: exactly one match is the only valid outcome.
    if count <= 0:
        logger.error("image '%s' not found", self.image)
        raise RuntimeError("image '%s' not found" % self.image)
    if count > 1:
        logger.error("multiple (%d) images found for image '%s'", count, self.image)
        raise RuntimeError("multiple (%d) images found for image '%s'" % (count,
                                                                          self.image))
    return matches[0]
"def",
"get_built_image_info",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"\"getting information about built image '%s'\"",
",",
"self",
".",
"image",
")",
"image_info",
"=",
"self",
".",
"tasker",
".",
"get_image_info_by_image_name",
"(",
"self",
".",
"image",
")",
"items_count",
"=",
"len",
"(",
"image_info",
")",
"if",
"items_count",
"==",
"1",
":",
"return",
"image_info",
"[",
"0",
"]",
"elif",
"items_count",
"<=",
"0",
":",
"logger",
".",
"error",
"(",
"\"image '%s' not found\"",
",",
"self",
".",
"image",
")",
"raise",
"RuntimeError",
"(",
"\"image '%s' not found\"",
"%",
"self",
".",
"image",
")",
"else",
":",
"logger",
".",
"error",
"(",
"\"multiple (%d) images found for image '%s'\"",
",",
"items_count",
",",
"self",
".",
"image",
")",
"raise",
"RuntimeError",
"(",
"\"multiple (%d) images found for image '%s'\"",
"%",
"(",
"items_count",
",",
"self",
".",
"image",
")",
")"
] | 44.555556 | 22.333333 |
def jackknife_indexes(data):
    """
    Yield the jackknife index sets for *data*, where axis 0 delineates
    the observation points.

    For a data set Y, the jackknife sample J[i] is Y with the ith point
    deleted; this returns the index arrays selecting each such sample.

    :return: generator of 1-D integer arrays, one per leave-one-out sample
    """
    all_idx = np.arange(len(data))
    return (np.delete(all_idx, leave_out) for leave_out in all_idx)
"def",
"jackknife_indexes",
"(",
"data",
")",
":",
"base",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"len",
"(",
"data",
")",
")",
"return",
"(",
"np",
".",
"delete",
"(",
"base",
",",
"i",
")",
"for",
"i",
"in",
"base",
")"
] | 37.4 | 17 |
def PathCollection(mode="agg", *args, **kwargs):
    """
    Factory selecting a path-collection implementation by rendering mode.

    mode: string
      - "raw" (speed: fastest, size: small, output: ugly, no dash,
               no thickness)
      - "agg" (speed: medium, size: medium output: nice, some flaws, no dash)
      - "agg+" (speed: slow, size: big, output: perfect, no dash)

    Any unrecognized mode falls back to the default "agg" implementation.
    """
    factories = {
        "raw": RawPathCollection,
        "agg+": AggPathCollection,
    }
    make = factories.get(mode, AggFastPathCollection)
    return make(*args, **kwargs)
"def",
"PathCollection",
"(",
"mode",
"=",
"\"agg\"",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"mode",
"==",
"\"raw\"",
":",
"return",
"RawPathCollection",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"elif",
"mode",
"==",
"\"agg+\"",
":",
"return",
"AggPathCollection",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"AggFastPathCollection",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 36.642857 | 16.928571 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.