text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def viewport(self) -> Tuple[int, int, int, int]:
'''
tuple: The viewport of the window.
'''
return self.wnd.viewport | [
"def",
"viewport",
"(",
"self",
")",
"->",
"Tuple",
"[",
"int",
",",
"int",
",",
"int",
",",
"int",
"]",
":",
"return",
"self",
".",
"wnd",
".",
"viewport"
] | 24.666667 | 20 |
def encoding(encoding=True):
"""DEPRECATED: use pynvim.decode()."""
if isinstance(encoding, str):
encoding = True
def dec(f):
f._nvim_decode = encoding
return f
return dec | [
"def",
"encoding",
"(",
"encoding",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"encoding",
",",
"str",
")",
":",
"encoding",
"=",
"True",
"def",
"dec",
"(",
"f",
")",
":",
"f",
".",
"_nvim_decode",
"=",
"encoding",
"return",
"f",
"return",
"dec"... | 22.666667 | 17.555556 |
def droit_d_accise(depense, droit_cn, consommation_cn, taux_plein_tva):
"""
Calcule le montant de droit d'accise sur un volume de dépense payé pour le poste adéquat.
"""
return depense * ((1 + taux_plein_tva) * droit_cn) / (consommation_cn - (1 + taux_plein_tva) * droit_cn) | [
"def",
"droit_d_accise",
"(",
"depense",
",",
"droit_cn",
",",
"consommation_cn",
",",
"taux_plein_tva",
")",
":",
"return",
"depense",
"*",
"(",
"(",
"1",
"+",
"taux_plein_tva",
")",
"*",
"droit_cn",
")",
"/",
"(",
"consommation_cn",
"-",
"(",
"1",
"+",
... | 57.2 | 30.4 |
def _adjust_sell_amount(self, stock_code, amount):
"""
根据实际持仓值计算雪球卖出股数
因为雪球的交易指令是基于持仓百分比,在取近似值的情况下可能出现不精确的问题。
导致如下情况的产生,计算出的指令为买入 1049 股,取近似值买入 1000 股。
而卖出的指令计算出为卖出 1051 股,取近似值卖出 1100 股,超过 1000 股的买入量,
导致卖出失败
:param stock_code: 证券代码
:type stock_code: str
:param amount: 卖出股份数
:type amount: int
:return: 考虑实际持仓之后的卖出股份数
:rtype: int
"""
stock_code = stock_code[-6:]
user = self._users[0]
position = user.position
try:
stock = next(s for s in position if s['证券代码'] == stock_code)
except StopIteration:
log.info('根据持仓调整 %s 卖出额,发现未持有股票 %s, 不做任何调整',
stock_code, stock_code)
return amount
available_amount = stock['可用余额']
if available_amount >= amount:
return amount
adjust_amount = available_amount // 100 * 100
log.info('股票 %s 实际可用余额 %s, 指令卖出股数为 %s, 调整为 %s',
stock_code, available_amount, amount, adjust_amount)
return adjust_amount | [
"def",
"_adjust_sell_amount",
"(",
"self",
",",
"stock_code",
",",
"amount",
")",
":",
"stock_code",
"=",
"stock_code",
"[",
"-",
"6",
":",
"]",
"user",
"=",
"self",
".",
"_users",
"[",
"0",
"]",
"position",
"=",
"user",
".",
"position",
"try",
":",
... | 33.375 | 14.3125 |
def _make_ta_service_dict(self):
"""
Build our service and limits dict. This is laid out identical to
``self.all_services``, but keys limits by their ``ta_service_name``
and ``ta_limit_name`` properties.
:return: dict of TA service names to TA limit names to AwsLimit objects.
"""
res = {}
for svc_name in self.all_services:
svc_obj = self.all_services[svc_name]
for lim_name, lim in svc_obj.get_limits().items():
if lim.ta_service_name not in res:
res[lim.ta_service_name] = {}
res[lim.ta_service_name][lim.ta_limit_name] = lim
return res | [
"def",
"_make_ta_service_dict",
"(",
"self",
")",
":",
"res",
"=",
"{",
"}",
"for",
"svc_name",
"in",
"self",
".",
"all_services",
":",
"svc_obj",
"=",
"self",
".",
"all_services",
"[",
"svc_name",
"]",
"for",
"lim_name",
",",
"lim",
"in",
"svc_obj",
"."... | 42.0625 | 17.4375 |
def _GetLineNumbers(code_object):
"""Generator for getting the line numbers of a code object.
Args:
code_object: the code object.
Yields:
The next line number in the code object.
"""
# Get the line number deltas, which are the odd number entries, from the
# lnotab. See
# https://svn.python.org/projects/python/branches/pep-0384/Objects/lnotab_notes.txt
# In Python 3, this is just a byte array. In Python 2 it is a string so the
# numerical values have to be extracted from the individual characters.
if six.PY3:
line_incrs = code_object.co_lnotab[1::2]
else:
line_incrs = (ord(c) for c in code_object.co_lnotab[1::2])
current_line = code_object.co_firstlineno
for line_incr in line_incrs:
current_line += line_incr
yield current_line | [
"def",
"_GetLineNumbers",
"(",
"code_object",
")",
":",
"# Get the line number deltas, which are the odd number entries, from the",
"# lnotab. See",
"# https://svn.python.org/projects/python/branches/pep-0384/Objects/lnotab_notes.txt",
"# In Python 3, this is just a byte array. In Python 2 it is a... | 34.818182 | 21.090909 |
def new_datetime(d):
"""
Generate a safe datetime from a datetime.date or datetime.datetime object.
"""
kw = [d.year, d.month, d.day]
if isinstance(d, real_datetime):
kw.extend([d.hour, d.minute, d.second, d.microsecond, d.tzinfo])
return datetime(*kw) | [
"def",
"new_datetime",
"(",
"d",
")",
":",
"kw",
"=",
"[",
"d",
".",
"year",
",",
"d",
".",
"month",
",",
"d",
".",
"day",
"]",
"if",
"isinstance",
"(",
"d",
",",
"real_datetime",
")",
":",
"kw",
".",
"extend",
"(",
"[",
"d",
".",
"hour",
","... | 34.625 | 14.625 |
def find(self, path, match, flags):
""" find every matching child path under path """
try:
match = re.compile(match, flags)
except sre_constants.error as ex:
print("Bad regexp: %s" % (ex))
return
offset = len(path)
for cpath in Tree(self, path).get():
if match.search(cpath[offset:]):
yield cpath | [
"def",
"find",
"(",
"self",
",",
"path",
",",
"match",
",",
"flags",
")",
":",
"try",
":",
"match",
"=",
"re",
".",
"compile",
"(",
"match",
",",
"flags",
")",
"except",
"sre_constants",
".",
"error",
"as",
"ex",
":",
"print",
"(",
"\"Bad regexp: %s\... | 32.5 | 11.416667 |
def _check_valid_data(self, data):
"""Checks that the given data is a float array with one channel.
Parameters
----------
data : :obj:`numpy.ndarray`
The data to check.
Raises
------
ValueError
If the data is invalid.
"""
if data.dtype.type is not np.float32 and \
data.dtype.type is not np.float64:
raise ValueError(
'Illegal data type. Depth images only support float arrays')
if len(data.shape) == 3 and data.shape[2] != 1:
raise ValueError(
'Illegal data type. Depth images only support single channel') | [
"def",
"_check_valid_data",
"(",
"self",
",",
"data",
")",
":",
"if",
"data",
".",
"dtype",
".",
"type",
"is",
"not",
"np",
".",
"float32",
"and",
"data",
".",
"dtype",
".",
"type",
"is",
"not",
"np",
".",
"float64",
":",
"raise",
"ValueError",
"(",
... | 31.714286 | 18.809524 |
def distance(x,y):
"""[summary]
HELPER-FUNCTION
calculates the (eulidean) distance between vector x and y.
Arguments:
x {[tuple]} -- [vector]
y {[tuple]} -- [vector]
"""
assert len(x) == len(y), "The vector must have same length"
result = ()
sum = 0
for i in range(len(x)):
result += (x[i] -y[i],)
for component in result:
sum += component**2
return math.sqrt(sum) | [
"def",
"distance",
"(",
"x",
",",
"y",
")",
":",
"assert",
"len",
"(",
"x",
")",
"==",
"len",
"(",
"y",
")",
",",
"\"The vector must have same length\"",
"result",
"=",
"(",
")",
"sum",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"x",
"... | 25 | 16.941176 |
def parse_chromosome_string(job, chromosome_string):
"""
Parse a chromosome string into a list.
:param chromosome_string: Input chromosome string
:return: list of chromosomes to handle
:rtype: list
"""
if chromosome_string is None:
return []
else:
assert isinstance(chromosome_string, str)
chroms = [c.strip() for c in chromosome_string.split(',')]
if 'canonical' in chroms:
assert 'canonical_chr' not in chroms, 'Cannot have canonical and canonical_chr'
chr_prefix = False
chroms.remove('canonical')
out_chroms = [str(c) for c in range(1, 23)] + ['X', 'Y']
elif 'canonical_chr' in chroms:
assert 'canonical' not in chroms, 'Cannot have canonical and canonical_chr'
chr_prefix = True
chroms.remove('canonical_chr')
out_chroms = ['chr' + str(c) for c in range(1, 23)] + ['chrX', 'chrY']
else:
chr_prefix = None
out_chroms = []
for chrom in chroms:
if chr_prefix is not None and chrom.startswith('chr') is not chr_prefix:
job.fileStore.logToMaster('chromosome %s does not match the rest that %s begin '
'with "chr".' % (chrom,
'all' if chr_prefix else 'don\'t'),
level=logging.WARNING)
out_chroms.append(chrom)
return chrom_sorted(out_chroms) | [
"def",
"parse_chromosome_string",
"(",
"job",
",",
"chromosome_string",
")",
":",
"if",
"chromosome_string",
"is",
"None",
":",
"return",
"[",
"]",
"else",
":",
"assert",
"isinstance",
"(",
"chromosome_string",
",",
"str",
")",
"chroms",
"=",
"[",
"c",
".",
... | 44.323529 | 19.441176 |
def fof(self, linkinglength, out=None, method='splay'):
""" Friend-of-Friend clustering with linking length.
Returns: the label
"""
if out is None:
out = numpy.empty(self.size, dtype='intp')
return _core.KDNode.fof(self, linkinglength, out, method) | [
"def",
"fof",
"(",
"self",
",",
"linkinglength",
",",
"out",
"=",
"None",
",",
"method",
"=",
"'splay'",
")",
":",
"if",
"out",
"is",
"None",
":",
"out",
"=",
"numpy",
".",
"empty",
"(",
"self",
".",
"size",
",",
"dtype",
"=",
"'intp'",
")",
"ret... | 37.25 | 15.125 |
def flush_synced(self, index=None, params=None):
"""
Perform a normal flush, then add a generated unique marker (sync_id) to all shards.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-synced-flush.html>`_
:arg index: A comma-separated list of index names; use `_all` or empty
string for all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
"""
return self.transport.perform_request(
"POST", _make_path(index, "_flush", "synced"), params=params
) | [
"def",
"flush_synced",
"(",
"self",
",",
"index",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"return",
"self",
".",
"transport",
".",
"perform_request",
"(",
"\"POST\"",
",",
"_make_path",
"(",
"index",
",",
"\"_flush\"",
",",
"\"synced\"",
")",
... | 55.263158 | 26.421053 |
def _pfp__snapshot(self, recurse=True):
"""Save off the current value of the field
"""
super(Struct, self)._pfp__snapshot(recurse=recurse)
if recurse:
for child in self._pfp__children:
child._pfp__snapshot(recurse=recurse) | [
"def",
"_pfp__snapshot",
"(",
"self",
",",
"recurse",
"=",
"True",
")",
":",
"super",
"(",
"Struct",
",",
"self",
")",
".",
"_pfp__snapshot",
"(",
"recurse",
"=",
"recurse",
")",
"if",
"recurse",
":",
"for",
"child",
"in",
"self",
".",
"_pfp__children",
... | 34.5 | 12.375 |
def _get_descendent_cat_idstrs(self, cat_id, hierarchy_session=None):
"""Recursively returns a list of all descendent catalog ids, inclusive"""
def get_descendent_ids(h_session):
idstr_list = [str(cat_id)]
if h_session is None:
pkg_name = cat_id.get_identifier_namespace().split('.')[0]
cat_name = cat_id.get_identifier_namespace().split('.')[1]
try:
mgr = self._get_provider_manager('HIERARCHY')
h_session = mgr.get_hierarchy_traversal_session_for_hierarchy(
Id(authority=pkg_name.upper(),
namespace='CATALOG',
identifier=cat_name.upper()),
proxy=self._proxy)
except (errors.OperationFailed, errors.Unsupported):
return idstr_list # there is no hierarchy
if h_session.has_children(cat_id):
for child_id in h_session.get_children(cat_id):
idstr_list += self._get_descendent_cat_idstrs(child_id, h_session)
return list(set(idstr_list))
use_caching = False
try:
config = self._runtime.get_configuration()
parameter_id = Id('parameter:useCachingForQualifierIds@json')
if config.get_value_by_parameter(parameter_id).get_boolean_value():
use_caching = True
else:
pass
except (AttributeError, KeyError, errors.NotFound):
pass
if use_caching:
key = 'descendent-catalog-ids-{0}'.format(str(cat_id))
# If configured to use memcache as the caching engine, use it.
# Otherwise default to diskcache
caching_engine = 'diskcache'
try:
config = self._runtime.get_configuration()
parameter_id = Id('parameter:cachingEngine@json')
caching_engine = config.get_value_by_parameter(parameter_id).get_string_value()
except (AttributeError, KeyError, errors.NotFound):
pass
if caching_engine == 'memcache':
import memcache
caching_host = '127.0.0.1:11211'
try:
config = self._runtime.get_configuration()
parameter_id = Id('parameter:cachingHostURI@json')
caching_host = config.get_value_by_parameter(parameter_id).get_string_value()
except (AttributeError, KeyError, errors.NotFound):
pass
mc = memcache.Client([caching_host], debug=0)
catalog_ids = mc.get(key)
if catalog_ids is None:
catalog_ids = get_descendent_ids(hierarchy_session)
mc.set(key, catalog_ids)
elif caching_engine == 'diskcache':
import diskcache
with diskcache.Cache('/tmp/dlkit_cache') as cache:
# A little bit non-DRY, since it's almost the same as for memcache above.
# However, for diskcache.Cache, we have to call ".close()" or use a
# ``with`` statement to safeguard calling ".close()", so we keep this
# separate from the memcache implementation.
catalog_ids = cache.get(key)
if catalog_ids is None:
catalog_ids = get_descendent_ids(hierarchy_session)
cache.set(key, catalog_ids)
else:
raise errors.NotFound('The {0} caching engine was not found.'.format(caching_engine))
else:
catalog_ids = get_descendent_ids(hierarchy_session)
return catalog_ids | [
"def",
"_get_descendent_cat_idstrs",
"(",
"self",
",",
"cat_id",
",",
"hierarchy_session",
"=",
"None",
")",
":",
"def",
"get_descendent_ids",
"(",
"h_session",
")",
":",
"idstr_list",
"=",
"[",
"str",
"(",
"cat_id",
")",
"]",
"if",
"h_session",
"is",
"None"... | 48.506494 | 22.571429 |
def send_batch(messages, api_key=None, secure=None, test=None, **request_args):
'''Send a batch of messages.
:param messages: Messages to send.
:type message: A list of `dict` or :class:`Message`
:param api_key: Your Postmark API key. Required, if `test` is not `True`.
:param secure: Use the https scheme for the Postmark API.
Defaults to `True`
:param test: Use the Postmark Test API. Defaults to `False`.
:param \*\*request_args: Keyword arguments to pass to
:func:`requests.request`.
:rtype: :class:`BatchSendResponse`
'''
return _default_pyst_batch_sender.send(messages=messages, api_key=api_key,
secure=secure, test=test,
**request_args) | [
"def",
"send_batch",
"(",
"messages",
",",
"api_key",
"=",
"None",
",",
"secure",
"=",
"None",
",",
"test",
"=",
"None",
",",
"*",
"*",
"request_args",
")",
":",
"return",
"_default_pyst_batch_sender",
".",
"send",
"(",
"messages",
"=",
"messages",
",",
... | 48.1875 | 21.4375 |
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens to have nice filenames.
From Django's "django/template/defaultfilters.py".
>>> slugify("El pingüino Wenceslao hizo kilómetros bajo exhaustiva lluvia y frío, añoraba a su querido cachorro. ortez ce vieux whisky au juge blond qui fume sur son île intérieure, à Γαζέες καὶ μυρτιὲς δὲν θὰ βρῶ πιὰ στὸ χρυσαφὶ ξέφωτο いろはにほへとちりぬるを Pchnąć w tę łódź jeża lub ośm skrzyń fig กว่าบรรดาฝูงสัตว์เดรัจฉาน")
'El_pinguino_Wenceslao_hizo_kilometros_bajo_exhaustiva_lluvia_y_frio_anoraba_a_su_querido_cachorro_ortez_ce_vieux_whisky_au_juge_blond_qui_fume_sur_son_ile_interieure_a_Pchnac_w_te_odz_jeza_lub_osm_skrzyn_fig'
"""
try:
unicode_type = unicode
except NameError:
unicode_type = str
if not isinstance(value, unicode_type):
value = unicode_type(value)
value = (unicodedata.normalize('NFKD', value).
encode('ascii', 'ignore').decode('ascii'))
value = unicode_type(_SLUGIFY_STRIP_RE.sub('', value).strip())
return _SLUGIFY_HYPHENATE_RE.sub('_', value) | [
"def",
"slugify",
"(",
"value",
")",
":",
"try",
":",
"unicode_type",
"=",
"unicode",
"except",
"NameError",
":",
"unicode_type",
"=",
"str",
"if",
"not",
"isinstance",
"(",
"value",
",",
"unicode_type",
")",
":",
"value",
"=",
"unicode_type",
"(",
"value"... | 56.85 | 38.25 |
def config_cmd():
"""Configuration handling.
Other Parameters:
conf.config
"""
if not (conf.common.config or conf.config.create or
conf.config.create_local or conf.config.update or
conf.config.edit):
config_pp(conf.sections_())
loam.tools.config_cmd_handler(conf) | [
"def",
"config_cmd",
"(",
")",
":",
"if",
"not",
"(",
"conf",
".",
"common",
".",
"config",
"or",
"conf",
".",
"config",
".",
"create",
"or",
"conf",
".",
"config",
".",
"create_local",
"or",
"conf",
".",
"config",
".",
"update",
"or",
"conf",
".",
... | 28.545455 | 14.090909 |
def playlist(netease, name, id):
"""Download a playlist's songs by id."""
if name:
netease.download_playlist_by_search(name)
if id:
netease.download_playlist_by_id(id, 'playlist'+str(id)) | [
"def",
"playlist",
"(",
"netease",
",",
"name",
",",
"id",
")",
":",
"if",
"name",
":",
"netease",
".",
"download_playlist_by_search",
"(",
"name",
")",
"if",
"id",
":",
"netease",
".",
"download_playlist_by_id",
"(",
"id",
",",
"'playlist'",
"+",
"str",
... | 30 | 19.714286 |
def get_date_data(self, date_string, date_formats=None):
"""
Parse string representing date and/or time in recognizable localized formats.
Supports parsing multiple languages and timezones.
:param date_string:
A string representing date and/or time in a recognizably valid format.
:type date_string: str|unicode
:param date_formats:
A list of format strings using directives as given
`here <https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior>`_.
The parser applies formats one by one, taking into account the detected languages.
:type date_formats: list
:return: a dict mapping keys to :mod:`datetime.datetime` object and *period*. For example:
{'date_obj': datetime.datetime(2015, 6, 1, 0, 0), 'period': u'day'}
:raises: ValueError - Unknown Language
.. note:: *Period* values can be a 'day' (default), 'week', 'month', 'year'.
*Period* represents the granularity of date parsed from the given string.
In the example below, since no day information is present, the day is assumed to be current
day ``16`` from *current date* (which is June 16, 2015, at the moment of writing this).
Hence, the level of precision is ``month``:
>>> DateDataParser().get_date_data(u'March 2015')
{'date_obj': datetime.datetime(2015, 3, 16, 0, 0), 'period': u'month'}
Similarly, for date strings with no day and month information present, level of precision
is ``year`` and day ``16`` and month ``6`` are from *current_date*.
>>> DateDataParser().get_date_data(u'2014')
{'date_obj': datetime.datetime(2014, 6, 16, 0, 0), 'period': u'year'}
Dates with time zone indications or UTC offsets are returned in UTC time unless
specified using `Settings`_.
>>> DateDataParser().get_date_data(u'23 March 2000, 1:21 PM CET')
{'date_obj': datetime.datetime(2000, 3, 23, 14, 21), 'period': 'day'}
"""
if not(isinstance(date_string, six.text_type) or isinstance(date_string, six.string_types)):
raise TypeError('Input type must be str or unicode')
if isinstance(date_string, bytes):
date_string = date_string.decode('utf-8')
res = parse_with_formats(date_string, date_formats or [], self._settings)
if res['date_obj']:
return res
date_string = sanitize_date(date_string)
for locale in self._get_applicable_locales(date_string):
parsed_date = _DateLocaleParser.parse(
locale, date_string, date_formats, settings=self._settings)
if parsed_date:
parsed_date['locale'] = locale.shortname
if self.try_previous_locales:
self.previous_locales.insert(0, locale)
return parsed_date
else:
return {'date_obj': None, 'period': 'day', 'locale': None} | [
"def",
"get_date_data",
"(",
"self",
",",
"date_string",
",",
"date_formats",
"=",
"None",
")",
":",
"if",
"not",
"(",
"isinstance",
"(",
"date_string",
",",
"six",
".",
"text_type",
")",
"or",
"isinstance",
"(",
"date_string",
",",
"six",
".",
"string_typ... | 45.846154 | 30 |
def delete(self, **args):
'''
Delete a gist by gistname/gistID
'''
if 'name' in args:
self.gist_name = args['name']
self.gist_id = self.getMyID(self.gist_name)
elif 'id' in args:
self.gist_id = args['id']
else:
raise Exception('Provide GistName to delete')
url = 'gists'
if self.gist_id:
r = requests.delete(
'%s/%s/%s'%(BASE_URL,url,self.gist_id),
headers=self.gist.header
)
if (r.status_code == 204):
response = {
'id': self.gist_id,
}
return response
raise Exception('Can not delete gist') | [
"def",
"delete",
"(",
"self",
",",
"*",
"*",
"args",
")",
":",
"if",
"'name'",
"in",
"args",
":",
"self",
".",
"gist_name",
"=",
"args",
"[",
"'name'",
"]",
"self",
".",
"gist_id",
"=",
"self",
".",
"getMyID",
"(",
"self",
".",
"gist_name",
")",
... | 20.576923 | 20.730769 |
def list_inputs(self):
"""Return a string listing all the Step's input names and their types.
The types are returned in a copy/pastable format, so if the type is
`string`, `'string'` (with single quotes) is returned.
Returns:
str containing all input names and types.
"""
doc = []
for inp, typ in self.input_types.items():
if isinstance(typ, six.string_types):
typ = "'{}'".format(typ)
doc.append('{}: {}'.format(inp, typ))
return '\n'.join(doc) | [
"def",
"list_inputs",
"(",
"self",
")",
":",
"doc",
"=",
"[",
"]",
"for",
"inp",
",",
"typ",
"in",
"self",
".",
"input_types",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"typ",
",",
"six",
".",
"string_types",
")",
":",
"typ",
"=",
"\"... | 36.6 | 16.933333 |
def get_group_policy(self, group_name, policy_name):
"""
Retrieves the specified policy document for the specified group.
:type group_name: string
:param group_name: The name of the group the policy is associated with.
:type policy_name: string
:param policy_name: The policy document to get.
"""
params = {'GroupName' : group_name,
'PolicyName' : policy_name}
return self.get_response('GetGroupPolicy', params, verb='POST') | [
"def",
"get_group_policy",
"(",
"self",
",",
"group_name",
",",
"policy_name",
")",
":",
"params",
"=",
"{",
"'GroupName'",
":",
"group_name",
",",
"'PolicyName'",
":",
"policy_name",
"}",
"return",
"self",
".",
"get_response",
"(",
"'GetGroupPolicy'",
",",
"p... | 36.571429 | 18.857143 |
def folder_shared_message(self, request, user, folder):
"""
Send messages.success message after successful share.
"""
messages.success(request, _("Folder {} is now shared with {}".format(folder, user))) | [
"def",
"folder_shared_message",
"(",
"self",
",",
"request",
",",
"user",
",",
"folder",
")",
":",
"messages",
".",
"success",
"(",
"request",
",",
"_",
"(",
"\"Folder {} is now shared with {}\"",
".",
"format",
"(",
"folder",
",",
"user",
")",
")",
")"
] | 46 | 17.6 |
def from_file( cls, filename ):
"""
Create a VASPMeta object by reading a `vaspmeta.yaml` file
Args:
filename (Str): filename to read in.
Returns:
(vasppy.VASPMeta): the VASPMeta object
"""
with open( filename, 'r' ) as stream:
data = yaml.load( stream, Loader=yaml.SafeLoader )
notes = data.get( 'notes' )
v_type = data.get( 'type' )
track = data.get( 'track' )
xargs = {}
if track:
if type( track ) is str:
track = [ track ]
xargs['track'] = track
vaspmeta = VASPMeta( data['title'],
data['description'],
data['status'],
notes=notes,
type=v_type,
**xargs )
return vaspmeta | [
"def",
"from_file",
"(",
"cls",
",",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"stream",
":",
"data",
"=",
"yaml",
".",
"load",
"(",
"stream",
",",
"Loader",
"=",
"yaml",
".",
"SafeLoader",
")",
"notes",
"=",
"da... | 34.666667 | 11.703704 |
def partial(cls, id, token, *, adapter):
"""Creates a partial :class:`Webhook`.
A partial webhook is just a webhook object with an ID and a token.
Parameters
-----------
id: :class:`int`
The ID of the webhook.
token: :class:`str`
The authentication token of the webhook.
adapter: :class:`WebhookAdapter`
The webhook adapter to use when sending requests. This is
typically :class:`AsyncWebhookAdapter` for ``aiohttp`` or
:class:`RequestsWebhookAdapter` for ``requests``.
"""
if not isinstance(adapter, WebhookAdapter):
raise TypeError('adapter must be a subclass of WebhookAdapter')
data = {
'id': id,
'token': token
}
return cls(data, adapter=adapter) | [
"def",
"partial",
"(",
"cls",
",",
"id",
",",
"token",
",",
"*",
",",
"adapter",
")",
":",
"if",
"not",
"isinstance",
"(",
"adapter",
",",
"WebhookAdapter",
")",
":",
"raise",
"TypeError",
"(",
"'adapter must be a subclass of WebhookAdapter'",
")",
"data",
"... | 31.653846 | 20.692308 |
def _get_running_apps(self, instance, requests_config):
"""
Determine what mode was specified
"""
tags = instance.get('tags', [])
if tags is None:
tags = []
master_address = self._get_master_address(instance)
# Get the cluster name from the instance configuration
cluster_name = instance.get('cluster_name')
if cluster_name is None:
raise Exception('The cluster_name must be specified in the instance configuration')
tags.append('cluster_name:%s' % cluster_name)
tags = list(set(tags))
# Determine the cluster mode
cluster_mode = instance.get(SPARK_CLUSTER_MODE)
if cluster_mode is None:
self.log.warning(
'The value for `spark_cluster_mode` was not set in the configuration. '
'Defaulting to "%s"' % SPARK_YARN_MODE
)
cluster_mode = SPARK_YARN_MODE
if cluster_mode == SPARK_STANDALONE_MODE:
# check for PRE-20
pre20 = is_affirmative(instance.get(SPARK_PRE_20_MODE, False))
return self._standalone_init(master_address, pre20, requests_config, tags)
elif cluster_mode == SPARK_MESOS_MODE:
running_apps = self._mesos_init(instance, master_address, requests_config, tags)
return self._get_spark_app_ids(running_apps, requests_config, tags)
elif cluster_mode == SPARK_YARN_MODE:
running_apps = self._yarn_init(master_address, requests_config, tags)
return self._get_spark_app_ids(running_apps, requests_config, tags)
else:
raise Exception('Invalid setting for %s. Received %s.' % (SPARK_CLUSTER_MODE, cluster_mode)) | [
"def",
"_get_running_apps",
"(",
"self",
",",
"instance",
",",
"requests_config",
")",
":",
"tags",
"=",
"instance",
".",
"get",
"(",
"'tags'",
",",
"[",
"]",
")",
"if",
"tags",
"is",
"None",
":",
"tags",
"=",
"[",
"]",
"master_address",
"=",
"self",
... | 45 | 22.368421 |
def cleanup_on_delete(self, sender, document, **kwargs):
'''
Clean up slug redirections on object deletion
'''
if not self.follow or sender is not self.owner_document:
return
slug = getattr(document, self.db_field)
namespace = self.owner_document.__name__
SlugFollow.objects(namespace=namespace, new_slug=slug).delete() | [
"def",
"cleanup_on_delete",
"(",
"self",
",",
"sender",
",",
"document",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"follow",
"or",
"sender",
"is",
"not",
"self",
".",
"owner_document",
":",
"return",
"slug",
"=",
"getattr",
"(",
"doc... | 42.111111 | 19.888889 |
def set_visual_style(self, full_screen_style=False):
"""
Sets the Application visual style.
:param full_screen_style: Use fullscreen stylesheet file.
:type full_screen_style: bool
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Setting Application visual style.")
platform_styles = {"Windows": (("Windows", "Microsoft"),
UiConstants.windows_style,
UiConstants.windows_stylesheet_file,
UiConstants.windows_full_screen_stylesheet_file),
"Darwin": (("Darwin",),
UiConstants.darwin_style,
UiConstants.darwin_stylesheet_file,
UiConstants.darwin_full_screen_stylesheet_file),
"Linux": (("Linux",),
UiConstants.linux_style,
UiConstants.linux_stylesheet_file,
UiConstants.linux_full_screen_stylesheet_file)}
style_sheet_file = None
for platform_style, settings in platform_styles.iteritems():
LOGGER.debug("> Setting '{0}' visual style.".format(platform_style))
platform_systems, style, styleSheeFile, full_screen_style_sheet_file = settings
if platform.system() in platform_systems:
RuntimeGlobals.application.setStyle(style)
style_sheet_path = umbra.ui.common.get_resource_path(styleSheeFile)
if full_screen_style:
full_screen_style_sheet_path = umbra.ui.common.get_resource_path(full_screen_style_sheet_file,
raise_exception=False)
style_sheet_path = full_screen_style_sheet_path or style_sheet_path
style_sheet_file = foundations.io.File(style_sheet_path)
break
if not style_sheet_file:
raise foundations.exceptions.FileExistsError(
"{0} | No stylesheet file found, visual style will not be applied!".format(self.__class__.__name__))
if foundations.common.path_exists(style_sheet_file.path):
LOGGER.debug("> Reading style sheet file: '{0}'.".format(style_sheet_file.path))
style_sheet_file.cache()
for i, line in enumerate(style_sheet_file.content):
search = re.search(r"url\((?P<url>.*)\)", line)
if not search:
continue
style_sheet_file.content[i] = line.replace(search.group("url"),
foundations.strings.to_forward_slashes(
umbra.ui.common.get_resource_path(search.group("url"))))
RuntimeGlobals.application.setStyleSheet(QString("".join(style_sheet_file.content)))
return True
else:
raise foundations.exceptions.FileExistsError(
"{0} | '{1}' stylesheet file is not available, visual style will not be applied!".format(
self.__class__.__name__, style_sheet_file.path)) | [
"def",
"set_visual_style",
"(",
"self",
",",
"full_screen_style",
"=",
"False",
")",
":",
"LOGGER",
".",
"debug",
"(",
"\"> Setting Application visual style.\"",
")",
"platform_styles",
"=",
"{",
"\"Windows\"",
":",
"(",
"(",
"\"Windows\"",
",",
"\"Microsoft\"",
"... | 55.915254 | 29.779661 |
def from_response(response, body):
"""
Return an instance of a ClientException or subclass
based on an httplib2 response.
Usage::
resp, body = http.request(...)
if resp.status_code != 200:
raise exception_from_response(resp, body)
"""
if isinstance(response, dict):
status = response.get("status_code")
else:
status = response.status_code
cls = _code_map.get(int(status), ClientException)
# import pyrax
# pyrax.utils.trace()
request_id = response.headers.get("x-compute-request-id")
if body:
message = "n/a"
details = "n/a"
if isinstance(body, dict):
message = body.get("message")
details = body.get("details")
if message is details is None:
error = body[next(iter(body))]
if isinstance(error, dict):
message = error.get("message", None)
details = error.get("details", None)
else:
message = error
details = None
else:
message = body
return cls(code=status, message=message, details=details,
request_id=request_id)
else:
return cls(code=status, request_id=request_id) | [
"def",
"from_response",
"(",
"response",
",",
"body",
")",
":",
"if",
"isinstance",
"(",
"response",
",",
"dict",
")",
":",
"status",
"=",
"response",
".",
"get",
"(",
"\"status_code\"",
")",
"else",
":",
"status",
"=",
"response",
".",
"status_code",
"c... | 30.926829 | 14.829268 |
def leland94(V, s, r, a, t, C=None, d=None, PosEq=False):
"""Leland94 Capital Structure model, Corporate Bond valuation model
Parameters:
-----------
V : float
Asset Value of the unlevered firm
s : float
Volatility s of the asset value V of the unlevered firm
r : float
Risk free rate
a : float
Bankruptcy cost
t : float
Corporate tax rate
C : float
(option, default C=None)
The Coupon in $ per $100.
- If C>0 then exogenous bancruptcy case, i.e.
a failure to pay credit event is triggered
when the firm cannot pay the coupon C
- If C=None then an endogenous bankcruptcy case,
i.e. the management can set endogenously an
'optimal' coupon: min VB, max W=E+D, E>=0
(see pp.1222).
The internally computed 'optimal' coupon
is retured as output argument.
d : float
(optional, default d=None)
Required dividend by investors, or resp the net cash
payout by the firm.
- if d=None then 100% retained profits
- if d>0 then d is the fixed dividend rate proportional
to the firm's asset value.
The intermediate result 'X' dependends on 'd'.
PosEq : bool
(optional, default PosEq=False)
If True, then enforce a positive net worth, i.e. obligors demand a
"protected bond covenant with positive net worth requirement"
(pp.1233) [dt. Positive Eigenkapitalbasis]
Returns:
--------
D : float
Value of debt (p.1219)
[dt. Wert des Fremdkapital]
E : float
Value of equity Wert (p.1221)
[dt. Eigenkapitalwert]
W : float
Value of levered company, or Total value of the firm (p.1221)
[dt. Firmenwert]
W = V + T - B
W = D + E
T : float
Value of tax benefit (p.1220)
[dt. Steuervorteil]
B : float
Value of bankruptcy costs (p.1220)
[dt. Insolvenzkosten]
VB : float
Level of bankruptcy, i.e. the asset value V at which
bankruptcy is declared [dt. Restwert bei Insolvenz]
- if PosEq=False then formula in pp.1222
- if PosEq=True then the covenant "VB - D = 0" is
applied to protect creditors (pp.1233)
PV : float
PV of $1 if bankruptcy (p.1219)
[dt. Kapitalwert 1 GE bei Insolvenz]
Returns (shiny financial metrics):
----------------------------------
lr : float
Leverage Ratio [dt. Kredithebel]
i.e. value of debt divided by value of levered firm value
D / W
yld : float
Yield on Debt [dt. Fremdkapitalrendite]
i.e. coupon in $ divided by value of debt
C / D
sprd : float
Yield Spread in bp [dt. Kreditspread in bp]
i.e. yield on debt minus riskfree rate converted to bps
(C/D - r) * 10000
Returns (intermediate results):
-------------------------------
X : float
Net Cash Payout X will differ depending on the
dividend policy.
- If d=None, then 100% retained profits (p.1218)
[dt. Thesaurierend]
- If d>0, then fixed dividend per firm value (p.1241)
[dt. Prozentuale Dividendenausschüttung]
(intermediate result)
C : float
The Coupon in $ per $100.
- If input argument is C>0 then the input
argument C is returned as is (exogenous brankruptcy
case).
- If input argument C=None, then the internally
computed 'optimal' coupon the the endogenous
brankruptcy case is returned (pp.1222)
(intermediate result)
A : float
Annuity value (Wert der Annuitaet), "A=C/r",
The coupon (in $) divded by the risk-free rate.
(intermediate result)
Examples:
---------
PosEq: No (False), Pos Net Worth covenant (True)
Coupon: Endo (C=None), Exo (C>0)
Source:
-------
Leland, Hayne E. 1994. "Corporate Debt Value, Bond Covenants, and
Optimal Capital Structure." The Journal of Finance 49 (4): 1213–52.
https://doi.org/10.1111/j.1540-6261.1994.tb02452.x.
"""
# subfunction for
def netcashpayout_by_dividend(r, d, s):
"""net cash payout proportional to the firm's asset value
for a given required dividend rate (p.1241)
"""
import math
s2 = s * s
tmp = r - d - 0.5 * s2
return (tmp + math.sqrt(tmp * tmp + 2.0 * s2 * r)) / s2
def optimal_coupon(V, r, a, t, X):
"""Coupon for the endogenous bankcruptcy case (pp.1222)"""
m = ((1.0 - t) * X / (r * (1.0 + X)))**X / (1.0 + X)
h = (1.0 + X + a * (1 - t) * X / t) * m
return V * ((1.0 + X) * h)**(-1.0 / X)
def positivenetworth_target(VB, V, a, A, X):
"""protected bond covenant with positive net worth requirement"""
return VB - A - ((1.0 - a) * VB - A) * (VB / V)**X
# (1a) Net Cash Payout 'X'
if d is None:
# Net cash Payout if 100% retained profits (p.1218)
X = (2.0 * r) / (s * s)
else:
# net cash payout proportional to the firm's asset value
# for a given required dividend rate (p.1241)
X = netcashpayout_by_dividend(r, d, s)
# (1b) Optimal coupon of the endogenous bankruptcy
# case (p.1222ff.)
if C is None:
C = optimal_coupon(V, r, a, t, X)
# (1c) Wert der Annuitaet
A = C / r
# (2a) Level of bankruptcy VB (pp.1222)
VB = (1.0 - t) * C / (r + 0.5 * s * s)
# (2b) protected bond covenant with positive net worth
# requirement (pp.1233)
if PosEq:
from scipy.optimize import fsolve
VB = fsolve(func=positivenetworth_target, x0=VB, args=(V, a, A, X))
VB = float(VB)
# (3a) PV of $1 if bankruptcy (p.1219)
PV = (VB / V)**X
# (3b) Value of debt (p.1219)
D = A + ((1.0 - a) * VB - A) * PV
# (3c) Value of bankruptcy costs (p.1220)
B = a * VB * PV
# (3d) Value of tax benefit (p.1220)
T = t * A * (1.0 - PV)
# (3e) Total value of the firm, or Value of levered company (p.1221)
W = V + T - B
# (3f) Value of equity (p.1221)
E = W - D
# (4a) Leverage Ratio
lr = D / W
# (4b) Yield on Debt
yld = C / D
# (4c) Yield Spread in bp
sprd = (yld - r) * 10000.0
# return results
return D, E, W, T, B, VB, PV, lr, yld, sprd, X, C, A | [
"def",
"leland94",
"(",
"V",
",",
"s",
",",
"r",
",",
"a",
",",
"t",
",",
"C",
"=",
"None",
",",
"d",
"=",
"None",
",",
"PosEq",
"=",
"False",
")",
":",
"# subfunction for",
"def",
"netcashpayout_by_dividend",
"(",
"r",
",",
"d",
",",
"s",
")",
... | 28.125 | 21.013393 |
def keybd_event(bVk: int, bScan: int, dwFlags: int, dwExtraInfo: int) -> None:
"""keybd_event from Win32."""
ctypes.windll.user32.keybd_event(bVk, bScan, dwFlags, dwExtraInfo) | [
"def",
"keybd_event",
"(",
"bVk",
":",
"int",
",",
"bScan",
":",
"int",
",",
"dwFlags",
":",
"int",
",",
"dwExtraInfo",
":",
"int",
")",
"->",
"None",
":",
"ctypes",
".",
"windll",
".",
"user32",
".",
"keybd_event",
"(",
"bVk",
",",
"bScan",
",",
"... | 60.333333 | 22.666667 |
def fit_mcmc(self,nwalkers=300,nburn=200,niter=100,
p0=None,initial_burn=None,
ninitial=100, loglike_kwargs=None,
**kwargs):
"""Fits stellar model using MCMC.
:param nwalkers: (optional)
Number of walkers to pass to :class:`emcee.EnsembleSampler`.
Default is 200.
:param nburn: (optional)
Number of iterations for "burn-in." Default is 100.
:param niter: (optional)
Number of for-keeps iterations for MCMC chain.
Default is 200.
:param p0: (optional)
Initial parameters for emcee. If not provided, then chains
will behave according to whether inital_burn is set.
:param initial_burn: (optional)
If `True`, then initialize walkers first with a random initialization,
then cull the walkers, keeping only those with > 15% acceptance
rate, then reinitialize sampling. If `False`, then just do
normal burn-in. Default is `None`, which will be set to `True` if
fitting for distance (i.e., if there are apparent magnitudes as
properties of the model), and `False` if not.
:param ninitial: (optional)
Number of iterations to test walkers for acceptance rate before
re-initializing.
:param loglike_args:
Any arguments to pass to :func:`StarModel.loglike`, such
as what priors to use.
:param **kwargs:
Additional keyword arguments passed to :class:`emcee.EnsembleSampler`
constructor.
:return:
:class:`emcee.EnsembleSampler` object.
"""
#clear any saved _samples
if self._samples is not None:
self._samples = None
if self.fit_for_distance:
npars = 5
if initial_burn is None:
initial_burn = True
else:
if initial_burn is None:
initial_burn = False
npars = 3
if p0 is None:
m0,age0,feh0 = self.ic.random_points(nwalkers)
d0 = 10**(rand.uniform(0,np.log10(self.max_distance),size=nwalkers))
AV0 = rand.uniform(0,self.maxAV,size=nwalkers)
if self.fit_for_distance:
p0 = np.array([m0,age0,feh0,d0,AV0]).T
else:
p0 = np.array([m0,age0,feh0]).T
if initial_burn:
sampler = emcee.EnsembleSampler(nwalkers,npars,self.lnpost,
**kwargs)
#ninitial = 300 #should this be parameter?
pos, prob, state = sampler.run_mcmc(p0, ninitial)
wokinds = np.where((sampler.naccepted/ninitial > 0.15) &
(sampler.naccepted/ninitial < 0.4))[0]
i=1
while len(wokinds)==0:
thresh = 0.15 - i*0.02
if thresh < 0:
raise RuntimeError('Initial burn has no acceptance?')
wokinds = np.where((sampler.naccepted/ninitial > thresh) &
(sampler.naccepted/ninitial < 0.4))[0]
i += 1
inds = rand.randint(len(wokinds),size=nwalkers)
p0 = sampler.chain[wokinds[inds],:,:].mean(axis=1) #reset p0
p0 *= (1 + rand.normal(size=p0.shape)*0.01)
else:
p0 = np.array(p0)
p0 = rand.normal(size=(nwalkers,npars))*0.01 + p0.T[None,:]
if self.fit_for_distance:
p0[:,3] *= (1 + rand.normal(size=nwalkers)*0.5)
sampler = emcee.EnsembleSampler(nwalkers,npars,self.lnpost)
pos, prob, state = sampler.run_mcmc(p0, nburn)
sampler.reset()
sampler.run_mcmc(pos, niter, rstate0=state)
self._sampler = sampler
return sampler | [
"def",
"fit_mcmc",
"(",
"self",
",",
"nwalkers",
"=",
"300",
",",
"nburn",
"=",
"200",
",",
"niter",
"=",
"100",
",",
"p0",
"=",
"None",
",",
"initial_burn",
"=",
"None",
",",
"ninitial",
"=",
"100",
",",
"loglike_kwargs",
"=",
"None",
",",
"*",
"*... | 39.646465 | 20.737374 |
def check_multi_dimensional_coords(self, ds):
'''
Checks that no multidimensional coordinate shares a name with its
dimensions.
Chapter 5 paragraph 4
We recommend that the name of a [multidimensional coordinate] should
not match the name of any of its dimensions.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
'''
ret_val = []
# This can only apply to auxiliary coordinate variables
for coord in self._find_aux_coord_vars(ds):
variable = ds.variables[coord]
if variable.ndim < 2:
continue
not_matching = TestCtx(BaseCheck.MEDIUM, self.section_titles['5'])
not_matching.assert_true(coord not in variable.dimensions,
'{} shares the same name as one of its dimensions'
''.format(coord))
ret_val.append(not_matching.to_result())
return ret_val | [
"def",
"check_multi_dimensional_coords",
"(",
"self",
",",
"ds",
")",
":",
"ret_val",
"=",
"[",
"]",
"# This can only apply to auxiliary coordinate variables",
"for",
"coord",
"in",
"self",
".",
"_find_aux_coord_vars",
"(",
"ds",
")",
":",
"variable",
"=",
"ds",
"... | 35.206897 | 24.103448 |
def _do_http(opts, profile='default'):
'''
Make the http request and return the data
'''
ret = {}
url = __salt__['config.get']('modjk:{0}:url'.format(profile), '')
user = __salt__['config.get']('modjk:{0}:user'.format(profile), '')
passwd = __salt__['config.get']('modjk:{0}:pass'.format(profile), '')
realm = __salt__['config.get']('modjk:{0}:realm'.format(profile), '')
timeout = __salt__['config.get']('modjk:{0}:timeout'.format(profile), '')
if not url:
raise Exception('missing url in profile {0}'.format(profile))
if user and passwd:
auth = _auth(url=url, realm=realm, user=user, passwd=passwd)
_install_opener(auth)
url += '?{0}'.format(_urlencode(opts))
for line in _urlopen(url, timeout=timeout).read().splitlines():
splt = line.split('=', 1)
if splt[0] in ret:
ret[splt[0]] += ',{0}'.format(splt[1])
else:
ret[splt[0]] = splt[1]
return ret | [
"def",
"_do_http",
"(",
"opts",
",",
"profile",
"=",
"'default'",
")",
":",
"ret",
"=",
"{",
"}",
"url",
"=",
"__salt__",
"[",
"'config.get'",
"]",
"(",
"'modjk:{0}:url'",
".",
"format",
"(",
"profile",
")",
",",
"''",
")",
"user",
"=",
"__salt__",
"... | 31.833333 | 25.766667 |
def find_all(query: Query=None) -> List['ApiKey']:
"""
List all API keys.
"""
return [ApiKey.from_db(key) for key in db.get_keys(query)] | [
"def",
"find_all",
"(",
"query",
":",
"Query",
"=",
"None",
")",
"->",
"List",
"[",
"'ApiKey'",
"]",
":",
"return",
"[",
"ApiKey",
".",
"from_db",
"(",
"key",
")",
"for",
"key",
"in",
"db",
".",
"get_keys",
"(",
"query",
")",
"]"
] | 32.8 | 10 |
def changes_since(self, domain, date_or_datetime):
"""
Gets the changes for a domain since the specified date/datetime.
The date can be one of:
- a Python datetime object
- a Python date object
- a string in the format 'YYYY-MM-YY HH:MM:SS'
- a string in the format 'YYYY-MM-YY'
It returns a list of dicts, whose keys depend on the specific change
that was made. A simple example of such a change dict:
{u'accountId': 000000,
u'action': u'update',
u'changeDetails': [{u'field': u'serial_number',
u'newValue': u'1354038941',
u'originalValue': u'1354038940'},
{u'field': u'updated_at',
u'newValue': u'Tue Nov 27 17:55:41 UTC 2012',
u'originalValue': u'Tue Nov 27 17:55:40 UTC 2012'}],
u'domain': u'example.com',
u'targetId': 00000000,
u'targetType': u'Domain'}
"""
domain_id = utils.get_id(domain)
dt = utils.iso_time_string(date_or_datetime, show_tzinfo=True)
uri = "/domains/%s/changes?since=%s" % (domain_id, dt)
resp, body = self._retry_get(uri)
return body.get("changes", []) | [
"def",
"changes_since",
"(",
"self",
",",
"domain",
",",
"date_or_datetime",
")",
":",
"domain_id",
"=",
"utils",
".",
"get_id",
"(",
"domain",
")",
"dt",
"=",
"utils",
".",
"iso_time_string",
"(",
"date_or_datetime",
",",
"show_tzinfo",
"=",
"True",
")",
... | 42.724138 | 13 |
def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume') | [
"def",
"get_volume",
"(",
"self",
")",
":",
"self",
".",
"request",
"(",
"EP_GET_VOLUME",
")",
"return",
"0",
"if",
"self",
".",
"last_response",
"is",
"None",
"else",
"self",
".",
"last_response",
".",
"get",
"(",
"'payload'",
")",
".",
"get",
"(",
"'... | 48 | 20.75 |
def delay(self):
'''How long to wait before the next check'''
if self._last_checked:
return self._interval - (time.time() - self._last_checked)
return self._interval | [
"def",
"delay",
"(",
"self",
")",
":",
"if",
"self",
".",
"_last_checked",
":",
"return",
"self",
".",
"_interval",
"-",
"(",
"time",
".",
"time",
"(",
")",
"-",
"self",
".",
"_last_checked",
")",
"return",
"self",
".",
"_interval"
] | 39.4 | 17.4 |
def pre_order(root):
# type: (Nonterminal) -> Generator
"""
Perform pre-order traversing. Expects tree like structure.
Traverse in DFS fashion.
:param root: Root tree of the parsed tree.
:return: Sequence of nodes to traverse.
"""
def traverse_rule(item, callback):
yield item
for el in item.to_symbols:
yield callback(el)
def traverse_nonterminal(item, callback):
yield item
yield callback(item.to_rule)
def traverse_terminal(item, callback):
yield item
return Traversing.traverse_separated(root, traverse_rule, traverse_nonterminal, traverse_terminal) | [
"def",
"pre_order",
"(",
"root",
")",
":",
"# type: (Nonterminal) -> Generator",
"def",
"traverse_rule",
"(",
"item",
",",
"callback",
")",
":",
"yield",
"item",
"for",
"el",
"in",
"item",
".",
"to_symbols",
":",
"yield",
"callback",
"(",
"el",
")",
"def",
... | 31.818182 | 17.181818 |
def command(self, dbname, spec, slave_ok=False,
read_preference=ReadPreference.PRIMARY,
codec_options=DEFAULT_CODEC_OPTIONS, check=True,
allowable_errors=None, check_keys=False,
read_concern=None,
write_concern=None,
parse_write_concern_error=False,
collation=None,
session=None,
client=None,
retryable_write=False,
publish_events=True,
user_fields=None):
"""Execute a command or raise an error.
:Parameters:
- `dbname`: name of the database on which to run the command
- `spec`: a command document as a dict, SON, or mapping object
- `slave_ok`: whether to set the SlaveOkay wire protocol bit
- `read_preference`: a read preference
- `codec_options`: a CodecOptions instance
- `check`: raise OperationFailure if there are errors
- `allowable_errors`: errors to ignore if `check` is True
- `check_keys`: if True, check `spec` for invalid keys
- `read_concern`: The read concern for this command.
- `write_concern`: The write concern for this command.
- `parse_write_concern_error`: Whether to parse the
``writeConcernError`` field in the command response.
- `collation`: The collation for this command.
- `session`: optional ClientSession instance.
- `client`: optional MongoClient for gossipping $clusterTime.
- `retryable_write`: True if this command is a retryable write.
- `publish_events`: Should we publish events for this command?
- `user_fields` (optional): Response fields that should be decoded
using the TypeDecoders from codec_options, passed to
bson._decode_all_selective.
"""
self.validate_session(client, session)
session = _validate_session_write_concern(session, write_concern)
# Ensure command name remains in first place.
if not isinstance(spec, ORDERED_TYPES):
spec = SON(spec)
if (read_concern and self.max_wire_version < 4
and not read_concern.ok_for_legacy):
raise ConfigurationError(
'read concern level of %s is not valid '
'with a max wire version of %d.'
% (read_concern.level, self.max_wire_version))
if not (write_concern is None or write_concern.acknowledged or
collation is None):
raise ConfigurationError(
'Collation is unsupported for unacknowledged writes.')
if (self.max_wire_version >= 5 and
write_concern and
not write_concern.is_server_default):
spec['writeConcern'] = write_concern.document
elif self.max_wire_version < 5 and collation is not None:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use a collation.')
if session:
session._apply_to(spec, retryable_write, read_preference)
self.send_cluster_time(spec, session, client)
listeners = self.listeners if publish_events else None
unacknowledged = write_concern and not write_concern.acknowledged
if self.op_msg_enabled:
self._raise_if_not_writable(unacknowledged)
try:
return command(self.sock, dbname, spec, slave_ok,
self.is_mongos, read_preference, codec_options,
session, client, check, allowable_errors,
self.address, check_keys, listeners,
self.max_bson_size, read_concern,
parse_write_concern_error=parse_write_concern_error,
collation=collation,
compression_ctx=self.compression_context,
use_op_msg=self.op_msg_enabled,
unacknowledged=unacknowledged,
user_fields=user_fields)
except OperationFailure:
raise
# Catch socket.error, KeyboardInterrupt, etc. and close ourselves.
except BaseException as error:
self._raise_connection_failure(error) | [
"def",
"command",
"(",
"self",
",",
"dbname",
",",
"spec",
",",
"slave_ok",
"=",
"False",
",",
"read_preference",
"=",
"ReadPreference",
".",
"PRIMARY",
",",
"codec_options",
"=",
"DEFAULT_CODEC_OPTIONS",
",",
"check",
"=",
"True",
",",
"allowable_errors",
"="... | 49.686047 | 18.313953 |
def get_plugin_info(plugin):
"""
Fetch information about the given package on PyPI and return it as a dict.
If the package cannot be found on PyPI, :exc:`NameError` will be raised.
"""
url = 'https://pypi.python.org/pypi/{}/json'.format(plugin)
try:
resp = request.urlopen(url)
except HTTPError as e:
if e.code == 404:
raise NameError("Plugin {} could not be found.".format(plugin))
else:
raise ValueError(
"Checking plugin status on {} returned HTTP code {}".format(
url, resp.getcode()
)
)
try:
json_resp = json.loads(resp.read().decode())
# Catch ValueError instead of JSONDecodeError which is only available in
# Python 3.5+
except ValueError:
raise ValueError(
"Could not decode JSON info for plugin at {}".format(url)
)
return json_resp['info'] | [
"def",
"get_plugin_info",
"(",
"plugin",
")",
":",
"url",
"=",
"'https://pypi.python.org/pypi/{}/json'",
".",
"format",
"(",
"plugin",
")",
"try",
":",
"resp",
"=",
"request",
".",
"urlopen",
"(",
"url",
")",
"except",
"HTTPError",
"as",
"e",
":",
"if",
"e... | 31.758621 | 22.862069 |
def parse_consumer_offsets(cls, json_file):
"""Parse current offsets from json-file."""
with open(json_file, 'r') as consumer_offsets_json:
try:
parsed_offsets = {}
parsed_offsets_data = json.load(consumer_offsets_json)
# Create new dict with partition-keys as integers
parsed_offsets['groupid'] = parsed_offsets_data['groupid']
parsed_offsets['offsets'] = {}
for topic, topic_data in six.iteritems(parsed_offsets_data['offsets']):
parsed_offsets['offsets'][topic] = {}
for partition, offset in six.iteritems(topic_data):
parsed_offsets['offsets'][topic][int(partition)] = offset
return parsed_offsets
except ValueError:
print(
"Error: Given consumer-data json data-file {file} could not be "
"parsed".format(file=json_file),
file=sys.stderr,
)
raise | [
"def",
"parse_consumer_offsets",
"(",
"cls",
",",
"json_file",
")",
":",
"with",
"open",
"(",
"json_file",
",",
"'r'",
")",
"as",
"consumer_offsets_json",
":",
"try",
":",
"parsed_offsets",
"=",
"{",
"}",
"parsed_offsets_data",
"=",
"json",
".",
"load",
"(",... | 50.190476 | 19.761905 |
def weights(self, matrix_id=0):
"""
Return the frame for the respective weight matrix.
:param: matrix_id: an integer, ranging from 0 to number of layers, that specifies the weight matrix to return.
:returns: an H2OFrame which represents the weight matrix identified by matrix_id
"""
return {model.model_id: model.weights(matrix_id) for model in self.models} | [
"def",
"weights",
"(",
"self",
",",
"matrix_id",
"=",
"0",
")",
":",
"return",
"{",
"model",
".",
"model_id",
":",
"model",
".",
"weights",
"(",
"matrix_id",
")",
"for",
"model",
"in",
"self",
".",
"models",
"}"
] | 49.875 | 29.375 |
def dframe(self, dimensions=None, multi_index=False):
"""Convert dimension values to DataFrame.
Returns a pandas dataframe of columns along each dimension,
either completely flat or indexed by key dimensions.
Args:
dimensions: Dimensions to return as columns
multi_index: Convert key dimensions to (multi-)index
Returns:
DataFrame of columns corresponding to each dimension
"""
import pandas as pd
if dimensions is None:
dimensions = [d.name for d in self.dimensions()]
else:
dimensions = [self.get_dimension(d, strict=True).name for d in dimensions]
column_names = dimensions
dim_vals = OrderedDict([(dim, self.dimension_values(dim)) for dim in column_names])
df = pd.DataFrame(dim_vals)
if multi_index:
df = df.set_index([d for d in dimensions if d in self.kdims])
return df | [
"def",
"dframe",
"(",
"self",
",",
"dimensions",
"=",
"None",
",",
"multi_index",
"=",
"False",
")",
":",
"import",
"pandas",
"as",
"pd",
"if",
"dimensions",
"is",
"None",
":",
"dimensions",
"=",
"[",
"d",
".",
"name",
"for",
"d",
"in",
"self",
".",
... | 39.166667 | 22.75 |
def require(self, name):
"""Return the value of the requested parameter or raise an error."""
value = self.get(name)
if value is None:
raise TypeError(
"{0} requires the parameter '{1}'.".format(
self.__class__, name
)
)
return value | [
"def",
"require",
"(",
"self",
",",
"name",
")",
":",
"value",
"=",
"self",
".",
"get",
"(",
"name",
")",
"if",
"value",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"\"{0} requires the parameter '{1}'.\"",
".",
"format",
"(",
"self",
".",
"__class__",
... | 33.2 | 14.2 |
async def start(self):
"""Start the websocket server.
When this method returns, the websocket server will be running and
the port property of this class will have its assigned port number.
This method should be called only once in the lifetime of the server
and must be paired with a call to stop() to cleanly release the
server's resources.
"""
if self._server_task is not None:
self._logger.debug("AsyncValidatingWSServer.start() called twice, ignoring")
return
started_signal = self._loop.create_future()
self._server_task = self._loop.add_task(self._run_server_task(started_signal))
await started_signal
if self.port is None:
self.port = started_signal.result() | [
"async",
"def",
"start",
"(",
"self",
")",
":",
"if",
"self",
".",
"_server_task",
"is",
"not",
"None",
":",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"AsyncValidatingWSServer.start() called twice, ignoring\"",
")",
"return",
"started_signal",
"=",
"self",
".... | 35.545455 | 25.681818 |
def get_or_create(self, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
model = self.get(**kwargs)
is_created = False
if model is None:
is_created = True
model = self._model_class()
for key, value in list(kwargs.items()):
setattr(model, key, value)
return model, is_created | [
"def",
"get_or_create",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"model",
"=",
"self",
".",
"get",
"(",
"*",
"*",
"kwargs",
")",
"is_created",
"=",
"False",
"if",
"model",
"is",
"None",
":",
"is_created",
"=",
"True",
"model",
"=",
"self",
"... | 28.578947 | 19.105263 |
def detect_log_config(arguments):
"""
Detect access log config (path and format) of nginx. Offer user to select if multiple access logs are detected.
:return: path and format of detected / selected access log
"""
config = arguments['--config']
if config is None:
config = detect_config_path()
if not os.path.exists(config):
error_exit('Nginx config file not found: %s' % config)
with open(config) as f:
config_str = f.read()
access_logs = dict(get_access_logs(config_str))
if not access_logs:
error_exit('Access log file is not provided and ngxtop cannot detect it from your config file (%s).' % config)
log_formats = dict(get_log_formats(config_str))
if len(access_logs) == 1:
log_path, format_name = list(access_logs.items())[0]
if format_name == 'combined':
return log_path, LOG_FORMAT_COMBINED
if format_name not in log_formats:
error_exit('Incorrect format name set in config for access log file "%s"' % log_path)
return log_path, log_formats[format_name]
# multiple access logs configured, offer to select one
print('Multiple access logs detected in configuration:')
log_path = choose_one(list(access_logs.keys()), 'Select access log file to process: ')
format_name = access_logs[log_path]
if format_name not in log_formats:
error_exit('Incorrect format name set in config for access log file "%s"' % log_path)
return log_path, log_formats[format_name] | [
"def",
"detect_log_config",
"(",
"arguments",
")",
":",
"config",
"=",
"arguments",
"[",
"'--config'",
"]",
"if",
"config",
"is",
"None",
":",
"config",
"=",
"detect_config_path",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"config",
")... | 45.363636 | 20.575758 |
def _show(self):
""" Return a list of unsorted bridge details. """
p = _runshell([brctlexe, 'show', self.name],
"Could not show %s." % self.name)
return p.stdout.read().split()[7:] | [
"def",
"_show",
"(",
"self",
")",
":",
"p",
"=",
"_runshell",
"(",
"[",
"brctlexe",
",",
"'show'",
",",
"self",
".",
"name",
"]",
",",
"\"Could not show %s.\"",
"%",
"self",
".",
"name",
")",
"return",
"p",
".",
"stdout",
".",
"read",
"(",
")",
"."... | 42.6 | 8.6 |
def _get_choices(self, gandi):
""" Internal method to get choices list """
image_list = []
for item in gandi.image.list():
label = item['label']
if item['visibility'] == 'deprecated':
label = '*%s' % label
image_list.append(label)
disk_list = [item['name'] for item in gandi.disk.list_create()]
return sorted(tuple(set(image_list))) + disk_list | [
"def",
"_get_choices",
"(",
"self",
",",
"gandi",
")",
":",
"image_list",
"=",
"[",
"]",
"for",
"item",
"in",
"gandi",
".",
"image",
".",
"list",
"(",
")",
":",
"label",
"=",
"item",
"[",
"'label'",
"]",
"if",
"item",
"[",
"'visibility'",
"]",
"=="... | 42.7 | 10 |
def user(self) -> str:
"""Generate a random user.
:return: Path to user.
:Example:
/home/oretha
"""
user = self.random.choice(USERNAMES)
user = user.capitalize() if 'win' in self.platform else user.lower()
return str(self._pathlib_home / user) | [
"def",
"user",
"(",
"self",
")",
"->",
"str",
":",
"user",
"=",
"self",
".",
"random",
".",
"choice",
"(",
"USERNAMES",
")",
"user",
"=",
"user",
".",
"capitalize",
"(",
")",
"if",
"'win'",
"in",
"self",
".",
"platform",
"else",
"user",
".",
"lower... | 27.545455 | 17.454545 |
def read_namespaced_replication_controller_scale(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_replication_controller_scale # noqa: E501
read scale of the specified ReplicationController # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_replication_controller_scale(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_replication_controller_scale_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.read_namespaced_replication_controller_scale_with_http_info(name, namespace, **kwargs) # noqa: E501
return data | [
"def",
"read_namespaced_replication_controller_scale",
"(",
"self",
",",
"name",
",",
"namespace",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")"... | 54.913043 | 29.086957 |
def move(self, path, dest):
"""
Use snakebite.rename, if available.
:param path: source file(s)
:type path: either a string or sequence of strings
:param dest: destination file (single input) or directory (multiple)
:type dest: string
:return: list of renamed items
"""
parts = dest.rstrip('/').split('/')
if len(parts) > 1:
dir_path = '/'.join(parts[0:-1])
if not self.exists(dir_path):
self.mkdir(dir_path, parents=True)
return list(self.get_bite().rename(self.list_path(path), dest)) | [
"def",
"move",
"(",
"self",
",",
"path",
",",
"dest",
")",
":",
"parts",
"=",
"dest",
".",
"rstrip",
"(",
"'/'",
")",
".",
"split",
"(",
"'/'",
")",
"if",
"len",
"(",
"parts",
")",
">",
"1",
":",
"dir_path",
"=",
"'/'",
".",
"join",
"(",
"par... | 37.5 | 12.125 |
def log(self, n=None, template=None):
"""
Run the repository log command
Returns:
str: output of log command (``bzr log -l <n>``)
"""
cmd = ['bzr', 'log']
if n:
cmd.append('-l%d' % n)
return self.sh(cmd, shell=False) | [
"def",
"log",
"(",
"self",
",",
"n",
"=",
"None",
",",
"template",
"=",
"None",
")",
":",
"cmd",
"=",
"[",
"'bzr'",
",",
"'log'",
"]",
"if",
"n",
":",
"cmd",
".",
"append",
"(",
"'-l%d'",
"%",
"n",
")",
"return",
"self",
".",
"sh",
"(",
"cmd"... | 26.090909 | 12.090909 |
def _drop_indices(self):
"""Drops the database indices relating to n-grams."""
self._logger.info('Dropping database indices')
self._conn.execute(constants.DROP_TEXTNGRAM_INDEX_SQL)
self._logger.info('Finished dropping database indices') | [
"def",
"_drop_indices",
"(",
"self",
")",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"'Dropping database indices'",
")",
"self",
".",
"_conn",
".",
"execute",
"(",
"constants",
".",
"DROP_TEXTNGRAM_INDEX_SQL",
")",
"self",
".",
"_logger",
".",
"info",
"(... | 52.8 | 15 |
def segment_to_line(document, coords):
"polyline with 2 vertices using <line> tag"
return setattribs(
document.createElement('line'),
x1 = coords[0],
y1 = coords[1],
x2 = coords[2],
y2 = coords[3],
) | [
"def",
"segment_to_line",
"(",
"document",
",",
"coords",
")",
":",
"return",
"setattribs",
"(",
"document",
".",
"createElement",
"(",
"'line'",
")",
",",
"x1",
"=",
"coords",
"[",
"0",
"]",
",",
"y1",
"=",
"coords",
"[",
"1",
"]",
",",
"x2",
"=",
... | 22.666667 | 18.222222 |
async def main_loop(loop, password, user, ip): # pylint: disable=invalid-name
"""Main loop."""
async with aiohttp.ClientSession(loop=loop) as session:
VAR['sma'] = pysma.SMA(session, ip, password=password, group=user)
await VAR['sma'].new_session()
if VAR['sma'].sma_sid is None:
_LOGGER.info("No session ID")
return
_LOGGER.info("NEW SID: %s", VAR['sma'].sma_sid)
VAR['running'] = True
cnt = 5
sensors = pysma.Sensors()
while VAR.get('running'):
await VAR['sma'].read(sensors)
print_table(sensors)
cnt -= 1
if cnt == 0:
break
await asyncio.sleep(2)
await VAR['sma'].close_session() | [
"async",
"def",
"main_loop",
"(",
"loop",
",",
"password",
",",
"user",
",",
"ip",
")",
":",
"# pylint: disable=invalid-name",
"async",
"with",
"aiohttp",
".",
"ClientSession",
"(",
"loop",
"=",
"loop",
")",
"as",
"session",
":",
"VAR",
"[",
"'sma'",
"]",
... | 32.347826 | 16.26087 |
def build_reverse_dictionary(word_to_id):
"""Given a dictionary that maps word to integer id.
Returns a reverse dictionary that maps a id to word.
Parameters
----------
word_to_id : dictionary
that maps word to ID.
Returns
--------
dictionary
A dictionary that maps IDs to words.
"""
reverse_dictionary = dict(zip(word_to_id.values(), word_to_id.keys()))
return reverse_dictionary | [
"def",
"build_reverse_dictionary",
"(",
"word_to_id",
")",
":",
"reverse_dictionary",
"=",
"dict",
"(",
"zip",
"(",
"word_to_id",
".",
"values",
"(",
")",
",",
"word_to_id",
".",
"keys",
"(",
")",
")",
")",
"return",
"reverse_dictionary"
] | 25.117647 | 20.294118 |
def cp_objectinfo_worker(task):
'''This is a parallel worker for `parallel_update_cp_objectinfo`.
Parameters
----------
task : tuple
- task[0] = checkplot pickle file
- task[1] = kwargs
Returns
-------
str
The name of the checkplot file that was updated. None if the update
fails for some reason.
'''
cpf, cpkwargs = task
try:
newcpf = update_checkplot_objectinfo(cpf, **cpkwargs)
return newcpf
except Exception as e:
LOGEXCEPTION('failed to update objectinfo for %s' % cpf)
return None | [
"def",
"cp_objectinfo_worker",
"(",
"task",
")",
":",
"cpf",
",",
"cpkwargs",
"=",
"task",
"try",
":",
"newcpf",
"=",
"update_checkplot_objectinfo",
"(",
"cpf",
",",
"*",
"*",
"cpkwargs",
")",
"return",
"newcpf",
"except",
"Exception",
"as",
"e",
":",
"LOG... | 19.827586 | 27.758621 |
def _rest_request_to_json(self, address, object_path, service_name, requests_config, tags, *args, **kwargs):
"""
Query the given URL and return the JSON response
"""
response = self._rest_request(address, object_path, service_name, requests_config, tags, *args, **kwargs)
try:
response_json = response.json()
except JSONDecodeError as e:
self.service_check(
service_name,
AgentCheck.CRITICAL,
tags=['url:%s' % self._get_url_base(address)] + tags,
message='JSON Parse failed: {0}'.format(e),
)
raise
return response_json | [
"def",
"_rest_request_to_json",
"(",
"self",
",",
"address",
",",
"object_path",
",",
"service_name",
",",
"requests_config",
",",
"tags",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"response",
"=",
"self",
".",
"_rest_request",
"(",
"address",
"... | 35.368421 | 23.473684 |
def redirectLoggerStreamHandlers(oldStream, newStream):
"""Redirect the stream of a stream handler to a different stream
"""
for handler in list(logger.handlers): #Remove old handlers
if handler.stream == oldStream:
handler.close()
logger.removeHandler(handler)
for handler in logger.handlers: #Do not add a duplicate handler
if handler.stream == newStream:
return
logger.addHandler(logging.StreamHandler(newStream)) | [
"def",
"redirectLoggerStreamHandlers",
"(",
"oldStream",
",",
"newStream",
")",
":",
"for",
"handler",
"in",
"list",
"(",
"logger",
".",
"handlers",
")",
":",
"#Remove old handlers",
"if",
"handler",
".",
"stream",
"==",
"oldStream",
":",
"handler",
".",
"clos... | 43.363636 | 10.727273 |
def earthsun_distance(moment):
r'''Calculates the distance between the earth and the sun as a function
of date and time. Uses the Reda and Andreas (2004) model described in [1]_,
originally incorporated into the excellent
`pvlib library <https://github.com/pvlib/pvlib-python>`_
Parameters
----------
moment : datetime
Time and date for the calculation, in UTC time (or GMT, which is
almost the same thing); not local time, [-]
Returns
-------
distance : float
Distance between the center of the earth and the center of the sun,
[m]
Examples
--------
>>> earthsun_distance(datetime(2003, 10, 17, 13, 30, 30))
149090925951.18338
The distance at perihelion, which occurs at 4:21 according to this
algorithm. The real value is 04:38 (January 2nd).
>>> earthsun_distance(datetime(2013, 1, 2, 4, 21, 50))
147098089490.67123
The distance at aphelion, which occurs at 14:44 according to this
algorithm. The real value is dead on - 14:44 (July 5).
>>> earthsun_distance(datetime(2013, 7, 5, 14, 44, 51, 0))
152097354414.36044
Notes
-----
This function is quite accurate. The difference comes from the impact of
the moon.
Note this function is not continuous; the sun-earth distance is not
sufficiently accurately modeled for the change to be continuous throughout
each day.
References
----------
.. [1] Reda, Ibrahim, and Afshin Andreas. "Solar Position Algorithm for
Solar Radiation Applications." Solar Energy 76, no. 5 (January 1, 2004):
577-89. https://doi.org/10.1016/j.solener.2003.12.003.
'''
from fluids.optional import spa
delta_t = spa.calculate_deltat(moment.year, moment.month)
import calendar
unixtime = calendar.timegm(moment.timetuple())
# Convert datetime object to unixtime
return float(spa.earthsun_distance(unixtime, delta_t=delta_t))*au | [
"def",
"earthsun_distance",
"(",
"moment",
")",
":",
"from",
"fluids",
".",
"optional",
"import",
"spa",
"delta_t",
"=",
"spa",
".",
"calculate_deltat",
"(",
"moment",
".",
"year",
",",
"moment",
".",
"month",
")",
"import",
"calendar",
"unixtime",
"=",
"c... | 34.982143 | 26.303571 |
def set_indent(TokenClass, implicit=False):
"""Set the previously saved indentation level."""
def callback(lexer, match, context):
text = match.group()
if context.indent < context.next_indent:
context.indent_stack.append(context.indent)
context.indent = context.next_indent
if not implicit:
context.next_indent += len(text)
yield match.start(), TokenClass, text
context.pos = match.end()
return callback | [
"def",
"set_indent",
"(",
"TokenClass",
",",
"implicit",
"=",
"False",
")",
":",
"def",
"callback",
"(",
"lexer",
",",
"match",
",",
"context",
")",
":",
"text",
"=",
"match",
".",
"group",
"(",
")",
"if",
"context",
".",
"indent",
"<",
"context",
".... | 40 | 8.25 |
def recorded(self):
"""Return if the stream is a recording."""
if self.tune and self.tune.get('@src'):
return True if self.tune.get('@src').startswith('mbr') else False
else:
raise PyMediaroomError("No information in <node> about @src") | [
"def",
"recorded",
"(",
"self",
")",
":",
"if",
"self",
".",
"tune",
"and",
"self",
".",
"tune",
".",
"get",
"(",
"'@src'",
")",
":",
"return",
"True",
"if",
"self",
".",
"tune",
".",
"get",
"(",
"'@src'",
")",
".",
"startswith",
"(",
"'mbr'",
")... | 46.5 | 20.833333 |
def _definition_equivalent_to_call(definition, call):
"""Check if a definition signature is equivalent to a call."""
if definition.kwargs:
same_kw_variadics = definition.kwargs in call.starred_kws
else:
same_kw_variadics = not call.starred_kws
if definition.varargs:
same_args_variadics = definition.varargs in call.starred_args
else:
same_args_variadics = not call.starred_args
same_kwonlyargs = all(kw in call.kws for kw in definition.kwonlyargs)
same_args = definition.args == call.args
no_additional_kwarg_arguments = True
if call.kws:
for keyword in call.kws:
is_arg = keyword in call.args
is_kwonly = keyword in definition.kwonlyargs
if not is_arg and not is_kwonly:
# Maybe this argument goes into **kwargs,
# or it is an extraneous argument.
# In any case, the signature is different than
# the call site, which stops our search.
no_additional_kwarg_arguments = False
break
return all(
(
same_args,
same_kwonlyargs,
same_args_variadics,
same_kw_variadics,
no_additional_kwarg_arguments,
)
) | [
"def",
"_definition_equivalent_to_call",
"(",
"definition",
",",
"call",
")",
":",
"if",
"definition",
".",
"kwargs",
":",
"same_kw_variadics",
"=",
"definition",
".",
"kwargs",
"in",
"call",
".",
"starred_kws",
"else",
":",
"same_kw_variadics",
"=",
"not",
"cal... | 35.942857 | 17.6 |
def boundplot(results, dims, it=None, idx=None, prior_transform=None,
periodic=None, ndraws=5000, color='gray', plot_kwargs=None,
labels=None, label_kwargs=None, max_n_ticks=5,
use_math_text=False, show_live=False, live_color='darkviolet',
live_kwargs=None, span=None, fig=None):
"""
Return the bounding distribution used to propose either (1) live points
at a given iteration or (2) a specific dead point during
the course of a run, projected onto the two dimensions specified
by `dims`.
Parameters
----------
results : :class:`~dynesty.results.Results` instance
A :class:`~dynesty.results.Results` instance from a nested
sampling run.
dims : length-2 tuple
The dimensions used to plot the bounding.
it : int, optional
If provided, returns the bounding distribution at the specified
iteration of the nested sampling run. **Note that this option and
`idx` are mutually exclusive.**
idx : int, optional
If provided, returns the bounding distribution used to propose the
dead point at the specified iteration of the nested sampling run.
**Note that this option and `it` are mutually exclusive.**
prior_transform : func, optional
The function transforming samples within the unit cube back to samples
in the native model space. If provided, the transformed bounding
distribution will be plotted in the native model space.
periodic : iterable, optional
A list of indices for parameters with periodic boundary conditions.
These parameters *will not* have their positions constrained to be
within the unit cube, enabling smooth behavior for parameters
that may wrap around the edge. It is assumed that their periodicity
is dealt with in the `prior_transform`.
Default is `None` (i.e. no periodic boundary conditions).
ndraws : int, optional
The number of random samples to draw from the bounding distribution
when plotting. Default is `5000`.
color : str, optional
The color of the points randomly sampled from the bounding
distribution. Default is `'gray'`.
plot_kwargs : dict, optional
Extra keyword arguments used when plotting the bounding draws.
labels : iterable with shape (ndim,), optional
A list of names for each parameter. If not provided, the default name
used when plotting will follow :math:`x_i` style.
label_kwargs : dict, optional
Extra keyword arguments that will be sent to the
`~matplotlib.axes.Axes.set_xlabel` and
`~matplotlib.axes.Axes.set_ylabel` methods.
max_n_ticks : int, optional
Maximum number of ticks allowed. Default is `5`.
use_math_text : bool, optional
Whether the axis tick labels for very large/small exponents should be
displayed as powers of 10 rather than using `e`. Default is `False`.
show_live : bool, optional
Whether the live points at a given iteration (for `it`) or
associated with the bounding (for `idx`) should be highlighted.
Default is `False`. In the dynamic case, only the live points
associated with the batch used to construct the relevant bound
are plotted.
live_color : str, optional
The color of the live points. Default is `'darkviolet'`.
live_kwargs : dict, optional
Extra keyword arguments used when plotting the live points.
span : iterable with shape (2,), optional
A list where each element is a length-2 tuple containing
lower and upper bounds. Default is `None` (no bound).
fig : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`), optional
If provided, overplot the draws onto the provided figure.
Otherwise, by default an internal figure is generated.
Returns
-------
bounding_plot : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`)
Output plot of the bounding distribution.
"""
# Initialize values.
if plot_kwargs is None:
plot_kwargs = dict()
if label_kwargs is None:
label_kwargs = dict()
if live_kwargs is None:
live_kwargs = dict()
# Check that either `idx` or `it` has been specified.
if (it is None and idx is None) or (it is not None and idx is not None):
raise ValueError("You must specify either an iteration or an index!")
# Set defaults.
plot_kwargs['marker'] = plot_kwargs.get('marker', 'o')
plot_kwargs['linestyle'] = plot_kwargs.get('linestyle', 'None')
plot_kwargs['markersize'] = plot_kwargs.get('markersize', 1)
plot_kwargs['alpha'] = plot_kwargs.get('alpha', 0.4)
live_kwargs['marker'] = live_kwargs.get('marker', 'o')
live_kwargs['linestyle'] = live_kwargs.get('linestyle', 'None')
live_kwargs['markersize'] = live_kwargs.get('markersize', 1)
# Extract bounding distributions.
try:
bounds = results['bound']
except:
raise ValueError("No bounds were saved in the results!")
nsamps = len(results['samples'])
# Gather non-periodic boundary conditions.
if periodic is not None:
nonperiodic = np.ones(bounds[0].n, dtype='bool')
nonperiodic[periodic] = False
else:
nonperiodic = None
if it is not None:
if it >= nsamps:
raise ValueError("The iteration requested goes beyond the "
"number of iterations in the run.")
# Extract bound iterations.
try:
bound_iter = np.array(results['bound_iter'])
except:
raise ValueError("Cannot reconstruct the bound used at the "
"specified iteration since bound "
"iterations were not saved in the results.")
# Find bound at the specified iteration.
if it == 0:
pidx = 0
else:
pidx = bound_iter[it]
else:
if idx >= nsamps:
raise ValueError("The index requested goes beyond the "
"number of samples in the run.")
try:
samples_bound = results['samples_bound']
except:
raise ValueError("Cannot reconstruct the bound used to "
"compute the specified dead point since "
"sample bound indices were not saved "
"in the results.")
# Grab relevant bound.
pidx = samples_bound[idx]
# Get desired bound.
bound = bounds[pidx]
# Do we want to show the live points at the specified iteration?
# If so, we need to rewind our bound to check.
# (We could also go forward; this is an arbitrary choice.)
if show_live:
try:
# We can only reconstruct the run if the final set of live points
# were added to the results. This is true by default for dynamic
# nested sampling runs but not guaranteeed for standard runs.
nlive = results['nlive']
niter = results['niter']
if nsamps - niter != nlive:
raise ValueError("Cannot reconstruct bound because the "
"final set of live points are not included "
"in the results.")
# Grab our final set of live points (with proper IDs).
samples = results['samples_u']
samples_id = results['samples_id']
ndim = samples.shape[1]
live_u = np.empty((nlive, ndim))
live_u[samples_id[-nlive:]] = samples[-nlive:]
# Find generating bound ID if necessary.
if it is None:
it = results['samples_it'][idx]
# Run our sampling backwards.
for i in range(1, niter - it + 1):
r = -(nlive + i)
uidx = samples_id[r]
live_u[uidx] = samples[r]
except:
# In the dynamic sampling case, we will show the live points used
# during the batch associated with a particular iteration/bound.
batch = results['samples_batch'][it] # select batch
nbatch = results['batch_nlive'][batch] # nlive in the batch
bsel = results['samples_batch'] == batch # select batch
niter_eff = sum(bsel) - nbatch # "effective" iterations in batch
# Grab our final set of live points (with proper IDs).
samples = results['samples_u'][bsel]
samples_id = results['samples_id'][bsel]
samples_id -= min(samples_id) # re-index to start at zero
ndim = samples.shape[1]
live_u = np.empty((nbatch, ndim))
live_u[samples_id[-nbatch:]] = samples[-nbatch:]
# Find generating bound ID if necessary.
if it is None:
it = results['samples_it'][idx]
it_eff = sum(bsel[:it+1]) # effective iteration in batch
# Run our sampling backwards.
for i in range(1, niter_eff - it_eff + 1):
r = -(nbatch + i)
uidx = samples_id[r]
live_u[uidx] = samples[r]
# Draw samples from the bounding distribution.
try:
# If bound is "fixed", go ahead and draw samples from it.
psamps = bound.samples(ndraws)
except:
# If bound is based on the distribution of live points at a
# specific iteration, we need to reconstruct what those were.
if not show_live:
try:
# Only reconstruct the run if we haven't done it already.
nlive = results['nlive']
niter = results['niter']
if nsamps - niter != nlive:
raise ValueError("Cannot reconstruct bound because the "
"final set of live points are not "
"included in the results.")
# Grab our final set of live points (with proper IDs).
samples = results['samples_u']
samples_id = results['samples_id']
ndim = samples.shape[1]
live_u = np.empty((nlive, ndim))
live_u[samples_id[-nlive:]] = samples[-nlive:]
# Run our sampling backwards.
if it is None:
it = results['samples_it'][idx]
for i in range(1, niter - it + 1):
r = -(nlive + i)
uidx = samples_id[r]
live_u[uidx] = samples[r]
except:
raise ValueError("Live point tracking currently not "
"implemented for dynamic sampling results.")
# Construct a KDTree to speed up nearest-neighbor searches.
kdtree = spatial.KDTree(live_u)
# Draw samples.
psamps = bound.samples(ndraws, live_u, kdtree=kdtree)
# Projecting samples to input dimensions and possibly
# the native model space.
if prior_transform is None:
x1, x2 = psamps[:, dims].T
if show_live:
l1, l2 = live_u[:, dims].T
else:
# Remove points outside of the unit cube as appropriate.
sel = [unitcheck(point, nonperiodic) for point in psamps]
vsamps = np.array(list(map(prior_transform, psamps[sel])))
x1, x2 = vsamps[:, dims].T
if show_live:
lsamps = np.array(list(map(prior_transform, live_u)))
l1, l2 = lsamps[:, dims].T
# Setting up default plot layout.
if fig is None:
fig, axes = pl.subplots(1, 1, figsize=(6, 6))
else:
fig, axes = fig
try:
axes.plot()
except:
raise ValueError("Provided axes do not match the required shape "
"for plotting samples.")
# Plotting.
axes.plot(x1, x2, color=color, zorder=1, **plot_kwargs)
if show_live:
axes.plot(l1, l2, color=live_color, zorder=2, **live_kwargs)
# Setup axes
if span is not None:
axes.set_xlim(span[0])
axes.set_ylim(span[1])
if max_n_ticks == 0:
axes.xaxis.set_major_locator(NullLocator())
axes.yaxis.set_major_locator(NullLocator())
else:
axes.xaxis.set_major_locator(MaxNLocator(max_n_ticks))
axes.yaxis.set_major_locator(MaxNLocator(max_n_ticks))
# Label axes.
sf = ScalarFormatter(useMathText=use_math_text)
axes.xaxis.set_major_formatter(sf)
axes.yaxis.set_major_formatter(sf)
if labels is not None:
axes.set_xlabel(labels[0], **label_kwargs)
axes.set_ylabel(labels[1], **label_kwargs)
else:
axes.set_xlabel(r"$x_{"+str(dims[0]+1)+"}$", **label_kwargs)
axes.set_ylabel(r"$x_{"+str(dims[1]+1)+"}$", **label_kwargs)
return fig, axes | [
"def",
"boundplot",
"(",
"results",
",",
"dims",
",",
"it",
"=",
"None",
",",
"idx",
"=",
"None",
",",
"prior_transform",
"=",
"None",
",",
"periodic",
"=",
"None",
",",
"ndraws",
"=",
"5000",
",",
"color",
"=",
"'gray'",
",",
"plot_kwargs",
"=",
"No... | 40.880259 | 21.029126 |
def name(self, *args):
'''
get/set the descriptive name text of this object.
'''
if len(args):
self.__name = args[0]
else:
return self.__name | [
"def",
"name",
"(",
"self",
",",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
":",
"self",
".",
"__name",
"=",
"args",
"[",
"0",
"]",
"else",
":",
"return",
"self",
".",
"__name"
] | 24.75 | 19.5 |
def set_value_for_keypath(obj, path, new_value, preserve_child = False):
"""Set attribute value new_value at key path of start object obj.
"""
parts = path.split('.')
last_part = len(parts) - 1
dst = obj
for i, part in enumerate(parts):
match = re.match(list_index_re, part)
if match is not None:
dst = _extract(dst, match.group(1))
if not isinstance(dst, list) and not isinstance(dst, tuple):
raise TypeError('expected list/tuple')
index = int(match.group(2))
if i == last_part:
dst[index] = new_value
else:
dst = dst[index]
else:
if i != last_part:
dst = _extract(dst, part)
else:
if isinstance(dst, dict):
dst[part] = new_value
else:
if not preserve_child:
setattr(dst, part, new_value)
else:
try:
v = getattr(dst, part)
except AttributeError:
setattr(dst, part, new_value) | [
"def",
"set_value_for_keypath",
"(",
"obj",
",",
"path",
",",
"new_value",
",",
"preserve_child",
"=",
"False",
")",
":",
"parts",
"=",
"path",
".",
"split",
"(",
"'.'",
")",
"last_part",
"=",
"len",
"(",
"parts",
")",
"-",
"1",
"dst",
"=",
"obj",
"f... | 37.483871 | 11.483871 |
def _get_digraph_char(self, cli):
" Return `False`, or the Digraph symbol to be used. "
if cli.quoted_insert:
return '^'
if cli.vi_state.waiting_for_digraph:
if cli.vi_state.digraph_symbol1:
return cli.vi_state.digraph_symbol1
return '?'
return False | [
"def",
"_get_digraph_char",
"(",
"self",
",",
"cli",
")",
":",
"if",
"cli",
".",
"quoted_insert",
":",
"return",
"'^'",
"if",
"cli",
".",
"vi_state",
".",
"waiting_for_digraph",
":",
"if",
"cli",
".",
"vi_state",
".",
"digraph_symbol1",
":",
"return",
"cli... | 36.222222 | 12.666667 |
def set_banner(self, banner_type, value=None, default=False,
disable=False):
"""Configures system banners
Args:
banner_type(str): banner to be changed (likely login or motd)
value(str): value to set for the banner
default (bool): Controls the use of the default keyword
disable (bool): Controls the use of the no keyword`
Returns:
bool: True if the commands completed successfully otherwise False
"""
command_string = "banner %s" % banner_type
if default is True or disable is True:
cmd = self.command_builder(command_string, value=None,
default=default, disable=disable)
return self.configure(cmd)
else:
if not value.endswith("\n"):
value = value + "\n"
command_input = dict(cmd=command_string, input=value)
return self.configure([command_input]) | [
"def",
"set_banner",
"(",
"self",
",",
"banner_type",
",",
"value",
"=",
"None",
",",
"default",
"=",
"False",
",",
"disable",
"=",
"False",
")",
":",
"command_string",
"=",
"\"banner %s\"",
"%",
"banner_type",
"if",
"default",
"is",
"True",
"or",
"disable... | 40.708333 | 19.583333 |
def set_top_bar_color(self, index):
"""Set the color of the upper frame to the background color of the reftrack status
:param index: the index
:type index: :class:`QtGui.QModelIndex`
:returns: None
:rtype: None
:raises: None
"""
dr = QtCore.Qt.ForegroundRole
c = index.model().index(index.row(), 8, index.parent()).data(dr)
if not c:
c = self.upper_fr_default_bg_color
self.upper_fr.setStyleSheet('background-color: rgb(%s, %s, %s)' % (c.red(), c.green(), c.blue())) | [
"def",
"set_top_bar_color",
"(",
"self",
",",
"index",
")",
":",
"dr",
"=",
"QtCore",
".",
"Qt",
".",
"ForegroundRole",
"c",
"=",
"index",
".",
"model",
"(",
")",
".",
"index",
"(",
"index",
".",
"row",
"(",
")",
",",
"8",
",",
"index",
".",
"par... | 39.571429 | 17.642857 |
def expand_on(self, col1, col2, rename1 = None, rename2 = None, drop = [], drop_collections = False):
"""
Returns a reshaped version of extractor's data, where unique combinations of values from col1 and col2
are given individual rows.
Example function call from ``tidymbox``:
.. code-block:: python
self.expand_on('From', 'To', ['MessageID', 'Recipient'], rename1='From', rename2='Recipient')
Columns to be expanded upon should be either atomic values or dictionaries of dictionaries. For example:
Input Data:
+-----------------+-------------------------------------------------------------------+
| col1 (Atomic) | col2 (Dict of Dict) |
+=================+===================================================================+
| value1 | {valueA : {attr1: X1, attr2: Y1}, valueB: {attr1: X2, attr2: Y2} |
+-----------------+-------------------------------------------------------------------+
| value2 | {valueC : {attr1: X3, attr2: Y3}, valueD: {attr1: X4, attr2: Y4} |
+-----------------+-------------------------------------------------------------------+
Output Data:
+---------------+---------------+-------+-------+
| col1_extended | col2_extended | attr1 | attr2 |
+===============+===============+=======+=======+
| value1 | valueA | X1 | Y1 |
+---------------+---------------+-------+-------+
| value1 | valueB | X2 | Y2 |
+---------------+---------------+-------+-------+
| value2 | valueA | X3 | Y3 |
+---------------+---------------+-------+-------+
| value2 | valueB | X4 | Y4 |
+---------------+---------------+-------+-------+
:param str col1: The first column to expand on. May be an atomic value, or a dict of dict.
:param str col2: The second column to expand on. May be an atomic value, or a dict of dict.
:param str rename1: The name for col1 after expansion. Defaults to col1_extended.
:param str rename2: The name for col2 after expansion. Defaults to col2_extended.
:param list drop: Column names to be dropped from output.
:param bool drop_collections: Should columns with compound values be dropped?
:return: pandas.DataFrame
"""
# Assumption 1: Expanded columns are either atomic are built in collections
# Assumption 2: New test_data columns added to rows from dicts in columns of collections.
# How many rows expected in the output?
count = len(self._data)
# How often should the progress bar be updated?
update_interval = max(min(count//100, 100), 5)
# What are the column names?
column_list = list(self._data.columns)
# Determine column index (for itertuples)
try:
col1_index = column_list.index(col1)
except ValueError:
warnings.warn('Could not find "{}" in columns.'.format(col1))
raise
try:
col2_index = column_list.index(col2)
except ValueError:
warnings.warn('Could not find "{}" in columns.'.format(col2))
raise
# Standardize the order of the specified columns
first_index = min(col1_index, col2_index)
second_index = max(col1_index, col2_index)
first_name = column_list[first_index]
second_name = column_list[second_index]
first_rename = rename1 if first_index == col1_index else rename2
second_rename = rename2 if first_index == col1_index else rename1
# New column names:
new_column_list = column_list[:first_index] + \
[first_name+'_extended' if first_rename is None else first_rename] + \
column_list[first_index+1:second_index] + \
[second_name+'_extended' if second_rename is None else second_rename] + \
column_list[second_index+1:]
# Assert that there are no duplicates!
if len(set(new_column_list)) != len(new_column_list):
raise Exception('Duplicate columns names found. Note that you cannot rename a column with a name '
'that is already taken by another column.')
# List of tuples. Rows in new test_data frame.
old_attr_df_tuples = []
new_attr_df_dicts = []
# MultiIndex tuples
index_tuples = []
def iter_product(item1,item2):
"""
Enumerates possible combinations of items from item1 and item 2. Allows atomic values.
:param item1: Any
:param item2: Any
:return: A list of tuples.
"""
if hasattr(item1, '__iter__') and type(item1) != str:
iter1 = item1
else:
iter1 = [item1]
if hasattr(item2, '__iter__') and type(item2) != str:
iter2 = item2
else:
iter2 = [item2]
return it.product(iter1,iter2)
# Create test_data for output.
with tqdm.tqdm(total=count) as pbar:
for row in self._data.itertuples(index=False):
# Enumerate commit/file pairs
for index in iter_product(row[first_index],row[second_index]):
new_row = row[:first_index] + \
(index[0],) + \
row[first_index+1:second_index] + \
(index[1],) + \
row[second_index+1:]
# Add new row to list of row tuples
old_attr_df_tuples.append(new_row)
# Add key tuple to list of indices
index_tuples.append((index[0],index[1]))
# If there's test_data in either of the columns add the test_data to the new attr test_data frame.
temp_attrs = {}
# Get a copy of the first cell value for this index.
# If it's a dict, get the appropriate entry.
temp_first = row[first_index]
if type(temp_first) == dict:
temp_first = temp_first[index[0]]
temp_second = row[second_index]
if type(temp_second) == dict:
temp_second = temp_second[index[1]]
# Get nested test_data for this index.
if type(temp_first) == dict:
for k in temp_first:
temp_attrs[first_name + '/' + k] = temp_first[k]
if type(temp_second) == dict:
for k in temp_second:
temp_attrs[second_name + '/' + k] = temp_second[k]
# Add to the "new test_data" records.
new_attr_df_dicts.append(temp_attrs)
# Update progress bar
pbar.update(update_interval)
# An expanded test_data frame with only the columns of the original test_data frame
df_1 = pd.DataFrame.from_records(old_attr_df_tuples,
columns=new_column_list)
# An expanded test_data frame containing any test_data held in value:key collections in the expanded cols
df_2 = pd.DataFrame.from_records(new_attr_df_dicts)
# The final expanded test_data set
df_out = pd.concat([df_1, df_2], axis=1)
# Set new index
# index_cols has been depracated
# df_out = df_out.set_index(index_cols)
# Drop unwanted columns
for col in drop:
if col in df_out.columns:
df_out = df_out.drop(col,1)
if drop_collections is True:
df_out = self._drop_collections(df_out)
return df_out | [
"def",
"expand_on",
"(",
"self",
",",
"col1",
",",
"col2",
",",
"rename1",
"=",
"None",
",",
"rename2",
"=",
"None",
",",
"drop",
"=",
"[",
"]",
",",
"drop_collections",
"=",
"False",
")",
":",
"# Assumption 1: Expanded columns are either atomic are built in col... | 43.467033 | 25.313187 |
def shared_blockchain_instance(self):
""" This method will initialize ``SharedInstance.instance`` and return it.
The purpose of this method is to have offer single default
instance that can be reused by multiple classes.
"""
if not self._sharedInstance.instance:
klass = self.get_instance_class()
self._sharedInstance.instance = klass(**self._sharedInstance.config)
return self._sharedInstance.instance | [
"def",
"shared_blockchain_instance",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_sharedInstance",
".",
"instance",
":",
"klass",
"=",
"self",
".",
"get_instance_class",
"(",
")",
"self",
".",
"_sharedInstance",
".",
"instance",
"=",
"klass",
"(",
"*",... | 52.666667 | 11.888889 |
def _invoke(task, args):
'''Invoke a task with the appropriate args; return the remaining args.'''
kwargs = task.defaults.copy()
if task.kwargs:
temp_kwargs, args = getopt.getopt(args, '', task.kwargs)
temp_kwargs = _opts_to_dict(*temp_kwargs)
kwargs.update(temp_kwargs)
if task.args:
for arg in task.args:
if not len(args):
abort(LOCALE['error_wrong_args'], task, len(task.args))
kwargs.update({arg: args[0]})
args = args[1:]
if task.consume:
task(*args, **kwargs)
return []
else:
task(**kwargs)
return args | [
"def",
"_invoke",
"(",
"task",
",",
"args",
")",
":",
"kwargs",
"=",
"task",
".",
"defaults",
".",
"copy",
"(",
")",
"if",
"task",
".",
"kwargs",
":",
"temp_kwargs",
",",
"args",
"=",
"getopt",
".",
"getopt",
"(",
"args",
",",
"''",
",",
"task",
... | 25 | 22.047619 |
def get(self, file_id: str) -> [typing.BinaryIO, str, datetime.datetime]:
"""Return the file identified by a file_id string, its file name and upload date."""
raise NotImplementedError("Downloading files for downloading files in FileStore has not been implemented yet.") | [
"def",
"get",
"(",
"self",
",",
"file_id",
":",
"str",
")",
"->",
"[",
"typing",
".",
"BinaryIO",
",",
"str",
",",
"datetime",
".",
"datetime",
"]",
":",
"raise",
"NotImplementedError",
"(",
"\"Downloading files for downloading files in FileStore has not been implem... | 94.666667 | 37.333333 |
def visitor(arg_type):
"""Decorator that creates a visitor method."""
def decorator(fn):
declaring_class = _declaring_class(fn)
_methods[(declaring_class, arg_type)] = fn
# Replace all decorated methods with _visitor_impl
return _visitor_impl
return decorator | [
"def",
"visitor",
"(",
"arg_type",
")",
":",
"def",
"decorator",
"(",
"fn",
")",
":",
"declaring_class",
"=",
"_declaring_class",
"(",
"fn",
")",
"_methods",
"[",
"(",
"declaring_class",
",",
"arg_type",
")",
"]",
"=",
"fn",
"# Replace all decorated methods wi... | 26.909091 | 20.181818 |
def run(self, ket: State = None) -> State:
"""Compiles and runs a program. The optional program argument
supplies the initial state and memory. Else qubits and classical
bits start from zero states.
"""
if ket is None:
qubits = self.qubits
ket = zero_state(qubits)
ket = self._initilize(ket)
pc = 0
while pc >= 0 and pc < len(self):
instr = self.instructions[pc]
ket = ket.update({PC: pc + 1})
ket = instr.run(ket)
pc = ket.memory[PC]
return ket | [
"def",
"run",
"(",
"self",
",",
"ket",
":",
"State",
"=",
"None",
")",
"->",
"State",
":",
"if",
"ket",
"is",
"None",
":",
"qubits",
"=",
"self",
".",
"qubits",
"ket",
"=",
"zero_state",
"(",
"qubits",
")",
"ket",
"=",
"self",
".",
"_initilize",
... | 30.210526 | 13.789474 |
def _AtNonLeaf(self, attr_value, path):
"""Makes dictionaries expandable when dealing with plists."""
if isinstance(attr_value, dict):
for value in self.Expand(attr_value, path[1:]):
yield value
else:
for v in objectfilter.ValueExpander._AtNonLeaf(self, attr_value, path):
yield v | [
"def",
"_AtNonLeaf",
"(",
"self",
",",
"attr_value",
",",
"path",
")",
":",
"if",
"isinstance",
"(",
"attr_value",
",",
"dict",
")",
":",
"for",
"value",
"in",
"self",
".",
"Expand",
"(",
"attr_value",
",",
"path",
"[",
"1",
":",
"]",
")",
":",
"yi... | 39.125 | 16.5 |
def _check_cmdline(data):
'''
In some cases where there are an insane number of processes being created
on a system a PID can get recycled or assigned to a non-Salt process.
On Linux this fn checks to make sure the PID we are checking on is actually
a Salt process.
For non-Linux systems we punt and just return True
'''
if not salt.utils.platform.is_linux():
return True
pid = data.get('pid')
if not pid:
return False
if not os.path.isdir('/proc'):
return True
path = os.path.join('/proc/{0}/cmdline'.format(pid))
if not os.path.isfile(path):
return False
try:
with salt.utils.files.fopen(path, 'rb') as fp_:
if b'salt' in fp_.read():
return True
except (OSError, IOError):
return False | [
"def",
"_check_cmdline",
"(",
"data",
")",
":",
"if",
"not",
"salt",
".",
"utils",
".",
"platform",
".",
"is_linux",
"(",
")",
":",
"return",
"True",
"pid",
"=",
"data",
".",
"get",
"(",
"'pid'",
")",
"if",
"not",
"pid",
":",
"return",
"False",
"if... | 32 | 20.48 |
def easeInOutCirc(n):
"""A circular tween function that accelerates, reaches the midpoint, and then decelerates.
Args:
n (float): The time progress, starting at 0.0 and ending at 1.0.
Returns:
(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
"""
_checkRange(n)
n = n * 2
if n < 1:
return -0.5 * (math.sqrt(1 - n**2) - 1)
else:
n = n - 2
return 0.5 * (math.sqrt(1 - n**2) + 1) | [
"def",
"easeInOutCirc",
"(",
"n",
")",
":",
"_checkRange",
"(",
"n",
")",
"n",
"=",
"n",
"*",
"2",
"if",
"n",
"<",
"1",
":",
"return",
"-",
"0.5",
"*",
"(",
"math",
".",
"sqrt",
"(",
"1",
"-",
"n",
"**",
"2",
")",
"-",
"1",
")",
"else",
"... | 30.3125 | 25 |
def check_entitlement(doi):
"""Check whether IP and credentials enable access to content for a doi.
This function uses the entitlement endpoint of the Elsevier API to check
whether an article is available to a given institution. Note that this
feature of the API is itself not available for all institution keys.
"""
if doi.lower().startswith('doi:'):
doi = doi[4:]
url = '%s/%s' % (elsevier_entitlement_url, doi)
params = {'httpAccept': 'text/xml'}
res = requests.get(url, params, headers=ELSEVIER_KEYS)
if not res.status_code == 200:
logger.error('Could not check entitlements for article %s: '
'status code %d' % (doi, res.status_code))
logger.error('Response content: %s' % res.text)
return False
return True | [
"def",
"check_entitlement",
"(",
"doi",
")",
":",
"if",
"doi",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'doi:'",
")",
":",
"doi",
"=",
"doi",
"[",
"4",
":",
"]",
"url",
"=",
"'%s/%s'",
"%",
"(",
"elsevier_entitlement_url",
",",
"doi",
")",
... | 44.055556 | 17.944444 |
def winsorize(row, min_percentile, max_percentile):
"""
This implementation is based on scipy.stats.mstats.winsorize
"""
a = row.copy()
nan_count = isnan(row).sum()
nonnan_count = a.size - nan_count
# NOTE: argsort() sorts nans to the end of the array.
idx = a.argsort()
# Set values at indices below the min percentile to the value of the entry
# at the cutoff.
if min_percentile > 0:
lower_cutoff = int(min_percentile * nonnan_count)
a[idx[:lower_cutoff]] = a[idx[lower_cutoff]]
# Set values at indices above the max percentile to the value of the entry
# at the cutoff.
if max_percentile < 1:
upper_cutoff = int(ceil(nonnan_count * max_percentile))
# if max_percentile is close to 1, then upper_cutoff might not
# remove any values.
if upper_cutoff < nonnan_count:
start_of_nans = (-nan_count) if nan_count else None
a[idx[upper_cutoff:start_of_nans]] = a[idx[upper_cutoff - 1]]
return a | [
"def",
"winsorize",
"(",
"row",
",",
"min_percentile",
",",
"max_percentile",
")",
":",
"a",
"=",
"row",
".",
"copy",
"(",
")",
"nan_count",
"=",
"isnan",
"(",
"row",
")",
".",
"sum",
"(",
")",
"nonnan_count",
"=",
"a",
".",
"size",
"-",
"nan_count",... | 35.678571 | 20.964286 |
def read_frame(self):
"""
Read an AMQP frame.
"""
frame_type, channel, size = unpack('>BHI', self._read(7))
payload = self._read(size)
ch = ord(self._read(1))
if ch == 206: # '\xce'
return frame_type, channel, payload
else:
raise Exception('Framing Error, received 0x%02x while expecting 0xce' % ch) | [
"def",
"read_frame",
"(",
"self",
")",
":",
"frame_type",
",",
"channel",
",",
"size",
"=",
"unpack",
"(",
"'>BHI'",
",",
"self",
".",
"_read",
"(",
"7",
")",
")",
"payload",
"=",
"self",
".",
"_read",
"(",
"size",
")",
"ch",
"=",
"ord",
"(",
"se... | 31.416667 | 16.916667 |
def data_to_binary(self):
"""
:return: bytes
"""
return bytes([
COMMAND_CODE,
self._wday,
self._hour,
self._min
]) | [
"def",
"data_to_binary",
"(",
"self",
")",
":",
"return",
"bytes",
"(",
"[",
"COMMAND_CODE",
",",
"self",
".",
"_wday",
",",
"self",
".",
"_hour",
",",
"self",
".",
"_min",
"]",
")"
] | 19.3 | 14.9 |
def sanitize_cloud(cloud: str) -> str:
"""
Fix rare cloud layer issues
"""
if len(cloud) < 4:
return cloud
if not cloud[3].isdigit() and cloud[3] != '/':
if cloud[3] == 'O':
cloud = cloud[:3] + '0' + cloud[4:] # Bad "O": FEWO03 -> FEW003
else: # Move modifiers to end: BKNC015 -> BKN015C
cloud = cloud[:3] + cloud[4:] + cloud[3]
return cloud | [
"def",
"sanitize_cloud",
"(",
"cloud",
":",
"str",
")",
"->",
"str",
":",
"if",
"len",
"(",
"cloud",
")",
"<",
"4",
":",
"return",
"cloud",
"if",
"not",
"cloud",
"[",
"3",
"]",
".",
"isdigit",
"(",
")",
"and",
"cloud",
"[",
"3",
"]",
"!=",
"'/'... | 33.666667 | 13.5 |
def lis_to_bio_map(folder):
"""
Senators have a lis_id that is used in some places. That's dumb. Build a
dict from lis_id to bioguide_id which every member of congress has.
"""
logger.info("Opening legislator csv for lis_dct creation")
lis_dic = {}
leg_path = "{0}/legislators.csv".format(folder)
logger.info(leg_path)
with open(leg_path, 'r') as csvfile:
leg_reader = csv.reader(csvfile)
for row in leg_reader:
if row[22]:
lis_dic[row[22]] = row[19]
return lis_dic | [
"def",
"lis_to_bio_map",
"(",
"folder",
")",
":",
"logger",
".",
"info",
"(",
"\"Opening legislator csv for lis_dct creation\"",
")",
"lis_dic",
"=",
"{",
"}",
"leg_path",
"=",
"\"{0}/legislators.csv\"",
".",
"format",
"(",
"folder",
")",
"logger",
".",
"info",
... | 35.666667 | 13.533333 |
def intersect_3(self, second, third):
"""
Intersection routine for three inputs. Built out of the intersect,
coalesce and play routines
"""
self.intersection(second)
self.intersection(third)
self.coalesce()
return len(self) | [
"def",
"intersect_3",
"(",
"self",
",",
"second",
",",
"third",
")",
":",
"self",
".",
"intersection",
"(",
"second",
")",
"self",
".",
"intersection",
"(",
"third",
")",
"self",
".",
"coalesce",
"(",
")",
"return",
"len",
"(",
"self",
")"
] | 27.555556 | 12 |
def _evaluate_objective_multiple(objective_function, arg_batch,
batch_evaluate_objective):
"""Evaluates the objective function on a batch of points.
If `batch_evaluate_objective` is True, returns
`objective function(arg_batch)` else it maps the `objective_function`
across the `arg_batch`.
Args:
objective_function: A Python callable that accepts a single `Tensor` of
rank 'R > 1' and any shape 's' and returns a scalar `Tensor` of real dtype
containing the value of the function at that point. If
`batch a `Tensor` of shape `[batch_size] + s ` where `batch_size` is the
size of the batch of args. In this case, the expected return value is a
`Tensor` of shape `[batch_size]`.
arg_batch: A `Tensor` of real dtype. The batch of arguments at which to
evaluate the `objective_function`. If `batch_evaluate_objective` is False,
`arg_batch` will be unpacked along the zeroth axis and the
`objective_function` will be applied to each element.
batch_evaluate_objective: `bool`. Whether the `objective_function` can
evaluate a batch of arguments at once.
Returns:
A tuple containing:
objective_values: A `Tensor` of real dtype and shape `[batch_size]`.
The value of the objective function evaluated at the supplied
`arg_batch`.
num_evaluations: An `int32` scalar `Tensor`containing the number of
points on which the objective function was evaluated (i.e `batch_size`).
"""
n_points = tf.shape(input=arg_batch)[0]
if batch_evaluate_objective:
return objective_function(arg_batch), n_points
return tf.map_fn(objective_function, arg_batch), n_points | [
"def",
"_evaluate_objective_multiple",
"(",
"objective_function",
",",
"arg_batch",
",",
"batch_evaluate_objective",
")",
":",
"n_points",
"=",
"tf",
".",
"shape",
"(",
"input",
"=",
"arg_batch",
")",
"[",
"0",
"]",
"if",
"batch_evaluate_objective",
":",
"return",... | 49.147059 | 24.088235 |
def json2lte(self, filename):
""" convert json to lte
return tuple of json, lte file content
"""
data_json = open(filename, 'r').read().strip()
latins = lattice.Lattice(data_json)
self.lattice_instance = latins
self.all_beamlines = latins.getAllBl()
if self.use_beamline is None:
self.use_beamline = 'BL' if 'BL' in self.all_beamlines else self.all_beamlines[
0]
bl_ele_list = [latins.getFullBeamline(k, True)
for k in self.all_beamlines]
self.beamlines_dict = dict(zip(self.all_beamlines, bl_ele_list))
data_lte = latins.generateLatticeFile(self.use_beamline, 'sio')
return data_json, data_lte | [
"def",
"json2lte",
"(",
"self",
",",
"filename",
")",
":",
"data_json",
"=",
"open",
"(",
"filename",
",",
"'r'",
")",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
"latins",
"=",
"lattice",
".",
"Lattice",
"(",
"data_json",
")",
"self",
".",
"lat... | 38.421053 | 17.684211 |
def update_items(portal_type=None, uid=None, endpoint=None, **kw):
""" update items
1. If the uid is given, the user wants to update the object with the data
given in request body
2. If no uid is given, the user wants to update a bunch of objects.
-> each record contains either an UID, path or parent_path + id
"""
# disable CSRF
req.disable_csrf_protection()
# the data to update
records = req.get_request_data()
# we have an uid -> try to get an object for it
obj = get_object_by_uid(uid)
if obj:
record = records[0] # ignore other records if we got an uid
obj = update_object_with_data(obj, record)
return make_items_for([obj], endpoint=endpoint)
# no uid -> go through the record items
results = []
for record in records:
obj = get_object_by_record(record)
# no object found for this record
if obj is None:
continue
# update the object with the given record data
obj = update_object_with_data(obj, record)
results.append(obj)
if not results:
fail(400, "No Objects could be updated")
return make_items_for(results, endpoint=endpoint) | [
"def",
"update_items",
"(",
"portal_type",
"=",
"None",
",",
"uid",
"=",
"None",
",",
"endpoint",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"# disable CSRF",
"req",
".",
"disable_csrf_protection",
"(",
")",
"# the data to update",
"records",
"=",
"req",
... | 30.230769 | 20.666667 |
def decode_and_evaluate(self,
checkpoint: Optional[int] = None,
output_name: str = os.devnull) -> Dict[str, float]:
"""
Decodes data set and evaluates given a checkpoint.
:param checkpoint: Checkpoint to load parameters from.
:param output_name: Filename to write translations to. Defaults to /dev/null.
:return: Mapping of metric names to scores.
"""
models, vocab_target = inference_image.load_models(context=self.context,
max_input_len=self.max_input_len,
beam_size=self.beam_size,
batch_size=self.batch_size,
model_folders=[self.model],
checkpoints=[checkpoint],
softmax_temperature=self.softmax_temperature,
max_output_length_num_stds=self.max_output_length_num_stds,
source_image_size=tuple(self.source_image_size),
forced_max_output_len=self.max_output_length)
translator = inference_image.ImageCaptioner(context=self.context,
ensemble_mode=self.ensemble_mode,
bucket_source_width=0,
length_penalty=inference.LengthPenalty(
self.length_penalty_alpha,
self.length_penalty_beta),
brevity_penalty=inference.BrevityPenalty(
weight=0.0),
beam_prune=0.0,
beam_search_stop='all',
models=models,
source_vocabs=None,
target_vocab=vocab_target,
restrict_lexicon=None,
store_beam=False,
source_image_size=tuple(
self.source_image_size),
source_root=self.image_root,
use_feature_loader=self.use_feature_loader)
trans_wall_time = 0.0
translations = []
with data_io.smart_open(output_name, 'w') as output:
handler = output_handler.StringOutputHandler(output)
tic = time.time()
trans_inputs = [] # type: List[inference.TranslatorInput]
for i, inputs in enumerate(self.inputs_sentences):
trans_inputs.append(
inference.make_input_from_multiple_strings(i, inputs))
trans_outputs = translator.translate(trans_inputs)
trans_wall_time = time.time() - tic
for trans_input, trans_output in zip(trans_inputs, trans_outputs):
handler.handle(trans_input, trans_output)
translations.append(trans_output.translation)
avg_time = trans_wall_time / len(self.target_sentences)
# TODO(fhieber): eventually add more metrics (METEOR etc.)
return {C.BLEU_VAL: evaluate.raw_corpus_bleu(hypotheses=translations,
references=self.target_sentences,
offset=0.01),
C.CHRF_VAL: evaluate.raw_corpus_chrf(hypotheses=translations,
references=self.target_sentences),
C.AVG_TIME: avg_time,
C.DECODING_TIME: trans_wall_time} | [
"def",
"decode_and_evaluate",
"(",
"self",
",",
"checkpoint",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"output_name",
":",
"str",
"=",
"os",
".",
"devnull",
")",
"->",
"Dict",
"[",
"str",
",",
"float",
"]",
":",
"models",
",",
"vocab_target"... | 66.140625 | 32.515625 |
def delete_association(self, target, api_type=None, api_sub_type=None, unique_id=None):
"""
Deletes a association from a Indicator/Group/Victim
Args:
target:
api_type:
api_sub_type:
unique_id:
Returns:
"""
api_type = api_type or target.api_type
api_sub_type = api_sub_type or target.api_sub_type
unique_id = unique_id or target.unique_id
if not self.can_update():
self._tcex.handle_error(910, [self.type])
if not target.can_update():
self._tcex.handle_error(910, [target.type])
return self.tc_requests.delete_association(
self.api_type,
self.api_sub_type,
self.unique_id,
api_type,
api_sub_type,
unique_id,
owner=self.owner,
) | [
"def",
"delete_association",
"(",
"self",
",",
"target",
",",
"api_type",
"=",
"None",
",",
"api_sub_type",
"=",
"None",
",",
"unique_id",
"=",
"None",
")",
":",
"api_type",
"=",
"api_type",
"or",
"target",
".",
"api_type",
"api_sub_type",
"=",
"api_sub_type... | 27.516129 | 19.516129 |
def nfa_determinization(nfa: dict) -> dict:
""" Returns a DFA that reads the same language of the input NFA.
Let A be an NFA, then there exists a DFA :math:`A_d` such
that :math:`L(A_d) = L(A)`. Intuitively, :math:`A_d`
collapses all possible runs of A on a given input word into
one run over a larger state set.
:math:`A_d` is defined as:
:math:`A_d = (Σ, 2^S , s_0 , ρ_d , F_d )`
where:
• :math:`2^S` , i.e., the state set of :math:`A_d` , consists
of all sets of states S in A;
• :math:`s_0 = S^0` , i.e., the single initial state of
:math:`A_d` is the set :math:`S_0` of initial states of A;
• :math:`F_d = \{Q | Q ∩ F ≠ ∅\}`, i.e., the collection of
sets of states that intersect F nontrivially;
• :math:`ρ_d(Q, a) = \{s' | (s,a, s' ) ∈ ρ\ for\ some\ s ∈ Q\}`.
:param dict nfa: input NFA.
:return: *(dict)* representing a DFA
"""
def state_name(s):
return str(set(sorted(s)))
dfa = {
'alphabet': nfa['alphabet'].copy(),
'initial_state': None,
'states': set(),
'accepting_states': set(),
'transitions': dict()
}
if len(nfa['initial_states']) > 0:
dfa['initial_state'] = state_name(nfa['initial_states'])
dfa['states'].add(state_name(nfa['initial_states']))
sets_states = list()
sets_queue = list()
sets_queue.append(nfa['initial_states'])
sets_states.append(nfa['initial_states'])
if len(sets_states[0].intersection(nfa['accepting_states'])) > 0:
dfa['accepting_states'].add(state_name(sets_states[0]))
while sets_queue:
current_set = sets_queue.pop(0)
for a in dfa['alphabet']:
next_set = set()
for state in current_set:
if (state, a) in nfa['transitions']:
for next_state in nfa['transitions'][state, a]:
next_set.add(next_state)
if len(next_set) == 0:
continue
if next_set not in sets_states:
sets_states.append(next_set)
sets_queue.append(next_set)
dfa['states'].add(state_name(next_set))
if next_set.intersection(nfa['accepting_states']):
dfa['accepting_states'].add(state_name(next_set))
dfa['transitions'][state_name(current_set), a] = state_name(next_set)
return dfa | [
"def",
"nfa_determinization",
"(",
"nfa",
":",
"dict",
")",
"->",
"dict",
":",
"def",
"state_name",
"(",
"s",
")",
":",
"return",
"str",
"(",
"set",
"(",
"sorted",
"(",
"s",
")",
")",
")",
"dfa",
"=",
"{",
"'alphabet'",
":",
"nfa",
"[",
"'alphabet'... | 35.666667 | 18.590909 |
def VersionPath():
"""Returns a path to version.ini."""
# Try to get a version.ini. It should be in the resources if the code
# was packed with "pip sdist". It will be 2 levels up from grr_response_core
# if the code was installed via "pip install -e".
version_ini = (
package.ResourcePath("grr-response-core", "version.ini") or
package.ResourcePath("grr-response-core", "../../version.ini"))
if not os.path.exists(version_ini):
raise RuntimeError("Can't find version.ini at %s" % version_ini)
return version_ini | [
"def",
"VersionPath",
"(",
")",
":",
"# Try to get a version.ini. It should be in the resources if the code",
"# was packed with \"pip sdist\". It will be 2 levels up from grr_response_core",
"# if the code was installed via \"pip install -e\".",
"version_ini",
"=",
"(",
"package",
".",
"R... | 38 | 25 |
def build_vec(self):
"""build call validity vector for each device"""
for item in all_calls:
self.__dict__[item] = []
for dev in self.devices:
for item in all_calls:
if self.system.__dict__[dev].n == 0:
val = False
else:
val = self.system.__dict__[dev].calls.get(item, False)
self.__dict__[item].append(val) | [
"def",
"build_vec",
"(",
"self",
")",
":",
"for",
"item",
"in",
"all_calls",
":",
"self",
".",
"__dict__",
"[",
"item",
"]",
"=",
"[",
"]",
"for",
"dev",
"in",
"self",
".",
"devices",
":",
"for",
"item",
"in",
"all_calls",
":",
"if",
"self",
".",
... | 36.083333 | 14.083333 |
def add_interval(self, precision=0):
""" Adds an interval to :prop:intervals
-> #str formatted time
"""
precision = precision or self.precision
interval = round((self._stop - self._start), precision)
self.intervals.append(interval)
self._intervals_len += 1
self._start = time.perf_counter()
return self.format_time(interval) | [
"def",
"add_interval",
"(",
"self",
",",
"precision",
"=",
"0",
")",
":",
"precision",
"=",
"precision",
"or",
"self",
".",
"precision",
"interval",
"=",
"round",
"(",
"(",
"self",
".",
"_stop",
"-",
"self",
".",
"_start",
")",
",",
"precision",
")",
... | 39.1 | 5.1 |
def synchronized(wrapped):
""" Synchronization decorator. """
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
self = args[0]
with self._lock:
return wrapped(*args, **kwargs)
return wrapper | [
"def",
"synchronized",
"(",
"wrapped",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"wrapped",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
"=",
"args",
"[",
"0",
"]",
"with",
"self",
".",
"_lock",
":",
"r... | 23.3 | 17.1 |
def _prime_group_perm_caches(self):
"""
Prime the group cache and put them on the ``self.group``.
In addition add a cache filled flag on ``self.group``.
"""
perm_cache = self._get_group_cached_perms()
self.group._authority_perm_cache = perm_cache
self.group._authority_perm_cache_filled = True | [
"def",
"_prime_group_perm_caches",
"(",
"self",
")",
":",
"perm_cache",
"=",
"self",
".",
"_get_group_cached_perms",
"(",
")",
"self",
".",
"group",
".",
"_authority_perm_cache",
"=",
"perm_cache",
"self",
".",
"group",
".",
"_authority_perm_cache_filled",
"=",
"T... | 42.75 | 11.25 |
def strlimit (s, length=72):
"""If the length of the string exceeds the given limit, it will be cut
off and three dots will be appended.
@param s: the string to limit
@type s: string
@param length: maximum length
@type length: non-negative integer
@return: limited string, at most length+3 characters long
"""
assert length >= 0, "length limit must be a non-negative integer"
if not s or len(s) <= length:
return s
if length == 0:
return ""
return "%s..." % s[:length] | [
"def",
"strlimit",
"(",
"s",
",",
"length",
"=",
"72",
")",
":",
"assert",
"length",
">=",
"0",
",",
"\"length limit must be a non-negative integer\"",
"if",
"not",
"s",
"or",
"len",
"(",
"s",
")",
"<=",
"length",
":",
"return",
"s",
"if",
"length",
"=="... | 32.375 | 13.9375 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.