code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def load_tmp_dh(self, dhfile):
    """
    Load parameters for Ephemeral Diffie-Hellman

    :param dhfile: The file to load EDH parameters from (``bytes`` or
        ``unicode``).
    :return: None
    """
    dhfile = _path_string(dhfile)
    bio = _lib.BIO_new_file(dhfile, b"r")
    if bio == _ffi.NULL:
        _raise_current_error()
    bio = _ffi.gc(bio, _lib.BIO_free)
    dh = _lib.PEM_read_bio_DHparams(bio, _ffi.NULL, _ffi.NULL, _ffi.NULL)
    # PEM_read_bio_DHparams returns NULL on a parse failure; raise the
    # OpenSSL error instead of handing a NULL DH pointer to the context.
    if dh == _ffi.NULL:
        _raise_current_error()
    dh = _ffi.gc(dh, _lib.DH_free)
    _lib.SSL_CTX_set_tmp_dh(self._context, dh)
|
Load parameters for Ephemeral Diffie-Hellman
:param dhfile: The file to load EDH parameters from (``bytes`` or
``unicode``).
:return: None
|
def get_impact_report_as_string(analysis_dir):
    """Retrieve an html string of table report (impact-report-output.html).

    :param analysis_dir: Directory of where the report located.
    :type analysis_dir: str

    :return: HTML string of the report, or None when no report exists.
    :rtype: str
    """
    candidate_names = [
        'impact-report-output.html',
        'multi-exposure-impact-report-output.html',
    ]
    output_dir = join(analysis_dir, 'output')
    # Pick the first report file that actually exists on disk.
    report_path = next(
        (join(output_dir, name) for name in candidate_names
         if exists(join(output_dir, name))),
        None)
    if report_path is None:
        return None
    # The HTML may contain accented characters, so decode as UTF-8.
    with open(report_path, 'r', encoding='utf-8') as report_file:
        return report_file.read()
|
Retrieve an html string of table report (impact-report-output.html).
:param analysis_dir: Directory of where the report located.
:type analysis_dir: str
:return: HTML string of the report.
:rtype: str
|
def result(self):
    """
    Build the SQL expression for this field, optionally aliased.
    """
    sanitized_field = re.sub(REGEX_CLEANER, '', self.field_name)
    if not self.alias:
        return sanitized_field
    sanitized_alias = re.sub(REGEX_CLEANER, '', self.alias)
    return "%s AS %s" % (sanitized_field, sanitized_alias)
|
Construye la expresion
|
def get_issue_labels(self, issue_key):
    """
    Get issue labels.

    :param issue_key: key of the issue whose labels are fetched
    :return: list of labels, or None when the response carries no
        ``fields``/``labels`` data
    """
    url = 'rest/api/2/issue/{issue_key}?fields=labels'.format(issue_key=issue_key)
    # Guard every level: the request may return None, and the payload may
    # lack 'fields' entirely -- the original chained .get() on None and
    # raised AttributeError in that case.
    return ((self.get(url) or {}).get('fields') or {}).get('labels')
|
Get issue labels.
:param issue_key:
:return:
|
def convert(self, mode):
    """Convert the current image to the given *mode*. See :class:`Image`
    for a list of available modes.
    """
    # Already in the requested mode: nothing to do.
    if mode == self.mode:
        return
    if mode not in ["L", "LA", "RGB", "RGBA",
                    "YCbCr", "YCbCrA", "P", "PA"]:
        raise ValueError("Mode %s not recognized." % (mode))
    # An empty image only needs its mode label updated.
    if self.is_empty():
        self.mode = mode
        return
    if mode == self.mode + "A":
        # Same base mode, adding an alpha band: append a fully-opaque channel.
        self.channels.append(np.ma.ones(self.channels[0].shape))
        if self.fill_value is not None:
            self.fill_value += [1]
        self.mode = mode
    elif mode + "A" == self.mode:
        # Same base mode, dropping the alpha band: remove the last channel.
        self.channels = self.channels[:-1]
        if self.fill_value is not None:
            self.fill_value = self.fill_value[:-1]
        self.mode = mode
    elif mode.endswith("A") and not self.mode.endswith("A"):
        # Target has alpha, source does not: add alpha first, then convert.
        self.convert(self.mode + "A")
        self.convert(mode)
    elif self.mode.endswith("A") and not mode.endswith("A"):
        # Source has alpha, target does not: drop alpha first, then convert.
        self.convert(self.mode[:-1])
        self.convert(mode)
    else:
        # Direct conversion between base modes via a dispatch table of
        # private converter methods; each is called with the target mode.
        cases = {
            "RGB": {"YCbCr": self._rgb2ycbcr,
                    "L": self._rgb2l,
                    "P": self._to_p},
            "RGBA": {"YCbCrA": self._rgb2ycbcr,
                     "LA": self._rgb2l,
                     "PA": self._to_p},
            "YCbCr": {"RGB": self._ycbcr2rgb,
                      "L": self._ycbcr2l,
                      "P": self._to_p},
            "YCbCrA": {"RGBA": self._ycbcr2rgb,
                       "LA": self._ycbcr2l,
                       "PA": self._to_p},
            "L": {"RGB": self._l2rgb,
                  "YCbCr": self._l2ycbcr,
                  "P": self._to_p},
            "LA": {"RGBA": self._l2rgb,
                   "YCbCrA": self._l2ycbcr,
                   "PA": self._to_p},
            "P": {"RGB": self._from_p,
                  "YCbCr": self._from_p,
                  "L": self._from_p},
            "PA": {"RGBA": self._from_p,
                   "YCbCrA": self._from_p,
                   "LA": self._from_p}}
        try:
            cases[self.mode][mode](mode)
        except KeyError:
            # No entry in the table means the pair is unsupported.
            raise ValueError("Conversion from %s to %s not implemented !"
                             % (self.mode, mode))
|
Convert the current image to the given *mode*. See :class:`Image`
for a list of available modes.
|
def strip_figures(figure):
    """
    Strips a figure into multiple figures with a trace on each of them

    Parameters:
    -----------
        figure : Figure
            Plotly Figure
    """
    shared_layout = figure['layout']
    # One output figure per trace, all sharing the original layout.
    return [dict(data=[trace], layout=shared_layout)
            for trace in figure['data']]
|
Strips a figure into multiple figures with a trace on each of them
Parameters:
-----------
figure : Figure
Plotly Figure
|
def sliding(self, size, step=1):
    """
    Groups elements in fixed size blocks by passing a sliding window over them.

    The last window has at least one element but may have less than size elements

    :param size: size of sliding window
    :param step: step size between windows
    :return: sequence of sliding windows
    """
    # Delegate to the transformations module; _wrap re-wraps each emitted
    # window in the sequence type so windows support the same API.
    return self._transform(transformations.sliding_t(_wrap, size, step))
|
Groups elements in fixed size blocks by passing a sliding window over them.
The last window has at least one element but may have less than size elements
:param size: size of sliding window
:param step: step size between windows
:return: sequence of sliding windows
|
def _get_tmaster_with_watch(self, topologyName, callback, isWatching):
    """
    Helper function to get pplan with
    a callback. The future watch is placed
    only if isWatching is True.
    """
    path = self.get_tmaster_path(topologyName)
    if isWatching:
        LOG.info("Adding data watch for path: " + path)

    # Registering the DataWatch fires the callback once immediately with the
    # current data; whether it keeps firing on later changes is controlled by
    # the watcher's return value below.
    # pylint: disable=unused-variable, unused-argument
    @self.client.DataWatch(path)
    def watch_tmaster(data, stats):
        """ invoke callback to watch tmaster """
        if data:
            tmaster = TMasterLocation()
            tmaster.ParseFromString(data)
            callback(tmaster)
        else:
            # Node missing or empty: report "no tmaster" to the caller.
            callback(None)
        # Returning False will result in no future watches
        # being triggered. If isWatching is True, then
        # the future watches will be triggered.
        return isWatching
|
Helper function to get pplan with
a callback. The future watch is placed
only if isWatching is True.
|
def has_name_version(self, name: str, version: str) -> bool:
    """Check if there exists a network with the name/version combination in the database."""
    # Build the EXISTS predicate separately for readability; scalar()
    # evaluates it to a single boolean.
    name_version_match = and_(Network.name == name, Network.version == version)
    return self.session.query(exists().where(name_version_match)).scalar()
|
Check if there exists a network with the name/version combination in the database.
|
def getElementsCustomFilter(self, filterFunc, root='root'):
    '''
    getElementsCustomFilter - Scan elements using a provided function

    @param filterFunc <function>(node) - A function that takes an AdvancedTag as an argument, and returns True if some arbitrary criteria is met

    @return - TagCollection of all matching elements
    '''
    (root, isFromRoot) = self._handleRootArg(root)

    matches = []
    # The root node itself is only a candidate when the scan starts at the
    # document root; filterFunc must return exactly True (identity check).
    if isFromRoot is True and filterFunc(root) is True:
        matches.append(root)

    recurse = self.getElementsCustomFilter
    for childNode in root.children:
        if filterFunc(childNode) is True:
            matches.append(childNode)
        # Descend; the recursive call does not re-test childNode itself.
        matches += recurse(filterFunc, childNode)

    return TagCollection(matches)
|
getElementsCustomFilter - Scan elements using a provided function
@param filterFunc <function>(node) - A function that takes an AdvancedTag as an argument, and returns True if some arbitrary criteria is met
@return - TagCollection of all matching elements
|
def parse_PRIK(chunk, encryption_key):
    """Parse PRIK chunk which contains private RSA key"""
    # Decrypt the hex-encoded payload with AES-256 in CBC mode.
    # NOTE(review): the second argument appears to be the IV (first 16 bytes
    # of the encryption key) -- confirm against decode_aes256's signature.
    decrypted = decode_aes256('cbc',
                              encryption_key[:16],
                              decode_hex(chunk.payload),
                              encryption_key)
    # The plaintext wraps the hex key material in literal marker tags.
    hex_key = re.match(br'^LastPassPrivateKey<(?P<hex_key>.*)>LastPassPrivateKey$', decrypted).group('hex_key')
    rsa_key = RSA.importKey(decode_hex(hex_key))
    # Fill in the CRT components (d mod p-1, d mod q-1, q^-1 mod p) that
    # importKey does not populate here.
    rsa_key.dmp1 = rsa_key.d % (rsa_key.p - 1)
    rsa_key.dmq1 = rsa_key.d % (rsa_key.q - 1)
    rsa_key.iqmp = number.inverse(rsa_key.q, rsa_key.p)
    return rsa_key
|
Parse PRIK chunk which contains private RSA key
|
def use_plenary_resource_view(self):
    """Pass through to provider ResourceLookupSession.use_plenary_resource_view"""
    self._object_views['resource'] = PLENARY
    # Forward the view setting to every provider session that supports it;
    # sessions without the method are silently skipped.
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_plenary_resource_view()
        except AttributeError:
            pass
|
Pass through to provider ResourceLookupSession.use_plenary_resource_view
|
def radio_field(*args, **kwargs):
    '''
    Create a WTForms RadioField and tag it with ``input_type``
    ``'radio_field'`` so templates can render it appropriately.
    (Previous docstring, "Get a password", was a copy/paste error.)
    '''
    radio_field = wtforms.RadioField(*args, **kwargs)
    radio_field.input_type = 'radio_field'
    return radio_field
|
Get a password
|
def key_source(self):
    """
    :return: the relation whose primary key values are passed, sequentially, to the
        ``make`` method when populate() is called.
        The default value is the join of the parent relations.
        Users may override to change the granularity or the scope of populate() calls.
    """
    def parent_gen(self):
        # Yield one projected relation per primary parent of the target.
        if self.target.full_table_name not in self.connection.dependencies:
            self.connection.dependencies.load()
        for parent_name, fk_props in self.target.parents(primary=True).items():
            if not parent_name.isdigit():  # simple foreign key
                yield FreeTable(self.connection, parent_name).proj()
            else:
                # Renamed foreign key: resolve the alias node to its
                # grandparent table and project with the attribute renames.
                grandparent = list(self.connection.dependencies.in_edges(parent_name))[0][0]
                yield FreeTable(self.connection, grandparent).proj(**{
                    attr: ref for attr, ref in fk_props['attr_map'].items() if ref != attr})

    # Compute lazily and cache: the key source is the join (*) of all
    # primary parent projections.
    if self._key_source is None:
        parents = parent_gen(self)
        try:
            self._key_source = next(parents)
        except StopIteration:
            raise DataJointError('A relation must have primary dependencies for auto-populate to work') from None
        for q in parents:
            self._key_source *= q
    return self._key_source
|
:return: the relation whose primary key values are passed, sequentially, to the
``make`` method when populate() is called.
The default value is the join of the parent relations.
Users may override to change the granularity or the scope of populate() calls.
|
async def get_entries(self, **kwargs):
    """
    GET /api/entries.{_format}

    Retrieve all entries. It could be filtered by many options.

    :param kwargs: can contain one of the following filters
        archive: '0' or '1', default '0' filter by archived status.
        starred: '0' or '1', default '0' filter by starred status.
        sort: 'created' or 'updated', default 'created'
        order: 'asc' or 'desc', default 'desc'
        page: int default 1 what page you want
        perPage: int default 30 result per page
        tags: list of tags url encoded.
        since: int default 0 from what timestamp you want
        Will returns entries that matches ALL tags
    :return data related to the ext
    """
    # default values
    params = {'access_token': self.token,
              'sort': 'created',
              'order': 'desc',
              'page': 1,
              'perPage': 30,
              'tags': '',
              'since': 0}
    # Each documented filter is validated before overriding its default.
    if 'archive' in kwargs and int(kwargs['archive']) in (0, 1):
        params['archive'] = int(kwargs['archive'])
    if 'starred' in kwargs and int(kwargs['starred']) in (0, 1):
        params['starred'] = int(kwargs['starred'])
    # BUG FIX: 'sort' was documented above but never applied.
    if 'sort' in kwargs and kwargs['sort'] in ('created', 'updated'):
        params['sort'] = kwargs['sort']
    if 'order' in kwargs and kwargs['order'] in ('asc', 'desc'):
        params['order'] = kwargs['order']
    if 'page' in kwargs and isinstance(kwargs['page'], int):
        params['page'] = kwargs['page']
    if 'perPage' in kwargs and isinstance(kwargs['perPage'], int):
        params['perPage'] = kwargs['perPage']
    if 'tags' in kwargs and isinstance(kwargs['tags'], list):
        params['tags'] = ', '.join(kwargs['tags'])
    if 'since' in kwargs and isinstance(kwargs['since'], int):
        params['since'] = kwargs['since']
    path = '/api/entries.{ext}'.format(ext=self.format)
    return await self.query(path, "get", **params)
|
GET /api/entries.{_format}
Retrieve all entries. It could be filtered by many options.
:param kwargs: can contain one of the following filters
archive: '0' or '1', default '0' filter by archived status.
starred: '0' or '1', default '0' filter by starred status.
sort: 'created' or 'updated', default 'created'
order: 'asc' or 'desc', default 'desc'
page: int default 1 what page you want
perPage: int default 30 result per page
tags: list of tags url encoded.
since: int default 0 from what timestamp you want
Will returns entries that matches ALL tags
:return data related to the ext
|
def tick(self):
    """Returns a message to be displayed if game is over, else None"""
    # Move every NPC one step toward the player.
    for npc in self.npcs:
        self.move_entity(npc, *npc.towards(self.player))
    # Resolve collisions: any two entities on the same cell.
    for entity1, entity2 in itertools.combinations(self.entities, 2):
        if (entity1.x, entity1.y) == (entity2.x, entity2.y):
            if self.player in (entity1, entity2):
                return 'you lost on turn %d' % self.turn
            # NPC-on-NPC collision kills both.
            entity1.die()
            entity2.die()
    # Win condition: every NPC has stopped moving.
    if all(npc.speed == 0 for npc in self.npcs):
        return 'you won on turn %d' % self.turn
    self.turn += 1
    # Every 20 turns the player speeds up (lower value = faster, min 1)
    # and the on-screen speed indicator is refreshed.
    if self.turn % 20 == 0:
        self.player.speed = max(1, self.player.speed - 1)
        self.player.display = on_blue(green(bold(unicode_str(self.player.speed))))
|
Returns a message to be displayed if game is over, else None
|
def tabulate(self, format='html', syntax=''):
    '''
    a function to create a table from the class model keyMap

    :param format: string with format for table output (currently html only)
    :param syntax: [optional] string with linguistic syntax; 'javascript'
        maps datatype names to object/array/number conventions
    :return: string with table
    '''
    from tabulate import tabulate as _tabulate

    # define headers
    headers = ['Field', 'Datatype', 'Required', 'Default', 'Examples', 'Conditionals', 'Description']
    rows = []
    default_values = False
    additional_conditions = False
    field_description = False

    def _example_cell(v):
        # Build the "Examples" column text for one keyMap record.
        example_value = ''
        if 'example_values' in v.keys():
            for i in v['example_values']:
                if example_value:
                    example_value += ', '
                if isinstance(i, str):
                    example_value += '"%s"' % i
                else:
                    # BUG FIX: the original appended the enclosing loop's
                    # `value` dict here (a TypeError at runtime); the
                    # example item itself is what belongs in the cell.
                    example_value += str(i)
        elif 'declared_value' in v.keys():
            if isinstance(v['declared_value'], str):
                example_value = '"%s"' % v['declared_value']
            elif isinstance(v['declared_value'], bool):
                example_value = str(v['declared_value']).lower()
            else:
                example_value = v['declared_value']
        else:
            # No example or declared value: fall back to a placeholder
            # based on the datatype.
            if v['value_datatype'] == 'map':
                example_value = '{...}'
            elif v['value_datatype'] == 'list':
                example_value = '[...]'
            elif v['value_datatype'] == 'null':
                example_value = 'null'
        return example_value

    # construct rows
    for key, value in self.keyMap.items():
        key_segments = _segment_path(key)
        if key_segments:
            row = []
            # add field column (indented one space per nesting level)
            field_name = ''
            if len(key_segments) > 1:
                for i in range(1, len(key_segments)):
                    field_name += ' '
            if key_segments[-1] == '0':
                field_name += '<i>item</i>'
            else:
                field_name += key_segments[-1]
            row.append(field_name)
            # add datatype column
            value_datatype = value['value_datatype']
            if 'integer_data' in value.keys():
                if value['integer_data'] and syntax != 'javascript':
                    value_datatype = 'integer'
            elif value['value_datatype'] == 'map':
                if syntax == 'javascript':
                    value_datatype = 'object'
            elif value['value_datatype'] == 'list':
                if syntax == 'javascript':
                    value_datatype = 'array'
                # retrieve datatype of item in list
                item_key = key + '[0]'
                item_datatype = self.keyMap[item_key]['value_datatype']
                if syntax == 'javascript':
                    if item_datatype == 'list':
                        item_datatype = 'array'
                    elif item_datatype == 'map':
                        item_datatype = 'object'
                elif 'integer_data' in self.keyMap[item_key].keys():
                    if self.keyMap[item_key]['integer_data']:
                        item_datatype = 'integer'
                value_datatype += ' of %ss' % item_datatype
            row.append(value_datatype)
            # add required column
            if value['required_field']:
                row.append('yes')
            else:
                row.append('')
            # add default column
            if 'default_value' in value.keys():
                default_values = True
                if isinstance(value['default_value'], str):
                    row.append('"%s"' % value['default_value'])
                elif isinstance(value['default_value'], bool):
                    row.append(str(value['default_value']).lower())
                else:
                    row.append(str(value['default_value']))
            else:
                row.append('')
            # add examples column
            row.append(_example_cell(value))
            # add additional conditions and description columns
            conditions = ''
            description = ''
            for k, v in value.items():
                extra_integer = False
                if k == 'integer_data' and syntax == 'javascript':
                    extra_integer = True
                if k not in ('example_values', 'value_datatype', 'required_field', 'declared_value', 'default_value', 'field_position', 'field_metadata') or extra_integer:
                    add_extra = False
                    if k == 'extra_fields':
                        if v:
                            add_extra = True
                    if k in ('field_description', 'field_title'):
                        field_description = True
                        # field_description wins over field_title
                        if k == 'field_description':
                            description = v
                        elif not description:
                            description = v
                    elif k != 'extra_fields' or add_extra:
                        additional_conditions = True
                        if conditions:
                            conditions += '<br>'
                        condition_value = v
                        if isinstance(v, str):
                            condition_value = '"%s"' % v
                        elif isinstance(v, bool):
                            condition_value = str(v).lower()
                        conditions += '%s: %s' % (k, condition_value)
            row.append(conditions)
            row.append(description)
            # add row to rows
            rows.append(row)
    # add rows for top field
    top_dict = self.keyMap['.']
    if top_dict['extra_fields']:
        rows.append(['<i>**extra fields allowed</i>', '', '', '', '', '', ''])
    if 'max_bytes' in top_dict.keys():
        rows.append(['<i>**max bytes: %s</i>' % top_dict['max_bytes'], '', '', '', '', '', ''])
    # eliminate unused columns (pop order matters: Description is last,
    # then Conditionals; Default is at index 3)
    if not field_description:
        headers.pop()
    if not additional_conditions:
        headers.pop()
    if not default_values:
        headers.pop(3)
    for row in rows:
        if not field_description:
            row.pop()
        if not additional_conditions:
            row.pop()
        if not default_values:
            row.pop(3)
    # construct table html
    table_html = _tabulate(rows, headers, tablefmt='html')
    # add links to urls in text
    table_html = _add_links(table_html)
    return table_html
|
a function to create a table from the class model keyMap
:param format: string with format for table output
:param syntax: [optional] string with linguistic syntax
:return: string with table
|
def sodium_unpad(s, blocksize):
    """
    Remove ISO/IEC 7816-4 padding from the input byte array ``s``

    :param s: input bytes string
    :type s: bytes
    :param blocksize:
    :type blocksize: int
    :return: unpadded string
    :rtype: bytes
    """
    ensure(isinstance(s, bytes),
           raising=exc.TypeError)
    ensure(isinstance(blocksize, integer_types),
           raising=exc.TypeError)
    s_len = len(s)
    # Out-parameter: libsodium writes the unpadded length into u_len[0].
    u_len = ffi.new("size_t []", 1)
    rc = lib.sodium_unpad(u_len, s, s_len, blocksize)
    # Non-zero return code means the padding was malformed.
    if rc != 0:
        raise exc.CryptoError("Unpadding failure")
    return s[:u_len[0]]
|
Remove ISO/IEC 7816-4 padding from the input byte array ``s``
:param s: input bytes string
:type s: bytes
:param blocksize:
:type blocksize: int
:return: unpadded string
:rtype: bytes
|
def exists(self, key, **opts):
    """Return whether *key* is present in the cache and not expired."""
    key, store = self._expand_opts(key, opts)
    data = store.get(key)
    # A per-call max_age in opts may mark the entry expired; we deliberately
    # do not delete the stored value in that case.
    return bool(data) and not self._has_expired(data, opts)
|
Return if a key exists in the cache.
|
def loadstore(self, addrs, length=1):
    """
    Load and store address in order given.

    :param addrs: iteratable of address tuples: [(loads, stores), ...]
    :param length: will load and store all bytes between addr and
        addr+length (for each address)
    :raises ValueError: when *addrs* is not iterable
    """
    if isinstance(addrs, Iterable):
        # Delegate straight to the first cache level.
        self.first_level.loadstore(addrs, length=length)
    else:
        raise ValueError("addr must be iteratable")
|
Load and store address in order given.
:param addrs: iteratable of address tuples: [(loads, stores), ...]
:param length: will load and store all bytes between addr and
addr+length (for each address)
|
def tag_image(self, image, target_image, force=False):
    """
    tag provided image with specified image_name, registry and tag

    :param image: str or ImageName, image to tag
    :param target_image: ImageName, new name for the image
    :param force: bool, force tag the image?
    :return: str, image (reg.om/img:v1)
    :raises RuntimeError: when the docker daemon reports the tag failed
    """
    logger.info("tagging image '%s' as '%s'", image, target_image)
    logger.debug("image = '%s', target_image_name = '%s'", image, target_image)
    if not isinstance(image, ImageName):
        image = ImageName.parse(image)
    if image != target_image:
        response = self.d.tag(
            image.to_str(),
            target_image.to_str(tag=False),
            tag=target_image.tag,
            force=force)  # returns True/False
        if not response:
            logger.error("failed to tag image")
            # BUG FIX: the format arguments must be a parenthesized tuple;
            # the original applied '%' to only the first value and raised
            # TypeError instead of the intended RuntimeError message.
            raise RuntimeError("Failed to tag image '%s': target_image = '%s'" %
                              (image.to_str(), target_image))
    else:
        logger.debug('image already tagged correctly, nothing to do')
    return target_image.to_str()
|
tag provided image with specified image_name, registry and tag
:param image: str or ImageName, image to tag
:param target_image: ImageName, new name for the image
:param force: bool, force tag the image?
:return: str, image (reg.om/img:v1)
|
def repeat(self, count=1):
    '''Repeat the entire audio count times.

    Parameters
    ----------
    count : int, default=1
        The number of times to repeat the audio.

    Raises
    ------
    ValueError
        If count is not a positive integer.
    '''
    if not isinstance(count, int) or count < 1:
        # Fixed typo in the original message ("postive").
        raise ValueError("count must be a positive integer.")
    effect_args = ['repeat', '{}'.format(count)]
    self.effects.extend(effect_args)
    self.effects_log.append('repeat')
|
Repeat the entire audio count times.
Parameters
----------
count : int, default=1
The number of times to repeat the audio.
|
def _load_tcmps_lib():
    """
    Load global singleton of tcmps lib handler.

    This function is not used at the top level, so that the shared library
    is loaded lazily only when needed.

    Returns the ctypes CDLL handle, or None when the library is unavailable
    (unsupported macOS version or failed load).
    """
    global _g_TCMPS_LIB
    if _g_TCMPS_LIB is None:
        # This library requires macOS 10.14 or above
        if _mac_ver() < (10, 14):
            return None
        # The symbols defined in libtcmps are now exposed directly by
        # libunity_shared. Eventually the object_detector and
        # activity_classifier toolkits will use the same Python/C++ bridge as
        # the other toolkits, and this usage of ctypes will go away.
        file_dir = _os.path.dirname(__file__)
        lib_path = _os.path.abspath(_os.path.join(file_dir, _os.pardir, 'libunity_shared.dylib'))
        try:
            _g_TCMPS_LIB = _ctypes.CDLL(lib_path, _ctypes.RTLD_LOCAL)
        except OSError:
            # Library missing or unloadable: leave the singleton as None.
            pass
    return _g_TCMPS_LIB
|
Load global singleton of tcmps lib handler.
This function is used not used at the top level, so
that the shared library is loaded lazily only when needed.
|
def determine_apache_port(public_port, singlenode_mode=False):
    '''
    Description: Determine correct apache listening port based on public IP +
    state of the cluster.

    public_port: int: standard public port for given service
    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the HAProxy service
    '''
    # Shift down by 10 when HAProxy fronts the service: either forced by
    # singlenode_mode, or because the unit is actually clustered/peered.
    # Short-circuit keeps peer_units()/is_clustered() unevaluated in
    # singlenode mode, matching the original if/elif behavior.
    shuffle = singlenode_mode or len(peer_units()) > 0 or is_clustered()
    return public_port - 10 if shuffle else public_port
|
Description: Determine correct apache listening port based on public IP +
state of the cluster.
public_port: int: standard public port for given service
singlenode_mode: boolean: Shuffle ports when only a single unit is present
returns: int: the correct listening port for the HAProxy service
|
def start_txn(self, txn_name=None):
    '''
    Request new transaction from repository, init new Transaction,
    store in self.txns

    Args:
        txn_name (str): human name for transaction

    Return:
        (Transaction): returns intance of newly created transaction
        NOTE(review): if the repository does not answer 201 this falls
        through and implicitly returns None -- confirm callers handle that.
    '''
    # if no name provided, create one
    if not txn_name:
        txn_name = uuid.uuid4().hex
    # request new transaction
    txn_response = self.api.http_request('POST','%s/fcr:tx' % self.root, data=None, headers=None)
    # if 201, transaction was created
    if txn_response.status_code == 201:
        txn_uri = txn_response.headers['Location']
        logger.debug("spawning transaction: %s" % txn_uri)
        # init new Transaction, and pass Expires header
        txn = Transaction(
            self, # pass the repository
            txn_name,
            txn_uri,
            expires = txn_response.headers['Expires'])
        # append to self
        self.txns[txn_name] = txn
        # return
        return txn
|
Request new transaction from repository, init new Transaction,
store in self.txns
Args:
txn_name (str): human name for transaction
Return:
(Transaction): returns intance of newly created transaction
|
def _rebuffer(self):
    """
    (very internal) refill the repeat buffer

    Pulls ``stride`` items from the wrapped iterator, recording for each
    either the value or the exception it raised, and stores the pair of
    lists repeated ``n`` times.  (Python 2 syntax: xrange, .next(),
    comma-form except.)
    """
    results = []
    exceptions = []
    for i in xrange(self.stride):
        try:
            results.append(self.iterable.next())
            exceptions.append(False)
        except Exception, excp:
            # Capture the exception itself so it can be re-raised later
            # on each repeat pass.
            results.append(excp)
            exceptions.append(True)
    self._repeat_buffer = repeat((results, exceptions), self.n)
|
(very internal) refill the repeat buffer
|
def get_group(self):
    """Get the group of the Dataset.

    Returns
    -------
    group : numpy array or None
        Group size of each group.
    """
    if self.group is None:
        # The raw field holds cumulative boundary indices; np.diff turns
        # them into per-group sizes.
        boundaries = self.get_field('group')
        self.group = None if boundaries is None else np.diff(boundaries)
    return self.group
|
Get the group of the Dataset.
Returns
-------
group : numpy array or None
Group size of each group.
|
def finding_path(cls, organization, source, finding):
    """Return a fully-qualified finding string."""
    template = "organizations/{organization}/sources/{source}/findings/{finding}"
    return google.api_core.path_template.expand(
        template,
        organization=organization,
        source=source,
        finding=finding,
    )
|
Return a fully-qualified finding string.
|
def do_heavy_work(self, block):
    """
    Note: Expects Compressor Block like objects

    Attempts to send the block to this sender's destinations, recording
    success and per-destination verification data on the block itself.
    """
    destinations = self.destinations()
    ''' FIXME currently we return block whether it was correctly processed or not because MailSenders are chained
    and not doing that would mean other wouldn't be able to try.'''
    # Only handle the block if every one of our destinations is among the
    # block's intended destinations.
    if not set(destinations).issubset(block.destinations):
        self.log.debug("Block not for any of the associated destinations: %s", destinations)
    else:
        try:
            self.do_send(block)
            # mark the block as sent by this sender
            block.send_destinations.extend(destinations)
            verif_data = self.verification_data()
            if verif_data is not None:
                for destination in destinations:
                    block.destinations_verif_data[destination] = verif_data
        except SendingError:
            # Best-effort: log and fall through so chained senders can retry.
            self.log.exception("Failed to send block (%s) to destination (%s)", block, destinations)
    return block
|
Note: Expects Compressor Block like objects
|
def WithLimitedCallFrequency(min_time_between_calls):
    """Function call rate-limiting decorator.

    This decorator ensures that the wrapped function will be called at most
    once in min_time_between_calls time for the same set of arguments. For all
    excessive calls a previous cached return value will be returned.

    Suppose we use the decorator like this:
    @cache.WithLimitedCallFrequency(rdfvalue.Duration("30s"))
    def Foo(id):
      ...

    If Foo(42) is called and then Foo(42) is called again within 30 seconds, then
    the second call will simply return the cached return value of the first.

    If Foo(42) is called and then Foo(43) is called within 30 seconds, the
    wrapped function will be properly called in both cases, since these Foo calls
    have different arguments sets.

    If Foo(42) is called and takes a long time to finish, and another
    Foo(42) call is done in another thread, then the latter call will wait for
    the first one to finish and then return the cached result value. I.e. the
    wrapped function will be called just once, thus keeping the guarantee of
    at most 1 run in min_time_between_calls.

    NOTE 1: this function becomes a trivial pass-through and does no caching if
    module-level WITH_LIMITED_CALL_FREQUENCY_PASS_THROUGH variable is set to
    True. This is used in testing.

    NOTE 2: all decorated functions' arguments have to be hashable.

    Args:
      min_time_between_calls: An rdfvalue.Duration specifying the minimal time to
        pass between 2 consecutive function calls with same arguments.

    Returns:
      A Python function decorator.
    """
    def Decorated(f):
        """Actual decorator implementation."""
        lock = threading.RLock()
        prev_times = {}
        prev_results = {}
        result_locks = {}

        @functools.wraps(f)
        def Fn(*args, **kwargs):
            """Wrapper around the decorated function."""
            if WITH_LIMITED_CALL_FREQUENCY_PASS_THROUGH:
                # This effectively turns off the caching.
                min_time = rdfvalue.Duration(0)
            else:
                min_time = min_time_between_calls
            key = (args, tuple(sorted(kwargs.items())))
            now = rdfvalue.RDFDatetime.Now()
            with lock:
                # Evict entries older than min_time.
                for k, prev_time in list(prev_times.items()):
                    if now - prev_time >= min_time:
                        prev_times.pop(k)
                        prev_results.pop(k, None)
                        result_locks.pop(k, None)
                try:
                    # We eliminated all the old entries, so if the key is present
                    # in the cache, it means that the data is fresh enough to be used.
                    prev_time = prev_times[key]
                    return prev_results[key]
                except KeyError:
                    # Cache miss: remember there was no previous call so the
                    # double-check below knows whether another thread filled
                    # the cache while we waited for result_lock.
                    # (Removed unreachable `should_call` dead code from the
                    # original implementation.)
                    prev_time = None
                try:
                    result_lock = result_locks[key]
                except KeyError:
                    result_lock = threading.RLock()
                    result_locks[key] = result_lock
            # Per-key lock: concurrent callers with the same arguments wait
            # for the in-flight call instead of invoking f again.
            with result_lock:
                t = prev_times.get(key)
                if t == prev_time:
                    result = f(*args, **kwargs)
                    with lock:
                        prev_times[key] = rdfvalue.RDFDatetime.Now()
                        prev_results[key] = result
                    return result
                else:
                    # Another thread already refreshed the cache entry.
                    return prev_results[key]
        return Fn
    return Decorated
|
Function call rate-limiting decorator.
This decorator ensures that the wrapped function will be called at most
once in min_time_between_calls time for the same set of arguments. For all
excessive calls a previous cached return value will be returned.
Suppose we use the decorator like this:
@cache.WithLimitedCallFrequency(rdfvalue.Duration("30s"))
def Foo(id):
...
If Foo(42) is called and then Foo(42) is called again within 30 seconds, then
the second call will simply return the cached return value of the first.
If Foo(42) is called and then Foo(43) is called within 30 seconds, the
wrapped function will be properly called in both cases, since these Foo calls
have different arguments sets.
If Foo(42) is called and takes a long time to finish, and another
Foo(42) call is done in another thread, then the latter call will wait for
the first one to finish and then return the cached result value. I.e. the
wrapped function will be called just once, thus keeping the guarantee of
at most 1 run in min_time_between_calls.
NOTE 1: this function becomes a trivial pass-through and does no caching if
module-level WITH_LIMITED_CALL_FREQUENCY_PASS_THROUGH variable is set to
True. This is used in testing.
NOTE 2: all decorated functions' arguments have to be hashable.
Args:
min_time_between_calls: An rdfvalue.Duration specifying the minimal time to
pass between 2 consecutive function calls with same arguments.
Returns:
A Python function decorator.
|
def refs(self, multihash, **kwargs):
    """Returns a list of hashes of objects referenced by the given hash.

    .. code-block:: python

        >>> c.refs('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
        [{'Ref': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7 … cNMV', 'Err': ''},
         …
         {'Ref': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTi … eXJY', 'Err': ''}]

    Parameters
    ----------
    multihash : str
        Path to the object(s) to list refs from

    Returns
    -------
    list
    """
    # Single positional argument passed through to the /refs endpoint,
    # decoded from the JSON response.
    return self._client.request('/refs', (multihash,), decoder='json', **kwargs)
|
Returns a list of hashes of objects referenced by the given hash.
.. code-block:: python
>>> c.refs('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
[{'Ref': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7 … cNMV', 'Err': ''},
…
{'Ref': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTi … eXJY', 'Err': ''}]
Parameters
----------
multihash : str
Path to the object(s) to list refs from
Returns
-------
list
|
def SystemCoin():
    """
    Register AntCoin

    Returns:
        RegisterTransaction:
    """
    # Total supply: sum of the per-interval generation amounts times the
    # decrement interval.
    amount = Fixed8.FromDecimal(sum(Blockchain.GENERATION_AMOUNT) * Blockchain.DECREMENT_INTERVAL)
    # Point at infinity marks the asset as having no owner key.
    owner = ECDSA.secp256r1().Curve.Infinity
    precision = 8
    admin = Crypto.ToScriptHash(PUSHF)
    # Name payload is a localized JSON array (zh-CN and en entries).
    return RegisterTransaction([], [], AssetType.UtilityToken,
                               "[{\"lang\":\"zh-CN\",\"name\":\"小蚁币\"},{\"lang\":\"en\",\"name\":\"AntCoin\"}]",
                               amount, precision, owner, admin)
|
Register AntCoin
Returns:
RegisterTransaction:
|
def install():
    """
    Installs the base system and Python requirements for the entire server.

    Runs remotely via fabric: installs apt packages, pip tooling, and sets
    up virtualenvwrapper in the deploy user's shell profile.
    """
    # Install system requirements
    sudo("apt-get update -y -q")
    apt("nginx libjpeg-dev python-dev python-setuptools git-core "
        "postgresql libpq-dev memcached supervisor python-pip")
    run("mkdir -p /home/%s/logs" % env.user)
    # Install Python requirements
    sudo("pip install -U pip virtualenv virtualenvwrapper mercurial")
    # Set up virtualenv
    run("mkdir -p %s" % env.venv_home)
    run("echo 'export WORKON_HOME=%s' >> /home/%s/.bashrc" % (env.venv_home,
                                                              env.user))
    run("echo 'source /usr/local/bin/virtualenvwrapper.sh' >> "
        "/home/%s/.bashrc" % env.user)
    print(green("Successfully set up git, mercurial, pip, virtualenv, "
                "supervisor, memcached.", bold=True))
|
Installs the base system and Python requirements for the entire server.
|
def writeint2dnorm(filename, Intensity, Error=None):
    """Write an intensity matrix (and optional error matrix) to disk.

    The output format is selected from the file extension:

    - ``.npz``: a numpy archive holding ``Intensity`` (and ``Error``)
    - ``.mat``: a Matlab file holding the same variables
    - anything else: plain text; the error matrix, if given, goes to a
      second file with ``_error`` appended to the base name.

    Inputs
    ------
    filename: string
        the name of the file
    Intensity: np.ndarray
        the intensity matrix
    Error: np.ndarray, optional
        the error matrix (can be ``None``, if no error matrix is to be saved)

    Output
    ------
    None
    """
    payload = {'Intensity': Intensity}
    if Error is not None:
        payload['Error'] = Error
    upper = filename.upper()
    if upper.endswith('.NPZ'):
        np.savez(filename, **payload)
    elif upper.endswith('.MAT'):
        scipy.io.savemat(filename, payload)
    else:
        # Text output: one file per matrix.
        np.savetxt(filename, Intensity)
        if Error is not None:
            base, ext = os.path.splitext(filename)
            np.savetxt(base + '_error' + ext, Error)
|
Save the intensity and error matrices to a file
Inputs
------
filename: string
the name of the file
Intensity: np.ndarray
the intensity matrix
Error: np.ndarray, optional
the error matrix (can be ``None``, if no error matrix is to be saved)
Output
------
None
|
def _check_for_default_values(fname, arg_val_dict, compat_args):
    """
    Check that the keys in `arg_val_dict` are mapped to their
    default values as specified in `compat_args`.

    Note that this function is to be called only when it has been
    checked that arg_val_dict.keys() is a subset of compat_args
    """
    for key, value in arg_val_dict.items():
        try:
            default = compat_args[key]
            # '==' may be overridden on the left-hand object (e.g. numpy
            # arrays), so avoid comparing a value against None directly and
            # verify the comparison really produced a boolean.
            if (value is None) != (default is None):
                match = False
            else:
                match = (value == default)
            if not is_bool(match):
                raise ValueError("'match' is not a boolean")
        except ValueError:
            # Direct comparison failed or was ambiguous; fall back to
            # identity comparison.
            match = (arg_val_dict[key] is compat_args[key])
        if not match:
            raise ValueError(("the '{arg}' parameter is not "
                              "supported in the pandas "
                              "implementation of {fname}()".
                              format(fname=fname, arg=key)))
|
Check that the keys in `arg_val_dict` are mapped to their
default values as specified in `compat_args`.
Note that this function is to be called only when it has been
checked that arg_val_dict.keys() is a subset of compat_args
|
def render_args(arglst, argdct):
    '''Render arguments for command-line invocation.

    arglst: A list of Argument objects (specifies order)
    argdct: A mapping of argument names to values (specifies rendered values)
    '''
    pieces = []
    for arg in arglst:
        if arg.name not in argdct:
            continue
        rendered = arg.render(argdct[arg.name])
        if rendered:
            # Each rendered argument is preceded by a single space.
            pieces.append(' ')
            pieces.append(rendered)
    return ''.join(pieces)
|
Render arguments for command-line invocation.
arglst: A list of Argument objects (specifies order)
argdct: A mapping of argument names to values (specifies rendered values)
|
def filtered(self, allowed):
    """
    Return a new Options object restricted to the specified keys.

    The kwargs are copied into a fresh instance rather than mutated in
    place, because option expansion happens at initialization time and
    mutating self.kwargs would be unsafe.
    """
    selected = {key: val for key, val in self.kwargs.items() if key in allowed}
    return self.__class__(key=self.key,
                          allowed_keywords=self.allowed_keywords,
                          merge_keywords=self.merge_keywords,
                          **selected)
|
Return a new Options object that is filtered by the specified
list of keys. Mutating self.kwargs to filter is unsafe due to
the option expansion that occurs on initialization.
|
def _box_col_values(self, values, items):
    """
    Wrap a column's raw values in the sliced constructor, carrying this
    object's index and the column label as the name.
    """
    box = self._constructor_sliced
    return box(values, index=self.index, name=items, fastpath=True)
|
Provide boxed values for a column.
|
def prev_close(self):
    """
    [float] Previous trading day's close price (昨日收盘价).

    Prefers the snapshot value in ``self._data``; otherwise the value is
    fetched lazily from the data proxy and cached on the instance.
    """
    try:
        return self._data['prev_close']
    except (ValueError, KeyError):
        # No snapshot value available; fall through to the data proxy.
        pass
    if self._prev_close is None:
        trading_dt = Environment.get_instance().trading_dt
        data_proxy = Environment.get_instance().data_proxy
        self._prev_close = data_proxy.get_prev_close(
            self._instrument.order_book_id, trading_dt)
    return self._prev_close
|
[float] 昨日收盘价
|
def gpp_soco(V, E):
    """gpp -- model for the graph partitioning problem in soco

    Parameters:
        - V: set/list of nodes in the graph
        - E: set/list of edges in the graph

    Returns a model, ready to be solved.
    """
    model = Model("gpp model -- soco")
    x, s, z = {}, {}, {}
    for v in V:
        x[v] = model.addVar(vtype="B", name="x(%s)" % v)
    for (u, v) in E:
        s[u, v] = model.addVar(vtype="C", name="s(%s,%s)" % (u, v))
        z[u, v] = model.addVar(vtype="C", name="z(%s,%s)" % (u, v))
    # Balanced partition: exactly half of the nodes on each side.
    model.addCons(quicksum(x[v] for v in V) == len(V)/2, "Partition")
    for (u, v) in E:
        # (x_u + x_v - 1)^2 is 1 when both endpoints lie on the same side,
        # (x_v - x_u)^2 is 1 when the edge is cut; s and z bound them and
        # must sum to one.
        model.addCons((x[u] + x[v] - 1)*(x[u] + x[v] - 1) <= s[u, v], "S(%s,%s)" % (u, v))
        model.addCons((x[v] - x[u])*(x[v] - x[u]) <= z[u, v], "Z(%s,%s)" % (u, v))
        model.addCons(s[u, v] + z[u, v] == 1, "P(%s,%s)" % (u, v))
    # NOTE: triangle inequalities over s/z were tried here but appeared to
    # make the model slower, so they are omitted.
    model.setObjective(quicksum(z[u, v] for (u, v) in E), "minimize")
    model.data = x, s, z
    return model
|
gpp -- model for the graph partitioning problem in soco
Parameters:
- V: set/list of nodes in the graph
- E: set/list of edges in the graph
Returns a model, ready to be solved.
|
def manage_all(self, *args, **kwargs):
    """
    Run manage() once per unique site default database, skipping sites
    that are not assigned to the current host (when a host/site mapping
    is configured).
    """
    for site, _site_data in self.iter_unique_databases(site='all'):
        if self.verbose:
            print('-'*80, file=sys.stderr)
            print('site:', site, file=sys.stderr)
        if self.env.available_sites_by_host:
            hostname = self.current_hostname
            sites_on_host = self.env.available_sites_by_host.get(hostname, [])
            if sites_on_host and site not in sites_on_host:
                self.vprint('skipping site:', site, sites_on_host, file=sys.stderr)
                continue
        self.manage(*args, **kwargs)
|
Runs manage() across all unique site default databases.
|
def get_config_items(self):
    """
    Return the current configuration as a :class:`tuple` of
    ``(option, value)`` pairs, e.g.::

        (('option1', value1), ('option2', value2))
    """
    option_names = (
        'settings', 'context_class', 'interfaces', 'logging', 'name',
        'init_handler', 'sigusr1_handler', 'sigusr2_handler',
    )
    return tuple((name, getattr(self, name)) for name in option_names)
|
Return current configuration as a :class:`tuple` with
option-value pairs.
::
(('option1', value1), ('option2', value2))
|
def _set_authenticate(self, v, load=False):
    """
    Setter method for authenticate, mapped from YANG variable /ntp/authenticate (empty)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_authenticate is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_authenticate() directly.
    """
    # Auto-generated pyangbind setter: coerce the incoming value into the
    # YANG 'empty' type (represented as a boolean) and store it.
    if hasattr(v, "_utype"):
        # Already a YANG-typed value; unwrap it to its base type first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="authenticate", rest_name="authenticate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable NTP authentication. Default = Disabled', u'cli-full-command': None, u'callpoint': u'ntp_auth_cp', u'sort-priority': u'32', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ntp', defining_module='brocade-ntp', yang_type='empty', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """authenticate must be of a type compatible with empty""",
            'defined-type': "empty",
            'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="authenticate", rest_name="authenticate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable NTP authentication. Default = Disabled', u'cli-full-command': None, u'callpoint': u'ntp_auth_cp', u'sort-priority': u'32', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ntp', defining_module='brocade-ntp', yang_type='empty', is_config=True)""",
        })
    self.__authenticate = t
    # Notify the parent container, when present, that a child changed.
    if hasattr(self, '_set'):
        self._set()
|
Setter method for authenticate, mapped from YANG variable /ntp/authenticate (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_authenticate is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_authenticate() directly.
|
def urlparts(self):
    ''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.

    The tuple contains (scheme, host, path, query_string and fragment),
    but the fragment is always empty because it is not visible to the
    server. '''
    env = self.environ
    # Prefer proxy-forwarded headers over the raw WSGI values.
    scheme = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http')
    host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
    if not host:
        # HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
        host = env.get('SERVER_NAME', '127.0.0.1')
        port = env.get('SERVER_PORT')
        default_port = '80' if scheme == 'http' else '443'
        if port and port != default_port:
            host = host + ':' + port
    return UrlSplitResult(scheme, host, urlquote(self.fullpath),
                          env.get('QUERY_STRING'), '')
|
The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server.
|
def grab_hidden_properties(self):
    # type: () -> dict
    """
    A one-shot access to hidden properties (the field is then destroyed)

    :return: A copy of the hidden properties dictionary on the first call
    :raise AttributeError: On any call after the first one
    """
    snapshot = self.__hidden_properties.copy()
    # Empty, then remove the backing field so any further call raises
    # AttributeError.
    self.__hidden_properties.clear()
    del self.__hidden_properties
    return snapshot
|
A one-shot access to hidden properties (the field is then destroyed)
:return: A copy of the hidden properties dictionary on the first call
:raise AttributeError: On any call after the first one
|
def file_hash(path, hash_type="md5", block_size=65536, hex_digest=True):
    """
    Hash a given file with md5, or any other algorithm, and return the
    digest. You can run `hashlib.algorithms_available` to see which are
    available on your system (unless you have an archaic python version,
    you poor soul).

    This function is designed to be non memory intensive: the file is
    read and hashed in fixed-size chunks.

    .. code:: python

        reusables.file_hash("test_structure.zip")
        # '61e387de305201a2c915a4f4277d6663'

    :param path: location of the file to hash
    :param hash_type: string name of the hash to use
    :param block_size: amount of bytes to add to hasher at a time
    :param hex_digest: returned as hexdigest, false will return digest
    :return: file's hash
    """
    hasher = hashlib.new(hash_type)
    with open(path, "rb") as infile:
        # iter() with a sentinel yields fixed-size chunks until EOF (b"").
        for chunk in iter(lambda: infile.read(block_size), b""):
            hasher.update(chunk)
    return hasher.hexdigest() if hex_digest else hasher.digest()
|
Hash a given file with md5, or any other and return the hex digest. You
can run `hashlib.algorithms_available` to see which are available on your
system (unless you have an archaic python version, you poor soul).
This function is designed to be non memory intensive.
.. code:: python
reusables.file_hash("test_structure.zip")
# '61e387de305201a2c915a4f4277d6663'
:param path: location of the file to hash
:param hash_type: string name of the hash to use
:param block_size: amount of bytes to add to hasher at a time
:param hex_digest: returned as hexdigest, false will return digest
:return: file's hash
|
def text(self, value):
    """Set the text value.

    Also updates the edit timestamp and marks the object as dirty.

    Args:
        value (str): Text value.
    """
    self._text = value
    edited_at = datetime.datetime.utcnow()
    self.timestamps.edited = edited_at
    self.touch(True)
|
Set the text value.
Args:
value (str): Text value.
|
def __setUpTrakers(self):
    ''' Create one OneTraker per configured symbol. '''
    for sym in self.symbols:
        self.__trakers[sym] = OneTraker(sym, self, self.buyingRatio)
|
set symbols
|
def _POTUpdateBuilder(env, **kw):
    """ Creates `POTUpdate` builder object """
    import SCons.Action
    from SCons.Tool.GettextCommon import _POTargetFactory
    # Fill in the builder keywords before delegating construction.
    kw.update(
        action=SCons.Action.Action(_update_pot_file, None),
        suffix='$POTSUFFIX',
        target_factory=_POTargetFactory(env, alias='$POTUPDATE_ALIAS').File,
        emitter=_pot_update_emitter,
    )
    return _POTBuilder(**kw)
|
Creates `POTUpdate` builder object
|
def api_exception(http_code):
    """Convenience decorator to associate HTTP status codes with :class:`.ApiError` subclasses.

    :param http_code: (int) HTTP status code.
    :return: wrapper function.
    """
    def register(*args):
        exc_class = args[0]
        # Record the class in the shared status-code lookup table.
        ErrorMapping.mapping[http_code] = exc_class
        return exc_class
    return register
|
Convenience decorator to associate HTTP status codes with :class:`.ApiError` subclasses.
:param http_code: (int) HTTP status code.
:return: wrapper function.
|
def draw_state(ax, p, text='', l=0.5, alignment='left', label_displacement=1.0,
               fontsize=25, atoms=None, atoms_h=0.125, atoms_size=5, **kwds):
    r"""Draw a quantum state (a horizontal level) for energy level diagrams."""
    half = l/2.0
    # The level itself: a horizontal segment of width l centred on p.
    ax.plot([p[0]-half, p[0]+half], [p[1], p[1]], color='black', **kwds)
    # Optional label to the left or right of the level.
    if text != '':
        if alignment == 'left':
            ax.text(p[0] - half - label_displacement, p[1], text,
                    horizontalalignment='right', verticalalignment='center',
                    color='black', fontsize=fontsize)
        elif alignment == 'right':
            ax.text(p[0] + half + label_displacement, p[1], text,
                    horizontalalignment='left', color='black',
                    fontsize=fontsize)
    # Optional atoms drawn as dots slightly above the level.
    if atoms is not None:
        atoms_x = np.linspace(p[0]-half, p[0]+half, atoms)
        atoms_y = [p[1] + atoms_h] * atoms
        ax.plot(atoms_x, atoms_y, "ko", ms=atoms_size)
|
r"""Draw a quantum state for energy level diagrams.
|
def create_directory(self):
    """
    Create a directory under the selected directory (if the selected item
    is a file, its parent directory is used instead).
    """
    src = self.get_current_path()
    name, status = QtWidgets.QInputDialog.getText(
        self.tree_view, _('Create directory'), _('Name:'),
        QtWidgets.QLineEdit.Normal, '')
    if not status:
        # Dialog was cancelled.
        return
    # Refuse names that would alias the current or parent directory.
    if name in ('.', '..'):
        QtWidgets.QMessageBox.critical(
            self.tree_view, _("Error"), _("Wrong directory name"))
        return
    if os.path.isfile(src):
        src = os.path.dirname(src)
    dir_name = os.path.join(src, name)
    try:
        os.makedirs(dir_name, exist_ok=True)
    except OSError as e:
        QtWidgets.QMessageBox.warning(
            self.tree_view, _('Failed to create directory'),
            _('Failed to create directory: "%s".\n\n%s') % (dir_name, str(e)))
|
Creates a directory under the selected directory (if the selected item
is a file, the parent directory is used).
|
def run(self, args):
    """
    Remove permissions from the user with user_full_name or email on the remote project with project_name.

    Exactly one of args.email / args.username identifies the user (the
    other is None).

    :param args Namespace arguments parsed from the command line
    """
    email = args.email
    username = args.username
    project = self.fetch_project(args, must_exist=True, include_children=False)
    user = self.remote_store.lookup_or_register_user_by_email_or_username(email, username)
    self.remote_store.revoke_user_project_permission(project, user)
    print(u'Removed permissions from user {} for project {}.'.format(
        user.full_name, project.name))
|
Remove permissions from the user with user_full_name or email on the remote project with project_name.
:param args Namespace arguments parsed from the command line
|
def allocate_resource_id(self):
    """id = d.allocate_resource_id()

    Allocate a new X resource id number ID.

    Raises ResourceIDError if there are no free resource ids.
    """
    self.resource_id_lock.acquire()
    try:
        candidate = self.last_resource_id
        # Scan forward for the first unused id, wrapping at the mask;
        # arriving back at the starting point means the space is full.
        while candidate in self.resource_ids:
            candidate = candidate + 1
            if candidate > self.info.resource_id_mask:
                candidate = 0
            if candidate == self.last_resource_id:
                raise error.ResourceIDError('out of resource ids')
        self.resource_ids[candidate] = None
        self.last_resource_id = candidate
        return self.info.resource_id_base | candidate
    finally:
        self.resource_id_lock.release()
|
id = d.allocate_resource_id()
Allocate a new X resource id number ID.
Raises ResourceIDError if there are no free resource ids.
|
def batch_taxids(list_of_names):
    """
    Opposite of batch_taxonomy(): convert a list of Latin names to taxids,
    one NCBI Entrez query per name.
    """
    for latin_name in list_of_names:
        handle = Entrez.esearch(db='Taxonomy', term=latin_name, retmode="xml")
        records = Entrez.read(handle)
        # The first hit of the search is taken as the taxid.
        yield records["IdList"][0]
|
Opposite of batch_taxonomy():
Convert list of Latin names to taxids
|
def _set_vnetwork(self, v, load=False):
    """
    Setter method for vnetwork, mapped from YANG variable /show/vnetwork (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_vnetwork is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_vnetwork() directly.

    YANG Description: Shows virtual infrastructure information
    """
    # Auto-generated pyangbind setter: coerce the incoming value into the
    # vnetwork container type and store it.
    if hasattr(v, "_utype"):
        # Already a YANG-typed value; unwrap it to its base type first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=vnetwork.vnetwork, is_container='container', presence=False, yang_name="vnetwork", rest_name="vnetwork", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Shows virtual infrastructure information', u'action': u'pgs'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """vnetwork must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=vnetwork.vnetwork, is_container='container', presence=False, yang_name="vnetwork", rest_name="vnetwork", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Shows virtual infrastructure information', u'action': u'pgs'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='container', is_config=True)""",
        })
    self.__vnetwork = t
    # Notify the parent container, when present, that a child changed.
    if hasattr(self, '_set'):
        self._set()
|
Setter method for vnetwork, mapped from YANG variable /show/vnetwork (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_vnetwork is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vnetwork() directly.
YANG Description: Shows virtual infrastructure information
|
def pipe_worker(pipename, filename, object_type, query, format_string, unique=False):
    """
    Serve formatted search results through a named pipe, forever.

    Each time a reader opens the pipe, the database is queried and every
    matching object is written as one ``format_string``-rendered line.

    :param pipename: display name used in log messages
    :param filename: path of the named pipe (FIFO) to create
    :param object_type: document class; instantiated, then ``search``-ed
    :param query: keyword arguments passed to ``object_type.search``
    :param format_string: template rendered with each object's fields
    :param unique: when True, suppress duplicate output lines per request
    """
    print_notification("[{}] Starting pipe".format(pipename))
    object_type = object_type()
    try:
        while True:
            uniq = set()
            # Re-create the FIFO fresh for every client connection.
            if os.path.exists(filename):
                os.remove(filename)
            os.mkfifo(filename)
            # Opening a FIFO for writing blocks until a reader connects.
            with open(filename, 'w') as pipe:
                print_success("[{}] Providing data".format(pipename))
                objects = object_type.search(**query)
                for obj in objects:
                    data = fmt.format(format_string, **obj.to_dict())
                    if unique:
                        if data not in uniq:
                            uniq.add(data)
                            pipe.write(data + '\n')
                    else:
                        pipe.write(data + '\n')
            os.unlink(filename)
    except KeyboardInterrupt:
        print_notification("[{}] Shutting down named pipe".format(pipename))
    except Exception as e:
        # BUG FIX: pipename and the exception were swapped in this message.
        print_error("[{}] Error: {}, stopping named pipe".format(pipename, e))
    finally:
        # The pipe may already have been unlinked at the end of a request.
        if os.path.exists(filename):
            os.remove(filename)
|
Starts the loop to provide the data from jackal.
|
def value(self):
    """Get the value to filter on.

    Either a column of the model (when ``field`` is set in the filter
    dict) or the literal ``val`` entry.

    :return: the value to filter on
    """
    field = self.filter_.get('field')
    if field is not None:
        try:
            return getattr(self.model, self.filter_['field'])
        except AttributeError:
            raise InvalidFilters("{} has no attribute {}".format(self.model.__name__, self.filter_['field']))
    if 'val' not in self.filter_:
        raise InvalidFilters("Can't find value or field in a filter")
    return self.filter_['val']
|
Get the value to filter on
:return: the value to filter on
|
def unpublish(self):
    """
    Un-publish the current object.

    Only acts when called on a draft that has a linked published copy:
    deletes the published copy, clears the draft's publishing fields, and
    emits the pre/post unpublish signals around the whole operation. The
    exact statement order below is deliberate -- see the inline NOTEs.
    """
    if self.is_draft and self.publishing_linked:
        publishing_signals.publishing_pre_unpublish.send(
            sender=type(self), instance=self)
        # Unlink draft and published copies then delete published.
        # NOTE: This indirect dance is necessary to avoid triggering
        # unwanted MPTT tree structure updates via `delete`.
        type(self.publishing_linked).objects \
            .filter(pk=self.publishing_linked.pk) \
            .delete()  # Instead of self.publishing_linked.delete()
        # NOTE: We update and save the object *after* deleting the
        # published version, in case the `save()` method does some
        # validation that breaks when unlinked published objects exist.
        self.publishing_linked = None
        self.publishing_published_at = None
        # Save the draft to remove its relationship with the published copy
        publishing_signals.publishing_unpublish_save_draft.send(
            sender=type(self), instance=self)
        publishing_signals.publishing_post_unpublish.send(
            sender=type(self), instance=self)
|
Un-publish the current object.
|
def _set_bfd(self, v, load=False):
    """
    Setter method for bfd, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/bfd (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_bfd is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_bfd() directly.
    """
    # Auto-generated pyangbind setter: coerce the incoming value into the
    # bfd container type and store it.
    if hasattr(v, "_utype"):
        # Already a YANG-typed value; unwrap it to its base type first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=bfd.bfd, is_container='container', presence=True, yang_name="bfd", rest_name="bfd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure BFD parameters', u'callpoint': u'MplsBfd', u'cli-add-mode': None, u'cli-full-command': None, u'hidden': u'full', u'cli-full-no': None, u'cli-mode-name': u'config-router-mpls-bfd'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """bfd must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=bfd.bfd, is_container='container', presence=True, yang_name="bfd", rest_name="bfd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure BFD parameters', u'callpoint': u'MplsBfd', u'cli-add-mode': None, u'cli-full-command': None, u'hidden': u'full', u'cli-full-no': None, u'cli-mode-name': u'config-router-mpls-bfd'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
        })
    self.__bfd = t
    # Notify the parent container, when present, that a child changed.
    if hasattr(self, '_set'):
        self._set()
|
Setter method for bfd, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/bfd (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bfd is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bfd() directly.
|
def to_iso(dt):
    '''
    Format a date or datetime into an ISO-8601 string.

    Support dates before 1900.
    '''
    # datetime is a subclass of date, so it must be checked first.
    if isinstance(dt, datetime):
        return to_iso_datetime(dt)
    if isinstance(dt, date):
        return to_iso_date(dt)
|
Format a date or datetime into an ISO-8601 string
Support dates before 1900.
|
def print_traceback(with_colors=True):
    """
    Print the current call stack to stdout, syntax-highlighted with
    pygments when requested and available (falling back to plain text).
    """
    import traceback
    stack_lines = traceback.format_list(traceback.extract_stack())
    tbtext = ''.join(stack_lines)
    if not with_colors:
        print(tbtext)
        return
    try:
        from pygments import highlight
        from pygments.lexers import get_lexer_by_name
        from pygments.formatters import TerminalFormatter
        lexer = get_lexer_by_name('pytb', stripall=True)
        formatter = TerminalFormatter(bg='dark')
        print(highlight(tbtext, lexer, formatter))
    except Exception:
        # pygments missing or failed; plain text is still useful.
        print(tbtext)
|
prints current stack
|
def _draw_footer(self):
    """
    Render the key-bindings help bar on the current row at the bottom of
    the screen, then advance the row counter.
    """
    _n_rows, n_cols = self.term.stdscr.getmaxyx()
    footer_win = self.term.stdscr.derwin(1, n_cols, self._row, 0)
    footer_win.erase()
    footer_win.bkgd(str(' '), self.term.attr('HelpBar'))
    self.term.add_line(footer_win, self.FOOTER.strip(), 0, 0)
    self._row += 1
|
Draw the key binds help bar at the bottom of the screen
|
def iou(boxes1, boxes2):
    """Computes pairwise intersection-over-union between box collections.

    Args:
        boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.

    Returns:
        a numpy array with shape [N, M] representing pairwise iou scores.
    """
    overlap = intersection(boxes1, boxes2)
    # union = area1 + area2 - intersection, broadcast to [N, M].
    union = (np.expand_dims(area(boxes1), axis=1)
             + np.expand_dims(area(boxes2), axis=0)
             - overlap)
    return overlap / union
|
Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
|
def _to_dict(self):
    """Return a json dictionary representing this model."""
    result = {}
    # Only attributes that are present and non-None are serialized.
    for attr in ('warning_id', 'description'):
        val = getattr(self, attr, None)
        if val is not None:
            result[attr] = val
    return result
|
Return a json dictionary representing this model.
|
def _get_date_facet_counts(self, timespan, date_field, start_date=None, end_date=None):
    '''
    Return (source, destination) range-facet counts for ``date_field``.

    When no explicit boundary is given, the corresponding edge date is
    looked up from the collection itself; the end boundary is extended
    by one day so the last day is fully covered.
    '''
    if 'DAY' not in timespan:
        raise ValueError("At this time, only DAY date range increment is supported. Aborting..... ")
    # TODO(review): the string/date concatenation below deserves a cleanup.
    if start_date:
        start_date = start_date + 'T00:00:00.000Z'
    else:
        start_date = self._get_edge_date(date_field, 'asc')
        start_date = datetime.strptime(
            start_date, '%Y-%m-%dT%H:%M:%S.%fZ').date().isoformat() + 'T00:00:00.000Z'
    if end_date:
        end_date = end_date + 'T00:00:00.000Z'
    else:
        end_date = self._get_edge_date(date_field, 'desc')
        end_date = datetime.strptime(end_date, '%Y-%m-%dT%H:%M:%S.%fZ').date()
        end_date += timedelta(days=1)
        end_date = end_date.isoformat() + 'T00:00:00.000Z'
    self.log.info("Processing Items from {} to {}".format(start_date, end_date))
    # Facet counts for the source and destination collections.
    source_facet = self._source.query(
        self._source_coll,
        self._get_date_range_query(timespan=timespan, start_date=start_date, end_date=end_date)
    ).get_facets_ranges()[date_field]
    dest_facet = self._dest.query(
        self._dest_coll,
        self._get_date_range_query(timespan=timespan, start_date=start_date, end_date=end_date)
    ).get_facets_ranges()[date_field]
    return source_facet, dest_facet
|
Returns Range Facet counts for the given date field over the requested timespan.
|
def binning(keys, start, end, count, axes=None):
    """Perform binning over the given axes of the keys

    Parameters
    ----------
    keys : indexable or tuple of indexable

    Examples
    --------
    binning(np.random.rand(100), 0, 1, 10)
    """
    # NOTE(review): this function appears unfinished -- it computes bin
    # edges and indices but `n_keys` and `axes` are never used and nothing
    # is returned. Confirm intended behavior before relying on it.
    if isinstance(keys, tuple):
        n_keys = len(keys)
    else:
        n_keys = 1
    # `count` bins need count+1 edges, including the right-most edge.
    bins = np.linspace(start, end, count+1, endpoint=True)
    idx = np.searchsorted(bins, keys)
    if axes is None:
        axes = [-1]
|
Perform binning over the given axes of the keys
Parameters
----------
keys : indexable or tuple of indexable
Examples
--------
binning(np.random.rand(100), 0, 1, 10)
|
def smart_insert(col, data, minimal_size=5):
    """An optimized insert strategy for MongoDB collections.

    Bulk inserts are far faster than per-document inserts when no
    duplicate-key error will occur. When that cannot be known in advance,
    this strategy is used:

    1. Try a bulk insert first; it is fast because nothing is committed
       until the end.
    2. If it fails with a duplicate key, split the data into roughly
       sqrt(n)-sized chunks and recurse on each chunk.
    3. Once a chunk is smaller than ``minimal_size ** 2``, insert its
       documents one by one, skipping duplicates, until done.

    The extra memory cost is about sqrt(nbytes), which is small compared
    to the data itself, while the running time is close to optimal on
    average across scenarios.
    """
    if isinstance(data, list):
        # Fast path: attempt a single bulk insert.
        try:
            col.insert(data)
        # The bulk insert hit a duplicate key.
        except pymongo.errors.DuplicateKeyError:
            n = len(data)
            # Still a large batch:
            if n >= minimal_size ** 2:
                # split into ~sqrt(n)-sized chunks and recurse on each.
                n_chunk = math.floor(math.sqrt(n))
                for chunk in grouper_list(data, n_chunk):
                    smart_insert(col, chunk, minimal_size)
            # Small enough: insert one document at a time, skipping duplicates.
            else:
                for doc in data:
                    try:
                        col.insert(doc)
                    except pymongo.errors.DuplicateKeyError:
                        pass
    else:  # pragma: no cover
        # A single document: insert it, ignoring a duplicate key.
        try:
            col.insert(data)
        except pymongo.errors.DuplicateKeyError:
            pass
|
An optimized Insert strategy.
**中文文档**
在Insert中, 如果已经预知不会出现IntegrityError, 那么使用Bulk Insert的速度要
远远快于逐条Insert。而如果无法预知, 那么我们采用如下策略:
1. 尝试Bulk Insert, Bulk Insert由于在结束前不Commit, 所以速度很快。
2. 如果失败了, 那么对数据的条数开平方根, 进行分包, 然后对每个包重复该逻辑。
3. 若还是尝试失败, 则继续分包, 当分包的大小小于一定数量时, 则使用逐条插入。
直到成功为止。
该Insert策略在内存上需要额外的 sqrt(nbytes) 的开销, 跟原数据相比体积很小。
但时间上是各种情况下平均最优的。
|
def play(self, sox_effects=()):
    """ Play a speech.

    When reading a fixed text (not stdin, i.e. text != "-"), all segments
    are materialized up front and preloaded by background threads while
    earlier segments play.
    """
    streaming = (self.text == "-")
    preloader_threads = []
    if streaming:
        segments = iter(self)
    else:
        segments = list(self)
        # Warm the segment cache concurrently with playback.
        preloader_threads = [
            PreloaderThread(name="PreloaderThread-%u" % (i))
            for i in range(PRELOADER_THREAD_COUNT)
        ]
        for thread in preloader_threads:
            thread.segments = segments
            thread.start()
    for segment in segments:
        segment.play(sox_effects)
    if not streaming:
        # Tear down the preloader threads.
        for thread in preloader_threads:
            thread.join()
|
Play a speech.
|
def surfplot(self, z, titletext):
    """
    Quick single-figure image plot of a grid -- for troubleshooting.
    """
    # Axis labels and extent depend on whether the grid is geographic.
    if self.latlon:
        extent = (0, self.dx*z.shape[0], self.dy*z.shape[1], 0)
        xlab, ylab = 'longitude [deg E]', 'latitude [deg N]'
    else:
        extent = (0, self.dx/1000.*z.shape[0], self.dy/1000.*z.shape[1], 0)
        xlab, ylab = 'x [km]', 'y [km]'
    plt.imshow(z, extent=extent)  # interpolation='nearest' is another option
    plt.xlabel(xlab, fontsize=12, fontweight='bold')
    plt.ylabel(ylab, fontsize=12, fontweight='bold')
    plt.colorbar()
    plt.title(titletext, fontsize=16)
|
Plot if you want to - for troubleshooting - 1 figure
|
def update(self, quote_id, product_data, store_view=None):
    """
    Allows you to update one or several products in the shopping cart
    (quote).

    :param quote_id: Shopping cart ID (quote ID)
    :param product_data: list of dicts of product details, see def add()
    :param store_view: Store view ID or code
    :return: boolean, True if the product is updated.
    """
    result = self.call('cart_product.update',
                       [quote_id, product_data, store_view])
    return bool(result)
|
Allows you to update one or several products in the shopping cart
(quote).
:param quote_id: Shopping cart ID (quote ID)
:param product_data, list of dicts of product details, see def add()
:param store_view: Store view ID or code
:return: boolean, True if the product is updated .
|
def mv_files(src, dst):
    """
    Move all files from one directory to another.

    :param str src: Source directory
    :param str dst: Destination directory
    :return none:
    """
    # Move every entry of src into dst, keeping the same base name.
    # ('entry' instead of 'file', which shadows the Python 2 builtin.)
    for entry in os.listdir(src):
        shutil.move(os.path.join(src, entry), os.path.join(dst, entry))
    return
|
Move all files from one directory to another
:param str src: Source directory
:param str dst: Destination directory
:return none:
|
def verify_rank_integrity(self, tax_id, rank, parent_id, children):
    """Confirm that a new node's rank is coherent with the ranks of its
    parent and children.

    :param tax_id: id of the node being added
    :param rank: rank name of the new node
    :param parent_id: id of the prospective parent node
    :param children: iterable of prospective child node ids
    :return: True when all checks pass
    :raises TaxonIntegrityError: on an undefined or out-of-order rank
    """
    def _lower(n1, n2):
        # Ranks earlier in self.ranks are lower (more specific).
        return self.ranks.index(n1) < self.ranks.index(n2)

    if rank not in self.ranks:
        raise TaxonIntegrityError('rank "{}" is undefined'.format(rank))
    parent_rank = self.rank(parent_id)
    # Undefined ranks can be placed anywhere in a lineage.
    if not _lower(rank, parent_rank) and rank != self.NO_RANK:
        msg = ('New node "{}", rank "{}" has same or '
               'higher rank than parent node "{}", rank "{}"')
        msg = msg.format(tax_id, rank, parent_id, parent_rank)
        raise TaxonIntegrityError(msg)
    for child in children:
        if not _lower(self.rank(child), rank):
            # BUG FIX: child and tax_id were swapped in the message below.
            msg = 'Child node {} has same or lower rank as new node {}'
            msg = msg.format(child, tax_id)
            raise TaxonIntegrityError(msg)
    return True
|
Confirm that for each node the parent ranks and children ranks are
coherent
|
def _reorder_fields(self, ordering):
    """
    Verify that the 'captcha' field is really present before reordering.
    A bad FLUENT_COMMENTS_FIELD_ORDER configuration could drop it.
    """
    has_captcha = 'captcha' in ordering
    if not has_captcha:
        raise ImproperlyConfigured(
            "When using 'FLUENT_COMMENTS_FIELD_ORDER', "
            "make sure the 'captcha' field included too to use '{}' form. ".format(
                self.__class__.__name__
            )
        )
    super(CaptchaFormMixin, self)._reorder_fields(ordering)
    if self.is_preview:
        # Previews should not force the user to solve a captcha.
        self.fields.pop('captcha')
|
Test that the 'captcha' field is really present.
This could be broken by a bad FLUENT_COMMENTS_FIELD_ORDER configuration.
|
def get_answer_begin_end(data):
    '''
    Convert each QA pair's character-level answer span into word-level
    begin/end indices and return them as two numpy arrays.
    '''
    begin_indices = []
    end_indices = []
    for qa_pair in data:
        passage_tokens = qa_pair['passage_tokens']
        begin_indices.append(
            get_word_index(passage_tokens, qa_pair['answer_begin']))
        end_indices.append(
            get_word_index(passage_tokens, qa_pair['answer_end']))
    return np.asarray(begin_indices), np.asarray(end_indices)
|
Get answer's index of begin and end.
|
def has_activity(graph: BELGraph, node: BaseEntity) -> bool:
    """Return true if over any of the node's edges, it has a molecular activity.

    :param graph: The BEL graph containing the node's edges.
    :param node: The node whose incident edges are checked.
    """
    # Delegates to the shared modifier check with the ACTIVITY modifier key.
    return _node_has_modifier(graph, node, ACTIVITY)
|
Return true if over any of the node's edges, it has a molecular activity.
|
def multi_split(text, regexes):
    """
    Split the text by the given regexes, in priority order.
    Make sure that the regex is parenthesized so that matches are returned in
    re.split().
    Splitting on a single regex works like normal split.
    >>> '|'.join(multi_split('one two three', [r'\w+']))
    'one| |two| |three'
    Splitting on digits first separates the digits from their word
    >>> '|'.join(multi_split('one234five 678', [r'\d+', r'\w+']))
    'one|234|five| |678'
    Splitting on words first keeps the word with digits intact.
    >>> '|'.join(multi_split('one234five 678', [r'\w+', r'\d+']))
    'one234five| |678'
    """
    def make_regex(s):
        # Accept either pattern strings or pre-compiled regex objects.
        # NOTE(review): 'basestring' implies Python 2 — confirm target version.
        return re.compile(s) if isinstance(s, basestring) else s
    regexes = [make_regex(r) for r in regexes]
    # Run the list of pieces through the regex split, splitting it into more
    # pieces. Once a piece has been matched, add it to finished_pieces and
    # don't split it again. The pieces should always join back together to form
    # the original text.
    piece_list = [text]
    finished_pieces = set()
    def apply_re(regex, piece_list):
        # Yield pieces, splitting unfinished ones on 'regex'; any piece that
        # matches the regex is marked finished so later regexes skip it.
        for piece in piece_list:
            if piece in finished_pieces:
                yield piece
                continue
            for s in full_split(piece, regex):
                if regex.match(s):
                    finished_pieces.add(s)
                if s:
                    yield s
    for regex in regexes:
        piece_list = list(apply_re(regex, piece_list))
    # Sanity check: splitting must be lossless.
    assert ''.join(piece_list) == text
    return piece_list
|
Split the text by the given regexes, in priority order.
Make sure that the regex is parenthesized so that matches are returned in
re.split().
Splitting on a single regex works like normal split.
>>> '|'.join(multi_split('one two three', [r'\w+']))
'one| |two| |three'
Splitting on digits first separates the digits from their word
>>> '|'.join(multi_split('one234five 678', [r'\d+', r'\w+']))
'one|234|five| |678'
Splitting on words first keeps the word with digits intact.
>>> '|'.join(multi_split('one234five 678', [r'\w+', r'\d+']))
'one234five| |678'
|
def lex(string):
    "this is only used by tests"
    # Clone the shared lexer so this call does not disturb its state.
    # (Reentrancy of the underlying lexer is unclear; the clone is defensive.)
    local_lexer = LEXER.clone()
    local_lexer.input(string)
    tokens = []
    while True:
        token = local_lexer.token()
        if not token:
            break
        tokens.append(token)
    return tokens
|
this is only used by tests
|
def _apply_criteria(df, criteria, **kwargs):
    """Apply criteria individually to every model/scenario instance"""
    matched = []
    for variable, check in criteria.items():
        subset = df[df['variable'] == variable]
        # Evaluate the check per model/scenario group, collecting the
        # row indexes that satisfy it.
        for _, group_df in subset.groupby(META_IDX):
            matched.append(_check_rows(group_df, check, **kwargs))
    return df.loc[itertools.chain(*matched)]
|
Apply criteria individually to every model/scenario instance
|
def get_result(self):
    """
    Pop and return the next result.

    Raises *NoResult* if no result has been set.
    """
    self._process()
    if self.has_result:
        return self.res_queue.popleft()
    raise NoResult
|
raises *NoResult* exception if no result has been set
|
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'entity') and self.entity is not None:
_dict['entity'] = self.entity
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location
return _dict
|
Return a json dictionary representing this model.
|
def filterMapAttrs(records=getIndex(), **tags):
    """matches available maps if their attributes match as specified

    NOTE(review): the default 'records' is evaluated once at import time
    (getIndex() is called when the def executes) — confirm this caching
    is intended.
    """
    if not tags:
        return records  # no criteria given: every record matches
    return [record for record in records if matchRecordAttrs(record, tags)]
|
matches available maps if their attributes match as specified
|
def ObtenerTagXml(self, *tags):
    "Search the analyzed XML and return the requested tag"
    # navigate the parsed xml object
    try:
        if self.xml:
            xml = self.xml
            # for each tag, look it up by name or position
            for tag in tags:
                xml = xml(tag)  # shortcut for getitem / getattr
            # convert the located xml object back into a string
            return str(xml)
    except Exception, e:
        # NOTE: Python 2 syntax; on error the message is stored rather
        # than raised, and the method implicitly returns None.
        self.Excepcion = traceback.format_exception_only( sys.exc_type, sys.exc_value)[0]
|
Busca en el Xml analizado y devuelve el tag solicitado
|
def search(self, filters=None, start_index=0, limit=100):
    """
    Search for a list of notes that can be invested in.
    (similar to searching for notes in the Browse section on the site)
    Parameters
    ----------
    filters : lendingclub.filters.*, optional
        The filter to use to search for notes. If no filter is passed, a wildcard search
        will be performed.
    start_index : int, optional
        The result index to start on. By default only 100 records will be returned at a time, so use this
        to start at a later index in the results. For example, to get results 200 - 300, set `start_index` to 200.
        (default is 0)
    limit : int, optional
        The number of results to return per request. (default is 100)
    Returns
    -------
    dict
        A dictionary object with the list of matching loans under the `loans` key.
        Returns False if the server reports the request was unsuccessful.
    """
    assert filters is None or isinstance(filters, Filter), 'filter is not a lendingclub.filters.Filter'
    # Set filters ('default' is the server-side wildcard search)
    if filters:
        filter_string = filters.search_string()
    else:
        filter_string = 'default'
    payload = {
        'method': 'search',
        'filter': filter_string,
        'startindex': start_index,
        'pagesize': limit
    }
    # Make request
    response = self.session.post('/browse/browseNotesAj.action', data=payload)
    json_response = response.json()
    if self.session.json_success(json_response):
        results = json_response['searchresult']
        # Normalize results by converting loanGUID -> loan_id
        for loan in results['loans']:
            loan['loan_id'] = int(loan['loanGUID'])
        # Validate that fractions do indeed match the filters
        if filters is not None:
            filters.validate(results['loans'])
        return results
    return False
|
Search for a list of notes that can be invested in.
(similar to searching for notes in the Browse section on the site)
Parameters
----------
filters : lendingclub.filters.*, optional
The filter to use to search for notes. If no filter is passed, a wildcard search
will be performed.
start_index : int, optional
The result index to start on. By default only 100 records will be returned at a time, so use this
to start at a later index in the results. For example, to get results 200 - 300, set `start_index` to 200.
(default is 0)
limit : int, optional
The number of results to return per request. (default is 100)
Returns
-------
dict
A dictionary object with the list of matching loans under the `loans` key.
|
def fetch_by_name(self, name):
    """
    Look up the service called ``name`` in mongodb storage and return it,
    raising ServiceNotFound when no matching document exists.
    """
    record = self.collection.find_one({'name': name})
    if not record:
        raise ServiceNotFound
    return Service(record)
|
Gets service for given ``name`` from mongodb storage.
|
def mod_c(self):
    """Complex modulus"""
    # |z| = sqrt(z1^2 + z2^2)
    re_sq = self.z1 * self.z1
    im_sq = self.z2 * self.z2
    return np.sqrt(re_sq + im_sq)
|
Complex modulus
|
def executable(self):
    """Connection against which statements will be executed."""
    # Lazily create one connection per thread-local context, then reuse it.
    if not hasattr(self.local, 'conn'):
        self.local.conn = self.engine.connect()
    return self.local.conn
|
Connection against which statements will be executed.
|
def propagate_name_down(self, col_name, df_name, verbose=False):
    """
    Put the data for "col_name" into dataframe with df_name
    Used to add 'site_name' to specimen table, for example.

    :param col_name: name column to propagate, e.g. 'site_name'
    :param df_name: name of the table to receive the column
    :param verbose: if True, print warnings when propagation is incomplete
    :return: the updated dataframe, or None if df_name could not be read
    """
    if df_name not in self.tables:
        table = self.add_magic_table(df_name)[1]
        if is_null(table):
            return
    df = self.tables[df_name].df
    # If the column is already fully populated, there is nothing to do.
    if col_name in df.columns:
        if all(df[col_name].apply(not_null)):
            #print('{} already in {}'.format(col_name, df_name))
            return df
    # otherwise, do necessary merges to get col_name into df
    # get names for each level
    grandparent_table_name = col_name.split('_')[0] + "s"
    grandparent_name = grandparent_table_name[:-1]
    ind = self.ancestry.index(grandparent_table_name) - 1
    #
    parent_table_name, parent_name = self.get_table_name(ind)
    child_table_name, child_name = self.get_table_name(ind - 1)
    bottom_table_name, bottom_name = self.get_table_name(ind - 2)
    # merge in bottom level
    if child_name not in df.columns:
        # add child table if missing
        if bottom_table_name not in self.tables:
            result = self.add_magic_table(bottom_table_name)[1]
            if not isinstance(result, MagicDataFrame):
                if verbose:
                    print("-W- Couldn't read in {} data for data propagation".format(bottom_table_name))
                return df
        # add child_name to df
        add_df = self.tables[bottom_table_name].df
        # drop duplicate names
        add_df = add_df.drop_duplicates(subset=bottom_name)
        if child_name not in df.columns:
            if verbose:
                print("-W- Cannot complete propagation, {} table is missing {} column".format(df_name, child_name))
        else:
            add_df = stringify_col(add_df, child_name)
            df = stringify_col(df, bottom_name)
            df = df.merge(add_df[[child_name]],
                          left_on=[bottom_name],
                          right_index=True, how="left")
            self.tables[df_name].df = df
    # merge in one level above
    if parent_name not in df.columns:
        # add parent_table if missing
        if child_table_name not in self.tables:
            result = self.add_magic_table(child_table_name)[1]
            if not isinstance(result, MagicDataFrame):
                if verbose:
                    print("-W- Couldn't read in {} data".format(child_table_name))
                    print("-I- Make sure you've provided the correct file name")
                return df
        # add parent_name to df
        add_df = self.tables[child_table_name].df
        # drop duplicate names
        add_df = add_df.drop_duplicates(subset=child_name)
        if parent_name not in add_df:
            if verbose:
                print('-W- could not finish propagating names: {} table is missing {} column'.format(child_table_name, parent_name))
        elif parent_name not in df:
            if verbose:
                print('-W- could not finish propagating names: {} table is missing {} column'.format(df_name, parent_name))
        else:
            add_df = stringify_col(add_df, parent_name)
            df = stringify_col(df, child_name)
            df = df.merge(add_df[[parent_name]],
                          left_on=[child_name],
                          right_index=True, how="left")
            self.tables[df_name].df = df
    # merge in two levels above
    if grandparent_name not in df.columns:
        # add grandparent table if it is missing
        if parent_table_name not in self.tables:
            result = self.add_magic_table(parent_table_name)[1]
            if not isinstance(result, MagicDataFrame):
                if verbose:
                    print("-W- Couldn't read in {} data".format(parent_table_name))
                    print("-I- Make sure you've provided the correct file name")
                return df
        # add grandparent name to df
        add_df = self.tables[parent_table_name].df
        # drop duplicate names
        add_df = add_df.drop_duplicates(subset=parent_name)
        if grandparent_name not in add_df.columns:
            if verbose:
                print('-W- could not finish propagating names: {} table is missing {} column'.format(parent_table_name, grandparent_name))
        elif parent_name not in df.columns:
            if verbose:
                print('-W- could not finish propagating names: {} table is missing {} column'.format(df_name, parent_name))
        else:
            add_df = stringify_col(add_df, grandparent_name)
            df = stringify_col(df, parent_name)
            df = df.merge(add_df[[grandparent_name]],
                          left_on=[parent_name],
                          right_index=True, how="left")
            df = stringify_col(df, grandparent_name)
    # update the Contribution
    self.tables[df_name].df = df
    return df
|
Put the data for "col_name" into dataframe with df_name
Used to add 'site_name' to specimen table, for example.
|
def connect(self):
    """
    Connect to the REST API, authenticating with a JWT for the current user.
    """
    if JwtBuilder is None:
        raise NotConnectedToOpenEdX("This package must be installed in an OpenEdX environment.")
    issued_at = int(time())
    token = JwtBuilder.create_jwt_for_user(self.user)
    self.client = EdxRestApiClient(
        self.API_BASE_URL, append_slash=self.APPEND_SLASH, jwt=token,
    )
    # Track when the freshly issued token will expire.
    self.expires_at = issued_at + self.expires_in
|
Connect to the REST API, authenticating with a JWT for the current user.
|
def determine_result(self, returncode, returnsignal, output, isTimeout):
    """
    Parse the output of the tool and extract the verification result.
    This method always needs to be overridden.
    If the tool gave a result, this method needs to return one of the
    benchexec.result.RESULT_* strings.
    Otherwise an arbitrary string can be returned that will be shown to the user
    and should give some indication of the failure reason
    (e.g., "CRASH", "OUT_OF_MEMORY", etc.).
    """
    if isTimeout:
        return 'TIMEOUT'
    if returncode == 2:
        return 'ERROR - Pre-run'
    # Check for missing output *before* joining: the original checked the
    # joined string for None, which can never happen, while a None 'output'
    # would crash '\n'.join with a TypeError instead of reporting the error.
    if output is None:
        return 'ERROR - no output'
    join_output = '\n'.join(output)
    if 'Safe.' in join_output:
        return result.RESULT_TRUE_PROP
    elif 'Error state' in join_output:
        return result.RESULT_FALSE_REACH
    else:
        return result.RESULT_UNKNOWN
|
Parse the output of the tool and extract the verification result.
This method always needs to be overridden.
If the tool gave a result, this method needs to return one of the
benchexec.result.RESULT_* strings.
Otherwise an arbitrary string can be returned that will be shown to the user
and should give some indication of the failure reason
(e.g., "CRASH", "OUT_OF_MEMORY", etc.).
|
def canonical_headers(self, headers_to_sign):
    """
    Return the headers that need to be included in the StringToSign
    in their canonical form by converting all header keys to lower
    case, sorting them in alphabetical order and then joining
    them into a string, separated by newlines.
    """
    canonical = [
        '%s:%s' % (name.lower().strip(), headers_to_sign[name].strip())
        for name in headers_to_sign
    ]
    return '\n'.join(sorted(canonical))
|
Return the headers that need to be included in the StringToSign
in their canonical form by converting all header keys to lower
case, sorting them in alphabetical order and then joining
them into a string, separated by newlines.
|
def _interchange_level_from_filename(fullname):
    # type: (bytes) -> int
    '''
    A function to determine the ISO interchange level from the filename.
    In theory, there are 3 levels, but in practice we only deal with level 1
    and level 3.
    Parameters:
     name - The name to use to determine the interchange level.
    Returns:
     The interchange level determined from this filename.
    '''
    (name, extension, version) = _split_iso9660_filename(fullname)

    level = 1
    # A version outside [1, 32767] forces level 3.
    if version != b'' and (int(version) < 1 or int(version) > 32767):
        level = 3
    # Semicolons are not allowed in a level-1 name or extension.
    if b';' in name or b';' in extension:
        level = 3
    # Level 1 limits names to 8 characters and extensions to 3 (8.3).
    if len(name) > 8 or len(extension) > 3:
        level = 3
    # Characters outside the d1 set also force level 3.
    try:
        _check_d1_characters(name)
        _check_d1_characters(extension)
    except pycdlibexception.PyCdlibInvalidInput:
        level = 3

    return level
|
A function to determine the ISO interchange level from the filename.
In theory, there are 3 levels, but in practice we only deal with level 1
and level 3.
Parameters:
name - The name to use to determine the interchange level.
Returns:
The interchange level determined from this filename.
|
def select_whole_line(self, line=None, apply_selection=True):
    """
    Selects an entire line.
    :param line: Line to select. If None, the current line will be selected
    :param apply_selection: True to apply selection on the text editor
        widget, False to just return the text cursor without setting it
        on the editor.
    :return: QTextCursor
    """
    target = self.current_line_nbr() if line is None else line
    # A whole-line selection is a start==end range over that line.
    return self.select_lines(target, target, apply_selection=apply_selection)
|
Selects an entire line.
:param line: Line to select. If None, the current line will be selected
:param apply_selection: True to apply selection on the text editor
widget, False to just return the text cursor without setting it
on the editor.
:return: QTextCursor
|
def get_full_name(src):
    """Gets full class or function name.

    Results are cached on the object via the '_full_name_' attribute
    when the object allows attribute assignment; objects that do not
    (e.g. builtins) are recomputed each call.
    """
    # Fast path: a previously cached name.
    if hasattr(src, "_full_name_"):
        return src._full_name_
    if hasattr(src, "is_decorator"):
        # Our own decorator or binder
        if hasattr(src, "decorator"):
            # Our own binder
            _full_name_ = str(src.decorator)
            # It's a short-living object, so we don't cache result
        else:
            # Our own decorator
            _full_name_ = str(src)
            # Caching may fail on objects without a writable __dict__;
            # that is fine, we just recompute next time.
            try:
                src._full_name_ = _full_name_
            except AttributeError:
                pass
            except TypeError:
                pass
    elif hasattr(src, "im_class"):
        # Bound method (NOTE: 'im_class' exists on Python 2 only)
        cls = src.im_class
        _full_name_ = get_full_name(cls) + "." + src.__name__
        # It's a short-living object, so we don't cache result
    elif hasattr(src, "__module__") and hasattr(src, "__name__"):
        # Func or class
        _full_name_ = (
            ("<unknown module>" if src.__module__ is None else src.__module__)
            + "."
            + src.__name__
        )
        try:
            src._full_name_ = _full_name_
        except AttributeError:
            pass
        except TypeError:
            pass
    else:
        # Something else
        _full_name_ = str(get_original_fn(src))
    return _full_name_
|
Gets full class or function name.
|
def _init_plot_handles(self):
    """
    Find all requested plotting handles and cache them along
    with the IDs of the models the callbacks will be attached to.
    """
    plots = [self.plot]
    if self.plot.subplots:
        plots.extend(self.plot.subplots.values())

    # Merge the handle dicts of the main plot and all subplots;
    # later plots win on key collisions.
    handles = {}
    for plot in plots:
        handles.update(plot.handles)
    self.plot_handles = handles

    requested = {}
    for model_name in self.models + self.extra_models:
        if model_name in self.plot_handles:
            requested[model_name] = handles[model_name]
        elif model_name in self.extra_models:
            # Missing required models are skipped silently; only missing
            # extras produce a warning.
            print("Warning %s could not find the %s model. "
                  "The corresponding stream may not work."
                  % (type(self).__name__, model_name))
    self.handle_ids.update(self._get_stream_handle_ids(requested))
    return requested
|
Find all requested plotting handles and cache them along
with the IDs of the models the callbacks will be attached to.
|
def get_server_premaster_secret(self, password_verifier, server_private, client_public, common_secret):
    """S = (A * v^u) ^ b % N
    :param int password_verifier: v
    :param int server_private: b
    :param int client_public: A
    :param int common_secret: u
    :rtype: int
    """
    # v^u % N
    verifier_term = pow(password_verifier, common_secret, self._prime)
    # (A * v^u) ^ b % N
    return pow(client_public * verifier_term, server_private, self._prime)
|
S = (A * v^u) ^ b % N
:param int password_verifier:
:param int server_private:
:param int client_public:
:param int common_secret:
:rtype: int
|
def symlink_bundles(self, app, bundle_dir):
    """For each bundle in the given app, symlinks relevant matched paths.
    Validates that at least one path was matched by a bundle.
    """
    for bundle_counter, bundle in enumerate(app.bundles):
        matched = 0
        for path, relpath in bundle.filemap.items():
            target = os.path.join(bundle_dir, relpath)
            matched += 1
            # Skip paths that were already linked/created.
            if os.path.exists(target):
                continue
            if os.path.isfile(path):
                # Ensure the parent directory exists before linking the file.
                safe_mkdir(os.path.dirname(target))
                os.symlink(path, target)
            elif os.path.isdir(path):
                safe_mkdir(target)
        if not matched:
            raise TargetDefinitionException(app.target,
                                            'Bundle index {} of "bundles" field '
                                            'does not match any files.'.format(bundle_counter))
|
For each bundle in the given app, symlinks relevant matched paths.
Validates that at least one path was matched by a bundle.
|
def validate_ltsv_label(label):
    """
    Verifying whether ``label`` is a valid
    `Labeled Tab-separated Values (LTSV) <http://ltsv.org/>`__ label or not.
    :param str label: Label to validate.
    :raises pathvalidate.NullNameError: If the ``label`` is empty.
    :raises pathvalidate.InvalidCharError:
        If invalid character(s) found in the ``label`` for a LTSV format label.
    """
    validate_null_string(label, error_msg="label is empty")

    invalid_chars = __RE_INVALID_LTSV_LABEL.findall(preprocess(label))
    if invalid_chars:
        raise InvalidCharError(
            "invalid character found for a LTSV format label: {}".format(invalid_chars)
        )
|
Verifying whether ``label`` is a valid
`Labeled Tab-separated Values (LTSV) <http://ltsv.org/>`__ label or not.
:param str label: Label to validate.
:raises pathvalidate.NullNameError: If the ``label`` is empty.
:raises pathvalidate.InvalidCharError:
If invalid character(s) found in the ``label`` for a LTSV format label.
|
def set_popup_menu(self, menu):
    '''set a popup menu on the frame

    :param menu: the menu definition to attach
    '''
    self.popup_menu = menu
    # Forward the menu to the consumer of in_queue (presumably the GUI
    # process/thread — confirm against the queue reader).
    self.in_queue.put(MPImagePopupMenu(menu))
|
set a popup menu on the frame
|
def do_scan_range(self, line):
    """Do an ad-hoc scan of a range of points (group 1, variation 2, indexes 0-3). Command syntax is: scan_range"""
    # 'line' is required by the cmd.Cmd-style interface but unused here.
    self.application.master.ScanRange(opendnp3.GroupVariationID(1, 2), 0, 3, opendnp3.TaskConfig().Default())
|
Do an ad-hoc scan of a range of points (group 1, variation 2, indexes 0-3). Command syntax is: scan_range
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.