code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def hset(self, key, value):
    """Store ``value`` under field ``key`` in the configured Redis hash.

    Args:
        key (string): Field name to set inside the hash ``self.hash``.
        value (any): Value to associate with the field.

    Returns:
        (string): The Redis server's response.
    """
    redis_client = self.r
    return redis_client.hset(self.hash, key, value)
|
Create key/value pair in Redis.
Args:
key (string): The key to create in Redis.
value (any): The value to store in Redis.
Returns:
(string): The response from Redis.
|
def initDatabase(self, db_file):
    """Open the search database file and keep its handle on the instance.

    :param db_file: path to the binary database file to open

    Side effects: stores the opened binary file object on ``self.__f``.
    On failure, prints the error and terminates the process via sys.exit().
    """
    try:
        self.__f = io.open(db_file, "rb")
    except IOError as e:
        # Python 3 compatible syntax (was `except IOError, e:` / print stmt,
        # which is a SyntaxError on Python 3).
        print("[Error]: ", e)
        sys.exit()
|
" initialize the database for search
" param: dbFile
|
def _extract_info(archive, info):
"""
Extracts the contents of an archive info object
;param archive:
An archive from _open_archive()
:param info:
An info object from _list_archive_members()
:return:
None, or a byte string of the file contents
"""
if isinstance(archive, zipfile.ZipFile):
fn = info.filename
is_dir = fn.endswith('/') or fn.endswith('\\')
out = archive.read(info)
if is_dir and out == b'':
return None
return out
info_file = archive.extractfile(info)
if info_file:
return info_file.read()
return None
|
Extracts the contents of an archive info object
:param archive:
An archive from _open_archive()
:param info:
An info object from _list_archive_members()
:return:
None, or a byte string of the file contents
|
def create_cache_security_group(name, region=None, key=None, keyid=None, profile=None, **args):
    '''
    Create a cache security group.

    Example:

    .. code-block:: bash

        salt myminion boto3_elasticache.create_cache_security_group mycachesecgrp Description='My Cache Security Group'
    '''
    return _create_resource(
        name,
        name_param='CacheSecurityGroupName',
        desc='cache security group',
        res_type='cache_security_group',
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
        **args
    )
|
Create a cache security group.
Example:
.. code-block:: bash
salt myminion boto3_elasticache.create_cache_security_group mycachesecgrp Description='My Cache Security Group'
|
def import_checks(path):
    """
    Import checks module given relative path.
    :param path: relative path from which to import checks module
    :type path: str
    :returns: the imported module
    :raises FileNotFoundError: if ``path / .check50.yaml`` does not exist
    :raises yaml.YAMLError: if ``path / .check50.yaml`` is not a valid YAML file

    This function is particularly useful when a set of checks logically extends
    another, as is often the case in CS50's own problems that have a "less comfy"
    and "more comfy" version. The "more comfy" version can include all of the
    "less comfy" checks like so::

        less = check50.import_checks("../less")
        from less import *

    .. note::
        the ``__name__`` of the imported module is given by the basename
        of the specified path (``less`` in the above example).
    """
    # Renamed locals so the builtins `dir` and `file` are not shadowed.
    checks_dir = internal.check_dir / path
    checks_file = internal.load_config(checks_dir)["checks"]
    module = internal.import_file(checks_dir.name, (checks_dir / checks_file).resolve())
    # Register under the directory basename so `from <name> import *` works.
    sys.modules[checks_dir.name] = module
    return module
|
Import checks module given relative path.
:param path: relative path from which to import checks module
:type path: str
:returns: the imported module
:raises FileNotFoundError: if ``path / .check50.yaml`` does not exist
:raises yaml.YAMLError: if ``path / .check50.yaml`` is not a valid YAML file
This function is particularly useful when a set of checks logically extends
another, as is often the case in CS50's own problems that have a "less comfy"
and "more comfy" version. The "more comfy" version can include all of the
"less comfy" checks like so::
less = check50.import_checks("../less")
from less import *
.. note::
the ``__name__`` of the imported module is given by the basename
of the specified path (``less`` in the above example).
|
def client_mechanisms(self):
    """List of available :class:`ClientMechanism` objects."""
    candidates = self.mechs.values()
    return [candidate for candidate in candidates
            if isinstance(candidate, ClientMechanism)]
|
List of available :class:`ClientMechanism` objects.
|
def major_axis_endpoints(self):
    """Return the endpoints of the major axis."""
    major = np.argmax(self.axlens)    # index of the longest axis
    offset = self.paxes[:, major]     # center-to-endpoint vector
    return self.ctr - offset, self.ctr + offset
|
Return the endpoints of the major axis.
|
def _classify_no_operation(self, regs_init, regs_fini, mem_fini, written_regs, read_regs):
"""Classify no-operation gadgets.
"""
# TODO: Flags should be taken into account
matches = []
# Check that registers didn't change their value.
regs_changed = any(regs_init[r] != regs_fini[r] for r in regs_init)
# Check that flags didn't change their value.
flags_changed = False
# Check that memory didn't change.
mem_changed = mem_fini.get_write_count() != 0
if not regs_changed and not flags_changed and not mem_changed:
matches.append({
"op": "nop",
})
return matches
|
Classify no-operation gadgets.
|
def polarity_scores(self, text):
    """
    Return sentiment scores for the input text.

    Positive values are positive valence, negative values are negative
    valence.

    :param text: text to score; emoji tokens found in ``self.emojis`` are
        first replaced by their textual descriptions.
    :returns: the dict produced by ``self.score_valence`` (sentiment scores;
        exact keys are defined by ``score_valence`` — presumably
        neg/neu/pos/compound, confirm there).
    """
    # convert emojis to their textual descriptions
    text_token_list = text.split()
    text_no_emoji_lst = []
    for token in text_token_list:
        if token in self.emojis:
            # get the textual description
            description = self.emojis[token]
            text_no_emoji_lst.append(description)
        else:
            text_no_emoji_lst.append(token)
    text = " ".join(x for x in text_no_emoji_lst)
    sentitext = SentiText(text)
    sentiments = []
    words_and_emoticons = sentitext.words_and_emoticons
    for item in words_and_emoticons:
        valence = 0
        # NOTE(review): list.index() returns the FIRST occurrence, so a
        # repeated token always maps to its first position — confirm intended.
        i = words_and_emoticons.index(item)
        # check for vader_lexicon words that may be used as modifiers or negations
        if item.lower() in BOOSTER_DICT:
            sentiments.append(valence)
            continue
        # "kind of" is an idiom, not a sentiment-bearing use of "kind"
        if (i < len(words_and_emoticons) - 1 and item.lower() == "kind" and
                words_and_emoticons[i + 1].lower() == "of"):
            sentiments.append(valence)
            continue
        sentiments = self.sentiment_valence(valence, sentitext, item, i, sentiments)
    # shift emphasis toward the clause following a "but"
    sentiments = self._but_check(words_and_emoticons, sentiments)
    valence_dict = self.score_valence(sentiments, text)
    return valence_dict
|
Return a float for sentiment strength based on the input text.
Positive values are positive valence, negative values are negative
valence.
|
def _run_submission(self, metadata):
    """Runs submission inside Docker container.
    Args:
        metadata: dictionary with submission metadata
    Returns:
        True if status code of Docker command was success (i.e. zero),
        False otherwise.
    """
    if self._use_gpu:
        docker_binary = 'nvidia-docker'
        container_name = metadata['container_gpu']
    else:
        docker_binary = 'docker'
        container_name = metadata['container']
    is_defense = metadata['type'] == 'defense'
    # Defenses write a result CSV; attacks write adversarial images.
    output_mount = '/output_data' if is_defense else '/output_images'
    cmd = [docker_binary, 'run',
           '--network=none',
           '-m=24g',
           '-v', '{0}:/input_images:ro'.format(self._sample_input_dir),
           '-v', '{0}:{1}'.format(self._sample_output_dir, output_mount),
           '-v', '{0}:/code'.format(self._extracted_submission_dir),
           '-w', '/code',
           container_name,
           './' + metadata['entry_point'],
           '/input_images']
    if is_defense:
        cmd.append('/output_data/result.csv')
    else:
        # Attacks additionally receive a randomly chosen epsilon budget.
        epsilon = np.random.choice(ALLOWED_EPS)
        cmd += ['/output_images', str(epsilon)]
    logging.info('Command to run submission: %s', ' '.join(cmd))
    return shell_call(cmd)
|
Runs submission inside Docker container.
Args:
metadata: dictionary with submission metadata
Returns:
True if status code of Docker command was success (i.e. zero),
False otherwise.
|
def compressed_bytes2ibytes(compressed, size):
    """
    CONVERT AN ARRAY OF BYTES TO A BYTE-BLOCK GENERATOR
    USEFUL IN THE CASE WHEN WE WANT TO LIMIT HOW MUCH WE FEED ANOTHER
    GENERATOR (LIKE A DECOMPRESSOR)
    """
    # 16 + MAX_WBITS tells zlib to expect a gzip wrapper.
    decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
    limit = mo_math.ceiling(len(compressed), size)
    for offset in range(0, limit, size):
        try:
            chunk = compressed[offset: offset + size]
            yield decompressor.decompress(chunk)
        except Exception as e:
            Log.error("Not expected", e)
|
CONVERT AN ARRAY OF BYTES TO A BYTE-BLOCK GENERATOR
USEFUL IN THE CASE WHEN WE WANT TO LIMIT HOW MUCH WE FEED ANOTHER
GENERATOR (LIKE A DECOMPRESSOR)
|
def load_map_projection(filename,
                        center=None, center_right=None, radius=None, method='orthographic',
                        registration='native', chirality=None, sphere_radius=None,
                        pre_affine=None, post_affine=None, meta_data=None):
    '''
    load_map_projection(filename) yields the map projection indicated by the given file name. Map
    projections define the parameters of a projection to the 2D cortical surface via a
    registration name and projection parameters.
    This function is primarily a wrapper around the MapProjection.load() function; for information
    about options, see MapProjection.load.
    '''
    # NOTE(review): the meta_data argument is accepted but NOT forwarded to
    # MapProjection.load() below — confirm whether this omission is intentional.
    return MapProjection.load(filename,
                              center=center, center_right=center_right, radius=radius,
                              method=method, registration=registration, chirality=chirality,
                              sphere_radius=sphere_radius, pre_affine=pre_affine,
                              post_affine=post_affine)
|
load_map_projection(filename) yields the map projection indicated by the given file name. Map
projections define the parameters of a projection to the 2D cortical surface via a
registration name and projection parameters.
This function is primarily a wrapper around the MapProjection.load() function; for information
about options, see MapProjection.load.
|
def merge_map(a, b):
    """Recursively merge elements of argument b into argument a.

    Primarily used for merging two dictionaries together, where dict b takes
    precedence over dict a. If 2 lists are provided, they are concatenated.
    Note: dict a is updated in place and returned; any other pairing of
    types yields b unchanged.
    """
    if isinstance(a, list) and isinstance(b, list):
        return a + b
    if isinstance(a, dict) and isinstance(b, dict):
        for key, value in b.items():
            a[key] = merge_map(a[key], value) if key in a else value
        return a
    return b
|
Recursively merge elements of argument b into argument a.
Primarily used for merging two dictionaries together, where dict b takes
precedence over dict a. If 2 lists are provided, they are concatenated.
|
def scheduling_time_index(J,p,r,w):
    """
    scheduling_time_index: model for the one machine scheduling problem
    using the time index formulation.
    Parameters:
        - J: set of jobs
        - p[j]: processing time of job j
        - r[j]: earliest start time of job j
        - w[j]: weight of job j; the objective is the sum of the weighted completion time
    Returns a model, ready to be solved.

    NOTE(review): the original description said "total weighted tardiness",
    but the objective below is the sum of weighted completion times
    (no due dates are used) — confirm which problem is intended.
    """
    model = Model("scheduling: time index")
    # Planning horizon: latest release plus total processing time.
    T = max(r.values()) + sum(p.values())
    X = {} # X[j,t]=1 if job j starts processing at time t, 0 otherwise
    for j in J:
        # Start times range from the job's release up to the latest feasible start.
        for t in range(r[j], T-p[j]+2):
            X[j,t] = model.addVar(vtype="B", name="x(%s,%s)"%(j,t))
    for j in J:
        # Each job must start exactly once.
        model.addCons(quicksum(X[j,t] for t in range(1,T+1) if (j,t) in X) == 1, "JobExecution(%s)"%(j))
    for t in range(1,T+1):
        # At most one job may occupy the machine at time t: a job j occupies
        # time t if it started within the previous p[j]-1 periods.
        ind = [(j,t2) for j in J for t2 in range(t-p[j]+1,t+1) if (j,t2) in X]
        if ind != []:
            model.addCons(quicksum(X[j,t2] for (j,t2) in ind) <= 1, "MachineUB(%s)"%t)
    # Weighted completion time: job starting at t completes at t - 1 + p[j].
    model.setObjective(quicksum((w[j] * (t - 1 + p[j])) * X[j,t] for (j,t) in X), "minimize")
    model.data = X
    return model
|
scheduling_time_index: model for the one machine total weighted tardiness problem
Model for the one machine total weighted tardiness problem
using the time index formulation
Parameters:
- J: set of jobs
- p[j]: processing time of job j
- r[j]: earliest start time of job j
- w[j]: weight of job j; the objective is the sum of the weighted completion time
Returns a model, ready to be solved.
|
def result(self):
    """
    The result from realising the future

    If the result is not available, block until done.

    :return: result of the future
    :raises: any exception encountered during realising the future
    """
    if self._result is None:
        self.await_result()
    chunks, error = self._result
    if error is not None:
        raise error
    return chunks
|
The result from realising the future
If the result is not available, block until done.
:return: result of the future
:raises: any exception encountered during realising the future
|
def compile(code, silent=True, ignore_errors=False, optimize=True):
    """Compiles subroutine-forms into a complete working code.

    A program such as:

        : sub1 <sub1 code ...> ;
        : sub2 <sub2 code ...> ;
        sub1 foo sub2 bar

    is compiled into:

        <sub1 address> call
        foo
        <sub2 address> call
        exit
        <sub1 code ...> return
        <sub2 code ...> return

    Optimizations are first done on subroutine bodies, then on the main loop
    and finally, symbols are resolved (i.e., placeholders for subroutine
    addresses are replaced with actual addresses).

    Args:
        code: A list of parsed words (e.g. the output of parse()).
        silent: If set to False, will print optimization messages.
        ignore_errors: Only applies to the optimization engine, if set to False
            it will not raise any exceptions. The actual compilation will still
            raise errors.
        optimize: Flag to control whether to optimize code.
    Raises:
        CompileError: Raised if invalid code is detected (shadowed builtin
            word, invalid word name, or a failed final check).
    Returns:
        An array of code that can be run by a Machine. Typically, you want to
        pass this to a Machine without doing optimizations.
    Usage:
        source = parse("<source code>")
        code = compile(source)
        machine = Machine(code, optimize=False)
        machine.run()
    """
    assert(isinstance(code, list))
    output = []
    subroutine = {}
    builtins = Machine([]).instructions
    # Gather up subroutines
    try:
        it = code.__iter__()
        while True:
            word = next(it)
            if word == ":":
                # ":" starts a definition; the next word is the new name.
                name = next(it)
                if name in builtins:
                    raise CompileError("Cannot shadow internal word definition '%s'." % name)
                if name in [":", ";"]:
                    raise CompileError("Invalid word name '%s'." % name)
                subroutine[name] = []
                # Collect the body up to the closing ";", which becomes a return.
                while True:
                    op = next(it)
                    if op == ";":
                        subroutine[name].append(instructions.lookup(instructions.return_))
                        break
                    else:
                        subroutine[name].append(op)
            else:
                output.append(word)
    except StopIteration:
        pass
    # Expand all subroutine words to ["<name>", "call"]
    # NOTE(review): the loop variable `code` below shadows the parameter
    # `code`; harmless here (the parameter is fully consumed), but fragile.
    for name, code in subroutine.items():
        # For subroutines
        xcode = []
        for op in code:
            xcode.append(op)
            if op in subroutine:
                xcode.append(instructions.lookup(instructions.call))
        subroutine[name] = xcode
    # Compile main code (code outside of subroutines)
    xcode = []
    for op in output:
        xcode.append(op)
        if op in subroutine:
            xcode.append(instructions.lookup(instructions.call))
    # Because main code comes before subroutines, we need to explicitly add an
    # exit instruction
    output = xcode
    if len(subroutine) > 0:
        output += [instructions.lookup(instructions.exit)]
    # Optimize main code
    if optimize:
        output = optimizer.optimized(output, silent=silent, ignore_errors=False)
    # Add subroutines to output, track their locations
    location = {}
    for name, code in subroutine.items():
        location[name] = len(output)
        if optimize:
            output += optimizer.optimized(code, silent=silent, ignore_errors=False)
        else:
            output += code
    # Resolve all subroutine references
    for i, op in enumerate(output):
        if op in location:
            output[i] = location[op]
    output = native_types(output)
    if not ignore_errors:
        check(output)
    return output
|
Compiles subroutine-forms into a complete working code.
A program such as:
: sub1 <sub1 code ...> ;
: sub2 <sub2 code ...> ;
sub1 foo sub2 bar
is compiled into:
<sub1 address> call
foo
<sub2 address> call
exit
<sub1 code ...> return
<sub2 code ...> return
Optimizations are first done on subroutine bodies, then on the main loop
and finally, symbols are resolved (i.e., placeholders for subroutine
addresses are replaced with actual addresses).
Args:
silent: If set to False, will print optimization messages.
ignore_errors: Only applies to the optimization engine, if set to False
it will not raise any exceptions. The actual compilation will still
raise errors.
optimize: Flag to control whether to optimize code.
Raises:
CompilationError - Raised if invalid code is detected.
Returns:
An array of code that can be run by a Machine. Typically, you want to
pass this to a Machine without doing optimizations.
Usage:
source = parse("<source code>")
code = compile(source)
machine = Machine(code, optimize=False)
machine.run()
|
def get_object(self, ObjectClass, id):
    """ Retrieve object of type ``ObjectClass`` by ``id``.

    | Returns object on success.
    | Returns None if no object, or more than one object, matches ``id``.
    """
    # EAFP; also avoids shadowing the builtin `object` with a local name.
    try:
        return ObjectClass.objects.get(id=id)
    except (ObjectClass.DoesNotExist, ObjectClass.MultipleObjectsReturned):
        return None
|
Retrieve object of type ``ObjectClass`` by ``id``.
| Returns object on success.
| Returns None otherwise.
|
def db_log(self, transition, from_state, instance, *args, **kwargs):
    """Logs the transition into the database."""
    if not self.log_model:
        return None
    model_class = self._get_log_model_class()
    # Pull model-specific extra attributes out of the call's kwargs.
    extras = {
        db_field: kwargs.get(transition_arg, default)
        for db_field, transition_arg, default in model_class.EXTRA_LOG_ATTRIBUTES
    }
    return model_class.log_transition(
        modified_object=instance,
        transition=transition.name,
        from_state=from_state.name,
        to_state=transition.target.name,
        **extras)
|
Logs the transition into the database.
|
def build_article_from_xml(article_xml_filename, detail="brief",
                           build_parts=None, remove_tags=None):
    """
    Parse JATS XML with elifetools parser, and populate an
    eLifePOA article object.

    Basic data crossref needs: article_id, doi, title, contributors with names set
    detail="brief" is normally enough,
    detail="full" will populate all the contributor affiliations that are linked by xref tags

    :param article_xml_filename: path to the JATS XML file to parse
    :param detail: "brief" or "full"; passed through to the contributor parsers
    :param build_parts: optional collection of part names; only parts accepted
        by build_part_check are populated
    :param remove_tags: optional tags to strip from abstract/digest text
    :returns: tuple of (article, error_count)

    NOTE(review): error_count is initialized to 0 and never incremented in
    this function, so callers always receive 0 — confirm whether it is still
    meaningful.
    """
    # True when the named part should be populated, per build_parts.
    build_part = lambda part: build_part_check(part, build_parts)
    error_count = 0
    soup = parser.parse_document(article_xml_filename)
    # Get DOI
    doi = parser.doi(soup)
    # Create the article object
    article = ea.Article(doi, title=None)
    # article version from the filename if possible
    utils.set_attr_if_value(article, 'version',
                            utils.version_from_xml_filename(article_xml_filename))
    # journal title
    if build_part('basic'):
        article.journal_title = parser.journal_title(soup)
    # issn
    if build_part('basic'):
        article.journal_issn = parser.journal_issn(soup, "electronic")
        # fall back to any ISSN when no electronic one is present
        if article.journal_issn is None:
            article.journal_issn = parser.journal_issn(soup)
    # Related articles
    if build_part('related_articles'):
        article.related_articles = build_related_articles(parser.related_article(soup))
    # Get publisher_id pii
    if build_part('basic'):
        article.pii = parser.publisher_id(soup)
    # set object manuscript value
    if build_part('basic'):
        manuscript = parser.publisher_id(soup)
        if not manuscript and doi:
            # try to get it from the DOI
            manuscript = doi.split('.')[-1]
        article.manuscript = manuscript
    # Set the articleType
    if build_part('basic'):
        article_type = parser.article_type(soup)
        if article_type:
            article.article_type = article_type
    # title
    if build_part('basic'):
        article.title = parser.full_title(soup)
    #print article.title
    # publisher_name
    if build_part('basic'):
        article.publisher_name = parser.publisher(soup)
    # abstract
    if build_part('abstract'):
        article.abstract = clean_abstract(parser.full_abstract(soup), remove_tags)
    # digest
    if build_part('abstract'):
        article.digest = clean_abstract(parser.full_digest(soup), remove_tags)
    # elocation-id
    if build_part('basic'):
        article.elocation_id = parser.elocation_id(soup)
    # issue
    if build_part('basic'):
        article.issue = parser.issue(soup)
    # self-uri
    if build_part('basic'):
        article.self_uri_list = build_self_uri_list(parser.self_uri(soup))
    # contributors
    if build_part('contributors'):
        # get the competing interests if available
        competing_interests = parser.competing_interests(soup, None)
        all_contributors = parser.contributors(soup, detail)
        author_contributors = [con for con in all_contributors
                               if con.get('type') in ['author', 'on-behalf-of']]
        contrib_type = "author"
        contributors = build_contributors(author_contributors, contrib_type, competing_interests)
        contrib_type = "author non-byline"
        authors = parser.authors_non_byline(soup, detail)
        contributors_non_byline = build_contributors(authors, contrib_type, competing_interests)
        article.contributors = contributors + contributors_non_byline
    # license href
    if build_part('license'):
        license_object = ea.License()
        license_object.href = parser.license_url(soup)
        license_object.copyright_statement = parser.copyright_statement(soup)
        article.license = license_object
    # article_category
    if build_part('categories'):
        article.article_categories = parser.category(soup)
    # keywords
    if build_part('keywords'):
        article.author_keywords = parser.keywords(soup)
    # research organisms
    if build_part('research_organisms'):
        article.research_organisms = parser.research_organism(soup)
    # funding awards
    if build_part('funding'):
        article.funding_awards = build_funding(parser.full_award_groups(soup))
    # datasets
    if build_part('datasets'):
        datasets_json = parser.datasets_json(soup)
        article.datasets = build_datasets(datasets_json)
        article.data_availability = build_data_availability(datasets_json)
    # references or citations
    if build_part('references'):
        article.ref_list = build_ref_list(parser.refs(soup))
    # components with component DOI
    if build_part('components'):
        article.component_list = build_components(parser.components(soup))
    # History dates
    if build_part('history'):
        date_types = ["received", "accepted"]
        for date_type in date_types:
            history_date = parser.history_date(soup, date_type)
            if history_date:
                date_instance = ea.ArticleDate(date_type, history_date)
                article.add_date(date_instance)
    # Pub date
    if build_part('pub_dates'):
        build_pub_dates(article, parser.pub_dates(soup))
    # Set the volume if present
    if build_part('volume'):
        volume = parser.volume(soup)
        if volume:
            article.volume = volume
    if build_part('is_poa'):
        article.is_poa = parser.is_poa(soup)
    return article, error_count
|
Parse JATS XML with elifetools parser, and populate an
eLifePOA article object
Basic data crossref needs: article_id, doi, title, contributors with names set
detail="brief" is normally enough,
detail="full" will populate all the contributor affiliations that are linked by xref tags
|
def wait_for_participant_newbalance(
        raiden: 'RaidenService',
        payment_network_id: PaymentNetworkID,
        token_address: TokenAddress,
        partner_address: Address,
        target_address: Address,
        target_balance: TokenAmount,
        retry_timeout: float,
) -> None:
    """Wait until the given channel's balance reaches at least target_balance.
    Note:
        This does not time out, use gevent.Timeout.
    """
    if target_address == raiden.address:
        def balance(channel_state):
            return channel_state.our_state.contract_balance
    elif target_address == partner_address:
        def balance(channel_state):
            return channel_state.partner_state.contract_balance
    else:
        raise ValueError('target_address must be one of the channel participants')

    def current_channel_state():
        # Re-read the channel state from the node on every poll.
        return views.get_channelstate_for(
            views.state_from_raiden(raiden),
            payment_network_id,
            token_address,
            partner_address,
        )

    channel_state = current_channel_state()
    while balance(channel_state) < target_balance:
        gevent.sleep(retry_timeout)
        channel_state = current_channel_state()
|
Wait until a given channels balance exceeds the target balance.
Note:
This does not time out, use gevent.Timeout.
|
def items(self, section):
    """Retrieve all key/value pairs for a given section."""
    data_kind = ConfigLine.KIND_DATA
    for entry in self.iter_lines(section):
        if entry.kind == data_kind:
            yield entry.key, entry.value
|
Retrieve all key/value pairs for a given section.
|
def reset(self):
    """Resets the terminal screen and redraws the prompt."""
    cli = self._cli
    cli.reset()
    cli.buffers[DEFAULT_BUFFER].reset()
    cli.renderer.request_absolute_cursor_position()
    cli._redraw()
|
Resets terminal screen
|
def paintEvent( self, event ):
    """
    Overloads the paint event to draw rounded edges on this widget.

    :param      event | <QPaintEvent>
    """
    super(XRolloutItem, self).paintEvent(event)
    with XPainter(self) as painter:
        rect_w = self.width() - 3
        rect_h = self.height() - 3
        palette = self.palette()
        # Darkened midlight for the border, plain midlight for the fill.
        edge_color = palette.color(QPalette.Midlight).darker(180)
        pen = QPen(edge_color)
        pen.setWidthF(0.5)
        painter.setPen(pen)
        painter.setBrush(palette.color(QPalette.Midlight))
        painter.setRenderHint(XPainter.Antialiasing)
        painter.drawRoundedRect(1, 1, rect_w, rect_h, 10, 10)
|
Overloads the paint event to draw rounded edges on this widget.
:param event | <QPaintEvent>
|
def play(self, wav=None, data=None, rate=16000, channels=1, width=2, block=True, spectrum=None):
    """
    play wav file or raw audio (string or generator)
    Args:
        wav: wav file path
        data: raw audio data, str or iterator
        rate: sample rate, only for raw audio
        channels: channel number, only for raw data
        width: raw audio data width, 16 bit is 2, only for raw data
        block: if true, block until audio is played.
        spectrum: if true, use a spectrum analyzer thread to analyze data
    """
    if wav:
        # WAV input: take format parameters from the file header.
        wav_file = wave.open(wav, 'rb')
        rate = wav_file.getframerate()
        channels = wav_file.getnchannels()
        width = wav_file.getsampwidth()

        def frame_source(source):
            # Stream frames lazily; close the file once exhausted.
            chunk = source.readframes(CHUNK_SIZE)
            while chunk:
                yield chunk
                chunk = source.readframes(CHUNK_SIZE)
            source.close()

        data = frame_source(wav_file)
    self.stop_event.clear()
    if block:
        self._play(data, rate, channels, width, spectrum)
    else:
        worker = threading.Thread(target=self._play, args=(data, rate, channels, width, spectrum))
        worker.start()
|
play wav file or raw audio (string or generator)
Args:
wav: wav file path
data: raw audio data, str or iterator
rate: sample rate, only for raw audio
channels: channel number, only for raw data
width: raw audio data width, 16 bit is 2, only for raw data
block: if true, block until audio is played.
spectrum: if true, use a spectrum analyzer thread to analyze data
|
def get_commands_from_file(self, mission_file, role):
    """Get commands from xml file as a list of (command_type:int, turnbased:boolean, command:string)"""
    tree = etree.parse(mission_file)
    root = tree.getroot()
    return self.get_commands_from_xml(root, role)
|
Get commands from xml file as a list of (command_type:int, turnbased:boolean, command:string)
|
def return_real_id_base(dbpath, set_object):
    """
    Generic function which returns a list of real_id's

    Parameters
    ----------
    dbpath : string, path to SQLite database file
    set_object : object (either TestSet or TrainSet) which is stored in the database

    Returns
    -------
    return_list : list of real_id values for the dataset (a real_id is the filename minus the suffix and prefix)
    """
    engine = create_engine('sqlite:////' + dbpath)
    make_session = sessionmaker(bind=engine)
    session = make_session()
    rows = session.query(set_object).order_by(set_object.id)
    real_ids = [row.real_id for row in rows]
    session.close()
    return real_ids
|
Generic function which returns a list of real_id's
Parameters
----------
dbpath : string, path to SQLite database file
set_object : object (either TestSet or TrainSet) which is stored in the database
Returns
-------
return_list : list of real_id values for the dataset (a real_id is the filename minus the suffix and prefix)
|
def kill(args):
    '''kill is a helper function to call the "kill" function of the client,
    meaning we bring down an instance.
    '''
    from sregistry.main import Client as cli
    names = args.commands
    if names:
        for instance_name in names:
            cli.destroy(instance_name)
        sys.exit(0)
|
kill is a helper function to call the "kill" function of the client,
meaning we bring down an instance.
|
def make_grasp_phenotype_file(fn, pheno, out):
    """
    Subset the GRASP database on a specific phenotype.

    Parameters
    ----------
    fn : str
        Path to GRASP database file.
    pheno : str
        Phenotype to extract from database.
    out : str
        Path to output file for subset of GRASP database.
    """
    import subprocess
    # Single quotes in the phenotype would break the awk quoting; encode them.
    escaped_pheno = pheno.replace("'", '\\x27')
    command = 'awk -F "\\t" \'NR == 1 || $12 == "{}" \' {} > {}'.format(
        escaped_pheno, fn, out)
    subprocess.check_call(command, shell=True)
|
Subset the GRASP database on a specific phenotype.
Parameters
----------
fn : str
Path to GRASP database file.
pheno : str
Phenotype to extract from database.
out : str
Path to output file for subset of GRASP database.
|
def validate_args(self, qubits: Sequence[Qid]) -> None:
    """Checks if this gate can be applied to the given qubits.

    By default checks if input is of type Qid and qubit count.
    Child classes can override.

    Args:
        qubits: The collection of qubits to potentially apply the gate to.

    Raises:
        ValueError: The gate can't be applied to the qubits.
    """
    if len(qubits) == 0:
        raise ValueError(
            "Applied a gate to an empty set of qubits. Gate: {}".format(
                repr(self)))
    if len(qubits) != self.num_qubits():
        raise ValueError(
            'Wrong number of qubits for <{!r}>. '
            'Expected {} qubits but got <{!r}>.'.format(
                self,
                self.num_qubits(),
                qubits))
    # Generator expression short-circuits and avoids building a temp list
    # (the original wrapped the check in any([...])).
    if any(not isinstance(qubit, Qid) for qubit in qubits):
        raise ValueError(
            'Gate was called with type different than Qid.')
|
Checks if this gate can be applied to the given qubits.
By default checks if input is of type Qid and qubit count.
Child classes can override.
Args:
qubits: The collection of qubits to potentially apply the gate to.
Throws:
ValueError: The gate can't be applied to the qubits.
|
def cal_frame_according_boundaries(left, right, top, bottom, parent_size, gaphas_editor=True, group=True):
    """ Generate margin and relative position and size handed boundary parameter and parent size """
    margin = cal_margin(parent_size)
    width = right - left
    height = bottom - top
    if group:
        # Frame of a grouped state: pad by the margin but clamp the upper-left
        # corner inside the parent state.
        rel_pos = max(left - margin, 0), max(top - margin, 0)
        # Clamp the lower-right corner inside the parent state as well.
        size = (min(width + 2 * margin, parent_size[0] - rel_pos[0]),
                min(height + 2 * margin, parent_size[1] - rel_pos[1]))
    else:
        # Frame inside of a state: use the raw boundaries unchanged.
        rel_pos = left, top
        size = width, height
    return margin, rel_pos, size
|
Generate margin and relative position and size handed boundary parameter and parent size
|
def create_filter_predicate(self):
    '''Creates a filter predicate.

    The list of available filters is given by calls to
    ``add_filter``, and the list of filters to use is given by
    parameters in ``params``.

    In this default implementation, multiple filters can be
    specified with the ``filter`` parameter. Each filter is
    initialized with the same set of query parameters given to the
    search engine.

    The returned function accepts a ``(content_id, FC)`` and
    returns ``True`` if and only if every selected predicate
    returns ``True`` on the same input.
    '''
    assert self.query_content_id is not None, \
        'must call SearchEngine.set_query_id first'
    filter_names = self.query_params.getlist('filter')
    if len(filter_names) == 0 and 'already_labeled' in self._filters:
        filter_names = ['already_labeled']
    init_filters = [(n, self._filters[n]) for n in filter_names]
    preds = [lambda _: True]
    for name, p in init_filters:
        preds.append(p.set_query_id(self.query_content_id)
                      .set_query_params(self.query_params)
                      .create_predicate())

    # Python 3 removed tuple-parameter unpacking in lambdas (PEP 3113);
    # the original `lambda (cid, fc): ...` is a SyntaxError on Python 3,
    # so unpack the (content_id, FC) pair explicitly.
    def predicate(pair):
        _, fc = pair
        return fc is not None and all(p(pair) for p in preds)
    return predicate
|
Creates a filter predicate.
The list of available filters is given by calls to
``add_filter``, and the list of filters to use is given by
parameters in ``params``.
In this default implementation, multiple filters can be
specified with the ``filter`` parameter. Each filter is
initialized with the same set of query parameters given to the
search engine.
The returned function accepts a ``(content_id, FC)`` and
returns ``True`` if and only if every selected predicate
returns ``True`` on the same input.
|
def _redirect_output(self, statement: Statement) -> Tuple[bool, utils.RedirectionSavedState]:
    """Handles output redirection for >, >>, and |.

    Saves the current stdout/pipe-reader state so it can be restored after
    the command finishes, then rebinds ``sys.stdout`` and ``self.stdout``
    to a pipe process, a file, or a paste-buffer temp file depending on
    the parsed statement.

    :param statement: a parsed statement from the user
    :return: A bool telling if an error occurred and a utils.RedirectionSavedState object
    """
    import io
    import subprocess

    redir_error = False

    # Initialize the saved state
    saved_state = utils.RedirectionSavedState(self.stdout, sys.stdout, self.cur_pipe_proc_reader)

    # Redirection may be disabled globally; return the untouched state.
    if not self.allow_redirection:
        return redir_error, saved_state

    if statement.pipe_to:
        # Create a pipe with read and write sides
        read_fd, write_fd = os.pipe()

        # Open each side of the pipe
        subproc_stdin = io.open(read_fd, 'r')
        new_stdout = io.open(write_fd, 'w')

        # We want Popen to raise an exception if it fails to open the process.  Thus we don't set shell to True.
        try:
            # Set options to not forward signals to the pipe process. If a Ctrl-C event occurs,
            # our sigint handler will forward it only to the most recent pipe process. This makes
            # sure pipe processes close in the right order (most recent first).
            if sys.platform == 'win32':
                creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
                start_new_session = False
            else:
                creationflags = 0
                start_new_session = True

            # For any stream that is a StdSim, we will use a pipe so we can capture its output
            proc = \
                subprocess.Popen(statement.pipe_to,
                                 stdin=subproc_stdin,
                                 stdout=subprocess.PIPE if isinstance(self.stdout, utils.StdSim) else self.stdout,
                                 stderr=subprocess.PIPE if isinstance(sys.stderr, utils.StdSim) else sys.stderr,
                                 creationflags=creationflags,
                                 start_new_session=start_new_session)

            saved_state.redirecting = True
            saved_state.pipe_proc_reader = utils.ProcReader(proc, self.stdout, sys.stderr)
            sys.stdout = self.stdout = new_stdout

        except Exception as ex:
            self.perror('Failed to open pipe because - {}'.format(ex), traceback_war=False)
            # Clean up both pipe ends on failure so descriptors don't leak.
            subproc_stdin.close()
            new_stdout.close()
            redir_error = True

    elif statement.output:
        import tempfile
        if (not statement.output_to) and (not self.can_clip):
            self.perror("Cannot redirect to paste buffer; install 'pyperclip' and re-run to enable",
                        traceback_war=False)
            redir_error = True
        elif statement.output_to:
            # going to a file
            mode = 'w'

            # statement.output can only contain
            # REDIRECTION_APPEND or REDIRECTION_OUTPUT
            if statement.output == constants.REDIRECTION_APPEND:
                mode = 'a'
            try:
                new_stdout = open(statement.output_to, mode)
                saved_state.redirecting = True
                sys.stdout = self.stdout = new_stdout
            except OSError as ex:
                self.perror('Failed to redirect because - {}'.format(ex), traceback_war=False)
                redir_error = True
        else:
            # going to a paste buffer
            new_stdout = tempfile.TemporaryFile(mode="w+")
            saved_state.redirecting = True
            sys.stdout = self.stdout = new_stdout

            # Appending to the paste buffer starts from its current contents.
            if statement.output == constants.REDIRECTION_APPEND:
                self.poutput(get_paste_buffer())

    return redir_error, saved_state
|
Handles output redirection for >, >>, and |.
:param statement: a parsed statement from the user
:return: A bool telling if an error occurred and a utils.RedirectionSavedState object
|
def _AddDependencyEdges(self, rdf_artifact):
    """Register dependency edges for a single artifact.

    Looks up the path dependencies (attribute names) of the given
    artifact. Each dependency becomes a directed edge from the attribute
    node to the artifact node. An artifact without dependencies is
    immediately marked reachable and flagged as provided.

    Args:
      rdf_artifact: The artifact object.
    """
    dependencies = artifact_registry.GetArtifactPathDependencies(
        rdf_artifact)
    if not dependencies:
        # Nothing to wait on: the artifact is collectible right away.
        self.reachable_nodes.add(rdf_artifact.name)
        self.graph[rdf_artifact.name].is_provided = True
        return
    for attribute in dependencies:
        self._AddEdge(attribute, rdf_artifact.name)
|
Add an edge for every dependency of the given artifact.
This method gets the attribute names for a given artifact and for every
attribute it adds a directed edge from the attribute node to the artifact
node. If an artifact does not have any dependencies it is added to the set
of reachable nodes.
Args:
rdf_artifact: The artifact object.
|
def DbGetDeviceMemberList(self, argin):
    """Get device-name members matching the specified filter.

    :param argin: The filter
    :type: tango.DevString
    :return: Device names member list
    :rtype: tango.DevVarStringArray
    """
    self._log.debug("In DbGetDeviceMemberList()")
    # Translate user wildcards into the database's pattern syntax.
    pattern = replace_wildcard(argin)
    return self.db.get_device_member_list(pattern)
|
Get a list of device name members for device name matching the
specified filter
:param argin: The filter
:type: tango.DevString
:return: Device names member list
:rtype: tango.DevVarStringArray
|
def heartbeat(self):
    '''Renew the job's lock lease, sending the current job data along.

    Records and returns the new expiry time. Raises LostLockException
    when the server reports the lock is no longer held.
    '''
    logger.debug('Heartbeating %s (ttl = %s)', self.jid, self.ttl)
    payload = json.dumps(self.data)
    try:
        response = self.client(
            'heartbeat', self.jid, self.client.worker_name, payload)
        self.expires_at = float(response or 0)
    except QlessException:
        raise LostLockException(self.jid)
    logger.debug('Heartbeated %s (ttl = %s)', self.jid, self.ttl)
    return self.expires_at
|
Renew the heartbeat, if possible, and optionally update the job's
user data.
|
def fingerprint(self):
    """Return a permutation-invariant fingerprint of the whole graph.

    The result does not depend on the vertex ordering. Two different
    (molecular) graphs may collide, but only with small probability.
    (See unit tests.)
    """
    if self.num_vertices == 0:
        # Empty graph: a fixed all-zero fingerprint.
        return np.zeros(20, np.ubyte)
    # Element-wise summation over per-vertex fingerprints is
    # independent of vertex order.
    return sum(self.vertex_fingerprints)
|
A total graph fingerprint
The result is invariant under permutation of the vertex indexes. The
chance that two different (molecular) graphs yield the same
fingerprint is small but not zero. (See unit tests.)
|
def set(self, path, value, version=-1):
    """Encode *value* to bytes (Py3k compatibility) and delegate to the
    base class set()."""
    encoded = to_bytes(value)
    super(XClient, self).set(path, encoded, version)
|
wraps the default set() and handles encoding (Py3k)
|
def run_osa_differ():
    """Entry point: build and publish an OpenStack-Ansible diff report."""
    args = parse_arguments()

    # Logging verbosity follows the command-line flags.
    if args.debug:
        log.setLevel(logging.DEBUG)
    elif args.verbose:
        log.setLevel(logging.INFO)

    # The storage directory must exist before any repositories are cloned.
    try:
        storage_dir = prepare_storage_dir(args.directory)
    except OSError:
        print("ERROR: Couldn't create the storage directory {0}. "
              "Please create it manually.".format(args.directory))
        sys.exit(1)

    old_commit = args.old_commit[0]
    new_commit = args.new_commit[0]
    repo_dir = "{0}/openstack-ansible".format(storage_dir)

    # Report header for the OpenStack-Ansible repository itself.
    report_rst = make_osa_report(repo_dir, old_commit, new_commit, args)

    # Optional Reno release notes for the packaged releases between the
    # two commits.
    if args.release_notes:
        report_rst += ("\nRelease Notes\n"
                       "-------------")
        report_rst += get_release_notes(repo_dir, old_commit, new_commit)

    # Role lists as of the older and newer commits.
    roles_old = get_roles(repo_dir, old_commit, args.role_requirements)
    roles_new = get_roles(repo_dir, new_commit, args.role_requirements)

    if not args.skip_roles:
        report_rst += ("\nOpenStack-Ansible Roles\n"
                       "-----------------------")
        report_rst += make_report(storage_dir,
                                  roles_old,
                                  roles_new,
                                  args.update,
                                  args.version_mappings)

    if not args.skip_projects:
        # Project lists as of the older and newer commits.
        projects_old = get_projects(repo_dir, old_commit)
        projects_new = get_projects(repo_dir, new_commit)
        report_rst += ("\nOpenStack Projects\n"
                       "------------------")
        report_rst += make_report(storage_dir,
                                  projects_old,
                                  projects_new,
                                  args.update)

    # Publish per the user's request and echo the result.
    print(publish_report(report_rst, args, old_commit, new_commit))
|
Start here.
|
def get_nn_info(self, structure, n):
    """
    Get all near-neighbor sites and weights (orders) of bonds for a given
    atom.

    :param structure: input Molecule.
    :param n: index of site for which to determine near neighbors.
    :return: [dict] representing a neighboring site and the type of
        bond present between site n and the neighboring site.
    """
    from pymatgen.io.babel import BabelMolAdaptor

    obmol = BabelMolAdaptor(structure).openbabel_mol
    siw = []

    if self.order:
        # Bond orders only need to be perceived once per molecule; the
        # original version re-ran this inside the neighbor loop.
        obmol.PerceiveBondOrders()

    # Locate the OpenBabel atom whose coordinates match site n exactly.
    site_atom = [a for i, a in enumerate(ob.OBMolAtomDFSIter(obmol))
                 if [a.GetX(), a.GetY(), a.GetZ()] == list(
                     structure[n].coords)][0]

    for neighbor in ob.OBAtomAtomIter(site_atom):
        coords = [neighbor.GetX(), neighbor.GetY(), neighbor.GetZ()]
        # Map the OpenBabel neighbor back to the pymatgen site by exact
        # coordinate comparison.
        site = [a for a in structure if list(a.coords) == coords][0]
        index = structure.index(site)
        bond = site_atom.GetBond(neighbor)
        # Weight is the bond order when requested, otherwise the length.
        if self.order:
            weight = bond.GetBondOrder()
        else:
            weight = bond.GetLength()
        siw.append({"site": site,
                    "image": (0, 0, 0),
                    "weight": weight,
                    "site_index": index})
    return siw
|
Get all near-neighbor sites and weights (orders) of bonds for a given
atom.
:param molecule: input Molecule.
:param n: index of site for which to determine near neighbors.
:return: [dict] representing a neighboring site and the type of
bond present between site n and the neighboring site.
|
def getHomoloGene(taxfile="build_inputs/taxid_taxname",
                  genefile="homologene.data",
                  proteinsfile="build_inputs/all_proteins.data",
                  proteinsclusterfile="build_inputs/proteins_for_clustering.data",
                  baseURL="http://ftp.ncbi.nih.gov/pub/HomoloGene/current/"):
    """
    Returns NCBI's HomoloGene tables.

    :param taxfile: path to local file or to baseURL/taxfile
    :param genefile: path to local file or to baseURL/genefile
    :param proteinsfile: path to local file or to baseURL/proteinsfile
    :param proteinsclusterfile: path to local file or to baseURL/proteinsclusterfile
    :param baseURL: baseURL for downloading files

    :returns genedf: HomoloGene Pandas dataframe
    :returns protclusdf: Pandas dataframe. Lists one protein per gene that were used for homologene clustering.
                If a gene has multiple protein accessions derived from alternative splicing,
                only one protein isoform that gives the most protein alignment to proteins in other species
                was selected for clustering and it is listed in this file.
    :returns proteinsdf: Pandas dataframe. Lists all proteins and their gene information.
                If a gene has multiple protein accessions derived from an alternative splicing event,
                each protein accession is listed on a separate line.
    """
    def getDf(inputfile):
        # Prefer a local file; otherwise download from baseURL.
        if os.path.isfile(inputfile):
            df = pd.read_table(inputfile, header=None)
        else:
            df = urllib2.urlopen(baseURL + inputfile)
            df = df.read().split("\n")
            df = [s for s in df if len(s) > 0]
            df = [s.split("\t") for s in df]
            df = pd.DataFrame(df)
        return df

    # Build the taxid -> organism-name mapping.
    taxdf = getDf(taxfile)
    taxdf.set_index([0], inplace=True)
    taxdi = taxdf.to_dict().get(1)

    genedf = getDf(genefile)
    genecols = ["HID", "Taxonomy ID", "Gene ID", "Gene Symbol",
                "Protein gi", "Protein accession"]
    genedf.columns = genecols
    genedf["organism"] = genedf["Taxonomy ID"].apply(lambda x: taxdi.get(x))

    # Shared column layout for both protein tables. The original defined
    # an identical second copy (protclustercols) that was never used.
    proteinscols = ["taxid", "entrez GeneID", "gene symbol", "gene description",
                    "protein accession.ver", "mrna accession.ver",
                    "length of protein listed in column 5",
                    "-11) contains data about gene location on the genome",
                    "starting position of gene in 0-based coordinate",
                    "end position of the gene in 0-based coordinate",
                    "strand",
                    "nucleotide gi of genomic sequence where this gene is annotated"]

    proteinsdf = getDf(proteinsfile)
    proteinsdf.columns = proteinscols
    proteinsdf["organism"] = proteinsdf["taxid"].apply(lambda x: taxdi.get(x))

    protclusdf = getDf(proteinsclusterfile)
    protclusdf.columns = proteinscols
    protclusdf["organism"] = protclusdf["taxid"].apply(lambda x: taxdi.get(x))

    return genedf, protclusdf, proteinsdf
|
Returns NCBI's HomoloGene tables.
:param taxfile: path to local file or to baseURL/taxfile
:param genefile: path to local file or to baseURL/genefile
:param proteinsfile: path to local file or to baseURL/proteinsfile
:param proteinsclusterfile: path to local file or to baseURL/proteinsclusterfile
:param baseURL: baseURL for downloading files
:returns genedf: Homolog gene Pandas dataframe
:returns protclusdf: Pandas dataframe. Lists one protein per gene that were used for homologene clustering.
If a gene has multiple protein accessions derived from alternative splicing,
only one protein isoform, the one that gives the most protein alignment to proteins in other species,
was selected for clustering and it is listed in this file.
:returns proteinsdf: Pandas dataframe. Lists all proteins and their gene information.
If a gene has multiple protein accessions derived from an alternative splicing event,
each protein accession is listed on a separate line.
|
def get_area(self):
    '''
    Calculates the area of the fault (km ** 2.) as the product of length
    (km) and downdip width (km), caching the intermediate widths on the
    instance.
    '''
    dip_rad = self.dip * np.pi / 180.
    depth_extent = self.lower_depth - self.upper_depth
    # Down-dip width follows from the vertical extent and the dip angle.
    self.downdip_width = depth_extent / np.sin(dip_rad)
    self.surface_width = self.downdip_width * np.cos(dip_rad)
    self.area = self.length * self.downdip_width
    return self.area
|
Calculates the area of the fault (km ** 2.) as the product of length
(km) and downdip width (km)
|
def check_contract_allowed(func):
    """Decorator: reject the request when the authenticated user's token
    is not allowed to access the ``contract`` keyword argument.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        contract = kwargs.get('contract')
        # Only enforce when a contract is present and the user is logged in.
        denied = (contract
                  and current_user.is_authenticated()
                  and not current_user.allowed(contract))
        if denied:
            return current_app.login_manager.unauthorized()
        return func(*args, **kwargs)
    return wrapper
|
Check if Contract is allowed by token
|
def add_files(self, *filenames, **kw):
    """
    Stage added and/or removed files in the working tree for the next commit.

    :param filenames: The filenames of the files to include in the next
                      commit (zero or more strings). If no arguments are
                      given all untracked files are added.
    :param kw: Keyword arguments are ignored (instead of raising
               :exc:`~exceptions.TypeError`) to enable backwards
               compatibility with older versions of `vcs-repo-mgr`
               where the keyword argument `all` was used.
    """
    # A local repository with a working tree is required before staging.
    self.create()
    self.ensure_working_tree()
    logger.info("Staging changes to be committed in %s ..", format_path(self.local))
    command = self.get_add_files_command(*filenames)
    self.context.execute(*command)
|
Include added and/or removed files in the working tree in the next commit.
:param filenames: The filenames of the files to include in the next
commit (zero or more strings). If no arguments are
given all untracked files are added.
:param kw: Keyword arguments are ignored (instead of raising
:exc:`~exceptions.TypeError`) to enable backwards
compatibility with older versions of `vcs-repo-mgr`
where the keyword argument `all` was used.
|
def run(self, endpoint, data=None, headers=None, extra_options=None):
    """
    Performs the request.

    :param endpoint: the endpoint to be called i.e. resource/v1/query?
    :type endpoint: str
    :param data: payload to be uploaded or request parameters
    :type data: dict
    :param headers: additional headers to be passed through as a dictionary
    :type headers: dict
    :param extra_options: additional options to be used when executing the request
        i.e. {'check_response': False} to avoid checking raising exceptions on non
        2XX or 3XX status codes
    :type extra_options: dict
    """
    extra_options = extra_options or {}
    session = self.get_conn(headers)

    # Join base_url and endpoint, inserting exactly one separating slash
    # when neither side provides one.
    needs_separator = (self.base_url and not self.base_url.endswith('/')
                       and endpoint and not endpoint.startswith('/'))
    if needs_separator:
        url = self.base_url + '/' + endpoint
    else:
        url = (self.base_url or '') + (endpoint or '')

    # GET passes the payload as query parameters, HEAD carries no payload,
    # every other method sends it in the request body.
    request_kwargs = {'headers': headers}
    if self.method == 'GET':
        request_kwargs['params'] = data
    elif self.method != 'HEAD':
        request_kwargs['data'] = data
    req = requests.Request(self.method, url, **request_kwargs)

    prepped_request = session.prepare_request(req)
    self.log.info("Sending '%s' to url: %s", self.method, url)
    return self.run_and_check(session, prepped_request, extra_options)
|
Performs the request
:param endpoint: the endpoint to be called i.e. resource/v1/query?
:type endpoint: str
:param data: payload to be uploaded or request parameters
:type data: dict
:param headers: additional headers to be passed through as a dictionary
:type headers: dict
:param extra_options: additional options to be used when executing the request
i.e. {'check_response': False} to avoid checking raising exceptions on non
2XX or 3XX status codes
:type extra_options: dict
|
def get_language_pack(locale):
    """Get/cache a language pack.

    Returns the language pack from cache if it exists; otherwise loads
    the locale's ``messages.json`` and caches it. On failure nothing is
    cached and the client side falls back on English.

    >>> get_language_pack('fr')['Dashboards']
    "Tableaux de bords"
    """
    pack = ALL_LANGUAGE_PACKS.get(locale)
    if not pack:
        filename = DIR + '/{}/LC_MESSAGES/messages.json'.format(locale)
        try:
            with open(filename) as f:
                pack = json.load(f)
        except Exception:
            # Assuming english, client side falls back on english
            pass
        else:
            ALL_LANGUAGE_PACKS[locale] = pack
    return pack
|
Get/cache a language pack
Returns the language pack from cache if it exists, caches it otherwise
>>> get_language_pack('fr')['Dashboards']
"Tableaux de bords"
|
def bash_complete(self, path, cmd, *cmds):
    """Write bash complete script.

    Emits a bash completion function ``_<cmd>`` covering the parser's
    top-level options, per-sub-command options, and the sub-command
    names, then registers it via ``complete -F``.

    Args:
        path (path-like): desired path of the complete script.
        cmd (str): command name that should be completed.
        cmds (str): extra command names that should be completed.
    """
    path = pathlib.Path(path)
    subcmds = list(self.subcmds.keys())
    with path.open('w') as bcf:
        # main function
        print('_{}() {{'.format(cmd), file=bcf)
        print('COMPREPLY=()', file=bcf)
        print(r'local cur=${COMP_WORDS[COMP_CWORD]}', end='\n\n', file=bcf)
        # Top-level option string for this parser.
        optstr = ' '.join(self._bash_comp_command(None))
        print(r'local options="{}"'.format(optstr), end='\n\n', file=bcf)
        if subcmds:
            # Per-sub-command option strings, keyed by sub-command name
            # in a bash associative array.
            print('local commands="{}"'.format(' '.join(subcmds)),
                  file=bcf)
            print('declare -A suboptions', file=bcf)
            for sub in subcmds:
                optstr = ' '.join(self._bash_comp_command(sub))
                print('suboptions[{}]="{}"'.format(sub, optstr), file=bcf)
        # Build an if/elif chain: match a sub-command on the line first,
        # then complete dash-options, finally the sub-command names.
        condstr = 'if'
        for sub in subcmds:
            print(condstr, r'[[ "${COMP_LINE}" == *"', sub, '"* ]] ; then',
                  file=bcf)
            print(r'COMPREPLY=( `compgen -W "${suboptions[', sub,
                  r']}" -- ${cur}` )', sep='', file=bcf)
            condstr = 'elif'
        print(condstr, r'[[ ${cur} == -* ]] ; then', file=bcf)
        print(r'COMPREPLY=( `compgen -W "${options}" -- ${cur}`)',
              file=bcf)
        if subcmds:
            print(r'else', file=bcf)
            print(r'COMPREPLY=( `compgen -W "${commands}" -- ${cur}`)',
                  file=bcf)
        print('fi', file=bcf)
        print('}', end='\n\n', file=bcf)
        # Register the completion function for the main and extra commands.
        print('complete -F _{0} {0}'.format(cmd), *cmds, file=bcf)
|
Write bash complete script.
Args:
path (path-like): desired path of the complete script.
cmd (str): command name that should be completed.
cmds (str): extra command names that should be completed.
|
def connect(self):
    """ :meth:`.WNetworkClientProto.connect` method implementation
    """
    # Connection-stage failures that we translate into a connection error.
    handled_exceptions = list(__basic_ftp_exceptions__)
    handled_exceptions.append(OSError)  # OSError for "no route to host" issue
    handled_exceptions.append(ConnectionRefusedError)  # for unavailable service on a host
    try:
        self.ftp_client().connect(**self.__ftp_connect_args)
        self.ftp_client().login(**self.__ftp_auth_args)
    except tuple(handled_exceptions) as e:
        raise WClientConnectionError('Unable to connect to the server') from e

    try:
        uri_path = self.uri().path()
        target_dir = self.directory_sep() if uri_path is None else uri_path
        self.change_directory(target_dir)
    except WClientCapabilityError as e:
        raise WClientConnectionError(
            'Unable to change current working directory to the specified one'
        ) from e
|
:meth:`.WNetworkClientProto.connect` method implementation
|
def aggregate(self, *args, **kwargs):
    """
    Returns a dictionary containing the calculations (aggregation)
    over the current queryset.

    If args is present the expression is passed as a kwarg using
    the Aggregate object's default alias.
    """
    from django.db.models import Avg, Max, Min

    clone = self._clone()
    clone._facets = []  # reset facets
    for aggregate_expr in args:
        if not isinstance(aggregate_expr, (Avg, Max, Min)):
            continue
        field, djfield = self._django_to_es_field(aggregate_expr.lookup)
        if not djfield:
            clone = clone.annotate(field)
        from pyes.facets import StatisticalFacet
        clone = clone.annotate(StatisticalFacet(field, field))
    facets = clone.get_facets()

    # Flatten facet results into "<name>__<stat>" keys, skipping
    # private "_"-prefixed entries.
    result = {}
    for name, values in facets.items():
        for stat, value in values.items():
            if stat.startswith("_"):
                continue
            result[u'%s__%s' % (name, stat)] = value
    return result
|
Returns a dictionary containing the calculations (aggregation)
over the current queryset
If args is present the expression is passed as a kwarg using
the Aggregate object's default alias.
|
def set_cloexec(fd):
    """Mark the file descriptor `fd` close-on-exec.

    The descriptor is closed automatically across :func:`os.execve`. This
    has no effect on file descriptors inherited across :func:`os.fork`;
    those must be explicitly closed through some other means, such as
    :func:`mitogen.fork.on_fork`.
    """
    current_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    assert fd > 2  # never touch stdio descriptors
    fcntl.fcntl(fd, fcntl.F_SETFD, current_flags | fcntl.FD_CLOEXEC)
|
Set the file descriptor `fd` to automatically close on
:func:`os.execve`. This has no effect on file descriptors inherited across
:func:`os.fork`, they must be explicitly closed through some other means,
such as :func:`mitogen.fork.on_fork`.
|
async def container_load(self, container_type, params=None, container=None, obj=None):
    """
    Loads container of elements from the reader. Supports the container ref.
    Returns loaded container.

    :param container_type: container type; its ELEM_TYPE is the fallback
        element type when ``params`` does not supply one.
    :param params: optional list; ``params[0]`` overrides the element
        type, the rest is forwarded to element loading.
    :param container: optional pre-allocated container filled in place.
    :param obj: source object (possibly an IModel wrapper) to read from.
    :return: the loaded container, or NoSetSentinel() when obj is None.
    """
    # Unwrap model objects down to their raw value.
    if isinstance(obj, IModel):
        obj = obj.val
    if obj is None:
        return NoSetSentinel()

    c_len = len(obj)
    # Explicit element type from params wins over the container default.
    elem_type = params[0] if params else None
    if elem_type is None:
        elem_type = container_type.ELEM_TYPE

    res = container if container else []
    for i in range(c_len):
        try:
            # Track the current index so failures report their position.
            self.tracker.push_index(i)
            fvalue = await self._load_field(elem_type,
                                            params[1:] if params else None,
                                            x.eref(res, i) if container else None, obj=obj[i])
            self.tracker.pop()
        except Exception as e:
            # Wrap any element failure with the tracker's path context.
            raise helpers.ArchiveException(e, tracker=self.tracker) from e

        # When building a fresh list, skip sentinel (unset) values.
        if not container and not isinstance(fvalue, NoSetSentinel):
            res.append(fvalue)
    return res
|
Loads container of elements from the reader. Supports the container ref.
Returns loaded container.
:param container_type:
:param params:
:param container:
:param obj:
:return:
|
def T_dependent_property(self, T):
    r'''Method to calculate the property with sanity checking and without
    specifying a specific method. `select_valid_methods` is used to obtain
    a sorted list of methods to try. Methods are then tried in order until
    one succeeds. The methods are allowed to fail, and their results are
    checked with `test_property_validity`. On success, the used method
    is stored in the variable `method`.

    If `method` is set, this method is first checked for validity with
    `test_method_validity` for the specified temperature, and if it is
    valid, it is then used to calculate the property. The result is checked
    for validity, and returned if it is valid. If either of the checks fail,
    the function retrieves a full list of valid methods with
    `select_valid_methods` and attempts them as described above.

    If no methods are found which succeed, returns None.

    Parameters
    ----------
    T : float
        Temperature at which to calculate the property, [K]

    Returns
    -------
    prop : float
        Calculated property, [`units`]
    '''
    # Optimistic track, with the already set method
    if self.method:
        # retest within range
        if self.test_method_validity(T, self.method):
            try:
                prop = self.calculate(T, self.method)
                if self.test_property_validity(prop):
                    return prop
            # Deliberately broad: any calculation failure falls through
            # to the full method search below.
            except:  # pragma: no cover
                pass

    # get valid methods at T, and try them until one yields a valid
    # property; store the method and return the answer
    self.sorted_valid_methods = self.select_valid_methods(T)
    for method in self.sorted_valid_methods:
        try:
            prop = self.calculate(T, method)
            if self.test_property_validity(prop):
                # Remember the first method that worked for next time.
                self.method = method
                return prop
        except:  # pragma: no cover
            pass

    # Function returns None if it does not work.
    return None
|
r'''Method to calculate the property with sanity checking and without
specifying a specific method. `select_valid_methods` is used to obtain
a sorted list of methods to try. Methods are then tried in order until
one succeeds. The methods are allowed to fail, and their results are
checked with `test_property_validity`. On success, the used method
is stored in the variable `method`.
If `method` is set, this method is first checked for validity with
`test_method_validity` for the specified temperature, and if it is
valid, it is then used to calculate the property. The result is checked
for validity, and returned if it is valid. If either of the checks fail,
the function retrieves a full list of valid methods with
`select_valid_methods` and attempts them as described above.
If no methods are found which succeed, returns None.
Parameters
----------
T : float
Temperature at which to calculate the property, [K]
Returns
-------
prop : float
Calculated property, [`units`]
|
def valid_hacluster_config():
    '''
    Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname
    must be set.

    Note: ha-bindiface and ha-macastport both have defaults and will always
    be set. We only care that either vip or dns-ha is set.

    :returns: boolean: valid config returns true.
    raises: HAIncompatibileConfig if settings conflict.
    raises: HAIncompleteConfig if settings are missing.
    '''
    vip = config_get('vip')
    dns = config_get('dns-ha')

    # Exactly one of the two mechanisms must be configured.
    if bool(vip) == bool(dns):
        msg = ('HA: Either vip or dns-ha must be set but not both in order to '
               'use high availability')
        status_set('blocked', msg)
        raise HAIncorrectConfig(msg)

    if not dns:
        log('VIP HA: VIP is set {}'.format(vip), level=DEBUG)
        return True

    # DNS HA: at this point it is unknown if one or all of the possible
    # network spaces are in HA, so validate that at least one hostname
    # is set — the minimum required.
    dns_settings = ('os-internal-hostname', 'os-admin-hostname',
                    'os-public-hostname', 'os-access-hostname')
    for setting in dns_settings:
        if config_get(setting):
            log('DNS HA: At least one hostname is set {}: {}'
                ''.format(setting, config_get(setting)),
                level=DEBUG)
            return True

    msg = ('DNS HA: At least one os-*-hostname(s) must be set to use '
           'DNS HA')
    status_set('blocked', msg)
    raise HAIncompleteConfig(msg)
|
Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname
must be set.
Note: ha-bindiface and ha-macastport both have defaults and will always
be set. We only care that either vip or dns-ha is set.
:returns: boolean: valid config returns true.
raises: HAIncompatibileConfig if settings conflict.
raises: HAIncompleteConfig if settings are missing.
|
def add(overlay):
    '''
    Add the given overlay from the cached remote list to your locally
    installed overlays. Specify 'ALL' to add all overlays from the
    remote list.

    Return a list of the new overlay(s) added:

    CLI Example:

    .. code-block:: bash

        salt '*' layman.add <overlay name>
    '''
    before = list_local()
    cmd = 'layman --quietness=0 --add {0}'.format(overlay)
    result = __salt__['cmd.run_all'](cmd, python_shell=False, stdin='y')
    if result['retcode'] != 0:
        raise salt.exceptions.CommandExecutionError(result['stdout'])
    after = list_local()

    # If we did not have any overlays before and we successfully added
    # a new one, ensure make.conf sources layman's make.conf so emerge
    # can see the overlays.
    if not before and after:
        srcline = 'source /var/lib/layman/make.conf'
        makeconf = _get_makeconf()
        if not __salt__['file.contains'](makeconf, 'layman'):
            __salt__['file.append'](makeconf, srcline)

    return [name for name in after if name not in before]
|
Add the given overlay from the cached remote list to your locally
installed overlays. Specify 'ALL' to add all overlays from the
remote list.
Return a list of the new overlay(s) added:
CLI Example:
.. code-block:: bash
salt '*' layman.add <overlay name>
|
def process_priority(self, process_priority):
    """
    Sets the process priority.

    :param process_priority: string
    """
    message = ('QEMU VM "{name}" [{id}] has set the process priority '
               'to {priority}').format(name=self._name,
                                       id=self._id,
                                       priority=process_priority)
    log.info(message)
    self._process_priority = process_priority
|
Sets the process priority.
:param process_priority: string
|
def _AlignUncompressedDataOffset(self, uncompressed_data_offset):
    """Aligns the compressed file with the uncompressed data offset.

    Restarts decompression from the beginning of the compressed stream and
    reads forward block by block until the buffered uncompressed data
    contains the requested offset.

    Args:
      uncompressed_data_offset (int): uncompressed data offset.
    """
    # Rewind and start with a fresh decompressor — the compressed stream
    # cannot be seeked directly.
    self._file_object.seek(0, os.SEEK_SET)

    self._decompressor = self._GetDecompressor()
    self._uncompressed_data = b''

    compressed_data_offset = 0
    compressed_data_size = self._file_object.get_size()

    while compressed_data_offset < compressed_data_size:
        read_count = self._ReadCompressedData(self._COMPRESSED_DATA_BUFFER_SIZE)
        if read_count == 0:
            break

        compressed_data_offset += read_count

        if uncompressed_data_offset < self._uncompressed_data_size:
            # The target offset falls inside the currently buffered block.
            self._uncompressed_data_offset = uncompressed_data_offset
            break

        # Skip past this whole block and carry the remainder forward.
        uncompressed_data_offset -= self._uncompressed_data_size
|
Aligns the compressed file with the uncompressed data offset.
Args:
uncompressed_data_offset (int): uncompressed data offset.
|
def filename(self, filename=None, ext=None, set_default=False, use_my_ext=False):
    """Supply a file name for the class object.

    Typical uses::

       fn = filename()             ---> <default_filename>
       fn = filename('name.ext')   ---> 'name'
       fn = filename(ext='pickle') ---> <default_filename>'.pickle'
       fn = filename('name.inp','pdf') --> 'name.pdf'
       fn = filename('foo.pdf',ext='png',use_my_ext=True) --> 'foo.pdf'

    The returned filename is stripped of the extension
    (``use_my_ext=False``) and if provided, another extension is
    appended. Chooses a default if no filename is given.

    Raises a ``ValueError`` exception if no default file name is known.

    If ``set_default=True`` then the default filename is also set.

    ``use_my_ext=True`` lets the suffix of a provided filename take
    priority over a default ``ext`` extension.

    .. versionchanged:: 0.3.1
       An empty string as *ext* = "" will suppress appending an extension.
    """
    my_ext = None
    if filename is None:
        # Fall back to the stored default name.
        if not hasattr(self, '_filename'):
            self._filename = None  # add attribute to class
        if not self._filename:
            raise ValueError("A file name is required because no default file name was defined.")
        filename = self._filename
    else:
        filename, my_ext = os.path.splitext(filename)
        if set_default:  # replaces existing default file name
            self._filename = filename
    if my_ext and use_my_ext:
        ext = my_ext
    if ext is not None:
        if ext.startswith(os.extsep):
            ext = ext[1:]  # strip a dot to avoid annoying mistakes
        if ext != "":
            filename = filename + os.extsep + ext
    return filename
|
Supply a file name for the class object.
Typical uses::
fn = filename() ---> <default_filename>
fn = filename('name.ext') ---> 'name'
fn = filename(ext='pickle') ---> <default_filename>'.pickle'
fn = filename('name.inp','pdf') --> 'name.pdf'
fn = filename('foo.pdf',ext='png',use_my_ext=True) --> 'foo.pdf'
The returned filename is stripped of the extension
(``use_my_ext=False``) and if provided, another extension is
appended. Chooses a default if no filename is given.
Raises a ``ValueError`` exception if no default file name is known.
If ``set_default=True`` then the default filename is also set.
``use_my_ext=True`` lets the suffix of a provided filename take
priority over a default ``ext`` extension.
.. versionchanged:: 0.3.1
An empty string as *ext* = "" will suppress appending an extension.
|
def create(self, request):
    """Add an item to the basket.

    Expects ``variant_id`` (required) and ``quantity`` (optional,
    default 1) in the request data. If the variant is already present
    in the basket its quantity is increased; otherwise a new
    ``BasketItem`` row is created.
    """
    variant_id = request.data.get("variant_id", None)
    if variant_id is None:
        return Response(
            {"message": "Missing 'variant_id'"},
            status=status.HTTP_400_BAD_REQUEST)
    variant = ProductVariant.objects.get(id=variant_id)
    quantity = int(request.data.get("quantity", 1))
    items, bid = utils.get_basket_items(request)
    # Merge with an existing line item when the variant is already there.
    existing = None
    for candidate in items:
        if candidate.variant.id == variant.id:
            existing = candidate
            break
    if existing is not None:
        existing.increase_quantity(quantity)
    else:
        BasketItem(variant=variant, quantity=quantity, basket_id=bid).save()
    serializer = BasketItemSerializer(self.get_queryset(request), many=True)
    return Response(data=serializer.data, status=status.HTTP_201_CREATED)
|
Add an item to the basket
|
def removeEventListener(self, event: str, listener: _EventListenerType
                        ) -> None:
    """Detach a listener from this node.

    The listener is removed only when both the event type and the
    listener object match a previously registered pair.
    """
    self._remove_event_listener(event, listener)
|
Remove an event listener of this node.
The listener is removed only when both event type and listener is
matched.
|
def _read_file(file_name):
"""Read the file content and load it as JSON.
Arguments:
file_name (:py:class:`str`): The filename.
Returns:
:py:class:`dict`: The loaded JSON data.
Raises:
:py:class:`FileNotFoundError`: If the file is not found.
"""
with open(file_name) as config_file:
data = json.load(config_file)
return data
|
Read the file content and load it as JSON.
Arguments:
file_name (:py:class:`str`): The filename.
Returns:
:py:class:`dict`: The loaded JSON data.
Raises:
:py:class:`FileNotFoundError`: If the file is not found.
|
def nlmsg_attrlen(nlh, hdrlen):
    """Return the length of a message's attributes data.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L154

    nlh -- Netlink message header (nlmsghdr class instance).
    hdrlen -- length of family specific header (integer).

    Returns:
    Non-negative integer.
    """
    aligned_header = libnl.linux_private.netlink.NLMSG_ALIGN(hdrlen)
    remaining = nlmsg_len(nlh) - aligned_header
    return remaining if remaining > 0 else 0
|
Length of attributes data.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L154
nlh -- Netlink message header (nlmsghdr class instance).
hdrlen -- length of family specific header (integer).
Returns:
Integer.
|
def get_file_id(db, user_id, api_path):
    """Return the value of the 'id' column for the file identified by
    the given user_id and path.
    """
    record = _get_file(
        db,
        user_id,
        api_path,
        [files.c.id],
        unused_decrypt_func,
    )
    return record['id']
|
Get the value in the 'id' column for the file with the given
user_id and path.
|
def format_stats(stats):
    """Re-shape a flat ``'<id>:<field>'`` stats mapping into nested dicts.

    Given a dictionary following this layout:

        {
            'encoded:label': 'Encoded',
            'encoded:value': 'Yes',
            'encoded:description': 'Indicates if the column is encoded',
            'encoded:include': True,
        }

    produce one sub-dictionary per stat id:

        {
            'encoded': {
                'id': 'encoded',
                'label': 'Encoded',
                'value': 'Yes',
                'description': 'Indicates if the column is encoded',
                'include': True
            }
        }

    Entries whose ``include`` flag is falsy are dropped, and a synthetic
    ``has_stats`` entry (itself never included) is always added.
    """
    grouped = {}
    for stat_key, stat_value in stats.items():
        stat_id, stat_field = stat_key.split(":")
        entry = grouped.setdefault(stat_id, {"id": stat_id})
        entry[stat_field] = stat_value
    # Keep only the stats explicitly marked for inclusion.
    included = {
        stat_id: entry
        for stat_id, entry in grouped.items()
        if entry.get('include', False)
    }
    # 'has_stats' is always present but never itself included.
    included['has_stats'] = {
        'id': 'has_stats',
        'label': 'Has Stats?',
        'value': len(included) > 0,
        'description': 'Indicates whether there are statistics for this table',
        'include': False,
    }
    return included
|
Given a dictionary following this layout:
{
'encoded:label': 'Encoded',
'encoded:value': 'Yes',
'encoded:description': 'Indicates if the column is encoded',
'encoded:include': True,
'size:label': 'Size',
'size:value': 128,
'size:description': 'Size of the table in MB',
'size:include': True,
}
format_stats will convert the dict into this structure:
{
'encoded': {
'id': 'encoded',
'label': 'Encoded',
'value': 'Yes',
'description': 'Indicates if the column is encoded',
'include': True
},
'size': {
'id': 'size',
'label': 'Size',
'value': 128,
'description': 'Size of the table in MB',
'include': True
}
}
|
def _bstar_1effect(beta, alpha, yTBy, yTBX, yTBM, XTBX, XTBM, MTBM):
    """
    Same as :func:`_bstar_set` but for single-effect.

    Accumulates, for every candidate, a quadratic form from precomputed
    cross-products and clips it away from zero for numerical stability.
    NOTE(review): the terms appear to expand
    (y - X*beta - M*alpha)' B (y - X*beta - M*alpha) — confirm against
    :func:`_bstar_set`.
    """
    from numpy_sugar import epsilon
    from numpy_sugar.linalg import dotd
    from numpy import sum
    # Start from y'By broadcast to one entry per candidate marker.
    r = full(MTBM[0].shape[0], yTBy)
    # Cross terms between y and the covariate/candidate effects.
    r -= 2 * add.reduce([dot(i, beta) for i in yTBX])
    r -= 2 * add.reduce([i * alpha for i in yTBM])
    # Quadratic terms in beta, the beta/alpha cross terms, and alpha.
    r += add.reduce([dotd(beta.T, dot(i, beta)) for i in XTBX])
    r += add.reduce([dotd(beta.T, i * alpha) for i in XTBM])
    r += add.reduce([sum(alpha * i * beta, axis=0) for i in XTBM])
    r += add.reduce([alpha * i.ravel() * alpha for i in MTBM])
    # Clip to avoid non-positive values caused by floating-point error.
    return clip(r, epsilon.tiny, inf)
|
Same as :func:`_bstar_set` but for single-effect.
|
def _dir_additions(self):
    """ add the string-like attributes from the info_axis.

    If info_axis is a MultiIndex, it's first level values are used.
    """
    # Cap at 100 labels to keep dir() fast on very wide objects.
    level_values = self._info_axis.unique(level=0)[:100]
    extra = {
        label for label in level_values
        if isinstance(label, str) and label.isidentifier()
    }
    return super()._dir_additions().union(extra)
|
add the string-like attributes from the info_axis.
If info_axis is a MultiIndex, it's first level values are used.
|
def crack(ciphertext, *fitness_functions, min_key=0, max_key=26, shift_function=shift_case_english):
    """Break ``ciphertext`` by enumerating keys between ``min_key`` and ``max_key``.

    Example:
        >>> decryptions = crack("KHOOR", fitness.english.quadgrams)
        >>> print(''.join(decryptions[0].plaintext))
        HELLO

    Args:
        ciphertext (iterable): The symbols to decrypt
        *fitness_functions (variable length argument list): Functions to score decryption with

    Keyword Args:
        min_key (int): Key to start with
        max_key (int): Key to stop at (exclusive)
        shift_function (function(shift, symbol)): Shift function to use

    Returns:
        Sorted list of decryptions

    Raises:
        ValueError: If min_key exceeds max_key
        ValueError: If no fitness_functions are given
    """
    if min_key >= max_key:
        raise ValueError("min_key cannot exceed max_key")
    # The docstring always promised this; the check was missing.
    if not fitness_functions:
        raise ValueError("At least one fitness function must be provided")
    decryptions = []
    for key in range(min_key, max_key):
        plaintext = decrypt(key, ciphertext, shift_function=shift_function)
        decryptions.append(Decryption(plaintext, key, score(plaintext, *fitness_functions)))
    # Best-scoring decryption first.
    return sorted(decryptions, reverse=True)
|
Break ``ciphertext`` by enumerating keys between ``min_key`` and ``max_key``.
Example:
>>> decryptions = crack("KHOOR", fitness.english.quadgrams)
>>> print(''.join(decryptions[0].plaintext))
HELLO
Args:
ciphertext (iterable): The symbols to decrypt
*fitness_functions (variable length argument list): Functions to score decryption with
Keyword Args:
min_key (int): Key to start with
max_key (int): Key to stop at (exclusive)
shift_function (function(shift, symbol)): Shift function to use
Returns:
Sorted list of decryptions
Raises:
ValueError: If min_key exceeds max_key
ValueError: If no fitness_functions are given
|
def get_operation_mtf_dimension_names(self, operation_name):
    """The Mesh TensorFlow dimensions associated with an operation.

    Args:
      operation_name: a string, name of an operation in the graph.

    Returns:
      a set(string), the names of Mesh TensorFlow dimensions.
    """
    # Union the dimension names over all input and output tensors.
    names = set()
    for group in (self.get_operation_input_names(operation_name),
                  self.get_operation_output_names(operation_name)):
        for tensor_name in group:
            names.update(self.get_tensor_mtf_dimension_names(tensor_name))
    return names
|
The Mesh TensorFlow dimensions associated with an operation.
Args:
operation_name: a string, name of an operation in the graph.
Returns:
a set(string), the names of Mesh TensorFlow dimensions.
|
def add_flatten(self, name, mode, input_name, output_name):
    """
    Add a flatten layer. Only flattens the channel, height and width axis. Leaves the sequence axis as is.

    Parameters
    ----------
    name: str
        The name of this layer.
    mode: int
        - If mode == 0, the flatten layer is in CHANNEL_FIRST mode.
        - If mode == 1, the flatten layer is in CHANNEL_LAST mode.
    input_name: str
        The input blob name of this layer.
    output_name: str
        The output blob name of this layer.

    See Also
    --------
    add_permute, add_reshape
    """
    # Validate `mode` before touching the spec so an invalid value does
    # not leave a half-initialized layer appended (the original added
    # the layer first and raised afterwards).
    if mode == 0:
        order = 'CHANNEL_FIRST'
    elif mode == 1:
        order = 'CHANNEL_LAST'
    else:
        raise NotImplementedError(
            'Unknown flatten mode %d ' % mode)

    # Add a new layer
    spec_layer = self.nn_spec.layers.add()
    spec_layer.name = name
    spec_layer.input.append(input_name)
    spec_layer.output.append(output_name)
    spec_layer.flatten.mode = \
        _NeuralNetwork_pb2.FlattenLayerParams.FlattenOrder.Value(order)
|
Add a flatten layer. Only flattens the channel, height and width axis. Leaves the sequence axis as is.
Parameters
----------
name: str
The name of this layer.
mode: int
- If mode == 0, the flatten layer is in CHANNEL_FIRST mode.
- If mode == 1, the flatten layer is in CHANNEL_LAST mode.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_permute, add_reshape
|
def load_module_functions(module):
    """ load python module functions.

    Args:
        module: python module

    Returns:
        dict: functions mapping for specified python module, e.g.
        {"func1_name": func1, "func2_name": func2}
    """
    return {
        name: member
        for name, member in vars(module).items()
        if validator.is_function(member)
    }
|
load python module functions.
Args:
module: python module
Returns:
dict: functions mapping for specified python module
{
"func1_name": func1,
"func2_name": func2
}
|
def save_dataset(self, dataset, filename=None, fill_value=None,
                 compute=True, **kwargs):
    """Saves the ``dataset`` to a given ``filename``.

    This method must be overloaded by the subclass.

    Args:
        dataset (xarray.DataArray): Dataset to save using this writer.
        filename (str): Optionally specify the filename to save this
                        dataset to. If not provided then `filename`
                        which can be provided to the init method will be
                        used and formatted by dataset attributes.
        fill_value (int or float): Replace invalid values in the dataset
                                   with this fill value if applicable to
                                   this writer.
        compute (bool): If `True` (default), compute and save the dataset.
                        If `False` return either a `dask.delayed.Delayed`
                        object or tuple of (source, target). See the
                        return values below for more information.
        **kwargs: Other keyword arguments for this particular writer.

    Returns:
        Value returned depends on `compute`. If `compute` is `True` then
        the return value is the result of computing a
        `dask.delayed.Delayed` object or running `dask.array.store`. If
        `compute` is `False` then the returned value is either a
        `dask.delayed.Delayed` object that can be computed using
        `delayed.compute()` or a tuple of (source, target) that should be
        passed to `dask.array.store`. If target is provided then the
        caller is responsible for calling `target.close()` if the target
        has this method.
    """
    # Abstract by design: concrete writers must override this.
    raise NotImplementedError(
        "Writer '%s' has not implemented dataset saving" % (self.name, ))
|
Saves the ``dataset`` to a given ``filename``.
This method must be overloaded by the subclass.
Args:
dataset (xarray.DataArray): Dataset to save using this writer.
filename (str): Optionally specify the filename to save this
dataset to. If not provided then `filename`
which can be provided to the init method will be
used and formatted by dataset attributes.
fill_value (int or float): Replace invalid values in the dataset
with this fill value if applicable to
this writer.
compute (bool): If `True` (default), compute and save the dataset.
If `False` return either a `dask.delayed.Delayed`
object or tuple of (source, target). See the
return values below for more information.
**kwargs: Other keyword arguments for this particular writer.
Returns:
Value returned depends on `compute`. If `compute` is `True` then
the return value is the result of computing a
`dask.delayed.Delayed` object or running `dask.array.store`. If
`compute` is `False` then the returned value is either a
`dask.delayed.Delayed` object that can be computed using
`delayed.compute()` or a tuple of (source, target) that should be
passed to `dask.array.store`. If target is provided then the caller
is responsible for calling `target.close()` if the target has
this method.
|
def sentence_starts(self):
    """The list of start positions representing ``sentences`` layer elements."""
    layer = SENTENCES
    # Tokenize lazily on first access.
    if not self.is_tagged(layer):
        self.tokenize_sentences()
    return self.starts(layer)
|
The list of start positions representing ``sentences`` layer elements.
|
def lookup_character_keycode(self, character):
    """
    Looks up the keysym for the character then returns the keycode mapping
    for that keysym.
    """
    keysym = Xlib.XK.string_to_keysym(character)
    if not keysym:
        # Fall back to the xkb keysym table; getattr() already defaults
        # to 0 for unknown names, so only a non-string `character`
        # (TypeError on concatenation) needs handling.  The original
        # used a bare `except:`, which would also swallow e.g.
        # KeyboardInterrupt.
        try:
            keysym = getattr(Xlib.keysymdef.xkb, 'XK_' + character, 0)
        except TypeError:
            keysym = 0
    if not keysym:
        # Last resort: translate through the local KEYSYMS table.
        keysym = Xlib.XK.string_to_keysym(KEYSYMS[character])
    return self.display.keysym_to_keycode(keysym)
|
Looks up the keysym for the character then returns the keycode mapping
for that keysym.
|
def check_register(self, arg):
    """
    Is the parameter a register in the form of 'R<d>',
    and if so is it within the bounds of registers defined

    Raises an exception if
    1. The parameter is not in the form of 'R<d>'
    2. <d> is outside the range of registers defined in the init value
    registers or _max_registers

    :param arg: The parameter to check
    :return: The number of the register
    """
    self.check_parameter(arg)
    match = re.search(self.REGISTER_REGEX, arg)
    if match is None:
        raise iarm.exceptions.RuleError("Parameter {} is not a register".format(arg))
    try:
        r_num = int(match.groups()[0])
    except ValueError:
        # The captured digits were not decimal; retry as hexadecimal.
        r_num = int(match.groups()[0], 16)
    except TypeError:
        # group(0) was None: a named register alias matched instead of
        # a numbered one.  NOTE(review): `arg in 'lr|LR'` is substring
        # membership, so single characters like 'r' or 'L' also match —
        # presumably `arg in ('lr', 'LR')` was intended; confirm.
        if arg in 'lr|LR':
            return 14
        elif arg in 'sp|SP':
            return 13
        elif arg in 'fp|FP':
            return 7 ## TODO this could be 7 or 11 depending on THUMB and ARM mode http://www.keil.com/support/man/docs/armcc/armcc_chr1359124947957.htm
        else:
            raise
    if r_num > self._max_registers:
        raise iarm.exceptions.RuleError(
            "Register {} is greater than defined registers of {}".format(arg, self._max_registers))
    return r_num
|
Is the parameter a register in the form of 'R<d>',
and if so is it within the bounds of registers defined
Raises an exception if
1. The parameter is not in the form of 'R<d>'
2. <d> is outside the range of registers defined in the init value
registers or _max_registers
:param arg: The parameter to check
:return: The number of the register
|
def save_module(self, obj):
    """
    Save a module as an import
    """
    # A module without __file__ that the import machinery cannot find
    # was created dynamically and must be shipped with its namespace.
    dynamic = False
    if not hasattr(obj, '__file__'):
        try:
            _find_module(obj.__name__)
        except ImportError:
            dynamic = True
    self.modules.add(obj)
    if dynamic:
        self.save_reduce(dynamic_subimport, (obj.__name__, vars(obj)), obj=obj)
    else:
        self.save_reduce(subimport, (obj.__name__,), obj=obj)
|
Save a module as an import
|
def idle_task(self):
    '''called on idle'''
    # Attach our menu to the console module exactly once, as soon as
    # the console module is available.
    if self.module('console') is None:
        return
    if self.menu_added_console:
        return
    self.menu_added_console = True
    self.module('console').add_menu(self.menu)
|
called on idle
|
def profile_function(self):
    """Calculates heatmap for function.

    Runs the target callable under ``_CodeHeatmapCalculator`` and
    returns a dict containing the callable's result, total run time,
    and a single heatmap entry for its source file.
    """
    with _CodeHeatmapCalculator() as prof:
        result = self._run_object(*self._run_args, **self._run_kwargs)
    # Pair each source line with its absolute line number for display.
    code_lines, start_line = inspect.getsourcelines(self._run_object)
    source_lines = []
    for line in code_lines:
        source_lines.append(('line', start_line, line))
        start_line += 1
    filename = os.path.abspath(inspect.getsourcefile(self._run_object))
    heatmap = prof.heatmap[filename]
    # Total run time is the sum of all per-line timings.  (The genexp's
    # `time` variable is local to it and does not shadow the module.)
    run_time = sum(time for time in heatmap.values())
    return {
        'objectName': self._object_name,
        'runTime': run_time,
        'result': result,
        'timestamp': int(time.time()),
        'heatmaps': [{
            'name': self._object_name,
            'heatmap': heatmap,
            'executionCount': prof.execution_count[filename],
            'srcCode': source_lines,
            'runTime': run_time
        }]
    }
|
Calculates heatmap for function.
|
def _connect(obj):
    '''
    Tries to get the _conn attribute from a model. Barring that, gets the
    global default connection using other methods.
    '''
    from .columns import MODELS
    # Instances defer to their class for connection lookup.
    target = obj.__class__ if isinstance(obj, MODELS['Model']) else obj
    for attr in ('_conn', 'CONN'):
        if hasattr(target, attr):
            return getattr(target, attr)
    return get_connection()
|
Tries to get the _conn attribute from a model. Barring that, gets the
global default connection using other methods.
|
def _add_tabular_layer(self, tabular_layer, layer_name, save_style=False):
    """Add a tabular layer to the folder.

    :param tabular_layer: The layer to add.
    :type tabular_layer: QgsVectorLayer

    :param layer_name: The name of the layer in the datastore.
    :type layer_name: str

    :param save_style: If we have to save a QML too. Default to False.
    :type save_style: bool

    :returns: A two-tuple. The first element will be True if we could add
        the layer to the datastore. The second element will be the layer
        name which has been used or the error message.
    :rtype: (bool, str)

    .. versionadded:: 4.0
    """
    output = QFileInfo(
        self.uri.filePath(layer_name + '.csv'))
    QgsVectorFileWriter.writeAsVectorFormat(
        tabular_layer,
        output.absoluteFilePath(),
        'utf-8',
        QgsCoordinateTransform(),
        'CSV')
    if save_style:
        style_path = QFileInfo(self.uri.filePath(layer_name + '.qml'))
        tabular_layer.saveNamedStyle(style_path.absoluteFilePath())
    # Report failure through the documented (bool, message) contract.
    # The original used `assert`, which is stripped under `python -O`
    # and raises instead of returning the documented error tuple.
    if not output.exists():
        return False, 'The file %s was not created.' % output.absoluteFilePath()
    return True, output.baseName()
|
Add a tabular layer to the folder.
:param tabular_layer: The layer to add.
:type tabular_layer: QgsVectorLayer
:param layer_name: The name of the layer in the datastore.
:type layer_name: str
:param save_style: If we have to save a QML too. Default to False.
:type save_style: bool
:returns: A two-tuple. The first element will be True if we could add
the layer to the datastore. The second element will be the layer
name which has been used or the error message.
:rtype: (bool, str)
.. versionadded:: 4.0
|
def set_composition(self, composition_id=None):
    """Sets the composition.

    :param composition_id: a composition
    :type composition_id: ``osid.id.Id``
    :raise: ``InvalidArgument`` -- ``composition_id`` is invalid
    :raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
    :raise: ``NullArgument`` -- ``composition_id`` is ``null``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Guard clauses: null argument, read-only metadata, invalid input.
    if composition_id is None:
        raise NullArgument()
    metadata = Metadata(**settings.METADATA['composition_id'])
    if metadata.is_read_only():
        raise NoAccess()
    if not self._is_valid_input(composition_id, metadata, array=False):
        raise InvalidArgument()
    self._my_map['compositionId'] = str(composition_id)
|
Sets the composition.
:param composition_id: a composition
:type composition_id: ``osid.id.Id``
:raise: ``InvalidArgument`` -- ``composition_id`` is invalid
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
:raise: ``NullArgument`` -- ``composition_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
|
def mel_to_hz(mels, htk=False):
    """Convert mel bin numbers to frequencies

    Examples
    --------
    >>> librosa.mel_to_hz(3)
    200.

    >>> librosa.mel_to_hz([1,2,3,4,5])
    array([  66.667,  133.333,  200.   ,  266.667,  333.333])

    Parameters
    ----------
    mels : np.ndarray [shape=(n,)], float
        mel bins to convert
    htk : bool
        use HTK formula instead of Slaney

    Returns
    -------
    frequencies : np.ndarray [shape=(n,)]
        input mels in Hz

    See Also
    --------
    hz_to_mel
    """
    mels = np.asanyarray(mels)

    if htk:
        # HTK formula: purely logarithmic mel scale.
        return 700.0 * (10.0**(mels / 2595.0) - 1.0)

    # Slaney formula: linear below 1000 Hz, logarithmic above.
    f_min = 0.0
    f_sp = 200.0 / 3
    freqs = f_min + f_sp * mels

    min_log_hz = 1000.0                          # beginning of log region (Hz)
    min_log_mel = (min_log_hz - f_min) / f_sp    # same (Mels)
    logstep = np.log(6.4) / 27.0                 # step size for log region

    if mels.ndim:
        # Vector input: patch the log region in place.
        above = mels >= min_log_mel
        freqs[above] = min_log_hz * np.exp(logstep * (mels[above] - min_log_mel))
    elif mels >= min_log_mel:
        # Scalar input in the log region.
        freqs = min_log_hz * np.exp(logstep * (mels - min_log_mel))

    return freqs
|
Convert mel bin numbers to frequencies
Examples
--------
>>> librosa.mel_to_hz(3)
200.
>>> librosa.mel_to_hz([1,2,3,4,5])
array([ 66.667, 133.333, 200. , 266.667, 333.333])
Parameters
----------
mels : np.ndarray [shape=(n,)], float
mel bins to convert
htk : bool
use HTK formula instead of Slaney
Returns
-------
frequencies : np.ndarray [shape=(n,)]
input mels in Hz
See Also
--------
hz_to_mel
|
def set_one_time_boot(self, device):
    """Configures a single boot from a specific device.

    :param device: Device to be set as a one time boot device
    :raises: IloError, on an error from iLO.
    :raises: IloInvalidInputError, if the given input is not valid.
    """
    sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID)
    # Reject devices that have no Redfish equivalent.
    if device.upper() not in DEVICE_COMMON_TO_REDFISH:
        raise exception.IloInvalidInputError(
            self._('Invalid input "%(device)s". Valid devices: '
                   'NETWORK, HDD, ISCSI or CDROM.') % {'device': device})
    try:
        # persistent=False makes this a one-time boot override.
        sushy_system.update_persistent_boot([device], persistent=False)
    except sushy.exceptions.SushyError as e:
        msg = (self._('The Redfish controller failed to set '
                      'one time boot device %(device)s. '
                      'Error: %(error)s') %
               {'device': device, 'error': str(e)})
        LOG.debug(msg)
        raise exception.IloError(msg)
|
Configures a single boot from a specific device.
:param device: Device to be set as a one time boot device
:raises: IloError, on an error from iLO.
:raises: IloInvalidInputError, if the given input is not valid.
|
def get_all(self, security):
    """
    Get all available quote data for the given ticker security.

    Returns a dictionary.
    """
    page = self._request('http://www.google.com/finance?q=%s' % security)
    soup = BeautifulSoup(page)
    snapData = soup.find("table", {"class": "snap-data"})
    if snapData is None:
        raise UfException(Errors.STOCK_SYMBOL_ERROR, "Can find data for stock %s, security error?" % security)
    # Each table row holds one (label, value) pair of cells.
    quotes = {}
    for row in snapData.findAll('tr'):
        label_cell, value_cell = row.findAll('td')
        quotes[label_cell.getText()] = value_cell.getText()
    return quotes
|
Get all available quote data for the given ticker security.
Returns a dictionary.
|
def make_cookies(self, response, request):
    """Return sequence of Cookie objects extracted from response object.

    Parses both RFC 2965 ("Set-Cookie2") and Netscape/RFC 2109
    ("Set-Cookie") headers, honouring the policy's protocol switches.
    When both protocols are enabled, a Netscape cookie is dropped if a
    matching RFC 2965 cookie (same domain, path and name) exists
    (RFC 2965 section 9.1).
    """
    # get cookie-attributes for RFC 2965 and Netscape protocols
    headers = response.info()
    rfc2965_hdrs = headers.get_all("Set-Cookie2", [])
    ns_hdrs = headers.get_all("Set-Cookie", [])

    rfc2965 = self._policy.rfc2965
    netscape = self._policy.netscape

    if ((not rfc2965_hdrs and not ns_hdrs) or
        (not ns_hdrs and not rfc2965) or
        (not rfc2965_hdrs and not netscape) or
        (not netscape and not rfc2965)):
        return []  # no relevant cookie headers: quick exit

    try:
        cookies = self._cookies_from_attrs_set(
            split_header_words(rfc2965_hdrs), request)
    except Exception:
        _warn_unhandled_exception()
        cookies = []

    if ns_hdrs and netscape:
        try:
            # RFC 2109 and Netscape cookies
            ns_cookies = self._cookies_from_attrs_set(
                parse_ns_headers(ns_hdrs), request)
        except Exception:
            _warn_unhandled_exception()
            ns_cookies = []
        self._process_rfc2109_cookies(ns_cookies)

        # Look for Netscape cookies (from Set-Cookie headers) that match
        # corresponding RFC 2965 cookies (from Set-Cookie2 headers).
        # For each match, keep the RFC 2965 cookie and ignore the Netscape
        # cookie (RFC 2965 section 9.1).  Actually, RFC 2109 cookies are
        # bundled in with the Netscape cookies for this purpose, which is
        # reasonable behaviour.
        if rfc2965:
            lookup = set()
            for cookie in cookies:
                lookup.add((cookie.domain, cookie.path, cookie.name))
            # BUG FIX: the original used filter(), which on Python 3
            # returns a lazy iterator, so the `if ns_cookies:` test
            # below was always true.  A list comprehension restores
            # the intended "only extend when non-empty" semantics.
            ns_cookies = [c for c in ns_cookies
                          if (c.domain, c.path, c.name) not in lookup]

        if ns_cookies:
            cookies.extend(ns_cookies)

    return cookies
|
Return sequence of Cookie objects extracted from response object.
|
def replace(self, str1, str2):
    """ Set verbatim code replacement

    It is strongly recommended to use function['$foo'] = 'bar' where
    possible because template variables are less likely to change
    than the code itself in future versions of vispy.

    Parameters
    ----------
    str1 : str
        String to replace
    str2 : str
        String to replace str1 with
    """
    # Only record (and signal) a change when the mapping actually differs.
    if self._replacements.get(str1, None) != str2:
        self._replacements[str1] = str2
        self.changed(code_changed=True)
|
Set verbatim code replacement
It is strongly recommended to use function['$foo'] = 'bar' where
possible because template variables are less likely to change
than the code itself in future versions of vispy.
Parameters
----------
str1 : str
String to replace
str2 : str
String to replace str1 with
|
def ext_pillar(minion_id,
               pillar, # pylint: disable=W0613
               url,
               with_grains=False):
    '''
    Read pillar data from HTTP response.

    :param str minion_id: The minion id; substituted for ``%s`` in the url.
    :param dict pillar: The existing pillar (unused).
    :param str url: Url to request.
    :param bool with_grains: Whether to substitute strings in the url with their grain values.
    :return: A dictionary of the pillar data to add.
    :rtype: dict
    '''
    # Substitute the minion id for any '%s' placeholder (url-encoded).
    url = url.replace('%s', _quote(minion_id))
    # '<grain_name>' placeholders are replaced with grain values below.
    grain_pattern = r'<(?P<grain_name>.*?)>'
    if with_grains:
        # Get the value of the grain and substitute each grain
        # name for the url-encoded version of its grain value.
        for match in re.finditer(grain_pattern, url):
            grain_name = match.group('grain_name')
            grain_value = __salt__['grains.get'](grain_name, None)
            if not grain_value:
                # A missing grain makes the url unusable; return no pillar.
                log.error("Unable to get minion '%s' grain: %s", minion_id, grain_name)
                return {}
            grain_value = _quote(six.text_type(grain_value))
            url = re.sub('<{0}>'.format(grain_name), grain_value, url)
    log.debug('Getting url: %s', url)
    # The response body is JSON-decoded into data['dict'] on success.
    data = __salt__['http.query'](url=url, decode=True, decode_type='json')
    if 'dict' in data:
        return data['dict']
    # No 'dict' key means the query failed; log everything we got back.
    log.error("Error on minion '%s' http query: %s\nMore Info:\n", minion_id, url)
    for key in data:
        log.error('%s: %s', key, data[key])
    return {}
|
Read pillar data from HTTP response.
:param str url: Url to request.
:param bool with_grains: Whether to substitute strings in the url with their grain values.
:return: A dictionary of the pillar data to add.
:rtype: dict
|
def get_comic_format(filename):
    """Return the comic format if it is a comic archive."""
    ext = os.path.splitext(filename)[-1].lower()
    if ext not in _COMIC_EXTS:
        return None
    # Inspect the actual container type, not just the extension.
    if zipfile.is_zipfile(filename):
        return _CBZ_FORMAT
    if rarfile.is_rarfile(filename):
        return _CBR_FORMAT
    return None
|
Return the comic format if it is a comic archive.
|
def pause(self):
    """Pauses playback.

    Sends the "pause" command only when something is currently
    playing, then records the stopped state.
    """
    # Truthiness test instead of the `is True` identity check: any
    # truthy playing state should allow pausing.
    if self.isPlaying:
        self._execute("pause")
        self._changePlayingState(False)
|
Pauses playback
|
def _apply_rules_no_recurse(expr, rules):
"""Non-recursively match expr again all rules"""
try:
# `rules` is an OrderedDict key => (pattern, replacement)
items = rules.items()
except AttributeError:
# `rules` is a list of (pattern, replacement) tuples
items = enumerate(rules)
for key, (pat, replacement) in items:
matched = pat.match(expr)
if matched:
try:
return replacement(**matched)
except CannotSimplify:
pass
return expr
|
Non-recursively match expr against all rules
|
def get(cls, id_):
    """Return a workflow object from id.

    :param id_: Identifier of the workflow object to load.
    :raises WorkflowsMissingObject: If no row matches ``id_``.
    """
    with db.session.no_autoflush:
        query = cls.dbmodel.query.filter_by(id=id_)
        try:
            model = query.one()
        except NoResultFound:
            # Error message fixed: the original read "for for id".
            raise WorkflowsMissingObject("No object for id {0}".format(
                id_
            ))
        return cls(model)
|
Return a workflow object from id.
|
def get_numwords():
    """Build a regex alternation and value table for English number words.

    Returns:
        tuple: ``(all_numbers, numwords)`` where ``all_numbers`` is a
        regex pattern matching any known number word and ``numwords``
        maps each word to a ``(scale, increment)`` pair.
    """
    numwords = {'and': (1, 0), 'a': (1, 1), 'an': (1, 1)}
    for idx, word in enumerate(UNITS):
        numwords[word] = (1, idx)
    for idx, word in enumerate(TENS):
        numwords[word] = (1, idx * 10)
    for idx, word in enumerate(SCALES):
        # 'hundred' scales by 100; later scales by powers of 1000.
        numwords[word] = (10 ** (idx * 3 or 2), 0)
    # The original used `ur'...'` prefixes, which are Python 2-only
    # syntax (SyntaxError on Python 3); plain raw strings produce the
    # same patterns on both interpreters.
    all_numbers = r'|'.join(r'\b%s\b' % i for i in numwords.keys() if i)
    return all_numbers, numwords
|
Convert number words to integers in a given text.
|
def remove_child_vault(self, vault_id, child_id):
    """Removes a child from a vault.

    arg:    vault_id (osid.id.Id): the ``Id`` of a vault
    arg:    child_id (osid.id.Id): the ``Id`` of the child
    raise:  NotFound - ``vault_id`` not parent of ``child_id``
    raise:  NullArgument - ``vault_id`` or ``child_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinHierarchyDesignSession.remove_child_bin_template
    # Prefer the catalog session when one is configured.
    if self._catalog_session is not None:
        return self._catalog_session.remove_child_catalog(
            catalog_id=vault_id, child_id=child_id)
    return self._hierarchy_session.remove_child(
        id_=vault_id, child_id=child_id)
|
Removes a child from a vault.
arg: vault_id (osid.id.Id): the ``Id`` of a vault
arg: child_id (osid.id.Id): the ``Id`` of the child
raise: NotFound - ``vault_id`` not parent of ``child_id``
raise: NullArgument - ``vault_id`` or ``child_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
|
async def pixy_set_servos(self, s0, s1):
    """
    Sends the setServos Pixy command.

    This method sets the pan/tilt servos that are plugged into Pixy's
    two servo ports.

    :param s0: value 0 to 1000
    :param s1: value 0 to 1000
    :returns: No return value.
    """
    # Each servo value is encoded as two 7-bit bytes, LSB first.
    payload = [PrivateConstants.PIXY_SET_SERVOS]
    for value in (s0, s1):
        payload.append(value & 0x7f)
        payload.append((value >> 7) & 0x7f)
    await self._send_sysex(PrivateConstants.PIXY_CONFIG, payload)
|
Sends the setServos Pixy command.
This method sets the pan/tilt servos that are plugged into Pixy's two servo ports.
:param s0: value 0 to 1000
:param s1: value 0 to 1000
:returns: No return value.
|
def create_observations(params: Dict[str, Dict[str, Any]], access_token: str) -> List[Dict[str, Any]]:
    """Create a single or several (if passed an array) observations).

    :param params: observation payload, e.g.
        ``{'observation': {'species_guess': 'Pieris rapae'}}``
        (allowed keys: see
        https://www.inaturalist.org/pages/api+reference#post-observations)
    :param access_token: the access token, as returned by :func:`get_access_token()`
    :return: iNaturalist's JSON response, as a Python object
    :raise: requests.HTTPError, if the call is not successful. iNaturalist returns an error 422 (unprocessable entity)
        if it rejects the observation data (for example an observation date in the future or a latitude > 90. In
        that case the exception's `response` attribute give details about the errors.

    TODO investigate: according to the doc, we should be able to pass multiple observations (in an array, and in
    renaming observation to observations, but as far as I saw they are not created (while a status of 200 is returned)
    """
    endpoint = "{base_url}/observations.json".format(base_url=INAT_BASE_URL)
    response = requests.post(url=endpoint,
                             json=params,
                             headers=_build_auth_header(access_token))
    # Surface HTTP-level failures (e.g. 422 validation errors) to the caller.
    response.raise_for_status()
    return response.json()
|
Create a single or several (if passed an array) observations).
:param params:
:param access_token: the access token, as returned by :func:`get_access_token()`
:return: iNaturalist's JSON response, as a Python object
:raise: requests.HTTPError, if the call is not successful. iNaturalist returns an error 422 (unprocessable entity)
if it rejects the observation data (for example an observation date in the future or a latitude > 90. In
that case the exception's `response` attribute give details about the errors.
allowed params: see https://www.inaturalist.org/pages/api+reference#post-observations
Example:
params = {'observation':
{'species_guess': 'Pieris rapae'},
}
TODO investigate: according to the doc, we should be able to pass multiple observations (in an array, and in
renaming observation to observations, but as far as I saw they are not created (while a status of 200 is returned)
|
def get_stripe_dashboard_url(self):
    """Return the Stripe dashboard URL for this object, or "" when unavailable."""
    # Without both a dashboard item name and an id there is nothing to link to.
    if not (self.stripe_dashboard_item_name and self.id):
        return ""
    return "{base_url}{item}/{id}".format(
        base_url=self._get_base_stripe_dashboard_url(),
        item=self.stripe_dashboard_item_name,
        id=self.id,
    )
|
Get the stripe dashboard url for this object.
|
def simple_logging_config(func):
    """Decorator to allow a simple logging configuration.

    This encompasses giving a `log_folder`, `logger_names` as well as
    `log_levels` instead of a full `log_config`; the two styles are
    mutually exclusive.
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        simple_style = use_simple_logging(kwargs)
        # The simple kwargs and a full `log_config` cannot be combined.
        if simple_style and 'log_config' in kwargs:
            raise ValueError('Please do not specify `log_config` '
                             'if you want to use the simple '
                             'way of providing logging configuration '
                             '(i.e using `log_folder`, `logger_names` and/or `log_levels`).')
        if simple_style:
            # Translate the simple kwargs into a proper logging configuration.
            _change_logging_kwargs(kwargs)
        return func(self, *args, **kwargs)
    return wrapper
|
Decorator to allow a simple logging configuration.
This encompasses giving a `log_folder`, `logger_names` as well as `log_levels`.
|
def is_elected_leader(resource):
    """Return True if the charm executing this is the elected cluster leader.

    Leadership is determined by, in order:
    1. If juju is sufficiently new and leadership election is supported,
       the is_leader command will be used.
    2. If the charm is part of a corosync cluster, call corosync to
       determine leadership.
    3. If the charm is not part of a corosync cluster, the leader is
       determined as being "the alive unit with the lowest unit number".
       In other words, the oldest surviving unit.
    """
    try:
        # Preferred path: native juju leadership election.
        return juju_is_leader()
    except NotImplementedError:
        log('Juju leadership election feature not enabled'
            ', using fallback support',
            level=WARNING)
        if is_clustered():
            # Corosync/CRM decides leadership for clustered charms.
            if is_crm_leader(resource):
                return True
            log('Deferring action to CRM leader.', level=INFO)
            return False
        # Non-clustered fallback: the oldest surviving peer leads.
        peers = peer_units()
        if peers and not oldest_peer(peers):
            log('Deferring action to oldest service unit.', level=INFO)
            return False
        return True
|
Returns True if the charm executing this is the elected cluster leader.
It relies on two mechanisms to determine leadership:
1. If juju is sufficiently new and leadership election is supported,
the is_leader command will be used.
2. If the charm is part of a corosync cluster, call corosync to
determine leadership.
3. If the charm is not part of a corosync cluster, the leader is
determined as being "the alive unit with the lowest unit number". In
other words, the oldest surviving unit.
|
def sibling(self, name: InstanceName) -> "ObjectMember":
    """Return an instance node corresponding to a sibling member.

    Args:
        name: Instance name of the sibling member.

    Raises:
        NonexistentSchemaNode: If member `name` is not permitted by the
            schema.
        NonexistentInstance: If sibling member `name` doesn't exist.
    """
    # Validate the name against the schema first; may raise
    # NonexistentSchemaNode.
    ssn = self.parinst._member_schema_node(name)
    neighbours = self.siblings.copy()
    try:
        # The requested sibling becomes the new focus value.
        target_value = neighbours.pop(name)
    except KeyError:
        raise NonexistentInstance(self.json_pointer(),
                                  f"member '{name}'") from None
    # The current member moves into the sibling map of the result.
    neighbours[self.name] = self.value
    return ObjectMember(name, neighbours, target_value, self.parinst,
                        ssn, self.timestamp)
|
Return an instance node corresponding to a sibling member.
Args:
name: Instance name of the sibling member.
Raises:
NonexistentSchemaNode: If member `name` is not permitted by the
schema.
NonexistentInstance: If sibling member `name` doesn't exist.
|
def data_from_archive(self):
    """Appends setup.py specific metadata to archive_data."""
    data = super(SetupPyMetadataExtractor, self).data_from_archive
    data.update(
        has_packages=self.has_packages,
        packages=self.packages,
        has_bundled_egg_info=self.has_bundled_egg_info,
    )
    docs_dir = self.sphinx_dir
    if docs_dir:
        # Strip the leading path component (the archive's top-level dir).
        data['sphinx_dir'] = "/".join(docs_dir.split("/")[1:])
        # Sphinx docs imply a build-time dependency on sphinx.
        data['build_deps'].append(
            ['BuildRequires', self.name_convertor.rpm_name(
                "sphinx", self.base_python_version)])
    return data
|
Appends setup.py specific metadata to archive_data.
|
def count_frames(frame, count_start=0):
    """Return a count of the number of frames.

    Walks the ``f_back`` chain starting at *frame* and returns the chain
    length minus *count_start*.
    """
    depth = -count_start
    current = frame
    while current:
        depth += 1
        current = current.f_back
    return depth
|
Return a count of the number of frames
|
def search(self, filterstr, attrlist):
    """Run a paged subtree query against the configured LDAP server."""
    cfg = self.settings
    return self._paged_search_ext_s(
        cfg.BASE,
        ldap.SCOPE_SUBTREE,
        filterstr=filterstr,
        attrlist=attrlist,
        page_size=cfg.PAGE_SIZE,
    )
|
Query the configured LDAP server.
|
def _handle_reference_cable(self, reference, handler, reifier):
    """Emit topic-map statements for a reference pointing at another cable.

    Creates a ``references`` association between the current cable topic
    (``self._cable_psi``) and the referenced cable, reified by *reifier*,
    then declares the referenced cable topic itself (type and name) and
    links it to its origin.

    :param reference: reference whose ``value`` is the referenced cable's
        identifier.
    :param handler: event handler receiving startTopic/isa/endTopic events.
    :param reifier: reifier attached to the ``references`` association.
    """
    # PSI (subject identifier) of the referenced cable.
    cable_ref = psis.cable_psi(reference.value)
    # current cable --references--> referenced cable
    self._assoc(psis.REFERENCES_TYPE,
                psis.SOURCE_TYPE, self._cable_psi,
                psis.TARGET_TYPE, cable_ref,
                reifier)
    # Declare the referenced cable as a topic of type CABLE with its id
    # as name; the start/end calls bracket the topic's statements.
    handler.startTopic(cable_ref)
    handler.isa(psis.CABLE_TYPE)
    self._name(reference.value)
    handler.endTopic()
    # NOTE(review): presumably records which origin sent the referenced
    # cable -- confirm against _sent_by's definition.
    self._sent_by(psis.origin_psi_by_cable_id(reference.value), cable_ref)
|
\
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.