def cancel_completion(self):
"""Cancel the completion
should be called when the completer have to be dismissed
This reset internal variable, clearing the temporary buffer
of the console where the completion are shown.
"""
self._consecutive_tab = 0
self._slice_start = 0
self._console_widget._clear_temporary_buffer()
self._index = (0, 0)
if self._sliding_interval:
self._sliding_interval = None
def commitreturn(self, cursor, qstring, vals=()):
"careful: don't pass cursor (it's from decorator)"
cursor.execute(qstring, vals)
return cursor.fetchall()[0]
def _string_to_sign(item, table_name, attribute_actions):
# type: (dynamodb_types.ITEM, Text, AttributeActions) -> bytes
"""Generate the string to sign from an encrypted item and configuration.
:param dict item: Encrypted DynamoDB item
:param str table_name: Table name to use when generating the string to sign
:param AttributeActions attribute_actions: Actions to take for item
"""
hasher = hashes.Hash(hashes.SHA256(), backend=default_backend())
data_to_sign = bytearray()
data_to_sign.extend(_hash_data(hasher=hasher, data="TABLE>{}<TABLE".format(table_name).encode(TEXT_ENCODING)))
for key in sorted(item.keys()):
action = attribute_actions.action(key)
if action is CryptoAction.DO_NOTHING:
continue
data_to_sign.extend(_hash_data(hasher=hasher, data=key.encode(TEXT_ENCODING)))
if action is CryptoAction.SIGN_ONLY:
data_to_sign.extend(SignatureValues.PLAINTEXT.sha256)
else:
data_to_sign.extend(SignatureValues.ENCRYPTED.sha256)
data_to_sign.extend(_hash_data(hasher=hasher, data=serialize_attribute(item[key])))
return bytes(data_to_sign)
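A minimal, self-contained sketch of the canonicalization idea above; sketch_hash and string_to_sign_sketch are hypothetical stand-ins for the module's _hash_data helper and serialize_attribute:

import hashlib

def sketch_hash(data):
    # hypothetical stand-in for the module's _hash_data helper
    return hashlib.sha256(data).digest()

def string_to_sign_sketch(item, table_name):
    data = bytearray()
    data.extend(sketch_hash("TABLE>{}<TABLE".format(table_name).encode("utf-8")))
    for key in sorted(item):  # sorted keys make the result independent of dict order
        data.extend(sketch_hash(key.encode("utf-8")))
        data.extend(sketch_hash(repr(item[key]).encode("utf-8")))  # crude stand-in for serialize_attribute
    return bytes(data)

print(string_to_sign_sketch({"b": 2, "a": 1}, "example-table").hex()[:16])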
def plot_correlation(self, freq=None, title=None,
figsize=(12, 6), **kwargs):
"""
Utility function to plot correlations.
Args:
* freq (str): Pandas data frequency alias string
* title (str): Plot title
* figsize (tuple (x,y)): figure size
* kwargs: passed to Pandas' plot_corr_heatmap function
"""
if title is None:
title = self._get_default_plot_title(
freq, 'Return Correlation Matrix')
rets = self._get_series(freq).to_returns().dropna()
return rets.plot_corr_heatmap(title=title, figsize=figsize, **kwargs)
def verify(self, key):
"""
Verifies a signature on a certificate request.
:param PKey key: The public key that signature is supposedly from.
:return: ``True`` if the signature is correct.
:rtype: bool
:raises OpenSSL.crypto.Error: If the signature is invalid, or there was
a problem verifying the signature.
"""
answer = _lib.NETSCAPE_SPKI_verify(self._spki, key._pkey)
if answer <= 0:
_raise_current_error()
return True
def JUMPI(self, dest, cond):
"""Conditionally alter the program counter"""
self.pc = Operators.ITEBV(256, cond != 0, dest, self.pc + self.instruction.size)
# This sets up a check for JUMPDEST in the next instruction if cond != 0
self._set_check_jmpdest(cond != 0)
def deobfuscate(cls, data):
"""
Reverses the obfuscation done by the :meth:`obfuscate` method.
If an identifier arrives without correct base64 padding this
function will append it to the end.
"""
# the str() call is necessary to convert the unicode string
# to an ascii string since the urlsafe_b64decode method
# sometimes chokes on unicode strings
return base64.urlsafe_b64decode(str((
data + b'A=='[(len(data) - 1) % 4:])))
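A quick illustration of the padding trick above: the slice of b'A==' supplies exactly the characters needed to round the length up to a multiple of four (the inputs here are made-up identifiers):

import base64

for data in (b"YQ", b"YWI", b"YWJj"):
    padded = data + b'A=='[(len(data) - 1) % 4:]
    print(padded, base64.urlsafe_b64decode(padded))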
def write_pdf(pdf_obj, destination):
"""
Write a PDF object to a file.
:param pdf_obj: PDF object to be written to file
:param destination: Destination path
"""
reader = PdfFileReader(pdf_obj) # read the incoming PDF object
writer = PdfFileWriter()
page_count = reader.getNumPages()
# add the "watermark" (which is the new pdf) on the existing page
for page_number in range(page_count):
page = reader.getPage(page_number)
writer.addPage(page)
# finally, write "output" to a real file
with open(destination, "wb") as outputStream:
writer.write(outputStream)
def alpha_(self,x):
""" Create a mappable function alpha to apply to each xmin in a list of xmins.
This is essentially the slow version of fplfit/cplfit, though I bet it could
be sped up with a clever use of parallel_map. Not intended to be used by users."""
def alpha(xmin,x=x):
"""
given a sorted data set and a minimum, returns power law MLE fit
data is passed as a keyword parameter so that it can be vectorized
"""
x = [i for i in x if i>=xmin]
n = len(x)  # the MLE uses the number of samples, not their sum
divsum = sum([math.log(i/xmin) for i in x])
if divsum == 0:
return float('inf')
# the "1+" here is unimportant because alpha_ is only used for minimization
a = 1 + float(n) / divsum
return a
return alpha
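As a sanity check on the estimator above (with n taken as the sample count), fitting synthetic samples drawn from a known power law should roughly recover the exponent; a self-contained sketch:

import math
import random

random.seed(0)
xmin, alpha_true = 1.0, 2.5
# inverse-transform sampling: P(X > x) = (x / xmin) ** -(alpha - 1)
x = [xmin * (1 - random.random()) ** (-1.0 / (alpha_true - 1)) for _ in range(5000)]
alpha_hat = 1 + len(x) / sum(math.log(i / xmin) for i in x)
print(round(alpha_hat, 2))  # should land close to 2.5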
def resize(self, new_data_size):
"""Resize the file and update the chunk sizes"""
resize_bytes(
self.__fileobj, self.data_size, new_data_size, self.data_offset)
self._update_size(new_data_size)
def SdkSetup(self):
"""
Microsoft Windows SDK Setup
"""
if self.vc_ver > 9.0:
return []
return [os.path.join(self.si.WindowsSdkDir, 'Setup')]
def post_mortem(tb=None, host='', port=5555, patch_stdstreams=False):
"""
Start post-mortem debugging for the provided traceback object
If no traceback is provided the debugger tries to obtain a traceback
for the last unhandled exception.
Example::
try:
# Some error-prone code
assert ham == spam
except:
web_pdb.post_mortem()
:param tb: traceback for post-mortem debugging
:type tb: types.TracebackType
:param host: web-UI hostname or IP-address
:type host: str
:param port: web-UI port. If ``port=-1``, choose a random port value
between 32768 and 65536.
:type port: int
:param patch_stdstreams: redirect all standard input and output
streams to the web-UI.
:type patch_stdstreams: bool
:raises ValueError: if no valid traceback is provided and the Python
interpreter is not handling any exception
"""
# handling the default
if tb is None:
# sys.exc_info() returns (type, value, traceback) if an exception is
# being handled, otherwise it returns (None, None, None)
t, v, tb = sys.exc_info()
exc_data = traceback.format_exception(t, v, tb)
else:
exc_data = traceback.format_tb(tb)
if tb is None:
raise ValueError('A valid traceback must be passed if no '
'exception is being handled')
pdb = WebPdb.active_instance
if pdb is None:
pdb = WebPdb(host, port, patch_stdstreams)
else:
pdb.remove_trace()
pdb.console.writeline('*** Web-PDB post-mortem ***\n')
pdb.console.writeline(''.join(exc_data))
pdb.reset()
pdb.interaction(None, tb)
def create(text,score,prompt_string, dump_data=False):
"""
Creates a machine learning model from input text, associated scores, a prompt, and a path to the model
TODO: Remove model path argument, it is needed for now to support legacy code
text - A list of strings containing the text of the essays
score - a list of integers containing score values
prompt_string - the common prompt for the set of essays
"""
if dump_data:
dump_input_data(text, score)
algorithm = select_algorithm(score)
#Initialize a results dictionary to return
results = {'errors': [],'success' : False, 'cv_kappa' : 0, 'cv_mean_absolute_error': 0,
'feature_ext' : "", 'classifier' : "", 'algorithm' : algorithm,
'score' : score, 'text' : text, 'prompt' : prompt_string}
if len(text)!=len(score):
msg = "Target and text lists must be same length."
results['errors'].append(msg)
log.exception(msg)
return results
try:
#Create an essay set object that encapsulates all the essays and alternate representations (tokens, etc)
e_set = model_creator.create_essay_set(text, score, prompt_string)
except Exception:
msg = "essay set creation failed."
results['errors'].append(msg)
log.exception(msg)
try:
#Gets features from the essay set and computes error
feature_ext, classifier, cv_error_results = model_creator.extract_features_and_generate_model(e_set, algorithm = algorithm)
results['cv_kappa']=cv_error_results['kappa']
results['cv_mean_absolute_error']=cv_error_results['mae']
results['feature_ext']=feature_ext
results['classifier']=classifier
results['algorithm'] = algorithm
results['success']=True
except Exception:
msg = "feature extraction and model creation failed."
results['errors'].append(msg)
log.exception(msg)
return results
def _expand_json(self, j):
"""Decompress the BLOB portion of the usernotes.
Arguments:
j: the JSON returned from the wiki page (dict)
Returns a Dict with the 'blob' key removed and a 'users' key added
"""
decompressed_json = copy.copy(j)
decompressed_json.pop('blob', None) # Remove BLOB portion of JSON
# Decode and decompress JSON
compressed_data = base64.b64decode(j['blob'])
original_json = zlib.decompress(compressed_data).decode('utf-8')
decompressed_json['users'] = json.loads(original_json) # Insert users
return decompressed_json
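A round-trip sketch of the blob scheme this assumes (zlib-compressed, base64-encoded JSON stored under a 'blob' key; the field names other than 'blob' are made up):

import base64
import json
import zlib

users = {"someuser": {"ns": [0], "notes": ["example"]}}
page = {"ver": 6, "blob": base64.b64encode(zlib.compress(json.dumps(users).encode("utf-8")))}

decoded = json.loads(zlib.decompress(base64.b64decode(page["blob"])).decode("utf-8"))
assert decoded == users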
def resize(self, new_size):
"""
Resizes this disk. The Linode Instance this disk belongs to must have
sufficient space available to accommodate the new size, and must be
offline.
**NOTE** If resizing a disk down, the filesystem on the disk must still
fit on the new disk size. You may need to resize the filesystem on the
disk first before performing this action.
:param new_size: The intended new size of the disk, in MB
:type new_size: int
:returns: True if the resize was initiated successfully.
:rtype: bool
"""
self._client.post('{}/resize'.format(Disk.api_endpoint), model=self, data={"size": new_size})
return True
def cli(ctx, config, quiet):
"""AWS ECS Docker Deployment Tool"""
ctx.obj = {}
ctx.obj['config'] = load_config(config.read()) # yaml.load(config.read())
ctx.obj['quiet'] = quiet
log(ctx, ' * ' + rnd_scotty_quote() + ' * ')
def populate(self, blueprint, documents):
"""Populate the database with documents"""
# Finish the documents
documents = self.finish(blueprint, documents)
# Convert the documents to frame instances
frames = []
for document in documents:
# Separate out any meta fields
meta_document = {}
for field_name in blueprint._meta_fields:
meta_document[field_name] = document[field_name]
document.pop(field_name)
# Initialize the frame
frame = blueprint.get_frame_cls()(document)
# Apply any meta fields
for key, value in meta_document.items():
setattr(frame, key, value)
frames.append(frame)
# Insert the documents
blueprint.on_fake(frames)
frames = blueprint.get_frame_cls().insert_many(frames)
blueprint.on_faked(frames)
return frames
def update(self):
"""Called before the listing renders
"""
super(AnalysisRequestsView, self).update()
self.workflow = api.get_tool("portal_workflow")
self.member = self.mtool.getAuthenticatedMember()
self.roles = self.member.getRoles()
setup = api.get_bika_setup()
# remove `to_be_sampled` filter
if not setup.getSamplingWorkflowEnabled():
self.review_states = filter(
lambda x: x.get("id") != "to_be_sampled", self.review_states)
# remove `scheduled_sampling` filter
if not setup.getScheduleSamplingEnabled():
self.review_states = filter(
lambda x: x.get("id") != "scheduled_sampling",
self.review_states)
# remove `to_be_preserved` filter
if not setup.getSamplePreservationEnabled():
self.review_states = filter(
lambda x: x.get("id") != "to_be_preserved", self.review_states)
# remove `rejected` filter
if not setup.getRejectionReasons():
self.review_states = filter(
lambda x: x.get("id") != "rejected", self.review_states)
self.hideclientlink = "RegulatoryInspector" in self.roles \
and "Manager" not in self.roles \
and "LabManager" not in self.roles \
and "LabClerk" not in self.roles
if self.context.portal_type == "AnalysisRequestsFolder" and \
(self.mtool.checkPermission(AddAnalysisRequest, self.context)):
self.context_actions[_("Add")] = \
{"url": "ar_add?ar_count=1",
'permission': 'Add portal content',
"icon": "++resource++bika.lims.images/add.png"}
self.editresults = -1
self.clients = {}
# self.user_is_preserver = "Preserver" in self.roles
# Printing workflow enabled?
# If not, remove the Column
self.printwfenabled = \
self.context.bika_setup.getPrintingWorkflowEnabled()
printed_colname = "Printed"
if not self.printwfenabled and printed_colname in self.columns:
# Remove "Printed" columns
del self.columns[printed_colname]
tmprvs = []
for rs in self.review_states:
tmprs = rs
tmprs["columns"] = [c for c in rs.get("columns", []) if
c != printed_colname]
tmprvs.append(tmprs)
self.review_states = tmprvs
elif self.printwfenabled:
# Print button to choose multiple ARs and print them.
review_states = []
for review_state in self.review_states:
review_state.get("custom_transitions", []).extend(
[{"id": "print_sample",
"title": _("Print"),
"url": "workflow_action?action=print_sample"}, ])
review_states.append(review_state)
self.review_states = review_states
# Only "senaite.core: ManageAnalysisRequests" may see the copy to new button.
# elsewhere it is hacked in where required.
if self.copy_to_new_allowed:
review_states = []
for review_state in self.review_states:
review_state.get("custom_transitions", []).extend(
[{"id": "copy_to_new",
"title": _("Copy to new"),
"url": "workflow_action?action=copy_to_new"}, ])
review_states.append(review_state)
self.review_states = review_states
def _create_entry(self, name, values, fbterm=False):
''' Render first values as string and place as first code,
save, and return attr.
'''
if fbterm:
attr = _PaletteEntryFBTerm(self, name.upper(), ';'.join(values))
else:
attr = _PaletteEntry(self, name.upper(), ';'.join(values))
setattr(self, name, attr) # now cached
return attr
def welcome_if_new(self, node):
"""
Given a new node, send it all the keys/values it should be storing,
then add it to the routing table.
@param node: A new node that just joined (or that we just found out
about).
Process:
For each key in storage, get k closest nodes. If newnode is closer
than the furtherst in that list, and the node for this server
is closer than the closest in that list, then store the key/value
on the new node (per section 2.5 of the paper)
"""
if not self.router.is_new_node(node):
return
log.info("never seen %s before, adding to router", node)
for key, value in self.storage:
keynode = Node(digest(key))
neighbors = self.router.find_neighbors(keynode)
if neighbors:
last = neighbors[-1].distance_to(keynode)
new_node_close = node.distance_to(keynode) < last
first = neighbors[0].distance_to(keynode)
this_closest = self.source_node.distance_to(keynode) < first
if not neighbors or (new_node_close and this_closest):
asyncio.ensure_future(self.call_store(node, key, value))
self.router.add_contact(node)
def run(self):
r"""
Overrides the default run() method.
Performs the complete analysis on the model specified during initialisation.
:return: an ODE problem which can be further used in inference and simulation.
:rtype: :class:`~means.core.problems.ODEProblem`
"""
max_order = self.__max_order
stoichiometry_matrix = self.model.stoichiometry_matrix
propensities = self.model.propensities
species = self.model.species
# compute n_counter and k_counter; the "n" and "k" vectors in equations, respectively.
n_counter, k_counter = generate_n_and_k_counters(max_order, species)
# dmu_over_dt has row per species and one col per element of n_counter (eq. 6)
dmu_over_dt = generate_dmu_over_dt(species, propensities, n_counter, stoichiometry_matrix)
# Calculate expressions to use in central moments equations (eq. 9)
central_moments_exprs = eq_central_moments(n_counter, k_counter, dmu_over_dt, species, propensities, stoichiometry_matrix, max_order)
# Expresses central moments in terms of raw moments (and central moments) (eq. 8)
central_from_raw_exprs = raw_to_central(n_counter, species, k_counter)
# Substitute raw moment, in central_moments, with expressions depending only on central moments
central_moments_exprs = self._substitute_raw_with_central(central_moments_exprs, central_from_raw_exprs, n_counter, k_counter)
# Get final right hand side expressions for each moment in a vector
mfk = self._generate_mass_fluctuation_kinetics(central_moments_exprs, dmu_over_dt, n_counter)
# Applies moment expansion closure, that is replaces last order central moments by parametric expressions
mfk = self.closure.close(mfk, central_from_raw_exprs, n_counter, k_counter)
# These are the left-hand side symbols referring to the mfk
prob_lhs = self._generate_problem_left_hand_side(n_counter, k_counter)
# Finally, we build the problem
out_problem = ODEProblem("MEA", prob_lhs, mfk, sp.Matrix(self.model.parameters))
return out_problem
def geo_area(arg, use_spheroid=None):
"""
Compute the area of geospatial data.
Parameters
----------
arg : geometry or geography
use_spheroid: default None
Returns
-------
area : double scalar
"""
op = ops.GeoArea(arg, use_spheroid)
return op.to_expr()
def random_string_array(max_len=1, min_len=1,
elem_max_len=1, elem_min_len=1,
strings=string.ascii_letters, **kwargs):
"""
:param max_len: max value of len(array)
:param min_len: min value of len(array)
:param elem_max_len: max value of len(array[index])
:param elem_min_len: min value of len(array[index])
:param strings: allowed string characters in each element of array,
or predefined list of strings, or function pointer
:param **kwargs: keyworded arguments for strings if it's a function pointer
:return: SeedStringArray
"""
string_array = list()
for _ in range(random.randint(min_len, max_len)):
string_array.append(Randomize.random_string(max_len=elem_max_len, min_len=elem_min_len,
chars=strings, **kwargs).value)
return SeedStringArray(string_array)
def ParseContainersTable(
self, parser_mediator, database=None, table=None, **unused_kwargs):
"""Parses the Containers table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
database (Optional[pyesedb.file]): ESE database.
table (Optional[pyesedb.table]): table.
Raises:
ValueError: if the database or table value is missing.
"""
if database is None:
raise ValueError('Missing database value.')
if table is None:
raise ValueError('Missing table value.')
for esedb_record in table.records:
if parser_mediator.abort:
break
record_values = self._GetRecordValues(
parser_mediator, table.name, esedb_record)
event_data = MsieWebCacheContainersEventData()
event_data.container_identifier = record_values.get('ContainerId', None)
event_data.directory = record_values.get('Directory', None)
event_data.name = record_values.get('Name', None)
event_data.set_identifier = record_values.get('SetId', None)
timestamp = record_values.get('LastScavengeTime', None)
if timestamp:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, 'Last Scavenge Time')
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = record_values.get('LastAccessTime', None)
if timestamp:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
parser_mediator.ProduceEventWithEventData(event, event_data)
container_identifier = record_values.get('ContainerId', None)
container_name = record_values.get('Name', None)
if not container_identifier or not container_name:
continue
table_name = 'Container_{0:d}'.format(container_identifier)
esedb_table = database.get_table_by_name(table_name)
if not esedb_table:
parser_mediator.ProduceExtractionWarning(
'Missing table: {0:s}'.format(table_name))
continue
self._ParseContainerTable(parser_mediator, esedb_table, container_name)
def clear(self):
"""Clear current state."""
# Adapted from http://stackoverflow.com/a/13103617/1198772
for i in reversed(list(range(self.extra_keywords_layout.count()))):
self.extra_keywords_layout.itemAt(i).widget().setParent(None)
self.widgets_dict = OrderedDict()
def delete_key(self, key_to_delete):
"""Deletes the specified key
:param key_to_delete: (str) S3 key to delete
:return: (bool) True if the key was deleted successfully, False otherwise
"""
log = logging.getLogger(self.cls_logger + '.delete_key')
log.info('Attempting to delete key: {k}'.format(k=key_to_delete))
try:
self.s3client.delete_object(Bucket=self.bucket_name, Key=key_to_delete)
except ClientError:
_, ex, trace = sys.exc_info()
log.error('ClientError: Unable to delete key: {k}\n{e}'.format(k=key_to_delete, e=str(ex)))
return False
else:
log.info('Successfully deleted key: {k}'.format(k=key_to_delete))
return True
def query_boost_version(boost_root):
'''
Read in the Boost version from a given boost_root.
'''
boost_version = None
if os.path.exists(os.path.join(boost_root,'Jamroot')):
with codecs.open(os.path.join(boost_root,'Jamroot'), 'r', 'utf-8') as f:
for line in f.readlines():
parts = line.split()
if len(parts) >= 5 and parts[1] == 'BOOST_VERSION':
boost_version = parts[3]
break
if not boost_version:
boost_version = 'default'
return boost_version
def merge_conf(to_hash, other_hash, path=[]):
"merges other_hash into to_hash"
for key in other_hash:
if (key in to_hash and isinstance(to_hash[key], dict)
and isinstance(other_hash[key], dict)):
merge_conf(to_hash[key], other_hash[key], path + [str(key)])
else:
to_hash[key] = other_hash[key]
return to_hash
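A quick usage example: nested dicts merge recursively, scalar values are overwritten, and to_hash is modified in place:

base = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
override = {'db': {'port': 5433}, 'debug': True}
print(merge_conf(base, override))
# {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}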
def split_python_text_into_lines(text):
"""
# TODO: make it so this function returns text so one statement is on one
# line; that means not splitting up things like function definitions into
# multiple lines
"""
#import jedi
#script = jedi.Script(text, line=1, column=None, path='')
def parentesis_are_balanced(line):
"""
helper
References:
http://stackoverflow.com/questions/18007995/recursive-paren-balance
"""
def balanced(str_, i=0, cnt=0, left='(', right=')'):
if i == len(str_):
return cnt == 0
if cnt < 0:
return False
if str_[i] == left:
return balanced(str_, i + 1, cnt + 1)
elif str_[i] == right:
return balanced(str_, i + 1, cnt - 1)
return balanced(str_, i + 1, cnt)
return balanced(line)
lines = text.split('\n')
new_lines = []
current_line = ''
for line in lines:
current_line += line
if parentesis_are_balanced(current_line):
new_lines.append(current_line)
current_line = ''
return new_lines
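The recursive balance check above can also be written iteratively; a stand-alone sketch of the same idea:

def balanced(s, left='(', right=')'):
    depth = 0
    for ch in s:
        depth += (ch == left) - (ch == right)
        if depth < 0:  # a closer appeared before a matching opener
            return False
    return depth == 0

print(balanced('f(g(x), y)'), balanced('f(g(x)'))  # True False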
def load_dict(self, source, namespace='', make_namespaces=False):
''' Import values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> ConfigDict().load_dict({'name': {'space': {'key': 'value'}}})
{'name.space.key': 'value'}
'''
stack = [(namespace, source)]
while stack:
prefix, source = stack.pop()
if not isinstance(source, dict):
raise TypeError('Source is not a dict (%r)' % type(source))
for key, value in source.items():
if not isinstance(key, str):
raise TypeError('Key is not a string (%r)' % type(key))
full_key = prefix + '.' + key if prefix else key
if isinstance(value, dict):
stack.append((full_key, value))
if make_namespaces:
self[full_key] = self.Namespace(self, full_key)
else:
self[full_key] = value
return self
def list_enrollment_claims(self, **kwargs):
"""List"""
kwargs = self._verify_sort_options(kwargs)
kwargs = self._verify_filters(kwargs, EnrollmentClaim)
api = self._get_api(enrollment.PublicAPIApi)
return PaginatedResponse(
api.get_device_enrollments,
lwrap_type=EnrollmentClaim,
**kwargs
)
def to_dict(self, remove_nones=False):
"""Return a dict representation of the `DidlResource`.
Args:
remove_nones (bool, optional): Optionally remove dictionary
elements when their value is `None`.
Returns:
dict: a dict representing the `DidlResource`
"""
content = {
'uri': self.uri,
'protocol_info': self.protocol_info,
'import_uri': self.import_uri,
'size': self.size,
'duration': self.duration,
'bitrate': self.bitrate,
'sample_frequency': self.sample_frequency,
'bits_per_sample': self.bits_per_sample,
'nr_audio_channels': self.nr_audio_channels,
'resolution': self.resolution,
'color_depth': self.color_depth,
'protection': self.protection,
}
if remove_nones:
# delete any elements that have a value of None to optimize size
# of the returned structure
nones = [k for k in content if content[k] is None]
for k in nones:
del content[k]
return content
def interleave(*args):
'''Interleaves the elements of the provided arrays.
>>> a = [(0, 0), (1, 0), (2, 0), (3, 0)]
>>> b = [(0, 0), (0, 1), (0, 2), (0, 3)]
>>> interleave(a, b)
[(0, 0, 0, 0), (1, 0, 0, 1), (2, 0, 0, 2), (3, 0, 0, 3)]
This is useful for combining multiple vertex attributes into a single
vertex buffer. The shader attributes can be assigned a slice of the
vertex buffer.
'''
result = []
for array in zip(*args):
result.append(tuple(flatten(array)))
return result
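A stand-alone equivalent that avoids the external flatten helper, assuming every element is itself a flat iterable:

from itertools import chain

def interleave_sketch(*args):
    return [tuple(chain.from_iterable(group)) for group in zip(*args)]

a = [(0, 0), (1, 0), (2, 0), (3, 0)]
b = [(0, 0), (0, 1), (0, 2), (0, 3)]
print(interleave_sketch(a, b))
# [(0, 0, 0, 0), (1, 0, 0, 1), (2, 0, 0, 2), (3, 0, 0, 3)]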
def wait_port_open(server, port, timeout=None):
""" Wait for network service to appear
@param server: host to connect to (str)
@param port: port (int)
@param timeout: in seconds, if None or 0 wait forever
@return: True or False; if timeout is None, may return only True or
throw unhandled network exception
"""
import socket
import errno
import time
sleep_s = 0
if timeout:
from time import time as now
# time module is needed to calc timeout shared between two exceptions
end = now() + timeout
while True:
logging.debug("Sleeping for %s second(s)", sleep_s)
time.sleep(sleep_s)
s = socket.socket()
try:
if timeout:
next_timeout = end - now()
if next_timeout < 0:
return False
else:
s.settimeout(next_timeout)
logging.info("connect %s %d", server, port)
s.connect((server, port))
except ConnectionError as err:
logging.debug("ConnectionError %s", err)
if sleep_s == 0:
sleep_s = 1
except socket.gaierror as err:
logging.debug("gaierror %s",err)
return False
except socket.timeout as err:
# this exception occurs only if timeout is set
if timeout:
return False
except TimeoutError as err:
# catch timeout exception from underlying network library
# this one is different from socket.timeout
raise
else:
s.close()
logging.info("wait_port_open: port %s:%s is open", server, port)
return True
def get_image(self, size=SIZE_EXTRA_LARGE):
"""
Returns the user's avatar
size can be one of:
SIZE_EXTRA_LARGE
SIZE_LARGE
SIZE_MEDIUM
SIZE_SMALL
"""
doc = self._request(self.ws_prefix + ".getInfo", True)
return _extract_all(doc, "image")[size]
def _handle_pagerange(pagerange):
"""
Returns start and end pages from the DfR pagerange field.
Parameters
----------
pagerange : str or unicode
DfR-style pagerange, e.g. "pp. 435-444".
Returns
-------
start : str
Start page.
end : str
End page.
"""
try:
pr = re.compile("pp\.\s([0-9]+)\-([0-9]+)")
start, end = re.findall(pr, pagerange)[0]
except IndexError:
start = end = 0
return unicode(start), unicode(end)
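The regex in isolation, for reference (plain Python 3 strings here rather than the snippet's unicode):

import re

pr = re.compile(r"pp\.\s([0-9]+)\-([0-9]+)")
print(re.findall(pr, "pp. 435-444")[0])  # ('435', '444')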
def add_port_profile(self, profile_name, vlan_id, device_id):
"""Adds a port profile and its vlan_id to the table."""
if not self.get_port_profile_for_vlan(vlan_id, device_id):
port_profile = ucsm_model.PortProfile(profile_id=profile_name,
vlan_id=vlan_id,
device_id=device_id,
created_on_ucs=False)
with self.session.begin(subtransactions=True):
self.session.add(port_profile)
return port_profile
def ConsultarCertificacionUltNroOrden(self, pto_emision=1):
"Consulta el último No de orden registrado para CG"
ret = self.client.cgConsultarUltimoNroOrden(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
ptoEmision=pto_emision,
)
ret = ret['liqUltNroOrdenReturn']
self.__analizar_errores(ret)
self.NroOrden = ret['nroOrden']
return True
def mbar_log_W_nk(u_kn, N_k, f_k):
"""Calculate the log weight matrix.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
logW_nk : np.ndarray, dtype='float', shape=(n_samples, n_states)
The normalized log weights.
Notes
-----
Equation (9) in JCP MBAR paper.
"""
u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
log_denominator_n = logsumexp(f_k - u_kn.T, b=N_k, axis=1)
logW = f_k - u_kn.T - log_denominator_n[:, np.newaxis]
return logW
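Assuming the snippet's logsumexp matches scipy's (pymbar's utilities expose an equivalent), the normalization can be checked directly: each sample's N_k-weighted weights sum to one across states:

import numpy as np
from scipy.special import logsumexp

rng = np.random.default_rng(1)
u_kn = rng.random((3, 10))  # 3 states, 10 samples
N_k = np.array([4, 3, 3])
f_k = np.zeros(3)

log_denominator_n = logsumexp(f_k - u_kn.T, b=N_k, axis=1)
logW = f_k - u_kn.T - log_denominator_n[:, np.newaxis]
print(np.allclose(logsumexp(logW, b=N_k, axis=1), 0.0))  # True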
def _enum_member_error(err, eid, name, value, bitmask):
"""Format enum member error."""
exception, msg = ENUM_ERROR_MAP[err]
enum_name = idaapi.get_enum_name(eid)
return exception(('add_enum_member(enum="{}", member="{}", value={}, bitmask=0x{:08X}) '
'failed: {}').format(
enum_name,
name,
value,
bitmask,
msg
))
def set_next_week_day(val, week_day, iso=False):
"""
Set week day.
The new date will be greater than or equal to the input date.
:param val: datetime or date
:type val: datetime.datetime | datetime.date
:param week_day: Week day to set
:type week_day: int
:param iso: week_day in ISO format, or not
:type iso: bool
:return: datetime.datetime | datetime.date
"""
return _set_week_day(val, week_day,
val.isoweekday() if iso else val.weekday(), sign=1)
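_set_week_day is internal to the module, but the intended semantics can be sketched stand-alone: advance zero to six days until the weekday matches (Python's Monday=0 convention assumed):

import datetime

def next_week_day_sketch(val, week_day):
    # hypothetical stand-alone version of the non-ISO path
    return val + datetime.timedelta(days=(week_day - val.weekday()) % 7)

print(next_week_day_sketch(datetime.date(2024, 1, 1), 4))  # 2024-01-05, the next Friday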
def _register_bindings(self, data):
"""
connection_handler method which is called when we connect to pusher.
Responsible for binding callbacks to channels before we connect.
:return:
"""
self._register_diff_order_book_channels()
self._register_live_orders_channels()
self._register_live_trades_channels()
self._register_order_book_channels()
def blend_html_colour_to_white(html_colour, alpha):
"""
:param html_colour: Colour string like FF552B or #334455
:param alpha: Alpha value
:return: Html colour alpha blended onto white
"""
html_colour = html_colour.upper()
has_hash = False
if html_colour[0] == '#':
has_hash = True
html_colour = html_colour[1:]
r_str = html_colour[0:2]
g_str = html_colour[2:4]
b_str = html_colour[4:6]
r = int(r_str, 16)
g = int(g_str, 16)
b = int(b_str, 16)
r = int(alpha * r + (1 - alpha) * 255)
g = int(alpha * g + (1 - alpha) * 255)
b = int(alpha * b + (1 - alpha) * 255)
out = '{:02X}{:02X}{:02X}'.format(r, g, b)
if has_hash:
out = '#' + out
return out
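For example, a 50% blend of pure red onto white lands halfway between the two:

print(blend_html_colour_to_white('#FF0000', 0.5))  # '#FF7F7F'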
def classify(self, text=u''):
""" Predicts the Language of a given text.
:param text: Unicode text to be classified.
"""
text = self.lm.normalize(text)
tokenz = LM.tokenize(text, mode='c')
result = self.lm.calculate(doc_terms=tokenz)
#print 'Karbasa:', self.karbasa(result)
if self.unk and self.lm.karbasa(result) < self.min_karbasa:
lang = 'unk'
else:
lang = result['calc_id']
return lang
def update_resources_from_resfile(self, srcpath, types=None, names=None,
languages=None):
"""
Update or add resources from dll/exe file srcpath.
types = a list of resource types to update (None = all)
names = a list of resource names to update (None = all)
languages = a list of resource languages to update (None = all)
"""
UpdateResourcesFromResFile(self.filename, srcpath, types, names,
languages)
def mount_status_send(self, target_system, target_component, pointing_a, pointing_b, pointing_c, force_mavlink1=False):
'''
Message with some status from APM to GCS about camera or antenna mount
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
pointing_a : pitch(deg*100) (int32_t)
pointing_b : roll(deg*100) (int32_t)
pointing_c : yaw(deg*100) (int32_t)
'''
return self.send(self.mount_status_encode(target_system, target_component, pointing_a, pointing_b, pointing_c), force_mavlink1=force_mavlink1)
def set(cls, obj, keys, value, fill_list_value=None):
"""
sets the value for the given keys on obj. if any of the given
keys does not exist, create the intermediate containers.
"""
current = obj
keys_list = keys.split(".")
for idx, key in enumerate(keys_list, 1):
if type(current) == list:
# Validate this key works with a list.
try:
key = int(key)
except ValueError:
raise cls.Missing(key)
try:
# This is the last key, so set the value.
if idx == len(keys_list):
if type(current) == list:
safe_list_set(
current,
key,
lambda: copy.copy(fill_list_value),
value
)
else:
current[key] = value
# done.
return
# More keys left, ensure we have a container for this key.
if type(key) == int:
try:
current[key]
except IndexError:
# Create a list for this key.
cnext = container_for_key(keys_list[idx])
if type(cnext) == list:
def fill_with():
return []
else:
def fill_with():
return {}
safe_list_set(
current,
key,
fill_with,
[] if type(cnext) == list else {}
)
else:
if key not in current:
# Create a container (list or dict) for this key.
current[key] = container_for_key(keys_list[idx])
# Move on to the next key.
current = current[key]
except (IndexError, KeyError, TypeError):
raise cls.Missing(key)
def first_spark_call():
"""
Return a CallSite representing the first Spark call in the current call stack.
"""
tb = traceback.extract_stack()
if len(tb) == 0:
return None
file, line, module, what = tb[len(tb) - 1]
sparkpath = os.path.dirname(file)
first_spark_frame = len(tb) - 1
for i in range(0, len(tb)):
file, line, fun, what = tb[i]
if file.startswith(sparkpath):
first_spark_frame = i
break
if first_spark_frame == 0:
file, line, fun, what = tb[0]
return CallSite(function=fun, file=file, linenum=line)
sfile, sline, sfun, swhat = tb[first_spark_frame]
ufile, uline, ufun, uwhat = tb[first_spark_frame - 1]
return CallSite(function=sfun, file=ufile, linenum=uline)
def is_recording():
"""Get status on recording/not recording.
Returns
-------
Current state of recording.
"""
curr = ctypes.c_bool()
check_call(_LIB.MXAutogradIsRecording(ctypes.byref(curr)))
return curr.value
def uuids(self):
""" Extract uuid from each item of specified ``seq``.
"""
for f in self._seq:
if isinstance(f, File):
yield f.uuid
elif isinstance(f, six.string_types):
yield f
else:
raise ValueError(
'Invalid type for sequence item: {0}'.format(type(f)))
def satosa_logging(logger, level, message, state, **kwargs):
"""
Adds a session ID to the message.
:type logger: logging
:type level: int
:type message: str
:type state: satosa.state.State
:param logger: Logger to use
:param level: Logger level (ex: logging.DEBUG/logging.WARN/...)
:param message: Message
:param state: The current state
:param kwargs: set exc_info=True to get an exception stack trace in the log
"""
if state is None:
session_id = "UNKNOWN"
else:
try:
session_id = state[LOGGER_STATE_KEY]
except KeyError:
session_id = uuid4().urn
state[LOGGER_STATE_KEY] = session_id
logger.log(level, "[{id}] {msg}".format(id=session_id, msg=message), **kwargs)
def loop(self, *tags):
""" Iterates over the tags in the entire Sentence,
For example, Sentence.loop(POS, LEMMA) yields tuples of the part-of-speech tags and lemmata.
Possible tags: WORD, LEMMA, POS, CHUNK, PNP, RELATION, ROLE, ANCHOR or a custom word tag.
Any order or combination of tags can be supplied.
"""
for i in range(len(self.words)):
yield tuple([self.get(i, tag=tag) for tag in tags])
def InitSiteCheck(self):
"""
make an interactive grid in which users can edit site names
as well as which location a site belongs to
"""
# propagate average lat/lon info from samples table if
# available in samples and missing in sites
self.contribution.propagate_average_up(cols=['lat', 'lon', 'height'],
target_df_name='sites',
source_df_name='samples')
# propagate lithology columns
self.contribution.propagate_lithology_cols()
site_df = self.contribution.tables['sites'].df
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'sites', 'sites', self.panel,
main_frame=self.main_frame)
# redefine default 'save & exit grid' button to go to next dialog instead
self.grid_frame.exitButton.SetLabel('Save and continue')
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON,
lambda event: self.onContinue(event, grid, self.InitLocCheck),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.Bind(wx.EVT_BUTTON,
lambda event: self.onbackButton(event, self.InitSampCheck),
self.backButton)
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, self.min_size)
# center
self.grid_frame.Centre()
return
def failed_hosts(self) -> Dict[str, "MultiResult"]:
"""
Hosts that failed to complete the task
"""
return {k: v for k, v in self.result.items() if v.failed}
def run(command, *args):
""" run command """
# show all clusters
if command == 'clusters':
return clusters.run(command, *args)
# show topologies
elif command == 'topologies':
return topologies.run(command, *args)
# physical plan
elif command == 'containers':
return physicalplan.run_containers(command, *args)
elif command == 'metrics':
return physicalplan.run_metrics(command, *args)
# logical plan
elif command == 'components':
return logicalplan.run_components(command, *args)
elif command == 'spouts':
return logicalplan.run_spouts(command, *args)
elif command == 'bolts':
return logicalplan.run_bolts(command, *args)
# help
elif command == 'help':
return help.run(command, *args)
# version
elif command == 'version':
return version.run(command, *args)
return 1
def inverse_gaussian_gradient(image, alpha=100.0, sigma=5.0):
"""Inverse of gradient magnitude.
Compute the magnitude of the gradients in the image and then invert the
result in the range [0, 1]. Flat areas are assigned values close to 1,
while areas close to borders are assigned values close to 0.
This function or a similar one defined by the user should be applied over
the image as a preprocessing step before calling
`morphological_geodesic_active_contour`.
Parameters
----------
image : (M, N) or (L, M, N) array
Grayscale image or volume.
alpha : float, optional
Controls the steepness of the inversion. A larger value will make the
transition between the flat areas and border areas steeper in the
resulting array.
sigma : float, optional
Standard deviation of the Gaussian filter applied over the image.
Returns
-------
gimage : (M, N) or (L, M, N) array
Preprocessed image (or volume) suitable for
`morphological_geodesic_active_contour`.
"""
gradnorm = ndi.gaussian_gradient_magnitude(image, sigma, mode='nearest')
return 1.0 / np.sqrt(1.0 + alpha * gradnorm)
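A quick smoke test of the preprocessing step, inlining the same computation with scipy.ndimage and numpy:

import numpy as np
import scipy.ndimage as ndi

rng = np.random.default_rng(0)
image = rng.random((64, 64))
gradnorm = ndi.gaussian_gradient_magnitude(image, 5.0, mode='nearest')
gimage = 1.0 / np.sqrt(1.0 + 100.0 * gradnorm)
print(gimage.min(), gimage.max())  # values lie in (0, 1]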
def update_ssl_termination(self, securePort=None, enabled=None,
secureTrafficOnly=None):
"""
Updates existing SSL termination information for the load balancer
without affecting the existing certificates/keys.
"""
return self.manager.update_ssl_termination(self, securePort=securePort,
enabled=enabled, secureTrafficOnly=secureTrafficOnly)
def handle_left_double_click(self, info):
"""Whatever we want to do, when the VideoWidget has been double-clicked with the left button
"""
if not self.double_click_focus: # turn focus on
print(self.pre, "handle_left_double_click: focus on")
self.cb_focus()
else: # turn focus off
print(self.pre, "handle_left_double_click: focus off")
self.cb_unfocus()
self.double_click_focus = not self.double_click_focus
def _parse_guild_disband_info(self, info_container):
"""
Parses the guild's disband info, if available.
Parameters
----------
info_container: :class:`bs4.Tag`
The parsed content of the information container.
"""
m = disband_regex.search(info_container.text)
if m:
self.disband_condition = m.group(2)
self.disband_date = parse_tibia_date(m.group(1).replace("\xa0", " "))
def register(self, resource_class, content_type, configuration=None):
"""
Registers a representer factory for the given combination of resource
class and content type.
:param configuration: representer configuration. A default instance
will be created if this is not given.
:type configuration:
:class:`everest.representers.config.RepresenterConfiguration`
"""
if not issubclass(resource_class, Resource):
raise ValueError('Representers can only be registered for '
'resource classes (got: %s).' % resource_class)
if content_type not in self.__rpr_classes:
raise ValueError('No representer class has been registered for '
'content type "%s".' % content_type)
# Register a factory resource -> representer for the given combination
# of resource class and content type.
rpr_cls = self.__rpr_classes[content_type]
self.__rpr_factories[(resource_class, content_type)] = \
rpr_cls.create_from_resource_class
if issubclass(rpr_cls, MappingResourceRepresenter):
# Create or update an attribute mapping.
mp_reg = self.__mp_regs[content_type]
mp = mp_reg.find_mapping(resource_class)
if mp is None:
# No mapping was registered yet for this resource class or any
# of its base classes; create a new one on the fly.
new_mp = mp_reg.create_mapping(resource_class, configuration)
elif configuration is not None:
if resource_class is mp.mapped_class:
# We have additional configuration for an existing mapping.
mp.configuration.update(configuration)
new_mp = mp
else:
# We have a derived class with additional configuration.
new_mp = mp_reg.create_mapping(
resource_class,
configuration=mp.configuration)
new_mp.configuration.update(configuration)
elif resource_class is not mp.mapped_class:
# We have a derived class without additional configuration.
new_mp = mp_reg.create_mapping(resource_class,
configuration=mp.configuration)
else:
# We found a dynamically created mapping for the right class
# without additional configuration; do not create a new one.
new_mp = None
if new_mp is not None:
# Store the new (or updated) mapping.
mp_reg.set_mapping(new_mp)
def speaker_durations(utterances: List[Utterance]) -> List[Tuple[str, int]]:
""" Takes a list of utterances and itemizes them by speaker, returning a
list of tuples of the form (Speaker Name, duration).
"""
speaker_utters = make_speaker_utters(utterances)
speaker_duration_tuples = [] # type: List[Tuple[str, int]]
for speaker in speaker_utters:
speaker_duration_tuples.append((speaker, total_duration(speaker_utters[speaker])))
return speaker_duration_tuples
def sof(self):
"""
First start of frame (SOFn) marker in this sequence.
"""
for m in self._markers:
if m.marker_code in JPEG_MARKER_CODE.SOF_MARKER_CODES:
return m
raise KeyError('no start of frame (SOFn) marker in image')
def execute(self, env, args):
""" Creates a new task.
`env`
Runtime ``Environment`` instance.
`args`
Arguments object from arg parser.
"""
task_name = args.task_name
clone_task = args.clone_task
if not env.task.create(task_name, clone_task):
raise errors.FocusError(u'Could not create task "{0}"'
.format(task_name))
# open in task config in editor
if not args.skip_edit:
task_config = env.task.get_config_path(task_name)
if not _edit_task_config(env, task_config, confirm=True):
raise errors.FocusError(u'Could not open task config: {0}'
.format(task_config))
def swap_twitter_subject(subject, body):
"""If subject starts from 'Tweet from...'
then we need to get first meaning line from the body."""
if subject.startswith('Tweet from'):
lines = body.split('\n')
for idx, line in enumerate(lines):
if re.match(r'.*, ?\d{2}:\d{2}]]', line) is not None:
try:
subject = lines[idx + 1]
except IndexError:
pass
break
return subject, body
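For illustration, a made-up body in the shape the regex expects (a line ending in ', HH:MM]]' followed by the tweet text):

subject = 'Tweet from @someone'
body = 'Forwarded message\n[[Oct 01, 12:34]]\nActual tweet text\nfooter'
print(swap_twitter_subject(subject, body)[0])  # 'Actual tweet text'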
def get_profile_dir():
"""Return path where all profiles of current user are stored."""
if os.name == 'nt':
basedir = unicode(os.environ["APPDATA"], nt_filename_encoding)
dirpath = os.path.join(basedir, u"Mozilla", u"Firefox", u"Profiles")
elif os.name == 'posix':
basedir = unicode(os.environ["HOME"])
dirpath = os.path.join(basedir, u".mozilla", u"firefox")
return dirpath
def setItemStyle(self, itemStyle):
"""
Sets the item style that will be used for this widget. If you are
trying to set a style on an item that has children, make sure to turn
off the useGroupStyleWithChildren option, or it will always display as
a group.
:param itemStyle | <XGanttWidgetItem.ItemStyle>
"""
self._itemStyle = itemStyle
# initialize the group icon for group style
if itemStyle == XGanttWidgetItem.ItemStyle.Group and \
self.icon(0).isNull():
ico = projexui.resources.find('img/folder_close.png')
expand_ico = projexui.resources.find('img/folder_open.png')
self.setIcon(0, QIcon(ico))
self.setExpandedIcon(0, QIcon(expand_ico))
def clamped(self, point_or_rect):
"""
Returns the point or rectangle clamped to this rectangle.
"""
if isinstance(point_or_rect, Rect):
# clamp the other rectangle's corners into this rectangle
return Rect(np.clip(point_or_rect.mins, self.mins, self.maxes),
            np.clip(point_or_rect.maxes, self.mins, self.maxes))
return np.clip(point_or_rect, self.mins, self.maxes)
def _bracket_exact_exec(self, symbol):
"""Checks builtin, local and global executable collections for the
specified symbol and returns it as soon as it is found."""
if symbol in self.context.module.executables:
return self.context.module.executables[symbol]
if symbol in self.context.module.interfaces:
return self.context.module.interfaces[symbol]
if symbol in cache.builtin:
return cache.builtin[symbol]
#Loop through all the dependencies of the current module and see
#if one of them is the method we are looking for.
return self.context.module.get_dependency_element(symbol)
def save_project(self, project):
""" Called when project is saved/updated. """
pid = project.pid
# project created
# project updated
if project.is_active:
# project is not deleted
logger.debug("project is active")
ds_project = self.get_project(pid)
if ds_project is None:
self._create_project(pid)
# update project meta information
name = self._truncate(project.name, 40)
self._set_project(pid, name, project.institute)
else:
# project is deleted
logger.debug("project is not active")
ds_project = self.get_project(pid)
if ds_project is not None:
self._delete_project(pid)
return
def vline_score(self, x, ymin, ymax):
"""Returns the number of unbroken paths of qubits
>>> [(x,y,1,k) for y in range(ymin,ymax+1)]
for :math:`k = 0,1,\cdots,L-1`. This is precomputed for speed.
"""
return self._vline_score[x, ymin, ymax]
def convert_to(obj, ac_ordered=False, ac_dict=None, **options):
"""
    Convert a mapping object to a dict or object of 'to_type' recursively.
Borrowed basic idea and implementation from bunch.unbunchify. (bunch is
distributed under MIT license same as this.)
    :param obj: A mapping object or other primitive object
:param ac_ordered: Use OrderedDict instead of dict to keep order of items
:param ac_dict: Callable to convert 'obj' to mapping object
:param options: Optional keyword arguments.
:return: A dict or OrderedDict or object of 'cls'
>>> OD = anyconfig.compat.OrderedDict
>>> convert_to(OD((('a', 1) ,)), cls=dict)
{'a': 1}
>>> convert_to(OD((('a', OD((('b', OD((('c', 1), ))), ))), )), cls=dict)
{'a': {'b': {'c': 1}}}
"""
options.update(ac_ordered=ac_ordered, ac_dict=ac_dict)
if anyconfig.utils.is_dict_like(obj):
return _make_recur(obj, convert_to, **options)
if anyconfig.utils.is_list_like(obj):
return _make_iter(obj, convert_to, **options)
return obj
|
Convert a mapping object to a dict or object of 'to_type' recursively.
Borrowed basic idea and implementation from bunch.unbunchify. (bunch is
distributed under MIT license same as this.)
:param obj: A mapping object or other primitive object
:param ac_ordered: Use OrderedDict instead of dict to keep order of items
:param ac_dict: Callable to convert 'obj' to mapping object
:param options: Optional keyword arguments.
:return: A dict or OrderedDict or object of 'cls'
>>> OD = anyconfig.compat.OrderedDict
>>> convert_to(OD((('a', 1) ,)), cls=dict)
{'a': 1}
>>> convert_to(OD((('a', OD((('b', OD((('c', 1), ))), ))), )), cls=dict)
{'a': {'b': {'c': 1}}}
|
def get_child_repository_ids(self, repository_id):
"""Gets the ``Ids`` of the children of the given repository.
arg: repository_id (osid.id.Id): the ``Id`` to query
return: (osid.id.IdList) - the children of the repository
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_child_bin_ids
if self._catalog_session is not None:
return self._catalog_session.get_child_catalog_ids(catalog_id=repository_id)
return self._hierarchy_session.get_children(id_=repository_id)
|
Gets the ``Ids`` of the children of the given repository.
arg: repository_id (osid.id.Id): the ``Id`` to query
return: (osid.id.IdList) - the children of the repository
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
|
def _preoptimize_model(self, initials, method):
""" Preoptimizes the model by estimating a static model, then a quick search of good dynamic parameters
Parameters
----------
initials : np.array
            A vector of initial values
method : str
One of 'MLE' or 'PML' (the optimization options)
Returns
----------
        best_start : np.array
            Vector of the best starting values found
"""
# Random search for good starting values
start_values = []
start_values.append(np.ones(len(self.X_names))*-2.0)
start_values.append(np.ones(len(self.X_names))*-3.0)
start_values.append(np.ones(len(self.X_names))*-4.0)
start_values.append(np.ones(len(self.X_names))*-5.0)
best_start = self.latent_variables.get_z_starting_values()
best_lik = self.neg_loglik(self.latent_variables.get_z_starting_values())
proposal_start = best_start.copy()
for start in start_values:
proposal_start[:len(self.X_names)] = start
proposal_likelihood = self.neg_loglik(proposal_start)
if proposal_likelihood < best_lik:
best_lik = proposal_likelihood
best_start = proposal_start.copy()
return best_start
|
Preoptimizes the model by estimating a static model, then a quick search of good dynamic parameters
Parameters
----------
initials : np.array
A vector of initial values
method : str
One of 'MLE' or 'PML' (the optimization options)
Returns
----------
best_start : np.array
    Vector of the best starting values found
|
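The candidate-search pattern above, sketched in isolation; neg_loglik here is a hypothetical stand-in objective, not the model's real likelihood:
import numpy as np
def neg_loglik(z):
    # toy objective with its minimum at z == -3
    return float(np.sum((z + 3.0) ** 2))
best_start = np.zeros(4)
best_lik = neg_loglik(best_start)
for scale in (-2.0, -3.0, -4.0, -5.0):
    proposal = np.ones(4) * scale
    lik = neg_loglik(proposal)
    if lik < best_lik:
        best_lik, best_start = lik, proposal.copy()
print(best_start)  # [-3. -3. -3. -3.]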
def getcellvalue(self, window_name, object_name, row_index, column=0):
"""
Get cell value
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param row_index: Row index to get
@type row_index: integer
@param column: Column index to get, default value 0
@type column: integer
@return: cell value on success.
@rtype: string
"""
object_handle = self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
count = len(object_handle.AXRows)
        if row_index < 0 or row_index >= count:
            raise LdtpServerException('Row index out of range: %d' % row_index)
cell = object_handle.AXRows[row_index]
count = len(cell.AXChildren)
        if column < 0 or column >= count:
            raise LdtpServerException('Column index out of range: %d' % column)
obj = cell.AXChildren[column]
if not re.search("AXColumn", obj.AXRole):
obj = cell.AXChildren[column]
return obj.AXValue
|
Get cell value
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param row_index: Row index to get
@type row_index: integer
@param column: Column index to get, default value 0
@type column: integer
@return: cell value on success.
@rtype: string
|
def batched_expiration_maintenance(self, elapsed_time):
""" Batched version of expiration_maintenance()
Cython version
"""
num_iterations = self.num_batched_maintenance(elapsed_time)
self.refresh_head, nonzero = maintenance(self.cellarray, self.nbr_bits, num_iterations, self.refresh_head)
if num_iterations != 0:
self.estimate_z = float(nonzero) / float(num_iterations)
self._estimate_count()
processed_interval = num_iterations * self.compute_refresh_time()
return processed_interval
|
Batched version of expiration_maintenance()
Cython version
|
def normalize_arxiv_category(category):
"""Normalize arXiv category to be schema compliant.
This properly capitalizes the category and replaces the dash by a dot if
    needed. If the category is obsolete, it also gets converted to its
current equivalent.
Example:
>>> from inspire_schemas.utils import normalize_arxiv_category
>>> normalize_arxiv_category('funct-an') # doctest: +SKIP
u'math.FA'
"""
category = _NEW_CATEGORIES.get(category.lower(), category)
for valid_category in valid_arxiv_categories():
if (category.lower() == valid_category.lower() or
category.lower().replace('-', '.') == valid_category.lower()):
return valid_category
return category
|
Normalize arXiv category to be schema compliant.
This properly capitalizes the category and replaces the dash by a dot if
needed. If the category is obsolete, it also gets converted to its
current equivalent.
Example:
>>> from inspire_schemas.utils import normalize_arxiv_category
>>> normalize_arxiv_category('funct-an') # doctest: +SKIP
u'math.FA'
|
def func_from_info(self):
"""Find and return a callable object from a task info dictionary"""
info = self.funcinfo
functype = info['func_type']
if functype in ['instancemethod', 'classmethod', 'staticmethod']:
the_modelclass = get_module_member_by_dottedpath(info['class_path'])
if functype == 'instancemethod':
the_modelobject = the_modelclass.objects.get(pk=info['model_pk'])
the_callable = get_member(the_modelobject, info['func_name'])
else:
the_callable = get_member(the_modelclass, info['func_name'])
return the_callable
elif functype == 'function':
mod = import_module(info['module_name'])
the_callable = get_member(mod, info['func_name'])
return the_callable
else:
raise ValueError(f"Unknown functype '{functype} in task {self.pk} ({self.label})")
|
Find and return a callable object from a task info dictionary
|
def exit_fullscreen(self):
"""
Invoke before printing out anything.
This method should be replaced by or merged to blessings package
"""
self.term.stream.write(self.term.exit_fullscreen)
self.term.stream.write(self.term.normal_cursor)
|
Invoke before printing out anything.
This method should be replaced by or merged to blessings package
|
def has_successor(self, u, v, t=None):
"""Return True if node u has successor v at time t (optional).
This is true if graph has the edge u->v.
Parameters
----------
u, v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
t : snapshot id (default=None)
            If None, the presence of the interaction on the flattened graph will be returned.
"""
return self.has_interaction(u, v, t)
|
Return True if node u has successor v at time t (optional).
This is true if graph has the edge u->v.
Parameters
----------
u, v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
t : snapshot id (default=None)
If None, the presence of the interaction on the flattened graph will be returned.
|
def one_or_more(
schema: dict, unique_items: bool = True, min: int = 1, max: int = None
) -> dict:
"""
Helper function to construct a schema that validates items matching
`schema` or an array containing items matching `schema`.
:param schema: The schema to use
:param unique_items: Flag if array items should be unique
    :param min: Correlates to ``minItems`` attribute of JSON Schema array
    :param max: Correlates to ``maxItems`` attribute of JSON Schema array
"""
multi_schema = {
"type": "array",
"items": schema,
"minItems": min,
"uniqueItems": unique_items,
}
    if max is not None:
multi_schema["maxItems"] = max
return {"oneOf": [multi_schema, schema]}
|
Helper function to construct a schema that validates items matching
`schema` or an array containing items matching `schema`.
:param schema: The schema to use
:param unique_items: Flag if array items should be unique
:param min: Correlates to ``minItems`` attribute of JSON Schema array
:param max: Correlates to ``maxItems`` attribute of JSON Schema array
|
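Example use of the one_or_more helper defined above; validating against the returned schema with the jsonschema package is an assumption, not something the helper itself requires:
import jsonschema
str_or_strs = one_or_more({"type": "string"}, max=3)
jsonschema.validate("a", str_or_strs)         # a single matching item passes
jsonschema.validate(["a", "b"], str_or_strs)  # so does an array of matching items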
def _calc_inst_pmf(self):
"""Calculate the epsilon-greedy instrumental distribution"""
# Easy vars
t = self.t_
epsilon = self.epsilon
alpha = self.alpha
preds = self._preds_avg_in_strata
weights = self.strata.weights_[:,np.newaxis]
p1 = self._BB_model.theta_[:,np.newaxis]
p0 = 1 - p1
if t==0:
F = self._F_guess[self.opt_class]
else:
F = self._estimate[t - 1, self.opt_class]
# Fill in non-finite estimates with the initial guess
nonfinite = ~np.isfinite(F)
F[nonfinite] = self._F_guess[self.opt_class][nonfinite]
# Calculate optimal instrumental pmf
sqrt_arg = np.sum(preds * (alpha**2 * F**2 * p0 + (1 - F)**2 * p1) + \
(1 - preds) * (1 - alpha)**2 * F**2 * p1, \
axis=1, keepdims=True) #: sum is over classifiers
inst_pmf = weights * np.sqrt(sqrt_arg)
# Normalize
inst_pmf /= np.sum(inst_pmf)
# Epsilon-greedy: (1 - epsilon) q + epsilon * p
inst_pmf *= (1 - epsilon)
inst_pmf += epsilon * weights
if self.record_inst_hist:
self._inst_pmf[:,t] = inst_pmf.ravel()
else:
self._inst_pmf = inst_pmf.ravel()
|
Calculate the epsilon-greedy instrumental distribution
|
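The epsilon-greedy mixing step at the end, shown with made-up numbers: blend the optimal pmf q toward the stratum weights p as (1 - epsilon) * q + epsilon * p:
import numpy as np
epsilon = 0.1
q = np.array([0.7, 0.2, 0.1])  # optimal instrumental pmf (already normalized)
p = np.array([0.5, 0.3, 0.2])  # stratum weights
inst_pmf = (1 - epsilon) * q + epsilon * p
print(inst_pmf)  # [0.68 0.21 0.11], still sums to 1 up to float rounding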
def reloadFileAtIndex(self, itemIndex, rtiClass=None):
""" Reloads the item at the index by removing the repo tree item and inserting a new one.
        The new item will be of type rtiClass. If rtiClass is None (the default), the
new rtiClass will be the same as the old one.
"""
fileRtiParentIndex = itemIndex.parent()
fileRti = self.getItem(itemIndex)
position = fileRti.childNumber()
fileName = fileRti.fileName
if rtiClass is None:
rtiClass = type(fileRti)
# Delete old RTI and Insert a new one instead.
self.deleteItemAtIndex(itemIndex) # this will close the items resources.
return self.loadFile(fileName, rtiClass, position=position, parentIndex=fileRtiParentIndex)
|
Reloads the item at the index by removing the repo tree item and inserting a new one.
The new item will be of type rtiClass. If rtiClass is None (the default), the
new rtiClass will be the same as the old one.
|
def update_config(self):
"""
Update the configuration files according to the current
in-memory SExtractor configuration.
"""
# -- Write filter configuration file
# First check the filter itself
filter = self.config['FILTER_MASK']
rows = len(filter)
cols = len(filter[0]) # May raise ValueError, OK
filter_f = __builtin__.open(self.config['FILTER_NAME'], 'w')
filter_f.write("CONV NORM\n")
filter_f.write("# %dx%d Generated from sextractor.py module.\n" %
(rows, cols))
for row in filter:
filter_f.write(" ".join(map(repr, row)))
filter_f.write("\n")
filter_f.close()
# -- Write parameter list file
parameters_f = __builtin__.open(self.config['PARAMETERS_NAME'], 'w')
for parameter in self.config['PARAMETERS_LIST']:
print(parameter, file=parameters_f)
parameters_f.close()
# -- Write NNW configuration file
nnw_f = __builtin__.open(self.config['STARNNW_NAME'], 'w')
nnw_f.write(nnw_config)
nnw_f.close()
# -- Write main configuration file
main_f = __builtin__.open(self.config['CONFIG_FILE'], 'w')
for key in self.config.keys():
if (key in SExtractor._SE_config_special_keys):
continue
if (key == "PHOT_AUTOPARAMS"): # tuple instead of a single value
value = " ".join(map(str, self.config[key]))
else:
value = str(self.config[key])
print(("%-16s %-16s # %s" % (key, value, SExtractor._SE_config[key]['comment'])), file=main_f)
main_f.close()
|
Update the configuration files according to the current
in-memory SExtractor configuration.
|
def create_reserved_ip_address(self, name, label=None, location=None):
'''
Reserves an IPv4 address for the specified subscription.
name:
Required. Specifies the name for the reserved IP address.
label:
Optional. Specifies a label for the reserved IP address. The label
can be up to 100 characters long and can be used for your tracking
purposes.
location:
Required. Specifies the location of the reserved IP address. This
should be the same location that is assigned to the cloud service
containing the deployment that will use the reserved IP address.
To see the available locations, you can use list_locations.
'''
_validate_not_none('name', name)
return self._perform_post(
self._get_reserved_ip_path(),
_XmlSerializer.create_reserved_ip_to_xml(name, label, location),
as_async=True)
|
Reserves an IPv4 address for the specified subscription.
name:
Required. Specifies the name for the reserved IP address.
label:
Optional. Specifies a label for the reserved IP address. The label
can be up to 100 characters long and can be used for your tracking
purposes.
location:
Required. Specifies the location of the reserved IP address. This
should be the same location that is assigned to the cloud service
containing the deployment that will use the reserved IP address.
To see the available locations, you can use list_locations.
|
def getAllMetadata(self, remote, address):
"""Get all metadata of device"""
if self._server is not None:
return self._server.getAllMetadata(remote, address)
|
Get all metadata of device
|
def detect(self, text):
"""Detect language of the input text
:param text: The source text(s) whose language you want to identify.
Batch detection is supported via sequence input.
:type text: UTF-8 :class:`str`; :class:`unicode`; string sequence (list, tuple, iterator, generator)
:rtype: Detected
:rtype: :class:`list` (when a list is passed)
Basic usage:
>>> from googletrans import Translator
>>> translator = Translator()
>>> translator.detect('이 문장은 한글로 쓰여졌습니다.')
<Detected lang=ko confidence=0.27041003>
>>> translator.detect('この文章は日本語で書かれました。')
<Detected lang=ja confidence=0.64889508>
>>> translator.detect('This sentence is written in English.')
<Detected lang=en confidence=0.22348526>
>>> translator.detect('Tiu frazo estas skribita en Esperanto.')
<Detected lang=eo confidence=0.10538048>
Advanced usage:
>>> langs = translator.detect(['한국어', '日本語', 'English', 'le français'])
>>> for lang in langs:
... print(lang.lang, lang.confidence)
ko 1
ja 0.92929292
en 0.96954316
fr 0.043500196
"""
if isinstance(text, list):
result = []
for item in text:
lang = self.detect(item)
result.append(lang)
return result
data = self._translate(text, dest='en', src='auto')
# actual source language that will be recognized by Google Translator when the
# src passed is equal to auto.
src = ''
confidence = 0.0
try:
src = ''.join(data[8][0])
confidence = data[8][-2][0]
except Exception: # pragma: nocover
pass
result = Detected(lang=src, confidence=confidence)
return result
|
Detect language of the input text
:param text: The source text(s) whose language you want to identify.
Batch detection is supported via sequence input.
:type text: UTF-8 :class:`str`; :class:`unicode`; string sequence (list, tuple, iterator, generator)
:rtype: Detected
:rtype: :class:`list` (when a list is passed)
Basic usage:
>>> from googletrans import Translator
>>> translator = Translator()
>>> translator.detect('이 문장은 한글로 쓰여졌습니다.')
<Detected lang=ko confidence=0.27041003>
>>> translator.detect('この文章は日本語で書かれました。')
<Detected lang=ja confidence=0.64889508>
>>> translator.detect('This sentence is written in English.')
<Detected lang=en confidence=0.22348526>
>>> translator.detect('Tiu frazo estas skribita en Esperanto.')
<Detected lang=eo confidence=0.10538048>
Advanced usage:
>>> langs = translator.detect(['한국어', '日本語', 'English', 'le français'])
>>> for lang in langs:
... print(lang.lang, lang.confidence)
ko 1
ja 0.92929292
en 0.96954316
fr 0.043500196
|
def _format_disk_metrics(self, metrics):
"""Cast the disk stats to float and convert them to bytes"""
for name, raw_val in metrics.iteritems():
if raw_val:
match = DISK_STATS_RE.search(raw_val)
if match is None or len(match.groups()) != 2:
                    self.log.warning('Can\'t parse value %s for disk metric %s. Dropping it.' % (raw_val, name))
                    metrics[name] = None
                    continue  # match is unusable; skip to the next metric
                val, unit = match.groups()
# by default some are uppercased others lowercased. That's error prone.
unit = unit.lower()
try:
val = int(float(val) * UNIT_MAP[unit])
metrics[name] = val
except KeyError:
self.log.error('Unrecognized unit %s for disk metric %s. Dropping it.' % (unit, name))
metrics[name] = None
return metrics
|
Cast the disk stats to float and convert them to bytes
|
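DISK_STATS_RE and UNIT_MAP are defined elsewhere in that module; a plausible sketch of their shape, sufficient to exercise the conversion above:
import re
DISK_STATS_RE = re.compile(r'([0-9.]+)\s*([KMGTkmgt]?[Bb])')
UNIT_MAP = {'b': 1, 'kb': 1024, 'mb': 1024 ** 2, 'gb': 1024 ** 3, 'tb': 1024 ** 4}
val, unit = DISK_STATS_RE.search('12.5 GB').groups()
print(int(float(val) * UNIT_MAP[unit.lower()]))  # 13421772800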
def save(self, filename, config):
"""Loads a config from disk"""
return open(os.path.expanduser(filename), 'w').write(json.dumps(config, cls=HCEncoder, sort_keys=True, indent=2, separators=(',', ': ')))
|
Saves a config to disk.
|
def to_fixed(stype):
""" Returns the instruction sequence for converting the given
type stored in DE,HL to fixed DE,HL.
"""
output = [] # List of instructions
if is_int_type(stype):
output = to_word(stype)
output.append('ex de, hl')
output.append('ld hl, 0') # 'Truncate' the fixed point
elif stype == 'f':
output.append('call __FTOF16REG')
REQUIRES.add('ftof16reg.asm')
return output
|
Returns the instruction sequence for converting the given
type stored in DE,HL to fixed DE,HL.
|
def clear_history(pymux, variables):
" Clear scrollback buffer. "
pane = pymux.arrangement.get_active_pane()
if pane.display_scroll_buffer:
raise CommandException('Not available in copy mode')
else:
pane.process.screen.clear_history()
|
Clear scrollback buffer.
|
def bids_to_you(self):
'''
Get bids made to you
@return: [[player,owner,team,money,date,datechange,status],]
'''
headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain",'Referer': 'http://'+self.domain+'/team_news.phtml',"User-Agent": user_agent}
req = self.session.get('http://'+self.domain+'/exchangemarket.phtml?viewoffers_x=',headers=headers).content
soup = BeautifulSoup(req)
table = []
        for i in soup.find('table', {'class': 'tablecontent03'}).find_all('tr')[1:]:
player,owner,team,price,bid_date,trans_date,status = self._parse_bid_table(i)
table.append([player,owner,team,price,bid_date,trans_date,status])
return table
|
Get bids made to you
@return: [[player,owner,team,money,date,datechange,status],]
|
def train(self, data_iterator):
"""Train a keras model on a worker and send asynchronous updates
to parameter server
"""
feature_iterator, label_iterator = tee(data_iterator, 2)
x_train = np.asarray([x for x, y in feature_iterator])
y_train = np.asarray([y for x, y in label_iterator])
if x_train.size == 0:
return
optimizer = get_optimizer(self.master_optimizer)
self.model = model_from_yaml(self.yaml, self.custom_objects)
self.model.compile(optimizer=optimizer,
loss=self.master_loss, metrics=self.master_metrics)
self.model.set_weights(self.parameters.value)
epochs = self.train_config['epochs']
batch_size = self.train_config.get('batch_size')
nb_train_sample = x_train.shape[0]
nb_batch = int(np.ceil(nb_train_sample / float(batch_size)))
index_array = np.arange(nb_train_sample)
batches = [
(i * batch_size, min(nb_train_sample, (i + 1) * batch_size))
for i in range(0, nb_batch)
]
if self.frequency == 'epoch':
for epoch in range(epochs):
weights_before_training = self.client.get_parameters()
self.model.set_weights(weights_before_training)
self.train_config['epochs'] = 1
if x_train.shape[0] > batch_size:
self.model.fit(x_train, y_train, **self.train_config)
self.train_config['epochs'] = epochs
weights_after_training = self.model.get_weights()
deltas = subtract_params(
weights_before_training, weights_after_training)
self.client.update_parameters(deltas)
elif self.frequency == 'batch':
for epoch in range(epochs):
if x_train.shape[0] > batch_size:
for (batch_start, batch_end) in batches:
weights_before_training = self.client.get_parameters()
self.model.set_weights(weights_before_training)
batch_ids = index_array[batch_start:batch_end]
x = slice_arrays(x_train, batch_ids)
y = slice_arrays(y_train, batch_ids)
self.model.train_on_batch(x, y)
weights_after_training = self.model.get_weights()
deltas = subtract_params(
weights_before_training, weights_after_training)
self.client.update_parameters(deltas)
else:
raise ValueError(
                'frequency parameter can be `epoch` or `batch`, got {}'.format(self.frequency))
yield []
|
Train a keras model on a worker and send asynchronous updates
to parameter server
|
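subtract_params is imported elsewhere; a minimal sketch consistent with how it is used above (an elementwise delta between two lists of weight arrays):
import numpy as np
def subtract_params(before, after):
    return [b - a for b, a in zip(before, after)]
before = [np.ones((2, 2)), np.zeros(3)]
after = [np.full((2, 2), 0.5), np.ones(3)]
print(subtract_params(before, after)[1])  # [-1. -1. -1.]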
def _detect(self):
""" Detect uninitialized storage variables
Recursively visit the calls
Returns:
            list: JSON results, one per uninitialized storage variable found
"""
results = []
self.results = []
self.visited_all_paths = {}
for contract in self.slither.contracts:
for function in contract.functions:
if function.is_implemented:
uninitialized_storage_variables = [v for v in function.local_variables if v.is_storage and v.uninitialized]
function.entry_point.context[self.key] = uninitialized_storage_variables
self._detect_uninitialized(function, function.entry_point, [])
for(function, uninitialized_storage_variable) in self.results:
var_name = uninitialized_storage_variable.name
info = "{} in {}.{} ({}) is a storage variable never initialiazed\n"
info = info.format(var_name, function.contract.name, function.name, uninitialized_storage_variable.source_mapping_str)
json = self.generate_json_result(info)
self.add_variable_to_json(uninitialized_storage_variable, json)
self.add_function_to_json(function, json)
results.append(json)
return results
|
Detect uninitialized storage variables
Recursively visit the calls
Returns:
list: JSON results, one per uninitialized storage variable found
|
def ack(self, frame):
"""
Handles the ACK command: Acknowledges receipt of a message.
"""
if not frame.message_id:
raise ProtocolError("No message-id specified for ACK command.")
self.engine.queue_manager.ack(self.engine.connection, frame)
|
Handles the ACK command: Acknowledges receipt of a message.
|
def _non_idempotent_tasks(self, output):
"""
Parses the output to identify the non idempotent tasks.
:param (str) output: A string containing the output of the ansible run.
:return: A list containing the names of the non idempotent tasks.
"""
# Remove blank lines to make regex matches easier.
output = re.sub(r'\n\s*\n*', '\n', output)
# Remove ansi escape sequences.
output = util.strip_ansi_escape(output)
# Split the output into a list and go through it.
output_lines = output.split('\n')
res = []
task_line = ''
        for line in output_lines:
if line.startswith('TASK'):
task_line = line
elif line.startswith('changed'):
host_name = re.search(r'\[(.*)\]', line).groups()[0]
task_name = re.search(r'\[(.*)\]', task_line).groups()[0]
res.append(u'* [{}] => {}'.format(host_name, task_name))
return res
|
Parses the output to identify the non idempotent tasks.
:param (str) output: A string containing the output of the ansible run.
:return: A list containing the names of the non idempotent tasks.
|
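The parsing logic above, exercised on a two-line sample of (hypothetical) ansible output:
import re
output_lines = ['TASK [install packages] ***', 'changed: [web01]']
task_line, res = '', []
for line in output_lines:
    if line.startswith('TASK'):
        task_line = line
    elif line.startswith('changed'):
        host_name = re.search(r'\[(.*)\]', line).groups()[0]
        task_name = re.search(r'\[(.*)\]', task_line).groups()[0]
        res.append(u'* [{}] => {}'.format(host_name, task_name))
print(res)  # ['* [web01] => install packages']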
def columns_by_index(self) -> Dict[str, List[Well]]:
"""
Accessor function used to navigate through a labware by column name.
With indexing one can treat it as a typical python dictionary.
        To access column 1, for example,
        simply write: labware.columns_by_index()['1']
This will output ['A1', 'B1', 'C1', 'D1'...].
:return: Dictionary of Well lists keyed by column name
"""
col_dict = self._create_indexed_dictionary(group=2)
return col_dict
|
Accessor function used to navigate through a labware by column name.
With indexing one can treat it as a typical python dictionary.
To access column 1, for example,
simply write: labware.columns_by_index()['1']
This will output ['A1', 'B1', 'C1', 'D1'...].
:return: Dictionary of Well lists keyed by column name
|
def flip_iterable_dict(d: dict) -> dict:
"""Transform dictionary to unpack values to map to respective key."""
value_keys = disjoint_union((cartesian_product((v, k))
for k, v in d.items()))
return dict(value_keys)
|
Transform dictionary to unpack values to map to respective key.
|
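cartesian_product and disjoint_union are helpers from the same module; the net effect, sketched in plain Python for a dict whose values are disjoint iterables:
d = {'a': [1, 2], 'b': [3]}
flipped = {v: k for k, vs in d.items() for v in vs}
print(flipped)  # {1: 'a', 2: 'a', 3: 'b'}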
def _get_log_covariance(self, log_variance_mat, log_expectation_symbols, covariance_matrix, x, y):
r"""
Compute log covariances according to:\\
        :math:`\log{(Cov(x_i,x_j))} = \log\left(1 + \frac{Cov(x_i,x_j)}{\exp\left[\log \mathbb{E}(x_i) + \log \mathbb{E}(x_j) + \frac{1}{2}\left(\log Var(x_i) + \log Var(x_j)\right)\right]}\right)`
:param log_variance_mat: a column matrix of log variance
:param log_expectation_symbols: a column matrix of log expectations
:param covariance_matrix: a matrix of covariances
:param x: x-coordinate in matrix of log variances and log covariances
:param y: y-coordinate in matrix of log variances and log covariances
:return: the log covariance between x and y
"""
# The diagonal of the return matrix includes all the log variances
if x == y:
return log_variance_mat[x, x]
# log covariances are calculated if not on the diagonal of the return matrix
elif self.is_multivariate:
denom = sp.exp(log_expectation_symbols[x] +
log_expectation_symbols[y] +
(log_variance_mat[x, x] + log_variance_mat[y, y])/ sp.Integer(2))
return sp.log(sp.Integer(1) + covariance_matrix[x, y] / denom)
# univariate case: log covariances are 0s.
else:
return sp.Integer(0)
|
r"""
Compute log covariances according to:\\
:math:`\log{(Cov(x_i,x_j))} = \log\left(1 + \frac{Cov(x_i,x_j)}{\exp\left[\log \mathbb{E}(x_i) + \log \mathbb{E}(x_j) + \frac{1}{2}\left(\log Var(x_i) + \log Var(x_j)\right)\right]}\right)`
:param log_variance_mat: a column matrix of log variance
:param log_expectation_symbols: a column matrix of log expectations
:param covariance_matrix: a matrix of covariances
:param x: x-coordinate in matrix of log variances and log covariances
:param y: y-coordinate in matrix of log variances and log covariances
:return: the log covariance between x and y
|
def _parse_xml(child, parser):
"""Parses the specified child XML tag and creates a Subroutine or
Function object out of it."""
name, modifiers, dtype, kind = _parse_common(child)
#Handle the symbol modification according to the isense settings.
name = _isense_builtin_symbol(name)
    if child.tag == "subroutine":
        parent = Subroutine(name, modifiers, None)
    elif child.tag == "function":
        parent = Function(name, modifiers, dtype, kind, None)
    else:
        parent = None  # unrecognized tag; avoids a NameError below
    if parent is not None:
for kid in child:
if kid.tag == "parameter":
_parse_parameter(kid, parser, parent)
elif kid.tag == "summary":
_parse_summary(kid, parser, parent)
elif kid.tag == "usage":
_parse_usage(kid, parser, parent)
return parent
|
Parses the specified child XML tag and creates a Subroutine or
Function object out of it.
|
def global_env_valid(env):
"""
Given an env, determine if it's a valid "global" or "mgmt" env as listed in EFConfig
Args:
env: the env to check
Returns:
True if the env is a valid global env in EFConfig
Raises:
ValueError with message if the env is not valid
"""
if env not in EFConfig.ACCOUNT_SCOPED_ENVS:
raise ValueError("Invalid global env: {}; global envs are: {}".format(env, EFConfig.ACCOUNT_SCOPED_ENVS))
return True
|
Given an env, determine if it's a valid "global" or "mgmt" env as listed in EFConfig
Args:
env: the env to check
Returns:
True if the env is a valid global env in EFConfig
Raises:
ValueError with message if the env is not valid
|
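Typical use, assuming 'global' is among EFConfig.ACCOUNT_SCOPED_ENVS in the deployment at hand:
try:
    global_env_valid('global')   # returns True for a valid global env
except ValueError as exc:
    print(exc)                   # message lists the valid global envs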