code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _compile_to_sklearn(self, expr):
"""Compile a DEAP pipeline into a sklearn pipeline.
Parameters
----------
expr: DEAP individual
The DEAP pipeline to be compiled
Returns
-------
sklearn_pipeline: sklearn.pipeline.Pipeline
"""
sklearn_pipeline_str = generate_pipeline_code(expr_to_tree(expr, self._pset), self.operators)
sklearn_pipeline = eval(sklearn_pipeline_str, self.operators_context)
sklearn_pipeline.memory = self._memory
return sklearn_pipeline | Compile a DEAP pipeline into a sklearn pipeline.
Parameters
----------
expr: DEAP individual
The DEAP pipeline to be compiled
Returns
-------
sklearn_pipeline: sklearn.pipeline.Pipeline | Below is the the instruction that describes the task:
### Input:
Compile a DEAP pipeline into a sklearn pipeline.
Parameters
----------
expr: DEAP individual
The DEAP pipeline to be compiled
Returns
-------
sklearn_pipeline: sklearn.pipeline.Pipeline
### Response:
def _compile_to_sklearn(self, expr):
"""Compile a DEAP pipeline into a sklearn pipeline.
Parameters
----------
expr: DEAP individual
The DEAP pipeline to be compiled
Returns
-------
sklearn_pipeline: sklearn.pipeline.Pipeline
"""
sklearn_pipeline_str = generate_pipeline_code(expr_to_tree(expr, self._pset), self.operators)
sklearn_pipeline = eval(sklearn_pipeline_str, self.operators_context)
sklearn_pipeline.memory = self._memory
return sklearn_pipeline |
def aikif_web_menu(cur=''):
""" returns the web page header containing standard AIKIF top level web menu """
pgeHdg = ''
pgeBlurb = ''
if cur == '':
cur = 'Home'
txt = get_header(cur) #"<div id=top_menu>"
txt += '<div id = "container">\n'
txt += ' <div id = "header">\n'
txt += ' <!-- Banner -->\n'
txt += ' <img src = "' + os.path.join('/static','aikif_banner.jpg') + '" alt="AIKIF Banner"/>\n'
txt += ' <ul id = "menu_list">\n'
for m in menu:
if m[1] == cur:
txt += ' <LI id="top_menu_selected"><a href=' + m[0] + '>' + m[1] + '</a></li>\n'
pgeHdg = m[1]
try:
pgeBlurb = m[2]
except Exception:
pass
else:
txt += ' <LI id="top_menu"><a href=' + m[0] + '>' + m[1] + '</a></li>\n'
txt += " </ul>\n </div>\n\n"
txt += '<H1>AIKIF ' + pgeHdg + '</H1>\n'
txt += '<H4>' + pgeBlurb + '</H4>\n'
return txt | returns the web page header containing standard AIKIF top level web menu | Below is the the instruction that describes the task:
### Input:
returns the web page header containing standard AIKIF top level web menu
### Response:
def aikif_web_menu(cur=''):
""" returns the web page header containing standard AIKIF top level web menu """
pgeHdg = ''
pgeBlurb = ''
if cur == '':
cur = 'Home'
txt = get_header(cur) #"<div id=top_menu>"
txt += '<div id = "container">\n'
txt += ' <div id = "header">\n'
txt += ' <!-- Banner -->\n'
txt += ' <img src = "' + os.path.join('/static','aikif_banner.jpg') + '" alt="AIKIF Banner"/>\n'
txt += ' <ul id = "menu_list">\n'
for m in menu:
if m[1] == cur:
txt += ' <LI id="top_menu_selected"><a href=' + m[0] + '>' + m[1] + '</a></li>\n'
pgeHdg = m[1]
try:
pgeBlurb = m[2]
except Exception:
pass
else:
txt += ' <LI id="top_menu"><a href=' + m[0] + '>' + m[1] + '</a></li>\n'
txt += " </ul>\n </div>\n\n"
txt += '<H1>AIKIF ' + pgeHdg + '</H1>\n'
txt += '<H4>' + pgeBlurb + '</H4>\n'
return txt |
def close_threads(self, parent):
"""Close threads associated to parent_id"""
logger.debug("Call ThreadManager's 'close_threads'")
if parent is None:
# Closing all threads
self.pending_threads = []
threadlist = []
for threads in list(self.started_threads.values()):
threadlist += threads
else:
parent_id = id(parent)
self.pending_threads = [(_th, _id) for (_th, _id)
in self.pending_threads
if _id != parent_id]
threadlist = self.started_threads.get(parent_id, [])
for thread in threadlist:
logger.debug("Waiting for thread %r to finish" % thread)
while thread.isRunning():
# We can't terminate thread safely, so we simply wait...
QApplication.processEvents() | Close threads associated to parent_id | Below is the the instruction that describes the task:
### Input:
Close threads associated to parent_id
### Response:
def close_threads(self, parent):
"""Close threads associated to parent_id"""
logger.debug("Call ThreadManager's 'close_threads'")
if parent is None:
# Closing all threads
self.pending_threads = []
threadlist = []
for threads in list(self.started_threads.values()):
threadlist += threads
else:
parent_id = id(parent)
self.pending_threads = [(_th, _id) for (_th, _id)
in self.pending_threads
if _id != parent_id]
threadlist = self.started_threads.get(parent_id, [])
for thread in threadlist:
logger.debug("Waiting for thread %r to finish" % thread)
while thread.isRunning():
# We can't terminate thread safely, so we simply wait...
QApplication.processEvents() |
def transitive_hydrated_targets(build_file_addresses):
"""Given BuildFileAddresses, kicks off recursion on expansion of TransitiveHydratedTargets.
The TransitiveHydratedTarget struct represents a structure-shared graph, which we walk
and flatten here. The engine memoizes the computation of TransitiveHydratedTarget, so
when multiple TransitiveHydratedTargets objects are being constructed for multiple
roots, their structure will be shared.
"""
transitive_hydrated_targets = yield [Get(TransitiveHydratedTarget, Address, a)
for a in build_file_addresses.addresses]
closure = OrderedSet()
to_visit = deque(transitive_hydrated_targets)
while to_visit:
tht = to_visit.popleft()
if tht.root in closure:
continue
closure.add(tht.root)
to_visit.extend(tht.dependencies)
yield TransitiveHydratedTargets(tuple(tht.root for tht in transitive_hydrated_targets), closure) | Given BuildFileAddresses, kicks off recursion on expansion of TransitiveHydratedTargets.
The TransitiveHydratedTarget struct represents a structure-shared graph, which we walk
and flatten here. The engine memoizes the computation of TransitiveHydratedTarget, so
when multiple TransitiveHydratedTargets objects are being constructed for multiple
roots, their structure will be shared. | Below is the the instruction that describes the task:
### Input:
Given BuildFileAddresses, kicks off recursion on expansion of TransitiveHydratedTargets.
The TransitiveHydratedTarget struct represents a structure-shared graph, which we walk
and flatten here. The engine memoizes the computation of TransitiveHydratedTarget, so
when multiple TransitiveHydratedTargets objects are being constructed for multiple
roots, their structure will be shared.
### Response:
def transitive_hydrated_targets(build_file_addresses):
"""Given BuildFileAddresses, kicks off recursion on expansion of TransitiveHydratedTargets.
The TransitiveHydratedTarget struct represents a structure-shared graph, which we walk
and flatten here. The engine memoizes the computation of TransitiveHydratedTarget, so
when multiple TransitiveHydratedTargets objects are being constructed for multiple
roots, their structure will be shared.
"""
transitive_hydrated_targets = yield [Get(TransitiveHydratedTarget, Address, a)
for a in build_file_addresses.addresses]
closure = OrderedSet()
to_visit = deque(transitive_hydrated_targets)
while to_visit:
tht = to_visit.popleft()
if tht.root in closure:
continue
closure.add(tht.root)
to_visit.extend(tht.dependencies)
yield TransitiveHydratedTargets(tuple(tht.root for tht in transitive_hydrated_targets), closure) |
def is_extracted(self, file_path):
"""
Check if the data file is already extracted.
"""
if os.path.isdir(file_path):
self.chatbot.logger.info('File is already extracted')
return True
return False | Check if the data file is already extracted. | Below is the the instruction that describes the task:
### Input:
Check if the data file is already extracted.
### Response:
def is_extracted(self, file_path):
"""
Check if the data file is already extracted.
"""
if os.path.isdir(file_path):
self.chatbot.logger.info('File is already extracted')
return True
return False |
def iaf_hparams(hidden_size=512, filter_size=4096):
"""Create hyperpameters for inverse autoregressive flows.
Args:
hidden_size: Width of attention layers and neural network output layer.
filter_size: Hidden layer width for neural network.
Returns:
hparams: Hyperpameters with basic presets for inverse autoregressive flows.
"""
hparams = common_hparams.basic_params1()
# Attention hyperparameters.
hparams.hidden_size = hidden_size
hparams.add_hparam("attention_key_channels", None)
hparams.add_hparam("attention_value_channels", None)
hparams.add_hparam("num_heads", 4)
hparams.add_hparam("attention_dropout", 0.1)
hparams.add_hparam("shared_rel", False)
hparams.add_hparam("block_width", 1)
hparams.add_hparam("block_length", 1)
hparams.add_hparam("q_filter_width", 1)
hparams.add_hparam("kv_filter_width", 1)
# Preprocessing and postprocesing hyperparameters.
hparams.layer_preprocess_sequence = "n"
hparams.layer_prepostprocess_dropout = 0.1
hparams.norm_type = "layer"
hparams.norm_epsilon = 1e-06
hparams.layer_prepostprocess_dropout_broadcast_dims = ""
hparams.layer_postprocess_sequence = "da"
# Feedforward neural network hyperparameters.
hparams.add_hparam("filter_size", filter_size)
hparams.add_hparam("ffn_layer", "conv_hidden_relu")
hparams.add_hparam("relu_dropout", 0.1)
return hparams | Create hyperpameters for inverse autoregressive flows.
Args:
hidden_size: Width of attention layers and neural network output layer.
filter_size: Hidden layer width for neural network.
Returns:
hparams: Hyperpameters with basic presets for inverse autoregressive flows. | Below is the the instruction that describes the task:
### Input:
Create hyperpameters for inverse autoregressive flows.
Args:
hidden_size: Width of attention layers and neural network output layer.
filter_size: Hidden layer width for neural network.
Returns:
hparams: Hyperpameters with basic presets for inverse autoregressive flows.
### Response:
def iaf_hparams(hidden_size=512, filter_size=4096):
"""Create hyperpameters for inverse autoregressive flows.
Args:
hidden_size: Width of attention layers and neural network output layer.
filter_size: Hidden layer width for neural network.
Returns:
hparams: Hyperpameters with basic presets for inverse autoregressive flows.
"""
hparams = common_hparams.basic_params1()
# Attention hyperparameters.
hparams.hidden_size = hidden_size
hparams.add_hparam("attention_key_channels", None)
hparams.add_hparam("attention_value_channels", None)
hparams.add_hparam("num_heads", 4)
hparams.add_hparam("attention_dropout", 0.1)
hparams.add_hparam("shared_rel", False)
hparams.add_hparam("block_width", 1)
hparams.add_hparam("block_length", 1)
hparams.add_hparam("q_filter_width", 1)
hparams.add_hparam("kv_filter_width", 1)
# Preprocessing and postprocesing hyperparameters.
hparams.layer_preprocess_sequence = "n"
hparams.layer_prepostprocess_dropout = 0.1
hparams.norm_type = "layer"
hparams.norm_epsilon = 1e-06
hparams.layer_prepostprocess_dropout_broadcast_dims = ""
hparams.layer_postprocess_sequence = "da"
# Feedforward neural network hyperparameters.
hparams.add_hparam("filter_size", filter_size)
hparams.add_hparam("ffn_layer", "conv_hidden_relu")
hparams.add_hparam("relu_dropout", 0.1)
return hparams |
def Graph2Pandas_converter(self):
'''Updates self.g or self.path bc you could only choose 1'''
if isinstance(self.path, str) or isinstance(self.path, p):
self.path = str(self.path)
filetype = p(self.path).suffix
if filetype == '.pickle':
self.g = pickle.load(open(self.path, 'rb'))
if isinstance(self.g, rdflib.graph.Graph):
return self.get_sparql_dataframe()
else:
print('WARNING:: function df() wont work unless an ontology source is loaded')
return self.g
elif filetype == '.ttl' or filetype == '.rdf':
self.g = rdflib.Graph()
self.g.parse(self.path, format='turtle')
return self.get_sparql_dataframe()
elif filetype == '.nt':
self.g = rdflib.Graph()
self.g.parse(self.path, format='nt')
return self.get_sparql_dataframe()
elif filetype == '.owl' or filetype == '.xrdf':
self.g = rdflib.Graph()
try:
self.g.parse(self.path, format='xml')
except:
# some owl formats are more rdf than owl
self.g.parse(self.path, format='turtle')
return self.get_sparql_dataframe()
else:
exit('Format options: owl, ttl, df_pickle, rdflib.Graph()')
try:
return self.get_sparql_dataframe()
self.path = None
except:
exit('Format options: owl, ttl, df_pickle, rdflib.Graph()')
elif isinstance(self.g, rdflib.graph.Graph):
self.path = None
return self.get_sparql_dataframe()
else:
exit('Obj given is not str, pathlib obj, or an rdflib.Graph()') | Updates self.g or self.path bc you could only choose 1 | Below is the the instruction that describes the task:
### Input:
Updates self.g or self.path bc you could only choose 1
### Response:
def Graph2Pandas_converter(self):
'''Updates self.g or self.path bc you could only choose 1'''
if isinstance(self.path, str) or isinstance(self.path, p):
self.path = str(self.path)
filetype = p(self.path).suffix
if filetype == '.pickle':
self.g = pickle.load(open(self.path, 'rb'))
if isinstance(self.g, rdflib.graph.Graph):
return self.get_sparql_dataframe()
else:
print('WARNING:: function df() wont work unless an ontology source is loaded')
return self.g
elif filetype == '.ttl' or filetype == '.rdf':
self.g = rdflib.Graph()
self.g.parse(self.path, format='turtle')
return self.get_sparql_dataframe()
elif filetype == '.nt':
self.g = rdflib.Graph()
self.g.parse(self.path, format='nt')
return self.get_sparql_dataframe()
elif filetype == '.owl' or filetype == '.xrdf':
self.g = rdflib.Graph()
try:
self.g.parse(self.path, format='xml')
except:
# some owl formats are more rdf than owl
self.g.parse(self.path, format='turtle')
return self.get_sparql_dataframe()
else:
exit('Format options: owl, ttl, df_pickle, rdflib.Graph()')
try:
return self.get_sparql_dataframe()
self.path = None
except:
exit('Format options: owl, ttl, df_pickle, rdflib.Graph()')
elif isinstance(self.g, rdflib.graph.Graph):
self.path = None
return self.get_sparql_dataframe()
else:
exit('Obj given is not str, pathlib obj, or an rdflib.Graph()') |
def print_desc(desc, verbose=False):
'''
:param desc: The describe hash of a DNAnexus entity
:type desc: dict
Depending on the class of the entity, this method will print a
formatted and human-readable string containing the data in *desc*.
'''
if desc['class'] in ['project', 'workspace', 'container']:
print_project_desc(desc, verbose=verbose)
elif desc['class'] == 'app':
print_app_desc(desc, verbose=verbose)
elif desc['class'] == 'globalworkflow':
print_globalworkflow_desc(desc, verbose=verbose)
elif desc['class'] in ['job', 'analysis']:
print_execution_desc(desc)
elif desc['class'] == 'user':
print_user_desc(desc)
elif desc['class'] in ['org', 'team']:
print_generic_desc(desc)
else:
print_data_obj_desc(desc, verbose=verbose) | :param desc: The describe hash of a DNAnexus entity
:type desc: dict
Depending on the class of the entity, this method will print a
formatted and human-readable string containing the data in *desc*. | Below is the the instruction that describes the task:
### Input:
:param desc: The describe hash of a DNAnexus entity
:type desc: dict
Depending on the class of the entity, this method will print a
formatted and human-readable string containing the data in *desc*.
### Response:
def print_desc(desc, verbose=False):
'''
:param desc: The describe hash of a DNAnexus entity
:type desc: dict
Depending on the class of the entity, this method will print a
formatted and human-readable string containing the data in *desc*.
'''
if desc['class'] in ['project', 'workspace', 'container']:
print_project_desc(desc, verbose=verbose)
elif desc['class'] == 'app':
print_app_desc(desc, verbose=verbose)
elif desc['class'] == 'globalworkflow':
print_globalworkflow_desc(desc, verbose=verbose)
elif desc['class'] in ['job', 'analysis']:
print_execution_desc(desc)
elif desc['class'] == 'user':
print_user_desc(desc)
elif desc['class'] in ['org', 'team']:
print_generic_desc(desc)
else:
print_data_obj_desc(desc, verbose=verbose) |
def add_msis(inst, glat_label='glat', glong_label='glong',
alt_label='alt'):
"""
Uses MSIS model to obtain thermospheric values.
Uses pyglow module to run MSIS. Configured to use actual solar parameters to run
model.
Example
-------
# function added velow modifies the inst object upon every inst.load call
inst.custom.add(add_msis, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include MSIS values winds.
'Nn' total neutral density particles/cm^3
'Nn_N' Nitrogen number density (particles/cm^3)
'Nn_N2' N2 number density (particles/cm^3)
'Nn_O' Oxygen number density (particles/cm^3)
'Nn_O2' O2 number density (particles/cm^3)
'Tn_msis' Temperature from MSIS (Kelvin)
"""
import pyglow
from pyglow.pyglow import Point
msis_params = []
# print 'IRI Simulations'
for time,lat,lon,alt in zip(inst.data.index, inst[glat_label], inst[glong_label], inst[alt_label]):
pt = Point(time,lat,lon,alt)
pt.run_msis()
msis = {}
total = 0
for key in pt.nn.keys():
total += pt.nn[key]
msis['Nn'] = total
msis['Nn_N'] = pt.nn['N']
msis['Nn_N2'] = pt.nn['N2']
msis['Nn_O'] = pt.nn['O']
msis['Nn_O2'] = pt.nn['O2']
msis['Tn_msis'] = pt.Tn_msis
msis_params.append(msis)
# print 'Complete.'
msis = pds.DataFrame(msis_params)
msis.index = inst.data.index
inst[msis.keys()] = msis
# metadata
inst.meta['Nn'] = {'units':'cm^-3',
'desc':'Total neutral number particle density from MSIS.'}
inst.meta['Nn_N'] = {'units':'cm^-3',
'desc':'Total nitrogen number particle density from MSIS.'}
inst.meta['Nn_N2'] = {'units':'cm^-3',
'desc':'Total N2 number particle density from MSIS.'}
inst.meta['Nn_O'] = {'units':'cm^-3',
'desc':'Total oxygen number particle density from MSIS.'}
inst.meta['Nn_O2'] = {'units':'cm^-3',
'desc':'Total O2 number particle density from MSIS.'}
inst.meta['Tn_msis'] = {'units':'K',
'desc':'Neutral temperature from MSIS.'}
return | Uses MSIS model to obtain thermospheric values.
Uses pyglow module to run MSIS. Configured to use actual solar parameters to run
model.
Example
-------
# function added velow modifies the inst object upon every inst.load call
inst.custom.add(add_msis, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include MSIS values winds.
'Nn' total neutral density particles/cm^3
'Nn_N' Nitrogen number density (particles/cm^3)
'Nn_N2' N2 number density (particles/cm^3)
'Nn_O' Oxygen number density (particles/cm^3)
'Nn_O2' O2 number density (particles/cm^3)
'Tn_msis' Temperature from MSIS (Kelvin) | Below is the the instruction that describes the task:
### Input:
Uses MSIS model to obtain thermospheric values.
Uses pyglow module to run MSIS. Configured to use actual solar parameters to run
model.
Example
-------
# function added velow modifies the inst object upon every inst.load call
inst.custom.add(add_msis, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include MSIS values winds.
'Nn' total neutral density particles/cm^3
'Nn_N' Nitrogen number density (particles/cm^3)
'Nn_N2' N2 number density (particles/cm^3)
'Nn_O' Oxygen number density (particles/cm^3)
'Nn_O2' O2 number density (particles/cm^3)
'Tn_msis' Temperature from MSIS (Kelvin)
### Response:
def add_msis(inst, glat_label='glat', glong_label='glong',
alt_label='alt'):
"""
Uses MSIS model to obtain thermospheric values.
Uses pyglow module to run MSIS. Configured to use actual solar parameters to run
model.
Example
-------
# function added velow modifies the inst object upon every inst.load call
inst.custom.add(add_msis, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include MSIS values winds.
'Nn' total neutral density particles/cm^3
'Nn_N' Nitrogen number density (particles/cm^3)
'Nn_N2' N2 number density (particles/cm^3)
'Nn_O' Oxygen number density (particles/cm^3)
'Nn_O2' O2 number density (particles/cm^3)
'Tn_msis' Temperature from MSIS (Kelvin)
"""
import pyglow
from pyglow.pyglow import Point
msis_params = []
# print 'IRI Simulations'
for time,lat,lon,alt in zip(inst.data.index, inst[glat_label], inst[glong_label], inst[alt_label]):
pt = Point(time,lat,lon,alt)
pt.run_msis()
msis = {}
total = 0
for key in pt.nn.keys():
total += pt.nn[key]
msis['Nn'] = total
msis['Nn_N'] = pt.nn['N']
msis['Nn_N2'] = pt.nn['N2']
msis['Nn_O'] = pt.nn['O']
msis['Nn_O2'] = pt.nn['O2']
msis['Tn_msis'] = pt.Tn_msis
msis_params.append(msis)
# print 'Complete.'
msis = pds.DataFrame(msis_params)
msis.index = inst.data.index
inst[msis.keys()] = msis
# metadata
inst.meta['Nn'] = {'units':'cm^-3',
'desc':'Total neutral number particle density from MSIS.'}
inst.meta['Nn_N'] = {'units':'cm^-3',
'desc':'Total nitrogen number particle density from MSIS.'}
inst.meta['Nn_N2'] = {'units':'cm^-3',
'desc':'Total N2 number particle density from MSIS.'}
inst.meta['Nn_O'] = {'units':'cm^-3',
'desc':'Total oxygen number particle density from MSIS.'}
inst.meta['Nn_O2'] = {'units':'cm^-3',
'desc':'Total O2 number particle density from MSIS.'}
inst.meta['Tn_msis'] = {'units':'K',
'desc':'Neutral temperature from MSIS.'}
return |
def parse_string_unsafe(s, system=SI):
"""Attempt to parse a string with ambiguous units and try to make a
bitmath object out of it.
This may produce inaccurate results if parsing shell output. For
example `ls` may say a 2730 Byte file is '2.7K'. 2730 Bytes == 2.73 kB
~= 2.666 KiB. See the documentation for all of the important details.
Note the following caveats:
* All inputs are assumed to be byte-based (as opposed to bit based)
* Numerical inputs (those without any units) are assumed to be a
number of bytes
* Inputs with single letter units (k, M, G, etc) are assumed to be SI
units (base-10). Set the `system` parameter to `bitmath.NIST` to
change this behavior.
* Inputs with an `i` character following the leading letter (Ki, Mi,
Gi) are assumed to be NIST units (base 2)
* Capitalization does not matter
"""
if not isinstance(s, (str, unicode)) and \
not isinstance(s, numbers.Number):
raise ValueError("parse_string_unsafe only accepts string/number inputs but a %s was given" %
type(s))
######################################################################
# Is the input simple to parse? Just a number, or a number
# masquerading as a string perhaps?
# Test case: raw number input (easy!)
if isinstance(s, numbers.Number):
# It's just a number. Assume bytes
return Byte(s)
# Test case: a number pretending to be a string
if isinstance(s, (str, unicode)):
try:
# Can we turn it directly into a number?
return Byte(float(s))
except ValueError:
# Nope, this is not a plain number
pass
######################################################################
# At this point:
# - the input is also not just a number wrapped in a string
# - nor is is just a plain number type
#
# We need to do some more digging around now to figure out exactly
# what we were given and possibly normalize the input into a
# format we can recognize.
# First we'll separate the number and the unit.
#
# Get the index of the first alphabetic character
try:
index = list([i.isalpha() for i in s]).index(True)
except ValueError: # pragma: no cover
# If there's no alphabetic characters we won't be able to .index(True)
raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s)
# Split the string into the value and the unit
val, unit = s[:index], s[index:]
# Don't trust anything. We'll make sure the correct 'b' is in place.
unit = unit.rstrip('Bb')
unit += 'B'
# At this point we can expect `unit` to be either:
#
# - 2 Characters (for SI, ex: kB or GB)
# - 3 Caracters (so NIST, ex: KiB, or GiB)
#
# A unit with any other number of chars is not a valid unit
# SI
if len(unit) == 2:
# Has NIST parsing been requested?
if system == NIST:
# NIST units requested. Ensure the unit begins with a
# capital letter and is followed by an 'i' character.
unit = capitalize_first(unit)
# Insert an 'i' char after the first letter
_unit = list(unit)
_unit.insert(1, 'i')
# Collapse the list back into a 3 letter string
unit = ''.join(_unit)
unit_class = globals()[unit]
else:
# Default parsing (SI format)
#
# Edge-case checking: SI 'thousand' is a lower-case K
if unit.startswith('K'):
unit = unit.replace('K', 'k')
elif not unit.startswith('k'):
# Otherwise, ensure the first char is capitalized
unit = capitalize_first(unit)
# This is an SI-type unit
if unit[0] in SI_PREFIXES:
unit_class = globals()[unit]
# NIST
elif len(unit) == 3:
unit = capitalize_first(unit)
# This is a NIST-type unit
if unit[:2] in NIST_PREFIXES:
unit_class = globals()[unit]
else:
# This is not a unit we recognize
raise ValueError("The unit %s is not a valid bitmath unit" % unit)
try:
unit_class
except UnboundLocalError:
raise ValueError("The unit %s is not a valid bitmath unit" % unit)
return unit_class(float(val)) | Attempt to parse a string with ambiguous units and try to make a
bitmath object out of it.
This may produce inaccurate results if parsing shell output. For
example `ls` may say a 2730 Byte file is '2.7K'. 2730 Bytes == 2.73 kB
~= 2.666 KiB. See the documentation for all of the important details.
Note the following caveats:
* All inputs are assumed to be byte-based (as opposed to bit based)
* Numerical inputs (those without any units) are assumed to be a
number of bytes
* Inputs with single letter units (k, M, G, etc) are assumed to be SI
units (base-10). Set the `system` parameter to `bitmath.NIST` to
change this behavior.
* Inputs with an `i` character following the leading letter (Ki, Mi,
Gi) are assumed to be NIST units (base 2)
* Capitalization does not matter | Below is the the instruction that describes the task:
### Input:
Attempt to parse a string with ambiguous units and try to make a
bitmath object out of it.
This may produce inaccurate results if parsing shell output. For
example `ls` may say a 2730 Byte file is '2.7K'. 2730 Bytes == 2.73 kB
~= 2.666 KiB. See the documentation for all of the important details.
Note the following caveats:
* All inputs are assumed to be byte-based (as opposed to bit based)
* Numerical inputs (those without any units) are assumed to be a
number of bytes
* Inputs with single letter units (k, M, G, etc) are assumed to be SI
units (base-10). Set the `system` parameter to `bitmath.NIST` to
change this behavior.
* Inputs with an `i` character following the leading letter (Ki, Mi,
Gi) are assumed to be NIST units (base 2)
* Capitalization does not matter
### Response:
def parse_string_unsafe(s, system=SI):
"""Attempt to parse a string with ambiguous units and try to make a
bitmath object out of it.
This may produce inaccurate results if parsing shell output. For
example `ls` may say a 2730 Byte file is '2.7K'. 2730 Bytes == 2.73 kB
~= 2.666 KiB. See the documentation for all of the important details.
Note the following caveats:
* All inputs are assumed to be byte-based (as opposed to bit based)
* Numerical inputs (those without any units) are assumed to be a
number of bytes
* Inputs with single letter units (k, M, G, etc) are assumed to be SI
units (base-10). Set the `system` parameter to `bitmath.NIST` to
change this behavior.
* Inputs with an `i` character following the leading letter (Ki, Mi,
Gi) are assumed to be NIST units (base 2)
* Capitalization does not matter
"""
if not isinstance(s, (str, unicode)) and \
not isinstance(s, numbers.Number):
raise ValueError("parse_string_unsafe only accepts string/number inputs but a %s was given" %
type(s))
######################################################################
# Is the input simple to parse? Just a number, or a number
# masquerading as a string perhaps?
# Test case: raw number input (easy!)
if isinstance(s, numbers.Number):
# It's just a number. Assume bytes
return Byte(s)
# Test case: a number pretending to be a string
if isinstance(s, (str, unicode)):
try:
# Can we turn it directly into a number?
return Byte(float(s))
except ValueError:
# Nope, this is not a plain number
pass
######################################################################
# At this point:
# - the input is also not just a number wrapped in a string
# - nor is is just a plain number type
#
# We need to do some more digging around now to figure out exactly
# what we were given and possibly normalize the input into a
# format we can recognize.
# First we'll separate the number and the unit.
#
# Get the index of the first alphabetic character
try:
index = list([i.isalpha() for i in s]).index(True)
except ValueError: # pragma: no cover
# If there's no alphabetic characters we won't be able to .index(True)
raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s)
# Split the string into the value and the unit
val, unit = s[:index], s[index:]
# Don't trust anything. We'll make sure the correct 'b' is in place.
unit = unit.rstrip('Bb')
unit += 'B'
# At this point we can expect `unit` to be either:
#
# - 2 Characters (for SI, ex: kB or GB)
# - 3 Caracters (so NIST, ex: KiB, or GiB)
#
# A unit with any other number of chars is not a valid unit
# SI
if len(unit) == 2:
# Has NIST parsing been requested?
if system == NIST:
# NIST units requested. Ensure the unit begins with a
# capital letter and is followed by an 'i' character.
unit = capitalize_first(unit)
# Insert an 'i' char after the first letter
_unit = list(unit)
_unit.insert(1, 'i')
# Collapse the list back into a 3 letter string
unit = ''.join(_unit)
unit_class = globals()[unit]
else:
# Default parsing (SI format)
#
# Edge-case checking: SI 'thousand' is a lower-case K
if unit.startswith('K'):
unit = unit.replace('K', 'k')
elif not unit.startswith('k'):
# Otherwise, ensure the first char is capitalized
unit = capitalize_first(unit)
# This is an SI-type unit
if unit[0] in SI_PREFIXES:
unit_class = globals()[unit]
# NIST
elif len(unit) == 3:
unit = capitalize_first(unit)
# This is a NIST-type unit
if unit[:2] in NIST_PREFIXES:
unit_class = globals()[unit]
else:
# This is not a unit we recognize
raise ValueError("The unit %s is not a valid bitmath unit" % unit)
try:
unit_class
except UnboundLocalError:
raise ValueError("The unit %s is not a valid bitmath unit" % unit)
return unit_class(float(val)) |
def to_timeseries(self, fieldnames=(), verbose=True,
index=None, storage='wide',
values=None, pivot_columns=None, freq=None,
coerce_float=True, rs_kwargs=None):
"""
A convenience method for creating a time series DataFrame i.e the
DataFrame index will be an instance of DateTime or PeriodIndex
Parameters
----------
fieldnames: The model field names(columns) to utilise in creating
the DataFrame. You can span a relationships in the usual
Django ORM way by using the foreign key field name
separated by double underscores and refer to a field
in a related model.
index: specify the field to use for the index. If the index
field is not in fieldnames it will be appended. This
is mandatory for timeseries.
storage: Specify if the queryset uses the
``wide`` format
date | col1| col2| col3|
-----------|------|-----|-----|
2001-01-01-| 100.5| 23.3| 2.2|
2001-02-01-| 106.3| 17.0| 4.6|
2001-03-01-| 111.7| 11.1| 0.7|
or the `long` format.
date |values| names|
-----------|------|------|
2001-01-01-| 100.5| col1|
2001-02-01-| 106.3| col1|
2001-03-01-| 111.7| col1|
2001-01-01-| 23.3| col2|
2001-02-01-| 17.0| col2|
2001-01-01-| 23.3| col2|
2001-02-01-| 2.2| col3|
2001-03-01-| 4.6| col3|
2001-03-01-| 0.7| col3|
pivot_columns: Required once the you specify `long` format
storage. This could either be a list or string
identifying the field name or combination of field.
If the pivot_column is a single column then the
unique values in this column become a new columns in
the DataFrame If the pivot column is a list the values
in these columns are concatenated (using the '-'
as a separator) and these values are used for the new
timeseries columns
values: Also required if you utilize the `long` storage the
values column name is use for populating new frame values
freq: The offset string or object representing a target conversion
rs_kwargs: A dictonary of keyword arguments based on the
``pandas.DataFrame.resample`` method
verbose: If this is ``True`` then populate the DataFrame with the
human readable versions of any foreign key fields else use
the primary keys values else use the actual values set
in the model.
coerce_float: Attempt to convert values to non-string, non-numeric
objects (like decimal.Decimal) to floating point.
"""
assert index is not None, 'You must supply an index field'
assert storage in ('wide', 'long'), 'storage must be wide or long'
if rs_kwargs is None:
rs_kwargs = {}
if storage == 'wide':
df = self.to_dataframe(fieldnames, verbose=verbose, index=index,
coerce_float=coerce_float, datetime_index=True)
else:
df = self.to_dataframe(fieldnames, verbose=verbose,
coerce_float=coerce_float, datetime_index=True)
assert values is not None, 'You must specify a values field'
assert pivot_columns is not None, 'You must specify pivot_columns'
if isinstance(pivot_columns, (tuple, list)):
df['combined_keys'] = ''
for c in pivot_columns:
df['combined_keys'] += df[c].str.upper() + '.'
df['combined_keys'] += values.lower()
df = df.pivot(index=index,
columns='combined_keys',
values=values)
else:
df = df.pivot(index=index,
columns=pivot_columns,
values=values)
if freq is not None:
df = df.resample(freq, **rs_kwargs)
return df | A convenience method for creating a time series DataFrame i.e the
DataFrame index will be an instance of DateTime or PeriodIndex
Parameters
----------
fieldnames: The model field names(columns) to utilise in creating
the DataFrame. You can span a relationships in the usual
Django ORM way by using the foreign key field name
separated by double underscores and refer to a field
in a related model.
index: specify the field to use for the index. If the index
field is not in fieldnames it will be appended. This
is mandatory for timeseries.
storage: Specify if the queryset uses the
``wide`` format
date | col1| col2| col3|
-----------|------|-----|-----|
2001-01-01-| 100.5| 23.3| 2.2|
2001-02-01-| 106.3| 17.0| 4.6|
2001-03-01-| 111.7| 11.1| 0.7|
or the `long` format.
date |values| names|
-----------|------|------|
2001-01-01-| 100.5| col1|
2001-02-01-| 106.3| col1|
2001-03-01-| 111.7| col1|
2001-01-01-| 23.3| col2|
2001-02-01-| 17.0| col2|
2001-01-01-| 23.3| col2|
2001-02-01-| 2.2| col3|
2001-03-01-| 4.6| col3|
2001-03-01-| 0.7| col3|
pivot_columns: Required once the you specify `long` format
storage. This could either be a list or string
identifying the field name or combination of field.
If the pivot_column is a single column then the
unique values in this column become a new columns in
the DataFrame If the pivot column is a list the values
in these columns are concatenated (using the '-'
as a separator) and these values are used for the new
timeseries columns
values: Also required if you utilize the `long` storage the
values column name is use for populating new frame values
freq: The offset string or object representing a target conversion
rs_kwargs: A dictonary of keyword arguments based on the
``pandas.DataFrame.resample`` method
verbose: If this is ``True`` then populate the DataFrame with the
human readable versions of any foreign key fields else use
the primary keys values else use the actual values set
in the model.
coerce_float: Attempt to convert values to non-string, non-numeric
objects (like decimal.Decimal) to floating point. | Below is the the instruction that describes the task:
### Input:
A convenience method for creating a time series DataFrame i.e the
DataFrame index will be an instance of DateTime or PeriodIndex
Parameters
----------
fieldnames: The model field names(columns) to utilise in creating
the DataFrame. You can span a relationships in the usual
Django ORM way by using the foreign key field name
separated by double underscores and refer to a field
in a related model.
index: specify the field to use for the index. If the index
field is not in fieldnames it will be appended. This
is mandatory for timeseries.
storage: Specify if the queryset uses the
``wide`` format
date | col1| col2| col3|
-----------|------|-----|-----|
2001-01-01-| 100.5| 23.3| 2.2|
2001-02-01-| 106.3| 17.0| 4.6|
2001-03-01-| 111.7| 11.1| 0.7|
or the `long` format.
date |values| names|
-----------|------|------|
2001-01-01-| 100.5| col1|
2001-02-01-| 106.3| col1|
2001-03-01-| 111.7| col1|
2001-01-01-| 23.3| col2|
2001-02-01-| 17.0| col2|
2001-01-01-| 23.3| col2|
2001-02-01-| 2.2| col3|
2001-03-01-| 4.6| col3|
2001-03-01-| 0.7| col3|
pivot_columns: Required once the you specify `long` format
storage. This could either be a list or string
identifying the field name or combination of field.
If the pivot_column is a single column then the
unique values in this column become a new columns in
the DataFrame If the pivot column is a list the values
in these columns are concatenated (using the '-'
as a separator) and these values are used for the new
timeseries columns
values: Also required if you utilize the `long` storage the
values column name is use for populating new frame values
freq: The offset string or object representing a target conversion
rs_kwargs: A dictonary of keyword arguments based on the
``pandas.DataFrame.resample`` method
verbose: If this is ``True`` then populate the DataFrame with the
human readable versions of any foreign key fields else use
the primary keys values else use the actual values set
in the model.
coerce_float: Attempt to convert values to non-string, non-numeric
objects (like decimal.Decimal) to floating point.
### Response:
def to_timeseries(self, fieldnames=(), verbose=True,
index=None, storage='wide',
values=None, pivot_columns=None, freq=None,
coerce_float=True, rs_kwargs=None):
"""
A convenience method for creating a time series DataFrame i.e the
DataFrame index will be an instance of DateTime or PeriodIndex
Parameters
----------
fieldnames: The model field names(columns) to utilise in creating
the DataFrame. You can span a relationships in the usual
Django ORM way by using the foreign key field name
separated by double underscores and refer to a field
in a related model.
index: specify the field to use for the index. If the index
field is not in fieldnames it will be appended. This
is mandatory for timeseries.
storage: Specify if the queryset uses the
``wide`` format
date | col1| col2| col3|
-----------|------|-----|-----|
2001-01-01-| 100.5| 23.3| 2.2|
2001-02-01-| 106.3| 17.0| 4.6|
2001-03-01-| 111.7| 11.1| 0.7|
or the `long` format.
date |values| names|
-----------|------|------|
2001-01-01-| 100.5| col1|
2001-02-01-| 106.3| col1|
2001-03-01-| 111.7| col1|
2001-01-01-| 23.3| col2|
2001-02-01-| 17.0| col2|
2001-01-01-| 23.3| col2|
2001-02-01-| 2.2| col3|
2001-03-01-| 4.6| col3|
2001-03-01-| 0.7| col3|
pivot_columns: Required once the you specify `long` format
storage. This could either be a list or string
identifying the field name or combination of field.
If the pivot_column is a single column then the
unique values in this column become a new columns in
the DataFrame If the pivot column is a list the values
in these columns are concatenated (using the '-'
as a separator) and these values are used for the new
timeseries columns
values: Also required if you utilize the `long` storage the
values column name is use for populating new frame values
freq: The offset string or object representing a target conversion
rs_kwargs: A dictonary of keyword arguments based on the
``pandas.DataFrame.resample`` method
verbose: If this is ``True`` then populate the DataFrame with the
human readable versions of any foreign key fields else use
the primary keys values else use the actual values set
in the model.
coerce_float: Attempt to convert values to non-string, non-numeric
objects (like decimal.Decimal) to floating point.
"""
assert index is not None, 'You must supply an index field'
assert storage in ('wide', 'long'), 'storage must be wide or long'
if rs_kwargs is None:
rs_kwargs = {}
if storage == 'wide':
df = self.to_dataframe(fieldnames, verbose=verbose, index=index,
coerce_float=coerce_float, datetime_index=True)
else:
df = self.to_dataframe(fieldnames, verbose=verbose,
coerce_float=coerce_float, datetime_index=True)
assert values is not None, 'You must specify a values field'
assert pivot_columns is not None, 'You must specify pivot_columns'
if isinstance(pivot_columns, (tuple, list)):
df['combined_keys'] = ''
for c in pivot_columns:
df['combined_keys'] += df[c].str.upper() + '.'
df['combined_keys'] += values.lower()
df = df.pivot(index=index,
columns='combined_keys',
values=values)
else:
df = df.pivot(index=index,
columns=pivot_columns,
values=values)
if freq is not None:
df = df.resample(freq, **rs_kwargs)
return df |
def _create_activity2(self, parent, name, activity_type=ActivityType.TASK):
"""Create a new activity.
.. important::
This function creates activities for KE-chain versions later than 2.9.0-135
In effect where the module 'wim' has version '>=2.0.0'.
The version of 'wim' in KE-chain can be found in the property :attr:`Client.app_versions`
In WIM2 the type of the activity is called activity_type
:param parent: parent under which to create the activity
:type parent: basestring or :class:`models.Activity2`
:param name: new activity name
:type name: basestring
:param activity_type: type of activity: TASK (default) or PROCESS
:type activity_type: basestring
:return: the created :class:`models.Activity2`
:raises APIError: When the object could not be created
:raises IllegalArgumentError: When an incorrect activitytype or parent is provided
"""
# WIM1: activity_class, WIM2: activity_type
if self.match_app_version(label='wim', version='<2.0.0', default=True):
raise APIError('This method is only compatible with versions of KE-chain where the internal `wim` module '
'has a version >=2.0.0. Use the `Client.create_activity()` method.')
if activity_type and activity_type not in ActivityType.values():
raise IllegalArgumentError("Please provide accepted activity_type (provided:{} accepted:{})".
format(activity_type, ActivityType.values()))
if isinstance(parent, (Activity, Activity2)):
parent = parent.id
elif is_uuid(parent):
parent = parent
else:
raise IllegalArgumentError("Please provide either an activity object or a UUID")
data = {
"name": name,
"parent_id": parent,
"activity_type": activity_type
}
response = self._request('POST', self._build_url('activities'), data=data,
params=API_EXTRA_PARAMS['activities'])
if response.status_code != requests.codes.created: # pragma: no cover
raise APIError("Could not create activity")
data = response.json()
return Activity2(data['results'][0], client=self) | Create a new activity.
.. important::
This function creates activities for KE-chain versions later than 2.9.0-135
In effect where the module 'wim' has version '>=2.0.0'.
The version of 'wim' in KE-chain can be found in the property :attr:`Client.app_versions`
In WIM2 the type of the activity is called activity_type
:param parent: parent under which to create the activity
:type parent: basestring or :class:`models.Activity2`
:param name: new activity name
:type name: basestring
:param activity_type: type of activity: TASK (default) or PROCESS
:type activity_type: basestring
:return: the created :class:`models.Activity2`
:raises APIError: When the object could not be created
:raises IllegalArgumentError: When an incorrect activitytype or parent is provided | Below is the the instruction that describes the task:
### Input:
Create a new activity.
.. important::
This function creates activities for KE-chain versions later than 2.9.0-135
In effect where the module 'wim' has version '>=2.0.0'.
The version of 'wim' in KE-chain can be found in the property :attr:`Client.app_versions`
In WIM2 the type of the activity is called activity_type
:param parent: parent under which to create the activity
:type parent: basestring or :class:`models.Activity2`
:param name: new activity name
:type name: basestring
:param activity_type: type of activity: TASK (default) or PROCESS
:type activity_type: basestring
:return: the created :class:`models.Activity2`
:raises APIError: When the object could not be created
:raises IllegalArgumentError: When an incorrect activitytype or parent is provided
### Response:
def _create_activity2(self, parent, name, activity_type=ActivityType.TASK):
"""Create a new activity.
.. important::
This function creates activities for KE-chain versions later than 2.9.0-135
In effect where the module 'wim' has version '>=2.0.0'.
The version of 'wim' in KE-chain can be found in the property :attr:`Client.app_versions`
In WIM2 the type of the activity is called activity_type
:param parent: parent under which to create the activity
:type parent: basestring or :class:`models.Activity2`
:param name: new activity name
:type name: basestring
:param activity_type: type of activity: TASK (default) or PROCESS
:type activity_type: basestring
:return: the created :class:`models.Activity2`
:raises APIError: When the object could not be created
:raises IllegalArgumentError: When an incorrect activitytype or parent is provided
"""
# WIM1: activity_class, WIM2: activity_type
if self.match_app_version(label='wim', version='<2.0.0', default=True):
raise APIError('This method is only compatible with versions of KE-chain where the internal `wim` module '
'has a version >=2.0.0. Use the `Client.create_activity()` method.')
if activity_type and activity_type not in ActivityType.values():
raise IllegalArgumentError("Please provide accepted activity_type (provided:{} accepted:{})".
format(activity_type, ActivityType.values()))
if isinstance(parent, (Activity, Activity2)):
parent = parent.id
elif is_uuid(parent):
parent = parent
else:
raise IllegalArgumentError("Please provide either an activity object or a UUID")
data = {
"name": name,
"parent_id": parent,
"activity_type": activity_type
}
response = self._request('POST', self._build_url('activities'), data=data,
params=API_EXTRA_PARAMS['activities'])
if response.status_code != requests.codes.created: # pragma: no cover
raise APIError("Could not create activity")
data = response.json()
return Activity2(data['results'][0], client=self) |
def computePairwisePreferences(self, profile):
"""
Returns a two-dimensional dictionary that associates every pair of candidates, cand1 and
cand2, with number of voters who prefer cand1 to cand2.
:ivar Profile profile: A Profile object that represents an election profile.
"""
cands = profile.candMap.keys()
# Initialize the two-dimensional dictionary that will hold our pairwise preferences.
pairwisePreferences = dict()
for cand in cands:
pairwisePreferences[cand] = dict()
for cand1 in cands:
for cand2 in cands:
if cand1 != cand2:
pairwisePreferences[cand1][cand2] = 0
for preference in profile.preferences:
wmgMap = preference.wmgMap
for cand1, cand2 in itertools.combinations(cands, 2):
# If either candidate was unranked, we assume that they are lower ranked than all
# ranked candidates.
if cand1 not in wmgMap.keys():
if cand2 in wmgMap.keys():
pairwisePreferences[cand2][cand1] += 1 * preference.count
elif cand2 not in wmgMap.keys():
if cand1 in wmgMap.keys():
pairwisePreferences[cand1][cand2] += 1 * preference.count
elif wmgMap[cand1][cand2] == 1:
pairwisePreferences[cand1][cand2] += 1 * preference.count
elif wmgMap[cand1][cand2] == -1:
pairwisePreferences[cand2][cand1] += 1 * preference.count
return pairwisePreferences | Returns a two-dimensional dictionary that associates every pair of candidates, cand1 and
cand2, with number of voters who prefer cand1 to cand2.
:ivar Profile profile: A Profile object that represents an election profile. | Below is the the instruction that describes the task:
### Input:
Returns a two-dimensional dictionary that associates every pair of candidates, cand1 and
cand2, with number of voters who prefer cand1 to cand2.
:ivar Profile profile: A Profile object that represents an election profile.
### Response:
def computePairwisePreferences(self, profile):
"""
Returns a two-dimensional dictionary that associates every pair of candidates, cand1 and
cand2, with number of voters who prefer cand1 to cand2.
:ivar Profile profile: A Profile object that represents an election profile.
"""
cands = profile.candMap.keys()
# Initialize the two-dimensional dictionary that will hold our pairwise preferences.
pairwisePreferences = dict()
for cand in cands:
pairwisePreferences[cand] = dict()
for cand1 in cands:
for cand2 in cands:
if cand1 != cand2:
pairwisePreferences[cand1][cand2] = 0
for preference in profile.preferences:
wmgMap = preference.wmgMap
for cand1, cand2 in itertools.combinations(cands, 2):
# If either candidate was unranked, we assume that they are lower ranked than all
# ranked candidates.
if cand1 not in wmgMap.keys():
if cand2 in wmgMap.keys():
pairwisePreferences[cand2][cand1] += 1 * preference.count
elif cand2 not in wmgMap.keys():
if cand1 in wmgMap.keys():
pairwisePreferences[cand1][cand2] += 1 * preference.count
elif wmgMap[cand1][cand2] == 1:
pairwisePreferences[cand1][cand2] += 1 * preference.count
elif wmgMap[cand1][cand2] == -1:
pairwisePreferences[cand2][cand1] += 1 * preference.count
return pairwisePreferences |
def _GetDirectory(self):
"""Retrieves a directory.
Returns:
TSKPartitionDirectory: a directory or None if not available.
"""
if self.entry_type != definitions.FILE_ENTRY_TYPE_DIRECTORY:
return None
return TSKPartitionDirectory(self._file_system, self.path_spec) | Retrieves a directory.
Returns:
TSKPartitionDirectory: a directory or None if not available. | Below is the the instruction that describes the task:
### Input:
Retrieves a directory.
Returns:
TSKPartitionDirectory: a directory or None if not available.
### Response:
def _GetDirectory(self):
"""Retrieves a directory.
Returns:
TSKPartitionDirectory: a directory or None if not available.
"""
if self.entry_type != definitions.FILE_ENTRY_TYPE_DIRECTORY:
return None
return TSKPartitionDirectory(self._file_system, self.path_spec) |
def write_file(filename, text):
"""Write text to a file."""
logging.debug(_('Writing file: %s'), filename)
try:
with open(filename, 'w') as writable:
writable.write(text)
except (PermissionError, NotADirectoryError):
logging.error(_('Error writing file: %s'), filename)
return False
return True | Write text to a file. | Below is the the instruction that describes the task:
### Input:
Write text to a file.
### Response:
def write_file(filename, text):
"""Write text to a file."""
logging.debug(_('Writing file: %s'), filename)
try:
with open(filename, 'w') as writable:
writable.write(text)
except (PermissionError, NotADirectoryError):
logging.error(_('Error writing file: %s'), filename)
return False
return True |
def _getuie(self):
"""Return data as unsigned interleaved exponential-Golomb code.
Raises InterpretError if bitstring is not a single exponential-Golomb code.
"""
try:
value, newpos = self._readuie(0)
if value is None or newpos != self.len:
raise ReadError
except ReadError:
raise InterpretError("Bitstring is not a single interleaved exponential-Golomb code.")
return value | Return data as unsigned interleaved exponential-Golomb code.
Raises InterpretError if bitstring is not a single exponential-Golomb code. | Below is the the instruction that describes the task:
### Input:
Return data as unsigned interleaved exponential-Golomb code.
Raises InterpretError if bitstring is not a single exponential-Golomb code.
### Response:
def _getuie(self):
"""Return data as unsigned interleaved exponential-Golomb code.
Raises InterpretError if bitstring is not a single exponential-Golomb code.
"""
try:
value, newpos = self._readuie(0)
if value is None or newpos != self.len:
raise ReadError
except ReadError:
raise InterpretError("Bitstring is not a single interleaved exponential-Golomb code.")
return value |
def chained_get(self, *keys):
"""
This function allows traversals over several keys to
be performed by passing a list of keys::
d.chained_get(key1,key2,key3) = d[key1][key2][key3]
"""
existing = self
for i in range(0, len(keys)):
if keys[i] in existing:
existing = existing[keys[i]]
else:
return None
return existing | This function allows traversals over several keys to
be performed by passing a list of keys::
d.chained_get(key1,key2,key3) = d[key1][key2][key3] | Below is the the instruction that describes the task:
### Input:
This function allows traversals over several keys to
be performed by passing a list of keys::
d.chained_get(key1,key2,key3) = d[key1][key2][key3]
### Response:
def chained_get(self, *keys):
"""
This function allows traversals over several keys to
be performed by passing a list of keys::
d.chained_get(key1,key2,key3) = d[key1][key2][key3]
"""
existing = self
for i in range(0, len(keys)):
if keys[i] in existing:
existing = existing[keys[i]]
else:
return None
return existing |
def _loads(self, response):
""" Parse the BSER packet """
return bser.loads(
response,
True,
value_encoding=encoding.get_local_encoding(),
value_errors=encoding.default_local_errors,
) | Parse the BSER packet | Below is the the instruction that describes the task:
### Input:
Parse the BSER packet
### Response:
def _loads(self, response):
""" Parse the BSER packet """
return bser.loads(
response,
True,
value_encoding=encoding.get_local_encoding(),
value_errors=encoding.default_local_errors,
) |
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ParticipantContext for this ParticipantInstance
:rtype: twilio.rest.api.v2010.account.conference.participant.ParticipantContext
"""
if self._context is None:
self._context = ParticipantContext(
self._version,
account_sid=self._solution['account_sid'],
conference_sid=self._solution['conference_sid'],
call_sid=self._solution['call_sid'],
)
return self._context | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ParticipantContext for this ParticipantInstance
:rtype: twilio.rest.api.v2010.account.conference.participant.ParticipantContext | Below is the the instruction that describes the task:
### Input:
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ParticipantContext for this ParticipantInstance
:rtype: twilio.rest.api.v2010.account.conference.participant.ParticipantContext
### Response:
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ParticipantContext for this ParticipantInstance
:rtype: twilio.rest.api.v2010.account.conference.participant.ParticipantContext
"""
if self._context is None:
self._context = ParticipantContext(
self._version,
account_sid=self._solution['account_sid'],
conference_sid=self._solution['conference_sid'],
call_sid=self._solution['call_sid'],
)
return self._context |
def redirect_profile(request):
'''
The default destination from logging in, redirect to the actual profile URL
'''
if request.user.is_authenticated:
return HttpResponseRedirect(reverse('wafer_user_profile',
args=(request.user.username,)))
else:
return redirect_to_login(next=reverse(redirect_profile)) | The default destination from logging in, redirect to the actual profile URL | Below is the the instruction that describes the task:
### Input:
The default destination from logging in, redirect to the actual profile URL
### Response:
def redirect_profile(request):
'''
The default destination from logging in, redirect to the actual profile URL
'''
if request.user.is_authenticated:
return HttpResponseRedirect(reverse('wafer_user_profile',
args=(request.user.username,)))
else:
return redirect_to_login(next=reverse(redirect_profile)) |
def render_target(self):
"""Texture: The current render target, or None if using the default render target."""
render_target = lib.SDL_GetRenderTarget(self._ptr)
if render_target == ffi.NULL:
return None
else:
return Texture._from_ptr(render_target) | Texture: The current render target, or None if using the default render target. | Below is the the instruction that describes the task:
### Input:
Texture: The current render target, or None if using the default render target.
### Response:
def render_target(self):
"""Texture: The current render target, or None if using the default render target."""
render_target = lib.SDL_GetRenderTarget(self._ptr)
if render_target == ffi.NULL:
return None
else:
return Texture._from_ptr(render_target) |
def gps2dt(gps_week, gps_ms):
"""Convert GPS week and ms to a datetime
"""
gps_epoch = datetime(1980,1,6,0,0,0)
gps_week_s = timedelta(seconds=gps_week*7*24*60*60)
gps_ms_s = timedelta(milliseconds=gps_ms)
return gps_epoch + gps_week_s + gps_ms_s | Convert GPS week and ms to a datetime | Below is the the instruction that describes the task:
### Input:
Convert GPS week and ms to a datetime
### Response:
def gps2dt(gps_week, gps_ms):
"""Convert GPS week and ms to a datetime
"""
gps_epoch = datetime(1980,1,6,0,0,0)
gps_week_s = timedelta(seconds=gps_week*7*24*60*60)
gps_ms_s = timedelta(milliseconds=gps_ms)
return gps_epoch + gps_week_s + gps_ms_s |
def execute(self):
"""
params = {
"ApexCode" : "None",
"ApexProfiling" : "01pd0000001yXtYAAU",
"Callout" : True,
"Database" : 1,
"ExpirationDate" : 3,
"ScopeId" : "",
"System" : "",
"TracedEntityId" : "",
"Validation" : "",
"Visualforce" : "",
"Workflow" : ""
}
"""
if 'type' not in self.params:
raise MMException("Please include the type of log, 'user' or 'apex'")
if 'debug_categories' not in self.params:
raise MMException("Please include debug categories in dictionary format: e.g.: {'ApexCode':'DEBUG', 'Visualforce':'INFO'}")
request = {}
if self.params['type'] == 'user':
request['ScopeId'] = None
request['TracedEntityId'] = self.params.get('user_id', config.sfdc_client.user_id)
elif self.params['type'] == 'apex':
#request['ScopeId'] = 'user'
request['ScopeId'] = config.sfdc_client.user_id
request['TracedEntityId'] = self.params['apex_id']
for c in self.params['debug_categories']:
if 'category' in c:
request[c['category']] = c['level']
else:
request[c] = self.params['debug_categories'][c]
request['ExpirationDate'] = util.get_iso_8601_timestamp(int(float(self.params.get('expiration', 30))))
config.logger.debug(self.params['debug_categories'])
config.logger.debug("Log creation reuqest--->")
config.logger.debug(request)
create_result = config.sfdc_client.create_trace_flag(request)
config.logger.debug("Log creation response--->")
config.logger.debug(create_result)
if type(create_result) is list:
create_result = create_result[0]
if type(create_result) is not str and type(create_result) is not unicode:
return json.dumps(create_result)
else:
return create_result | params = {
"ApexCode" : "None",
"ApexProfiling" : "01pd0000001yXtYAAU",
"Callout" : True,
"Database" : 1,
"ExpirationDate" : 3,
"ScopeId" : "",
"System" : "",
"TracedEntityId" : "",
"Validation" : "",
"Visualforce" : "",
"Workflow" : ""
} | Below is the the instruction that describes the task:
### Input:
params = {
"ApexCode" : "None",
"ApexProfiling" : "01pd0000001yXtYAAU",
"Callout" : True,
"Database" : 1,
"ExpirationDate" : 3,
"ScopeId" : "",
"System" : "",
"TracedEntityId" : "",
"Validation" : "",
"Visualforce" : "",
"Workflow" : ""
}
### Response:
def execute(self):
"""
params = {
"ApexCode" : "None",
"ApexProfiling" : "01pd0000001yXtYAAU",
"Callout" : True,
"Database" : 1,
"ExpirationDate" : 3,
"ScopeId" : "",
"System" : "",
"TracedEntityId" : "",
"Validation" : "",
"Visualforce" : "",
"Workflow" : ""
}
"""
if 'type' not in self.params:
raise MMException("Please include the type of log, 'user' or 'apex'")
if 'debug_categories' not in self.params:
raise MMException("Please include debug categories in dictionary format: e.g.: {'ApexCode':'DEBUG', 'Visualforce':'INFO'}")
request = {}
if self.params['type'] == 'user':
request['ScopeId'] = None
request['TracedEntityId'] = self.params.get('user_id', config.sfdc_client.user_id)
elif self.params['type'] == 'apex':
#request['ScopeId'] = 'user'
request['ScopeId'] = config.sfdc_client.user_id
request['TracedEntityId'] = self.params['apex_id']
for c in self.params['debug_categories']:
if 'category' in c:
request[c['category']] = c['level']
else:
request[c] = self.params['debug_categories'][c]
request['ExpirationDate'] = util.get_iso_8601_timestamp(int(float(self.params.get('expiration', 30))))
config.logger.debug(self.params['debug_categories'])
config.logger.debug("Log creation reuqest--->")
config.logger.debug(request)
create_result = config.sfdc_client.create_trace_flag(request)
config.logger.debug("Log creation response--->")
config.logger.debug(create_result)
if type(create_result) is list:
create_result = create_result[0]
if type(create_result) is not str and type(create_result) is not unicode:
return json.dumps(create_result)
else:
return create_result |
def delete_classification_node(self, project, structure_group, path=None, reclassify_id=None):
"""DeleteClassificationNode.
Delete an existing classification node.
:param str project: Project ID or project name
:param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
:param str path: Path of the classification node.
:param int reclassify_id: Id of the target classification node for reclassification.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if structure_group is not None:
route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup')
if path is not None:
route_values['path'] = self._serialize.url('path', path, 'str')
query_parameters = {}
if reclassify_id is not None:
query_parameters['$reclassifyId'] = self._serialize.query('reclassify_id', reclassify_id, 'int')
self._send(http_method='DELETE',
location_id='5a172953-1b41-49d3-840a-33f79c3ce89f',
version='5.0',
route_values=route_values,
query_parameters=query_parameters) | DeleteClassificationNode.
Delete an existing classification node.
:param str project: Project ID or project name
:param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
:param str path: Path of the classification node.
:param int reclassify_id: Id of the target classification node for reclassification. | Below is the the instruction that describes the task:
### Input:
DeleteClassificationNode.
Delete an existing classification node.
:param str project: Project ID or project name
:param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
:param str path: Path of the classification node.
:param int reclassify_id: Id of the target classification node for reclassification.
### Response:
def delete_classification_node(self, project, structure_group, path=None, reclassify_id=None):
"""DeleteClassificationNode.
Delete an existing classification node.
:param str project: Project ID or project name
:param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
:param str path: Path of the classification node.
:param int reclassify_id: Id of the target classification node for reclassification.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if structure_group is not None:
route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup')
if path is not None:
route_values['path'] = self._serialize.url('path', path, 'str')
query_parameters = {}
if reclassify_id is not None:
query_parameters['$reclassifyId'] = self._serialize.query('reclassify_id', reclassify_id, 'int')
self._send(http_method='DELETE',
location_id='5a172953-1b41-49d3-840a-33f79c3ce89f',
version='5.0',
route_values=route_values,
query_parameters=query_parameters) |
def is_transaction_signer_authorized(self, transactions, state_root,
from_state):
""" Check the transaction signing key against the allowed transactor
permissions. The roles being checked are the following, from first
to last:
"transactor.transaction_signer.<TP_Name>"
"transactor.transaction_signer"
"transactor"
"default"
The first role that is set will be the one used to enforce if the
transaction signer is allowed.
Args:
transactions (List of Transactions): The transactions that are
being verified.
state_root(string): The state root of the previous block. If
this is None, the current state root hash will be
retrieved.
from_state (bool): Whether the identity value should be read
directly from state, instead of using the cached values.
This should be used when the state_root passed is not from
the current chain head.
"""
role = None
if role is None:
role = self._cache.get_role("transactor.transaction_signer",
state_root, from_state)
if role is None:
role = self._cache.get_role("transactor", state_root, from_state)
if role is None:
policy_name = "default"
else:
policy_name = role.policy_name
policy = self._cache.get_policy(policy_name, state_root, from_state)
family_roles = {}
for transaction in transactions:
header = TransactionHeader()
header.ParseFromString(transaction.header)
family_policy = None
if header.family_name not in family_roles:
role = self._cache.get_role(
"transactor.transaction_signer." + header.family_name,
state_root,
from_state)
if role is not None:
family_policy = self._cache.get_policy(role.policy_name,
state_root,
from_state)
family_roles[header.family_name] = family_policy
else:
family_policy = family_roles[header.family_name]
if family_policy is not None:
if not self._allowed(header.signer_public_key, family_policy):
LOGGER.debug("Transaction Signer: %s is not permitted.",
header.signer_public_key)
return False
else:
if policy is not None:
if not self._allowed(header.signer_public_key, policy):
LOGGER.debug(
"Transaction Signer: %s is not permitted.",
header.signer_public_key)
return False
return True | Check the transaction signing key against the allowed transactor
permissions. The roles being checked are the following, from first
to last:
"transactor.transaction_signer.<TP_Name>"
"transactor.transaction_signer"
"transactor"
"default"
The first role that is set will be the one used to enforce if the
transaction signer is allowed.
Args:
transactions (List of Transactions): The transactions that are
being verified.
state_root(string): The state root of the previous block. If
this is None, the current state root hash will be
retrieved.
from_state (bool): Whether the identity value should be read
directly from state, instead of using the cached values.
This should be used when the state_root passed is not from
the current chain head. | Below is the the instruction that describes the task:
### Input:
Check the transaction signing key against the allowed transactor
permissions. The roles being checked are the following, from first
to last:
"transactor.transaction_signer.<TP_Name>"
"transactor.transaction_signer"
"transactor"
"default"
The first role that is set will be the one used to enforce if the
transaction signer is allowed.
Args:
transactions (List of Transactions): The transactions that are
being verified.
state_root(string): The state root of the previous block. If
this is None, the current state root hash will be
retrieved.
from_state (bool): Whether the identity value should be read
directly from state, instead of using the cached values.
This should be used when the state_root passed is not from
the current chain head.
### Response:
def is_transaction_signer_authorized(self, transactions, state_root,
from_state):
""" Check the transaction signing key against the allowed transactor
permissions. The roles being checked are the following, from first
to last:
"transactor.transaction_signer.<TP_Name>"
"transactor.transaction_signer"
"transactor"
"default"
The first role that is set will be the one used to enforce if the
transaction signer is allowed.
Args:
transactions (List of Transactions): The transactions that are
being verified.
state_root(string): The state root of the previous block. If
this is None, the current state root hash will be
retrieved.
from_state (bool): Whether the identity value should be read
directly from state, instead of using the cached values.
This should be used when the state_root passed is not from
the current chain head.
"""
role = None
if role is None:
role = self._cache.get_role("transactor.transaction_signer",
state_root, from_state)
if role is None:
role = self._cache.get_role("transactor", state_root, from_state)
if role is None:
policy_name = "default"
else:
policy_name = role.policy_name
policy = self._cache.get_policy(policy_name, state_root, from_state)
family_roles = {}
for transaction in transactions:
header = TransactionHeader()
header.ParseFromString(transaction.header)
family_policy = None
if header.family_name not in family_roles:
role = self._cache.get_role(
"transactor.transaction_signer." + header.family_name,
state_root,
from_state)
if role is not None:
family_policy = self._cache.get_policy(role.policy_name,
state_root,
from_state)
family_roles[header.family_name] = family_policy
else:
family_policy = family_roles[header.family_name]
if family_policy is not None:
if not self._allowed(header.signer_public_key, family_policy):
LOGGER.debug("Transaction Signer: %s is not permitted.",
header.signer_public_key)
return False
else:
if policy is not None:
if not self._allowed(header.signer_public_key, policy):
LOGGER.debug(
"Transaction Signer: %s is not permitted.",
header.signer_public_key)
return False
return True |
def doBandTrapz(Aein, lambnew, fc, kin, lamb, ver, z, br):
"""
ver dimensions: wavelength, altitude, time
A and lambda dimensions:
axis 0 is upper state vib. level (nu')
axis 1 is bottom state vib level (nu'')
there is a Franck-Condon parameter (variable fc) for each upper state nu'
"""
tau = 1/np.nansum(Aein, axis=1)
scalevec = (Aein * tau[:, None] * fc[:, None]).ravel(order='F')
vnew = scalevec[None, None, :]*kin.values[..., None]
return catvl(z, ver, vnew, lamb, lambnew, br) | ver dimensions: wavelength, altitude, time
A and lambda dimensions:
axis 0 is upper state vib. level (nu')
axis 1 is bottom state vib level (nu'')
there is a Franck-Condon parameter (variable fc) for each upper state nu' | Below is the the instruction that describes the task:
### Input:
ver dimensions: wavelength, altitude, time
A and lambda dimensions:
axis 0 is upper state vib. level (nu')
axis 1 is bottom state vib level (nu'')
there is a Franck-Condon parameter (variable fc) for each upper state nu'
### Response:
def doBandTrapz(Aein, lambnew, fc, kin, lamb, ver, z, br):
"""
ver dimensions: wavelength, altitude, time
A and lambda dimensions:
axis 0 is upper state vib. level (nu')
axis 1 is bottom state vib level (nu'')
there is a Franck-Condon parameter (variable fc) for each upper state nu'
"""
tau = 1/np.nansum(Aein, axis=1)
scalevec = (Aein * tau[:, None] * fc[:, None]).ravel(order='F')
vnew = scalevec[None, None, :]*kin.values[..., None]
return catvl(z, ver, vnew, lamb, lambnew, br) |
def camera_position(self, camera_location):
""" Set camera position of all active render windows """
if camera_location is None:
return
if isinstance(camera_location, str):
camera_location = camera_location.lower()
if camera_location == 'xy':
self.view_xy()
elif camera_location == 'xz':
self.view_xz()
elif camera_location == 'yz':
self.view_yz()
elif camera_location == 'yx':
self.view_xy(True)
elif camera_location == 'zx':
self.view_xz(True)
elif camera_location == 'zy':
self.view_yz(True)
return
if isinstance(camera_location[0], (int, float)):
return self.view_vector(camera_location)
# everything is set explicitly
self.camera.SetPosition(camera_location[0])
self.camera.SetFocalPoint(camera_location[1])
self.camera.SetViewUp(camera_location[2])
# reset clipping range
self.ResetCameraClippingRange()
self.camera_set = True | Set camera position of all active render windows | Below is the the instruction that describes the task:
### Input:
Set camera position of all active render windows
### Response:
def camera_position(self, camera_location):
""" Set camera position of all active render windows """
if camera_location is None:
return
if isinstance(camera_location, str):
camera_location = camera_location.lower()
if camera_location == 'xy':
self.view_xy()
elif camera_location == 'xz':
self.view_xz()
elif camera_location == 'yz':
self.view_yz()
elif camera_location == 'yx':
self.view_xy(True)
elif camera_location == 'zx':
self.view_xz(True)
elif camera_location == 'zy':
self.view_yz(True)
return
if isinstance(camera_location[0], (int, float)):
return self.view_vector(camera_location)
# everything is set explicitly
self.camera.SetPosition(camera_location[0])
self.camera.SetFocalPoint(camera_location[1])
self.camera.SetViewUp(camera_location[2])
# reset clipping range
self.ResetCameraClippingRange()
self.camera_set = True |
def _abbrev_program(program: Program, max_len=10):
"""Create an abbreviated string representation of a Program.
This will join all instructions onto a single line joined by '; '. If the number of
instructions exceeds ``max_len``, some will be excluded from the string representation.
"""
program_lines = program.out().splitlines()
if max_len is not None and len(program_lines) > max_len:
first_n = max_len // 2
last_n = max_len - first_n
excluded = len(program_lines) - max_len
program_lines = (program_lines[:first_n] + [f'... {excluded} instrs not shown ...']
+ program_lines[-last_n:])
return '; '.join(program_lines) | Create an abbreviated string representation of a Program.
This will join all instructions onto a single line joined by '; '. If the number of
instructions exceeds ``max_len``, some will be excluded from the string representation. | Below is the the instruction that describes the task:
### Input:
Create an abbreviated string representation of a Program.
This will join all instructions onto a single line joined by '; '. If the number of
instructions exceeds ``max_len``, some will be excluded from the string representation.
### Response:
def _abbrev_program(program: Program, max_len=10):
"""Create an abbreviated string representation of a Program.
This will join all instructions onto a single line joined by '; '. If the number of
instructions exceeds ``max_len``, some will be excluded from the string representation.
"""
program_lines = program.out().splitlines()
if max_len is not None and len(program_lines) > max_len:
first_n = max_len // 2
last_n = max_len - first_n
excluded = len(program_lines) - max_len
program_lines = (program_lines[:first_n] + [f'... {excluded} instrs not shown ...']
+ program_lines[-last_n:])
return '; '.join(program_lines) |
def sig(self, name, dtype=BIT, clk=None, syncRst=None, defVal=None):
"""
Create new signal in this context
:param clk: clk signal, if specified signal is synthesized
as SyncSignal
:param syncRst: synchronous reset signal
"""
if isinstance(defVal, RtlSignal):
assert defVal._const, \
"Initial value of register has to be constant"
_defVal = defVal._auto_cast(dtype)
elif isinstance(defVal, Value):
_defVal = defVal._auto_cast(dtype)
elif isinstance(defVal, InterfaceBase):
_defVal = defVal._sig
else:
_defVal = dtype.fromPy(defVal)
if clk is not None:
s = RtlSyncSignal(self, name, dtype, _defVal)
if syncRst is not None and defVal is None:
raise SigLvlConfErr(
"Probably forgotten default value on sync signal %s", name)
if syncRst is not None:
r = If(syncRst._isOn(),
RtlSignal.__call__(s, _defVal)
).Else(
RtlSignal.__call__(s, s.next)
)
else:
r = [RtlSignal.__call__(s, s.next)]
If(clk._onRisingEdge(),
r
)
else:
if syncRst:
raise SigLvlConfErr(
"Signal %s has reset but has no clk" % name)
s = RtlSignal(self, name, dtype, defVal=_defVal)
self.signals.add(s)
return s | Create new signal in this context
:param clk: clk signal, if specified signal is synthesized
as SyncSignal
:param syncRst: synchronous reset signal | Below is the the instruction that describes the task:
### Input:
Create new signal in this context
:param clk: clk signal, if specified signal is synthesized
as SyncSignal
:param syncRst: synchronous reset signal
### Response:
def sig(self, name, dtype=BIT, clk=None, syncRst=None, defVal=None):
"""
Create new signal in this context
:param clk: clk signal, if specified signal is synthesized
as SyncSignal
:param syncRst: synchronous reset signal
"""
if isinstance(defVal, RtlSignal):
assert defVal._const, \
"Initial value of register has to be constant"
_defVal = defVal._auto_cast(dtype)
elif isinstance(defVal, Value):
_defVal = defVal._auto_cast(dtype)
elif isinstance(defVal, InterfaceBase):
_defVal = defVal._sig
else:
_defVal = dtype.fromPy(defVal)
if clk is not None:
s = RtlSyncSignal(self, name, dtype, _defVal)
if syncRst is not None and defVal is None:
raise SigLvlConfErr(
"Probably forgotten default value on sync signal %s", name)
if syncRst is not None:
r = If(syncRst._isOn(),
RtlSignal.__call__(s, _defVal)
).Else(
RtlSignal.__call__(s, s.next)
)
else:
r = [RtlSignal.__call__(s, s.next)]
If(clk._onRisingEdge(),
r
)
else:
if syncRst:
raise SigLvlConfErr(
"Signal %s has reset but has no clk" % name)
s = RtlSignal(self, name, dtype, defVal=_defVal)
self.signals.add(s)
return s |
def kraken_request(self, method, endpoint, **kwargs):
"""Make a request to one of the kraken api endpoints.
Headers are automatically set to accept :data:`TWITCH_HEADER_ACCEPT`.
Also the client id from :data:`CLIENT_ID` will be set.
The url will be constructed of :data:`TWITCH_KRAKENURL` and
the given endpoint.
:param method: the request method
:type method: :class:`str`
:param endpoint: the endpoint of the kraken api.
The base url is automatically provided.
:type endpoint: :class:`str`
:param kwargs: keyword arguments of :meth:`requests.Session.request`
:returns: a resonse object
:rtype: :class:`requests.Response`
:raises: :class:`requests.HTTPError`
"""
url = TWITCH_KRAKENURL + endpoint
headers = kwargs.setdefault('headers', {})
headers['Accept'] = TWITCH_HEADER_ACCEPT
headers['Client-ID'] = CLIENT_ID # https://github.com/justintv/Twitch-API#rate-limits
return self.request(method, url, **kwargs) | Make a request to one of the kraken api endpoints.
Headers are automatically set to accept :data:`TWITCH_HEADER_ACCEPT`.
Also the client id from :data:`CLIENT_ID` will be set.
The url will be constructed of :data:`TWITCH_KRAKENURL` and
the given endpoint.
:param method: the request method
:type method: :class:`str`
:param endpoint: the endpoint of the kraken api.
The base url is automatically provided.
:type endpoint: :class:`str`
:param kwargs: keyword arguments of :meth:`requests.Session.request`
:returns: a resonse object
:rtype: :class:`requests.Response`
:raises: :class:`requests.HTTPError` | Below is the the instruction that describes the task:
### Input:
Make a request to one of the kraken api endpoints.
Headers are automatically set to accept :data:`TWITCH_HEADER_ACCEPT`.
Also the client id from :data:`CLIENT_ID` will be set.
The url will be constructed of :data:`TWITCH_KRAKENURL` and
the given endpoint.
:param method: the request method
:type method: :class:`str`
:param endpoint: the endpoint of the kraken api.
The base url is automatically provided.
:type endpoint: :class:`str`
:param kwargs: keyword arguments of :meth:`requests.Session.request`
:returns: a resonse object
:rtype: :class:`requests.Response`
:raises: :class:`requests.HTTPError`
### Response:
def kraken_request(self, method, endpoint, **kwargs):
"""Make a request to one of the kraken api endpoints.
Headers are automatically set to accept :data:`TWITCH_HEADER_ACCEPT`.
Also the client id from :data:`CLIENT_ID` will be set.
The url will be constructed of :data:`TWITCH_KRAKENURL` and
the given endpoint.
:param method: the request method
:type method: :class:`str`
:param endpoint: the endpoint of the kraken api.
The base url is automatically provided.
:type endpoint: :class:`str`
:param kwargs: keyword arguments of :meth:`requests.Session.request`
:returns: a resonse object
:rtype: :class:`requests.Response`
:raises: :class:`requests.HTTPError`
"""
url = TWITCH_KRAKENURL + endpoint
headers = kwargs.setdefault('headers', {})
headers['Accept'] = TWITCH_HEADER_ACCEPT
headers['Client-ID'] = CLIENT_ID # https://github.com/justintv/Twitch-API#rate-limits
return self.request(method, url, **kwargs) |
def sign(jar_file, cert_file, key_file, key_alias,
extra_certs=None, digest="SHA-256", output=None):
"""
Signs the jar (almost) identically to jarsigner.
:exception ManifestNotFoundError, CannotFindKeyTypeError
:return None
"""
mf = Manifest()
mf.load_from_jar(jar_file)
mf.add_jar_entries(jar_file, digest)
# create a signature manifest, and make it match the line separator
# style of the manifest it'll be digesting.
sf = SignatureManifest(linesep=mf.linesep)
sf_digest_algorithm = "SHA-256" if digest is None else digest
sf.digest_manifest(mf, sf_digest_algorithm)
sig_digest_algorithm = digest # No point to make it different
sig_block_extension = private_key_type(key_file)
sigdata = sf.get_signature(cert_file, key_file,
extra_certs, sig_digest_algorithm)
# We might just add new entries to the original JAR, but jarsigner puts
# all META-INF/ to the beginning of the archive. Let's do the same.
with NamedTemporaryFile() as new_jar_file:
new_jar = ZipFile(new_jar_file, "w", ZIP_DEFLATED)
new_jar.writestr("META-INF/MANIFEST.MF", mf.get_data())
new_jar.writestr("META-INF/%s.SF" % key_alias, sf.get_data())
new_jar.writestr("META-INF/%s.%s" % (key_alias, sig_block_extension),
sigdata)
jar = ZipFile(jar_file, "a")
for entry in jar.namelist():
# TODO: In Py2, namelist() can be of type unicode
if not entry.upper() == "META-INF/MANIFEST.MF":
new_jar.writestr(entry, jar.read(entry))
new_jar.close()
new_jar_file.flush()
copyfile(new_jar_file.name, jar_file if output is None else output) | Signs the jar (almost) identically to jarsigner.
:exception ManifestNotFoundError, CannotFindKeyTypeError
:return None | Below is the the instruction that describes the task:
### Input:
Signs the jar (almost) identically to jarsigner.
:exception ManifestNotFoundError, CannotFindKeyTypeError
:return None
### Response:
def sign(jar_file, cert_file, key_file, key_alias,
extra_certs=None, digest="SHA-256", output=None):
"""
Signs the jar (almost) identically to jarsigner.
:exception ManifestNotFoundError, CannotFindKeyTypeError
:return None
"""
mf = Manifest()
mf.load_from_jar(jar_file)
mf.add_jar_entries(jar_file, digest)
# create a signature manifest, and make it match the line separator
# style of the manifest it'll be digesting.
sf = SignatureManifest(linesep=mf.linesep)
sf_digest_algorithm = "SHA-256" if digest is None else digest
sf.digest_manifest(mf, sf_digest_algorithm)
sig_digest_algorithm = digest # No point to make it different
sig_block_extension = private_key_type(key_file)
sigdata = sf.get_signature(cert_file, key_file,
extra_certs, sig_digest_algorithm)
# We might just add new entries to the original JAR, but jarsigner puts
# all META-INF/ to the beginning of the archive. Let's do the same.
with NamedTemporaryFile() as new_jar_file:
new_jar = ZipFile(new_jar_file, "w", ZIP_DEFLATED)
new_jar.writestr("META-INF/MANIFEST.MF", mf.get_data())
new_jar.writestr("META-INF/%s.SF" % key_alias, sf.get_data())
new_jar.writestr("META-INF/%s.%s" % (key_alias, sig_block_extension),
sigdata)
jar = ZipFile(jar_file, "a")
for entry in jar.namelist():
# TODO: In Py2, namelist() can be of type unicode
if not entry.upper() == "META-INF/MANIFEST.MF":
new_jar.writestr(entry, jar.read(entry))
new_jar.close()
new_jar_file.flush()
copyfile(new_jar_file.name, jar_file if output is None else output) |
def _req_files_edit(self, fid, file_name=None, is_mark=0):
"""Edit a file or directory"""
url = self.web_api_url + '/edit'
data = locals()
del data['self']
req = Request(method='POST', url=url, data=data)
res = self.http.send(req)
if res.state:
return True
else:
raise RequestFailure('Failed to access files API.') | Edit a file or directory | Below is the the instruction that describes the task:
### Input:
Edit a file or directory
### Response:
def _req_files_edit(self, fid, file_name=None, is_mark=0):
"""Edit a file or directory"""
url = self.web_api_url + '/edit'
data = locals()
del data['self']
req = Request(method='POST', url=url, data=data)
res = self.http.send(req)
if res.state:
return True
else:
raise RequestFailure('Failed to access files API.') |
def is_isolated_list_abundance(graph: BELGraph, node: BaseEntity, cls: Type[ListAbundance] = ListAbundance) -> bool:
"""Return if the node is a list abundance but has no qualified edges."""
return (
isinstance(node, cls) and
0 == graph.in_degree(node) and
all(
data[RELATION] == HAS_COMPONENT
for _, __, data in graph.out_edges(node, data=True)
)
) | Return if the node is a list abundance but has no qualified edges. | Below is the the instruction that describes the task:
### Input:
Return if the node is a list abundance but has no qualified edges.
### Response:
def is_isolated_list_abundance(graph: BELGraph, node: BaseEntity, cls: Type[ListAbundance] = ListAbundance) -> bool:
"""Return if the node is a list abundance but has no qualified edges."""
return (
isinstance(node, cls) and
0 == graph.in_degree(node) and
all(
data[RELATION] == HAS_COMPONENT
for _, __, data in graph.out_edges(node, data=True)
)
) |
def patch_constructor(model):
"""
Monkey patches the original model to rewrite fields names in __init__
"""
old_init = model.__init__
def new_init(self, *args, **kwargs):
self._mt_init = True
populate_translation_fields(self.__class__, kwargs)
for key, val in list(kwargs.items()):
new_key = rewrite_lookup_key(model, key)
# Old key is intentionally left in case old_init wants to play with it
kwargs.setdefault(new_key, val)
old_init(self, *args, **kwargs)
model.__init__ = new_init | Monkey patches the original model to rewrite fields names in __init__ | Below is the the instruction that describes the task:
### Input:
Monkey patches the original model to rewrite fields names in __init__
### Response:
def patch_constructor(model):
"""
Monkey patches the original model to rewrite fields names in __init__
"""
old_init = model.__init__
def new_init(self, *args, **kwargs):
self._mt_init = True
populate_translation_fields(self.__class__, kwargs)
for key, val in list(kwargs.items()):
new_key = rewrite_lookup_key(model, key)
# Old key is intentionally left in case old_init wants to play with it
kwargs.setdefault(new_key, val)
old_init(self, *args, **kwargs)
model.__init__ = new_init |
def search(self):
"""
Execute solr search query
"""
params = self.solr_params()
logging.info("PARAMS=" + str(params))
results = self.solr.search(**params)
logging.info("Docs found: {}".format(results.hits))
return self._process_search_results(results) | Execute solr search query | Below is the the instruction that describes the task:
### Input:
Execute solr search query
### Response:
def search(self):
"""
Execute solr search query
"""
params = self.solr_params()
logging.info("PARAMS=" + str(params))
results = self.solr.search(**params)
logging.info("Docs found: {}".format(results.hits))
return self._process_search_results(results) |
def copyresource( resource, filename, destdir ):
"""
Copy a resource file to a destination
"""
data = pkgutil.get_data(resource, os.path.join('resources',filename) )
#log.info( "Installing %s", os.path.join(destdir,filename) )
with open( os.path.join(destdir,filename), 'wb' ) as fp:
fp.write(data) | Copy a resource file to a destination | Below is the the instruction that describes the task:
### Input:
Copy a resource file to a destination
### Response:
def copyresource( resource, filename, destdir ):
"""
Copy a resource file to a destination
"""
data = pkgutil.get_data(resource, os.path.join('resources',filename) )
#log.info( "Installing %s", os.path.join(destdir,filename) )
with open( os.path.join(destdir,filename), 'wb' ) as fp:
fp.write(data) |
def imagesc(data, title=None, fig='current', ax=None):
'''Simple alias for a Matlab-like imshow function.'''
ax = _get_axis(fig, ax, False)
ax.imshow(data, interpolation='nearest', aspect='auto')
if title:
ax.set_title(title)
return plt.show | Simple alias for a Matlab-like imshow function. | Below is the the instruction that describes the task:
### Input:
Simple alias for a Matlab-like imshow function.
### Response:
def imagesc(data, title=None, fig='current', ax=None):
'''Simple alias for a Matlab-like imshow function.'''
ax = _get_axis(fig, ax, False)
ax.imshow(data, interpolation='nearest', aspect='auto')
if title:
ax.set_title(title)
return plt.show |
def connect(self, interface, event, handler):
"""Connect to a DBus signal. Returns subscription id (int)."""
object_path = self.object_path
return self.bus.connect(interface, event, object_path, handler) | Connect to a DBus signal. Returns subscription id (int). | Below is the the instruction that describes the task:
### Input:
Connect to a DBus signal. Returns subscription id (int).
### Response:
def connect(self, interface, event, handler):
"""Connect to a DBus signal. Returns subscription id (int)."""
object_path = self.object_path
return self.bus.connect(interface, event, object_path, handler) |
def fill(self, source, size=-1):
"""Fill the buffer with bytes from source until one of these
conditions is met:
* size bytes have been read from source (if size >= 0);
* chunk_size bytes have been read from source;
* no more bytes can be read from source;
Returns the number of new bytes added to the buffer.
Note: all previously-read bytes in the buffer are removed.
Parameters
----------
source: a file-like object, or iterable/list that contains bytes
The source of bytes to fill the buffer with. If this argument has
the `read` attribute, it's assumed to be a file-like object and
`read` is called to get the bytes; otherwise it's assumed to be an
iterable or list that contains bytes, and a for loop is used to get
the bytes.
size: int, optional
The number of bytes to try to read from source. If not supplied,
negative, or larger than the buffer's chunk_size, then chunk_size
bytes are read. Note that if source is an iterable or list, then
it's possible that more than size bytes will be read if iterating
over source produces more than one byte at a time.
Returns
-------
int, the number of new bytes added to the buffer.
"""
size = size if size >= 0 else self._chunk_size
size = min(size, self._chunk_size)
if self._pos != 0:
self._bytes = self._bytes[self._pos:]
self._pos = 0
if hasattr(source, 'read'):
new_bytes = source.read(size)
else:
new_bytes = b''
for more_bytes in source:
new_bytes += more_bytes
if len(new_bytes) >= size:
break
self._bytes += new_bytes
return len(new_bytes) | Fill the buffer with bytes from source until one of these
conditions is met:
* size bytes have been read from source (if size >= 0);
* chunk_size bytes have been read from source;
* no more bytes can be read from source;
Returns the number of new bytes added to the buffer.
Note: all previously-read bytes in the buffer are removed.
Parameters
----------
source: a file-like object, or iterable/list that contains bytes
The source of bytes to fill the buffer with. If this argument has
the `read` attribute, it's assumed to be a file-like object and
`read` is called to get the bytes; otherwise it's assumed to be an
iterable or list that contains bytes, and a for loop is used to get
the bytes.
size: int, optional
The number of bytes to try to read from source. If not supplied,
negative, or larger than the buffer's chunk_size, then chunk_size
bytes are read. Note that if source is an iterable or list, then
it's possible that more than size bytes will be read if iterating
over source produces more than one byte at a time.
Returns
-------
int, the number of new bytes added to the buffer. | Below is the the instruction that describes the task:
### Input:
Fill the buffer with bytes from source until one of these
conditions is met:
* size bytes have been read from source (if size >= 0);
* chunk_size bytes have been read from source;
* no more bytes can be read from source;
Returns the number of new bytes added to the buffer.
Note: all previously-read bytes in the buffer are removed.
Parameters
----------
source: a file-like object, or iterable/list that contains bytes
The source of bytes to fill the buffer with. If this argument has
the `read` attribute, it's assumed to be a file-like object and
`read` is called to get the bytes; otherwise it's assumed to be an
iterable or list that contains bytes, and a for loop is used to get
the bytes.
size: int, optional
The number of bytes to try to read from source. If not supplied,
negative, or larger than the buffer's chunk_size, then chunk_size
bytes are read. Note that if source is an iterable or list, then
it's possible that more than size bytes will be read if iterating
over source produces more than one byte at a time.
Returns
-------
int, the number of new bytes added to the buffer.
### Response:
def fill(self, source, size=-1):
"""Fill the buffer with bytes from source until one of these
conditions is met:
* size bytes have been read from source (if size >= 0);
* chunk_size bytes have been read from source;
* no more bytes can be read from source;
Returns the number of new bytes added to the buffer.
Note: all previously-read bytes in the buffer are removed.
Parameters
----------
source: a file-like object, or iterable/list that contains bytes
The source of bytes to fill the buffer with. If this argument has
the `read` attribute, it's assumed to be a file-like object and
`read` is called to get the bytes; otherwise it's assumed to be an
iterable or list that contains bytes, and a for loop is used to get
the bytes.
size: int, optional
The number of bytes to try to read from source. If not supplied,
negative, or larger than the buffer's chunk_size, then chunk_size
bytes are read. Note that if source is an iterable or list, then
it's possible that more than size bytes will be read if iterating
over source produces more than one byte at a time.
Returns
-------
int, the number of new bytes added to the buffer.
"""
size = size if size >= 0 else self._chunk_size
size = min(size, self._chunk_size)
if self._pos != 0:
self._bytes = self._bytes[self._pos:]
self._pos = 0
if hasattr(source, 'read'):
new_bytes = source.read(size)
else:
new_bytes = b''
for more_bytes in source:
new_bytes += more_bytes
if len(new_bytes) >= size:
break
self._bytes += new_bytes
return len(new_bytes) |
def add_input_file(self, filename):
"""
Add filename as a necessary input file for this DAG node.
@param filename: input filename to add
"""
if filename not in self.__input_files:
self.__input_files.append(filename) | Add filename as a necessary input file for this DAG node.
@param filename: input filename to add | Below is the the instruction that describes the task:
### Input:
Add filename as a necessary input file for this DAG node.
@param filename: input filename to add
### Response:
def add_input_file(self, filename):
"""
Add filename as a necessary input file for this DAG node.
@param filename: input filename to add
"""
if filename not in self.__input_files:
self.__input_files.append(filename) |
def getOptionsAndArgs( self ):
'''Returns the tuple ( options, args )
options - a dictionary of option names and values
args - a sequence of args'''
option_values = self._getOptions()
args = self._getArgs()
return option_values, args | Returns the tuple ( options, args )
options - a dictionary of option names and values
args - a sequence of args | Below is the the instruction that describes the task:
### Input:
Returns the tuple ( options, args )
options - a dictionary of option names and values
args - a sequence of args
### Response:
def getOptionsAndArgs( self ):
'''Returns the tuple ( options, args )
options - a dictionary of option names and values
args - a sequence of args'''
option_values = self._getOptions()
args = self._getArgs()
return option_values, args |
def getPrecision(data_type_oid, type_modifier):
"""
Returns the precision for the given Vertica type with consideration of
the type modifier.
For numerics, precision is the total number of digits (in base 10) that can
fit in the type.
For intervals, time and timestamps, precision is the number of digits to the
right of the decimal point in the seconds portion of the time.
The type modifier of -1 is used when the size of a type is unknown. In those
cases we assume the maximum possible size.
"""
if data_type_oid == VerticaType.NUMERIC:
if type_modifier == -1:
return 1024
return ((type_modifier - 4) >> 16) & 0xFFFF
elif data_type_oid in (VerticaType.TIME, VerticaType.TIMETZ,
VerticaType.TIMESTAMP, VerticaType.TIMESTAMPTZ,
VerticaType.INTERVAL, VerticaType.INTERVALYM):
if type_modifier == -1:
return 6
return type_modifier & 0xF
else:
return None | Returns the precision for the given Vertica type with consideration of
the type modifier.
For numerics, precision is the total number of digits (in base 10) that can
fit in the type.
For intervals, time and timestamps, precision is the number of digits to the
right of the decimal point in the seconds portion of the time.
The type modifier of -1 is used when the size of a type is unknown. In those
cases we assume the maximum possible size. | Below is the the instruction that describes the task:
### Input:
Returns the precision for the given Vertica type with consideration of
the type modifier.
For numerics, precision is the total number of digits (in base 10) that can
fit in the type.
For intervals, time and timestamps, precision is the number of digits to the
right of the decimal point in the seconds portion of the time.
The type modifier of -1 is used when the size of a type is unknown. In those
cases we assume the maximum possible size.
### Response:
def getPrecision(data_type_oid, type_modifier):
"""
Returns the precision for the given Vertica type with consideration of
the type modifier.
For numerics, precision is the total number of digits (in base 10) that can
fit in the type.
For intervals, time and timestamps, precision is the number of digits to the
right of the decimal point in the seconds portion of the time.
The type modifier of -1 is used when the size of a type is unknown. In those
cases we assume the maximum possible size.
"""
if data_type_oid == VerticaType.NUMERIC:
if type_modifier == -1:
return 1024
return ((type_modifier - 4) >> 16) & 0xFFFF
elif data_type_oid in (VerticaType.TIME, VerticaType.TIMETZ,
VerticaType.TIMESTAMP, VerticaType.TIMESTAMPTZ,
VerticaType.INTERVAL, VerticaType.INTERVALYM):
if type_modifier == -1:
return 6
return type_modifier & 0xF
else:
return None |
def get_pkg_version_module(packagename, fromlist=None):
"""Returns the package's .version module generated by
`astropy_helpers.version_helpers.generate_version_py`. Raises an
ImportError if the version module is not found.
If ``fromlist`` is an iterable, return a tuple of the members of the
version module corresponding to the member names given in ``fromlist``.
Raises an `AttributeError` if any of these module members are not found.
"""
version = import_file(os.path.join(packagename, 'version.py'), name='version')
if fromlist:
return tuple(getattr(version, member) for member in fromlist)
else:
return version | Returns the package's .version module generated by
`astropy_helpers.version_helpers.generate_version_py`. Raises an
ImportError if the version module is not found.
If ``fromlist`` is an iterable, return a tuple of the members of the
version module corresponding to the member names given in ``fromlist``.
Raises an `AttributeError` if any of these module members are not found. | Below is the the instruction that describes the task:
### Input:
Returns the package's .version module generated by
`astropy_helpers.version_helpers.generate_version_py`. Raises an
ImportError if the version module is not found.
If ``fromlist`` is an iterable, return a tuple of the members of the
version module corresponding to the member names given in ``fromlist``.
Raises an `AttributeError` if any of these module members are not found.
### Response:
def get_pkg_version_module(packagename, fromlist=None):
"""Returns the package's .version module generated by
`astropy_helpers.version_helpers.generate_version_py`. Raises an
ImportError if the version module is not found.
If ``fromlist`` is an iterable, return a tuple of the members of the
version module corresponding to the member names given in ``fromlist``.
Raises an `AttributeError` if any of these module members are not found.
"""
version = import_file(os.path.join(packagename, 'version.py'), name='version')
if fromlist:
return tuple(getattr(version, member) for member in fromlist)
else:
return version |
def read(calc_id, username=None):
"""
:param calc_id: a calculation ID
:param username: if given, restrict the search to the user's calculations
:returns: the associated DataStore instance
"""
if isinstance(calc_id, str) or calc_id < 0 and not username:
# get the last calculation in the datastore of the current user
return datastore.read(calc_id)
job = logs.dbcmd('get_job', calc_id, username)
if job:
return datastore.read(job.ds_calc_dir + '.hdf5')
else:
# calc_id can be present in the datastore and not in the database:
# this happens if the calculation was run with `oq run`
return datastore.read(calc_id) | :param calc_id: a calculation ID
:param username: if given, restrict the search to the user's calculations
:returns: the associated DataStore instance | Below is the the instruction that describes the task:
### Input:
:param calc_id: a calculation ID
:param username: if given, restrict the search to the user's calculations
:returns: the associated DataStore instance
### Response:
def read(calc_id, username=None):
"""
:param calc_id: a calculation ID
:param username: if given, restrict the search to the user's calculations
:returns: the associated DataStore instance
"""
if isinstance(calc_id, str) or calc_id < 0 and not username:
# get the last calculation in the datastore of the current user
return datastore.read(calc_id)
job = logs.dbcmd('get_job', calc_id, username)
if job:
return datastore.read(job.ds_calc_dir + '.hdf5')
else:
# calc_id can be present in the datastore and not in the database:
# this happens if the calculation was run with `oq run`
return datastore.read(calc_id) |
def clear(self):
"""Clear the currently set extent."""
self.tool.reset()
self._populate_coordinates()
# Revert to using hazard, exposure and view as basis for analysis
self.hazard_exposure_view_extent.setChecked(True) | Clear the currently set extent. | Below is the the instruction that describes the task:
### Input:
Clear the currently set extent.
### Response:
def clear(self):
"""Clear the currently set extent."""
self.tool.reset()
self._populate_coordinates()
# Revert to using hazard, exposure and view as basis for analysis
self.hazard_exposure_view_extent.setChecked(True) |
def get_asset_spatial_assignment_session_for_repository(self, repository_id, proxy):
"""Gets the session for assigning spatial coverage of an asset for
the given repository.
arg: repository_id (osid.id.Id): the Id of the repository
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetSpatialAssignmentSession) - an
AssetSpatialAssignmentSession
raise: NotFound - repository_id not found
raise: NullArgument - repository_id is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_spatial_assignment() or
supports_visible_federation() is false
compliance: optional - This method must be implemented if
supports_asset_spatial_assignment() and
supports_visible_federation() are true.
"""
if not repository_id:
raise NullArgument()
if not self.supports_asset_spatial_assignment() or not self.supports_visible_federation():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed('import error')
proxy = self._convert_proxy(proxy)
try:
session = sessions.AssetSpatialAssignmentSession(repository_id, proxy, runtime=self._runtime)
except AttributeError:
raise OperationFailed('attribute error')
return session | Gets the session for assigning spatial coverage of an asset for
the given repository.
arg: repository_id (osid.id.Id): the Id of the repository
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetSpatialAssignmentSession) - an
AssetSpatialAssignmentSession
raise: NotFound - repository_id not found
raise: NullArgument - repository_id is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_spatial_assignment() or
supports_visible_federation() is false
compliance: optional - This method must be implemented if
supports_asset_spatial_assignment() and
supports_visible_federation() are true. | Below is the the instruction that describes the task:
### Input:
Gets the session for assigning spatial coverage of an asset for
the given repository.
arg: repository_id (osid.id.Id): the Id of the repository
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetSpatialAssignmentSession) - an
AssetSpatialAssignmentSession
raise: NotFound - repository_id not found
raise: NullArgument - repository_id is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_spatial_assignment() or
supports_visible_federation() is false
compliance: optional - This method must be implemented if
supports_asset_spatial_assignment() and
supports_visible_federation() are true.
### Response:
def get_asset_spatial_assignment_session_for_repository(self, repository_id, proxy):
"""Gets the session for assigning spatial coverage of an asset for
the given repository.
arg: repository_id (osid.id.Id): the Id of the repository
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetSpatialAssignmentSession) - an
AssetSpatialAssignmentSession
raise: NotFound - repository_id not found
raise: NullArgument - repository_id is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_spatial_assignment() or
supports_visible_federation() is false
compliance: optional - This method must be implemented if
supports_asset_spatial_assignment() and
supports_visible_federation() are true.
"""
if not repository_id:
raise NullArgument()
if not self.supports_asset_spatial_assignment() or not self.supports_visible_federation():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed('import error')
proxy = self._convert_proxy(proxy)
try:
session = sessions.AssetSpatialAssignmentSession(repository_id, proxy, runtime=self._runtime)
except AttributeError:
raise OperationFailed('attribute error')
return session |
def load_yaml_file(yaml_file):
""" load yaml file and check file content format
"""
with io.open(yaml_file, 'r', encoding='utf-8') as stream:
yaml_content = yaml.load(stream)
_check_format(yaml_file, yaml_content)
return yaml_content | load yaml file and check file content format | Below is the the instruction that describes the task:
### Input:
load yaml file and check file content format
### Response:
def load_yaml_file(yaml_file):
""" load yaml file and check file content format
"""
with io.open(yaml_file, 'r', encoding='utf-8') as stream:
yaml_content = yaml.load(stream)
_check_format(yaml_file, yaml_content)
return yaml_content |
def unconvert_coord_object(tile):
"""Convert rawr_tiles.tile.Tile -> ModestMaps.Core.Coordinate"""
assert isinstance(tile, Tile)
return Coordinate(zoom=tile.z, column=tile.x, row=tile.y) | Convert rawr_tiles.tile.Tile -> ModestMaps.Core.Coordinate | Below is the the instruction that describes the task:
### Input:
Convert rawr_tiles.tile.Tile -> ModestMaps.Core.Coordinate
### Response:
def unconvert_coord_object(tile):
"""Convert rawr_tiles.tile.Tile -> ModestMaps.Core.Coordinate"""
assert isinstance(tile, Tile)
return Coordinate(zoom=tile.z, column=tile.x, row=tile.y) |
def scoped_format(txt, **objects):
"""Format a string with respect to a set of objects' attributes.
Example:
>>> Class Foo(object):
>>> def __init__(self):
>>> self.name = "Dave"
>>> print scoped_format("hello {foo.name}", foo=Foo())
hello Dave
Args:
objects (dict): Dict of objects to format with. If a value is a dict,
its values, and any further neted dicts, will also format with dot
notation.
pretty (bool): See `ObjectStringFormatter`.
expand (bool): See `ObjectStringFormatter`.
"""
pretty = objects.pop("pretty", RecursiveAttribute.format_pretty)
expand = objects.pop("expand", RecursiveAttribute.format_expand)
attr = RecursiveAttribute(objects, read_only=True)
formatter = scoped_formatter(**objects)
return formatter.format(txt, pretty=pretty, expand=expand) | Format a string with respect to a set of objects' attributes.
Example:
>>> Class Foo(object):
>>> def __init__(self):
>>> self.name = "Dave"
>>> print scoped_format("hello {foo.name}", foo=Foo())
hello Dave
Args:
objects (dict): Dict of objects to format with. If a value is a dict,
its values, and any further neted dicts, will also format with dot
notation.
pretty (bool): See `ObjectStringFormatter`.
expand (bool): See `ObjectStringFormatter`. | Below is the the instruction that describes the task:
### Input:
Format a string with respect to a set of objects' attributes.
Example:
>>> Class Foo(object):
>>> def __init__(self):
>>> self.name = "Dave"
>>> print scoped_format("hello {foo.name}", foo=Foo())
hello Dave
Args:
objects (dict): Dict of objects to format with. If a value is a dict,
its values, and any further neted dicts, will also format with dot
notation.
pretty (bool): See `ObjectStringFormatter`.
expand (bool): See `ObjectStringFormatter`.
### Response:
def scoped_format(txt, **objects):
"""Format a string with respect to a set of objects' attributes.
Example:
>>> Class Foo(object):
>>> def __init__(self):
>>> self.name = "Dave"
>>> print scoped_format("hello {foo.name}", foo=Foo())
hello Dave
Args:
objects (dict): Dict of objects to format with. If a value is a dict,
its values, and any further neted dicts, will also format with dot
notation.
pretty (bool): See `ObjectStringFormatter`.
expand (bool): See `ObjectStringFormatter`.
"""
pretty = objects.pop("pretty", RecursiveAttribute.format_pretty)
expand = objects.pop("expand", RecursiveAttribute.format_expand)
attr = RecursiveAttribute(objects, read_only=True)
formatter = scoped_formatter(**objects)
return formatter.format(txt, pretty=pretty, expand=expand) |
def nodes_to_dict_of_dataframes(grid, nodes, lv_transformer=True):
"""
Creates dictionary of dataframes containing grid
Parameters
----------
grid: ding0.Network
nodes: list of ding0 grid components objects
Nodes of the grid graph
lv_transformer: bool, True
Toggle transformer representation in power flow analysis
Returns:
components: dict of pandas.DataFrame
DataFrames contain components attributes. Dict is keyed by components
type
components_data: dict of pandas.DataFrame
DataFrame containing components time-varying data
"""
generator_instances = [MVStationDing0, GeneratorDing0]
# TODO: MVStationDing0 has a slack generator
cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
cos_phi_feedin = cfg_ding0.get('assumptions', 'cos_phi_gen')
srid = int(cfg_ding0.get('geo', 'srid'))
load_in_generation_case = cfg_ding0.get('assumptions',
'load_in_generation_case')
generation_in_load_case = cfg_ding0.get('assumptions',
'generation_in_load_case')
Q_factor_load = tan(acos(cos_phi_load))
Q_factor_generation = tan(acos(cos_phi_feedin))
voltage_set_slack = cfg_ding0.get("mv_routing_tech_constraints",
"mv_station_v_level_operation")
kw2mw = 1e-3
# define dictionaries
buses = {'bus_id': [], 'v_nom': [], 'geom': [], 'grid_id': []}
bus_v_mag_set = {'bus_id': [], 'temp_id': [], 'v_mag_pu_set': [],
'grid_id': []}
generator = {'generator_id': [], 'bus': [], 'control': [], 'grid_id': [],
'p_nom': []}
generator_pq_set = {'generator_id': [], 'temp_id': [], 'p_set': [],
'grid_id': [], 'q_set': []}
load = {'load_id': [], 'bus': [], 'grid_id': []}
load_pq_set = {'load_id': [], 'temp_id': [], 'p_set': [],
'grid_id': [], 'q_set': []}
# # TODO: consider other implications of `lv_transformer is True`
# if lv_transformer is True:
# bus_instances.append(Transformer)
# # TODO: only for debugging, remove afterwards
# import csv
# nodeslist = sorted([node.__repr__() for node in nodes
# if node not in grid.graph_isolated_nodes()])
# with open('/home/guido/ding0_debug/nodes_via_dataframe.csv', 'w', newline='') as csvfile:
# writer = csv.writer(csvfile, delimiter='\n')
# writer.writerow(nodeslist)
for node in nodes:
if node not in grid.graph_isolated_nodes():
# buses only
if isinstance(node, MVCableDistributorDing0):
buses['bus_id'].append(node.pypsa_id)
buses['v_nom'].append(grid.v_level)
buses['geom'].append(from_shape(node.geo_data, srid=srid))
buses['grid_id'].append(grid.id_db)
bus_v_mag_set['bus_id'].append(node.pypsa_id)
bus_v_mag_set['temp_id'].append(1)
bus_v_mag_set['v_mag_pu_set'].append([1, 1])
bus_v_mag_set['grid_id'].append(grid.id_db)
# bus + generator
elif isinstance(node, tuple(generator_instances)):
# slack generator
if isinstance(node, MVStationDing0):
logger.info('Only MV side bus of MVStation will be added.')
generator['generator_id'].append(
'_'.join(['MV', str(grid.id_db), 'slack']))
generator['control'].append('Slack')
generator['p_nom'].append(0)
bus_v_mag_set['v_mag_pu_set'].append(
[voltage_set_slack, voltage_set_slack])
# other generators
if isinstance(node, GeneratorDing0):
generator['generator_id'].append('_'.join(
['MV', str(grid.id_db), 'gen', str(node.id_db)]))
generator['control'].append('PQ')
generator['p_nom'].append(node.capacity * node.capacity_factor)
generator_pq_set['generator_id'].append('_'.join(
['MV', str(grid.id_db), 'gen', str(node.id_db)]))
generator_pq_set['temp_id'].append(1)
generator_pq_set['p_set'].append(
[node.capacity * node.capacity_factor * kw2mw * generation_in_load_case,
node.capacity * node.capacity_factor * kw2mw])
generator_pq_set['q_set'].append(
[node.capacity * node.capacity_factor * kw2mw * Q_factor_generation * generation_in_load_case,
node.capacity * node.capacity_factor * kw2mw * Q_factor_generation])
generator_pq_set['grid_id'].append(grid.id_db)
bus_v_mag_set['v_mag_pu_set'].append([1, 1])
buses['bus_id'].append(node.pypsa_id)
buses['v_nom'].append(grid.v_level)
buses['geom'].append(from_shape(node.geo_data, srid=srid))
buses['grid_id'].append(grid.id_db)
bus_v_mag_set['bus_id'].append(node.pypsa_id)
bus_v_mag_set['temp_id'].append(1)
bus_v_mag_set['grid_id'].append(grid.id_db)
generator['grid_id'].append(grid.id_db)
generator['bus'].append(node.pypsa_id)
# aggregated load at hv/mv substation
elif isinstance(node, LVLoadAreaCentreDing0):
load['load_id'].append(node.pypsa_id)
load['bus'].append('_'.join(['HV', str(grid.id_db), 'trd']))
load['grid_id'].append(grid.id_db)
load_pq_set['load_id'].append(node.pypsa_id)
load_pq_set['temp_id'].append(1)
load_pq_set['p_set'].append(
[node.lv_load_area.peak_load * kw2mw,
node.lv_load_area.peak_load * kw2mw * load_in_generation_case])
load_pq_set['q_set'].append(
[node.lv_load_area.peak_load * kw2mw * Q_factor_load,
node.lv_load_area.peak_load * kw2mw * Q_factor_load * load_in_generation_case])
load_pq_set['grid_id'].append(grid.id_db)
# generator representing generation capacity of aggregate LA
# analogously to load, generation is connected directly to
# HV-MV substation
generator['generator_id'].append('_'.join(
['MV', str(grid.id_db), 'lcg', str(node.id_db)]))
generator['control'].append('PQ')
generator['p_nom'].append(node.lv_load_area.peak_generation)
generator['grid_id'].append(grid.id_db)
generator['bus'].append('_'.join(['HV', str(grid.id_db), 'trd']))
generator_pq_set['generator_id'].append('_'.join(
['MV', str(grid.id_db), 'lcg', str(node.id_db)]))
generator_pq_set['temp_id'].append(1)
generator_pq_set['p_set'].append(
[node.lv_load_area.peak_generation * kw2mw * generation_in_load_case,
node.lv_load_area.peak_generation * kw2mw])
generator_pq_set['q_set'].append(
[node.lv_load_area.peak_generation * kw2mw * Q_factor_generation * generation_in_load_case,
node.lv_load_area.peak_generation * kw2mw * Q_factor_generation])
generator_pq_set['grid_id'].append(grid.id_db)
# bus + aggregate load of lv grids (at mv/ls substation)
elif isinstance(node, LVStationDing0):
# Aggregated load representing load in LV grid
load['load_id'].append(
'_'.join(['MV', str(grid.id_db), 'loa', str(node.id_db)]))
load['bus'].append(node.pypsa_id)
load['grid_id'].append(grid.id_db)
load_pq_set['load_id'].append(
'_'.join(['MV', str(grid.id_db), 'loa', str(node.id_db)]))
load_pq_set['temp_id'].append(1)
load_pq_set['p_set'].append(
[node.peak_load * kw2mw,
node.peak_load * kw2mw * load_in_generation_case])
load_pq_set['q_set'].append(
[node.peak_load * kw2mw * Q_factor_load,
node.peak_load * kw2mw * Q_factor_load * load_in_generation_case])
load_pq_set['grid_id'].append(grid.id_db)
# bus at primary MV-LV transformer side
buses['bus_id'].append(node.pypsa_id)
buses['v_nom'].append(grid.v_level)
buses['geom'].append(from_shape(node.geo_data, srid=srid))
buses['grid_id'].append(grid.id_db)
bus_v_mag_set['bus_id'].append(node.pypsa_id)
bus_v_mag_set['temp_id'].append(1)
bus_v_mag_set['v_mag_pu_set'].append([1, 1])
bus_v_mag_set['grid_id'].append(grid.id_db)
# generator representing generation capacity in LV grid
generator['generator_id'].append('_'.join(
['MV', str(grid.id_db), 'gen', str(node.id_db)]))
generator['control'].append('PQ')
generator['p_nom'].append(node.peak_generation)
generator['grid_id'].append(grid.id_db)
generator['bus'].append(node.pypsa_id)
generator_pq_set['generator_id'].append('_'.join(
['MV', str(grid.id_db), 'gen', str(node.id_db)]))
generator_pq_set['temp_id'].append(1)
generator_pq_set['p_set'].append(
[node.peak_generation * kw2mw * generation_in_load_case,
node.peak_generation * kw2mw])
generator_pq_set['q_set'].append(
[node.peak_generation * kw2mw * Q_factor_generation * generation_in_load_case,
node.peak_generation * kw2mw * Q_factor_generation])
generator_pq_set['grid_id'].append(grid.id_db)
elif isinstance(node, CircuitBreakerDing0):
# TODO: remove this elif-case if CircuitBreaker are removed from graph
continue
else:
raise TypeError("Node of type", node, "cannot be handled here")
else:
if not isinstance(node, CircuitBreakerDing0):
add_info = "LA is aggr. {0}".format(
node.lv_load_area.is_aggregated)
else:
add_info = ""
logger.warning("Node {0} is not connected to the graph and will " \
"be omitted in power flow analysis. {1}".format(
node, add_info))
components = {'Bus': DataFrame(buses).set_index('bus_id'),
'Generator': DataFrame(generator).set_index('generator_id'),
'Load': DataFrame(load).set_index('load_id')}
components_data = {'Bus': DataFrame(bus_v_mag_set).set_index('bus_id'),
'Generator': DataFrame(generator_pq_set).set_index(
'generator_id'),
'Load': DataFrame(load_pq_set).set_index('load_id')}
# with open('/home/guido/ding0_debug/number_of_nodes_buses.csv', 'a') as csvfile:
# csvfile.write(','.join(['\n', str(len(nodes)), str(len(grid.graph_isolated_nodes())), str(len(components['Bus']))]))
return components, components_data | Creates dictionary of dataframes containing grid
Parameters
----------
grid: ding0.Network
nodes: list of ding0 grid components objects
Nodes of the grid graph
lv_transformer: bool, True
Toggle transformer representation in power flow analysis
Returns:
components: dict of pandas.DataFrame
DataFrames contain components attributes. Dict is keyed by components
type
components_data: dict of pandas.DataFrame
DataFrame containing components time-varying data | Below is the the instruction that describes the task:
### Input:
Creates dictionary of dataframes containing grid
Parameters
----------
grid: ding0.Network
nodes: list of ding0 grid components objects
Nodes of the grid graph
lv_transformer: bool, True
Toggle transformer representation in power flow analysis
Returns:
components: dict of pandas.DataFrame
DataFrames contain components attributes. Dict is keyed by components
type
components_data: dict of pandas.DataFrame
DataFrame containing components time-varying data
### Response:
def nodes_to_dict_of_dataframes(grid, nodes, lv_transformer=True):
"""
Creates dictionary of dataframes containing grid
Parameters
----------
grid: ding0.Network
nodes: list of ding0 grid components objects
Nodes of the grid graph
lv_transformer: bool, True
Toggle transformer representation in power flow analysis
Returns:
components: dict of pandas.DataFrame
DataFrames contain components attributes. Dict is keyed by components
type
components_data: dict of pandas.DataFrame
DataFrame containing components time-varying data
"""
generator_instances = [MVStationDing0, GeneratorDing0]
# TODO: MVStationDing0 has a slack generator
cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
cos_phi_feedin = cfg_ding0.get('assumptions', 'cos_phi_gen')
srid = int(cfg_ding0.get('geo', 'srid'))
load_in_generation_case = cfg_ding0.get('assumptions',
'load_in_generation_case')
generation_in_load_case = cfg_ding0.get('assumptions',
'generation_in_load_case')
Q_factor_load = tan(acos(cos_phi_load))
Q_factor_generation = tan(acos(cos_phi_feedin))
voltage_set_slack = cfg_ding0.get("mv_routing_tech_constraints",
"mv_station_v_level_operation")
kw2mw = 1e-3
# define dictionaries
buses = {'bus_id': [], 'v_nom': [], 'geom': [], 'grid_id': []}
bus_v_mag_set = {'bus_id': [], 'temp_id': [], 'v_mag_pu_set': [],
'grid_id': []}
generator = {'generator_id': [], 'bus': [], 'control': [], 'grid_id': [],
'p_nom': []}
generator_pq_set = {'generator_id': [], 'temp_id': [], 'p_set': [],
'grid_id': [], 'q_set': []}
load = {'load_id': [], 'bus': [], 'grid_id': []}
load_pq_set = {'load_id': [], 'temp_id': [], 'p_set': [],
'grid_id': [], 'q_set': []}
# # TODO: consider other implications of `lv_transformer is True`
# if lv_transformer is True:
# bus_instances.append(Transformer)
# # TODO: only for debugging, remove afterwards
# import csv
# nodeslist = sorted([node.__repr__() for node in nodes
# if node not in grid.graph_isolated_nodes()])
# with open('/home/guido/ding0_debug/nodes_via_dataframe.csv', 'w', newline='') as csvfile:
# writer = csv.writer(csvfile, delimiter='\n')
# writer.writerow(nodeslist)
for node in nodes:
if node not in grid.graph_isolated_nodes():
# buses only
if isinstance(node, MVCableDistributorDing0):
buses['bus_id'].append(node.pypsa_id)
buses['v_nom'].append(grid.v_level)
buses['geom'].append(from_shape(node.geo_data, srid=srid))
buses['grid_id'].append(grid.id_db)
bus_v_mag_set['bus_id'].append(node.pypsa_id)
bus_v_mag_set['temp_id'].append(1)
bus_v_mag_set['v_mag_pu_set'].append([1, 1])
bus_v_mag_set['grid_id'].append(grid.id_db)
# bus + generator
elif isinstance(node, tuple(generator_instances)):
# slack generator
if isinstance(node, MVStationDing0):
logger.info('Only MV side bus of MVStation will be added.')
generator['generator_id'].append(
'_'.join(['MV', str(grid.id_db), 'slack']))
generator['control'].append('Slack')
generator['p_nom'].append(0)
bus_v_mag_set['v_mag_pu_set'].append(
[voltage_set_slack, voltage_set_slack])
# other generators
if isinstance(node, GeneratorDing0):
generator['generator_id'].append('_'.join(
['MV', str(grid.id_db), 'gen', str(node.id_db)]))
generator['control'].append('PQ')
generator['p_nom'].append(node.capacity * node.capacity_factor)
generator_pq_set['generator_id'].append('_'.join(
['MV', str(grid.id_db), 'gen', str(node.id_db)]))
generator_pq_set['temp_id'].append(1)
generator_pq_set['p_set'].append(
[node.capacity * node.capacity_factor * kw2mw * generation_in_load_case,
node.capacity * node.capacity_factor * kw2mw])
generator_pq_set['q_set'].append(
[node.capacity * node.capacity_factor * kw2mw * Q_factor_generation * generation_in_load_case,
node.capacity * node.capacity_factor * kw2mw * Q_factor_generation])
generator_pq_set['grid_id'].append(grid.id_db)
bus_v_mag_set['v_mag_pu_set'].append([1, 1])
buses['bus_id'].append(node.pypsa_id)
buses['v_nom'].append(grid.v_level)
buses['geom'].append(from_shape(node.geo_data, srid=srid))
buses['grid_id'].append(grid.id_db)
bus_v_mag_set['bus_id'].append(node.pypsa_id)
bus_v_mag_set['temp_id'].append(1)
bus_v_mag_set['grid_id'].append(grid.id_db)
generator['grid_id'].append(grid.id_db)
generator['bus'].append(node.pypsa_id)
# aggregated load at hv/mv substation
elif isinstance(node, LVLoadAreaCentreDing0):
load['load_id'].append(node.pypsa_id)
load['bus'].append('_'.join(['HV', str(grid.id_db), 'trd']))
load['grid_id'].append(grid.id_db)
load_pq_set['load_id'].append(node.pypsa_id)
load_pq_set['temp_id'].append(1)
load_pq_set['p_set'].append(
[node.lv_load_area.peak_load * kw2mw,
node.lv_load_area.peak_load * kw2mw * load_in_generation_case])
load_pq_set['q_set'].append(
[node.lv_load_area.peak_load * kw2mw * Q_factor_load,
node.lv_load_area.peak_load * kw2mw * Q_factor_load * load_in_generation_case])
load_pq_set['grid_id'].append(grid.id_db)
# generator representing generation capacity of aggregate LA
# analogously to load, generation is connected directly to
# HV-MV substation
generator['generator_id'].append('_'.join(
['MV', str(grid.id_db), 'lcg', str(node.id_db)]))
generator['control'].append('PQ')
generator['p_nom'].append(node.lv_load_area.peak_generation)
generator['grid_id'].append(grid.id_db)
generator['bus'].append('_'.join(['HV', str(grid.id_db), 'trd']))
generator_pq_set['generator_id'].append('_'.join(
['MV', str(grid.id_db), 'lcg', str(node.id_db)]))
generator_pq_set['temp_id'].append(1)
generator_pq_set['p_set'].append(
[node.lv_load_area.peak_generation * kw2mw * generation_in_load_case,
node.lv_load_area.peak_generation * kw2mw])
generator_pq_set['q_set'].append(
[node.lv_load_area.peak_generation * kw2mw * Q_factor_generation * generation_in_load_case,
node.lv_load_area.peak_generation * kw2mw * Q_factor_generation])
generator_pq_set['grid_id'].append(grid.id_db)
# bus + aggregate load of lv grids (at mv/ls substation)
elif isinstance(node, LVStationDing0):
# Aggregated load representing load in LV grid
load['load_id'].append(
'_'.join(['MV', str(grid.id_db), 'loa', str(node.id_db)]))
load['bus'].append(node.pypsa_id)
load['grid_id'].append(grid.id_db)
load_pq_set['load_id'].append(
'_'.join(['MV', str(grid.id_db), 'loa', str(node.id_db)]))
load_pq_set['temp_id'].append(1)
load_pq_set['p_set'].append(
[node.peak_load * kw2mw,
node.peak_load * kw2mw * load_in_generation_case])
load_pq_set['q_set'].append(
[node.peak_load * kw2mw * Q_factor_load,
node.peak_load * kw2mw * Q_factor_load * load_in_generation_case])
load_pq_set['grid_id'].append(grid.id_db)
# bus at primary MV-LV transformer side
buses['bus_id'].append(node.pypsa_id)
buses['v_nom'].append(grid.v_level)
buses['geom'].append(from_shape(node.geo_data, srid=srid))
buses['grid_id'].append(grid.id_db)
bus_v_mag_set['bus_id'].append(node.pypsa_id)
bus_v_mag_set['temp_id'].append(1)
bus_v_mag_set['v_mag_pu_set'].append([1, 1])
bus_v_mag_set['grid_id'].append(grid.id_db)
# generator representing generation capacity in LV grid
generator['generator_id'].append('_'.join(
['MV', str(grid.id_db), 'gen', str(node.id_db)]))
generator['control'].append('PQ')
generator['p_nom'].append(node.peak_generation)
generator['grid_id'].append(grid.id_db)
generator['bus'].append(node.pypsa_id)
generator_pq_set['generator_id'].append('_'.join(
['MV', str(grid.id_db), 'gen', str(node.id_db)]))
generator_pq_set['temp_id'].append(1)
generator_pq_set['p_set'].append(
[node.peak_generation * kw2mw * generation_in_load_case,
node.peak_generation * kw2mw])
generator_pq_set['q_set'].append(
[node.peak_generation * kw2mw * Q_factor_generation * generation_in_load_case,
node.peak_generation * kw2mw * Q_factor_generation])
generator_pq_set['grid_id'].append(grid.id_db)
elif isinstance(node, CircuitBreakerDing0):
# TODO: remove this elif-case if CircuitBreaker are removed from graph
continue
else:
raise TypeError("Node of type", node, "cannot be handled here")
else:
if not isinstance(node, CircuitBreakerDing0):
add_info = "LA is aggr. {0}".format(
node.lv_load_area.is_aggregated)
else:
add_info = ""
logger.warning("Node {0} is not connected to the graph and will " \
"be omitted in power flow analysis. {1}".format(
node, add_info))
components = {'Bus': DataFrame(buses).set_index('bus_id'),
'Generator': DataFrame(generator).set_index('generator_id'),
'Load': DataFrame(load).set_index('load_id')}
components_data = {'Bus': DataFrame(bus_v_mag_set).set_index('bus_id'),
'Generator': DataFrame(generator_pq_set).set_index(
'generator_id'),
'Load': DataFrame(load_pq_set).set_index('load_id')}
# with open('/home/guido/ding0_debug/number_of_nodes_buses.csv', 'a') as csvfile:
# csvfile.write(','.join(['\n', str(len(nodes)), str(len(grid.graph_isolated_nodes())), str(len(components['Bus']))]))
return components, components_data |
def initialize_gdt_x86(self,state,concrete_target):
"""
Create a GDT in the state memory and populate the segment registers.
Rehook the vsyscall address using the real value in the concrete process memory
:param state: state which will be modified
:param concrete_target: concrete target that will be used to read the fs register
:return:
"""
_l.debug("Creating fake Global Descriptor Table and synchronizing gs segment register")
gs = self._read_gs_register_x86(concrete_target)
gdt = self.generate_gdt(0x0, gs)
self.setup_gdt(state, gdt)
# Synchronize the address of vsyscall in simprocedures dictionary with the concrete value
_vsyscall_address = concrete_target.read_memory(gs + 0x10, state.project.arch.bits / 8)
_vsyscall_address = struct.unpack(state.project.arch.struct_fmt(), _vsyscall_address)[0]
state.project.rehook_symbol(_vsyscall_address, '_vsyscall')
return gdt | Create a GDT in the state memory and populate the segment registers.
Rehook the vsyscall address using the real value in the concrete process memory
:param state: state which will be modified
:param concrete_target: concrete target that will be used to read the fs register
:return: | Below is the the instruction that describes the task:
### Input:
Create a GDT in the state memory and populate the segment registers.
Rehook the vsyscall address using the real value in the concrete process memory
:param state: state which will be modified
:param concrete_target: concrete target that will be used to read the fs register
:return:
### Response:
def initialize_gdt_x86(self,state,concrete_target):
"""
Create a GDT in the state memory and populate the segment registers.
Rehook the vsyscall address using the real value in the concrete process memory
:param state: state which will be modified
:param concrete_target: concrete target that will be used to read the fs register
:return:
"""
_l.debug("Creating fake Global Descriptor Table and synchronizing gs segment register")
gs = self._read_gs_register_x86(concrete_target)
gdt = self.generate_gdt(0x0, gs)
self.setup_gdt(state, gdt)
# Synchronize the address of vsyscall in simprocedures dictionary with the concrete value
_vsyscall_address = concrete_target.read_memory(gs + 0x10, state.project.arch.bits / 8)
_vsyscall_address = struct.unpack(state.project.arch.struct_fmt(), _vsyscall_address)[0]
state.project.rehook_symbol(_vsyscall_address, '_vsyscall')
return gdt |
def create(cls, setting, **kwargs):
"""Instantiate a :class:`NetworkConnection` (or subclass) object based
on a given host name and port number (eg. ``192.168.0.205:9100``).
"""
host, port = setting.rsplit(':', 1)
return cls(host, int(port), **kwargs) | Instantiate a :class:`NetworkConnection` (or subclass) object based
on a given host name and port number (eg. ``192.168.0.205:9100``). | Below is the the instruction that describes the task:
### Input:
Instantiate a :class:`NetworkConnection` (or subclass) object based
on a given host name and port number (eg. ``192.168.0.205:9100``).
### Response:
def create(cls, setting, **kwargs):
"""Instantiate a :class:`NetworkConnection` (or subclass) object based
on a given host name and port number (eg. ``192.168.0.205:9100``).
"""
host, port = setting.rsplit(':', 1)
return cls(host, int(port), **kwargs) |
def validate_id(request):
"""Validate request id."""
if 'id' in request:
correct_id = isinstance(
request['id'],
(string_types, int, None),
)
error = 'Incorrect identifier'
assert correct_id, error | Validate request id. | Below is the the instruction that describes the task:
### Input:
Validate request id.
### Response:
def validate_id(request):
"""Validate request id."""
if 'id' in request:
correct_id = isinstance(
request['id'],
(string_types, int, None),
)
error = 'Incorrect identifier'
assert correct_id, error |
def create(sld, tld, nameserver, ip):
'''
Creates a new nameserver. Returns ``True`` if the nameserver was created
successfully.
sld
SLD of the domain name
tld
TLD of the domain name
nameserver
Nameserver to create
ip
Nameserver IP address
CLI Example:
.. code-block:: bash
salt '*' namecheap_domains_ns.create sld tld nameserver ip
'''
opts = salt.utils.namecheap.get_opts('namecheap.domains.ns.create')
opts['SLD'] = sld
opts['TLD'] = tld
opts['Nameserver'] = nameserver
opts['IP'] = ip
response_xml = salt.utils.namecheap.post_request(opts)
if response_xml is None:
return False
domainnscreateresult = response_xml.getElementsByTagName('DomainNSCreateResult')[0]
return salt.utils.namecheap.string_to_value(domainnscreateresult.getAttribute('IsSuccess')) | Creates a new nameserver. Returns ``True`` if the nameserver was created
successfully.
sld
SLD of the domain name
tld
TLD of the domain name
nameserver
Nameserver to create
ip
Nameserver IP address
CLI Example:
.. code-block:: bash
salt '*' namecheap_domains_ns.create sld tld nameserver ip | Below is the the instruction that describes the task:
### Input:
Creates a new nameserver. Returns ``True`` if the nameserver was created
successfully.
sld
SLD of the domain name
tld
TLD of the domain name
nameserver
Nameserver to create
ip
Nameserver IP address
CLI Example:
.. code-block:: bash
salt '*' namecheap_domains_ns.create sld tld nameserver ip
### Response:
def create(sld, tld, nameserver, ip):
'''
Creates a new nameserver. Returns ``True`` if the nameserver was created
successfully.
sld
SLD of the domain name
tld
TLD of the domain name
nameserver
Nameserver to create
ip
Nameserver IP address
CLI Example:
.. code-block:: bash
salt '*' namecheap_domains_ns.create sld tld nameserver ip
'''
opts = salt.utils.namecheap.get_opts('namecheap.domains.ns.create')
opts['SLD'] = sld
opts['TLD'] = tld
opts['Nameserver'] = nameserver
opts['IP'] = ip
response_xml = salt.utils.namecheap.post_request(opts)
if response_xml is None:
return False
domainnscreateresult = response_xml.getElementsByTagName('DomainNSCreateResult')[0]
return salt.utils.namecheap.string_to_value(domainnscreateresult.getAttribute('IsSuccess')) |
def import_users(
path,
resource_name=None,
send_email_to_user=None,
alternate_email=None,
verbose=None,
export_to_file=None,
**kwargs,
):
"""Import users from a CSV file with columns:
username
first_name
last_name
email
sites: a comma-separated list of sites
groups: a comma-separated list of groups
job_title
"""
users = []
with open(path) as f:
reader = csv.DictReader(f)
for user_data in reader:
username = user_data.get("username")
site_names = user_data.get("sites").lower().split(",")
group_names = user_data.get("groups").lower().split(",")
first_name = user_data.get("first_name")
last_name = user_data.get("last_name")
email = user_data.get("email")
o = UserImporter(
username=username,
first_name=first_name,
last_name=last_name,
email=email,
site_names=site_names,
group_names=group_names,
resource_name=resource_name,
send_email_to_user=send_email_to_user,
alternate_email=alternate_email,
verbose=verbose,
**kwargs,
)
users.append(
{
"username": o.user.username,
"password": o.password,
"first_name": o.user.first_name,
"last_name": o.user.last_name,
"sites": o.site_names,
"groups": o.group_names,
}
)
if export_to_file:
fieldnames = [
"username",
"password",
"first_name",
"last_name",
"sites",
"groups",
]
with open(path + "new.csv", "w+") as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
for user in users:
writer.writerow(user) | Import users from a CSV file with columns:
username
first_name
last_name
email
sites: a comma-separated list of sites
groups: a comma-separated list of groups
job_title | Below is the the instruction that describes the task:
### Input:
Import users from a CSV file with columns:
username
first_name
last_name
email
sites: a comma-separated list of sites
groups: a comma-separated list of groups
job_title
### Response:
def import_users(
path,
resource_name=None,
send_email_to_user=None,
alternate_email=None,
verbose=None,
export_to_file=None,
**kwargs,
):
"""Import users from a CSV file with columns:
username
first_name
last_name
email
sites: a comma-separated list of sites
groups: a comma-separated list of groups
job_title
"""
users = []
with open(path) as f:
reader = csv.DictReader(f)
for user_data in reader:
username = user_data.get("username")
site_names = user_data.get("sites").lower().split(",")
group_names = user_data.get("groups").lower().split(",")
first_name = user_data.get("first_name")
last_name = user_data.get("last_name")
email = user_data.get("email")
o = UserImporter(
username=username,
first_name=first_name,
last_name=last_name,
email=email,
site_names=site_names,
group_names=group_names,
resource_name=resource_name,
send_email_to_user=send_email_to_user,
alternate_email=alternate_email,
verbose=verbose,
**kwargs,
)
users.append(
{
"username": o.user.username,
"password": o.password,
"first_name": o.user.first_name,
"last_name": o.user.last_name,
"sites": o.site_names,
"groups": o.group_names,
}
)
if export_to_file:
fieldnames = [
"username",
"password",
"first_name",
"last_name",
"sites",
"groups",
]
with open(path + "new.csv", "w+") as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
for user in users:
writer.writerow(user) |
def _insert_tasks(tasks, queue, transactional=False,
retry_transient_errors=True,
retry_delay=RETRY_SLEEP_SECS):
"""Insert a batch of tasks into the specified queue. If an error occurs
during insertion, split the batch and retry until they are successfully
inserted. Return the number of successfully inserted tasks.
"""
from google.appengine.api import taskqueue
if not tasks:
return 0
try:
taskqueue.Queue(name=queue).add(tasks, transactional=transactional)
return len(tasks)
except (taskqueue.BadTaskStateError,
taskqueue.TaskAlreadyExistsError,
taskqueue.TombstonedTaskError):
if len(tasks) <= 1:
# Task has already been inserted, no reason to report an error here.
return 0
# If a list of more than one Tasks is given, a raised exception does
# not guarantee that no tasks were added to the queue (unless
# transactional is set to True). To determine which tasks were
# successfully added when an exception is raised, check the
# Task.was_enqueued property.
reinsert = _tasks_to_reinsert(tasks, transactional)
count = len(reinsert)
inserted = len(tasks) - count
inserted += _insert_tasks(reinsert[:count / 2], queue, transactional,
retry_transient_errors, retry_delay)
inserted += _insert_tasks(reinsert[count / 2:], queue, transactional,
retry_transient_errors, retry_delay)
return inserted
except taskqueue.TransientError:
# Always re-raise for transactional insert, or if specified by
# options.
if transactional or not retry_transient_errors:
raise
reinsert = _tasks_to_reinsert(tasks, transactional)
# Retry with a delay, and then let any errors re-raise.
time.sleep(retry_delay)
taskqueue.Queue(name=queue).add(reinsert, transactional=transactional)
return len(tasks) | Insert a batch of tasks into the specified queue. If an error occurs
during insertion, split the batch and retry until they are successfully
inserted. Return the number of successfully inserted tasks. | Below is the the instruction that describes the task:
### Input:
Insert a batch of tasks into the specified queue. If an error occurs
during insertion, split the batch and retry until they are successfully
inserted. Return the number of successfully inserted tasks.
### Response:
def _insert_tasks(tasks, queue, transactional=False,
retry_transient_errors=True,
retry_delay=RETRY_SLEEP_SECS):
"""Insert a batch of tasks into the specified queue. If an error occurs
during insertion, split the batch and retry until they are successfully
inserted. Return the number of successfully inserted tasks.
"""
from google.appengine.api import taskqueue
if not tasks:
return 0
try:
taskqueue.Queue(name=queue).add(tasks, transactional=transactional)
return len(tasks)
except (taskqueue.BadTaskStateError,
taskqueue.TaskAlreadyExistsError,
taskqueue.TombstonedTaskError):
if len(tasks) <= 1:
# Task has already been inserted, no reason to report an error here.
return 0
# If a list of more than one Tasks is given, a raised exception does
# not guarantee that no tasks were added to the queue (unless
# transactional is set to True). To determine which tasks were
# successfully added when an exception is raised, check the
# Task.was_enqueued property.
reinsert = _tasks_to_reinsert(tasks, transactional)
count = len(reinsert)
inserted = len(tasks) - count
inserted += _insert_tasks(reinsert[:count / 2], queue, transactional,
retry_transient_errors, retry_delay)
inserted += _insert_tasks(reinsert[count / 2:], queue, transactional,
retry_transient_errors, retry_delay)
return inserted
except taskqueue.TransientError:
# Always re-raise for transactional insert, or if specified by
# options.
if transactional or not retry_transient_errors:
raise
reinsert = _tasks_to_reinsert(tasks, transactional)
# Retry with a delay, and then let any errors re-raise.
time.sleep(retry_delay)
taskqueue.Queue(name=queue).add(reinsert, transactional=transactional)
return len(tasks) |
def after_install(options, home_dir):
# --- CUT here ---
"""
called after virtualenv was created and pip/setuptools installed.
Now we installed requirement libs/packages.
"""
if options.install_type==INST_PYPI:
requirements=NORMAL_INSTALLATION
elif options.install_type==INST_GIT:
requirements=GIT_READONLY_INSTALLATION
elif options.install_type==INST_DEV:
requirements=DEVELOPER_INSTALLATION
else:
# Should never happen
raise RuntimeError("Install type %r unknown?!?" % options.install_type)
env_subprocess = EnvSubprocess(home_dir) # from bootstrap_env.bootstrap_install_pip
logfile = os.path.join(env_subprocess.abs_home_dir, "install.log")
for requirement in requirements:
sys.stdout.write("\n\nInstall %r:\n" % requirement)
env_subprocess.call_env_pip(["install", "--log=%s" % logfile, requirement])
sys.stdout.write("\n") | called after virtualenv was created and pip/setuptools installed.
Now we installed requirement libs/packages. | Below is the the instruction that describes the task:
### Input:
called after virtualenv was created and pip/setuptools installed.
Now we installed requirement libs/packages.
### Response:
def after_install(options, home_dir):
# --- CUT here ---
"""
called after virtualenv was created and pip/setuptools installed.
Now we installed requirement libs/packages.
"""
if options.install_type==INST_PYPI:
requirements=NORMAL_INSTALLATION
elif options.install_type==INST_GIT:
requirements=GIT_READONLY_INSTALLATION
elif options.install_type==INST_DEV:
requirements=DEVELOPER_INSTALLATION
else:
# Should never happen
raise RuntimeError("Install type %r unknown?!?" % options.install_type)
env_subprocess = EnvSubprocess(home_dir) # from bootstrap_env.bootstrap_install_pip
logfile = os.path.join(env_subprocess.abs_home_dir, "install.log")
for requirement in requirements:
sys.stdout.write("\n\nInstall %r:\n" % requirement)
env_subprocess.call_env_pip(["install", "--log=%s" % logfile, requirement])
sys.stdout.write("\n") |
def get_summaries(ordered=True):
"""Yields sorted (command name, command summary) tuples."""
if ordered:
cmditems = _sort_commands(commands_dict, commands_order)
else:
cmditems = commands_dict.items()
for name, command_class in cmditems:
yield (name, command_class.summary) | Yields sorted (command name, command summary) tuples. | Below is the the instruction that describes the task:
### Input:
Yields sorted (command name, command summary) tuples.
### Response:
def get_summaries(ordered=True):
"""Yields sorted (command name, command summary) tuples."""
if ordered:
cmditems = _sort_commands(commands_dict, commands_order)
else:
cmditems = commands_dict.items()
for name, command_class in cmditems:
yield (name, command_class.summary) |
def marginalize(self, variables, inplace=True):
"""
Marginalizes the factors present in the factor sets with respect to the given variables.
Parameters
----------
variables: list, array-like
List of the variables to be marginalized.
inplace: boolean (Default value True)
If inplace=True it will modify the factor set itself, would create a new factor set
Returns
-------
If inplace = False, will return a new marginalized FactorSet object.
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> factor_set1.marginalize('x1')
>>> print(factor_set1)
set([<DiscreteFactor representing phi(x2:3, x3:2) at 0x7f8e32b4cc10>,
<DiscreteFactor representing phi(x3:2, x4:2) at 0x7f8e32b4cf90>])
"""
if isinstance(variables, six.string_types):
raise TypeError('Expected list or array-like type got type str')
factor_set = self if inplace else self.copy()
factors_to_be_marginalized = set(filter(lambda x: set(x.scope()).intersection(variables),
factor_set.factors))
for factor in factors_to_be_marginalized:
variables_to_be_marginalized = list(set(factor.scope()).intersection(variables))
if inplace:
factor.marginalize(variables_to_be_marginalized, inplace=True)
else:
factor_set.remove_factors(factor)
factor_set.add_factors(factor.marginalize(variables_to_be_marginalized, inplace=False))
if not inplace:
return factor_set | Marginalizes the factors present in the factor sets with respect to the given variables.
Parameters
----------
variables: list, array-like
List of the variables to be marginalized.
inplace: boolean (Default value True)
If inplace=True it will modify the factor set itself, would create a new factor set
Returns
-------
If inplace = False, will return a new marginalized FactorSet object.
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> factor_set1.marginalize('x1')
>>> print(factor_set1)
set([<DiscreteFactor representing phi(x2:3, x3:2) at 0x7f8e32b4cc10>,
<DiscreteFactor representing phi(x3:2, x4:2) at 0x7f8e32b4cf90>]) | Below is the the instruction that describes the task:
### Input:
Marginalizes the factors present in the factor sets with respect to the given variables.
Parameters
----------
variables: list, array-like
List of the variables to be marginalized.
inplace: boolean (Default value True)
If inplace=True it will modify the factor set itself, would create a new factor set
Returns
-------
If inplace = False, will return a new marginalized FactorSet object.
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> factor_set1.marginalize('x1')
>>> print(factor_set1)
set([<DiscreteFactor representing phi(x2:3, x3:2) at 0x7f8e32b4cc10>,
<DiscreteFactor representing phi(x3:2, x4:2) at 0x7f8e32b4cf90>])
### Response:
def marginalize(self, variables, inplace=True):
"""
Marginalizes the factors present in the factor sets with respect to the given variables.
Parameters
----------
variables: list, array-like
List of the variables to be marginalized.
inplace: boolean (Default value True)
If inplace=True it will modify the factor set itself, would create a new factor set
Returns
-------
If inplace = False, will return a new marginalized FactorSet object.
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> factor_set1.marginalize('x1')
>>> print(factor_set1)
set([<DiscreteFactor representing phi(x2:3, x3:2) at 0x7f8e32b4cc10>,
<DiscreteFactor representing phi(x3:2, x4:2) at 0x7f8e32b4cf90>])
"""
if isinstance(variables, six.string_types):
raise TypeError('Expected list or array-like type got type str')
factor_set = self if inplace else self.copy()
factors_to_be_marginalized = set(filter(lambda x: set(x.scope()).intersection(variables),
factor_set.factors))
for factor in factors_to_be_marginalized:
variables_to_be_marginalized = list(set(factor.scope()).intersection(variables))
if inplace:
factor.marginalize(variables_to_be_marginalized, inplace=True)
else:
factor_set.remove_factors(factor)
factor_set.add_factors(factor.marginalize(variables_to_be_marginalized, inplace=False))
if not inplace:
return factor_set |
def find_by_section(self, section, params={}, **options):
"""<b>Board view only:</b> Returns the compact section records for all tasks within the given section.
Parameters
----------
section : {Id} The section in which to search for tasks.
[params] : {Object} Parameters for the request
"""
path = "/sections/%s/tasks" % (section)
return self.client.get_collection(path, params, **options) | <b>Board view only:</b> Returns the compact section records for all tasks within the given section.
Parameters
----------
section : {Id} The section in which to search for tasks.
[params] : {Object} Parameters for the request | Below is the the instruction that describes the task:
### Input:
<b>Board view only:</b> Returns the compact section records for all tasks within the given section.
Parameters
----------
section : {Id} The section in which to search for tasks.
[params] : {Object} Parameters for the request
### Response:
def find_by_section(self, section, params={}, **options):
"""<b>Board view only:</b> Returns the compact section records for all tasks within the given section.
Parameters
----------
section : {Id} The section in which to search for tasks.
[params] : {Object} Parameters for the request
"""
path = "/sections/%s/tasks" % (section)
return self.client.get_collection(path, params, **options) |
def u40(self, name, value=None, align=None):
"""Add an unsigned 5 byte integer field to template.
This is an convenience method that simply calls `Uint` keyword with predefined length."""
self.uint(5, name, value, align) | Add an unsigned 5 byte integer field to template.
This is an convenience method that simply calls `Uint` keyword with predefined length. | Below is the the instruction that describes the task:
### Input:
Add an unsigned 5 byte integer field to template.
This is an convenience method that simply calls `Uint` keyword with predefined length.
### Response:
def u40(self, name, value=None, align=None):
"""Add an unsigned 5 byte integer field to template.
This is an convenience method that simply calls `Uint` keyword with predefined length."""
self.uint(5, name, value, align) |
def _read_para_dh_group_list(self, code, cbit, clen, *, desc, length, version):
"""Read HIP DH_GROUP_LIST parameter.
Structure of HIP DH_GROUP_LIST parameter [RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| DH GROUP ID #1| DH GROUP ID #2| DH GROUP ID #3| DH GROUP ID #4|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| DH GROUP ID #n| Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 dh_group_list.type Parameter Type
1 15 dh_group_list.critical Critical Bit
2 16 dh_group_list.length Length of Contents
4 32 dh_group_list.id DH GROUP ID
"""
_dhid = list()
for _ in range(clen):
_dhid.append(_GROUP_ID.get(self._read_unpack(1), 'Unassigned'))
dh_group_list = dict(
type=desc,
critical=cbit,
length=clen,
id=tuple(_dhid),
)
_plen = length - clen
if _plen:
self._read_fileng(_plen)
return dh_group_list | Read HIP DH_GROUP_LIST parameter.
Structure of HIP DH_GROUP_LIST parameter [RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| DH GROUP ID #1| DH GROUP ID #2| DH GROUP ID #3| DH GROUP ID #4|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| DH GROUP ID #n| Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 dh_group_list.type Parameter Type
1 15 dh_group_list.critical Critical Bit
2 16 dh_group_list.length Length of Contents
4 32 dh_group_list.id DH GROUP ID | Below is the the instruction that describes the task:
### Input:
Read HIP DH_GROUP_LIST parameter.
Structure of HIP DH_GROUP_LIST parameter [RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| DH GROUP ID #1| DH GROUP ID #2| DH GROUP ID #3| DH GROUP ID #4|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| DH GROUP ID #n| Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 dh_group_list.type Parameter Type
1 15 dh_group_list.critical Critical Bit
2 16 dh_group_list.length Length of Contents
4 32 dh_group_list.id DH GROUP ID
### Response:
def _read_para_dh_group_list(self, code, cbit, clen, *, desc, length, version):
"""Read HIP DH_GROUP_LIST parameter.
Structure of HIP DH_GROUP_LIST parameter [RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| DH GROUP ID #1| DH GROUP ID #2| DH GROUP ID #3| DH GROUP ID #4|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| DH GROUP ID #n| Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 dh_group_list.type Parameter Type
1 15 dh_group_list.critical Critical Bit
2 16 dh_group_list.length Length of Contents
4 32 dh_group_list.id DH GROUP ID
"""
_dhid = list()
for _ in range(clen):
_dhid.append(_GROUP_ID.get(self._read_unpack(1), 'Unassigned'))
dh_group_list = dict(
type=desc,
critical=cbit,
length=clen,
id=tuple(_dhid),
)
_plen = length - clen
if _plen:
self._read_fileng(_plen)
return dh_group_list |
def decorate_obj(parent, n, o, otype, recurse=True, redecorate=False):
"""Adds the decoration for automated logging to the specified object, if it
hasn't already been done.
Args:
parent: object that `o` belongs to.
n (str): name in the parent's dictionary.
o (type): instance of the object's type.
otype (str): one of ['classes', 'functions', 'methods', 'modules'];
specifies which group the object belongs to.
recurse (bool): when True, the objects methods and functions are also
decorated recursively.
Examples:
Decorate the function `mymod.myfunc` to log automatically to the
database.
>>> from acorn.logging.decoration import decorate_obj
>>> import mymod
>>> decorate_obj(mymod, "myfunc", mymod.myfunc, "functions")
"""
global _decor_count, _decorated_o
from inspect import isclass, isfunction, ismodule
pmodule = parent if ismodule(parent) or isclass(parent) else None
fqdn = _fqdn(o, recheck=True, pmodule=pmodule)
if fqdn is None:
#This object didn't have a name, which means we can't extend it or
#track it anyway.
return
package = fqdn.split('.')[0]
d = _get_stack_depth(package, fqdn)
if (package in _decorated_o and
(id(o) not in _decorated_o[package] or redecorate)):
decor = None
if hasattr(o, "__call__") and otype != "classes":
#calling on class types is handled by the construction decorator
#below.
cdecor = CallingDecorator(o)
if isclass(parent):
clog = cdecor(fqdn, package, parent, d)
else:
clog = cdecor(fqdn, package, None, d)
#We can't update the attributes of the static methods (it just
#produces errors), so we do what we can before that.
msg.std("Setting decorator on {}.".format(fqdn), 4)
_update_attrs(clog, o)
if ((hasattr(o, "im_self") and o.im_self is parent)):
clog = staticmethod(clog)
setok = _safe_setattr(parent, n, clog)
if setok:
decor = cdecor
msg.okay("Set calling logger on {}: {}.".format(n, fqdn), 3)
_decor_count[package][0] += 1
else:
setok = _safe_setattr(o, "__acorn__", None)
_decor_count[package][2] += 1
if otype == "classes" and setok:
if hasattr(o, "__new__"):
setattr(o, "__old__", staticmethod(o.__new__))
crelog = creationlog(o, package, d)
setok = _safe_setattr(o, "__new__", creationlog(o, package, d))
if setok:
decor = crelog
msg.gen("Set creation logger on {}: {}.".format(n, fqdn),3)
_decor_count[package][0] += 1
#else: must have only static methods and no instances.
if setok:
_decorated_o[package][id(o)] = decor
else:
_decorated_o[package][id(o)] = None
#We don't need to bother recursing for those modules/classes that
#can't have their attributes set, since their members will have the
#same restrictions.
if setok and otype in ["classes", "modules"]:
#These types can be further decorated; let's traverse their members
#and try to decorate those as well.
splits = _split_object(o, package)
for ot, ol in splits.items():
for nobj, obj in ol:
decorate_obj(o, nobj, obj, ot)
elif otype != "classes" and package in _decorated_o:
#Even though the object with that id() has been decorated, it doesn't
#mean that the parent has had its attribute overwritten to point to the
#decorated object. This happens with instance methods on different
#classes that are implemented by another generic method.
target = _decorated_o[package][id(o)]
child = getattr(parent, n)
if target is not None:
clog = target(fqdn, package, parent)
_safe_setattr(clog, "__acorn__", o)
_update_attrs(clog, o)
setok = _safe_setattr(parent, n, clog)
msg.okay("Set existing calling logger on {}: {}.".format(n,fqdn), 4) | Adds the decoration for automated logging to the specified object, if it
hasn't already been done.
Args:
parent: object that `o` belongs to.
n (str): name in the parent's dictionary.
o (type): instance of the object's type.
otype (str): one of ['classes', 'functions', 'methods', 'modules'];
specifies which group the object belongs to.
recurse (bool): when True, the objects methods and functions are also
decorated recursively.
Examples:
Decorate the function `mymod.myfunc` to log automatically to the
database.
>>> from acorn.logging.decoration import decorate_obj
>>> import mymod
>>> decorate_obj(mymod, "myfunc", mymod.myfunc, "functions") | Below is the the instruction that describes the task:
### Input:
Adds the decoration for automated logging to the specified object, if it
hasn't already been done.
Args:
parent: object that `o` belongs to.
n (str): name in the parent's dictionary.
o (type): instance of the object's type.
otype (str): one of ['classes', 'functions', 'methods', 'modules'];
specifies which group the object belongs to.
recurse (bool): when True, the objects methods and functions are also
decorated recursively.
Examples:
Decorate the function `mymod.myfunc` to log automatically to the
database.
>>> from acorn.logging.decoration import decorate_obj
>>> import mymod
>>> decorate_obj(mymod, "myfunc", mymod.myfunc, "functions")
### Response:
def decorate_obj(parent, n, o, otype, recurse=True, redecorate=False):
    """Adds the decoration for automated logging to the specified object, if it
    hasn't already been done.
    Args:
        parent: object that `o` belongs to.
        n (str): name in the parent's dictionary.
        o (type): instance of the object's type.
        otype (str): one of ['classes', 'functions', 'methods', 'modules'];
            specifies which group the object belongs to.
        recurse (bool): when True, the objects methods and functions are also
            decorated recursively.
        redecorate (bool): when True, re-apply the decoration even when this
            object was decorated before.
    Examples:
        Decorate the function `mymod.myfunc` to log automatically to the
        database.
        >>> from acorn.logging.decoration import decorate_obj
        >>> import mymod
        >>> decorate_obj(mymod, "myfunc", mymod.myfunc, "functions")
    """
    global _decor_count, _decorated_o
    from inspect import isclass, isfunction, ismodule
    # The fully-qualified name lookup needs the owning module/class for
    # context; other parents contribute no module information.
    pmodule = parent if ismodule(parent) or isclass(parent) else None
    fqdn = _fqdn(o, recheck=True, pmodule=pmodule)
    if fqdn is None:
        #This object didn't have a name, which means we can't extend it or
        #track it anyway.
        return
    package = fqdn.split('.')[0]
    d = _get_stack_depth(package, fqdn)
    # Only act on tracked packages, and only for objects not yet seen
    # (or when an explicit re-decoration was requested).
    if (package in _decorated_o and
        (id(o) not in _decorated_o[package] or redecorate)):
        decor = None
        if hasattr(o, "__call__") and otype != "classes":
            #calling on class types is handled by the construction decorator
            #below.
            cdecor = CallingDecorator(o)
            if isclass(parent):
                clog = cdecor(fqdn, package, parent, d)
            else:
                clog = cdecor(fqdn, package, None, d)
            #We can't update the attributes of the static methods (it just
            #produces errors), so we do what we can before that.
            msg.std("Setting decorator on {}.".format(fqdn), 4)
            _update_attrs(clog, o)
            # NOTE(review): im_self identical to the parent appears to mark a
            # py2 method bound to the class; the wrapper is re-wrapped as a
            # staticmethod before being installed -- confirm.
            if ((hasattr(o, "im_self") and o.im_self is parent)):
                clog = staticmethod(clog)
            setok = _safe_setattr(parent, n, clog)
            if setok:
                decor = cdecor
                msg.okay("Set calling logger on {}: {}.".format(n, fqdn), 3)
                _decor_count[package][0] += 1
        else:
            # Non-callable (or class) entry: just mark it as seen.
            setok = _safe_setattr(o, "__acorn__", None)
            _decor_count[package][2] += 1
        if otype == "classes" and setok:
            # Instrument instance construction by hooking __new__; the
            # original is preserved on __old__.
            if hasattr(o, "__new__"):
                setattr(o, "__old__", staticmethod(o.__new__))
                # NOTE(review): creationlog() is invoked twice here, so the
                # stored `crelog` is a different instance from the one
                # installed on __new__ -- confirm this is intended.
                crelog = creationlog(o, package, d)
                setok = _safe_setattr(o, "__new__", creationlog(o, package, d))
                if setok:
                    decor = crelog
                    msg.gen("Set creation logger on {}: {}.".format(n, fqdn),3)
                    _decor_count[package][0] += 1
            #else: must have only static methods and no instances.
        if setok:
            _decorated_o[package][id(o)] = decor
        else:
            _decorated_o[package][id(o)] = None
        #We don't need to bother recursing for those modules/classes that
        #can't have their attributes set, since their members will have the
        #same restrictions.
        if setok and otype in ["classes", "modules"]:
            #These types can be further decorated; let's traverse their members
            #and try to decorate those as well.
            splits = _split_object(o, package)
            for ot, ol in splits.items():
                for nobj, obj in ol:
                    decorate_obj(o, nobj, obj, ot)
    elif otype != "classes" and package in _decorated_o:
        #Even though the object with that id() has been decorated, it doesn't
        #mean that the parent has had its attribute overwritten to point to the
        #decorated object. This happens with instance methods on different
        #classes that are implemented by another generic method.
        target = _decorated_o[package][id(o)]
        # NOTE(review): `child` is assigned but never used in the visible
        # code -- confirm whether it is needed further down.
        child = getattr(parent, n)
        if target is not None:
            clog = target(fqdn, package, parent)
            _safe_setattr(clog, "__acorn__", o)
            _update_attrs(clog, o)
            setok = _safe_setattr(parent, n, clog)
msg.okay("Set existing calling logger on {}: {}.".format(n,fqdn), 4) |
def perform_chunked_integrity_check(self):
    # type: (Descriptor) -> None
    """Hash data against stored hasher safely

    Drains decrypted-but-unchecked chunks in strictly increasing chunk
    order, feeds each chunk's bytes into the active hasher, and persists
    progress to the resume database after every chunk.

    :param Descriptor self: this
    """
    # prefer the HMAC when configured; otherwise fall back to MD5
    hasher = self.hmac or self.md5
    # iterate from next chunk to be checked
    while True:
        ucc = None
        with self._meta_lock:
            chunk_num = self._next_integrity_chunk
            # check if the next chunk is ready
            if (chunk_num in self._unchecked_chunks and
                    self._unchecked_chunks[chunk_num]['decrypted']):
                ucc = self._unchecked_chunks.pop(chunk_num)['ucc']
            # the next in-order chunk is not ready yet: stop draining
            else:
                break
        # hash data and set next integrity chunk
        md5hexdigest = None
        if hasher is not None:
            with ucc.file_path.open('rb') as fd:
                # non-temp chunks share one backing file, so seek to
                # this chunk's own offset before reading
                if not ucc.temp:
                    fd.seek(ucc.fd_start, 0)
                chunk = fd.read(ucc.data_len)
            # one-shot temporary chunk files can be removed once read
            if ucc.temp:
                ucc.file_path.unlink()
            with self._hasher_lock:
                hasher.update(chunk)
                # only an MD5 digest is persisted to the resume record
                if hasher == self.md5:
                    md5hexdigest = hasher.hexdigest()
        with self._meta_lock:
            # update integrity counter and resume db
            self._next_integrity_chunk += 1
            if self.is_resumable:
                self._resume_mgr.add_or_update_record(
                    self.final_path, self._ase, self._chunk_size,
                    self._next_integrity_chunk, False, md5hexdigest,
                )
            # decrement outstanding op counter
self._outstanding_ops -= 1 | Hash data against stored hasher safely
:param Descriptor self: this | Below is the instruction that describes the task:
### Input:
Hash data against stored hasher safely
:param Descriptor self: this
### Response:
def perform_chunked_integrity_check(self):
    # type: (Descriptor) -> None
    """Hash data against stored hasher safely

    Drains decrypted-but-unchecked chunks in strictly increasing chunk
    order, feeds each chunk's bytes into the active hasher, and persists
    progress to the resume database after every chunk.

    :param Descriptor self: this
    """
    # prefer the HMAC when configured; otherwise fall back to MD5
    hasher = self.hmac or self.md5
    # iterate from next chunk to be checked
    while True:
        ucc = None
        with self._meta_lock:
            chunk_num = self._next_integrity_chunk
            # check if the next chunk is ready
            if (chunk_num in self._unchecked_chunks and
                    self._unchecked_chunks[chunk_num]['decrypted']):
                ucc = self._unchecked_chunks.pop(chunk_num)['ucc']
            # the next in-order chunk is not ready yet: stop draining
            else:
                break
        # hash data and set next integrity chunk
        md5hexdigest = None
        if hasher is not None:
            with ucc.file_path.open('rb') as fd:
                # non-temp chunks share one backing file, so seek to
                # this chunk's own offset before reading
                if not ucc.temp:
                    fd.seek(ucc.fd_start, 0)
                chunk = fd.read(ucc.data_len)
            # one-shot temporary chunk files can be removed once read
            if ucc.temp:
                ucc.file_path.unlink()
            with self._hasher_lock:
                hasher.update(chunk)
                # only an MD5 digest is persisted to the resume record
                if hasher == self.md5:
                    md5hexdigest = hasher.hexdigest()
        with self._meta_lock:
            # update integrity counter and resume db
            self._next_integrity_chunk += 1
            if self.is_resumable:
                self._resume_mgr.add_or_update_record(
                    self.final_path, self._ase, self._chunk_size,
                    self._next_integrity_chunk, False, md5hexdigest,
                )
            # decrement outstanding op counter
self._outstanding_ops -= 1 |
def get_value(self, name):
    """
    Get return value of a dependency factory or
    a live singleton instance.

    Singleton factories run once per container, threadlocal factories
    once per thread; any other factory produces a fresh value per call.
    Raises KeyError when nothing was registered under ``name``.
    """
    factory = self._registered.get(name)
    if not factory:
        raise KeyError('Name not registered')
    if factory._giveme_singleton:
        # Process-wide cache: build lazily on first request.
        if name not in self._singletons:
            self._singletons[name] = factory()
        return self._singletons[name]
    if factory._giveme_threadlocal:
        # Per-thread cache: each thread lazily builds its own instance.
        if not hasattr(self._threadlocals, name):
            setattr(self._threadlocals, name, factory())
        return getattr(self._threadlocals, name)
return factory() | Get return value of a dependency factory or
a live singleton instance. | Below is the instruction that describes the task:
### Input:
Get return value of a dependency factory or
a live singleton instance.
### Response:
def get_value(self, name):
    """
    Get return value of a dependency factory or
    a live singleton instance.

    Singleton factories run once per container, threadlocal factories
    once per thread; any other factory produces a fresh value per call.
    Raises KeyError when nothing was registered under ``name``.
    """
    factory = self._registered.get(name)
    if not factory:
        raise KeyError('Name not registered')
    if factory._giveme_singleton:
        # Process-wide cache: build lazily on first request.
        if name not in self._singletons:
            self._singletons[name] = factory()
        return self._singletons[name]
    if factory._giveme_threadlocal:
        # Per-thread cache: each thread lazily builds its own instance.
        if not hasattr(self._threadlocals, name):
            setattr(self._threadlocals, name, factory())
        return getattr(self._threadlocals, name)
return factory() |
def _add_detection_gt(self, img, add_mask):
"""
Add 'boxes', 'class', 'is_crowd' of this image to the dict, used by detection.
If add_mask is True, also add 'segmentation' in coco poly format.
"""
# ann_ids = self.coco.getAnnIds(imgIds=img['image_id'])
# objs = self.coco.loadAnns(ann_ids)
objs = self.coco.imgToAnns[img['image_id']] # equivalent but faster than the above two lines
# clean-up boxes
valid_objs = []
width = img.pop('width')
height = img.pop('height')
for objid, obj in enumerate(objs):
if obj.get('ignore', 0) == 1:
continue
x1, y1, w, h = obj['bbox']
# bbox is originally in float
# x1/y1 means upper-left corner and w/h means true w/h. This can be verified by segmentation pixels.
# But we do make an assumption here that (0.0, 0.0) is upper-left corner of the first pixel
x1 = np.clip(float(x1), 0, width)
y1 = np.clip(float(y1), 0, height)
w = np.clip(float(x1 + w), 0, width) - x1
h = np.clip(float(y1 + h), 0, height) - y1
# Require non-zero seg area and more than 1x1 box size
if obj['area'] > 1 and w > 0 and h > 0 and w * h >= 4:
obj['bbox'] = [x1, y1, x1 + w, y1 + h]
valid_objs.append(obj)
if add_mask:
segs = obj['segmentation']
if not isinstance(segs, list):
assert obj['iscrowd'] == 1
obj['segmentation'] = None
else:
valid_segs = [np.asarray(p).reshape(-1, 2).astype('float32') for p in segs if len(p) >= 6]
if len(valid_segs) == 0:
logger.error("Object {} in image {} has no valid polygons!".format(objid, img['file_name']))
elif len(valid_segs) < len(segs):
logger.warn("Object {} in image {} has invalid polygons!".format(objid, img['file_name']))
obj['segmentation'] = valid_segs
# all geometrically-valid boxes are returned
boxes = np.asarray([obj['bbox'] for obj in valid_objs], dtype='float32') # (n, 4)
cls = np.asarray([
self.COCO_id_to_category_id[obj['category_id']]
for obj in valid_objs], dtype='int32') # (n,)
is_crowd = np.asarray([obj['iscrowd'] for obj in valid_objs], dtype='int8')
# add the keys
img['boxes'] = boxes # nx4
img['class'] = cls # n, always >0
img['is_crowd'] = is_crowd # n,
if add_mask:
# also required to be float32
img['segmentation'] = [
obj['segmentation'] for obj in valid_objs] | Add 'boxes', 'class', 'is_crowd' of this image to the dict, used by detection.
If add_mask is True, also add 'segmentation' in coco poly format. | Below is the instruction that describes the task:
### Input:
Add 'boxes', 'class', 'is_crowd' of this image to the dict, used by detection.
If add_mask is True, also add 'segmentation' in coco poly format.
### Response:
def _add_detection_gt(self, img, add_mask):
"""
Add 'boxes', 'class', 'is_crowd' of this image to the dict, used by detection.
If add_mask is True, also add 'segmentation' in coco poly format.
"""
# ann_ids = self.coco.getAnnIds(imgIds=img['image_id'])
# objs = self.coco.loadAnns(ann_ids)
objs = self.coco.imgToAnns[img['image_id']] # equivalent but faster than the above two lines
# clean-up boxes
valid_objs = []
width = img.pop('width')
height = img.pop('height')
for objid, obj in enumerate(objs):
if obj.get('ignore', 0) == 1:
continue
x1, y1, w, h = obj['bbox']
# bbox is originally in float
# x1/y1 means upper-left corner and w/h means true w/h. This can be verified by segmentation pixels.
# But we do make an assumption here that (0.0, 0.0) is upper-left corner of the first pixel
x1 = np.clip(float(x1), 0, width)
y1 = np.clip(float(y1), 0, height)
w = np.clip(float(x1 + w), 0, width) - x1
h = np.clip(float(y1 + h), 0, height) - y1
# Require non-zero seg area and more than 1x1 box size
if obj['area'] > 1 and w > 0 and h > 0 and w * h >= 4:
obj['bbox'] = [x1, y1, x1 + w, y1 + h]
valid_objs.append(obj)
if add_mask:
segs = obj['segmentation']
if not isinstance(segs, list):
assert obj['iscrowd'] == 1
obj['segmentation'] = None
else:
valid_segs = [np.asarray(p).reshape(-1, 2).astype('float32') for p in segs if len(p) >= 6]
if len(valid_segs) == 0:
logger.error("Object {} in image {} has no valid polygons!".format(objid, img['file_name']))
elif len(valid_segs) < len(segs):
logger.warn("Object {} in image {} has invalid polygons!".format(objid, img['file_name']))
obj['segmentation'] = valid_segs
# all geometrically-valid boxes are returned
boxes = np.asarray([obj['bbox'] for obj in valid_objs], dtype='float32') # (n, 4)
cls = np.asarray([
self.COCO_id_to_category_id[obj['category_id']]
for obj in valid_objs], dtype='int32') # (n,)
is_crowd = np.asarray([obj['iscrowd'] for obj in valid_objs], dtype='int8')
# add the keys
img['boxes'] = boxes # nx4
img['class'] = cls # n, always >0
img['is_crowd'] = is_crowd # n,
if add_mask:
# also required to be float32
img['segmentation'] = [
obj['segmentation'] for obj in valid_objs] |
def _process_items(cls, vals):
    "Processes list of items assigning unique paths to each."
    # Another instance of this container: reuse its item list directly.
    if type(vals) is cls:
        return vals.data
    # Normalize a single item into a one-element list.
    if not isinstance(vals, (list, tuple)):
        vals = [vals]
    items = []
    path_counts = defaultdict(lambda: 1)
    cls._unpack_paths(vals, items, path_counts)
    items = cls._deduplicate_items(items)
    return items | Processes list of items assigning unique paths to each. | Below is the instruction that describes the task:
### Input:
Processes list of items assigning unique paths to each.
### Response:
def _process_items(cls, vals):
    "Processes list of items assigning unique paths to each."
    # Another instance of this container: reuse its item list directly.
    if type(vals) is cls:
        return vals.data
    # Normalize a single item into a one-element list.
    if not isinstance(vals, (list, tuple)):
        vals = [vals]
    items = []
    path_counts = defaultdict(lambda: 1)
    cls._unpack_paths(vals, items, path_counts)
    items = cls._deduplicate_items(items)
return items |
def highlight(text: str, color_code: int, bold: bool=False) -> str:
"""Wraps the given string with terminal color codes.
Args:
text: The content to highlight.
color_code: The color to highlight with, e.g. 'shelltools.RED'.
bold: Whether to bold the content in addition to coloring.
Returns:
The highlighted string.
"""
return '{}\033[{}m{}\033[0m'.format(
'\033[1m' if bold else '',
color_code,
text,) | Wraps the given string with terminal color codes.
Args:
text: The content to highlight.
color_code: The color to highlight with, e.g. 'shelltools.RED'.
bold: Whether to bold the content in addition to coloring.
Returns:
        The highlighted string. | Below is the instruction that describes the task:
### Input:
Wraps the given string with terminal color codes.
Args:
text: The content to highlight.
color_code: The color to highlight with, e.g. 'shelltools.RED'.
bold: Whether to bold the content in addition to coloring.
Returns:
The highlighted string.
### Response:
def highlight(text: str, color_code: int, bold: bool=False) -> str:
"""Wraps the given string with terminal color codes.
Args:
text: The content to highlight.
color_code: The color to highlight with, e.g. 'shelltools.RED'.
bold: Whether to bold the content in addition to coloring.
Returns:
The highlighted string.
"""
return '{}\033[{}m{}\033[0m'.format(
'\033[1m' if bold else '',
color_code,
text,) |
def check(
    state,
    unused=False,
    style=False,
    ignore=None,
    args=None,
    **kwargs
):
    """Checks for security vulnerabilities and against PEP 508 markers provided in Pipfile."""
    # Imported lazily so the heavy core module only loads when this
    # command actually runs.
    from ..core import do_check
    # NOTE(review): `style` and any extra **kwargs are accepted but never
    # forwarded to do_check -- confirm whether that is intentional.
    do_check(
        three=state.three,
        python=state.python,
        system=state.system,
        unused=unused,
        ignore=ignore,
        args=args,
        pypi_mirror=state.pypi_mirror,
    ) | Checks for security vulnerabilities and against PEP 508 markers provided in Pipfile. | Below is the instruction that describes the task:
### Input:
Checks for security vulnerabilities and against PEP 508 markers provided in Pipfile.
### Response:
def check(
    state,
    unused=False,
    style=False,
    ignore=None,
    args=None,
    **kwargs
):
    """Checks for security vulnerabilities and against PEP 508 markers provided in Pipfile."""
    # Imported lazily so the heavy core module only loads when this
    # command actually runs.
    from ..core import do_check
    # NOTE(review): `style` and any extra **kwargs are accepted but never
    # forwarded to do_check -- confirm whether that is intentional.
    do_check(
        three=state.three,
        python=state.python,
        system=state.system,
        unused=unused,
        ignore=ignore,
        args=args,
        pypi_mirror=state.pypi_mirror,
) |
def digest(self, elimseq=False, notrunc=False):
    """Return the fuzzy hash of everything fed to update() so far.

    The hashing state itself is left untouched, so more data can be
    fed and digest() called again later.

    :param bool elimseq: request sequence elimination from the library.
    :param bool notrunc: do not truncate the final part of the hash.
    :return: The fuzzy hash
    :rtype: String
    :raises InternalError: If lib returns an internal error
    """
    if self._state == ffi.NULL:
        raise InternalError("State object is NULL")
    # Translate the keyword arguments into the libfuzzy flag bits.
    flags = 0
    if elimseq:
        flags |= binding.lib.FUZZY_FLAG_ELIMSEQ
    if notrunc:
        flags |= binding.lib.FUZZY_FLAG_NOTRUNC
    result = ffi.new("char[]", binding.lib.FUZZY_MAX_RESULT)
    if binding.lib.fuzzy_digest(self._state, result, flags) != 0:
        raise InternalError("Function returned an unexpected error code")
return ffi.string(result).decode("ascii") | Obtain the fuzzy hash.
This operation does not change the state at all. It reports the hash
for the concatenation of the data previously fed using update().
:return: The fuzzy hash
:rtype: String
    :raises InternalError: If lib returns an internal error | Below is the instruction that describes the task:
### Input:
Obtain the fuzzy hash.
This operation does not change the state at all. It reports the hash
for the concatenation of the data previously fed using update().
:return: The fuzzy hash
:rtype: String
:raises InternalError: If lib returns an internal error
### Response:
def digest(self, elimseq=False, notrunc=False):
    """Return the fuzzy hash of everything fed to update() so far.

    The hashing state itself is left untouched, so more data can be
    fed and digest() called again later.

    :param bool elimseq: request sequence elimination from the library.
    :param bool notrunc: do not truncate the final part of the hash.
    :return: The fuzzy hash
    :rtype: String
    :raises InternalError: If lib returns an internal error
    """
    if self._state == ffi.NULL:
        raise InternalError("State object is NULL")
    # Translate the keyword arguments into the libfuzzy flag bits.
    flags = 0
    if elimseq:
        flags |= binding.lib.FUZZY_FLAG_ELIMSEQ
    if notrunc:
        flags |= binding.lib.FUZZY_FLAG_NOTRUNC
    result = ffi.new("char[]", binding.lib.FUZZY_MAX_RESULT)
    if binding.lib.fuzzy_digest(self._state, result, flags) != 0:
        raise InternalError("Function returned an unexpected error code")
return ffi.string(result).decode("ascii") |
def Dropout(p=0, per_channel=False, name=None, deterministic=False, random_state=None):
    """
    Augmenter that sets a certain fraction of pixels in images to zero.
    dtype support::
        See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.
    Parameters
    ----------
    p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
        The probability of any pixel being dropped (i.e. set to zero).
        * If a float, then that value will be used for all images. A value
          of 1.0 would mean that all pixels will be dropped and 0.0 that
          no pixels would be dropped. A value of 0.05 corresponds to 5
          percent of all pixels dropped.
        * If a tuple ``(a, b)``, then a value p will be sampled from the
          range ``a <= p <= b`` per image and be used as the pixel's dropout
          probability.
        * If a StochasticParameter, then this parameter will be used to
          determine per pixel whether it should be dropped (sampled value
          of 0) or shouldn't (sampled value of 1).
          If you instead want to provide the probability as a stochastic
          parameter, you can usually do ``imgaug.parameters.Binomial(1-p)``
          to convert parameter `p` to a 0/1 representation.
    per_channel : bool or float, optional
        Whether to use the same value (is dropped / is not dropped)
        for all channels of a pixel (False) or to sample a new value for each
        channel (True).
        If this value is a float p, then for p percent of all images
        `per_channel` will be treated as True, otherwise as False.
    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    Examples
    --------
    >>> aug = iaa.Dropout(0.02)
    drops 2 percent of all pixels.
    >>> aug = iaa.Dropout((0.0, 0.05))
    drops in each image a random fraction of all pixels, where the fraction
    is in the range ``0.0 <= x <= 0.05``.
    >>> aug = iaa.Dropout(0.02, per_channel=True)
    drops 2 percent of all pixels in a channel-wise fashion, i.e. it is unlikely
    for any pixel to have all channels set to zero (black pixels).
    >>> aug = iaa.Dropout(0.02, per_channel=0.5)
    same as previous example, but the `per_channel` feature is only active
    for 50 percent of all images.
    """
    if ia.is_single_number(p):
        # Binomial(1 - p) samples 1 (keep) with prob 1-p and 0 (drop)
        # with prob p; the samples are multiplied onto the image.
        p2 = iap.Binomial(1 - p)
    elif ia.is_iterable(p):
        # Interval (a, b): sample the dropout probability per image.
        ia.do_assert(len(p) == 2)
        ia.do_assert(p[0] < p[1])
        ia.do_assert(0 <= p[0] <= 1.0)
        ia.do_assert(0 <= p[1] <= 1.0)
        p2 = iap.Binomial(iap.Uniform(1 - p[1], 1 - p[0]))
    elif isinstance(p, iap.StochasticParameter):
        # Caller-provided parameter is used as-is (0 = drop, 1 = keep).
        p2 = p
    else:
        raise Exception("Expected p to be float or int or StochasticParameter, got %s." % (type(p),))
    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)
    return MultiplyElementwise(p2, per_channel=per_channel, name=name, deterministic=deterministic,
random_state=random_state) | Augmenter that sets a certain fraction of pixels in images to zero.
dtype support::
See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.
Parameters
----------
p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The probability of any pixel being dropped (i.e. set to zero).
* If a float, then that value will be used for all images. A value
of 1.0 would mean that all pixels will be dropped and 0.0 that
no pixels would be dropped. A value of 0.05 corresponds to 5
percent of all pixels dropped.
* If a tuple ``(a, b)``, then a value p will be sampled from the
range ``a <= p <= b`` per image and be used as the pixel's dropout
probability.
* If a StochasticParameter, then this parameter will be used to
determine per pixel whether it should be dropped (sampled value
of 0) or shouldn't (sampled value of 1).
If you instead want to provide the probability as a stochastic
parameter, you can usually do ``imgaug.parameters.Binomial(1-p)``
to convert parameter `p` to a 0/1 representation.
per_channel : bool or float, optional
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Dropout(0.02)
drops 2 percent of all pixels.
>>> aug = iaa.Dropout((0.0, 0.05))
drops in each image a random fraction of all pixels, where the fraction
is in the range ``0.0 <= x <= 0.05``.
>>> aug = iaa.Dropout(0.02, per_channel=True)
drops 2 percent of all pixels in a channel-wise fashion, i.e. it is unlikely
for any pixel to have all channels set to zero (black pixels).
>>> aug = iaa.Dropout(0.02, per_channel=0.5)
same as previous example, but the `per_channel` feature is only active
for 50 percent of all images. | Below is the instruction that describes the task:
### Input:
Augmenter that sets a certain fraction of pixels in images to zero.
dtype support::
See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.
Parameters
----------
p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The probability of any pixel being dropped (i.e. set to zero).
* If a float, then that value will be used for all images. A value
of 1.0 would mean that all pixels will be dropped and 0.0 that
no pixels would be dropped. A value of 0.05 corresponds to 5
percent of all pixels dropped.
* If a tuple ``(a, b)``, then a value p will be sampled from the
range ``a <= p <= b`` per image and be used as the pixel's dropout
probability.
* If a StochasticParameter, then this parameter will be used to
determine per pixel whether it should be dropped (sampled value
of 0) or shouldn't (sampled value of 1).
If you instead want to provide the probability as a stochastic
parameter, you can usually do ``imgaug.parameters.Binomial(1-p)``
to convert parameter `p` to a 0/1 representation.
per_channel : bool or float, optional
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Dropout(0.02)
drops 2 percent of all pixels.
>>> aug = iaa.Dropout((0.0, 0.05))
drops in each image a random fraction of all pixels, where the fraction
is in the range ``0.0 <= x <= 0.05``.
>>> aug = iaa.Dropout(0.02, per_channel=True)
drops 2 percent of all pixels in a channel-wise fashion, i.e. it is unlikely
for any pixel to have all channels set to zero (black pixels).
>>> aug = iaa.Dropout(0.02, per_channel=0.5)
same as previous example, but the `per_channel` feature is only active
for 50 percent of all images.
### Response:
def Dropout(p=0, per_channel=False, name=None, deterministic=False, random_state=None):
    """
    Augmenter that sets a certain fraction of pixels in images to zero.
    dtype support::
        See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.
    Parameters
    ----------
    p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
        The probability of any pixel being dropped (i.e. set to zero).
        * If a float, then that value will be used for all images. A value
          of 1.0 would mean that all pixels will be dropped and 0.0 that
          no pixels would be dropped. A value of 0.05 corresponds to 5
          percent of all pixels dropped.
        * If a tuple ``(a, b)``, then a value p will be sampled from the
          range ``a <= p <= b`` per image and be used as the pixel's dropout
          probability.
        * If a StochasticParameter, then this parameter will be used to
          determine per pixel whether it should be dropped (sampled value
          of 0) or shouldn't (sampled value of 1).
          If you instead want to provide the probability as a stochastic
          parameter, you can usually do ``imgaug.parameters.Binomial(1-p)``
          to convert parameter `p` to a 0/1 representation.
    per_channel : bool or float, optional
        Whether to use the same value (is dropped / is not dropped)
        for all channels of a pixel (False) or to sample a new value for each
        channel (True).
        If this value is a float p, then for p percent of all images
        `per_channel` will be treated as True, otherwise as False.
    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    Examples
    --------
    >>> aug = iaa.Dropout(0.02)
    drops 2 percent of all pixels.
    >>> aug = iaa.Dropout((0.0, 0.05))
    drops in each image a random fraction of all pixels, where the fraction
    is in the range ``0.0 <= x <= 0.05``.
    >>> aug = iaa.Dropout(0.02, per_channel=True)
    drops 2 percent of all pixels in a channel-wise fashion, i.e. it is unlikely
    for any pixel to have all channels set to zero (black pixels).
    >>> aug = iaa.Dropout(0.02, per_channel=0.5)
    same as previous example, but the `per_channel` feature is only active
    for 50 percent of all images.
    """
    if ia.is_single_number(p):
        # Binomial(1 - p) samples 1 (keep) with prob 1-p and 0 (drop)
        # with prob p; the samples are multiplied onto the image.
        p2 = iap.Binomial(1 - p)
    elif ia.is_iterable(p):
        # Interval (a, b): sample the dropout probability per image.
        ia.do_assert(len(p) == 2)
        ia.do_assert(p[0] < p[1])
        ia.do_assert(0 <= p[0] <= 1.0)
        ia.do_assert(0 <= p[1] <= 1.0)
        p2 = iap.Binomial(iap.Uniform(1 - p[1], 1 - p[0]))
    elif isinstance(p, iap.StochasticParameter):
        # Caller-provided parameter is used as-is (0 = drop, 1 = keep).
        p2 = p
    else:
        raise Exception("Expected p to be float or int or StochasticParameter, got %s." % (type(p),))
    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)
    return MultiplyElementwise(p2, per_channel=per_channel, name=name, deterministic=deterministic,
random_state=random_state) |
def revoke_admin():
    """Form submission handler for revoking admin access to a build.

    Validates the posted form, refuses to act when the target user does
    not exist, is the current user, or is not an owner of the build, and
    otherwise removes the user from the build's owners under row locks.
    """
    build = g.build
    form = forms.RemoveAdminForm()
    if form.validate_on_submit():
        user = models.User.query.get(form.user_id.data)
        if not user:
            logging.debug('User being revoked admin access does not exist.'
                          'id=%r, build_id=%r', form.user_id.data, build.id)
            abort(400)
        if user == current_user:
            logging.debug('User trying to remove themself as admin. '
                          'id=%r, build_id=%r', user.id, build.id)
            abort(400)
        db.session.add(build)
        db.session.add(user)
        # Lock both rows so the ownership check and removal are atomic.
        db.session.refresh(build, lockmode='update')
        db.session.refresh(user, lockmode='update')
        # Bug fix: filter_by() returns a Query object, which is always
        # truthy, so the "not an owner" guard below could never fire.
        # Materialize the lookup with .first() instead.
        user_is_owner = build.owners.filter_by(id=user.id).first()
        if not user_is_owner:
            logging.debug('User being revoked admin access is not owner. '
                          'id=%r, build_id=%r.', user.id, build.id)
            abort(400)
        build.owners.remove(user)
        save_admin_log(build, revoked_admin=True, message=user.email_address)
        db.session.commit()
        # Invalidate any cached permissions held for the revoked user.
        operations.UserOps(user.get_id()).evict()
    return redirect(url_for('manage_admins', build_id=build.id)) | Form submission handler for revoking admin access to a build. | Below is the instruction that describes the task:
### Input:
Form submission handler for revoking admin access to a build.
### Response:
def revoke_admin():
    """Form submission handler for revoking admin access to a build.

    Validates the posted form, refuses to act when the target user does
    not exist, is the current user, or is not an owner of the build, and
    otherwise removes the user from the build's owners under row locks.
    """
    build = g.build
    form = forms.RemoveAdminForm()
    if form.validate_on_submit():
        user = models.User.query.get(form.user_id.data)
        if not user:
            logging.debug('User being revoked admin access does not exist.'
                          'id=%r, build_id=%r', form.user_id.data, build.id)
            abort(400)
        if user == current_user:
            logging.debug('User trying to remove themself as admin. '
                          'id=%r, build_id=%r', user.id, build.id)
            abort(400)
        db.session.add(build)
        db.session.add(user)
        # Lock both rows so the ownership check and removal are atomic.
        db.session.refresh(build, lockmode='update')
        db.session.refresh(user, lockmode='update')
        # Bug fix: filter_by() returns a Query object, which is always
        # truthy, so the "not an owner" guard below could never fire.
        # Materialize the lookup with .first() instead.
        user_is_owner = build.owners.filter_by(id=user.id).first()
        if not user_is_owner:
            logging.debug('User being revoked admin access is not owner. '
                          'id=%r, build_id=%r.', user.id, build.id)
            abort(400)
        build.owners.remove(user)
        save_admin_log(build, revoked_admin=True, message=user.email_address)
        db.session.commit()
        # Invalidate any cached permissions held for the revoked user.
        operations.UserOps(user.get_id()).evict()
return redirect(url_for('manage_admins', build_id=build.id)) |
def check_array_or_list(input):
    """Return 1D ndarray, if input can be converted and elements are
    non-negative.

    Args:
        input: 1D ``numpy.ndarray`` or ``list`` of non-negative numbers.

    Raises:
        TypeError: if ``input`` is neither an ndarray nor a list.
        ValueError: if the array is not 1-dimensional or contains a
            negative value.
    """
    # Identity comparison is the idiomatic way to test for an exact type.
    if type(input) is not np.ndarray:
        if type(input) is list:
            output = np.array(input)
        else:
            raise TypeError('Expecting input type as ndarray or list.')
    else:
        output = input
    if output.ndim != 1:
        raise ValueError('Input array must have 1 dimension.')
    # np.any short-circuits instead of summing the whole boolean mask.
    if np.any(output < 0):
        raise ValueError("Input array values cannot be negative.")
return output | Return 1D ndarray, if input can be converted and elements are
non-negative. | Below is the instruction that describes the task:
### Input:
Return 1D ndarray, if input can be converted and elements are
non-negative.
### Response:
def check_array_or_list(input):
    """Return 1D ndarray, if input can be converted and elements are
    non-negative.

    Args:
        input: 1D ``numpy.ndarray`` or ``list`` of non-negative numbers.

    Raises:
        TypeError: if ``input`` is neither an ndarray nor a list.
        ValueError: if the array is not 1-dimensional or contains a
            negative value.
    """
    # Identity comparison is the idiomatic way to test for an exact type.
    if type(input) is not np.ndarray:
        if type(input) is list:
            output = np.array(input)
        else:
            raise TypeError('Expecting input type as ndarray or list.')
    else:
        output = input
    if output.ndim != 1:
        raise ValueError('Input array must have 1 dimension.')
    # np.any short-circuits instead of summing the whole boolean mask.
    if np.any(output < 0):
        raise ValueError("Input array values cannot be negative.")
return output |
def _clone_reverses(self, old_reverses):
    """
    Clones all the objects that were previously gathered.

    ``old_reverses`` appears to map a relation category (with 'm2m'
    treated specially) to a mapping whose values are
    ``(field_name, sub_objs)`` pairs gathered from the pre-clone
    instance -- TODO confirm against the gathering code.
    """
    for ctype, reverses in old_reverses.items():
        for parts in reverses.values():
            sub_objs = parts[1]
            field_name = parts[0]
            attrs = {}
            for sub_obj in sub_objs:
                # For non-m2m relations, repoint the cloned child at this
                # (already cloned) parent; the dict is built only once
                # because all sub_objs share the same relating field.
                if ctype != 'm2m' and not attrs:
                    field = sub_obj._meta.get_field(field_name)
                    attrs = {
                        field.column: getattr(self, field.rel.field_name)
                    }
                sub_obj._clone(**attrs)
            if ctype == 'm2m':
                setattr(self, field_name, sub_objs) | Clones all the objects that were previously gathered. | Below is the instruction that describes the task:
### Input:
Clones all the objects that were previously gathered.
### Response:
def _clone_reverses(self, old_reverses):
    """
    Clones all the objects that were previously gathered.

    ``old_reverses`` appears to map a relation category (with 'm2m'
    treated specially) to a mapping whose values are
    ``(field_name, sub_objs)`` pairs gathered from the pre-clone
    instance -- TODO confirm against the gathering code.
    """
    for ctype, reverses in old_reverses.items():
        for parts in reverses.values():
            sub_objs = parts[1]
            field_name = parts[0]
            attrs = {}
            for sub_obj in sub_objs:
                # For non-m2m relations, repoint the cloned child at this
                # (already cloned) parent; the dict is built only once
                # because all sub_objs share the same relating field.
                if ctype != 'm2m' and not attrs:
                    field = sub_obj._meta.get_field(field_name)
                    attrs = {
                        field.column: getattr(self, field.rel.field_name)
                    }
                sub_obj._clone(**attrs)
            if ctype == 'm2m':
setattr(self, field_name, sub_objs) |
def asarray_ndim(a, *ndims, **kwargs):
    """Coerce `a` into a numpy array and validate its dimensionality.

    Parameters
    ----------
    a : array_like
    *ndims : int, optional
        Allowed values for number of dimensions.
    **kwargs
        Passed through to :func:`numpy.array`; the extra keyword
        `allow_none` (default False) lets a None input pass through.

    Returns
    -------
    a : numpy.ndarray
    """
    allow_none = kwargs.pop('allow_none', False)
    kwargs.setdefault('copy', False)
    if allow_none and a is None:
        return None
    a = np.array(a, **kwargs)
    if a.ndim not in ndims:
        # Build a human-readable description of the accepted ranks.
        if len(ndims) > 1:
            expected = 'one of %s' % str(ndims)
        else:
            expected = '%s' % ndims[0]
        raise TypeError('bad number of dimensions: expected %s; found %s' %
                        (expected, a.ndim))
return a | Ensure numpy array.
Parameters
----------
a : array_like
*ndims : int, optional
Allowed values for number of dimensions.
**kwargs
Passed through to :func:`numpy.array`.
Returns
-------
    a : numpy.ndarray | Below is the instruction that describes the task:
### Input:
Ensure numpy array.
Parameters
----------
a : array_like
*ndims : int, optional
Allowed values for number of dimensions.
**kwargs
Passed through to :func:`numpy.array`.
Returns
-------
a : numpy.ndarray
### Response:
def asarray_ndim(a, *ndims, **kwargs):
"""Ensure numpy array.
Parameters
----------
a : array_like
*ndims : int, optional
Allowed values for number of dimensions.
**kwargs
Passed through to :func:`numpy.array`.
Returns
-------
a : numpy.ndarray
"""
allow_none = kwargs.pop('allow_none', False)
kwargs.setdefault('copy', False)
if a is None and allow_none:
return None
a = np.array(a, **kwargs)
if a.ndim not in ndims:
if len(ndims) > 1:
expect_str = 'one of %s' % str(ndims)
else:
# noinspection PyUnresolvedReferences
expect_str = '%s' % ndims[0]
raise TypeError('bad number of dimensions: expected %s; found %s' %
(expect_str, a.ndim))
return a |
def __connect_to_bus(self, bus):
    """
    Attempt to connect to an I2C bus

    If ``bus`` is None, probe bus 0 first and fall back to bus 1;
    otherwise open the requested bus. On success the open SMBus object
    is stored on ``self.bus``; on failure the IOError propagates.
    """
    def connect(bus_num):
        # Helper: open one SMBus, log the outcome, re-raise on failure.
        try:
            self.log.debug("Attempting to connect to bus %s..." % bus_num)
            self.bus = smbus.SMBus(bus_num)
            self.log.debug("Success")
        except IOError:
            self.log.debug("Failed")
            raise
    # If the bus is not explicitly stated, try 0 and then try 1 if that
    # fails
    if bus is None:
        try:
            connect(0)
            return
        except IOError:
            pass
        try:
            connect(1)
            return
        except IOError:
            raise
    else:
        try:
            connect(bus)
            return
        except IOError:
            raise | Attempt to connect to an I2C bus | Below is the the instruction that describes the task:
### Input:
Attempt to connect to an I2C bus
### Response:
def __connect_to_bus(self, bus):
"""
Attempt to connect to an I2C bus
"""
def connect(bus_num):
try:
self.log.debug("Attempting to connect to bus %s..." % bus_num)
self.bus = smbus.SMBus(bus_num)
self.log.debug("Success")
except IOError:
self.log.debug("Failed")
raise
# If the bus is not explicitly stated, try 0 and then try 1 if that
# fails
if bus is None:
try:
connect(0)
return
except IOError:
pass
try:
connect(1)
return
except IOError:
raise
else:
try:
connect(bus)
return
except IOError:
raise |
def checkReszie(self):
    # NOTE(review): name is a typo of "checkResize"; kept for API stability.
    '''Checks if the window was resized.'''
    if not self.resized:
        # cache the previous pixel size, then recompute from the figure
        oldypx = self.ypx
        oldxpx = self.xpx
        # current figure size in pixels = size-in-inches * dpi
        self.ypx = self.figure.get_size_inches()[1]*self.figure.dpi
        self.xpx = self.figure.get_size_inches()[0]*self.figure.dpi
        if (oldypx != self.ypx) or (oldxpx != self.xpx):
            self.resized = True
    else:
        # a resize was already flagged; clear the flag for the next check
        self.resized = False | Checks if the window was resized. | Below is the the instruction that describes the task:
### Input:
Checks if the window was resized.
### Response:
def checkReszie(self):
    # NOTE(review): name is a typo of "checkResize"; kept for API stability.
    '''Checks if the window was resized.'''
    if not self.resized:
        # cache the previous pixel size, then recompute from the figure
        oldypx = self.ypx
        oldxpx = self.xpx
        # current figure size in pixels = size-in-inches * dpi
        self.ypx = self.figure.get_size_inches()[1]*self.figure.dpi
        self.xpx = self.figure.get_size_inches()[0]*self.figure.dpi
        if (oldypx != self.ypx) or (oldxpx != self.xpx):
            self.resized = True
    else:
        # a resize was already flagged; clear the flag for the next check
        self.resized = False |
def language(self, value):
    """
    Setter for **self.__language** attribute.
    :param value: Attribute value.
    :type value: unicode
    """
    # NOTE(review): 'unicode' exists only on Python 2 - this module
    # presumably targets Python 2; on Python 3 this assert would raise
    # NameError. Also, asserts are stripped under -O.
    if value is not None:
        assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
            "language", value)
    self.__language = value | Setter for **self.__language** attribute.
:param value: Attribute value.
:type value: unicode | Below is the instruction that describes the task:
### Input:
Setter for **self.__language** attribute.
:param value: Attribute value.
:type value: unicode
### Response:
def language(self, value):
    """
    Setter for **self.__language** attribute.
    :param value: Attribute value.
    :type value: unicode
    """
    # NOTE(review): 'unicode' exists only on Python 2 - this module
    # presumably targets Python 2; on Python 3 this assert would raise
    # NameError. Also, asserts are stripped under -O.
    if value is not None:
        assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
            "language", value)
    self.__language = value |
def fetch_pcr(*args, **kwargs):
    """Wrapper for fetch to automatically parse results from the PCR API."""
    # Load user's token from `PCR_AUTH_TOKEN`, use public token as default if missing
    # (note: this overrides any 'token' the caller passed in kwargs)
    kwargs['token'] = os.getenv("PCR_AUTH_TOKEN", "public")
    # Unwrap the response envelope: callers only care about the 'result' payload.
    return fetch(DOMAIN, *args, **kwargs)['result'] | Wrapper for fetch to automatically parse results from the PCR API. | Below is the the instruction that describes the task:
### Input:
Wrapper for fetch to automatically parse results from the PCR API.
### Response:
def fetch_pcr(*args, **kwargs):
    """Wrapper for fetch to automatically parse results from the PCR API."""
    # Load user's token from `PCR_AUTH_TOKEN`, use public token as default if missing
    # (note: this overrides any 'token' the caller passed in kwargs)
    kwargs['token'] = os.getenv("PCR_AUTH_TOKEN", "public")
    # Unwrap the response envelope: callers only care about the 'result' payload.
    return fetch(DOMAIN, *args, **kwargs)['result'] |
def main(args=sys.argv[1:]):
    """Generate EO O&M XML metadata.

    Parses command-line arguments, opens a Sentinel-2 SAFE package,
    optionally selects a single granule, renders the matching EO O&M
    template and writes it to a file or stdout.
    """
    # NOTE(review): the default 'args=sys.argv[1:]' is evaluated once at
    # import time, not per call - fine for a CLI entry point, but callers
    # should pass args explicitly in tests.
    parser = argparse.ArgumentParser()
    parser.add_argument("filename", nargs=1)
    parser.add_argument("--granule-id", dest="granule_id",
        help=(
            "Optional. Specify a granule to export metadata from."
        )
    )
    parser.add_argument("--single-granule", dest="single_granule",
        action="store_true", default=False,
        help=(
            "When only one granule is contained in the package, include product "
            "metadata from this one granule. Fails when more than one granule "
            "is contained."
        )
    )
    parser.add_argument("--out-file", "-f", dest="out_file",
        help=(
            "Specify an output file to write the metadata to. By default, the "
            "XML is printed on stdout."
        )
    )
    parser.add_argument("--resolution", "-r", dest="resolution", default="10",
        help=(
            "Only produce metadata for bands of this resolution (in meters). "
            "Default is 10."
        )
    )
    parsed = parser.parse_args(args)
    try:
        safe_pkg = s2reader.open(parsed.filename[0])
    # NOTE(review): 'except IOError, e' is Python 2-only syntax; a port to
    # Python 3 would need 'except IOError as e'.
    except IOError, e:
        parser.error('Could not open SAFE package. Error was "%s"' % e)
    granules = safe_pkg.granules
    granule = None
    # Granule selection: an explicit --granule-id wins; otherwise
    # --single-granule picks the only granule (and fails if there are more).
    if parsed.granule_id:
        granule_dict = dict(
            (granule.granule_identifier, granule) for granule in granules
        )
        try:
            granule = granule_dict[parsed.granule_id]
        except KeyError:
            parser.error('No such granule %r' % parsed.granule_id)
    elif parsed.single_granule:
        if len(granules) > 1:
            parser.error('Package contains more than one granule.')
        granule = granules[0]
    # Render the granule template when a granule was selected, otherwise
    # the product-level template.
    params = _get_product_template_params(safe_pkg, parsed.resolution)
    if granule:
        params.update(_get_granule_template_params(granule, parsed.resolution))
        xml_string = EOOM_TEMPLATE_GRANULE.format(**params)
    else:
        xml_string = EOOM_TEMPLATE_PRODUCT.format(**params)
    if parsed.out_file:
        with open(parsed.out_file, "w") as f:
            f.write(xml_string)
    else:
        print(xml_string) | Generate EO O&M XML metadata. | Below is the the instruction that describes the task:
### Input:
Generate EO O&M XML metadata.
### Response:
def main(args=sys.argv[1:]):
"""Generate EO O&M XML metadata."""
parser = argparse.ArgumentParser()
parser.add_argument("filename", nargs=1)
parser.add_argument("--granule-id", dest="granule_id",
help=(
"Optional. Specify a granule to export metadata from."
)
)
parser.add_argument("--single-granule", dest="single_granule",
action="store_true", default=False,
help=(
"When only one granule is contained in the package, include product "
"metadata from this one granule. Fails when more than one granule "
"is contained."
)
)
parser.add_argument("--out-file", "-f", dest="out_file",
help=(
"Specify an output file to write the metadata to. By default, the "
"XML is printed on stdout."
)
)
parser.add_argument("--resolution", "-r", dest="resolution", default="10",
help=(
"Only produce metadata for bands of this resolution (in meters). "
"Default is 10."
)
)
parsed = parser.parse_args(args)
try:
safe_pkg = s2reader.open(parsed.filename[0])
except IOError, e:
parser.error('Could not open SAFE package. Error was "%s"' % e)
granules = safe_pkg.granules
granule = None
if parsed.granule_id:
granule_dict = dict(
(granule.granule_identifier, granule) for granule in granules
)
try:
granule = granule_dict[parsed.granule_id]
except KeyError:
parser.error('No such granule %r' % parsed.granule_id)
elif parsed.single_granule:
if len(granules) > 1:
parser.error('Package contains more than one granule.')
granule = granules[0]
params = _get_product_template_params(safe_pkg, parsed.resolution)
if granule:
params.update(_get_granule_template_params(granule, parsed.resolution))
xml_string = EOOM_TEMPLATE_GRANULE.format(**params)
else:
xml_string = EOOM_TEMPLATE_PRODUCT.format(**params)
if parsed.out_file:
with open(parsed.out_file, "w") as f:
f.write(xml_string)
else:
print(xml_string) |
def load_children(self):
    """
    Load the subelements from the xml_element in its correspondent classes.
    :returns: List of child objects.
    :rtype: list
    :raises CardinalityException: If there is more than one Version child.
    """
    # Containers - one bucket per recognized tag; unrecognized tags are
    # silently ignored.
    children = list()
    statuses = list()
    version = None
    titles = list()
    descriptions = list()
    platforms = list()
    idents = list()
    # Element load
    for element in self.xml_element:
        uri, tag = Element.get_namespace_and_tag(element.tag)
        if tag == 'version':
            # at most one version element is allowed
            if version is None:
                version = Version(element)
            else:
                error_msg = 'version element found more than once'
                raise CardinalityException(error_msg)
        elif tag == 'status':
            statuses.append(Status(element))
        elif tag == 'title':
            titles.append(Title(element))
        elif tag == 'description':
            descriptions.append(Description(element))
        elif tag == 'platform':
            platforms.append(Platform(element))
        elif tag == 'ident':
            idents.append(Ident(element))
    # List construction - children are returned in a fixed order:
    # statuses, version, titles, descriptions, platforms, idents.
    children.extend(statuses)
    if version is not None:
        children.append(version)
    children.extend(titles)
    children.extend(descriptions)
    children.extend(platforms)
    children.extend(idents)
    return children | Load the subelements from the xml_element in its correspondent classes.
:returns: List of child objects.
:rtype: list
:raises CardinalityException: If there is more than one Version child. | Below is the instruction that describes the task:
### Input:
Load the subelements from the xml_element in its correspondent classes.
:returns: List of child objects.
:rtype: list
:raises CardinalityException: If there is more than one Version child.
### Response:
def load_children(self):
"""
Load the subelements from the xml_element in its correspondent classes.
:returns: List of child objects.
:rtype: list
:raises CardinalityException: If there is more than one Version child.
"""
# Containers
children = list()
statuses = list()
version = None
titles = list()
descriptions = list()
platforms = list()
idents = list()
# Element load
for element in self.xml_element:
uri, tag = Element.get_namespace_and_tag(element.tag)
if tag == 'version':
if version is None:
version = Version(element)
else:
error_msg = 'version element found more than once'
raise CardinalityException(error_msg)
elif tag == 'status':
statuses.append(Status(element))
elif tag == 'title':
titles.append(Title(element))
elif tag == 'description':
descriptions.append(Description(element))
elif tag == 'platform':
platforms.append(Platform(element))
elif tag == 'ident':
idents.append(Ident(element))
# List construction
children.extend(statuses)
if version is not None:
children.append(version)
children.extend(titles)
children.extend(descriptions)
children.extend(platforms)
children.extend(idents)
return children |
def get_chat_member(self, *args, **kwargs):
    """See :func:`get_chat_member`"""
    # Delegate to the module-level get_chat_member, layering this
    # instance's overrides on top of the caller-supplied kwargs, and
    # execute the request immediately via .run().
    return get_chat_member(*args, **self._merge_overrides(**kwargs)).run() | See :func:`get_chat_member` | Below is the the instruction that describes the task:
### Input:
See :func:`get_chat_member`
### Response:
def get_chat_member(self, *args, **kwargs):
    """See :func:`get_chat_member`"""
    # Delegate to the module-level get_chat_member, layering this
    # instance's overrides on top of the caller-supplied kwargs, and
    # execute the request immediately via .run().
    return get_chat_member(*args, **self._merge_overrides(**kwargs)).run() |
def find_contours_level(density, x, y, level, closed=False):
    """Find iso-valued density contours for a given level value
    Parameters
    ----------
    density: 2d ndarray of shape (M, N)
        Kernel density estimate for which to compute the contours
    x: 2d ndarray of shape (M, N) or 1d ndarray of size M
        X-values corresponding to `kde`
    y: 2d ndarray of shape (M, N) or 1d ndarray of size M
        Y-values corresponding to `kde`
    level: float between 0 and 1
        Value along which to find contours in `kde` relative
        to its maximum kde
    closed: bool
        If True, zero-pad `density` by one cell on each side so that
        contours touching the array boundary are closed; if False,
        contours are left open at the boundary.
    Returns
    -------
    contours: list of ndarrays of shape (P, 2)
        Contours found for the given level value
    See Also
    --------
    skimage.measure.find_contours: Contour finding algorithm used
    """
    if level >= 1 or level <= 0:
        raise ValueError("`level` must be in (0,1), got '{}'!".format(level))
    # level relative to maximum
    level = level * density.max()
    # xy coordinates: collapse 2d meshgrid-style inputs to 1d axes
    # (asserts check the grid is uniform along the collapsed axis)
    if len(x.shape) == 2:
        assert np.all(x[:, 0] == x[:, 1])
        x = x[:, 0]
    if len(y.shape) == 2:
        assert np.all(y[0, :] == y[1, :])
        y = y[0, :]
    if closed:
        # find closed contours (padding shifts indices by one -> offset)
        density = np.pad(density, ((1, 1), (1, 1)), mode="constant")
        offset = 1
    else:
        # leave contours open at kde boundary
        offset = 0
    conts_idx = find_contours(density, level)
    conts_xy = []
    # convert each contour from array-index space to data coordinates
    for cc in conts_idx:
        cx = np.interp(x=cc[:, 0]-offset,
                       xp=range(x.size),
                       fp=x)
        cy = np.interp(x=cc[:, 1]-offset,
                       xp=range(y.size),
                       fp=y)
        conts_xy.append(np.stack((cx, cy), axis=1))
    return conts_xy | Find iso-valued density contours for a given level value
Parameters
----------
density: 2d ndarray of shape (M, N)
Kernel density estimate for which to compute the contours
x: 2d ndarray of shape (M, N) or 1d ndarray of size M
X-values corresponding to `kde`
y: 2d ndarray of shape (M, N) or 1d ndarray of size M
Y-values corresponding to `kde`
level: float between 0 and 1
Value along which to find contours in `kde` relative
to its maximum kde
Returns
-------
contours: list of ndarrays of shape (P, 2)
Contours found for the given level value
See Also
--------
skimage.measure.find_contours: Contour finding algorithm used | Below is the instruction that describes the task:
### Input:
Find iso-valued density contours for a given level value
Parameters
----------
density: 2d ndarray of shape (M, N)
Kernel density estimate for which to compute the contours
x: 2d ndarray of shape (M, N) or 1d ndarray of size M
X-values corresponding to `kde`
y: 2d ndarray of shape (M, N) or 1d ndarray of size M
Y-values corresponding to `kde`
level: float between 0 and 1
Value along which to find contours in `kde` relative
to its maximum kde
Returns
-------
contours: list of ndarrays of shape (P, 2)
Contours found for the given level value
See Also
--------
skimage.measure.find_contours: Contour finding algorithm used
### Response:
def find_contours_level(density, x, y, level, closed=False):
"""Find iso-valued density contours for a given level value
Parameters
----------
density: 2d ndarray of shape (M, N)
Kernel density estimate for which to compute the contours
x: 2d ndarray of shape (M, N) or 1d ndarray of size M
X-values corresponding to `kde`
y: 2d ndarray of shape (M, N) or 1d ndarray of size M
Y-values corresponding to `kde`
level: float between 0 and 1
Value along which to find contours in `kde` relative
to its maximum kde
Returns
-------
contours: list of ndarrays of shape (P, 2)
Contours found for the given level value
See Also
--------
skimage.measure.find_contours: Contour finding algorithm used
"""
if level >= 1 or level <= 0:
raise ValueError("`level` must be in (0,1), got '{}'!".format(level))
# level relative to maximum
level = level * density.max()
# xy coordinates
if len(x.shape) == 2:
assert np.all(x[:, 0] == x[:, 1])
x = x[:, 0]
if len(y.shape) == 2:
assert np.all(y[0, :] == y[1, :])
y = y[0, :]
if closed:
# find closed contours
density = np.pad(density, ((1, 1), (1, 1)), mode="constant")
offset = 1
else:
# leave contours open at kde boundary
offset = 0
conts_idx = find_contours(density, level)
conts_xy = []
for cc in conts_idx:
cx = np.interp(x=cc[:, 0]-offset,
xp=range(x.size),
fp=x)
cy = np.interp(x=cc[:, 1]-offset,
xp=range(y.size),
fp=y)
conts_xy.append(np.stack((cx, cy), axis=1))
return conts_xy |
def compute_interpolator(self, ikwargs={}):
    """
    Compute/define the interpolating spline. This function can be overridden
    in a subclass to define custom interpolators.
    Parameters
    ----------
    ikwargs : dict, optional
        Additional optional keyword arguments. Possible values are:
        - **degree** : int, tuple, optional
            Degree of the interpolating spline. A tuple can be used to
            provide different degrees for the X- and Y-axes.
            Default value is degree=3.
        - **s** : float, optional
            Non-negative smoothing factor. Default value s=0 corresponds to
            interpolation.
        See :py:class:`~scipy.interpolate.RectBivariateSpline` for more
        details.
    Notes
    -----
    * When subclassing :py:class:`FittableImageModel` for the
      purpose of overriding :py:func:`compute_interpolator`,
      the :py:func:`evaluate` may need to be overridden as well depending
      on the behavior of the new interpolator. In addition, for
      improved future compatibility, make sure
      that the overriding method stores keyword arguments ``ikwargs``
      by calling ``_store_interpolator_kwargs`` method.
    * Use caution when modifying interpolator's degree or smoothness in
      a computationally intensive part of the code as it may decrease
      code performance due to the need to recompute interpolator.
    """
    from scipy.interpolate import RectBivariateSpline
    # NOTE(review): 'ikwargs={}' is a mutable default; it is only read
    # here, never mutated, so it is safe in practice.
    if 'degree' in ikwargs:
        degree = ikwargs['degree']
        # a 2-tuple gives separate spline degrees for the X and Y axes
        if hasattr(degree, '__iter__') and len(degree) == 2:
            degx = int(degree[0])
            degy = int(degree[1])
        else:
            degx = int(degree)
            degy = int(degree)
        if degx < 0 or degy < 0:
            raise ValueError("Interpolator degree must be a non-negative "
                             "integer")
    else:
        # default: bicubic spline
        degx = 3
        degy = 3
    if 's' in ikwargs:
        smoothness = ikwargs['s']
    else:
        smoothness = 0
    # NOTE(review): np.float is deprecated and removed in NumPy >= 1.24 -
    # confirm the targeted NumPy version still provides it.
    x = np.arange(self._nx, dtype=np.float)
    y = np.arange(self._ny, dtype=np.float)
    # data is transposed so the spline's first axis corresponds to x
    self.interpolator = RectBivariateSpline(
        x, y, self._data.T, kx=degx, ky=degy, s=smoothness
    )
    # record the kwargs so the interpolator can be rebuilt consistently
    self._store_interpolator_kwargs(ikwargs) | Compute/define the interpolating spline. This function can be overriden
in a subclass to define custom interpolators.
Parameters
----------
ikwargs : dict, optional
Additional optional keyword arguments. Possible values are:
- **degree** : int, tuple, optional
Degree of the interpolating spline. A tuple can be used to
provide different degrees for the X- and Y-axes.
Default value is degree=3.
- **s** : float, optional
Non-negative smoothing factor. Default value s=0 corresponds to
interpolation.
See :py:class:`~scipy.interpolate.RectBivariateSpline` for more
details.
Notes
-----
* When subclassing :py:class:`FittableImageModel` for the
purpose of overriding :py:func:`compute_interpolator`,
the :py:func:`evaluate` may need to overriden as well depending
on the behavior of the new interpolator. In addition, for
improved future compatibility, make sure
that the overriding method stores keyword arguments ``ikwargs``
by calling ``_store_interpolator_kwargs`` method.
* Use caution when modifying interpolator's degree or smoothness in
a computationally intensive part of the code as it may decrease
code performance due to the need to recompute interpolator. | Below is the instruction that describes the task:
### Input:
Compute/define the interpolating spline. This function can be overriden
in a subclass to define custom interpolators.
Parameters
----------
ikwargs : dict, optional
Additional optional keyword arguments. Possible values are:
- **degree** : int, tuple, optional
Degree of the interpolating spline. A tuple can be used to
provide different degrees for the X- and Y-axes.
Default value is degree=3.
- **s** : float, optional
Non-negative smoothing factor. Default value s=0 corresponds to
interpolation.
See :py:class:`~scipy.interpolate.RectBivariateSpline` for more
details.
Notes
-----
* When subclassing :py:class:`FittableImageModel` for the
purpose of overriding :py:func:`compute_interpolator`,
the :py:func:`evaluate` may need to overriden as well depending
on the behavior of the new interpolator. In addition, for
improved future compatibility, make sure
that the overriding method stores keyword arguments ``ikwargs``
by calling ``_store_interpolator_kwargs`` method.
* Use caution when modifying interpolator's degree or smoothness in
a computationally intensive part of the code as it may decrease
code performance due to the need to recompute interpolator.
### Response:
def compute_interpolator(self, ikwargs={}):
"""
Compute/define the interpolating spline. This function can be overriden
in a subclass to define custom interpolators.
Parameters
----------
ikwargs : dict, optional
Additional optional keyword arguments. Possible values are:
- **degree** : int, tuple, optional
Degree of the interpolating spline. A tuple can be used to
provide different degrees for the X- and Y-axes.
Default value is degree=3.
- **s** : float, optional
Non-negative smoothing factor. Default value s=0 corresponds to
interpolation.
See :py:class:`~scipy.interpolate.RectBivariateSpline` for more
details.
Notes
-----
* When subclassing :py:class:`FittableImageModel` for the
purpose of overriding :py:func:`compute_interpolator`,
the :py:func:`evaluate` may need to overriden as well depending
on the behavior of the new interpolator. In addition, for
improved future compatibility, make sure
that the overriding method stores keyword arguments ``ikwargs``
by calling ``_store_interpolator_kwargs`` method.
* Use caution when modifying interpolator's degree or smoothness in
a computationally intensive part of the code as it may decrease
code performance due to the need to recompute interpolator.
"""
from scipy.interpolate import RectBivariateSpline
if 'degree' in ikwargs:
degree = ikwargs['degree']
if hasattr(degree, '__iter__') and len(degree) == 2:
degx = int(degree[0])
degy = int(degree[1])
else:
degx = int(degree)
degy = int(degree)
if degx < 0 or degy < 0:
raise ValueError("Interpolator degree must be a non-negative "
"integer")
else:
degx = 3
degy = 3
if 's' in ikwargs:
smoothness = ikwargs['s']
else:
smoothness = 0
x = np.arange(self._nx, dtype=np.float)
y = np.arange(self._ny, dtype=np.float)
self.interpolator = RectBivariateSpline(
x, y, self._data.T, kx=degx, ky=degy, s=smoothness
)
self._store_interpolator_kwargs(ikwargs) |
def collect_split_adjustments(self,
                              adjustments_for_sid,
                              requested_qtr_data,
                              dates,
                              sid,
                              sid_idx,
                              sid_estimates,
                              split_adjusted_asof_idx,
                              pre_adjustments,
                              post_adjustments,
                              requested_split_adjusted_columns):
    """
    Collect split adjustments for future quarters. Re-apply adjustments
    that would be overwritten by overwrites. Merge split adjustments with
    overwrites into the given dictionary of splits for the given sid.
    Parameters
    ----------
    adjustments_for_sid : dict[str -> dict[int -> list]]
        The dictionary of adjustments to which splits need to be added.
        Initially it contains only overwrites.
    requested_qtr_data : pd.DataFrame
        The requested quarter data for each calendar date per sid.
    dates : pd.DatetimeIndex
        The calendar dates for which estimates data is requested.
    sid : int
        The sid for which adjustments need to be collected.
    sid_idx : int
        The index of `sid` in the adjusted array.
    sid_estimates : pd.DataFrame
        The raw estimates data for the given sid.
    split_adjusted_asof_idx : int
        The index in `dates` as-of which the data is split adjusted.
    pre_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
        The adjustment values and indexes in `dates` for
        adjustments that happened before the split-asof-date.
    post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
        The adjustment values, indexes in `dates`, and timestamps for
        adjustments that happened after the split-asof-date.
    requested_split_adjusted_columns : list of str
        List of requested split adjusted column names.
    """
    # Partition the raw split adjustments into those occurring before and
    # after the split-adjusted-asof date (keyed by column, then by index).
    (pre_adjustments_dict,
     post_adjustments_dict) = self._collect_adjustments(
        requested_qtr_data,
        sid,
        sid_idx,
        sid_estimates,
        split_adjusted_asof_idx,
        pre_adjustments,
        post_adjustments,
        requested_split_adjusted_columns,
    )
    for column_name in requested_split_adjusted_columns:
        for overwrite_ts in adjustments_for_sid[column_name]:
            # We need to cumulatively re-apply all adjustments up to the
            # split-adjusted-asof-date. We might not have any
            # pre-adjustments, so we should check for that.
            if overwrite_ts <= split_adjusted_asof_idx \
                    and pre_adjustments_dict:
                for split_ts in pre_adjustments_dict[column_name]:
                    # The split has to have occurred during the span of
                    # the overwrite.
                    if split_ts < overwrite_ts:
                        # Create new adjustments here so that we can
                        # re-apply all applicable adjustments to ONLY
                        # the dates being overwritten.
                        adjustments_for_sid[
                            column_name
                        ][overwrite_ts].extend([
                            Float64Multiply(
                                0,
                                overwrite_ts - 1,
                                sid_idx,
                                sid_idx,
                                adjustment.value
                            )
                            for adjustment
                            in pre_adjustments_dict[
                                column_name
                            ][split_ts]
                        ])
            # After the split-adjusted-asof-date, we need to re-apply all
            # adjustments that occur after that date and within the
            # bounds of the overwrite. They need to be applied starting
            # from the first date and until an end date. The end date is
            # the date of the newest information we get about
            # `requested_quarter` that is >= `split_ts`, or if there is no
            # new knowledge before `overwrite_ts`, then it is the date
            # before `overwrite_ts`.
            else:
                # Overwrites happen at the first index of a new quarter,
                # so determine here which quarter that is.
                requested_quarter = requested_qtr_data[
                    SHIFTED_NORMALIZED_QTRS, sid
                ].iloc[overwrite_ts]
                for adjustment_value, date_index, timestamp in zip(
                    *post_adjustments
                ):
                    # only splits strictly inside (asof, overwrite) apply
                    if split_adjusted_asof_idx < date_index < overwrite_ts:
                        # Assume the entire overwrite contains stale data
                        upper_bound = overwrite_ts - 1
                        end_idx = self.determine_end_idx_for_adjustment(
                            timestamp,
                            dates,
                            upper_bound,
                            requested_quarter,
                            sid_estimates
                        )
                        adjustments_for_sid[
                            column_name
                        ][overwrite_ts].append(
                            Float64Multiply(
                                0,
                                end_idx,
                                sid_idx,
                                sid_idx,
                                adjustment_value
                            )
                        )
    # Finally fold the standalone split adjustments into the same dict
    # that already holds the overwrites.
    self.merge_split_adjustments_with_overwrites(
        pre_adjustments_dict,
        post_adjustments_dict,
        adjustments_for_sid,
        requested_split_adjusted_columns
    ) | Collect split adjustments for future quarters. Re-apply adjustments
that would be overwritten by overwrites. Merge split adjustments with
overwrites into the given dictionary of splits for the given sid.
Parameters
----------
adjustments_for_sid : dict[str -> dict[int -> list]]
The dictionary of adjustments to which splits need to be added.
Initially it contains only overwrites.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for the given sid.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
pre_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values and indexes in `dates` for
adjustments that happened before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
List of requested split adjusted column names. | Below is the instruction that describes the task:
### Input:
Collect split adjustments for future quarters. Re-apply adjustments
that would be overwritten by overwrites. Merge split adjustments with
overwrites into the given dictionary of splits for the given sid.
Parameters
----------
adjustments_for_sid : dict[str -> dict[int -> list]]
The dictionary of adjustments to which splits need to be added.
Initially it contains only overwrites.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for the given sid.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
pre_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values and indexes in `dates` for
adjustments that happened before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
List of requested split adjusted column names.
### Response:
def collect_split_adjustments(self,
adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns):
"""
Collect split adjustments for future quarters. Re-apply adjustments
that would be overwritten by overwrites. Merge split adjustments with
overwrites into the given dictionary of splits for the given sid.
Parameters
----------
adjustments_for_sid : dict[str -> dict[int -> list]]
The dictionary of adjustments to which splits need to be added.
Initially it contains only overwrites.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for the given sid.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
pre_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values and indexes in `dates` for
adjustments that happened before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
List of requested split adjusted column names.
"""
(pre_adjustments_dict,
post_adjustments_dict) = self._collect_adjustments(
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns,
)
for column_name in requested_split_adjusted_columns:
for overwrite_ts in adjustments_for_sid[column_name]:
# We need to cumulatively re-apply all adjustments up to the
# split-adjusted-asof-date. We might not have any
# pre-adjustments, so we should check for that.
if overwrite_ts <= split_adjusted_asof_idx \
and pre_adjustments_dict:
for split_ts in pre_adjustments_dict[column_name]:
# The split has to have occurred during the span of
# the overwrite.
if split_ts < overwrite_ts:
# Create new adjustments here so that we can
# re-apply all applicable adjustments to ONLY
# the dates being overwritten.
adjustments_for_sid[
column_name
][overwrite_ts].extend([
Float64Multiply(
0,
overwrite_ts - 1,
sid_idx,
sid_idx,
adjustment.value
)
for adjustment
in pre_adjustments_dict[
column_name
][split_ts]
])
# After the split-adjusted-asof-date, we need to re-apply all
# adjustments that occur after that date and within the
# bounds of the overwrite. They need to be applied starting
# from the first date and until an end date. The end date is
# the date of the newest information we get about
# `requested_quarter` that is >= `split_ts`, or if there is no
# new knowledge before `overwrite_ts`, then it is the date
# before `overwrite_ts`.
else:
# Overwrites happen at the first index of a new quarter,
# so determine here which quarter that is.
requested_quarter = requested_qtr_data[
SHIFTED_NORMALIZED_QTRS, sid
].iloc[overwrite_ts]
for adjustment_value, date_index, timestamp in zip(
*post_adjustments
):
if split_adjusted_asof_idx < date_index < overwrite_ts:
# Assume the entire overwrite contains stale data
upper_bound = overwrite_ts - 1
end_idx = self.determine_end_idx_for_adjustment(
timestamp,
dates,
upper_bound,
requested_quarter,
sid_estimates
)
adjustments_for_sid[
column_name
][overwrite_ts].append(
Float64Multiply(
0,
end_idx,
sid_idx,
sid_idx,
adjustment_value
)
)
self.merge_split_adjustments_with_overwrites(
pre_adjustments_dict,
post_adjustments_dict,
adjustments_for_sid,
requested_split_adjusted_columns
) |
def calcFashionEvoFunc(pNow):
    '''
    Calculates a new approximate dynamic rule for the evolution of the proportion
    of punks as a linear function and a "shock width".

    Parameters
    ----------
    pNow : [float]
        List describing the history of the proportion of punks in the population.

    Returns
    -------
    (unnamed) : FashionEvoFunc
        A new rule for the evolution of the population punk proportion, based on
        the history in input pNow.
    '''
    pNowX = np.array(pNow)
    T = pNowX.size
    # Discard the first 100 observations as burn-in, then pair each p_t with
    # its successor p_{t+1}.  NOTE(review): assumes len(pNow) > 101 -- confirm.
    p_t = pNowX[100:(T-1)]
    p_tp1 = pNowX[101:T]
    # OLS regression of p_{t+1} on p_t gives the linear evolution rule; the
    # three trailing outputs of linregress (r, p-value, stderr) are discarded.
    pNextSlope, pNextIntercept, trash1, trash2, trash3 = stats.linregress(p_t,p_tp1)
    pPopExp = pNextIntercept + pNextSlope*p_t
    pPopErrSq= (pPopExp - p_tp1)**2
    # Root-mean-squared fit error; the "shock width" below is twice this value.
    pNextStd = np.sqrt(np.mean(pPopErrSq))
    # Debug trace of the fitted rule's parameters (intercept, slope, RMSE).
    print(str(pNextIntercept) + ', ' + str(pNextSlope) + ', ' + str(pNextStd))
    return FashionEvoFunc(pNextIntercept,pNextSlope,2*pNextStd) | Calculates a new approximate dynamic rule for the evolution of the proportion
of punks as a linear function and a "shock width".
Parameters
----------
pNow : [float]
List describing the history of the proportion of punks in the population.
Returns
-------
(unnamed) : FashionEvoFunc
A new rule for the evolution of the population punk proportion, based on
the history in input pNow. | Below is the instruction that describes the task:
### Input:
Calculates a new approximate dynamic rule for the evolution of the proportion
of punks as a linear function and a "shock width".
Parameters
----------
pNow : [float]
List describing the history of the proportion of punks in the population.
Returns
-------
(unnamed) : FashionEvoFunc
A new rule for the evolution of the population punk proportion, based on
the history in input pNow.
### Response:
def calcFashionEvoFunc(pNow):
'''
Calculates a new approximate dynamic rule for the evolution of the proportion
of punks as a linear function and a "shock width".
Parameters
----------
pNow : [float]
List describing the history of the proportion of punks in the population.
Returns
-------
(unnamed) : FashionEvoFunc
A new rule for the evolution of the population punk proportion, based on
the history in input pNow.
'''
pNowX = np.array(pNow)
T = pNowX.size
p_t = pNowX[100:(T-1)]
p_tp1 = pNowX[101:T]
pNextSlope, pNextIntercept, trash1, trash2, trash3 = stats.linregress(p_t,p_tp1)
pPopExp = pNextIntercept + pNextSlope*p_t
pPopErrSq= (pPopExp - p_tp1)**2
pNextStd = np.sqrt(np.mean(pPopErrSq))
print(str(pNextIntercept) + ', ' + str(pNextSlope) + ', ' + str(pNextStd))
return FashionEvoFunc(pNextIntercept,pNextSlope,2*pNextStd) |
def run(self):
    "Run cli, processing arguments and executing subcommands."
    arguments = self.argument_parser.parse_args()
    # Build the subcommand's positional argument list from its own signature,
    # pulling each named value off the parsed-args namespace.
    # NOTE(review): inspect.getargspec is deprecated and removed in Python
    # 3.11 (getfullargspec is the successor) -- confirm supported versions.
    argspec = inspect.getargspec(arguments.func)
    vargs = []
    for arg in argspec.args:
        vargs.append(getattr(arguments, arg))
    if argspec.varargs:
        vargs.extend(getattr(arguments, argspec.varargs))
    output = arguments.func(*vargs)
    # Test commands report success via the exit code only: truthy output maps
    # to 0, falsy to 1, and the output itself is suppressed.
    if getattr(arguments.func, '_cli_test_command', False):
        self.exit_code = 0 if output else 1
        output = ''
    if getattr(arguments.func, '_cli_no_output', False):
        output = ''
    self.formatter.format_output(output, arguments.format)
    # Presumably flushes pending charm unit-data changes to storage -- verify.
    if charmhelpers.core.unitdata._KV:
        charmhelpers.core.unitdata._KV.flush() | Run cli, processing arguments and executing subcommands. | Below is the the instruction that describes the task:
### Input:
Run cli, processing arguments and executing subcommands.
### Response:
def run(self):
"Run cli, processing arguments and executing subcommands."
arguments = self.argument_parser.parse_args()
argspec = inspect.getargspec(arguments.func)
vargs = []
for arg in argspec.args:
vargs.append(getattr(arguments, arg))
if argspec.varargs:
vargs.extend(getattr(arguments, argspec.varargs))
output = arguments.func(*vargs)
if getattr(arguments.func, '_cli_test_command', False):
self.exit_code = 0 if output else 1
output = ''
if getattr(arguments.func, '_cli_no_output', False):
output = ''
self.formatter.format_output(output, arguments.format)
if charmhelpers.core.unitdata._KV:
charmhelpers.core.unitdata._KV.flush() |
def print_logins(logins):
"""Prints out the login history for a user"""
table = formatting.Table(['Date', 'IP Address', 'Successufl Login?'])
for login in logins:
table.add_row([login.get('createDate'), login.get('ipAddress'), login.get('successFlag')])
return table | Prints out the login history for a user | Below is the the instruction that describes the task:
### Input:
Prints out the login history for a user
### Response:
def print_logins(logins):
"""Prints out the login history for a user"""
table = formatting.Table(['Date', 'IP Address', 'Successufl Login?'])
for login in logins:
table.add_row([login.get('createDate'), login.get('ipAddress'), login.get('successFlag')])
return table |
def nvmlDeviceGetIndex(handle):
    r"""
    /**
     * Retrieves the NVML index of this device.
     *
     * For all products.
     *
     * Valid indices are derived from the \a accessibleDevices count returned by
     * \ref nvmlDeviceGetCount(). For example, if \a accessibleDevices is 2 the valid indices
     * are 0 and 1, corresponding to GPU 0 and GPU 1.
     *
     * The order in which NVML enumerates devices has no guarantees of consistency between reboots. For that reason it
     * is recommended that devices be looked up by their PCI ids or GPU UUID. See
     * \ref nvmlDeviceGetHandleByPciBusId() and \ref nvmlDeviceGetHandleByUUID().
     *
     * Note: The NVML index may not correlate with other APIs, such as the CUDA device index.
     *
     * @param device The identifier of the target device
     * @param index Reference in which to return the NVML index of the device
     *
     * @return
     * - \ref NVML_SUCCESS if \a index has been set
     * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
     * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a index is NULL
     * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
     * - \ref NVML_ERROR_UNKNOWN on any unexpected error
     *
     * @see nvmlDeviceGetHandleByIndex()
     * @see nvmlDeviceGetCount()
     */
    nvmlReturn_t DECLDIR nvmlDeviceGetIndex
    """
    # Resolve the underlying NVML C entry point at call time.
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetIndex")
    # Out-parameter receiving the device index from the C call.
    c_index = c_uint()
    ret = fn(handle, byref(c_index))
    # Raises on any non-NVML_SUCCESS return code.
    _nvmlCheckReturn(ret)
    # NOTE(review): c_index.value is an int; bytes_to_str presumably passes
    # non-bytes values through unchanged -- confirm.
    return bytes_to_str(c_index.value) | r"""
/**
* Retrieves the NVML index of this device.
*
* For all products.
*
* Valid indices are derived from the \a accessibleDevices count returned by
* \ref nvmlDeviceGetCount(). For example, if \a accessibleDevices is 2 the valid indices
* are 0 and 1, corresponding to GPU 0 and GPU 1.
*
* The order in which NVML enumerates devices has no guarantees of consistency between reboots. For that reason it
* is recommended that devices be looked up by their PCI ids or GPU UUID. See
* \ref nvmlDeviceGetHandleByPciBusId() and \ref nvmlDeviceGetHandleByUUID().
*
* Note: The NVML index may not correlate with other APIs, such as the CUDA device index.
*
* @param device The identifier of the target device
* @param index Reference in which to return the NVML index of the device
*
* @return
* - \ref NVML_SUCCESS if \a index has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a index is NULL
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*
* @see nvmlDeviceGetHandleByIndex()
* @see nvmlDeviceGetCount()
*/
nvmlReturn_t DECLDIR nvmlDeviceGetIndex | Below is the instruction that describes the task:
### Input:
r"""
/**
* Retrieves the NVML index of this device.
*
* For all products.
*
* Valid indices are derived from the \a accessibleDevices count returned by
* \ref nvmlDeviceGetCount(). For example, if \a accessibleDevices is 2 the valid indices
* are 0 and 1, corresponding to GPU 0 and GPU 1.
*
* The order in which NVML enumerates devices has no guarantees of consistency between reboots. For that reason it
* is recommended that devices be looked up by their PCI ids or GPU UUID. See
* \ref nvmlDeviceGetHandleByPciBusId() and \ref nvmlDeviceGetHandleByUUID().
*
* Note: The NVML index may not correlate with other APIs, such as the CUDA device index.
*
* @param device The identifier of the target device
* @param index Reference in which to return the NVML index of the device
*
* @return
* - \ref NVML_SUCCESS if \a index has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a index is NULL
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*
* @see nvmlDeviceGetHandleByIndex()
* @see nvmlDeviceGetCount()
*/
nvmlReturn_t DECLDIR nvmlDeviceGetIndex
### Response:
def nvmlDeviceGetIndex(handle):
r"""
/**
* Retrieves the NVML index of this device.
*
* For all products.
*
* Valid indices are derived from the \a accessibleDevices count returned by
* \ref nvmlDeviceGetCount(). For example, if \a accessibleDevices is 2 the valid indices
* are 0 and 1, corresponding to GPU 0 and GPU 1.
*
* The order in which NVML enumerates devices has no guarantees of consistency between reboots. For that reason it
* is recommended that devices be looked up by their PCI ids or GPU UUID. See
* \ref nvmlDeviceGetHandleByPciBusId() and \ref nvmlDeviceGetHandleByUUID().
*
* Note: The NVML index may not correlate with other APIs, such as the CUDA device index.
*
* @param device The identifier of the target device
* @param index Reference in which to return the NVML index of the device
*
* @return
* - \ref NVML_SUCCESS if \a index has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a index is NULL
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*
* @see nvmlDeviceGetHandleByIndex()
* @see nvmlDeviceGetCount()
*/
nvmlReturn_t DECLDIR nvmlDeviceGetIndex
"""
fn = _nvmlGetFunctionPointer("nvmlDeviceGetIndex")
c_index = c_uint()
ret = fn(handle, byref(c_index))
_nvmlCheckReturn(ret)
return bytes_to_str(c_index.value) |
def start_polling(dispatcher, *, loop=None, skip_updates=False, reset_webhook=True,
                  on_startup=None, on_shutdown=None, timeout=20, fast=True):
    """
    Start bot in long-polling mode

    :param dispatcher: dispatcher whose Executor runs the polling loop
    :param loop: optional event loop, passed to the Executor
    :param skip_updates: passed to the Executor (presumably drops pending
        updates before polling starts -- confirm against Executor docs)
    :param reset_webhook: forwarded to Executor.start_polling
    :param on_startup: startup callback(s), registered via _setup_callbacks
    :param on_shutdown: shutdown callback(s), registered via _setup_callbacks
    :param timeout: long-polling timeout, default 20
    :param fast: forwarded to Executor.start_polling
    """
    executor = Executor(dispatcher, skip_updates=skip_updates, loop=loop)
    _setup_callbacks(executor, on_startup, on_shutdown)
    executor.start_polling(reset_webhook=reset_webhook, timeout=timeout, fast=fast) | Start bot in long-polling mode
:param dispatcher:
:param loop:
:param skip_updates:
:param reset_webhook:
:param on_startup:
:param on_shutdown:
:param timeout: | Below is the instruction that describes the task:
### Input:
Start bot in long-polling mode
:param dispatcher:
:param loop:
:param skip_updates:
:param reset_webhook:
:param on_startup:
:param on_shutdown:
:param timeout:
### Response:
def start_polling(dispatcher, *, loop=None, skip_updates=False, reset_webhook=True,
on_startup=None, on_shutdown=None, timeout=20, fast=True):
"""
Start bot in long-polling mode
:param dispatcher:
:param loop:
:param skip_updates:
:param reset_webhook:
:param on_startup:
:param on_shutdown:
:param timeout:
"""
executor = Executor(dispatcher, skip_updates=skip_updates, loop=loop)
_setup_callbacks(executor, on_startup, on_shutdown)
executor.start_polling(reset_webhook=reset_webhook, timeout=timeout, fast=fast) |
def mapValues(self, f):
    """
    Pass each value in the key-value pair RDD through a map function
    without changing the keys; this also retains the original RDD's
    partitioning.

    >>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
    >>> def f(x): return len(x)
    >>> x.mapValues(f).collect()
    [('a', 3), ('b', 1)]
    """
    # Apply f to the value only; keys pass through untouched, which is why it
    # is safe to tell map() that the existing partitioning is preserved.
    map_values_fn = lambda kv: (kv[0], f(kv[1]))
    return self.map(map_values_fn, preservesPartitioning=True) | Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)] | Below is the instruction that describes the task:
### Input:
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
### Response:
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True) |
def intersect_exposure_and_aggregate_hazard(self):
    """Intersect the exposure with the aggregate hazard.

    If the exposure is a continuous raster, only the aggregate hazard layer
    is updated (via zonal statistics) and the impact layer is cleared.
    Otherwise this function sets the impact (exposure summary) layer.
    """
    LOGGER.info('ANALYSIS : Intersect Exposure and Aggregate Hazard')
    if is_raster_layer(self.exposure):
        self.set_state_process(
            'impact function',
            'Zonal stats between exposure and aggregate hazard')
        # Be careful, our own zonal stats will take care of different
        # projections between the two layers. We don't want to reproject
        # rasters.
        # noinspection PyTypeChecker
        self._aggregate_hazard_impacted = zonal_stats(
            self.exposure, self._aggregate_hazard_impacted)
        self.debug_layer(self._aggregate_hazard_impacted)
        self.set_state_process('impact function', 'Add default values')
        self._aggregate_hazard_impacted = add_default_values(
            self._aggregate_hazard_impacted)
        self.debug_layer(self._aggregate_hazard_impacted)
        # I know it's redundant, it's just to be sure that we don't have
        # any impact layer for that IF.
        self._exposure_summary = None
    else:
        # Vector exposure: either intersect divisible features with the
        # aggregate hazard, or assign the highest hazard class per feature.
        indivisible_keys = [f['key'] for f in indivisible_exposure]
        geometry = self.exposure.geometryType()
        exposure = self.exposure.keywords.get('exposure')
        is_divisible = exposure not in indivisible_keys
        if geometry in [
                QgsWkbTypes.LineGeometry,
                QgsWkbTypes.PolygonGeometry] and is_divisible:
            self.set_state_process(
                'exposure', 'Make exposure layer valid')
            self._exposure = clean_layer(self.exposure)
            self.debug_layer(self.exposure)
            self.set_state_process(
                'impact function', 'Make aggregate hazard layer valid')
            self._aggregate_hazard_impacted = clean_layer(
                self._aggregate_hazard_impacted)
            self.debug_layer(self._aggregate_hazard_impacted)
            self.set_state_process(
                'impact function',
                'Intersect divisible features with the aggregate hazard')
            self._exposure_summary = intersection(
                self._exposure, self._aggregate_hazard_impacted)
            self.debug_layer(self._exposure_summary)
            # If the layer has the size field, it means we need to
            # recompute counts based on the old and new size.
            fields = self._exposure_summary.keywords['inasafe_fields']
            if size_field['key'] in fields:
                self.set_state_process(
                    'impact function',
                    'Recompute counts')
                LOGGER.info(
                    'InaSAFE will not use these counts, as we have ratios '
                    'since the exposure preparation step.')
                self._exposure_summary = recompute_counts(
                    self._exposure_summary)
                self.debug_layer(self._exposure_summary)
        else:
            self.set_state_process(
                'impact function',
                'Highest class of hazard is assigned to the exposure')
            self._exposure_summary = assign_highest_value(
                self._exposure, self._aggregate_hazard_impacted)
            self.debug_layer(self._exposure_summary)
        # set title using definition
        # the title will be overwritten anyway by standard title
        # set this as fallback.
        self._exposure_summary.keywords['title'] = (
            layer_purpose_exposure_summary['name'])
        if qgis_version() >= 21800:
            self._exposure_summary.setName(
                self._exposure_summary.keywords['title'])
        else:
            self._exposure_summary.setLayerName(
                self._exposure_summary.keywords['title']) | This function intersects the exposure with the aggregate hazard.
If the exposure is a continuous raster exposure, this function
will set the aggregate hazard layer.
However, this function will set the impact layer. | Below is the instruction that describes the task:
### Input:
This function intersects the exposure with the aggregate hazard.
If the exposure is a continuous raster exposure, this function
will set the aggregate hazard layer.
However, this function will set the impact layer.
### Response:
def intersect_exposure_and_aggregate_hazard(self):
"""This function intersects the exposure with the aggregate hazard.
    If the exposure is a continuous raster exposure, this function
will set the aggregate hazard layer.
However, this function will set the impact layer.
"""
LOGGER.info('ANALYSIS : Intersect Exposure and Aggregate Hazard')
if is_raster_layer(self.exposure):
self.set_state_process(
'impact function',
'Zonal stats between exposure and aggregate hazard')
# Be careful, our own zonal stats will take care of different
# projections between the two layers. We don't want to reproject
# rasters.
# noinspection PyTypeChecker
self._aggregate_hazard_impacted = zonal_stats(
self.exposure, self._aggregate_hazard_impacted)
self.debug_layer(self._aggregate_hazard_impacted)
self.set_state_process('impact function', 'Add default values')
self._aggregate_hazard_impacted = add_default_values(
self._aggregate_hazard_impacted)
self.debug_layer(self._aggregate_hazard_impacted)
# I know it's redundant, it's just to be sure that we don't have
# any impact layer for that IF.
self._exposure_summary = None
else:
indivisible_keys = [f['key'] for f in indivisible_exposure]
geometry = self.exposure.geometryType()
exposure = self.exposure.keywords.get('exposure')
is_divisible = exposure not in indivisible_keys
if geometry in [
QgsWkbTypes.LineGeometry,
QgsWkbTypes.PolygonGeometry] and is_divisible:
self.set_state_process(
'exposure', 'Make exposure layer valid')
self._exposure = clean_layer(self.exposure)
self.debug_layer(self.exposure)
self.set_state_process(
'impact function', 'Make aggregate hazard layer valid')
self._aggregate_hazard_impacted = clean_layer(
self._aggregate_hazard_impacted)
self.debug_layer(self._aggregate_hazard_impacted)
self.set_state_process(
'impact function',
'Intersect divisible features with the aggregate hazard')
self._exposure_summary = intersection(
self._exposure, self._aggregate_hazard_impacted)
self.debug_layer(self._exposure_summary)
# If the layer has the size field, it means we need to
# recompute counts based on the old and new size.
fields = self._exposure_summary.keywords['inasafe_fields']
if size_field['key'] in fields:
self.set_state_process(
'impact function',
'Recompute counts')
LOGGER.info(
'InaSAFE will not use these counts, as we have ratios '
'since the exposure preparation step.')
self._exposure_summary = recompute_counts(
self._exposure_summary)
self.debug_layer(self._exposure_summary)
else:
self.set_state_process(
'impact function',
'Highest class of hazard is assigned to the exposure')
self._exposure_summary = assign_highest_value(
self._exposure, self._aggregate_hazard_impacted)
self.debug_layer(self._exposure_summary)
# set title using definition
# the title will be overwritten anyway by standard title
# set this as fallback.
self._exposure_summary.keywords['title'] = (
layer_purpose_exposure_summary['name'])
if qgis_version() >= 21800:
self._exposure_summary.setName(
self._exposure_summary.keywords['title'])
else:
self._exposure_summary.setLayerName(
self._exposure_summary.keywords['title']) |
def distance(self,rng):
    """The distance between two ranges.

    :param rng: another range
    :type rng: GenomicRange
    :returns: bases separating the ranges, 0 if overlapped or adjacent,
        -1 if on different chromosomes
    :rtype: int
    """
    # Ranges on different chromosomes have no meaningful distance.
    if self.chr != rng.chr: return -1
    # NOTE(review): cmp presumably returns <0 / 0 / >0 for self before /
    # overlapping / after rng -- confirm against its definition.
    c = self.cmp(rng)
    if c == 0: return 0
    if c < 0:
        # self lies before rng: count bases strictly between the two ends.
        return rng.start - self.end-1
    return self.start - rng.end-1 | The distance between two ranges.
:param rng: another range
:type rng: GenomicRange
:returns: bases separating, 0 if overlapped or adjacent, -1 if on different chromosomes
:rtype: int | Below is the instruction that describes the task:
### Input:
The distance between two ranges.
:param rng: another range
:type rng: GenomicRange
:returns: bases separating, 0 if overlapped or adjacent, -1 if on different chromosomes
:rtype: int
### Response:
def distance(self,rng):
"""The distance between two ranges.
:param rng: another range
:type rng: GenomicRange
    :returns: bases separating, 0 if overlapped or adjacent, -1 if on different chromosomes
:rtype: int
"""
if self.chr != rng.chr: return -1
c = self.cmp(rng)
if c == 0: return 0
if c < 0:
return rng.start - self.end-1
return self.start - rng.end-1 |
def iter_predict_proba(self, X, include_init=False):
    """Returns the predicted probabilities for ``X`` at every stage of the boosting procedure.

    Arguments:
        X (array-like or sparse matrix of shape (n_samples, n_features)): The input samples.
            Sparse matrices are accepted only if they are supported by the weak model.
        include_init (bool, default=False): If ``True`` then the prediction from
            ``init_estimator`` will also be returned.

    Returns:
        iterator of arrays of shape (n_samples, n_classes) containing the predicted
        probabilities at each stage

    Note:
        The same array object is yielded at every stage (it is overwritten in
        place); copy it if per-stage results must be retained.
    """
    utils.validation.check_is_fitted(self, 'init_estimator_')
    X = utils.check_array(X, accept_sparse=['csr', 'csc'], dtype=None, force_all_finite=False)
    # Single buffer reused across all stages; each iteration overwrites it.
    probas = np.empty(shape=(len(X), len(self.classes_)), dtype=np.float64)
    for y_pred in super().iter_predict(X, include_init=include_init):
        if len(self.classes_) == 2:
            # Binary: one raw-score column through the sigmoid; the
            # complement is the negative-class probability.
            probas[:, 1] = sigmoid(y_pred[:, 0])
            probas[:, 0] = 1. - probas[:, 1]
        else:
            # Multiclass: per-class raw scores normalized with softmax.
            probas[:] = softmax(y_pred)
        yield probas | Returns the predicted probabilities for ``X`` at every stage of the boosting procedure.
Arguments:
X (array-like or sparse matrix of shape (n_samples, n_features)): The input samples.
Sparse matrices are accepted only if they are supported by the weak model.
include_init (bool, default=False): If ``True`` then the prediction from
``init_estimator`` will also be returned.
Returns:
iterator of arrays of shape (n_samples, n_classes) containing the predicted
probabilities at each stage | Below is the instruction that describes the task:
### Input:
Returns the predicted probabilities for ``X`` at every stage of the boosting procedure.
Arguments:
X (array-like or sparse matrix of shape (n_samples, n_features)): The input samples.
Sparse matrices are accepted only if they are supported by the weak model.
include_init (bool, default=False): If ``True`` then the prediction from
``init_estimator`` will also be returned.
Returns:
iterator of arrays of shape (n_samples, n_classes) containing the predicted
probabilities at each stage
### Response:
def iter_predict_proba(self, X, include_init=False):
"""Returns the predicted probabilities for ``X`` at every stage of the boosting procedure.
Arguments:
X (array-like or sparse matrix of shape (n_samples, n_features)): The input samples.
Sparse matrices are accepted only if they are supported by the weak model.
include_init (bool, default=False): If ``True`` then the prediction from
``init_estimator`` will also be returned.
Returns:
iterator of arrays of shape (n_samples, n_classes) containing the predicted
probabilities at each stage
"""
utils.validation.check_is_fitted(self, 'init_estimator_')
X = utils.check_array(X, accept_sparse=['csr', 'csc'], dtype=None, force_all_finite=False)
probas = np.empty(shape=(len(X), len(self.classes_)), dtype=np.float64)
for y_pred in super().iter_predict(X, include_init=include_init):
if len(self.classes_) == 2:
probas[:, 1] = sigmoid(y_pred[:, 0])
probas[:, 0] = 1. - probas[:, 1]
else:
probas[:] = softmax(y_pred)
yield probas |
def diet_adam_optimizer_params():
    """Default hyperparameters for a DietAdamOptimizer.

    Returns:
        a hyperparameters object.
    """
    return hparam.HParams(
        quantize=True,  # use 16-bit fixed-point
        quantization_scale=10.0 / tf.int16.max,  # maps the int16 range onto roughly [-10, 10]
        optimizer="DietAdam",
        learning_rate=1.0,
        learning_rate_warmup_steps=2000,
        learning_rate_decay_scheme="noam",  # "noam" or "none"
        epsilon=1e-10,
        beta1=0.0,  # we can save memory if beta1=0
        beta2=0.98,
        factored_second_moment_accumulator=True,  # this saves memory
    ) | Default hyperparameters for a DietAdamOptimizer.
Returns:
a hyperparameters object. | Below is the instruction that describes the task:
### Input:
Default hyperparameters for a DietAdamOptimizer.
Returns:
a hyperparameters object.
### Response:
def diet_adam_optimizer_params():
"""Default hyperparameters for a DietAdamOptimizer.
Returns:
a hyperparameters object.
"""
return hparam.HParams(
quantize=True, # use 16-bit fixed-point
quantization_scale=10.0 / tf.int16.max,
optimizer="DietAdam",
learning_rate=1.0,
learning_rate_warmup_steps=2000,
learning_rate_decay_scheme="noam", # "noam" or "none"
epsilon=1e-10,
beta1=0.0, # we can save memory if beta1=0
beta2=0.98,
factored_second_moment_accumulator=True, # this saves memory
) |
def etd_ms_dict2xmlfile(filename, metadata_dict):
"""Create an ETD MS XML file."""
try:
f = open(filename, 'w')
f.write(generate_etd_ms_xml(metadata_dict).encode("utf-8"))
f.close()
except:
raise MetadataGeneratorException(
'Failed to create an XML file. Filename: %s' % (filename)
) | Create an ETD MS XML file. | Below is the the instruction that describes the task:
### Input:
Create an ETD MS XML file.
### Response:
def etd_ms_dict2xmlfile(filename, metadata_dict):
"""Create an ETD MS XML file."""
try:
f = open(filename, 'w')
f.write(generate_etd_ms_xml(metadata_dict).encode("utf-8"))
f.close()
except:
raise MetadataGeneratorException(
'Failed to create an XML file. Filename: %s' % (filename)
) |
def identifier_director(**kwargs):
"""Direct how to handle the identifier element."""
ark = kwargs.get('ark', None)
domain_name = kwargs.get('domain_name', None)
# Set default scheme if it is None or is not supplied.
scheme = kwargs.get('scheme') or 'http'
qualifier = kwargs.get('qualifier', None)
content = kwargs.get('content', '')
# See if the ark and domain name were given.
if ark and qualifier == 'ark':
content = 'ark: %s' % ark
if domain_name and ark and qualifier == 'permalink':
# Create the permalink URL.
if not domain_name.endswith('/'):
domain_name += '/'
permalink_url = '%s://%s%s' % (scheme, domain_name, ark)
# Make sure it has a trailing slash.
if not permalink_url.endswith('/'):
permalink_url += '/'
content = permalink_url
else:
if qualifier:
content = '%s: %s' % (string.lower(qualifier), content)
return DCIdentifier(content=content) | Direct how to handle the identifier element. | Below is the the instruction that describes the task:
### Input:
Direct how to handle the identifier element.
### Response:
def identifier_director(**kwargs):
"""Direct how to handle the identifier element."""
ark = kwargs.get('ark', None)
domain_name = kwargs.get('domain_name', None)
# Set default scheme if it is None or is not supplied.
scheme = kwargs.get('scheme') or 'http'
qualifier = kwargs.get('qualifier', None)
content = kwargs.get('content', '')
# See if the ark and domain name were given.
if ark and qualifier == 'ark':
content = 'ark: %s' % ark
if domain_name and ark and qualifier == 'permalink':
# Create the permalink URL.
if not domain_name.endswith('/'):
domain_name += '/'
permalink_url = '%s://%s%s' % (scheme, domain_name, ark)
# Make sure it has a trailing slash.
if not permalink_url.endswith('/'):
permalink_url += '/'
content = permalink_url
else:
if qualifier:
content = '%s: %s' % (string.lower(qualifier), content)
return DCIdentifier(content=content) |
def _process_feature_dbxref(self, limit):
"""
This is the mapping between the flybase features and external
repositories. Generally we want to leave the flybase feature id
as the primary identifier. But we need to make the equivalences/sameAs.
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
raw = '/'.join((self.rawdir, 'feature_dbxref'))
LOG.info("processing feature_dbxref mappings")
with open(raw, 'r') as f:
f.readline() # read the header row; skip
filereader = csv.reader(f, delimiter='\t', quotechar='\"')
for line in filereader:
(feature_dbxref_id, feature_id, dbxref_id, is_current) = line
# 431890 3091292 596211 t
# 2 9 55044 t
# 3 9 55045 t
# 437595 4551668 277309 t
# 437596 4551662 277307 t
if is_current == 'f':
# not sure what to do with it?
continue
feature_key = feature_id
if self.test_mode and int(feature_key) not in \
self.test_keys['gene'] + self.test_keys['allele']:
continue
if feature_key not in self.idhash['feature']:
# some features may not be found in the hash
# if they are "analysis features"
# LOG.debug("Feature %s not found in hash", feature_key)
continue
feature_id = self.idhash['feature'][feature_key]
dbxref_key = dbxref_id
dbxrefs = self.dbxrefs.get(dbxref_key)
if dbxrefs is not None:
for d in dbxrefs:
# need to filter based on db ?
# TODO make other species' identifiers primary??
# instead of flybase?
did = dbxrefs.get(d)
if did.endswith('&class=protein'):
did = did[0:len(dbxrefs)-15]
# don't make something sameAs itself
if did == feature_id:
continue
dlabel = self.label_hash.get(did)
if re.search(r'FB(gn|og)', feature_id):
# only want to add equivalences for fly things
if not re.match(r'OMIM', did):
# these are only omim diseases, not genes;
# we shouldn't be adding these here anyway
# model.addClassToGraph(did, dlabel)
# model.addXref(feature_id, did)
pass # True # that
elif did is not None and dlabel is not None \
and feature_id is not None:
model.addIndividualToGraph(did, dlabel)
model.addXref(feature_id, did)
line_counter += 1
if not self.test_mode \
and limit is not None and line_counter > limit:
break
# FIXME - some flybase genes are xrefed to OMIM diseases!!!!!!
# for example,
# FBog0000375495 xref to omim 601181 (gene)
# and 608033 (phenotype)
return | This is the mapping between the flybase features and external
repositories. Generally we want to leave the flybase feature id
as the primary identifier. But we need to make the equivalences/sameAs.
:param limit:
:return: | Below is the instruction that describes the task:
### Input:
This is the mapping between the flybase features and external
repositories. Generally we want to leave the flybase feature id
as the primary identifier. But we need to make the equivalences/sameAs.
:param limit:
:return:
### Response:
def _process_feature_dbxref(self, limit):
"""
This is the mapping between the flybase features and external
repositories. Generally we want to leave the flybase feature id
as the primary identifier. But we need to make the equivalences/sameAs.
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_counter = 0
raw = '/'.join((self.rawdir, 'feature_dbxref'))
LOG.info("processing feature_dbxref mappings")
with open(raw, 'r') as f:
f.readline() # read the header row; skip
filereader = csv.reader(f, delimiter='\t', quotechar='\"')
for line in filereader:
(feature_dbxref_id, feature_id, dbxref_id, is_current) = line
# 431890 3091292 596211 t
# 2 9 55044 t
# 3 9 55045 t
# 437595 4551668 277309 t
# 437596 4551662 277307 t
if is_current == 'f':
# not sure what to do with it?
continue
feature_key = feature_id
if self.test_mode and int(feature_key) not in \
self.test_keys['gene'] + self.test_keys['allele']:
continue
if feature_key not in self.idhash['feature']:
# some features may not be found in the hash
# if they are "analysis features"
# LOG.debug("Feature %s not found in hash", feature_key)
continue
feature_id = self.idhash['feature'][feature_key]
dbxref_key = dbxref_id
dbxrefs = self.dbxrefs.get(dbxref_key)
if dbxrefs is not None:
for d in dbxrefs:
# need to filter based on db ?
# TODO make other species' identifiers primary??
# instead of flybase?
did = dbxrefs.get(d)
if did.endswith('&class=protein'):
did = did[0:len(dbxrefs)-15]
# don't make something sameAs itself
if did == feature_id:
continue
dlabel = self.label_hash.get(did)
if re.search(r'FB(gn|og)', feature_id):
# only want to add equivalences for fly things
if not re.match(r'OMIM', did):
# these are only omim diseases, not genes;
# we shouldn't be adding these here anyway
# model.addClassToGraph(did, dlabel)
# model.addXref(feature_id, did)
pass # True # that
elif did is not None and dlabel is not None \
and feature_id is not None:
model.addIndividualToGraph(did, dlabel)
model.addXref(feature_id, did)
line_counter += 1
if not self.test_mode \
and limit is not None and line_counter > limit:
break
# FIXME - some flybase genes are xrefed to OMIM diseases!!!!!!
# for example,
# FBog0000375495 xref to omim 601181 (gene)
# and 608033 (phenotype)
return |
def send_location(self, number, name, url, latitude, longitude):
    """
    Send location message

    Builds a ``LocationMediaMessageProtocolEntity`` addressed to the
    normalized JID for *number*, hands it to ``self.toLower`` (presumably
    dispatching it down the protocol stack -- confirm against the stack
    implementation) and returns the entity that was sent.

    :param str number: phone number with cc (country code)
    :param str name: identifier for the location
    :param str url: location url
    :param str latitude: location latitude
    :param str longitude: location longitude
    :return: the location protocol entity that was handed to the stack
    """
    location_message = LocationMediaMessageProtocolEntity(latitude, longitude, name, url, encoding="raw",
                                                          to=self.normalize_jid(number))
    self.toLower(location_message)
    return location_message | Send location message
:param str number: phone number with cc (country code)
:param str name: indentifier for the location
:param str url: location url
:param str longitude: location longitude
:param str latitude: location latitude | Below is the the instruction that describes the task:
### Input:
Send location message
:param str number: phone number with cc (country code)
:param str name: identifier for the location
:param str url: location url
:param str longitude: location longitude
:param str latitude: location latitude
### Response:
def send_location(self, number, name, url, latitude, longitude):
    """
    Send location message

    :param str number: phone number with cc (country code)
    :param str name: identifier for the location
    :param str url: location url
    :param str latitude: location latitude
    :param str longitude: location longitude
    :return: the entity handed to the lower protocol layer
    """
    # Resolve the recipient JID once, then build and dispatch the entity.
    recipient_jid = self.normalize_jid(number)
    entity = LocationMediaMessageProtocolEntity(
        latitude, longitude, name, url,
        encoding="raw", to=recipient_jid)
    self.toLower(entity)
    return entity
def check_i18n():
    """Generator that checks token stream for localization errors.
    Expects tokens to be ``send``ed one by one.
    Raises LocalizationError if some error is found.
    """
    while True:
        try:
            # Receive the next tokenize 5-tuple from the driver; closing
            # the generator simply terminates the check.
            token_type, text, _, _, line = yield
        except GeneratorExit:
            return
        if text == "def" and token_type == tokenize.NAME:
            # explicitly ignore function definitions, as oslo defines these
            return
        if (token_type == tokenize.NAME and
                text in ["_", "_LI", "_LW", "_LE", "_LC"]):
            # Saw a translation helper name; skip newline tokens until the
            # next significant token arrives.
            while True:
                token_type, text, start, _, _ = yield
                if token_type != tokenize.NL:
                    break
            if token_type != tokenize.OP or text != "(":
                continue  # not a localization call
            # Accumulate the (possibly implicitly concatenated) string
            # literal argument; stop at the first non-string,
            # non-newline token.
            format_string = ''
            while True:
                token_type, text, start, _, _ = yield
                if token_type == tokenize.STRING:
                    # NOTE(review): eval of the STRING token source decodes
                    # the literal; input comes from tokenize, but
                    # ast.literal_eval would be the safer equivalent.
                    format_string += eval(text)
                elif token_type == tokenize.NL:
                    pass
                else:
                    break
            if not format_string:
                raise LocalizationError(
                    start, "H701: Empty localization string")
            if token_type != tokenize.OP:
                raise LocalizationError(
                    start, "H701: Invalid localization call")
            if text != ")":
                # The literal must be the sole argument: no % formatting
                # and no concatenation inside the call parentheses.
                if text == "%":
                    raise LocalizationError(
                        start,
                        "H702: Formatting operation should be outside"
                        " of localization method call")
                elif text == "+":
                    raise LocalizationError(
                        start,
                        "H702: Use bare string concatenation instead of +")
                else:
                    raise LocalizationError(
                        start, "H702: Argument to _, _LI, _LW, _LC, or _LE "
                        "must be just a string")
            format_specs = FORMAT_RE.findall(format_string)
            positional_specs = [(key, spec) for key, spec in format_specs
                                if not key and spec]
            # not spec means %%, key means %(smth)s
            if len(positional_specs) > 1:
                raise LocalizationError(
                    start, "H703: Multiple positional placeholders") | Generator that checks token stream for localization errors.
Expects tokens to be ``send``ed one by one.
Raises LocalizationError if some error is found. | Below is the instruction that describes the task:
### Input:
Generator that checks token stream for localization errors.
Expects tokens to be ``send``ed one by one.
Raises LocalizationError if some error is found.
### Response:
def check_i18n():
    """Generator that checks token stream for localization errors.
    Expects tokens to be ``send``ed one by one.
    Raises LocalizationError if some error is found.
    """
    while True:
        try:
            tok_type, tok_text, _, _, _line = yield
        except GeneratorExit:
            return
        # Function definitions are deliberately ignored, as oslo
        # defines these helpers itself.
        if tok_type == tokenize.NAME and tok_text == "def":
            return
        if tok_type != tokenize.NAME or tok_text not in (
                "_", "_LI", "_LW", "_LE", "_LC"):
            continue
        # A translation helper name was seen: skip intervening newline
        # tokens and look for the opening parenthesis of the call.
        while True:
            tok_type, tok_text, pos, _, _ = yield
            if tok_type != tokenize.NL:
                break
        if tok_type != tokenize.OP or tok_text != "(":
            # Bare reference, not a localization call.
            continue
        # Collect the (possibly implicitly concatenated) string literal.
        fmt = ''
        while True:
            tok_type, tok_text, pos, _, _ = yield
            if tok_type == tokenize.STRING:
                # NOTE: eval decodes the literal source produced by
                # tokenize (same as the original implementation).
                fmt += eval(tok_text)
            elif tok_type != tokenize.NL:
                break
        if not fmt:
            raise LocalizationError(
                pos, "H701: Empty localization string")
        if tok_type != tokenize.OP:
            raise LocalizationError(
                pos, "H701: Invalid localization call")
        if tok_text != ")":
            # The literal must be the sole argument of the call.
            if tok_text == "%":
                raise LocalizationError(
                    pos,
                    "H702: Formatting operation should be outside"
                    " of localization method call")
            if tok_text == "+":
                raise LocalizationError(
                    pos,
                    "H702: Use bare string concatenation instead of +")
            raise LocalizationError(
                pos, "H702: Argument to _, _LI, _LW, _LC, or _LE "
                "must be just a string")
        specs = FORMAT_RE.findall(fmt)
        # not spec means %%, key means %(smth)s
        positional = [(key, spec) for key, spec in specs
                      if not key and spec]
        if len(positional) > 1:
            raise LocalizationError(
                pos, "H703: Multiple positional placeholders")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.