code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def from_ZNM(cls, Z, N, M, name=''):
    """
    Build a table from parallel arrays of proton number Z, neutron
    number N and mass M.

    Example:
    ________
    >>> Z = [82, 82, 83]
    >>> N = [126, 127, 130]
    >>> M = [-21.34, -18.0, -14.45]
    >>> Table.from_ZNM(Z, N, M, name='Custom Table')
    Z   N
    82  126    -21.34
        127    -18.00
    83  130    -14.45
    Name: Custom Table, dtype: float64
    """
    # Assemble the columns, then index masses by the (Z, N) pair.
    frame = pd.DataFrame({'Z': Z, 'N': N, 'M': M})
    masses = frame.set_index(['Z', 'N'])['M']
    masses.name = name
    return cls(df=masses, name=name)
|
Creates a table from arrays Z, N and M
Example:
________
>>> Z = [82, 82, 83]
>>> N = [126, 127, 130]
>>> M = [-21.34, -18.0, -14.45]
>>> Table.from_ZNM(Z, N, M, name='Custom Table')
Z N
82 126 -21.34
127 -18.00
83 130 -14.45
Name: Custom Table, dtype: float64
|
def run_synthetic_SGLD():
    """Run Stochastic Gradient Langevin Dynamics on a synthetic problem
    and plot a 2-D histogram of the collected parameter samples.

    NOTE(review): relies on module-level helpers ``load_synthetic``,
    ``SGLDScheduler`` and ``synthetic_grad`` -- their exact semantics are
    assumed from the call signatures used here.
    """
    # Parameters of the synthetic data-generating model.
    theta1 = 0
    theta2 = 1
    sigma1 = numpy.sqrt(10)
    sigma2 = 1
    sigmax = numpy.sqrt(2)
    X = load_synthetic(theta1=theta1, theta2=theta2, sigmax=sigmax, num=100)
    minibatch_size = 1
    total_iter_num = 1000000
    # Step size decays polynomially from 0.01 to 0.0001 over the run.
    lr_scheduler = SGLDScheduler(begin_rate=0.01, end_rate=0.0001, total_iter_num=total_iter_num,
                                 factor=0.55)
    optimizer = mx.optimizer.create('sgld',
                                    learning_rate=None,
                                    rescale_grad=1.0,
                                    lr_scheduler=lr_scheduler,
                                    wd=0)
    updater = mx.optimizer.get_updater(optimizer)
    # Two-parameter model: initial draw from a standard normal, plus a
    # reusable gradient buffer and a sample history for plotting.
    theta = mx.random.normal(0, 1, (2,), mx.cpu())
    grad = nd.empty((2,), mx.cpu())
    samples = numpy.zeros((2, total_iter_num))
    start = time.time()
    for i in range(total_iter_num):
        # Progress report every 100k iterations.
        if (i + 1) % 100000 == 0:
            end = time.time()
            print("Iter:%d, Time spent: %f" % (i + 1, end - start))
            start = time.time()
        # One random data point per step; rescale_grad scales the
        # minibatch gradient up to the full-dataset magnitude.
        ind = numpy.random.randint(0, X.shape[0])
        synthetic_grad(X[ind], theta, sigma1, sigma2, sigmax,
                       rescale_grad=X.shape[0] / float(minibatch_size), grad=grad)
        updater('theta', grad, theta)
        samples[:, i] = theta.asnumpy()
    # Visualize the sampled posterior as a 2-D histogram.
    plt.hist2d(samples[0, :], samples[1, :], (200, 200), cmap=plt.cm.jet)
    plt.colorbar()
    plt.show()
|
Run synthetic SGLD
|
def remove_notification_listener(self, notification_id):
    """Remove a previously added notification callback.

    Args:
        notification_id: The numeric id passed back from add_notification_listener

    Returns:
        The function returns boolean true if found and removed, false otherwise.
    """
    # Scan every notification-type list; drop the first entry whose id
    # matches and stop immediately.
    for listeners in self.notifications.values():
        for entry in listeners:
            if entry[0] == notification_id:
                listeners.remove(entry)
                return True
    return False
|
Remove a previously added notification callback.
Args:
notification_id: The numeric id passed back from add_notification_listener
Returns:
The function returns boolean true if found and removed, false otherwise.
|
def space(self,bins=None,units=None,conversion_function=convert_time,resolution=None,end_at_end=True,scale=None):
    """
    Compute an adequate binning for the dimension (on the values).

    bins: number of bins or None
    units: str or None
    conversion_function: function to convert units to other units
    resolution: step size or None
    end_at_end: Boolean
        only if `unit == 1`
        whether or not the last point should be the last data point (True) or one after the last valid point (False)
    scale: 'lin','log' or None
        None falls back to this dimension's own `scale`; any value other
        than 'log' (a spike container may also use 'unique') yields a
        linear spacing.
    """
    # An explicit scale argument overrides the dimension's own setting.
    effective_scale = self.scale if scale is None else scale
    if effective_scale in ['log']:
        return self.logspace(bins=bins, units=units, conversion_function=conversion_function,
                             resolution=resolution, end_at_end=end_at_end)
    return self.linspace(bins=bins, units=units, conversion_function=conversion_function,
                         resolution=resolution, end_at_end=end_at_end)
|
Computes adequate binning for the dimension (on the values).
bins: number of bins or None
units: str or None
conversion_function: function to convert units to other units
resolution: step size or None
end_at_end: Boolean
only if `unit == 1`
whether or not the last point should be the last data point (True) or one after the last valid point (False)
scale: 'lin','log' or None
a spike container can also use 'unique', but not the LabelDimension itself!
if the LabelDimension.scale is 'unique', .bins() will return a linear spacing
|
def parse(self, stream, mimetype, content_length, options=None):
    """Parse the information from the given stream, mimetype,
    content length and mimetype parameters.

    :param stream: an input stream
    :param mimetype: the mimetype of the data
    :param content_length: the content length of the incoming data
    :param options: optional mimetype parameters (used for
                    the multipart boundary for instance)
    :return: A tuple in the form ``(stream, form, files)``.
    """
    # Reject bodies that exceed the configured limit up front.
    too_large = (self.max_content_length is not None
                 and content_length is not None
                 and content_length > self.max_content_length)
    if too_large:
        raise exceptions.RequestEntityTooLarge()
    if options is None:
        options = {}
    handler = self.get_parse_func(mimetype, options)
    if handler is None:
        # No parser registered for this mimetype: empty form/files.
        return stream, self.cls(), self.cls()
    try:
        return handler(self, stream, mimetype, content_length, options)
    except ValueError:
        # In silent mode a parse failure degrades to empty results.
        if not self.silent:
            raise
    return stream, self.cls(), self.cls()
|
Parses the information from the given stream, mimetype,
content length and mimetype parameters.
:param stream: an input stream
:param mimetype: the mimetype of the data
:param content_length: the content length of the incoming data
:param options: optional mimetype parameters (used for
the multipart boundary for instance)
:return: A tuple in the form ``(stream, form, files)``.
|
def grid_expansion_costs(network, without_generator_import=False):
    """
    Calculates grid expansion costs for each reinforced transformer and line
    in kEUR.
    Attributes
    ----------
    network : :class:`~.grid.network.Network`
    without_generator_import : Boolean
        If True excludes lines that were added in the generator import to
        connect new generators to the grid from calculation of grid expansion
        costs. Default: False.
    Returns
    -------
    `pandas.DataFrame<dataframe>`
        DataFrame containing type and costs plus in the case of lines the
        line length and number of parallel lines of each reinforced
        transformer and line. Index of the DataFrame is the respective object
        that can either be a :class:`~.grid.components.Line` or a
        :class:`~.grid.components.Transformer`. Columns are the following:
        type: String
            Transformer size or cable name
        total_costs: float
            Costs of equipment in kEUR. For lines the line length and number of
            parallel lines is already included in the total costs.
        quantity: int
            For transformers quantity is always one, for lines it specifies the
            number of parallel lines.
        line_length: float
            Length of line or in case of parallel lines all lines in km.
        voltage_level : :obj:`str` {'lv' | 'mv' | 'mv/lv'}
            Specifies voltage level the equipment is in.
        mv_feeder : :class:`~.grid.components.Line`
            First line segment of half-ring used to identify in which
            feeder the grid expansion was conducted in.
    Notes
    -------
    Total grid expansion costs can be obtained through
    self.grid_expansion_costs.total_costs.sum().
    """
    def _get_transformer_costs(transformer):
        # Cost per transformer depends only on its voltage level.
        # NOTE(review): implicitly returns None for grids that are
        # neither LVGrid nor MVGrid -- confirm that cannot occur here.
        if isinstance(transformer.grid, LVGrid):
            return float(network.config['costs_transformers']['lv'])
        elif isinstance(transformer.grid, MVGrid):
            return float(network.config['costs_transformers']['mv'])

    def _get_line_costs(line, quantity):
        # get voltage level
        if isinstance(line.grid, LVGrid):
            voltage_level = 'lv'
        elif isinstance(line.grid, MVGrid):
            voltage_level = 'mv'
        else:
            raise KeyError("Grid must be LVGrid or MVGrid.")
        # get population density in people/km^2
        # transform area to calculate area in km^2
        projection = partial(
            pyproj.transform,
            pyproj.Proj(init='epsg:{}'.format(
                int(network.config['geo']['srid']))),
            pyproj.Proj(init='epsg:3035'))
        sqm2sqkm = 1e6
        population_density = (line.grid.grid_district['population'] /
                              (transform(projection,
                                         line.grid.grid_district['geom']).area /
                               sqm2sqkm))
        if population_density <= 500:
            population_density = 'rural'
        else:
            population_density = 'urban'
        # get costs from config
        costs_cable = float(network.config['costs_cables']['{}_cable'.format(
            voltage_level)])
        costs_cable_earthwork = float(network.config['costs_cables'][
            '{}_cable_incl_earthwork_{}'.format(
                voltage_level,
                population_density)])
        # BUGFIX: previously used the enclosing loop variable `l` here
        # instead of the `line` parameter; correct (and robust) is to use
        # the parameter this helper was called with.
        return (costs_cable_earthwork * line.length +
                costs_cable * line.length * (quantity - 1))

    costs = pd.DataFrame()
    if without_generator_import:
        # Only count reinforcements made after the generator import step.
        equipment_changes = network.results.equipment_changes.loc[
            network.results.equipment_changes.iteration_step > 0]
    else:
        equipment_changes = network.results.equipment_changes
    # costs for transformers
    if not equipment_changes.empty:
        transformers = equipment_changes[equipment_changes['equipment'].apply(
            isinstance, args=(Transformer,))]
        added_transformers = transformers[transformers['change'] == 'added']
        removed_transformers = transformers[
            transformers['change'] == 'removed']
        # check if any of the added transformers were later removed
        added_removed_transformers = added_transformers.loc[
            added_transformers['equipment'].isin(
                removed_transformers['equipment'])]
        added_transformers = added_transformers[
            ~added_transformers['equipment'].isin(
                added_removed_transformers.equipment)]
        # calculate costs for each transformer
        for t in added_transformers['equipment']:
            costs = costs.append(pd.DataFrame(
                {'type': t.type.name,
                 'total_costs': _get_transformer_costs(t),
                 'quantity': 1,
                 'voltage_level': 'mv/lv',
                 'mv_feeder': t.grid.station.mv_feeder if isinstance(
                     t.grid, LVGrid) else None},
                index=[t]))
        # costs for lines
        # get changed lines
        lines = equipment_changes.loc[equipment_changes.index[
            equipment_changes.reset_index()['index'].apply(
                isinstance, args=(Line,))]]
        # calculate costs for each reinforced line
        for l in list(lines.index.unique()):
            # check if line connects aggregated units
            aggr_lines = []
            aggr_lines_generator = l.grid.graph.lines_by_attribute('line_aggr')
            for aggr_line in aggr_lines_generator:
                aggr_lines.append(repr(aggr_line['line']))
            if not repr(l) in aggr_lines:
                number_lines_added = equipment_changes[
                    (equipment_changes.index == l) &
                    (equipment_changes.equipment ==
                     l.type.name)]['quantity'].sum()
                costs = costs.append(pd.DataFrame(
                    {'type': l.type.name,
                     'total_costs': _get_line_costs(l, number_lines_added),
                     'length': l.length * number_lines_added,
                     'quantity': number_lines_added,
                     'voltage_level': ('lv' if isinstance(l.grid, LVGrid)
                                       else 'mv'),
                     'mv_feeder': get_mv_feeder_from_line(l)},
                    index=[l]))
    # if no costs incurred write zero costs to DataFrame
    if costs.empty:
        costs = costs.append(pd.DataFrame(
            {'type': ['N/A'],
             'total_costs': [0],
             'length': [0],
             'quantity': [0],
             'voltage_level': '',
             'mv_feeder': ''
             },
            index=['No reinforced equipment.']))
    return costs
|
Calculates grid expansion costs for each reinforced transformer and line
in kEUR.
Attributes
----------
network : :class:`~.grid.network.Network`
without_generator_import : Boolean
If True excludes lines that were added in the generator import to
connect new generators to the grid from calculation of grid expansion
costs. Default: False.
Returns
-------
`pandas.DataFrame<dataframe>`
DataFrame containing type and costs plus in the case of lines the
line length and number of parallel lines of each reinforced
transformer and line. Index of the DataFrame is the respective object
that can either be a :class:`~.grid.components.Line` or a
:class:`~.grid.components.Transformer`. Columns are the following:
type: String
Transformer size or cable name
total_costs: float
Costs of equipment in kEUR. For lines the line length and number of
parallel lines is already included in the total costs.
quantity: int
For transformers quantity is always one, for lines it specifies the
number of parallel lines.
line_length: float
Length of line or in case of parallel lines all lines in km.
voltage_level : :obj:`str` {'lv' | 'mv' | 'mv/lv'}
Specifies voltage level the equipment is in.
mv_feeder : :class:`~.grid.components.Line`
First line segment of half-ring used to identify in which
feeder the grid expansion was conducted in.
Notes
-------
Total grid expansion costs can be obtained through
self.grid_expansion_costs.total_costs.sum().
|
def mangle_signature(sig, max_chars=30):
    """Reformat a function signature to a more compact form.

    Positional arguments are kept; keyword arguments are collapsed into
    a bracketed ``[, a, b]`` suffix, and the whole result is truncated
    to roughly ``max_chars`` characters via ``limited_join``.
    """
    # Drop the surrounding parentheses, if present.
    s = re.sub(r"^\((.*)\)$", r"\1", sig).strip()
    # Strip strings (which can contain things that confuse the code below)
    s = re.sub(r"\\\\", "", s)
    s = re.sub(r"\\'", "", s)
    s = re.sub(r"'[^']*'", "", s)
    # Parse the signature to arguments + options
    args = []
    opts = []
    # Matches the *last* "name=" pair: group(1) is everything before it.
    opt_re = re.compile(r"^(.*, |)([a-zA-Z0-9_*]+)=")
    while s:
        m = opt_re.search(s)
        if not m:
            # The rest are arguments
            args = s.split(', ')
            break
        # Peel keyword arguments off the right end, one per iteration.
        opts.insert(0, m.group(2))
        s = m.group(1)[:-2]
    # Produce a more compact signature
    sig = limited_join(", ", args, max_chars=max_chars-2)
    if opts:
        if not sig:
            sig = "[%s]" % limited_join(", ", opts, max_chars=max_chars-4)
        elif len(sig) < max_chars - 4 - 2 - 3:
            sig += "[, %s]" % limited_join(", ", opts,
                                           max_chars=max_chars-len(sig)-4-2)
    return u"(%s)" % sig
|
Reformat a function signature to a more compact form.
|
def register_type(cls, name):
    """Register `name` as a type to validate as an instance of class `cls`."""
    # Map the name onto a TypeDefinition wrapping the class, with no
    # excluded types, in the validator's global type registry.
    Validator.types_mapping[name] = TypeDefinition(name, (cls,), ())
|
Register `name` as a type to validate as an instance of class `cls`.
|
def VxLANTunnelState_originator_switch_info_switchIdentifier(self, **kwargs):
    """Auto Generated Code
    """
    # Build config -> VxLANTunnelState -> originator-switch-info ->
    # switchIdentifier, then hand the tree to the callback.
    config = ET.Element("config")
    vxlan_state = ET.SubElement(config, "VxLANTunnelState", xmlns="http://brocade.com/ns/brocade-notification-stream")
    switch_info = ET.SubElement(vxlan_state, "originator-switch-info")
    identifier_node = ET.SubElement(switch_info, "switchIdentifier")
    identifier_node.text = kwargs.pop('switchIdentifier')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
Auto Generated Code
|
def create_optimizer(name, **kwargs):
    """Instantiates an optimizer with a given name and kwargs.

    .. note:: We can use the alias `create` for ``Optimizer.create_optimizer``.

    Parameters
    ----------
    name: str
        Name of the optimizer. Should be the name
        of a subclass of Optimizer. Case insensitive.
    kwargs: dict
        Parameters for the optimizer.

    Returns
    -------
    Optimizer
        An instantiated optimizer.

    Examples
    --------
    >>> sgd = mx.optimizer.Optimizer.create_optimizer('sgd')
    >>> type(sgd)
    <class 'mxnet.optimizer.SGD'>
    >>> adam = mx.optimizer.create('adam', learning_rate=.1)
    >>> type(adam)
    <class 'mxnet.optimizer.Adam'>
    """
    # Registry keys are lower-cased; reject unknown names early.
    registry_key = name.lower()
    if registry_key not in Optimizer.opt_registry:
        raise ValueError('Cannot find optimizer %s' % name)
    return Optimizer.opt_registry[registry_key](**kwargs)
|
Instantiates an optimizer with a given name and kwargs.
.. note:: We can use the alias `create` for ``Optimizer.create_optimizer``.
Parameters
----------
name: str
Name of the optimizer. Should be the name
of a subclass of Optimizer. Case insensitive.
kwargs: dict
Parameters for the optimizer.
Returns
-------
Optimizer
An instantiated optimizer.
Examples
--------
>>> sgd = mx.optimizer.Optimizer.create_optimizer('sgd')
>>> type(sgd)
<class 'mxnet.optimizer.SGD'>
>>> adam = mx.optimizer.create('adam', learning_rate=.1)
>>> type(adam)
<class 'mxnet.optimizer.Adam'>
|
def isConnected(self, fromName, toName):
    """Return 1 if some connection runs fromName -> toName, else 0."""
    matches = (c.fromLayer.name == fromName and c.toLayer.name == toName
               for c in self.connections)
    return 1 if any(matches) else 0
|
Are these two layers connected this way?
|
def _submit(self, pool, args, callback):
"""If the caller has passed the magic 'single-threaded' flag, call the
function directly instead of pool.apply_async. The single-threaded flag
is intended for gathering more useful performance information about
what appens beneath `call_runner`, since python's default profiling
tools ignore child threads.
This does still go through the callback path for result collection.
"""
if self.config.args.single_threaded:
callback(self.call_runner(*args))
else:
pool.apply_async(self.call_runner, args=args, callback=callback)
|
If the caller has passed the magic 'single-threaded' flag, call the
function directly instead of pool.apply_async. The single-threaded flag
is intended for gathering more useful performance information about
what happens beneath `call_runner`, since python's default profiling
tools ignore child threads.
This does still go through the callback path for result collection.
|
def comments(self):
    """Return the text inside the comment area of the file.

    The comment area spans records 2 .. fward-1, with only the first
    1000 bytes of each record belonging to it. The text ends at the EOT
    byte (``\\x04``); NUL separators are rendered as newlines.

    Raises
    ------
    ValueError
        If the comment area is missing its EOT byte or is not ASCII.
    """
    record_numbers = range(2, self.fward)
    if not record_numbers:
        return ''
    data = b''.join(self.read_record(n)[0:1000] for n in record_numbers)
    eot = data.find(b'\4')
    if eot < 0:
        # BUGFIX: find() returns -1 when the EOT byte is absent, so the
        # old ``data[:data.find(b'\4')]`` silently dropped the final byte
        # instead of raising (slicing never raises IndexError).
        raise ValueError('DAF file comment area is missing its EOT byte')
    try:
        return data[:eot].decode('ascii').replace('\0', '\n')
    except UnicodeDecodeError:
        raise ValueError('DAF file comment area is not ASCII text')
|
Return the text inside the comment area of the file.
|
def _parse_tensor(self, indices=False):
'''Parse a tensor.'''
if indices:
self.line = self._skip_lines(1)
tensor = np.zeros((3, 3))
for i in range(3):
tokens = self.line.split()
if indices:
tensor[i][0] = float(tokens[1])
tensor[i][1] = float(tokens[2])
tensor[i][2] = float(tokens[3])
else:
tensor[i][0] = float(tokens[0])
tensor[i][1] = float(tokens[1])
tensor[i][2] = float(tokens[2])
self.line = self._skip_lines(1)
return tensor
|
Parse a tensor.
|
def downstream(self, f, n=1):
    """Find n downstream features, where downstream is determined by
    the strand of the query Feature f.
    Overlapping features are not considered.

    f: a Feature object
    n: the number of features to return
    """
    # Minus-strand features read leftward; everything else rightward.
    finder = self.left if f.strand == -1 else self.right
    return finder(f, n)
|
find n downstream features where downstream is determined by
the strand of the query Feature f
Overlapping features are not considered.
f: a Feature object
n: the number of features to return
|
def from_file(path):
    """
    Crawls articles from the urls and extracts relevant information.

    :param path: path to file containing urls (each line contains one URL)
    :return: A dict containing given URLs as keys, and extracted information as corresponding values.
    """
    # One URL per line; blank lines are ignored.
    with open(path) as url_file:
        urls = [line.strip() for line in url_file if line.strip()]
    return NewsPlease.from_urls(urls)
|
Crawls articles from the urls and extracts relevant information.
:param path: path to file containing urls (each line contains one URL)
:return: A dict containing given URLs as keys, and extracted information as corresponding values.
|
def get_active_pitch_range(self):
    """
    Return the active pitch range as a tuple (lowest, highest).

    Returns
    -------
    lowest : int
        The lowest active pitch in the pianoroll.
    highest : int
        The highest active pitch in the pianoroll.

    Raises
    ------
    ValueError
        If the pianoroll has no pitch columns or no active pitches.
    """
    if self.pianoroll.shape[1] < 1:
        raise ValueError("Cannot compute the active pitch range for an "
                         "empty pianoroll")
    lowest = 0
    # Derive the top pitch from the roll's actual width instead of
    # hard-coding 127, so rolls with other pitch counts work too.
    highest = self.pianoroll.shape[1] - 1
    # BUGFIX: the old loop treated "lowest reached highest" as an empty
    # roll, wrongly raising when the only active pitch was the top one.
    while lowest <= highest and not np.any(self.pianoroll[:, lowest]):
        lowest += 1
    if lowest > highest:
        raise ValueError("Cannot compute the active pitch range for an "
                         "empty pianoroll")
    while not np.any(self.pianoroll[:, highest]):
        highest -= 1
    return lowest, highest
|
Return the active pitch range as a tuple (lowest, highest).
Returns
-------
lowest : int
The lowest active pitch in the pianoroll.
highest : int
The highest active pitch in the pianoroll.
|
def no_use_pep517_callback(option, opt, value, parser):
    """
    Process a value provided for the --no-use-pep517 option.
    This is an optparse.Option callback for the no_use_pep517 option.

    :param option: the optparse.Option being processed
    :param opt: the option string as seen on the command line
    :param value: the option's argument, if any (None for the bare flag)
    :param parser: the optparse.OptionParser in use
    """
    # Since --no-use-pep517 doesn't accept arguments, the value argument
    # will be None if --no-use-pep517 is passed via the command-line.
    # However, the value can be non-None if the option is triggered e.g.
    # by an environment variable, for example "PIP_NO_USE_PEP517=true".
    if value is not None:
        msg = """A value was passed for --no-use-pep517,
        probably using either the PIP_NO_USE_PEP517 environment variable
        or the "no-use-pep517" config file option. Use an appropriate value
        of the PIP_USE_PEP517 environment variable or the "use-pep517"
        config file option instead.
        """
        # NOTE(review): raise_option_error is defined elsewhere; assumed
        # to abort processing -- confirm before relying on fall-through.
        raise_option_error(parser, option=option, msg=msg)
    # Otherwise, --no-use-pep517 was passed via the command-line.
    parser.values.use_pep517 = False
|
Process a value provided for the --no-use-pep517 option.
This is an optparse.Option callback for the no_use_pep517 option.
|
def find_page_of_state_m(self, state_m):
    """Look up the notebook page that holds a given state model.

    :param state_m: The state model to be searched
    :return: page containing the state and the state_identifier,
        or (None, None) when no page holds the model
    """
    # Identity comparison: we want the page of this exact model object.
    for identifier, page_info in list(self.tabs.items()):
        if page_info['state_m'] is state_m:
            return page_info['page'], identifier
    return None, None
|
Return the identifier and page of a given state model
:param state_m: The state model to be searched
:return: page containing the state and the state_identifier
|
def Rizk(mp, dp, rhog, D):
    r'''Calculates saltation velocity of the gas for pneumatic conveying,
    according to [1]_ as described in [2]_ and many others.

    .. math::
        \mu=\left(\frac{1}{10^{1440d_p+1.96}}\right)\left(Fr_s\right)^{1100d_p+2.5}

        Fr_s = \frac{V_{salt}}{\sqrt{gD}}

        \mu = \frac{m_p}{\frac{\pi}{4}D^2V \rho_f}

    Parameters
    ----------
    mp : float
        Solid mass flow rate, [kg/s]
    dp : float
        Particle diameter, [m]
    rhog : float
        Gas density, [kg/m^3]
    D : float
        Diameter of pipe, [m]

    Returns
    -------
    V : float
        Saltation velocity of gas, [m/s]

    Notes
    -----
    The correlation is rearranged internally to be explicit in the
    saltation velocity.

    Examples
    --------
    Example is from [3]_.

    >>> Rizk(mp=0.25, dp=100E-6, rhog=1.2, D=.078)
    9.8833092829357

    References
    ----------
    .. [1] Rizk, F. "Pneumatic conveying at optimal operation conditions and a
       solution of Bath's equation." Proceedings of Pneumotransport 3,
       paper D4. BHRA Fluid Engineering, Cranfield, England (1973)
    .. [2] Klinzing, G. E., F. Rizk, R. Marcus, and L. S. Leung. Pneumatic
       Conveying of Solids: A Theoretical and Practical Approach.
       Springer, 2013.
    .. [3] Rhodes, Martin J. Introduction to Particle Technology. Wiley, 2013.
    '''
    # Particle-size-dependent exponents of the correlation.
    exp_mu = 1440*dp + 1.96
    exp_fr = 1100*dp + 2.5
    mu_factor = 1./10**exp_mu
    inv_sqrt_gD = 1/(g*D)**0.5
    denominator = mu_factor*inv_sqrt_gD**exp_fr
    # Solids loading per unit gas volumetric capacity of the pipe.
    solid_loading = mp/rhog/(pi/4*D**2)
    return (solid_loading/denominator)**(1./(1 + exp_fr))
|
r'''Calculates saltation velocity of the gas for pneumatic conveying,
according to [1]_ as described in [2]_ and many others.
.. math::
\mu=\left(\frac{1}{10^{1440d_p+1.96}}\right)\left(Fr_s\right)^{1100d_p+2.5}
Fr_s = \frac{V_{salt}}{\sqrt{gD}}
\mu = \frac{m_p}{\frac{\pi}{4}D^2V \rho_f}
Parameters
----------
mp : float
Solid mass flow rate, [kg/s]
dp : float
Particle diameter, [m]
rhog : float
Gas density, [kg/m^3]
D : float
Diameter of pipe, [m]
Returns
-------
V : float
Saltation velocity of gas, [m/s]
Notes
-----
Model is rearranged to be explicit in terms of saltation velocity
internally.
Examples
--------
Example is from [3]_.
>>> Rizk(mp=0.25, dp=100E-6, rhog=1.2, D=.078)
9.8833092829357
References
----------
.. [1] Rizk, F. "Pneumatic conveying at optimal operation conditions and a
solution of Bath's equation." Proceedings of Pneumotransport 3,
paper D4. BHRA Fluid Engineering, Cranfield, England (1973)
.. [2] Klinzing, G. E., F. Rizk, R. Marcus, and L. S. Leung. Pneumatic
Conveying of Solids: A Theoretical and Practical Approach.
Springer, 2013.
.. [3] Rhodes, Martin J. Introduction to Particle Technology. Wiley, 2013.
|
def task_ref_role(name, rawtext, text, lineno, inliner,
                  options=None, content=None):
    """Process a role that references the target nodes created by the
    ``lsst-task`` directive.

    Parameters
    ----------
    name
        The role name used in the document.
    rawtext
        The entire markup snippet, with role.
    text
        The text marked with the role.
    lineno
        The line number where ``rawtext`` appears in the input.
    inliner
        The inliner instance that called us.
    options
        Directive options for customization.
    content
        The directive content for customization.

    Returns
    -------
    nodes : `list`
        List of nodes to insert into the document.
    messages : `list`
        List of system messages.
    """
    # Actual resolution happens in a later pass; for now just emit a
    # pending cross-reference node carrying the role text.
    xref = pending_task_xref(rawsource=text)
    return [xref], []
|
Process a role that references the target nodes created by the
``lsst-task`` directive.
Parameters
----------
name
The role name used in the document.
rawtext
The entire markup snippet, with role.
text
The text marked with the role.
lineno
The line number where ``rawtext`` appears in the input.
inliner
The inliner instance that called us.
options
Directive options for customization.
content
The directive content for customization.
Returns
-------
nodes : `list`
List of nodes to insert into the document.
messages : `list`
List of system messages.
|
def predict_is(self, h):
    """Produce in-sample predictions from the aggregating algorithm.

    Parameters
    ----------
    h : int
        How many steps to run the aggregating algorithm on

    Returns
    ----------
    - pd.DataFrame of ensemble predictions
    """
    # run() yields the ensemble predictions as its third element; shape
    # them into a single-column frame aligned with the last h index values.
    ensemble_predictions = self.run(h=h)[2]
    frame = pd.DataFrame([ensemble_predictions]).T
    frame.index = self.index[-h:]
    return frame
|
Outputs predictions for the Aggregate algorithm on the in-sample data
Parameters
----------
h : int
How many steps to run the aggregating algorithm on
Returns
----------
- pd.DataFrame of ensemble predictions
|
def save(self, data):
    """Save a document or list of documents"""
    if not self.is_connected:
        raise Exception("No database selected")
    if not data:
        # Nothing to persist (None, empty dict/list, ...).
        return False
    if isinstance(data, dict):
        # Wrap plain dicts in a couchdb Document before creating.
        wrapped = couchdb.Document()
        wrapped.update(data)
        self.db.create(wrapped)
    elif isinstance(data, (couchdb.Document, list)):
        # Documents and lists of documents both go through bulk update.
        self.db.update(data)
    return True
|
Save a document or list of documents
|
def _write_color (self, text, color=None):
"""Print text with given color. If color is None, print text as-is."""
if color is None:
self.fp.write(text)
else:
write_color(self.fp, text, color)
|
Print text with given color. If color is None, print text as-is.
|
def user_field_create(self, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/user_fields#create-user-fields"
    # POST the payload to the user-fields collection endpoint.
    return self.call("/api/v2/user_fields.json", method="POST", data=data, **kwargs)
|
https://developer.zendesk.com/rest_api/docs/core/user_fields#create-user-fields
|
def tokenize(text, to_lower=False, delimiters=DEFAULT_DELIMITERS):
    """
    Tokenize the input SArray of text strings and return the list of tokens.

    Parameters
    ----------
    text : SArray[str]
        Input data of strings representing English text. This tokenizer is not
        intended to process XML, HTML, or other structured text formats.
    to_lower : bool, optional
        If True, all strings are converted to lower case before tokenization.
    delimiters : list[str], None, optional
        Input strings are tokenized using delimiter characters in this list.
        Each entry in this list must contain a single character. If set to
        `None`, then a Penn treebank-style tokenization is used, which contains
        smart handling of punctuations.

    Returns
    -------
    out : SArray[list]
        Each text string in the input is mapped to a list of tokens.

    See Also
    --------
    count_words, count_ngrams, tf_idf

    References
    ----------
    - `Penn treebank tokenization <https://web.archive.org/web/19970614072242/http://www.cis.upenn.edu:80/~treebank/tokenization.html>`_

    Examples
    --------
    .. sourcecode:: python

        >>> import turicreate
        >>> docs = turicreate.SArray(['This is the first sentence.',
                                      "This one, it's the second sentence."])

        # Default tokenization by space characters
        >>> turicreate.text_analytics.tokenize(docs)
        dtype: list
        Rows: 2
        [['This', 'is', 'the', 'first', 'sentence.'],
         ['This', 'one,', "it's", 'the', 'second', 'sentence.']]

        # Penn treebank-style tokenization
        >>> turicreate.text_analytics.tokenize(docs, delimiters=None)
        dtype: list
        Rows: 2
        [['This', 'is', 'the', 'first', 'sentence', '.'],
         ['This', 'one', ',', 'it', "'s", 'the', 'second', 'sentence', '.']]
    """
    _raise_error_if_not_sarray(text, "text")
    # Wrap the SArray in a one-column SFrame so the Tokenizer transform
    # can be applied, then pull the tokenized column back out.
    docs_frame = _turicreate.SFrame({'docs': text})
    tokenizer = _feature_engineering.Tokenizer(features='docs',
                                               to_lower=to_lower,
                                               delimiters=delimiters,
                                               output_column_prefix=None)
    return tokenizer.fit_transform(docs_frame)['docs']
|
Tokenize the input SArray of text strings and return the list of tokens.
Parameters
----------
text : SArray[str]
Input data of strings representing English text. This tokenizer is not
intended to process XML, HTML, or other structured text formats.
to_lower : bool, optional
If True, all strings are converted to lower case before tokenization.
delimiters : list[str], None, optional
Input strings are tokenized using delimiter characters in this list.
Each entry in this list must contain a single character. If set to
`None`, then a Penn treebank-style tokenization is used, which contains
smart handling of punctuations.
Returns
-------
out : SArray[list]
Each text string in the input is mapped to a list of tokens.
See Also
--------
count_words, count_ngrams, tf_idf
References
----------
- `Penn treebank tokenization <https://web.archive.org/web/19970614072242/http://www.cis.upenn.edu:80/~treebank/tokenization.html>`_
Examples
--------
.. sourcecode:: python
>>> import turicreate
>>> docs = turicreate.SArray(['This is the first sentence.',
"This one, it's the second sentence."])
# Default tokenization by space characters
>>> turicreate.text_analytics.tokenize(docs)
dtype: list
Rows: 2
[['This', 'is', 'the', 'first', 'sentence.'],
['This', 'one,', "it's", 'the', 'second', 'sentence.']]
# Penn treebank-style tokenization
>>> turicreate.text_analytics.tokenize(docs, delimiters=None)
dtype: list
Rows: 2
[['This', 'is', 'the', 'first', 'sentence', '.'],
['This', 'one', ',', 'it', "'s", 'the', 'second', 'sentence', '.']]
|
def end_element (self, tag):
    """
    Print HTML end element.
    @param tag: tag name
    @type tag: string
    @return: None
    """
    # NOTE(review): str.encode returns bytes on Python 3, so the "%s"
    # below would render as "</b'tag'>" there; this only behaves as
    # intended on Python 2 -- confirm the supported interpreter.
    tag = tag.encode(self.encoding, "ignore")
    self.fd.write("</%s>" % tag)
|
Print HTML end element.
@param tag: tag name
@type tag: string
@return: None
|
def hash_vector(self, v, querying=False):
    """
    Hash the vector with every child hash and return all bucket keys,
    each prefixed with the owning child hash's name.

    When querying, neighbouring keys looked up in the permuted index are
    returned instead of the raw keys; when storing, each raw key is also
    registered in the child's bucket_keys dict.
    """
    result_keys = []
    if querying:
        for child in self.child_hashes:
            child_lsh = child['hash']
            # A permuted index must have been built for this hash.
            if child_lsh.hash_name not in self.permutation.permutedIndexs:
                raise AttributeError('Permuted index is not existing for hash with name %s' % child_lsh.hash_name)
            for raw_key in child_lsh.hash_vector(v, querying):
                # Expand each raw key into its neighbourhood, prefixed
                # with the hash name.
                neighbour_keys = self.permutation.get_neighbour_keys(child_lsh.hash_name, raw_key)
                result_keys.extend(child_lsh.hash_name + '_' + neighbour
                                   for neighbour in neighbour_keys)
    else:
        for child in self.child_hashes:
            child_lsh = child['hash']
            for raw_key in child_lsh.hash_vector(v, querying):
                # Record the raw key for this child, then emit it prefixed.
                child['bucket_keys'][raw_key] = raw_key
                result_keys.append(child_lsh.hash_name + '_' + raw_key)
    return result_keys
|
Hashes the vector and returns the bucket key as string.
|
def neighbours(self, word, size=10):
    """
    Yield the nearest words found via the KDTree, ranked by score
    (cosine-based), as (word, score) pairs.
    """
    query_word = word.strip()
    vec = self.word_vec(query_word)
    [distances], [points] = self.kdt.query(array([vec]), k=size, return_distance=True)
    assert len(distances) == len(points), "distances and points should be in same shape."
    ranked_words = []
    score_by_word = {}
    for point_idx, _dist in zip(points, distances):
        candidate = self.index2word[point_idx]
        # The query word itself scores 1.0; others score by cosine.
        score = 1.0 if candidate == query_word else cosine(vec, self.syn0[point_idx])
        if score < 0:
            score = abs(score)
        ranked_words.append(candidate)
        score_by_word[candidate] = min(score, 1.0)
    for candidate in sorted(ranked_words, key=score_by_word.get, reverse=True):
        yield candidate, score_by_word[candidate]
|
Get nearest words with KDTree, ranking by cosine distance
|
def save_figure(self, event=None, transparent=False, dpi=600):
    """Save the current figure to an image file chosen via a dialog.

    Opens a wx file-save dialog (PNG/SVG/PDF), derives a default file
    name from the plot title, and writes the figure at the given *dpi*.
    The process working directory is restored afterwards.
    """
    file_choices = "PNG (*.png)|*.png|SVG (*.svg)|*.svg|PDF (*.pdf)|*.pdf"
    try:
        ofile = self.conf.title.strip()
    except:
        ofile = 'Image'
    # Keep the suggested file name short and non-empty.
    if len(ofile) > 64:
        ofile = ofile[:63].strip()
    if len(ofile) < 1:
        ofile = 'plot'
    # Replace characters that are awkward or illegal in file names.
    for c in ' :";|/\\': # "
        ofile = ofile.replace(c, '_')
    ofile = ofile + '.png'
    # Remember the cwd: wx.FD_CHANGE_DIR changes it as a side effect.
    orig_dir = os.path.abspath(os.curdir)
    dlg = wx.FileDialog(self, message='Save Plot Figure as...',
                        defaultDir = os.getcwd(),
                        defaultFile=ofile,
                        wildcard=file_choices,
                        style=wx.FD_SAVE|wx.FD_CHANGE_DIR)
    if dlg.ShowModal() == wx.ID_OK:
        path = dlg.GetPath()
        if hasattr(self, 'fig'):
            self.fig.savefig(path, transparent=transparent, dpi=dpi)
        else:
            self.canvas.print_figure(path, transparent=transparent, dpi=dpi)
        # Report a path relative to the launch directory when possible.
        if (path.find(self.launch_dir) ==  0):
            path = path[len(self.launch_dir)+1:]
        self.write_message('Saved plot to %s' % path)
    os.chdir(orig_dir)
|
save figure image to file
|
def set_num_special_tokens(self, num_special_tokens):
    """ Update input and output embeddings with a new embedding matrix.

    Delegates the resize to the transformer, then re-ties the LM head
    to the transformer's token-embedding weights so that the input and
    output embeddings stay shared.
    """
    self.transformer.set_num_special_tokens(num_special_tokens)
    # Re-point the LM head at the (possibly reallocated) embedding weight.
    self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight)
|
Update input and output embeddings with new embedding matrices
Make sure we are sharing the embeddings
|
def get_instance(self, payload):
    """
    Build an instance of TaskQueueInstance
    :param dict payload: Payload response from the API
    :returns: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueInstance
    """
    # Thread the workspace_sid from this list context into the new instance.
    return TaskQueueInstance(self._version, payload, workspace_sid=self._solution['workspace_sid'], )
|
Build an instance of TaskQueueInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueInstance
|
def is_symbol(string):
    """Return true if *string* is a recognised mathematical symbol.

    A symbol is a number (int or float), a named constant, a unary or
    binary operator, or a parenthesis.
    """
    if string in ('(', ')'):
        return True
    return (is_int(string) or is_float(string)
            or is_constant(string) or is_unary(string)
            or is_binary(string))
|
Return true if the string is a mathematical symbol.
|
def map_trigger(library, session, trigger_source, trigger_destination, mode):
    """Map the specified trigger source line to the specified destination line.

    Thin wrapper around the viMapTrigger function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param trigger_source: Source line from which to map. (Constants.TRIG*)
    :param trigger_destination: Destination line to which to map. (Constants.TRIG*)
    :param mode:
    :return: return value of the library call.
    :rtype: :class:`pyvisa.constants.StatusCode`
    """
    status = library.viMapTrigger(session, trigger_source,
                                  trigger_destination, mode)
    return status
|
Map the specified trigger source line to the specified destination line.
Corresponds to viMapTrigger function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param trigger_source: Source line from which to map. (Constants.TRIG*)
:param trigger_destination: Destination line to which to map. (Constants.TRIG*)
:param mode:
:return: return value of the library call.
:rtype: :class:`pyvisa.constants.StatusCode`
|
def getAllSystemVariables(self, remote):
    """Get all system variables from CCU / Homegear.

    Uses the JSON-RPC API when credentials are configured for *remote*,
    otherwise falls back to the XML-RPC proxy.  Returns a dict mapping
    variable name to value (empty on failure; None if JSON-RPC login
    fails).
    """
    variables = {}
    if self.remotes[remote]['username'] and self.remotes[remote]['password']:
        LOG.debug(
            "ServerThread.getAllSystemVariables: Getting all System variables via JSON-RPC")
        session = self.jsonRpcLogin(remote)
        if not session:
            return
        try:
            params = {"_session_id_": session}
            response = self._rpcfunctions.jsonRpcPost(
                self.remotes[remote]['ip'], self.remotes[remote].get('jsonport', DEFAULT_JSONPORT), "SysVar.getAll", params)
            if response['error'] is None and response['result']:
                for var in response['result']:
                    key, value = self.parseCCUSysVar(var)
                    variables[key] = value
            # Release the JSON-RPC session on both success and failure paths.
            self.jsonRpcLogout(remote, session)
        except Exception as err:
            self.jsonRpcLogout(remote, session)
            LOG.warning(
                "ServerThread.getAllSystemVariables: Exception: %s" % str(err))
    else:
        # No credentials configured: query via the XML-RPC proxy instead.
        try:
            variables = self.proxies[
                "%s-%s" % (self._interface_id, remote)].getAllSystemVariables()
        except Exception as err:
            LOG.debug(
                "ServerThread.getAllSystemVariables: Exception: %s" % str(err))
    return variables
|
Get all system variables from CCU / Homegear
|
def build_time(start_time):
    """
    Calculate build time per package

    Returns a human-readable string for the time elapsed since
    *start_time* (an epoch timestamp): seconds below one minute,
    "M Min S Sec" below one hour, otherwise "H Hours M Min".
    """
    diff_time = round(time.time() - start_time, 2)
    if diff_time <= 59.99:
        sum_time = str(diff_time) + " Sec"
    elif diff_time <= 3599.99:
        # divmod gives exact minutes/seconds.  The old implementation
        # regex-scraped the decimals of round(diff/60, 2), so e.g. 90 s
        # was reported as "1 Min 5 Sec" instead of "1 Min 30 Sec".
        minutes, seconds = divmod(int(diff_time), 60)
        sum_time = "{0} Min {1} Sec".format(minutes, seconds)
    else:
        hours, remainder = divmod(int(diff_time), 3600)
        sum_time = "{0} Hours {1} Min".format(hours, remainder // 60)
    return sum_time
|
Calculate build time per package
|
def interfaces_info():
    """Return a mapping of network interface name to its address info.

    The netifaces address-family constants are rewritten to the
    friendlier keys ``'link'``, ``'ipv4'`` and ``'ipv6'``; unknown
    families keep their numeric constant as the key.
    """
    family_names = {
        netifaces.AF_LINK: 'link',
        netifaces.AF_INET: 'ipv4',
        netifaces.AF_INET6: 'ipv6',
    }
    info = {}
    for name in netifaces.interfaces():
        addresses = netifaces.ifaddresses(name)
        info[name] = {
            family_names.get(family, family): entries
            for family, entries in addresses.items()
        }
    return info
|
Returns interfaces data.
|
def _append_array(self, value, _file):
    """Call this function to write array contents.
    Keyword arguments:
    * value - iterable, array content to be dumped
    * _file - FileIO, output file
    """
    _labs = ' ['
    _file.write(_labs)
    # Descend one nesting level; _vctr[level] counts items emitted at that
    # level so a comma is written before every item except the first.
    self._tctr += 1
    for _item in value:
        _cmma = ',' if self._vctr[self._tctr] else ''
        _file.write(_cmma)
        self._vctr[self._tctr] += 1
        # Allow the configured hook to transform the item before dumping.
        _item = self.object_hook(_item)
        _type = type(_item).__name__
        # Dispatch on the item's type name to the matching writer.
        _MAGIC_TYPES[_type](self, _item, _file)
    # Reset this level's item counter and pop back to the parent level.
    self._vctr[self._tctr] = 0
    self._tctr -= 1
    _labs = ' ]'
    _file.write(_labs)
|
Call this function to write array contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
|
def text(self):
    """
    Return string value of scalar, whatever value it was parsed as.

    Raises TypeError when the underlying value is a mapping or a
    sequence, since those have no scalar text representation.
    """
    for container_type, description in ((CommentedMap, "mapping"),
                                        (CommentedSeq, "sequence")):
        if isinstance(self._value, container_type):
            raise TypeError(
                "{0} is a {1}, has no text value.".format(repr(self), description)
            )
    return self._text
|
Return string value of scalar, whatever value it was parsed as.
|
def kids(tup_tree):
    """
    Return a list with the child elements of tup_tree.
    The child elements are represented as tupletree nodes.
    Child nodes that are not XML elements (e.g. text nodes) in tup_tree are
    filtered out.
    """
    children = tup_tree[2]
    if children is None:
        return []
    result = []
    for child in children:
        # pylint: disable=unidiomatic-typecheck
        # Exact-type check on purpose: element nodes are plain tuples,
        # while text nodes are strings.
        if type(child) == tuple:
            result.append(child)
    return result
|
Return a list with the child elements of tup_tree.
The child elements are represented as tupletree nodes.
Child nodes that are not XML elements (e.g. text nodes) in tup_tree are
filtered out.
|
def _get_offset_day(self, other):
"""
Find the day in the same month as other that has the same
weekday as self.weekday and is the self.week'th such day in the month.
Parameters
----------
other : datetime
Returns
-------
day : int
"""
mstart = datetime(other.year, other.month, 1)
wday = mstart.weekday()
shift_days = (self.weekday - wday) % 7
return 1 + shift_days + self.week * 7
|
Find the day in the same month as other that has the same
weekday as self.weekday and is the self.week'th such day in the month.
Parameters
----------
other : datetime
Returns
-------
day : int
|
def read_properties(group):
    """Return the properties object stored in *group*.

    Raises IOError when the group has no 'properties' entry.  NUL bytes
    are stored escaped as ``__NULL__`` and are restored before
    unpickling.
    """
    if 'properties' not in group:
        raise IOError('no properties in group')
    raw = group['properties'][...][0]
    return pickle.loads(raw.replace(b'__NULL__', b'\x00'))
|
Returns properties loaded from a group
|
def __fork_pty(self):
    """This implements a substitute for the forkpty system call. This
    should be more portable than the pty.fork() function. Specifically,
    this should work on Solaris.
    Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
    resolve the issue with Python's pty.fork() not supporting Solaris,
    particularly ssh. Based on patch to posixmodule.c authored by Noah
    Spurrier::
        http://mail.python.org/pipermail/python-dev/2003-May/035281.html

    Returns:
        (pid, parent_fd) -- pid is 0 in the child and the child's pid in
        the parent; parent_fd is the master side of the pty.
    """
    parent_fd, child_fd = os.openpty()
    if parent_fd < 0 or child_fd < 0:
        # Parenthesised raise: the old "raise Exc, msg" comma form is a
        # SyntaxError on Python 3; this form works on both 2 and 3.
        raise ExceptionPexpect("Error! Could not open pty with os.openpty().")
    pid = os.fork()
    if pid < 0:
        raise ExceptionPexpect("Error! Failed os.fork().")
    elif pid == 0:
        # Child: make the slave end our controlling tty and wire it to
        # stdin/stdout/stderr, then close the now-duplicated descriptor.
        os.close(parent_fd)
        self.__pty_make_controlling_tty(child_fd)
        os.dup2(child_fd, 0)
        os.dup2(child_fd, 1)
        os.dup2(child_fd, 2)
        if child_fd > 2:
            os.close(child_fd)
    else:
        # Parent: keep only the master end of the pty.
        os.close(child_fd)
    return pid, parent_fd
|
This implements a substitute for the forkpty system call. This
should be more portable than the pty.fork() function. Specifically,
this should work on Solaris.
Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
resolve the issue with Python's pty.fork() not supporting Solaris,
particularly ssh. Based on patch to posixmodule.c authored by Noah
Spurrier::
http://mail.python.org/pipermail/python-dev/2003-May/035281.html
|
def _html(title: str, field_names: List[str]) -> str:
    """
    Returns bare bones HTML for serving up an input form with the
    specified fields that can render predictions from the configured model.
    """
    rendered_inputs = ''.join(
        _SINGLE_INPUT_TEMPLATE.substitute(field_name=name)
        for name in field_names
    )
    # JavaScript-style array literal of the quoted field names.
    field_array = "[" + ",".join("'{}'".format(name) for name in field_names) + "]"
    return _PAGE_TEMPLATE.substitute(title=title,
                                     css=_CSS,
                                     inputs=rendered_inputs,
                                     qfl=field_array)
|
Returns bare bones HTML for serving up an input form with the
specified fields that can render predictions from the configured model.
|
def fromstring(text, schema=None):
    """Parses a KML text string
    This function parses a KML text string and optionally validates it against
    a provided schema object"""
    if not schema:
        # No schema supplied: parse without validation.
        return objectify.fromstring(text)
    validating_parser = objectify.makeparser(schema=schema.schema)
    return objectify.fromstring(text, parser=validating_parser)
|
Parses a KML text string
This function parses a KML text string and optionally validates it against
a provided schema object
|
def is_valid_name_error(name: str, node: Node = None) -> Optional[GraphQLError]:
    """Return an Error if a name is invalid."""
    if not isinstance(name, str):
        raise TypeError("Expected string")
    # Double-underscore names are reserved for introspection.
    if name.startswith("__"):
        message = (
            f"Name {name!r} must not begin with '__',"
            " which is reserved by GraphQL introspection."
        )
        return GraphQLError(message, node)
    if re_name.match(name):
        return None
    return GraphQLError(
        f"Names must match /^[_a-zA-Z][_a-zA-Z0-9]*$/ but {name!r} does not.", node
    )
|
Return an Error if a name is invalid.
|
def start(self):
    """
    Starts the GNS3 VM.

    Locates the VM's VMX file, verifies the VMware guest tools are
    installed, applies vCPU/RAM and extra options when the VM is not
    already running, boots it (headless if configured), and waits for
    the guest IP address before marking the VM as running.

    :raises GNS3VMError: if the VM or its VMX file cannot be found,
        VMware tools are missing, or running VMs cannot be listed.
    """
    vms = yield from self.list()
    for vm in vms:
        if vm["vmname"] == self.vmname:
            self._vmx_path = vm["vmx_path"]
            break
    # check we have a valid VMX file path
    if not self._vmx_path:
        raise GNS3VMError("VMWare VM {} not found".format(self.vmname))
    if not os.path.exists(self._vmx_path):
        raise GNS3VMError("VMware VMX file {} doesn't exist".format(self._vmx_path))
    # check if the VMware guest tools are installed
    vmware_tools_state = yield from self._execute("checkToolsState", [self._vmx_path])
    if vmware_tools_state not in ("installed", "running"):
        raise GNS3VMError("VMware tools are not installed in {}".format(self.vmname))
    try:
        running = yield from self._is_running()
    except VMwareError as e:
        raise GNS3VMError("Could not list VMware VMs: {}".format(str(e)))
    if not running:
        log.info("Update GNS3 VM settings")
        # set the number of vCPUs and amount of RAM
        yield from self._set_vcpus_ram(self.vcpus, self.ram)
        yield from self._set_extra_options()
    # start the VM
    args = [self._vmx_path]
    if self._headless:
        args.extend(["nogui"])
    yield from self._execute("start", args)
    log.info("GNS3 VM has been started")
    # get the guest IP address (first adapter only)
    # Poll a guest variable for up to ~120 attempts (1 second apart).
    trial = 120
    guest_ip_address = ""
    log.info("Waiting for GNS3 VM IP")
    while True:
        guest_ip_address = yield from self._execute("readVariable", [self._vmx_path, "guestVar", "gns3.eth0"], timeout=120, log_level=logging.DEBUG)
        guest_ip_address = guest_ip_address.strip()
        if len(guest_ip_address) != 0:
            break
        trial -= 1
        # If ip not found fallback on old method
        if trial == 0:
            log.warning("No IP found for the VM via readVariable fallback to getGuestIPAddress")
            guest_ip_address = yield from self._execute("getGuestIPAddress", [self._vmx_path, "-wait"], timeout=120)
            break
        yield from asyncio.sleep(1)
    self.ip_address = guest_ip_address
    log.info("GNS3 VM IP address set to {}".format(guest_ip_address))
    self.running = True
|
Starts the GNS3 VM.
|
def decode_union_old(self, data_type, obj):
    """
    The data_type argument must be a Union.
    See json_compat_obj_decode() for argument descriptions.

    A union is encoded either as a bare string (a void member) or as a
    single-key object mapping the tag name to the member's value.
    Unknown tags map to the union's catch-all member when not in strict
    mode; otherwise a ValidationError is raised.
    """
    val = None
    if isinstance(obj, six.string_types):
        # Union member has no associated value
        tag = obj
        if data_type.definition._is_tag_present(tag, self.caller_permissions):
            val_data_type = data_type.definition._get_val_data_type(tag,
                self.caller_permissions)
            # A bare string is only valid for void (or nullable) members.
            if not isinstance(val_data_type, (bv.Void, bv.Nullable)):
                raise bv.ValidationError(
                    "expected object for '%s', got symbol" % tag)
        else:
            if not self.strict and data_type.definition._catch_all:
                tag = data_type.definition._catch_all
            else:
                raise bv.ValidationError("unknown tag '%s'" % tag)
    elif isinstance(obj, dict):
        # Union member has value
        if len(obj) != 1:
            raise bv.ValidationError('expected 1 key, got %s' % len(obj))
        tag = list(obj)[0]
        raw_val = obj[tag]
        if data_type.definition._is_tag_present(tag, self.caller_permissions):
            val_data_type = data_type.definition._get_val_data_type(tag,
                self.caller_permissions)
            if isinstance(val_data_type, bv.Nullable) and raw_val is None:
                val = None
            elif isinstance(val_data_type, bv.Void):
                if raw_val is None or not self.strict:
                    # If raw_val is None, then this is the more verbose
                    # representation of a void union member. If raw_val isn't
                    # None, then maybe the spec has changed, so check if we're
                    # in strict mode.
                    val = None
                else:
                    raise bv.ValidationError('expected null, got %s' %
                                             bv.generic_type_name(raw_val))
            else:
                try:
                    val = self.json_compat_obj_decode_helper(val_data_type, raw_val)
                except bv.ValidationError as e:
                    # Record the failing tag so nested errors are traceable.
                    e.add_parent(tag)
                    raise
        else:
            if not self.strict and data_type.definition._catch_all:
                tag = data_type.definition._catch_all
            else:
                raise bv.ValidationError("unknown tag '%s'" % tag)
    else:
        raise bv.ValidationError("expected string or object, got %s" %
                                 bv.generic_type_name(obj))
    return data_type.definition(tag, val)
|
The data_type argument must be a Union.
See json_compat_obj_decode() for argument descriptions.
|
def __find_sentence_initial_proper_names(self, docs):
    """ Build the set of sentence-initial proper names: look at words that
        have both proper-noun analyses and non-proper-noun analyses and
        that occur at the start of a sentence or of a numbered list item;
        record the unique lemmas (roots) of such words.
    """
    sentInitialNames = set()
    for doc in docs:
        for sentence in doc.divide( layer=WORDS, by=SENTENCES ):
            sentencePos = 0 # Ordinary sentence start
            for i in range(len(sentence)):
                word = sentence[i]
                # Additional heuristics for detecting sentence-start positions:
                # 1) a punctuation token other than a comma or semicolon marks
                #    a sentence start:
                if all([ a[POSTAG] == 'Z' for a in word[ANALYSIS] ]) and \
                   not re.match('^[,;]+$', word[TEXT]):
                    sentencePos = 0
                    #self.__debug_print_word_in_sentence_str(sentence, word)
                    continue
                # 2) a potential list-item start (a number followed by a
                #    period or bracket, and which is not a date);
                # NOTE(review): the '.' separators in the date pattern below
                # are unescaped and match any character -- confirm intent.
                if not re.match('^[1234567890]*$', word[TEXT] ) and \
                   not re.match('^[1234567890]{1,2}.[1234567890]{1,2}.[1234567890]{4}$', word[TEXT] ) and \
                   re.match("^[1234567890.()]*$", word[TEXT]):
                    sentencePos = 0
                    #self.__debug_print_word_in_sentence_str(sentence, word)
                    continue
                if sentencePos == 0:
                    # Consider sentence-initial words that have both
                    # proper-noun analyses and non-proper-noun analyses
                    h_postags = [ a[POSTAG] == 'H' for a in word[ANALYSIS] ]
                    if any( h_postags ) and not all( h_postags ):
                        for analysis in word[ANALYSIS]:
                            # Remember every unique proper-name lemma
                            if analysis[POSTAG] == 'H':
                                sentInitialNames.add( analysis[ROOT] )
                sentencePos += 1
    return sentInitialNames
|
Moodustame lausealguliste pärisnimede loendi: vaatame sõnu, millel nii
pärisnimeanalüüs(id) kui ka mittepärisnimeanalüüs(id) ning mis esinevad
lause või nummerdatud loendi alguses - jäädvustame selliste sõnade
unikaalsed lemmad;
|
def subscribe_multi(self, topics):
    """Subscribe to some topics.

    *topics* is an iterable of ``(topic, qos)`` pairs.  Returns
    NC.ERR_NO_CONN when there is no open socket, otherwise the result
    of sending the SUBSCRIBE packet.
    """
    if self.sock == NC.INVALID_SOCKET:
        return NC.ERR_NO_CONN
    topic_names = [topic for topic, _qos in topics]
    self.logger.info("SUBSCRIBE: %s", ', '.join(topic_names))
    encoded = [(utf8encode(topic), qos) for (topic, qos) in topics]
    return self.send_subscribe(False, encoded)
|
Subscribe to some topics.
|
def before_after_send_handling(self):
    """Context manager that allows to execute send wrapped
    in before_send() and after_send().

    after_send() and the dispatch-status update run in a ``finally``
    block, so they execute even if the wrapped send raises.
    """
    self._init_delivery_statuses_dict()
    self.before_send()
    try:
        yield
    finally:
        self.after_send()
        self._update_dispatches()
|
Context manager that allows to execute send wrapped
in before_send() and after_send().
|
def _set_nsx_controller(self, v, load=False):
    """
    Setter method for nsx_controller, mapped from YANG variable /nsx_controller (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_nsx_controller is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_nsx_controller() directly.

    NOTE: generated pyangbind code -- do not hand-edit the YANGDynClass
    construction below; regenerate from the YANG model instead.
    """
    # Unwrap values that carry their own YANG type converter.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGListType("name",nsx_controller.nsx_controller, yang_name="nsx-controller", rest_name="nsx-controller", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'NSX controller configuration', u'sort-priority': u'RUNNCFG_LEVEL_NVP_CONTROLLER_CONFIG', u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'nvp-controller-config'}}), is_container='list', yang_name="nsx-controller", rest_name="nsx-controller", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'NSX controller configuration', u'sort-priority': u'RUNNCFG_LEVEL_NVP_CONTROLLER_CONFIG', u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'nvp-controller-config'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Re-raise as a structured error describing the expected YANG type.
        raise ValueError({
            'error-string': """nsx_controller must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("name",nsx_controller.nsx_controller, yang_name="nsx-controller", rest_name="nsx-controller", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'NSX controller configuration', u'sort-priority': u'RUNNCFG_LEVEL_NVP_CONTROLLER_CONFIG', u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'nvp-controller-config'}}), is_container='list', yang_name="nsx-controller", rest_name="nsx-controller", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'NSX controller configuration', u'sort-priority': u'RUNNCFG_LEVEL_NVP_CONTROLLER_CONFIG', u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'nvp-controller-config'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)""",
        })
    self.__nsx_controller = t
    # Notify the parent tree that this node changed, when supported.
    if hasattr(self, '_set'):
        self._set()
|
Setter method for nsx_controller, mapped from YANG variable /nsx_controller (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_nsx_controller is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_nsx_controller() directly.
|
def _validate_geometry(self, geometry):
"""Validates geometry, raising error if invalid."""
if geometry is not None and geometry not in self.valid_geometries:
raise InvalidParameterError("{} is not a valid geometry".format(geometry))
return geometry
|
Validates geometry, raising error if invalid.
|
def convert_conelp(c, G, h, dims, A = None, b = None, **kwargs):
    """
    Applies the clique conversion method of Fukuda et al. to the positive semidefinite blocks of a cone LP.
    :param c: :py:class:`matrix`
    :param G: :py:class:`spmatrix`
    :param h: :py:class:`matrix`
    :param dims: dictionary
    :param A: :py:class:`spmatrix` or :py:class:`matrix`
    :param b: :py:class:`matrix`
    The following example illustrates how to convert a cone LP:
    .. code-block:: python
        prob = (c,G,h,dims,A,b)
        probc, blk2sparse, symbs = convert_conelp(*prob)
    The return value `blk2sparse` is a list of 4-tuples
    (`blki,I,J,n`) that each defines a mapping between the sparse
    matrix representation and the converted block-diagonal
    representation, and `symbs` is a list of symbolic factorizations
    corresponding to each of the semidefinite blocks in the original cone LP.
    .. seealso::
        M. Fukuda, M. Kojima, K. Murota, and K. Nakata, `Exploiting Sparsity
        in Semidefinite Programming via Matrix Completion I: General Framework
        <http://dx.doi.org/10.1137/S1052623400366218>`_,
        SIAM Journal on Optimization, 11:3, 2001, pp. 647-674.
        S. Kim, M. Kojima, M. Mevissen, and M. Yamashita, `Exploiting Sparsity
        in Linear and Nonlinear Matrix Inequalities via Positive Semidefinite
        Matrix Completion <http://dx.doi.org/10.1007/s10107-010-0402-6>`_,
        Mathematical Programming, 129:1, 2011, pp. 33-68.
    """
    # extract linear and socp constraints
    offsets = dims['l'] + sum(dims['q'])
    G_lq = G[:offsets,:]
    h_lq = h[:offsets,0]
    # extract semidefinite blocks
    G_s = G[offsets:,:]
    h_s = h[offsets:,0]
    G_converted = [G_lq]; h_converted = [h_lq]
    G_coupling = []
    dims_list = []
    symbs = []
    offset = 0
    block_to_sparse = []
    # Convert each s-block independently; each conversion may introduce
    # coupling (consistency) constraints collected in G_coupling.
    for k, si in enumerate(dims['s']):
        # extract block
        G_b = G_s[offset:offset+si**2,:]
        h_b = h_s[offset:offset+si**2,0]
        offset += si**2
        # convert block
        blkk, b2s, F = convert_block(G_b, h_b, si, **kwargs)
        G1, h1, G2, blkdims = blkk
        G_converted.append(G1)
        h_converted.append(h1)
        dims_list.extend(blkdims)
        block_to_sparse.append(b2s)
        symbs.append(F)
        if G2 is not None: G_coupling.append(G2)
    G1 = sparse(G_converted)
    # Stack the per-block coupling matrices block-diagonally into G2.
    I,J,V = [],[],[]
    offset = [G_lq.size[0], 0]
    for Gcpl in G_coupling:
        I.append(Gcpl.I + offset[0])
        J.append(Gcpl.J + offset[1])
        V.append(Gcpl.V)
        offset[0] += Gcpl.size[0]
        offset[1] += Gcpl.size[1]
    G2 = spmatrix([v for v in itertools.chain(*V)],
                  [v for v in itertools.chain(*I)],
                  [v for v in itertools.chain(*J)],tuple(offset))
    # New variables introduced by the coupling constraints get zero cost.
    if offset[0] == 0 or offset[1] == 0:
        G = G1
    else:
        G = sparse([[G1],[G2]])
    ct = matrix([c,matrix(0.0,(G2.size[1],1))])
    if A is not None:
        return (ct, G, matrix(h_converted),\
                {'l':dims['l'],'q':dims['q'],'s':dims_list},\
                sparse([[A],[spmatrix([],[],[],(A.size[0],G2.size[1]))]]),\
                b), block_to_sparse
    else:
        return (ct, G, matrix(h_converted),\
                {'l':dims['l'],'q':dims['q'],'s':dims_list}), block_to_sparse, symbs
|
Applies the clique conversion method of Fukuda et al. to the positive semidefinite blocks of a cone LP.
:param c: :py:class:`matrix`
:param G: :py:class:`spmatrix`
:param h: :py:class:`matrix`
:param dims: dictionary
:param A: :py:class:`spmatrix` or :py:class:`matrix`
:param b: :py:class:`matrix`
The following example illustrates how to convert a cone LP:
.. code-block:: python
prob = (c,G,h,dims,A,b)
probc, blk2sparse, symbs = convert_conelp(*prob)
The return value `blk2sparse` is a list of 4-tuples
(`blki,I,J,n`) that each defines a mapping between the sparse
matrix representation and the converted block-diagonal
representation, and `symbs` is a list of symbolic factorizations
corresponding to each of the semidefinite blocks in the original cone LP.
.. seealso::
M. Fukuda, M. Kojima, K. Murota, and K. Nakata, `Exploiting Sparsity
in Semidefinite Programming via Matrix Completion I: General Framework
<http://dx.doi.org/10.1137/S1052623400366218>`_,
SIAM Journal on Optimization, 11:3, 2001, pp. 647-674.
S. Kim, M. Kojima, M. Mevissen, and M. Yamashita, `Exploiting Sparsity
in Linear and Nonlinear Matrix Inequalities via Positive Semidefinite
Matrix Completion <http://dx.doi.org/10.1007/s10107-010-0402-6>`_,
Mathematical Programming, 129:1, 2011, pp. 33-68.
|
def mode_reader(self):
    """MODE READER command.
    Instructs a mode-switching server to switch modes.
    See <http://tools.ietf.org/html/rfc3977#section-5.3>
    Returns:
        Boolean value indicating whether posting is allowed or not.
    """
    code, message = self.command("MODE READER")
    if code not in (200, 201):
        raise NNTPReplyError(code, message)
    # 200 => posting allowed, 201 => posting prohibited.
    return code == 200
|
MODE READER command.
Instructs a mode-switching server to switch modes.
See <http://tools.ietf.org/html/rfc3977#section-5.3>
Returns:
Boolean value indicating whether posting is allowed or not.
|
def _capture_as_text(capture: Callable[..., Any]) -> str:
    """Convert the capture function into its text representation by parsing the source code of the decorator."""
    # Named functions can be rendered directly from their signature.
    if not icontract._represent._is_lambda(a_function=capture):
        signature = inspect.signature(capture)
        param_names = list(signature.parameters.keys())
        return "{}({})".format(capture.__qualname__, ", ".join(param_names))
    # Lambdas carry no usable source of their own, so re-parse the
    # decorator call expression in the enclosing source file.
    lines, lineno = inspect.findsource(capture)
    filename = inspect.getsourcefile(capture)
    decorator_inspection = icontract._represent.inspect_decorator(lines=lines, lineno=lineno, filename=filename)
    call_node = decorator_inspection.node
    capture_node = None  # type: Optional[ast.Lambda]
    # The lambda may be passed positionally or via the 'capture' keyword.
    if len(call_node.args) > 0:
        assert isinstance(call_node.args[0], ast.Lambda), \
            ("Expected the first argument to the snapshot decorator to be a condition as lambda AST node, "
             "but got: {}").format(type(call_node.args[0]))
        capture_node = call_node.args[0]
    elif len(call_node.keywords) > 0:
        for keyword in call_node.keywords:
            if keyword.arg == "capture":
                assert isinstance(keyword.value, ast.Lambda), \
                    "Expected lambda node as value of the 'capture' argument to the decorator."
                capture_node = keyword.value
                break
        assert capture_node is not None, "Expected to find a keyword AST node with 'capture' arg, but found none"
    else:
        raise AssertionError(
            "Expected a call AST node of a snapshot decorator to have either args or keywords, but got: {}".format(
                ast.dump(call_node)))
    # Recover the exact source text of the lambda's body expression.
    capture_text = decorator_inspection.atok.get_text(capture_node.body)
    return capture_text
|
Convert the capture function into its text representation by parsing the source code of the decorator.
|
def chuid(name, uid):
    '''
    Change the uid for a named user
    CLI Example:
    .. code-block:: bash
        salt '*' user.chuid foo 4376
    '''
    current = info(name)
    if not current:
        raise CommandExecutionError(
            'User \'{0}\' does not exist'.format(name)
        )
    if uid == current['uid']:
        # Nothing to do: the user already has the requested uid.
        return True
    __salt__['cmd.run'](['pw', 'usermod', '-u', uid, '-n', name],
                        python_shell=False)
    # Report success by re-reading the user record.
    return info(name).get('uid') == uid
|
Change the uid for a named user
CLI Example:
.. code-block:: bash
salt '*' user.chuid foo 4376
|
def _pys2row_heights(self, line):
    """Updates row_heights in code_array

    Parses a tab-separated line ``row<tab>tab<tab>height`` and stores
    the height under the (row, tab) key when it lies inside the grid.
    """
    # Split with maxsplit 3
    split_line = self._split_tidy(line)
    key = row, tab = self._get_key(*split_line[:2])
    height = float(split_line[2])
    shape = self.code_array.shape
    try:
        if row < shape[0] and tab < shape[2]:
            self.code_array.row_heights[key] = height
    except ValueError:
        # NOTE(review): the int comparisons above do not raise ValueError;
        # the float() conversion sits outside this try block -- confirm
        # whether it was meant to be guarded here.
        pass
|
Updates row_heights in code_array
|
def generate_name(self, name=None):
    '''generate a Robot Name for the instance to use, if the user doesn't
    supply one.

    Parameters
    ==========
    name: the name to use. When None, a random robot name is generated
          via self.RobotNamer. Dashes are normalized to underscores
          before being stored in self.name.
    '''
    # Identity check ("is None") instead of the original "== None".
    if name is None:
        name = self.RobotNamer.generate()
    self.name = name.replace('-', '_')
|
generate a Robot Name for the instance to use, if the user doesn't
supply one.
|
def batchccn(args):
    """
    %prog batchccn test.csv
    Run CCN script in batch. Write makefile.
    """
    p = OptionParser(batchccn.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    csvfile, = args
    mm = MakeManager()
    pf = op.basename(csvfile).split(".")[0]
    mkdir(pf)
    # Detect whether the CSV has a header row: a first line ending in
    # ".bam" is data, not a header.
    header = next(open(csvfile))
    header = None if header.strip().endswith(".bam") else "infer"
    logging.debug("Header={}".format(header))
    df = pd.read_csv(csvfile, header=header)
    # Build the command template; "-o" is filled now, "-n"/"-b" per row.
    cmd = "perl /mnt/software/ccn_gcn_hg38_script/ccn_gcn_hg38.pl"
    cmd += " -n {} -b {}"
    cmd += " -o {} -r hg38".format(pf)
    # One CCN invocation per (sample, bam) row, tracked by the makefile.
    for i, (sample_key, bam) in df.iterrows():
        cmdi = cmd.format(sample_key, bam)
        outfile = "{}/{}/{}.ccn".format(pf, sample_key, sample_key)
        mm.add(csvfile, outfile, cmdi)
    mm.write()
|
%prog batchccn test.csv
Run CCN script in batch. Write makefile.
|
def periodicvar_recovery(fakepfpkl,
                         simbasedir,
                         period_tolerance=1.0e-3):
    '''Recovers the periodic variable status/info for the simulated PF result.
    - Uses simbasedir and the lcfbasename stored in fakepfpkl to figure out
      where the LC for this object is.
    - Gets the actual_varparams, actual_varperiod, actual_vartype,
      actual_varamplitude elements from the LC.
    - Figures out if the current objectid is a periodic variable (using
      actual_vartype).
    - If it is a periodic variable, gets the canonical period assigned to it.
    - Checks if the period was recovered in any of the five best periods
      reported by any of the period-finders, checks if the period recovered was
      a harmonic of the period.
    - Returns the objectid, actual period and vartype, recovered period, and
      recovery status.
    Parameters
    ----------
    fakepfpkl : str
        This is a periodfinding-<objectid>.pkl[.gz] file produced in the
        `simbasedir/periodfinding` subdirectory after `run_periodfinding` above
        is done.
    simbasedir : str
        The base directory where all of the fake LCs and period-finding results
        are.
    period_tolerance : float
        The maximum difference that this function will consider between an
        actual period (or its aliases) and a recovered period to consider it as
        as a 'recovered' period.
    Returns
    -------
    dict
        Returns a dict of period-recovery results.
    '''
    if fakepfpkl.endswith('.gz'):
        infd = gzip.open(fakepfpkl,'rb')
    else:
        infd = open(fakepfpkl,'rb')
    fakepf = pickle.load(infd)
    infd.close()
    # get info from the fakepf dict
    objectid, lcfbasename = fakepf['objectid'], fakepf['lcfbasename']
    lcfpath = os.path.join(simbasedir,'lightcurves',lcfbasename)
    # if the LC doesn't exist, bail out
    if not os.path.exists(lcfpath):
        LOGERROR('light curve for %s does not exist at: %s' % (objectid,
                                                               lcfpath))
        return None
    # now, open the fakelc
    fakelc = lcproc._read_pklc(lcfpath)
    # get the actual_varparams, actual_varperiod, actual_varamplitude
    actual_varparams, actual_varperiod, actual_varamplitude, actual_vartype = (
        fakelc['actual_varparams'],
        fakelc['actual_varperiod'],
        fakelc['actual_varamplitude'],
        fakelc['actual_vartype']
    )
    # get the moments too so we can track LC noise, etc.
    actual_moments = fakelc['moments']
    # get the magcols for this LC
    magcols = fakelc['magcols']
    # get the recovered info from each of the available methods
    pfres = {
        'objectid':objectid,
        'simbasedir':simbasedir,
        'magcols':magcols,
        'fakelc':os.path.abspath(lcfpath),
        'fakepf':os.path.abspath(fakepfpkl),
        'actual_vartype':actual_vartype,
        'actual_varperiod':actual_varperiod,
        'actual_varamplitude':actual_varamplitude,
        'actual_varparams':actual_varparams,
        'actual_moments':actual_moments,
        'recovery_periods':[],
        'recovery_lspvals':[],
        'recovery_pfmethods':[],
        'recovery_magcols':[],
        'recovery_status':[],
        'recovery_pdiff':[],
    }
    # populate the pfres dict with the periods, pfmethods, and magcols
    for magcol in magcols:
        for pfm in lcproc.PFMETHODS:
            if pfm in fakepf[magcol]:
                # only get the unique recovered periods by using
                # period_tolerance
                for rpi, rp in enumerate(
                        fakepf[magcol][pfm]['nbestperiods']
                ):
                    if ((not np.any(np.isclose(
                            rp,
                            np.array(pfres['recovery_periods']),
                            rtol=period_tolerance
                    ))) and np.isfinite(rp)):
                        # populate the recovery periods, pfmethods, and magcols
                        pfres['recovery_periods'].append(rp)
                        pfres['recovery_pfmethods'].append(pfm)
                        pfres['recovery_magcols'].append(magcol)
                        # normalize the periodogram peak value to between
                        # 0 and 1 so we can put in the results of multiple
                        # periodfinders on one scale
                        if pfm == 'pdm':
                            this_lspval = (
                                np.max(fakepf[magcol][pfm]['lspvals']) -
                                fakepf[magcol][pfm]['nbestlspvals'][rpi]
                            )
                        else:
                            this_lspval = (
                                fakepf[magcol][pfm]['nbestlspvals'][rpi] /
                                np.max(fakepf[magcol][pfm]['lspvals'])
                            )
                        # add the normalized lspval to the outdict for
                        # this object as well. later, we'll use this to
                        # construct a periodogram for objects that were actually
                        # not variables
                        pfres['recovery_lspvals'].append(this_lspval)
    # convert the recovery_* lists to arrays
    pfres['recovery_periods'] = np.array(pfres['recovery_periods'])
    pfres['recovery_lspvals'] = np.array(pfres['recovery_lspvals'])
    pfres['recovery_pfmethods'] = np.array(pfres['recovery_pfmethods'])
    pfres['recovery_magcols'] = np.array(pfres['recovery_magcols'])
    #
    # now figure out recovery status
    #
    # if this is an actual periodic variable, characterize the recovery
    if (actual_vartype and
        actual_vartype in PERIODIC_VARTYPES and
        np.isfinite(actual_varperiod)):
        if pfres['recovery_periods'].size > 0:
            for ri in range(pfres['recovery_periods'].size):
                # np.asscalar was removed in NumPy 1.23; float() is the
                # documented replacement for extracting a Python scalar
                pfres['recovery_pdiff'].append(pfres['recovery_periods'][ri] -
                                               float(actual_varperiod))
                # get the alias types
                pfres['recovery_status'].append(
                    check_periodrec_alias(actual_varperiod,
                                          pfres['recovery_periods'][ri],
                                          tolerance=period_tolerance)
                )
            # turn the recovery_pdiff/status lists into arrays
            pfres['recovery_status'] = np.array(pfres['recovery_status'])
            pfres['recovery_pdiff'] = np.array(pfres['recovery_pdiff'])
            # find the best recovered period and its status
            rec_absdiff = np.abs(pfres['recovery_pdiff'])
            best_recp_ind = rec_absdiff == rec_absdiff.min()
            pfres['best_recovered_period'] = (
                pfres['recovery_periods'][best_recp_ind]
            )
            pfres['best_recovered_pfmethod'] = (
                pfres['recovery_pfmethods'][best_recp_ind]
            )
            pfres['best_recovered_magcol'] = (
                pfres['recovery_magcols'][best_recp_ind]
            )
            pfres['best_recovered_status'] = (
                pfres['recovery_status'][best_recp_ind]
            )
            pfres['best_recovered_pdiff'] = (
                pfres['recovery_pdiff'][best_recp_ind]
            )
        else:
            LOGWARNING(
                'no finite periods recovered from period-finding for %s' %
                fakepfpkl
            )
            pfres['recovery_status'] = np.array(['no_finite_periods_recovered'])
            pfres['recovery_pdiff'] = np.array([np.nan])
            pfres['best_recovered_period'] = np.array([np.nan])
            pfres['best_recovered_pfmethod'] = np.array([],dtype=np.unicode_)
            pfres['best_recovered_magcol'] = np.array([],dtype=np.unicode_)
            pfres['best_recovered_status'] = np.array([],dtype=np.unicode_)
            pfres['best_recovered_pdiff'] = np.array([np.nan])
    # if this is not actually a variable, get the recovered period,
    # etc. anyway. this way, we can see what we need to look out for and avoid
    # when getting these values for actual objects
    else:
        pfres['recovery_status'] = np.array(
            ['not_variable']*pfres['recovery_periods'].size
        )
        pfres['recovery_pdiff'] = np.zeros(pfres['recovery_periods'].size)
        pfres['best_recovered_period'] = np.array([np.nan])
        pfres['best_recovered_pfmethod'] = np.array([],dtype=np.unicode_)
        pfres['best_recovered_magcol'] = np.array([],dtype=np.unicode_)
        pfres['best_recovered_status'] = np.array(['not_variable'])
        pfres['best_recovered_pdiff'] = np.array([np.nan])
    return pfres
|
Recovers the periodic variable status/info for the simulated PF result.
- Uses simbasedir and the lcfbasename stored in fakepfpkl to figure out
where the LC for this object is.
- Gets the actual_varparams, actual_varperiod, actual_vartype,
actual_varamplitude elements from the LC.
- Figures out if the current objectid is a periodic variable (using
actual_vartype).
- If it is a periodic variable, gets the canonical period assigned to it.
- Checks if the period was recovered in any of the five best periods
reported by any of the period-finders, checks if the period recovered was
a harmonic of the period.
- Returns the objectid, actual period and vartype, recovered period, and
recovery status.
Parameters
----------
fakepfpkl : str
This is a periodfinding-<objectid>.pkl[.gz] file produced in the
`simbasedir/periodfinding` subdirectory after `run_periodfinding` above
is done.
simbasedir : str
The base directory where all of the fake LCs and period-finding results
are.
period_tolerance : float
The maximum difference that this function will consider between an
actual period (or its aliases) and a recovered period to consider it as
as a 'recovered' period.
Returns
-------
dict
Returns a dict of period-recovery results.
|
def to_json(self) -> dict:
    '''export the Deck object to json-ready format

    Returns a shallow copy of the instance attributes with the
    ``p2th_wif`` property included. Copying first avoids the original
    side effect of writing ``p2th_wif`` into the instance's ``__dict__``.
    '''
    data = dict(self.__dict__)
    data['p2th_wif'] = self.p2th_wif
    return data
|
export the Deck object to json-ready format
|
def _psed(text,
before,
after,
limit,
flags):
'''
Does the actual work for file.psed, so that single lines can be passed in
'''
atext = text
if limit:
limit = re.compile(limit)
comps = text.split(limit)
atext = ''.join(comps[1:])
count = 1
if 'g' in flags:
count = 0
flags = flags.replace('g', '')
aflags = 0
for flag in flags:
aflags |= RE_FLAG_TABLE[flag]
before = re.compile(before, flags=aflags)
text = re.sub(before, after, atext, count=count)
return text
|
Does the actual work for file.psed, so that single lines can be passed in
|
async def get_soundfield(self) -> Setting:
    """Get the current sound field settings.

    Only the first entry of the service response is converted, so a
    single ``Setting`` is returned (the previous ``List[Setting]``
    annotation was wrong).
    """
    res = await self.services["audio"]["getSoundSettings"]({"target": "soundField"})
    return Setting.make(**res[0])
|
Get the current sound field settings.
|
def _numpy_index_by_percentile(self, data, percentile):
""" Calculate percentile of numpy stack and return the index of the chosen pixel.
numpy percentile function is used with one of the following interpolations {'linear', 'lower', 'higher',
'midpoint', 'nearest'}
"""
data_perc_low = np.nanpercentile(data, percentile, axis=0, interpolation=self.interpolation)
indices = np.empty(data_perc_low.shape, dtype=np.uint8)
indices[:] = np.nan
abs_diff = np.where(np.isnan(data_perc_low), np.inf, abs(data - data_perc_low))
indices = np.where(np.isnan(data_perc_low), self.max_index, np.nanargmin(abs_diff, axis=0))
return indices
|
Calculate percentile of numpy stack and return the index of the chosen pixel.
numpy percentile function is used with one of the following interpolations {'linear', 'lower', 'higher',
'midpoint', 'nearest'}
|
def _from_dict(cls, _dict):
"""Initialize a QueryRelationsResponse object from a json dictionary."""
args = {}
if 'relations' in _dict:
args['relations'] = [
QueryRelationsRelationship._from_dict(x)
for x in (_dict.get('relations'))
]
return cls(**args)
|
Initialize a QueryRelationsResponse object from a json dictionary.
|
def getUnitCost(self, CorpNum):
    """ Look up the per-message cost of sending a fax.
        args
            CorpNum : Popbill member company registration number
        return
            unit cost as int (the API value is truncated with ``int()``
            before being returned)
        raise
            PopbillException
    """
    result = self._httpget('/FAX/UnitCost', CorpNum)
    return int(result.unitCost)
|
팩스 전송 단가 확인
args
CorpNum : 팝빌회원 사업자번호
return
전송 단가 by float
raise
PopbillException
|
def cli(env, account_id):
    """List origin pull mappings."""
    manager = SoftLayer.CDNManager(env.client)
    table = formatting.Table(['id', 'media_type', 'cname', 'origin_url'])
    for origin in manager.get_origins(account_id):
        # cname is optional on an origin record; show a blank when missing.
        row = [
            origin['id'],
            origin['mediaType'],
            origin.get('cname', formatting.blank()),
            origin['originUrl'],
        ]
        table.add_row(row)
    env.fout(table)
|
List origin pull mappings.
|
def create_temporaries(self, r=True, f=True):
    """Allocate and store reusable temporaries.

    Any already existing temporaries are replaced.

    Parameters
    ----------
    r : bool, optional
        Allocate a temporary for the real space.
    f : bool, optional
        Allocate a temporary for the frequency space.

    Notes
    -----
    Clear the temporaries once the transform is no longer used in
    order to save memory.

    See Also
    --------
    clear_temporaries
    clear_fftw_plan : can also hold references to the temporaries
    """
    # For the inverse transform, the *range* is real space and the
    # *domain* is frequency space; the forward transform is the reverse.
    if isinstance(self, FourierTransformInverse):
        rspace, fspace = self.range, self.domain
    else:
        rspace, fspace = self.domain, self.range
    if r:
        self._tmp_r = rspace.element().asarray()
    if f:
        self._tmp_f = fspace.element().asarray()
|
Allocate and store reusable temporaries.
Existing temporaries are overridden.
Parameters
----------
r : bool, optional
Create temporary for the real space
f : bool, optional
Create temporary for the frequency space
Notes
-----
To save memory, clear the temporaries when the transform is
no longer used.
See Also
--------
clear_temporaries
clear_fftw_plan : can also hold references to the temporaries
|
def process_header(self, headers):
    """Ignore the incoming header; use the destination table's column names.

    The first destination column is dropped (assumed to be the surrogate
    key -- TODO confirm against the destination schema).
    """
    dest_columns = self.source.dest_table.columns
    return [column.name for column in dest_columns][1:]
|
Ignore the incomming header and replace it with the destination header
|
def _upload_folder_recursive(local_folder,
                             parent_folder_id,
                             leaf_folders_as_items=False,
                             reuse_existing=False):
    """
    Recursively upload a folder and all of its descendants.
    :param local_folder: full path to local folder to be uploaded
    :type local_folder: string
    :param parent_folder_id: id of parent folder on the Midas Server instance,
        where the new folder will be added
    :type parent_folder_id: int | long
    :param leaf_folders_as_items: (optional) whether leaf folders should have
        all files uploaded as single items
    :type leaf_folders_as_items: bool
    :param reuse_existing: (optional) whether to accept an existing item of the
        same name in the same location, or create a new one instead
    :type reuse_existing: bool
    """
    # Leaf folders may be collapsed into a single item when requested.
    if leaf_folders_as_items and _has_only_files(local_folder):
        print('Creating item from {0}'.format(local_folder))
        _upload_folder_as_item(local_folder, parent_folder_id, reuse_existing)
        return
    # No existence check needed: creating an existing folder simply
    # returns its id.
    print('Creating folder from {0}'.format(local_folder))
    new_folder_id = _create_or_reuse_folder(local_folder, parent_folder_id,
                                            reuse_existing)
    for entry in sorted(os.listdir(local_folder)):
        full_entry = os.path.join(local_folder, entry)
        if os.path.islink(full_entry):
            # mirror os.walk, which skips symlinks by default
            continue
        if os.path.isdir(full_entry):
            _upload_folder_recursive(full_entry,
                                     new_folder_id,
                                     leaf_folders_as_items,
                                     reuse_existing)
        else:
            print('Uploading item from {0}'.format(full_entry))
            _upload_as_item(entry,
                            new_folder_id,
                            full_entry,
                            reuse_existing)
|
Function to recursively upload a folder and all of its descendants.
:param local_folder: full path to local folder to be uploaded
:type local_folder: string
:param parent_folder_id: id of parent folder on the Midas Server instance,
where the new folder will be added
:type parent_folder_id: int | long
:param leaf_folders_as_items: (optional) whether leaf folders should have
all files uploaded as single items
:type leaf_folders_as_items: bool
:param reuse_existing: (optional) whether to accept an existing item of the
same name in the same location, or create a new one instead
:type reuse_existing: bool
|
async def i2c_read_request(self, address, register, number_of_bytes,
                           read_type, cb=None, cb_type=None):
    """
    Request a read from an i2c device.

    Results are retrieved later with i2c_get_read_data(), or delivered to
    the optional callback as they arrive. Devices that require transmission
    to be restarted (e.g. the MMA8452Q accelerometer) should OR the
    read_type with Constants.I2C_END_TX_MASK.

    :param address: i2c device address
    :param register: register number (may be zero)
    :param number_of_bytes: number of bytes expected in the reply
    :param read_type: I2C_READ or I2C_READ_CONTINUOUSLY, optionally OR'ed
        with Constants.I2C_END_TX_MASK
    :param cb: optional callback that receives the i2c data
    :param cb_type: Constants.CB_TYPE_DIRECT (direct call) or
        Constants.CB_TYPE_ASYNCIO (asyncio coroutine)
    :returns: No return value.
    """
    # Register the address on first use; later requests reuse the entry.
    self.i2c_map.setdefault(address, {'value': None, 'callback': cb,
                                      'callback_type': cb_type})
    # Firmata packs 14-bit values as two 7-bit bytes, LSB first.
    payload = [address, read_type,
               register & 0x7f, (register >> 7) & 0x7f,
               number_of_bytes & 0x7f, (number_of_bytes >> 7) & 0x7f]
    await self._send_sysex(PrivateConstants.I2C_REQUEST, payload)
|
This method requests the read of an i2c device. Results are retrieved
by a call to i2c_get_read_data(). or by callback.
If a callback method is provided, when data is received from the
device it will be sent to the callback method.
Some devices require that transmission be restarted
(e.g. MMA8452Q accelerometer).
Use Constants.I2C_READ | Constants.I2C_END_TX_MASK for those cases.
:param address: i2c device address
:param register: register number (can be set to zero)
:param number_of_bytes: number of bytes expected to be returned
:param read_type: I2C_READ or I2C_READ_CONTINUOUSLY. I2C_END_TX_MASK
may be OR'ed when required
:param cb: Optional callback function to report i2c data as a
result of read command
:param cb_type: Constants.CB_TYPE_DIRECT = direct call or
Constants.CB_TYPE_ASYNCIO = asyncio coroutine
:returns: No return value.
|
def write_config_file(self, params, path):
    """ Write a config file for this single exp into the folder ``path``.

    :param params: dict of option values; ``params['name']`` becomes the
        section name and every other key/value pair is written as an
        option of that section
    :param path: directory in which ``experiment.cfg`` is created
    """
    cfgp = ConfigParser()
    section = params['name']
    cfgp.add_section(section)
    for key, value in params.items():
        if key == 'name':
            continue
        cfgp.set(section, key, value)
    # `with` guarantees the handle is closed even if writing raises
    # (the original leaked the file object on error).
    with open(os.path.join(path, 'experiment.cfg'), 'w') as f:
        cfgp.write(f)
|
write a config file for this single exp in the folder path.
|
def text_bounding_box(self, size_pt, text):
    """
    Return the bounding box of the given text
    at the given font size.
    :param int size_pt: the font size in points (only 12 and 18 supported)
    :param string text: the text
    :rtype: tuple (width, height)
    :raises ValueError: if ``size_pt`` is not a supported size
    """
    # Per-size glyph metrics: height, glyph width, inter-glyph spacing.
    metrics = {
        12: {"h": 9, "w_digit": 5, "w_space": 2},
        18: {"h": 14, "w_digit": 9, "w_space": 2},
    }
    try:
        mult = metrics[size_pt]
    except KeyError:
        # The original left `mult` unbound for other sizes, producing an
        # opaque UnboundLocalError; fail with a clear message instead.
        raise ValueError("unsupported font size: {0}".format(size_pt))
    num_chars = len(text)
    width = num_chars * mult["w_digit"] + (num_chars - 1) * mult["w_space"] + 1
    return (width, mult["h"])
|
Return the bounding box of the given text
at the given font size.
:param int size_pt: the font size in points
:param string text: the text
:rtype: tuple (width, height)
|
def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
    """Return whether all elements are True over requested axis
    Note:
        If axis=None or axis=0, this call applies df.all(axis=1)
        to the transpose of df.
    """
    if axis is not None:
        axis = self._get_axis_number(axis)
        if bool_only and axis == 0:
            if hasattr(self, "dtype"):
                # A Series has a single dtype; bool_only makes no sense.
                # type(self).__name__ -- instances do not carry __name__.
                raise NotImplementedError(
                    "{}.{} does not implement numeric_only.".format(
                        type(self).__name__, "all"
                    )
                )
            # np.bool_ is the numpy boolean scalar type; the `np.bool`
            # alias was deprecated in 1.20 and removed in NumPy 1.24.
            data_for_compute = self[self.columns[self.dtypes == np.bool_]]
            return data_for_compute.all(
                axis=axis, bool_only=False, skipna=skipna, level=level, **kwargs
            )
        return self._reduce_dimension(
            self._query_compiler.all(
                axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
            )
        )
    else:
        if bool_only:
            raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
        # Reduce to a scalar if axis is None.
        result = self._reduce_dimension(
            self._query_compiler.all(
                axis=0, bool_only=bool_only, skipna=skipna, level=level, **kwargs
            )
        )
        if isinstance(result, BasePandasDataset):
            return result.all(
                axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
            )
        return result
|
Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df.
|
def get_new_broks(self):
    """Collect fresh broks from every satellite we manage.
    :return: None
    """
    all_satellites = [self.schedulers, self.pollers,
                      self.reactionners, self.receivers]
    for satellites in all_satellites:
        for link in list(satellites.values()):
            logger.debug("Getting broks from %s", link)
            t_start = time.time()
            try:
                new_broks = link.get_broks(self.name)
            except LinkError:
                logger.warning("Daemon %s connection failed, I could not get the broks!",
                               link)
                continue
            if not new_broks:
                continue
            elapsed = time.time() - t_start
            logger.debug("Got %d Broks from %s in %s",
                         len(new_broks), link.name, elapsed)
            statsmgr.gauge('get-new-broks-count.%s'
                           % (link.name), len(new_broks))
            statsmgr.timer('get-new-broks-time.%s'
                           % (link.name), elapsed)
            # Tag each brok with the satellite it came from before
            # appending to the global list.
            for brok in new_broks:
                brok.instance_id = link.instance_id
            self.external_broks.extend(new_broks)
|
Get new broks from our satellites
:return: None
|
def pytwis_clt():
    """The main routine of this command-line tool.

    Connects to the Redis backend, then loops forever reading commands
    from stdin and dispatching them, until `exit`/`quit` is entered.
    Returns 0 on normal exit, -1 if the backend connection failed.
    """
    epilog = '''After launching `pytwis_clt.py`, you will be able to use the following commands:
    * Register a new user:
        127.0.0.1:6379> register {username} {password}
    * Log into a user:
        127.0.0.1:6379> login {username} {password}
    * Log out of a user:
        127.0.0.1:6379> logout
    * Change the password:
        127.0.0.1:6379> changepwd {old_password} {new_password} {confirmed_new_password}
    * Get the profile of the current user:
        127.0.0.1:6379> userprofile
    * Post a tweet:
        127.0.0.1:6379> post {tweet}
    * Follow a user:
        127.0.0.1:6379> follow {followee_username}
    * Unfollow a user:
        127.0.0.1:6379> unfollow {followee_username}
    * Get the follower list:
        127.0.0.1:6379> followers
    * Get the following list:
        127.0.0.1:6379> followings
    * Get the timeline:
        127.0.0.1:6379> timeline
        127.0.0.1:6379> timeline {max_tweet_count}
    Note that if a user is logged in, `timeline` will return the user timeline;
    otherwise `timeline` will return the general timeline.
    * Get the tweets posted by a user:
        127.0.0.1:6379> tweetsby
        127.0.0.1:6379> tweetsby {username}
        127.0.0.1:6379> tweetsby {username} {max_tweet_count}
    Note that if no username is given, `tweetsby` will return the tweets posted
    by the currently logged-in user.
    * Exit the program:
        127.0.0.1:6379> exit
        127.0.0.1:6379> quit
    '''
    twis, prompt = get_pytwis(epilog)
    # Could not connect to the backend; nothing more to do.
    if twis is None:
        return -1
    # One-element list so the command processor can update the
    # authentication secret in place across iterations.
    auth_secret = ['']
    while True:
        try:
            arg_dict = pytwis_command_parser(
                input('Please enter a command '
                      '(register, login, logout, changepwd, userprofile, post, '
                      'follow, unfollow, followers, followings, timeline, tweetsby):\n{}> '\
                .format(prompt)))
            if arg_dict[pytwis_clt_constants.ARG_COMMAND] == pytwis_clt_constants.CMD_EXIT \
                or arg_dict[pytwis_clt_constants.ARG_COMMAND] == pytwis_clt_constants.CMD_QUIT:
                # Log out of the current user before exiting.
                if auth_secret[0]:
                    pytwis_command_processor(twis, auth_secret,
                                             {pytwis_clt_constants.ARG_COMMAND:
                                              pytwis_clt_constants.CMD_LOGOUT})
                print('pytwis is exiting.')
                return 0
        except ValueError as excep:
            # Malformed command: report it and prompt again.
            print('Invalid pytwis command: {}'.format(str(excep)),
                  file=sys.stderr)
            continue
        pytwis_command_processor(twis, auth_secret, arg_dict)
|
The main routine of this command-line tool.
|
def _get_field(self, field_name, default=None):
"""
Fetches a field from extras, and returns it. This is some Airflow
magic. The grpc hook type adds custom UI elements
to the hook page, which allow admins to specify scopes, credential pem files, etc.
They get formatted as shown below.
"""
full_field_name = 'extra__grpc__{}'.format(field_name)
if full_field_name in self.extras:
return self.extras[full_field_name]
else:
return default
|
Fetches a field from extras, and returns it. This is some Airflow
magic. The grpc hook type adds custom UI elements
to the hook page, which allow admins to specify scopes, credential pem files, etc.
They get formatted as shown below.
|
def connect(self, slot):
    """
    Connect the signal to any callable object.
    """
    if not callable(slot):
        raise ValueError("Connection to non-callable '%s' object failed" % slot.__class__.__name__)
    # Partials and lambdas are stored strongly: there may be no other
    # reference keeping them alive. The '<' name test ('<lambda>') is the
    # py2/py3-compatible lambda check.
    if isinstance(slot, partial) or '<' in slot.__name__:
        if slot not in self._slots:
            self._slots.append(slot)
        return
    if inspect.ismethod(slot):
        # Bound method: key a WeakKeyDictionary on the instance so the
        # signal does not keep the receiver object alive.
        owner = slot.__self__
        entry = weakref.WeakKeyDictionary()
        entry[owner] = slot.__func__
        if entry not in self._slots:
            self._slots.append(entry)
        return
    # Plain function: store as a weak reference.
    ref = weakref.ref(slot)
    if ref not in self._slots:
        self._slots.append(ref)
|
Connects the signal to any callable object
|
def get_ip_interface_output_interface_vrf(self, **kwargs):
    """Auto Generated Code

    Builds the get-ip-interface/output/interface XML payload with the
    interface-type, interface-name and vrf leaves, then hands the tree
    to the callback (default: ``self._callback``).
    """
    config = ET.Element("get_ip_interface")
    output = ET.SubElement(config, "output")
    interface = ET.SubElement(output, "interface")
    # Leaf order matters for NETCONF payload validation.
    leaves = (("interface-type", kwargs.pop('interface_type')),
              ("interface-name", kwargs.pop('interface_name')),
              ("vrf", kwargs.pop('vrf')))
    for tag, value in leaves:
        ET.SubElement(interface, tag).text = value
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
Auto Generated Code
|
def entity(self):
    """
    Returns the object this grant is for. The objects type depends on the
    type of object this grant is applied to, and the object returned is
    not populated (accessing its attributes will trigger an api request).
    :returns: This grant's entity
    :rtype: Linode, NodeBalancer, Domain, StackScript, Volume, or Longview
    """
    # Grants only exist for direct Base subclasses; derived types are
    # never grant targets, so reject anything else.
    is_grantable = issubclass(self.cls, Base) and not issubclass(self.cls, DerivedBase)
    if not is_grantable:
        raise ValueError("Cannot get entity for non-base-class {}".format(self.cls))
    return self.cls(self._client, self.id)
|
Returns the object this grant is for. The objects type depends on the
type of object this grant is applied to, and the object returned is
not populated (accessing its attributes will trigger an api request).
:returns: This grant's entity
:rtype: Linode, NodeBalancer, Domain, StackScript, Volume, or Longview
|
def list_namespaced_service_account(self, namespace, **kwargs):
    """
    list or watch objects of kind ServiceAccount

    Synchronous by default; pass async_req=True for an asynchronous
    request, in which case the request thread is returned:
    >>> thread = api.list_namespaced_service_account(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: continue token from a previous chunked list call
    :param str field_selector: restrict results by object fields
    :param str label_selector: restrict results by object labels
    :param int limit: maximum number of responses per chunk
    :param str resource_version: show changes after this resource version
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream add/update/remove notifications
    :return: V1ServiceAccountList, or the request thread when called
        asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info variant already handles both modes: with
    # async_req set it returns the request thread, otherwise the data.
    return self.list_namespaced_service_account_with_http_info(namespace, **kwargs)
|
list or watch objects of kind ServiceAccount
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_service_account(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ServiceAccountList
If the method is called asynchronously,
returns the request thread.
|
def DeserializeMessage(self, response_type, data):
    """Deserialize `data` into an instance of `response_type`.

    Raises InvalidDataFromServerError when the payload cannot be decoded
    as the expected message type.
    """
    try:
        return encoding.JsonToMessage(response_type, data)
    except (exceptions.InvalidDataFromServerError,
            messages.ValidationError, ValueError) as exc:
        raise exceptions.InvalidDataFromServerError(
            'Error decoding response "%s" as type %s: %s' % (
                data, response_type.__name__, exc))
|
Deserialize the given data as method_config.response_type.
|
def _update_prx(self):
    """Recompute `prx` from `phi`, `pi_codon`, and `beta`.

    Each codon's unnormalized weight is the product of its three
    nucleotide frequencies (`phi`) times `pi_codon` raised to `beta`;
    rows are then normalized per site with strict floating-point error
    checking.
    """
    nt_weight = scipy.ones(N_CODON, dtype='float')
    for pos in range(3):
        for nt in range(N_NT):
            # Multiply in phi for every codon whose nucleotide at this
            # position is `nt`.
            nt_weight[CODON_NT[pos][nt]] *= self.phi[nt]
    self.prx = (self.pi_codon**self.beta) * nt_weight
    # Normalize each site's row to sum to one; raise on any FP anomaly.
    with scipy.errstate(divide='raise', under='raise', over='raise',
                        invalid='raise'):
        for site in range(self.nsites):
            self.prx[site] /= self.prx[site].sum()
|
Update `prx` from `phi`, `pi_codon`, and `beta`.
|
def _read_next_line(self):
    """Advance the buffer to the next stream line; return the old line."""
    current, self._line = self._line, self.stream.readline()
    return current
|
Read next line store in self._line and return old one
|
def summary(model, input_size):
    """Print a layer-by-layer summary of a PyTorch model.

    Registers a forward hook on every leaf module, runs a dummy forward
    pass with random input(s) of batch size 2, and prints each hooked
    layer's output shape and parameter count followed by totals.

    Args:
        model: the ``nn.Module`` to summarize; moved to GPU when CUDA is
            available, otherwise to CPU.
        input_size: one shape tuple (without the batch dimension), or a
            list/tuple of shape tuples for multi-input networks.

    Returns:
        None; the summary is written to stdout.
    """
    def register_hook(module):
        def hook(module, input, output):
            class_name = str(module.__class__).split('.')[-1].split("'")[0]
            module_idx = len(summary)
            m_key = '%s-%i' % (class_name, module_idx + 1)
            summary[m_key] = OrderedDict()
            summary[m_key]['input_shape'] = list(input[0].size())
            # Batch dimension is reported as -1 (variable).
            summary[m_key]['input_shape'][0] = -1
            if isinstance(output, (list, tuple)):
                summary[m_key]['output_shape'] = [[-1] + list(o.size())[1:] for o in output]
            else:
                summary[m_key]['output_shape'] = list(output.size())
                summary[m_key]['output_shape'][0] = -1
            params = 0
            if hasattr(module, 'weight') and hasattr(module.weight, 'size'):
                params += torch.prod(torch.LongTensor(list(module.weight.size())))
                summary[m_key]['trainable'] = module.weight.requires_grad
            if hasattr(module, 'bias') and hasattr(module.bias, 'size'):
                params += torch.prod(torch.LongTensor(list(module.bias.size())))
            summary[m_key]['nb_params'] = params
        # Hook only leaf modules; hooking containers (or the model itself)
        # would double-count parameters.
        if (not isinstance(module, (nn.Sequential, nn.ModuleList))
                and module is not model):
            hooks.append(module.register_forward_hook(hook))

    if torch.cuda.is_available():
        dtype = torch.cuda.FloatTensor
        model = model.cuda()
    else:
        dtype = torch.FloatTensor
        model = model.cpu()
    # Normalize to a list of dummy inputs (batch size 2 avoids batchnorm
    # degenerate-batch issues).
    if isinstance(input_size[0], (list, tuple)):
        inputs = [Variable(torch.rand(2, *size)).type(dtype) for size in input_size]
    else:
        inputs = [Variable(torch.rand(2, *input_size)).type(dtype)]
    summary = OrderedDict()
    hooks = []
    model.apply(register_hook)
    # BUG FIX: multi-input networks need the inputs unpacked into separate
    # positional arguments; the previous code passed the whole list as a
    # single argument, which broke any model taking more than one input.
    model(*inputs)
    # Hooks are only needed for the single dummy pass.
    for hook_handle in hooks:
        hook_handle.remove()
    print('----------------------------------------------------------------')
    line_new = '{:>20} {:>25} {:>15}'.format('Layer (type)', 'Output Shape', 'Param #')
    print(line_new)
    print('================================================================')
    total_params = 0
    trainable_params = 0
    for layer in summary:
        line_new = '{:>20} {:>25} {:>15}'.format(layer, str(summary[layer]['output_shape']),
                                                 '{0:,}'.format(summary[layer]['nb_params']))
        total_params += summary[layer]['nb_params']
        if summary[layer].get('trainable', False):
            trainable_params += summary[layer]['nb_params']
        print(line_new)
    print('================================================================')
    print('Total params: {0:,}'.format(total_params))
    print('Trainable params: {0:,}'.format(trainable_params))
    print('Non-trainable params: {0:,}'.format(total_params - trainable_params))
    print('----------------------------------------------------------------')
|
Print summary of the model
|
def can_group_commands(command, next_command):
    """
    Return True when `command` and `next_command` can be batched together.

    Grouping requires both commands to be multi-capable (``get``, ``set``,
    ``delete``), to share the same command name, the same grouped
    arguments (key, or key/value), and identical keyword arguments
    (e.g. key_prefix, or timeout on set).
    """
    if next_command is None:
        return False
    name = command.get_name()
    # TODO: support multi commands
    if name not in ('get', 'set', 'delete'):
        return False
    if name != next_command.get_name():
        return False
    # The shared args (key, or key/value) must match exactly.
    if grouped_args_for_command(command) != grouped_args_for_command(next_command):
        return False
    # Keyword arguments (e.g. key_prefix, timeout on set) must also match.
    return command.get_kwargs() == next_command.get_kwargs()
|
Returns a boolean representing whether these commands can be
grouped together or not.
A few things are taken into account for this decision:
For ``set`` commands:
- Are all arguments other than the key/value the same?
For ``delete`` and ``get`` commands:
- Are all arguments other than the key the same?
|
def add_handler(self, type, actions, **kwargs):
    """
    Register an event handler with this session and return it.

    type - pygame event type (pygame.QUIT, pygame.KEYUP, etc.).
    actions - callable(s) invoked when a matching event is received; a
        list of callables lets secondary actions (such as the down arrow
        in the List) run alongside existing ones.
    kwargs - attribute constraints that must all be satisfied for the
        event to match; each keyword is compared against the instance
        variables of the incoming event, and each value may be a lambda
        that must evaluate to True for the match to succeed.
    Example:
        session.add_handler(pygame.QUIT, session.do_quit)
        session.add_handler(pygame.KEYDOWN, lambda: ao2.speak("You pressed the enter key."), key = pygame.K_RETURN)
    """
    handler = Handler(self, type, kwargs, actions)
    self._events.setdefault(type, []).append(handler)
    return handler
|
Add an event handler to be processed by this session.
type - The type of the event (pygame.QUIT, pygame.KEYUP ETC).
actions - The methods which should be called when an event matching this specification is received.
more than one action can be tied to a single event. This allows for secondary actions to occur alongside already existing actions, such as the down arrow in the List.
You can either pass the actions or action as a single parameter or as a list.
kwargs - An arbitrary number of parameters which must be satisfied in order for the event to match.
The keywords are directly matched with the instance variables found in the current event
Each value for kwargs can optionally be a lambda which must evaluate to True in order for the match to work.
Example:
session.add_handler(pygame.QUIT, session.do_quit)
session.add_handler(pygame.KEYDOWN, lambda: ao2.speak("You pressed the enter key."), key = pygame.K_RETURN)
|
def literal_struct(cls, elems):
    """
    Construct a literal structure constant made of the given members.
    """
    member_types = [member.type for member in elems]
    return cls(types.LiteralStructType(member_types), elems)
|
Construct a literal structure constant made of the given members.
|
def remove_repeat_coordinates(x, y, z):
    r"""Remove all x, y, and z where (x,y) is repeated and keep the first occurrence only.
    Will not destroy original values.
    Parameters
    ----------
    x: array_like
        x coordinate
    y: array_like
        y coordinate
    z: array_like
        observation value
    Returns
    -------
    x, y, z
        List of coordinate observation pairs without
        repeated coordinates.
    """
    # Track seen coordinates in a set: O(1) membership instead of the
    # previous O(n) list scan per point (O(n^2) overall).
    seen = set()
    coords = []
    variable = []
    for (x_, y_, t_) in zip(x, y, z):
        if (x_, y_) not in seen:
            seen.add((x_, y_))
            coords.append((x_, y_))
            variable.append(t_)
    # Guard empty input: slicing an empty 1-d array with [:, 0] would raise.
    if not coords:
        return np.array([]), np.array([]), np.array(variable)
    coords = np.array(coords)
    return coords[:, 0], coords[:, 1], np.array(variable)
|
r"""Remove all x, y, and z where (x,y) is repeated and keep the first occurrence only.
Will not destroy original values.
Parameters
----------
x: array_like
x coordinate
y: array_like
y coordinate
z: array_like
observation value
Returns
-------
x, y, z
List of coordinate observation pairs without
repeated coordinates.
|
def compute_best_path(local_asn, path1, path2):
    """Compare the given paths and return ``(best_path, reason)``.

    Parameters:
    -`local_asn`: asn of local bgpspeaker
    -`path1`: first path to compare
    -`path2`: second path to compare

    The BGP tie-breaking rules are applied in order until one of them
    produces a winner:
    1. reachable next hop; 2. highest weight; 3. highest local
    preference; 4. locally originated routes (network, redistributed, or
    aggregated) over received routes; 5. shortest AS-path; 6. origin
    (IGP preferred over EGP, EGP over Incomplete); 7. lowest MED;
    8. EBGP-learned over IBGP-learned; 9. lowest IGP cost to the next
    hop; 10. lowest peer BGP router ID; 11. shortest CLUSTER_LIST.

    Returns ``(None, BPR_UNKNOWN)`` when no rule can decide.
    Assumes paths from NC have source equal to None.
    """
    # Ordered (reason, comparator) table; lambdas defer each comparison
    # so later rules are evaluated only when earlier ones tie.
    tie_breakers = (
        (BPR_REACHABLE_NEXT_HOP, lambda: _cmp_by_reachable_nh(path1, path2)),
        (BPR_HIGHEST_WEIGHT, lambda: _cmp_by_highest_wg(path1, path2)),
        (BPR_LOCAL_PREF, lambda: _cmp_by_local_pref(path1, path2)),
        (BPR_LOCAL_ORIGIN, lambda: _cmp_by_local_origin(path1, path2)),
        (BPR_ASPATH, lambda: _cmp_by_aspath(path1, path2)),
        (BPR_ORIGIN, lambda: _cmp_by_origin(path1, path2)),
        (BPR_MED, lambda: _cmp_by_med(path1, path2)),
        (BPR_ASN, lambda: _cmp_by_asn(local_asn, path1, path2)),
        (BPR_IGP_COST, lambda: _cmp_by_igp_cost(path1, path2)),
        (BPR_ROUTER_ID, lambda: _cmp_by_router_id(local_asn, path1, path2)),
        (BPR_CLUSTER_LIST, lambda: _cmp_by_cluster_list(path1, path2)),
    )
    for reason, compare in tie_breakers:
        winner = compare()
        if winner is not None:
            return winner, reason
    return None, BPR_UNKNOWN
|
Compares given paths and returns best path.
Parameters:
-`local_asn`: asn of local bgpspeaker
-`path1`: first path to compare
-`path2`: second path to compare
Best path processing will involve following steps:
1. Select a path with a reachable next hop.
2. Select the path with the highest weight.
3. If path weights are the same, select the path with the highest
local preference value.
4. Prefer locally originated routes (network routes, redistributed
routes, or aggregated routes) over received routes.
5. Select the route with the shortest AS-path length.
6. If all paths have the same AS-path length, select the path based
on origin: IGP is preferred over EGP; EGP is preferred over
Incomplete.
7. If the origins are the same, select the path with lowest MED
value.
8. If the paths have the same MED values, select the path learned
via EBGP over one learned via IBGP.
9. Select the route with the lowest IGP cost to the next hop.
10. Select the route received from the peer with the lowest BGP
router ID.
11. Select the route received from the peer with the shorter
CLUSTER_LIST length.
Returns None if best-path among given paths cannot be computed else best
path.
Assumes paths from NC has source equal to None.
|
def getFaxStatsSessions(self):
    """Query Asterisk Manager Interface for Fax Stats.
    CLI Command - fax show sessions
    @return: Dictionary of fax stats, or None when fax support is absent.
    """
    if not self.hasFax():
        return None
    info_dict = {}
    info_dict['total'] = 0
    fax_types = ('g.711', 't.38')
    fax_operations = ('send', 'recv')
    fax_states = ('uninitialized', 'initialized', 'open',
                  'active', 'inactive', 'complete', 'unknown',)
    info_dict['type'] = dict([(k, 0) for k in fax_types])
    info_dict['operation'] = dict([(k, 0) for k in fax_operations])
    info_dict['state'] = dict([(k, 0) for k in fax_states])
    cmdresp = self.executeCommand('fax show sessions')
    # Output is split into blank-line-separated sections; the second
    # section holds the session table (header line + one row per session).
    sections = cmdresp.strip().split('\n\n')
    if len(sections) >= 3:
        # BUG FIX: sections[1] is a string, so the old `sections[1][1:]`
        # iterated over its *characters* and never matched a row. Iterate
        # the lines, skipping the table header.
        for line in sections[1].splitlines()[1:]:
            cols = re.split(r'\s\s+', line)
            if len(cols) == 7:
                info_dict['total'] += 1
                if cols[3].lower() in fax_types:
                    info_dict['type'][cols[3].lower()] += 1
                if cols[4] == 'receive':
                    info_dict['operation']['recv'] += 1
                elif cols[4] == 'send':
                    info_dict['operation']['send'] += 1
                if cols[5].lower() in fax_states:
                    info_dict['state'][cols[5].lower()] += 1
    return info_dict
|
Query Asterisk Manager Interface for Fax Stats.
CLI Command - fax show sessions
@return: Dictionary of fax stats.
|
def _worker_process(self):
    # type: (LocalFileMd5Offload) -> None
    """Worker loop: pull tasks from the queue and compute local-file MD5s.

    Runs until ``self.terminated`` is set. Each task is a tuple of
    (key, lpath, fpath, remote_md5, pagealign, lpview); the computed
    digest is compared against ``remote_md5`` and the boolean match
    result is posted to ``self._done_queue``.

    :param LocalFileMd5Offload self: this
    """
    while not self.terminated:
        try:
            # Short timeout so the loop re-checks the termination flag
            # instead of blocking forever on an empty queue.
            key, lpath, fpath, remote_md5, pagealign, lpview = \
                self._task_queue.get(True, 0.1)
        except queue.Empty:
            continue
        if lpview is None:
            # Whole-file digest: no byte-range restriction.
            start = None
            end = None
            size = None
        else:
            # Digest only the byte range covered by the local path view.
            start = lpview.fd_start
            end = lpview.fd_end
            size = end - start
        md5 = blobxfer.operations.md5.compute_md5_for_file_asbase64(
            fpath, pagealign, start, end)
        logger.debug('pre-transfer MD5 check: {} <L..R> {} {}'.format(
            md5, remote_md5, fpath))
        # Publish the result under the condition variable so that any
        # waiter on _done_cv wakes up.
        self._done_cv.acquire()
        self._done_queue.put((key, lpath, size, md5 == remote_md5))
        self._done_cv.notify()
        self._done_cv.release()
|
Compute MD5 for local file
:param LocalFileMd5Offload self: this
|
def create_project(self, name, **kwargs):
    """
    Creates a project with a name. All other parameters are optional. They
    are: `note`, `customer_id`, `budget`, `budget_type`,
    `active_hourly_rate`, `hourly_rate`, `hourly_rates_per_service`, and
    `archived`.
    """
    data = self._wrap_dict("project", kwargs)
    # BUG FIX: the payload is wrapped under the "project" key, so the
    # name must go there too; the old code wrote to data["customer"],
    # which is not present in this payload.
    data["project"]["name"] = name
    return self.post("/projects.json", data=data)
|
Creates a project with a name. All other parameters are optional. They
are: `note`, `customer_id`, `budget`, `budget_type`,
`active_hourly_rate`, `hourly_rate`, `hourly_rates_per_service`, and
`archived`.
|
def _ip_is_usable(self, current_ip):
    """
    Decide whether the current Tor IP may be used.

    :argument current_ip: current Tor IP
    :type current_ip: str
    :returns bool
    """
    # Reject anything that is not a valid IP address literal.
    try:
        ipaddress.ip_address(current_ip)
    except ValueError:
        return False
    # The real IP must never be exposed, and recently seen IPs must not
    # be reused.
    if current_ip == self.real_ip or not self._ip_is_safe(current_ip):
        return False
    return True
|
Check if the current Tor's IP is usable.
:argument current_ip: current Tor IP
:type current_ip: str
:returns bool
|
def iaf_flow(one_hot_assignments,
             scale_weights,
             scale_bias,
             num_codes,
             summary=True,
             name=None):
  """Performs a single IAF flow using scale and normalization transformations.
  Args:
    one_hot_assignments: Assignments Tensor with shape [num_samples, batch_size,
      latent_size, num_codes].
    scale_weights: Tensor corresponding to lower triangular matrix used to
      autoregressively generate scale matrix from assignments. To ensure the
      lower-triangular matrix has length of latent_size, scale_weights should
      be a rank-one tensor with size latent_size * (latent_size + 1) / 2.
    scale_bias: Bias tensor to be added to scale tensor, with shape
      [latent_size, num_codes]. If scale weights are zero, initialize scale_bias
      to be log(exp(1.) / 2. - 1) so initial transformation is identity.
    num_codes: Number of codes in codebook.
    summary: Whether to save summaries.
    name: String used for name scope.
  Returns:
    flow_output: Transformed one-hot assignments.
    inverse_log_det_jacobian: Inverse log determinant of Jacobian corresponding
      to transformation.
  """
  with tf.name_scope(name, default_name="iaf"):
    # Pad the one_hot_assignments by zeroing out the first latent dimension and
    # shifting the rest down by one (and removing the last dimension).
    padded_assignments = tf.pad(
        one_hot_assignments, [[0, 0], [0, 0], [1, 0], [0, 0]])[:, :, :-1, :]
    # NOTE(review): `tfp.distributions.bijectors.Affine` and
    # `tfp.distributions.fill_triangular` only exist in older TFP releases
    # (newer ones expose tfp.bijectors / tfp.math.fill_triangular) — confirm
    # against the pinned tensorflow-probability version.
    scale_bijector = tfp.distributions.bijectors.Affine(
        scale_tril=tfp.distributions.fill_triangular(scale_weights))
    scale = scale_bijector.forward(
        tf.transpose(padded_assignments, [0, 1, 3, 2]))
    # Transpose the bijector output since it performs a batch matmul.
    scale = tf.transpose(scale, [0, 1, 3, 2])
    scale = tf.nn.softplus(scale)
    scale = scale + tf.nn.softplus(scale_bias[tf.newaxis, tf.newaxis, ...])
    # Don't need last dimension since the transformation keeps it constant.
    scale = scale[..., :-1]
    z = one_hot_assignments[..., :-1]
    unnormalized_probs = tf.concat([z * scale,
                                    one_hot_assignments[..., -1, tf.newaxis]],
                                   axis=-1)
    # Renormalize so the transformed assignments still sum to one along the
    # code dimension.
    normalizer = tf.reduce_sum(unnormalized_probs, axis=-1)
    flow_output = unnormalized_probs / (normalizer[..., tf.newaxis])
    inverse_log_det_jacobian = (-tf.reduce_sum(tf.log(scale), axis=-1)
                                + num_codes * tf.log(normalizer))
    if summary:
      tf.summary.histogram("iaf/scale", tf.reshape(scale, [-1]))
      tf.summary.histogram("iaf/inverse_log_det_jacobian",
                           tf.reshape(inverse_log_det_jacobian, [-1]))
    return flow_output, inverse_log_det_jacobian
|
Performs a single IAF flow using scale and normalization transformations.
Args:
one_hot_assignments: Assignments Tensor with shape [num_samples, batch_size,
latent_size, num_codes].
scale_weights: Tensor corresponding to lower triangular matrix used to
autoregressively generate scale matrix from assignments. To ensure the
lower-triangular matrix has length of latent_size, scale_weights should
be a rank-one tensor with size latent_size * (latent_size + 1) / 2.
scale_bias: Bias tensor to be added to scale tensor, with shape
[latent_size, num_codes]. If scale weights are zero, initialize scale_bias
to be log(exp(1.) / 2. - 1) so initial transformation is identity.
num_codes: Number of codes in codebook.
summary: Whether to save summaries.
name: String used for name scope.
Returns:
flow_output: Transformed one-hot assignments.
inverse_log_det_jacobian: Inverse log determinant of Jacobian corresponding
to transformation.
|
def prt_gene_aart_details(self, geneids, prt=sys.stdout):
    """For each gene, print ASCII art which represents its associated GO IDs.

    For every gene id: writes a header line (ASCII-art summary, id, and an
    optional symbol name), then one line per associated GO term showing the
    term's chapter letters followed by the formatted GO record.

    Args:
        geneids: iterable of gene ids; each must be a key of self.gene2gos.
        prt: output stream written to (defaults to sys.stdout).
    """
    _go2nt = self.sortobj.grprobj.go2nt
    patgene = self.datobj.kws["fmtgene2"]
    patgo = self.datobj.kws["fmtgo2"]
    itemid2name = self.datobj.kws.get("itemid2name")
    chr2i = self.datobj.get_chr2idx()
    for geneid in geneids:
        gos_gene = self.gene2gos[geneid]
        # Symbol is optional; fall back to an empty string when unmapped.
        symbol = "" if itemid2name is None else itemid2name.get(geneid, "")
        prt.write("\n")
        prt.write(patgene.format(AART=self.gene2aart[geneid], ID=geneid, NAME=symbol))
        # Map each GO id to (namedtuple record, joined chapter letters).
        go2nt = {go:(_go2nt[go], "".join(self.go2chrs[go])) for go in gos_gene}
        # Sort terms by chapter index of the first letter, then namespace,
        # then descending descendant count.
        for ntgo, abc in sorted(go2nt.values(),
                                key=lambda t: [chr2i[t[1][:1]], t[0].NS, -1*t[0].dcnt]):
            prt.write("{ABC} ".format(ABC=abc))
            prt.write(patgo.format(**ntgo._asdict()))
|
For each gene, print ASCII art which represents its associated GO IDs.
|
def add_data(self, conf):
    """
    Append a data set to the graph object.

    May be called repeatedly to add further data sets. `conf` must be a
    dictionary including 'data' and 'title' keys; it is validated and
    pre-processed before being stored.
    """
    self.validate_data(conf)
    self.process_data(conf)
    self.data.append(conf)
|
Add data to the graph object. May be called several times to add
additional data sets.
conf should be a dictionary including 'data' and 'title' keys
|
def cleanup(self):
    """
    Recursively clean up all children, then remove this directory.

    The directory itself is removed only when the cleanup flag is set.
    """
    for child in self._children.values():
        child.cleanup()
    if self._cleanup:
        self.remove(True)
|
Clean up children and remove the directory.
Directory will only be removed if the cleanup flag is set.
|
def color_palette(name=None, n_colors=6, desat=None):
    """Return a list of colors defining a color palette.
    Available seaborn palette names:
        deep, muted, bright, pastel, dark, colorblind
    Other options:
        hls, husl, any matplotlib palette
    Matplotlib palettes can be specified as reversed palettes by appending
    "_r" to the name or as dark palettes by appending "_d" to the name.
    This function can also be used in a ``with`` statement to temporarily
    set the color cycle for a plot or set of plots.
    Parameters
    ----------
    name: None, string, or sequence
        Name of palette or None to return current palette. If a
        sequence, input colors are used but possibly cycled and
        desaturated.
    n_colors : int
        Number of colors in the palette. If larger than the number of
        colors in the palette, they will cycle.
    desat : float
        Value to desaturate each color by.
    Returns
    -------
    palette : list of RGB tuples.
        Color palette.
    Examples
    --------
    >>> p = color_palette("muted")
    >>> p = color_palette("Blues_d", 10)
    >>> p = color_palette("Set1", desat=.7)
    >>> import matplotlib.pyplot as plt
    >>> with color_palette("husl", 8):
    ...    f, ax = plt.subplots()
    ...    ax.plot(x, y)                 # doctest: +SKIP
    See Also
    --------
    set_palette : set the default color cycle for all plots.
    axes_style : define parameters to set the style of plots
    plotting_context : define parameters to scale plot elements
    """
    # Hex definitions for the six named seaborn palettes.
    seaborn_palettes = dict(
        deep=["#4C72B0", "#55A868", "#C44E52",
              "#8172B2", "#CCB974", "#64B5CD"],
        muted=["#4878CF", "#6ACC65", "#D65F5F",
               "#B47CC7", "#C4AD66", "#77BEDB"],
        pastel=["#92C6FF", "#97F0AA", "#FF9F9A",
                "#D0BBFF", "#FFFEA3", "#B0E0E6"],
        bright=["#003FFF", "#03ED3A", "#E8000B",
                "#8A2BE2", "#FFC400", "#00D7FF"],
        dark=["#001C7F", "#017517", "#8C0900",
              "#7600A1", "#B8860B", "#006374"],
        colorblind=["#0072B2", "#009E73", "#D55E00",
                    "#CC79A7", "#F0E442", "#56B4E9"],
    )
    if name is None:
        # NOTE(review): the "axes.color_cycle" rcParam was removed in
        # matplotlib 2.0 (replaced by "axes.prop_cycle") — confirm the
        # supported matplotlib version.
        palette = mpl.rcParams["axes.color_cycle"]
    elif not isinstance(name, string_types):
        # A sequence of colors was passed directly.
        palette = name
    elif name == "hls":
        palette = hls_palette(n_colors)
    elif name == "husl":
        palette = husl_palette(n_colors)
    elif name in seaborn_palettes:
        palette = seaborn_palettes[name]
    elif name in dir(mpl.cm):
        palette = mpl_palette(name, n_colors)
    elif name[:-2] in dir(mpl.cm):
        # Handles the "_r" / "_d" suffixed matplotlib palette names.
        palette = mpl_palette(name, n_colors)
    else:
        raise ValueError("%s is not a valid palette name" % name)
    if desat is not None:
        palette = [desaturate(c, desat) for c in palette]
    # Always return as many colors as we asked for
    pal_cycle = cycle(palette)
    palette = [next(pal_cycle) for _ in range(n_colors)]
    # Always return in r, g, b tuple format
    try:
        palette = map(mpl.colors.colorConverter.to_rgb, palette)
        palette = _ColorPalette(palette)
    except ValueError:
        raise ValueError("Could not generate a palette for %s" % str(name))
    return palette
|
Return a list of colors defining a color palette.
Available seaborn palette names:
deep, muted, bright, pastel, dark, colorblind
Other options:
hls, husl, any matplotlib palette
Matplotlib palettes can be specified as reversed palettes by appending
"_r" to the name or as dark palettes by appending "_d" to the name.
This function can also be used in a ``with`` statement to temporarily
set the color cycle for a plot or set of plots.
Parameters
----------
name: None, string, or sequence
Name of palette or None to return current palette. If a
sequence, input colors are used but possibly cycled and
desaturated.
n_colors : int
Number of colors in the palette. If larger than the number of
colors in the palette, they will cycle.
desat : float
Value to desaturate each color by.
Returns
-------
palette : list of RGB tuples.
Color palette.
Examples
--------
>>> p = color_palette("muted")
>>> p = color_palette("Blues_d", 10)
>>> p = color_palette("Set1", desat=.7)
>>> import matplotlib.pyplot as plt
>>> with color_palette("husl", 8):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_palette : set the default color cycle for all plots.
axes_style : define parameters to set the style of plots
plotting_context : define parameters to scale plot elements
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.