text stringlengths 78 104k | score float64 0 0.18 |
|---|---|
def explain_text(self, labels, instance, column_name=None, num_features=10, num_samples=5000):
    """Use LIME to explain which words in a text column drive a prediction.

    Args:
        labels: a list of labels to explain.
        instance: the prediction instance conforming to the model's input;
            either a csv line string or a dict.
        column_name: which text column to explain; may be None when the
            model input has exactly one text column.
        num_features: maximum number of words (features) to analyze,
            passed straight to LIME's LimeTextExplainer.
        num_samples: size of the neighborhood used to fit the local linear
            model, passed straight to LIME's LimeTextExplainer.

    Returns:
        A LIME lime.explanation.Explanation.

    Raises:
        ValueError: the given text column is not found in the model input,
            or column_name is None while the model has several text columns.
    """
    from lime.lime_text import LimeTextExplainer

    if not column_name:
        if len(self._text_columns) > 1:
            raise ValueError('There are multiple text columns in the input of the model. '
                             'Please specify "column_name".')
    elif column_name not in self._text_columns:
        raise ValueError('Specified column_name "%s" not found in the model input.'
                         % column_name)

    target_column = column_name if column_name else self._text_columns[0]
    if isinstance(instance, six.string_types):
        # Parse a raw csv line into a dict keyed by the model's headers.
        instance = next(csv.DictReader([instance], fieldnames=self._headers))

    predict_fn = self._make_text_predict_fn(labels, instance, target_column)
    explainer = LimeTextExplainer(class_names=labels)
    return explainer.explain_instance(
        instance[target_column], predict_fn, labels=range(len(labels)),
        num_features=num_features, num_samples=num_samples)
def put(self, deviceId):
    """
    Store a new device, parsed from the request's JSON body, in the device store.

    :param deviceId: identifier of the device being stored
    :return: (None, 200) -- empty body with HTTP 200 status
    """
    payload = request.get_json()
    logger.debug("Received /devices/" + deviceId + " - " + str(payload))
    self._deviceController.accept(deviceId, payload)
    return None, 200
def generate(env):
    """Add Builders and construction variables for Ghostscript to an
    Environment."""
    global GhostscriptAction
    # The following try-except block enables us to use the Tool
    # in standalone mode (without the accompanying pdf.py),
    # whenever we need an explicit call of gs via the Gs()
    # Builder ...
    try:
        # Create the shared Ghostscript action once and cache it module-wide.
        if GhostscriptAction is None:
            GhostscriptAction = SCons.Action.Action('$GSCOM', '$GSCOMSTR')
        from SCons.Tool import pdf
        pdf.generate(env)
        # Teach the generic PDF builder to convert .ps sources via Ghostscript.
        bld = env['BUILDERS']['PDF']
        bld.add_action('.ps', GhostscriptAction)
    except ImportError as e:
        # pdf.py is absent in standalone mode; the explicit Gs() builder
        # registered below still works without it.
        pass
    gsbuilder = SCons.Builder.Builder(action = SCons.Action.Action('$GSCOM', '$GSCOMSTR'))
    env['BUILDERS']['Gs'] = gsbuilder
    # NOTE(review): `gs` is presumably the platform-specific Ghostscript
    # executable name resolved elsewhere in this module -- confirm.
    env['GS'] = gs
    env['GSFLAGS'] = SCons.Util.CLVar('-dNOPAUSE -dBATCH -sDEVICE=pdfwrite')
    env['GSCOM'] = '$GS $GSFLAGS -sOutputFile=$TARGET $SOURCES'
def _UnregisterDatastoreArtifacts(self):
    """Remove artifacts that came from the datastore."""
    # Collect names first so the dict is not mutated while iterating it.
    datastore_names = [
        name for name, artifact in iteritems(self._artifacts)
        if artifact.loaded_from.startswith("datastore")
    ]
    for name in datastore_names:
        self._artifacts.pop(name)
async def run_task(context, to_cancellable_process):
    """Run the task, sending stdout+stderr to files.

    https://github.com/python/asyncio/blob/master/examples/subprocess_shell.py

    Args:
        context (scriptworker.context.Context): the scriptworker context.
        to_cancellable_process (types.Callable): tracks the process so that it can be stopped if the worker is shut down

    Returns:
        int: exit code

    Raises:
        ScriptWorkerTaskException: the task exceeded ``task_max_timeout``.
        WorkerShutdownDuringTask: the process was stopped because the worker
            is shutting down.
    """
    kwargs = {  # pragma: no branch
        'stdout': PIPE,
        'stderr': PIPE,
        'stdin': None,
        'close_fds': True,
        # Run the task in its own session so the whole process group can be
        # signalled together on stop/timeout.
        'preexec_fn': lambda: os.setsid(),
    }
    subprocess = await asyncio.create_subprocess_exec(*context.config['task_script'], **kwargs)
    # Register the process so a worker shutdown can cancel it.
    context.proc = await to_cancellable_process(TaskProcess(subprocess))
    timeout = context.config['task_max_timeout']
    with get_log_filehandle(context) as log_filehandle:
        # Drain stderr and stdout concurrently into the same log file.
        stderr_future = asyncio.ensure_future(
            pipe_to_log(context.proc.process.stderr, filehandles=[log_filehandle])
        )
        stdout_future = asyncio.ensure_future(
            pipe_to_log(context.proc.process.stdout, filehandles=[log_filehandle])
        )
        try:
            _, pending = await asyncio.wait(
                [stderr_future, stdout_future], timeout=timeout
            )
            if pending:
                # Pipe readers still running after `timeout` means the task
                # outlived its budget: stop it and raise.
                message = "Exceeded task_max_timeout of {} seconds".format(timeout)
                log.warning(message)
                await context.proc.stop()
                raise ScriptWorkerTaskException(message, exit_code=context.config['task_max_timeout_status'])
        finally:
            # in the case of a timeout, this will be -15.
            # this code is in the finally: block so we still get the final
            # log lines.
            exitcode = await context.proc.process.wait()
            # make sure we haven't lost any of the logs
            await asyncio.wait([stdout_future, stderr_future])
            # add an exit code line at the end of the log
            status_line = "exit code: {}".format(exitcode)
            if exitcode < 0:
                # Negative exit codes mean the process died from a signal.
                status_line = "Automation Error: python exited with signal {}".format(exitcode)
            log.info(status_line)
            print(status_line, file=log_filehandle)
            # Capture the shutdown flag before clearing the tracked process.
            stopped_due_to_worker_shutdown = context.proc.stopped_due_to_worker_shutdown
            context.proc = None
            if stopped_due_to_worker_shutdown:
                raise WorkerShutdownDuringTask
    return exitcode
def _manual_repartition(self, axis, repartition_func, **kwargs):
    """Apply a manual partitioning function along an axis.

    Args:
        axis: The axis to shuffle data along.
        repartition_func: The function used to repartition data.

    Returns:
        A `BaseFrameManager` object.
    """
    prepared_func = self._prepare_method(repartition_func, **kwargs)
    return self.data.manual_shuffle(axis, prepared_func)
def setParameter(self, parameterName, index, parameterValue):
    """
    Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.setParameter`.

    Set the value of a Spec parameter. Most parameters are handled
    automatically by PyRegion's parameter set mechanism. The ones that need
    special treatment are explicitly handled here.

    :param parameterName: (str) name of the parameter to set
    :param index: node index (unused here; kept for PyRegion compatibility)
    :param parameterValue: new value for the parameter
    :raises Exception: if the parameter name is unknown
    """
    # The three logPath* parameters previously duplicated identical
    # close-then-reopen logic; map each to its file-handle attribute instead.
    logPathParams = {
        "logPathInput": "_fpLogSPInput",
        "logPathOutput": "_fpLogSP",
        "logPathOutputDense": "_fpLogSPDense",
    }
    if parameterName in self._spatialArgNames:
        setattr(self._sfdr, parameterName, parameterValue)
    elif parameterName in logPathParams:
        fpAttr = logPathParams[parameterName]
        setattr(self, parameterName, parameterValue)
        # Close any existing log file before switching paths.
        fp = getattr(self, fpAttr)
        if fp:
            fp.close()
            setattr(self, fpAttr, None)
        # Open a new log file only when a non-empty path was given.
        if parameterValue:
            setattr(self, fpAttr, open(parameterValue, 'w'))
    elif hasattr(self, parameterName):
        setattr(self, parameterName, parameterValue)
    else:
        raise Exception('Unknown parameter: ' + parameterName)
def _add_gradient_scalar(self, name:str, scalar_value)->None:
"Writes a single scalar value for a gradient statistic to Tensorboard."
tag = self.name + '/gradients/' + name
self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=self.iteration) | 0.017544 |
def _body(self):
    """
    The |_Body| instance containing the content for this document.

    Created lazily on first access and cached for subsequent calls.
    """
    body = self.__body
    if body is None:
        body = _Body(self._element.body, self)
        self.__body = body
    return body
def set_var(self, name, value, user=None):
    """
    Set a global or user variable

    :param name: The name of the variable to set
    :type name: str

    :param value: The value of the variable to set
    :type value: str

    :param user: If defining a user variable, the user identifier
    :type user: str

    :raises UserNotDefinedError: The specified user does not exist
    """
    if user is None:
        # Global scope: store directly in the shared variable table.
        self._global_vars[name] = value
        return
    # User scope: delegate to the user's own variable store.
    if user not in self._users:
        raise UserNotDefinedError
    self._users[user].set_var(name, value)
def write_skycatalog(self, filename, show_flux=False, show_id=False):
    """ Write out the all_radec catalog for this image to a file.

    Parameters
    ----------
    filename : str
        Path of the output catalog file; any existing file is overwritten.
    show_flux : bool
        When True, append a flux column taken from ``self.xy_catalog[2]``.
    show_id : bool
        When True, append ID and origin columns taken from
        ``self.xy_catalog[3]`` and ``self.xy_catalog[-1]``.
    """
    header1 = "#RA         Dec"
    header2 = "#(deg)      (deg)"
    if show_flux:
        header1 += "       Flux"
        header2 += "    (counts)"
    if show_id:
        header1 += "    ID    Origin"
        header2 += ""
    header1 += "\n"
    header2 += "\n"
    show_details = show_flux or show_id
    # BUG FIX: use a context manager so the file is closed even if a
    # formatting error is raised below (previously f.close() could be skipped).
    # Also removed dead commented-out code and the unused flux_end_char local.
    with open(filename, 'w') as f:
        f.write("#Sky positions for cumulative reference catalog. Initial catalog from: "+self.name+'\n')
        f.write(header1)
        f.write(header2)
        for i in range(self.all_radec[0].shape[0]):
            src_line = "{:.7f}  {:.7f}" \
                .format(self.all_radec[0][i], self.all_radec[1][i])
            if show_details:
                if show_flux:
                    src_line += "     {:.5g}".format(self.xy_catalog[2][i])
                if show_id:
                    src_line += "    {:d}     {:s}".format(self.xy_catalog[3][i],
                                                           self.xy_catalog[-1][i])
            f.write(src_line + '\n')
def track_exception(self, type=None, value=None, tb=None, properties=None, measurements=None):
    """ Send information about a single exception that occurred in the application.

    Args:
        type (Type). the type of the exception that was thrown.\n
        value (:class:`Exception`). the exception that the client wants to send.\n
        tb (:class:`Traceback`). the traceback information as returned by :func:`sys.exc_info`.\n
        properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n
        measurements (dict). the set of custom measurements the client wants to attach to this data item. (defaults to: None)
    """
    # If any part of the exception triple is missing, fall back to the
    # exception currently being handled (if any).
    if not type or not value or not tb:
        type, value, tb = sys.exc_info()
    if not type or not value or not tb:
        # No active exception either: synthesize one so that a traceback
        # exists for the telemetry payload.
        try:
            raise Exception(NULL_CONSTANT_STRING)
        except:
            type, value, tb = sys.exc_info()
    details = channel.contracts.ExceptionDetails()
    details.id = 1
    details.outer_id = 0
    details.type_name = type.__name__
    details.message = str(value)
    details.has_full_stack = True
    counter = 0
    # Build one StackFrame per traceback entry (outermost first from extract_tb).
    for tb_frame_file, tb_frame_line, tb_frame_function, tb_frame_text in traceback.extract_tb(tb):
        frame = channel.contracts.StackFrame()
        frame.assembly = 'Unknown'
        frame.file_name = tb_frame_file
        frame.level = counter
        frame.line = tb_frame_line
        frame.method = tb_frame_function
        details.parsed_stack.append(frame)
        counter += 1
    # Reverse so the innermost frame comes first -- presumably to match the
    # Application Insights schema; confirm against the contracts definition.
    details.parsed_stack.reverse()
    data = channel.contracts.ExceptionData()
    data.handled_at = 'UserCode'
    data.exceptions.append(details)
    if properties:
        data.properties = properties
    if measurements:
        data.measurements = measurements
    self.track(data, self._context)
def to_csv(args):
    """Take a sqlite filled database of results and return a csv file

    :param str result_file: the path of the sqlite database
    :param str output_file: the path of the csv output file
    :param str delimiter: the desired delimiter for the output csv file
    :raises OSError: if the results database file does not exist
    """
    result_file = args.result_file
    output_file = args.output_file
    delimiter = args.delimiter
    if not os.path.isfile(result_file):
        raise OSError("Results file does not exists")
    # Fixed columns first; custom-timer columns discovered below are appended.
    headers = ['elapsed', 'epoch', 'turret_name', 'scriptrun_time', 'error']
    headers_row = {}
    set_database(result_file, db, {})
    results = Result.select()
    # First pass over the results: collect every custom timer key so the csv
    # header covers all columns that any row may contain.
    for item in results:
        result_item = item.to_dict()
        for k in result_item['custom_timers'].keys():
            if k not in headers:
                headers.append(k)
                headers_row[k] = k
    with open(output_file, "w+") as f:
        writer = csv.DictWriter(f, fieldnames=headers, delimiter=delimiter)
        # Human-readable labels for the fixed columns of the header row.
        headers_row.update({
            'elapsed': 'elapsed time',
            'epoch': 'epoch (in seconds)',
            'turret_name': 'turret name',
            'scriptrun_time': 'transaction time',
            'error': 'error'
        })
        writer.writerow(headers_row)
        # Second pass: flatten each row's custom timers into top-level columns.
        for result_item in results:
            line = result_item.to_dict()
            for key, value in line['custom_timers'].items():
                line[key] = value
            del line['custom_timers']
            writer.writerow(line)
def _merge_csv_model(models, pc, csvs):
    """
    Add csv data to each column in chron model

    :param dict models: Metadata
    :param pc: paleo/chron flag forwarded to _merge_csv_table -- TODO confirm
    :param csvs: parsed csv data forwarded to _merge_csv_table -- TODO confirm
    :return dict models: Metadata
    """
    logger_csvs.info("enter merge_csv_model")
    try:
        for _name, _model in models.items():
            # Merge each table type in place when present on the model.
            for _table_key in ("summaryTable", "ensembleTable", "distributionTable"):
                if _table_key in _model:
                    models[_name][_table_key] = _merge_csv_table(_model[_table_key], pc, csvs)
    except Exception as e:
        # BUG FIX: was `"merge_csv_model: {}",format(e)` -- the comma passed
        # `format(e)` as a stray argument instead of interpolating the message.
        logger_csvs.error("merge_csv_model: {}".format(e))
    logger_csvs.info("exit merge_csv_model")
    return models
def write(self, text="", xy=(0,0), align="left", font=None, fontName=None, fontSize = 10, fill = 1, spacing = 0, screenCenter = False):
    """!
    Print one line of text or multi-line text on the screen.

    @param text: Text to be drawn, e.g. "Hello World!" or a multi-line string.
    @param xy: Top-left corner of the text. Default: (0, 0)
    @param align: Multi-line alignment: "left", "center" or "right". Default: "left"
    @param font: An already-constructed font instance; takes precedence over fontName.
    @param fontName: Name/path of a TrueType font to load. Default: None (use the default font)
    @param fontSize: Font size used when loading fontName. Default: 10
    @param fill: Color to use for the text. Default: 1 (white)
    @param spacing: Number of pixels between lines. Default: 0
    @param screenCenter: Horizontally center the whole text block on the screen. Default: False
    @note
        With screenCenter=True the block keeps its internal alignment but is
        shifted so that its widest line is centered on the display; with
        screenCenter=False the block stays at the given xy position.
    """
    tx = xy[0]
    try:
        # Resolve the drawing font: explicit instance > named TrueType > default.
        dwFont = font if font != None else DEF_SCR_FRONT if fontName==None else ImageFont.truetype(fontName, fontSize)
    except:
        # Font loading failed (bad name/path): fall back to the default font.
        dwFont = DEF_SCR_FRONT
    try:
        if screenCenter == True:
            # NOTE(review): the width is measured with the raw `font` argument,
            # not the resolved dwFont -- centering may be off when only
            # fontName/fontSize were supplied; confirm intended behavior.
            (fw, fh) = self.Canvas.multiline_textsize( text, font )
            tx = xy[0] + (self._display_size[0]-fw)/2
        self.Canvas.multiline_text( (tx, xy[1]) , text, font = dwFont, align=align, fill=fill, spacing=spacing)
    except:
        print("ERROR: canvas write error")
def _buffered_generation_process(source_gen, buffer_, sentinal):
""" helper for buffered_generator """
for data in source_gen:
buffer_.put(data, block=True)
# sentinel: signal the end of the iterator
buffer_.put(sentinal)
# unfortunately this does not suffice as a signal: if buffer_.get() was
# called and subsequently the buffer_ is closed, it will block forever.
buffer_.close() | 0.002398 |
def smart_pull(self):
    """
    'git log --merges origin/master..master'
    """
    current_branch = self.get_current_branch_name()
    self.git_exec(['fetch', self.remote.name])
    remote_ref = '{0}/{1}'.format(self.remote.name, current_branch)
    return self.smart_merge(remote_ref, self.smart_merge_enabled())
def create_assignment(  # pylint: disable=too-many-arguments
        self,
        name,
        short_name,
        weight,
        max_points,
        due_date_str,
        gradebook_id='',
        **kwargs
):
    """Create a new assignment.

    New assignments land in the `Uncategorized` category unless a
    ``categoryId`` is supplied through ``kwargs``.

    Args:
        name (str): descriptive assignment name,
            i.e. ``new NUMERIC SIMPLE ASSIGNMENT``
        short_name (str): short name of assignment, one word of no more
            than 5 characters, i.e. ``SAnew``
        weight (str): floating point value for weight, i.e. ``1.0``
        max_points (str): floating point value for maximum point total,
            i.e. ``100.0``
        due_date_str (str): due date as string in ``mm-dd-yyyy`` format,
            i.e. ``08-21-2011``
        gradebook_id (str): unique identifier for gradebook, i.e. ``2314``;
            falls back to this client's gradebook when empty
        kwargs (dict): additional request parameters such as
            ``graderVisible``, ``totalAverage``, and ``categoryId``;
            these override the defaults sent in the request body

    Raises:
        requests.RequestException: Exception connection error
        ValueError: Unable to decode response content

    Returns:
        dict: dictionary containing ``data`` (the created assignment's
        attributes, e.g. ``assignmentId``, ``dueDate``, ``maxPointsTotal``),
        ``status`` and ``message``
    """
    payload = {
        'name': name,
        'shortName': short_name,
        'weight': weight,
        'graderVisible': False,
        'gradingSchemeType': 'NUMERIC',
        'gradebookId': gradebook_id or self.gradebook_id,
        'maxPointsTotal': max_points,
        'dueDateString': due_date_str
    }
    # Caller-supplied fields take precedence over the defaults above.
    payload.update(kwargs)
    log.info("Creating assignment %s", name)
    response = self.post('assignment', payload)
    log.debug('Received response data: %s', response)
    return response
def escapeQueryTerm(self, term):
    """
    Backslash-escape query syntax characters in ``term``:
    + - && || ! ( ) { } [ ] ^ " ~ * ? : \\

    :param term: raw query term
    :return: the term with every reserved character prefixed by a backslash
    """
    # Reserved single characters; escaping '&' and '|' individually also
    # covers the two-character '&&' and '||' operators.
    reserved = [
        '+', '-', '&', '|', '!', '(', ')',
        '{', '}', '[', ']', '^', '"', '~', '*', '?', ':',
    ]
    # Escape the escape character itself first so later replacements are
    # not double-escaped.
    term = term.replace('\\', '\\\\')
    for c in reserved:
        # BUG FIX: was "\%s" % c -- an invalid escape sequence that only
        # worked by accident (SyntaxWarning on modern Python). Spell the
        # backslash explicitly instead.
        term = term.replace(c, '\\' + c)
    return term
def idatdecomp(self, lenient=False, max_length=0):
    """Iterator that yields decompressed ``IDAT`` strings."""
    # Currently, with no max_length parameter to decompress, this routine
    # does one yield per IDAT chunk -- so not very incremental.
    decompressor = zlib.decompressobj()
    # Feed each IDAT chunk through the shared decompressor, then flush
    # whatever state remains at the end.
    for chunk in self.idat(lenient):
        # :todo: add a max_length argument here to limit output size.
        yield bytearray(decompressor.decompress(chunk))
    yield bytearray(decompressor.flush())
def _query(path, method='GET', data=None, params=None, header_dict=None, decode=True):
    '''
    Perform a query directly against the Vultr REST API
    '''
    api_key = config.get_cloud_config_value(
        'api_key',
        get_configured_provider(),
        __opts__,
        search_global=False,
    )
    management_host = config.get_cloud_config_value(
        'management_host',
        get_configured_provider(),
        __opts__,
        search_global=False,
        default='api.vultr.com'
    )
    # The api_key rides in the URL but is masked from logs via hide_fields.
    url = 'https://{management_host}/v1/{path}?api_key={api_key}'.format(
        management_host=management_host,
        path=path,
        api_key=api_key,
    )
    if header_dict is None:
        header_dict = {}
    request_kwargs = dict(
        method=method,
        params=params,
        data=data,
        header_dict=header_dict,
        port=443,
        text=True,
        decode=decode,
        decode_type='json',
        hide_fields=['api_key'],
        opts=__opts__,
    )
    result = __utils__['http.query'](url, **request_kwargs)
    # Prefer the parsed JSON body when http.query provides one.
    if 'dict' in result:
        return result['dict']
    return result
def btc_privkey_scriptsig_classify(private_key_info):
    """
    What kind of scriptsig can this private key make?
    """
    # Probe each key layout in order of precedence; first match wins.
    classifiers = [
        (btc_is_singlesig, 'p2pkh'),
        (btc_is_multisig, 'p2sh'),
        (btc_is_singlesig_segwit, 'p2sh-p2wpkh'),
        (btc_is_multisig_segwit, 'p2sh-p2wsh'),
    ]
    for matches, scriptsig_type in classifiers:
        if matches(private_key_info):
            return scriptsig_type
    return None
def _get_inbound_stream(self, stream_id):
"""
Get or create the inbound stream with the specified ID.
"""
if stream_id not in self._inbound_streams:
self._inbound_streams[stream_id] = InboundStream()
return self._inbound_streams[stream_id] | 0.006873 |
def on_for_rotations(self, speed, rotations, brake=True, block=True):
    """
    Rotate the motor at ``speed`` for ``rotations``

    ``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue`
    object, enabling use of other units.

    ``brake`` selects whether the motor holds position at the end;
    ``block`` makes the call wait until the motion has finished.
    """
    # Convert the requested speed to the motor's native units.
    speed_sp = self._speed_native_units(speed)
    # One rotation equals 360 degrees of relative position.
    self._set_rel_position_degrees_and_speed_sp(rotations * 360, speed_sp)
    self._set_brake(brake)
    self.run_to_rel_pos()
    if block:
        # Wait for the motion to start, then for it to finish.
        self.wait_until('running', timeout=WAIT_RUNNING_TIMEOUT)
        self.wait_until_not_moving()
def run(self, fitness_function, n=None):
    """
    Runs NEAT's genetic algorithm for at most n generations. If n
    is None, run until solution is found or extinction occurs.

    The user-provided fitness_function must take only two arguments:
        1. The population as a list of (genome id, genome) tuples.
        2. The current configuration object.

    The return value of the fitness function is ignored, but it must assign
    a Python float to the `fitness` member of each genome.

    The fitness function is free to maintain external state, perform
    evaluations in parallel, etc.

    It is assumed that fitness_function does not modify the list of genomes,
    the genomes themselves (apart from updating the fitness member),
    or the configuration object.

    Returns the best genome ever seen. Raises RuntimeError when n is None
    while fitness termination is disabled, and CompleteExtinctionException
    when all species die out and reset_on_extinction is off.
    """
    if self.config.no_fitness_termination and (n is None):
        raise RuntimeError("Cannot have no generational limit with no fitness termination")
    k = 0
    while n is None or k < n:
        k += 1
        self.reporters.start_generation(self.generation)
        # Evaluate all genomes using the user-provided function.
        fitness_function(list(iteritems(self.population)), self.config)
        # Gather and report statistics: find this generation's best genome.
        best = None
        for g in itervalues(self.population):
            if best is None or g.fitness > best.fitness:
                best = g
        self.reporters.post_evaluate(self.config, self.population, self.species, best)
        # Track the best genome ever seen.
        if self.best_genome is None or best.fitness > self.best_genome.fitness:
            self.best_genome = best
        if not self.config.no_fitness_termination:
            # End if the fitness threshold is reached.
            fv = self.fitness_criterion(g.fitness for g in itervalues(self.population))
            if fv >= self.config.fitness_threshold:
                self.reporters.found_solution(self.config, self.generation, best)
                break
        # Create the next generation from the current generation.
        self.population = self.reproduction.reproduce(self.config, self.species,
                                                      self.config.pop_size, self.generation)
        # Check for complete extinction.
        if not self.species.species:
            self.reporters.complete_extinction()
            # If requested by the user, create a completely new population,
            # otherwise raise an exception.
            if self.config.reset_on_extinction:
                self.population = self.reproduction.create_new(self.config.genome_type,
                                                               self.config.genome_config,
                                                               self.config.pop_size)
            else:
                raise CompleteExtinctionException()
        # Divide the new population into species.
        self.species.speciate(self.config, self.population, self.generation)
        self.reporters.end_generation(self.config, self.population, self.species)
        self.generation += 1
    if self.config.no_fitness_termination:
        # With fitness termination disabled, report the overall best at the end.
        self.reporters.found_solution(self.config, self.generation, self.best_genome)
    return self.best_genome
def maximum_independent_set(G, sampler=None, lagrange=2.0, **sampler_args):
    """Returns an approximate maximum independent set.

    Builds a QUBO whose ground states correspond to maximum independent
    sets of ``G`` and samples from it with the given sampler. An
    independent set is a set of nodes inducing no edges in ``G``; a
    maximum independent set is one of largest possible size.

    Parameters
    ----------
    G : NetworkX graph
        The graph on which to find a maximum independent set.
    sampler
        A binary quadratic model sampler exposing 'sample_qubo' and
        'sample_ising' methods and returning an iterable of samples in
        order of increasing energy. If not provided, a default must have
        been registered via the `set_default_sampler` function.
    lagrange : optional (default 2)
        Lagrange parameter weighting the constraint (no edges within the
        set) against the objective (largest set possible).
    sampler_args
        Additional keyword parameters are passed to the sampler.

    Returns
    -------
    indep_nodes : list
        Nodes forming the sampled (approximate) maximum independent set.

    Notes
    -----
    Samplers by their nature may not return the optimal solution; the
    quality of the returned sample is not verified.
    """
    # The unweighted problem is the weighted one with all node weights equal.
    return maximum_weighted_independent_set(G, None, sampler, lagrange, **sampler_args)
def new_signal(celf, path, iface, name) :
    "creates a new DBUS.MESSAGE_TYPE_SIGNAL message."
    # dbus_message_new_signal takes UTF-8 byte strings and returns a null
    # pointer on allocation failure.
    result = dbus.dbus_message_new_signal(path.encode(), iface.encode(), name.encode())
    # NOTE(review): `== None` (not `is None`) appears deliberate here --
    # ctypes pointer results compare equal to None when null without being
    # the None singleton; confirm before "fixing".
    if result == None :
        raise CallFailed("dbus_message_new_signal")
    #end if
    # Wrap the raw message pointer in a new instance of this class.
    return \
        celf(result)
def create_transformation(self, rotation=None, translation=None):
    """
    Creates a transformation matrix with rotations and translation.

    Args:
        rotation: 3 component vector of euler angles as a list, tuple, or :py:class:`pyrr.Vector3`
        translation: 3 component vector as a list, tuple, or :py:class:`pyrr.Vector3`

    Returns:
        A 4x4 matrix as a :py:class:`numpy.array`, or ``None`` when both
        ``rotation`` and ``translation`` are ``None``.
    """
    mat = None
    if rotation is not None:
        mat = Matrix44.from_eulers(Vector3(rotation))
    if translation is not None:
        trans = matrix44.create_from_translation(Vector3(translation))
        if mat is None:
            mat = trans
        else:
            # Combine rotation then translation; presumably pyrr's
            # row-vector convention makes this rotate-before-translate --
            # TODO confirm intended composition order.
            mat = matrix44.multiply(mat, trans)
    return mat
def create_task_spec_def():
    """Returns a :class:`TaskSpecDef` based on the environment variables for distributed training.

    References
    ----------
    - `ML-engine trainer considerations <https://cloud.google.com/ml-engine/docs/trainer-considerations#use_tf_config>`__
    - `TensorPort Distributed Computing <https://www.tensorport.com/documentation/code-details/>`__
    """
    if 'TF_CONFIG' in os.environ:
        # TF_CONFIG is used in ML-engine
        env = json.loads(os.environ.get('TF_CONFIG', '{}'))
        task_data = env.get('task', None) or {'type': 'master', 'index': 0}
        cluster_data = env.get('cluster', None) or {'ps': None, 'worker': None, 'master': None}
        return TaskSpecDef(
            task_type=task_data['type'],
            index=task_data['index'],
            trial=task_data.get('trial'),
            ps_hosts=cluster_data['ps'],
            worker_hosts=cluster_data['worker'],
            master=cluster_data.get('master'))
    if 'JOB_NAME' in os.environ:
        # JOB_NAME, TASK_INDEX, PS_HOSTS, WORKER_HOSTS and MASTER_HOST are used in TensorPort
        return TaskSpecDef(
            task_type=os.environ['JOB_NAME'],
            index=os.environ['TASK_INDEX'],
            ps_hosts=os.environ.get('PS_HOSTS', None),
            worker_hosts=os.environ.get('WORKER_HOSTS', None),
            master=os.environ.get('MASTER_HOST', None))
    raise Exception('You need to setup TF_CONFIG or JOB_NAME to define the task.')
def WriteRow(self, values):
    """Writes a single row to the underlying buffer.

    Args:
      values: A dictionary mapping column names to values to be inserted into
        the CSV output.
    """
    precondition.AssertDictType(values, text, text)
    row = []
    # Emit values in the configured column order; every column is required.
    for column in self._columns:
        try:
            row.append(values[column])
        except KeyError:
            raise ValueError("Row does not contain required column `%s`" % column)
    self._writer.WriteRow(row)
def cursors(self):
    """Cursors for rectangular selection.
    1 cursor for every line
    """
    cursors = []
    if self._start is not None:
        startLine, startVisibleCol = self._start
        currentLine, currentCol = self._qpart.cursorPosition
        if abs(startLine - currentLine) > self._MAX_SIZE or \
           abs(startVisibleCol - currentCol) > self._MAX_SIZE:
            # Too big rectangular selection freezes the GUI
            self._qpart.userWarning.emit('Rectangular selection area is too big')
            self._start = None
            return []
        currentBlockText = self._qpart.textCursor().block().text()
        currentVisibleCol = self._realToVisibleColumn(currentBlockText, currentCol)
        # One QTextCursor per selected line, spanning the rectangle's columns.
        for lineNumber in range(min(startLine, currentLine),
                                max(startLine, currentLine) + 1):
            block = self._qpart.document().findBlockByNumber(lineNumber)
            cursor = QTextCursor(block)
            # Visible (e.g. tab-expanded) columns map back to different real
            # string indices on each line, so convert per line.
            realStartCol = self._visibleToRealColumn(block.text(), startVisibleCol)
            realCurrentCol = self._visibleToRealColumn(block.text(), currentVisibleCol)
            if realStartCol is None:
                realStartCol = block.length()  # out of range value
            if realCurrentCol is None:
                realCurrentCol = block.length()  # out of range value
            # Anchor at the start column, then extend the selection to the
            # current column (KeepAnchor), clamped to the line's length.
            cursor.setPosition(cursor.block().position() + min(realStartCol, block.length() - 1))
            cursor.setPosition(cursor.block().position() + min(realCurrentCol, block.length() - 1),
                               QTextCursor.KeepAnchor)
            cursors.append(cursor)
    return cursors
def deviance(self, y, mu, scaled=True):
    """
    model deviance

    for a gaussian linear model, this is equal to the SSE

    Parameters
    ----------
    y : array-like of length n
        target values
    mu : array-like of length n
        expected values
    scaled : boolean, default: True
        whether to divide the deviance by the distribution scale

    Returns
    -------
    deviances : np.array of length n
    """
    squared_residuals = (y - mu) ** 2
    if scaled:
        squared_residuals /= self.scale
    return squared_residuals
def apply_fit_filters(self, choosers, alternatives):
    """
    Filter `choosers` and `alternatives` for fitting.

    Parameters
    ----------
    choosers : pandas.DataFrame
        Table describing the agents making choices, e.g. households.
    alternatives : pandas.DataFrame
        Table describing the things from which agents are choosing,
        e.g. buildings.

    Returns
    -------
    filtered_choosers, filtered_alts : pandas.DataFrame
    """
    # Delegate to the parent class's filtering; the explicit two-argument
    # super() form is kept -- presumably for Python 2 compatibility.
    return super(SegmentedMNLDiscreteChoiceModel, self).apply_fit_filters(
        choosers, alternatives)
def get_canonical_link(self):
    """
    Return the article's canonical URL.

    Prefers a `<link rel="canonical">` element from the parsed document,
    resolving relative hrefs against the final URL's scheme and host;
    falls back to the article's final URL.
    """
    if self.article.final_url:
        links = self.parser.getElementsByTag(
            self.article.doc, tag='link', attr='rel', value='canonical')
        if links is not None and len(links) > 0:
            href = self.parser.getAttribute(links[0], 'href')
            if href:
                href = href.strip()
                parsed = urlparse(href)
                if not parsed.hostname:
                    # Relative canonical link: resolve against the page's domain.
                    base = urlparse(self.article.final_url)
                    domain = '%s://%s' % (base.scheme, base.hostname)
                    href = urljoin(domain, href)
                return href
    return self.article.final_url
def quadrect(f, n, a, b, kind='lege', *args, **kwargs):
    """
    Integrate the d-dimensional function f on a rectangle whose lower and
    upper bounds along dimension i are a[i] and b[i], using n[i]
    quadrature points per dimension.

    Parameters
    ----------
    f : function
        The function to integrate over. It receives as its first argument
        a matrix of evaluation points (one dimension per column); any
        other arguments are forwarded via `*args` and `**kwargs`.
    n : int or array_like(float)
        A length-d iterable of the number of nodes in each dimension.
    a : scalar or array_like(float)
        A length-d iterable of lower endpoints (a scalar is repeated for
        every dimension).
    b : scalar or array_like(float)
        A length-d iterable of upper endpoints (a scalar is repeated for
        every dimension).
    kind : string, optional(default='lege')
        Type of integration to perform:
            lege - Gauss-Legendre
            cheb - Gauss-Chebyshev
            trap - trapezoid rule
            simp - Simpson rule
            N - Neiderreiter equidistributed sequence
            W - Weyl equidistributed sequence
            H - Haber equidistributed sequence
            R - Monte Carlo
    *args, **kwargs :
        Other arguments passed to the function f.

    Returns
    -------
    out : scalar (float)
        The value of the integral on the region [a, b].

    Notes
    -----
    Based on the original ``quadrect`` in the CompEcon toolbox by
    Miranda and Fackler.

    References
    ----------
    Miranda, Mario J, and Paul L Fackler. Applied Computational
    Economics and Finance, MIT Press, 2002.
    """
    # Map each named rule to its node/weight generator; anything else is
    # treated as an equidistributed-sequence code handled by qnwequi.
    quadrature_rules = {
        "lege": qnwlege,
        "cheb": qnwcheb,
        "trap": qnwtrap,
        "simp": qnwsimp,
    }
    rule = quadrature_rules.get(kind.lower())
    if rule is not None:
        nodes, weights = rule(n, a, b)
    else:
        nodes, weights = qnwequi(n, a, b, kind)

    return weights.dot(f(nodes, *args, **kwargs))
def getHelpFileAsString(taskname, taskpath):
    """
    This function will return useful help as a string read from a file
    in the task's installed directory called "<module>.help".

    If no such file can be found, it will simply return an empty string.

    Notes
    -----
    The location of the actual help file will be found under the task's
    installed directory using 'irafutils.rglob' to search all sub-dirs to
    find the file. This allows the help file to be either in the task's
    installed directory or in any sub-directory, such as a "help/" directory.

    Parameters
    ----------
    taskname: string
        Value of `__taskname__` for a module/task

    taskpath: string
        Value of `__file__` for an installed module which defines the task

    Returns
    -------
    helpString: string
        multi-line string read from the file '<taskname>.help'
    """
    # get the local library directory where the code is stored
    pathsplit = os.path.split(taskpath)  # taskpath should be task's __file__
    if taskname.find('.') > -1:  # taskname given as package.taskname
        helpname = taskname.split(".")[1]  # use __taskname__ from task's module
    else:
        helpname = taskname
    localdir = pathsplit[0]
    if localdir == '':
        localdir = '.'
    # BUG FIX: rglob may find nothing; indexing [0] unconditionally raised
    # IndexError instead of returning '' as documented.
    matches = rglob(localdir, helpname + ".help")
    if not matches:
        return ''
    helpfile = matches[0]
    if not os.access(helpfile, os.R_OK):
        return ''
    # Context manager guarantees the handle is closed even on error.
    with open(helpfile, 'r') as fh:
        return fh.read()
def startAlertListener(self, callback=None):
    """ Creates a websocket connection to the Plex Server to optionally recieve
    notifications. These often include messages from Plex about media scans
    as well as updates to currently running Transcode Sessions.

    NOTE: You need websocket-client installed in order to use this feature.
    >> pip install websocket-client

    Parameters:
        callback (func): Callback function to call on recieved messages.

    raises:
        :class:`plexapi.exception.Unsupported`: Websocket-client not installed.
    """
    listener = AlertListener(self, callback)
    listener.start()
    return listener
def from_request(cls, header_data, ignore_bad_cookies=False):
    """Construct a Cookies object from request header data."""
    jar = cls()
    jar.parse_request(header_data, ignore_bad_cookies=ignore_bad_cookies)
    return jar
def vrrp_vip(self, **kwargs):
    """Set the VRRP/VRRPv3 virtual IP (VIP) on an interface.

    Args:
        int_type (str): Type of interface. (gigabitethernet,
            tengigabitethernet, fortygigabitethernet,
            hundredgigabitethernet, port_channel, ve).
        name (str): Name of interface. (1/0/5, 1/0/10, etc).
        vrid (str): VRRPv3 ID.
        vip (str): IPv4/IPv6 Virtual IP Address (CIDR notation).
        rbridge_id (str): rbridge-id for device. Only required when type is
            `ve`. Defaults to '1'.
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `int_type`, `name`, `vrid`, or `vip` is not passed.
        ValueError: if `int_type`, `name`, `vrid`, or `vip` is invalid.
        InvalidVlanId: if a 've' interface name is not a valid VLAN id.

    Examples:
        >>> dev.interface.vrrp_vip(int_type='tengigabitethernet',
        ...     name='225/0/18', vrid='1', vip='10.1.1.1/24')
        >>> dev.interface.vrrp_vip(int_type='ve', name='89',
        ...     vrid='1', vip='172.16.1.2/24', rbridge_id='225')
    """
    int_type = kwargs.pop('int_type').lower()
    name = kwargs.pop('name')
    vrid = kwargs.pop('vrid')
    vip = kwargs.pop('vip')
    rbridge_id = kwargs.pop('rbridge_id', '1')
    callback = kwargs.pop('callback', self._callback)
    valid_int_types = ['gigabitethernet', 'tengigabitethernet',
                       'fortygigabitethernet', 'hundredgigabitethernet',
                       'port_channel', 've']
    # NOTE(review): `unicode` is Python 2 only -- this module appears to
    # target py2; confirm before porting to py3.
    ipaddress = ip_interface(unicode(vip))
    vrrp_vip = None
    vrrp_args = dict(name=name,
                     vrid=vrid,
                     virtual_ipaddr=str(ipaddress.ip))
    method_class = self._interface
    if int_type not in valid_int_types:
        raise ValueError('`int_type` must be one of: %s' %
                         repr(valid_int_types))
    # The generated-bindings method name depends on IP version and int_type.
    if ipaddress.version == 4:
        vrrp_args['version'] = '3'
        method_name = 'interface_%s_vrrp_virtual_ip_virtual_' \
                      'ipaddr' % int_type
    elif ipaddress.version == 6:
        method_name = 'interface_%s_ipv6_vrrpv3_group_virtual_ip_' \
                      'virtual_ipaddr' % int_type
    if int_type == 've':
        # 've' interfaces live under the rbridge-id subtree of the config.
        method_name = 'rbridge_id_%s' % method_name
        if ipaddress.version == 6:
            method_name = method_name.replace('group_', '')
        method_class = self._rbridge
        vrrp_args['rbridge_id'] = rbridge_id
        if not pynos.utilities.valid_vlan_id(name):
            raise InvalidVlanId("`name` must be between `1` and `8191`")
    elif not pynos.utilities.valid_interface(int_type, name):
        raise ValueError('`name` must be in the format of x/y/z for '
                         'physical interfaces or x for port channel.')
    vrrp_vip = getattr(method_class, method_name)
    config = vrrp_vip(**vrrp_args)
    return callback(config)
def word_counts(self):
    """Dictionary mapping each word in this text to its frequency.

    Returned as a ``defaultdict(int)``, so unknown words look up as 0.
    """
    frequencies = defaultdict(int)
    for token in self.words:
        frequencies[token] += 1
    return frequencies
def _analyse_status_type(line):
    '''
    Classify a line of `drbdadm status` output into its section type
    based on its leading indentation and keywords.
    '''
    indent = _count_spaces_startswith(line)
    if indent is None:
        return ''
    # 0 spaces -> resource header; deeper levels map keyword -> section.
    mapping = {
        0: 'RESOURCE',
        2: {' disk:': 'LOCALDISK', ' role:': 'PEERNODE', ' connection:': 'PEERNODE'},
        4: {' peer-disk:': 'PEERDISK'}
    }
    selected = mapping.get(indent, 'UNKNOWN')
    # isinstance(ret, str) only works when run directly, calling need unicode(six)
    if isinstance(selected, six.text_type):
        return selected
    for marker in selected:
        if marker in line:
            return selected[marker]
    return 'UNKNOWN'
def check_vmware_version(self):
    """
    Check the installed VMware product (Workstation/Player/Fusion) and
    version, recording the host type; raises VMwareError when VMware is
    missing or the version cannot be determined. (Coroutine: delegates
    to requirement-check coroutines via ``yield from``.)
    """
    if sys.platform.startswith("win"):
        # look for vmrun.exe using the directory listed in the registry
        ws_version = self._find_vmware_version_registry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware Workstation")
        if ws_version is None:
            # No Workstation key: fall back to checking for Player.
            player_version = self._find_vmware_version_registry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware Player")
            if player_version:
                log.debug("VMware Player version {} detected".format(player_version))
                yield from self._check_vmware_player_requirements(player_version)
            else:
                log.warning("Could not find VMware version")
                self._host_type = "ws"
        else:
            log.debug("VMware Workstation version {} detected".format(ws_version))
            yield from self._check_vmware_workstation_requirements(ws_version)
    else:
        if sys.platform.startswith("darwin"):
            if not os.path.isdir("/Applications/VMware Fusion.app"):
                raise VMwareError("VMware Fusion is not installed in the standard location /Applications/VMware Fusion.app")
            self._host_type = "fusion"
            return  # FIXME: no version checking on Mac OS X but we support all versions of fusion
        # Linux: ask the vmware binary itself for its version banner.
        vmware_path = VMware._get_linux_vmware_binary()
        if vmware_path is None:
            raise VMwareError("VMware is not installed (vmware or vmplayer executable could not be found in $PATH)")
        try:
            output = yield from subprocess_check_output(vmware_path, "-v")
            match = re.search("VMware Workstation ([0-9]+)\.", output)
            version = None
            if match:
                # VMware Workstation has been detected
                version = match.group(1)
                log.debug("VMware Workstation version {} detected".format(version))
                yield from self._check_vmware_workstation_requirements(version)
            match = re.search("VMware Player ([0-9]+)\.", output)
            if match:
                # VMware Player has been detected
                version = match.group(1)
                log.debug("VMware Player version {} detected".format(version))
                yield from self._check_vmware_player_requirements(version)
            if version is None:
                log.warning("Could not find VMware version. Output of VMware: {}".format(output))
                raise VMwareError("Could not find VMware version. Output of VMware: {}".format(output))
        except (OSError, subprocess.SubprocessError) as e:
            log.error("Error while looking for the VMware version: {}".format(e))
            raise VMwareError("Error while looking for the VMware version: {}".format(e))
def create_factories(fs_provider, task_problem_types, hook_manager=None, course_class=Course, task_class=Task):
    """
    Shorthand for creating the course and task factories.

    :param fs_provider: A FileSystemProvider leading to the courses
    :param task_problem_types: problem types available to tasks
    :param hook_manager: an Hook Manager instance. If None, a new Hook Manager is created
    :param course_class: class used to instantiate courses
    :param task_class: class used to instantiate tasks
    :return: a tuple with two objects: the first being of type CourseFactory, the second of type TaskFactory
    """
    hooks = hook_manager if hook_manager is not None else HookManager()
    tasks = TaskFactory(fs_provider, hooks, task_problem_types, task_class)
    courses = CourseFactory(fs_provider, tasks, hooks, course_class)
    return courses, tasks
def add_ds_ids_from_files(self):
    """Check files for more dynamically discovered datasets.

    Asks the first handler of each file type which datasets it can
    provide and registers any that are not already known (entries from
    the YAML config take precedence).
    """
    for file_handlers in self.file_handlers.values():
        try:
            fh = file_handlers[0]
            avail_ids = fh.available_datasets()
        except NotImplementedError:
            # This handler does not support dynamic discovery.
            continue
        # dynamically discover other available datasets
        for ds_id, ds_info in avail_ids:
            # don't overwrite an existing dataset
            # especially from the yaml config
            coordinates = ds_info.get('coordinates')
            if isinstance(coordinates, list):
                # xarray doesn't like concatenating attributes that are
                # lists: https://github.com/pydata/xarray/issues/2060
                ds_info['coordinates'] = tuple(ds_info['coordinates'])
            self.ids.setdefault(ds_id, ds_info)
def getTarget(self):
    """ Returns the location of the match click target (center by default, but may be offset) """
    target_offset = self._target
    return self.getCenter().offset(target_offset.x, target_offset.y)
def value_from_person(self, array, role, default = 0):
    """
    Get the value of ``array`` for the person with the unique role ``role``.

    ``array`` must have the dimension of the number of persons in the simulation.
    If such a person does not exist, return ``default`` instead.
    The result is a vector whose dimension is the number of entities.
    """
    self.entity.check_role_validity(role)
    if role.max != 1:
        raise Exception(
            'You can only use value_from_person with a role that is unique in {}. Role {} is not unique.'
            .format(self.key, role.key)
        )
    self.members.check_array_compatible_with_entity(array)
    members_map = self.ordered_members_map
    # Start from an all-`default` vector; entities without a member
    # holding the role keep the default value.
    result = self.filled_array(default, dtype = array.dtype)
    if isinstance(array, EnumArray):
        # Preserve the enum wrapper so the result decodes like the input.
        result = EnumArray(result, array.possible_values)
    role_filter = self.members.has_role(role)
    # Entities that contain at least one member with the role.
    entity_filter = self.any(role_filter)
    result[entity_filter] = array[members_map][role_filter[members_map]]
    return result
def _apply(self, ctx: ExtensionContext) -> AugmentedDict:
    """
    Replaces any {{var::*}} directives with it's actual variable value or a default.

    Args:
        ctx: The processing context.

    Returns:
        Returns the altered node key and value as a single-entry dict.

    Raises:
        ExtensionError: if a referenced variable is unset and
            `fail_on_unset` is enabled.
    """
    node_key, node_value = ctx.node

    def process(pattern: Pattern[str], _str: str) -> Any:
        # Substitute the directive in one string (key or value).
        _match = pattern.match(_str)
        if _match is None:
            # No directive present: return the string untouched.
            return _str
        # We got a match
        # Group 0: Whole match; Group 1: Our placeholder; Group 2: The environment variable
        placeholder, varname = _match.group(1), _match.group(2)
        varval = self.vars.get(varname, None)
        if varval is None and self.fail_on_unset:
            raise ExtensionError("Variable '{}' is unset.".format(varname))
        # Fall back to the configured default when the variable is unset.
        return _str.replace(placeholder, varval or self.default)

    _pattern = re.compile(self.__pattern__)
    node_key = process(_pattern, node_key)
    node_value = process(_pattern, node_value)
    return {node_key: node_value}
def modify_symbol(sym: ast.Symbol, scope: ast.InstanceClass) -> None:
    """
    Apply a modification to a symbol if the scope matches (or is None)

    :param sym: symbol to apply modifications for
    :param scope: scope of modification
    """
    # We assume that we do not screw up the order of applying modifications
    # when "moving up" with the scope.
    # Partition the modification arguments: those targeting this scope
    # (or no scope) are applied now, the rest are kept for later passes.
    apply_args = [x for x in sym.class_modification.arguments
                  if x.scope is None or x.scope.full_reference().to_tuple() == scope.full_reference().to_tuple()]
    skip_args = [x for x in sym.class_modification.arguments
                 if x.scope is not None and x.scope.full_reference().to_tuple() != scope.full_reference().to_tuple()]
    for class_mod_argument in apply_args:
        argument = class_mod_argument.value
        assert isinstance(argument, ast.ElementModification), \
            "Found redeclaration modification which should already have been handled."
        # TODO: Strip all non-symbol stuff.
        if argument.component.name not in ast.Symbol.ATTRIBUTES:
            raise Exception("Trying to set unknown symbol property {}".format(argument.component.name))
        # Overwrite the symbol attribute with the first modification value.
        setattr(sym, argument.component.name, argument.modifications[0])
    # Only the arguments that did not apply to this scope remain.
    sym.class_modification.arguments = skip_args
def clear(self, actors=()):
    """Delete specified list of actors, by default delete all.

    With no arguments, also removes volumes, disables sliders, buttons
    and widgets, removes scalar bars, and resets the module-level
    collectable-actors list.
    """
    if not utils.isSequence(actors):
        # Allow a single actor to be passed directly.
        actors = [actors]
    if len(actors):
        # Only remove the actors that were explicitly passed in.
        for a in actors:
            self.removeActor(a)
    else:
        # No actors given: wipe everything this plotter knows about.
        for a in settings.collectable_actors:
            self.removeActor(a)
        settings.collectable_actors = []
        self.actors = []
        for a in self.getActors():
            self.renderer.RemoveActor(a)
        for a in self.getVolumes():
            self.renderer.RemoveVolume(a)
        for s in self.sliders:
            s.EnabledOff()
        for b in self.buttons:
            self.renderer.RemoveActor(b)
        for w in self.widgets:
            w.EnabledOff()
        for c in self.scalarbars:
            self.renderer.RemoveActor(c)
def outlineColor(self, value):
    """gets/sets the outlineColor

    The new color is only stored when `value` is a Color and an outline
    dict exists; otherwise the call is a silent no-op.
    """
    # Idiom fix: `is not None` instead of the awkward `not ... is None`.
    if isinstance(value, Color) and \
       self._outline is not None:
        self._outline['color'] = value
def get_filename(page, content_type, data):
    """
    Generate a stable filename using the original filename of the type.

    A short random token is inserted to avoid collisions between uploads
    that share the same original name.
    """
    collision_token = uuid.uuid4().hex[:8]
    parts = data.name.split('.')
    if len(parts) > 1:
        base = slugify('.'.join(parts[:-1]), allow_unicode=True)
        extension = slugify(parts[-1])
        safe_name = base + '.' + extension
    else:
        safe_name = slugify(data.name)
    return os.path.join(
        settings.PAGE_UPLOAD_ROOT,
        'page_' + str(page.id),
        content_type + '-' + collision_token + '-' + safe_name
    )
def new_transaction(self, timeout, durability, transaction_type):
    """
    Creates a Transaction object with given timeout, durability and transaction type.

    :param timeout: (long), the timeout in seconds determines the maximum lifespan of a transaction.
    :param durability: (int), the durability is the number of machines that can take over if a member fails during a
        transaction commit or rollback
    :param transaction_type: (Transaction Type), the transaction type which can be :const:`~hazelcast.transaction.TWO_PHASE` or :const:`~hazelcast.transaction.ONE_PHASE`
    :return: (:class:`~hazelcast.transaction.Transaction`), new created Transaction.
    """
    conn = self._connect()
    return Transaction(self._client, conn, timeout, durability, transaction_type)
def _is_orphan(scc, graph):
"""
Return False iff the given scc is reachable from elsewhere.
"""
return all(p in scc for v in scc for p in graph.parents(v)) | 0.005814 |
def shuffle_egg(egg):
    """ Shuffle an Egg's recalls.

    Permutes the order of each recall list independently using
    np.random (nondeterministic unless the numpy seed is fixed) and
    returns a new Egg built from the shuffled data.
    """
    from .egg import Egg
    pres, rec, features, dist_funcs = parse_egg(egg)
    # Promote 1-D inputs to a single-list (1, n) layout.
    if pres.ndim==1:
        pres = pres.reshape(1, pres.shape[0])
        rec = rec.reshape(1, rec.shape[0])
        features = features.reshape(1, features.shape[0])
    for ilist in range(rec.shape[0]):
        # Shuffle this recall list in place via a random permutation.
        idx = np.random.permutation(rec.shape[1])
        rec[ilist,:] = rec[ilist,idx]
    return Egg(pres=pres, rec=rec, features=features, dist_funcs=dist_funcs)
def _install(self, args):
    '''
    Install a package from a repo.

    ``args[0]`` is the command name; ``args[1:]`` are package names or
    paths to local ``.spm`` files. Resolves dependencies, reports
    optional/recommended ones, downloads remote packages into the SPM
    cache, then installs everything.
    '''
    if len(args) < 2:
        raise SPMInvocationError('A package must be specified')
    caller_opts = self.opts.copy()
    caller_opts['file_client'] = 'local'
    self.caller = salt.client.Caller(mopts=caller_opts)
    self.client = salt.client.get_local_client(self.opts['conf_file'])
    cache = salt.cache.Cache(self.opts)
    packages = args[1:]
    file_map = {}
    optional = []
    recommended = []
    to_install = []
    for pkg in packages:
        if pkg.endswith('.spm'):
            # Local package file: read its FORMULA to resolve dependencies.
            if self._pkgfiles_fun('path_exists', pkg):
                comps = pkg.split('-')
                comps = os.path.split('-'.join(comps[:-2]))
                pkg_name = comps[-1]
                formula_tar = tarfile.open(pkg, 'r:bz2')
                formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                formula_def = salt.utils.yaml.safe_load(formula_ref)
                file_map[pkg_name] = pkg
                to_, op_, re_ = self._check_all_deps(
                    pkg_name=pkg_name,
                    pkg_file=pkg,
                    formula_def=formula_def
                )
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)
                formula_tar.close()
            else:
                raise SPMInvocationError('Package file {0} not found'.format(pkg))
        else:
            # Repo package: resolve dependencies by name only.
            to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
            to_install.extend(to_)
            optional.extend(op_)
            recommended.extend(re_)
    optional = set(filter(len, optional))
    if optional:
        self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
            '\n\t'.join(optional)
        ))
    recommended = set(filter(len, recommended))
    if recommended:
        self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
            '\n\t'.join(recommended)
        ))
    to_install = set(filter(len, to_install))
    msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
    if not self.opts['assume_yes']:
        self.ui.confirm(msg)
    repo_metadata = self._get_repo_metadata()
    dl_list = {}
    for package in to_install:
        if package in file_map:
            # Already have the file locally; install it straight away.
            self._install_indv_pkg(package, file_map[package])
        else:
            # Pick the best candidate (newest version/release) across repos.
            for repo in repo_metadata:
                repo_info = repo_metadata[repo]
                if package in repo_info['packages']:
                    dl_package = False
                    repo_ver = repo_info['packages'][package]['info']['version']
                    repo_rel = repo_info['packages'][package]['info']['release']
                    repo_url = repo_info['info']['url']
                    if package in dl_list:
                        # Check package version, replace if newer version
                        if repo_ver == dl_list[package]['version']:
                            # Version is the same, check release
                            if repo_rel > dl_list[package]['release']:
                                dl_package = True
                            elif repo_rel == dl_list[package]['release']:
                                # Version and release are the same, give
                                # preference to local (file://) repos
                                if dl_list[package]['source'].startswith('file://'):
                                    if not repo_url.startswith('file://'):
                                        dl_package = True
                        elif repo_ver > dl_list[package]['version']:
                            dl_package = True
                    else:
                        dl_package = True
                    if dl_package is True:
                        # Put together download directory
                        cache_path = os.path.join(
                            self.opts['spm_cache_dir'],
                            repo
                        )
                        # Put together download paths
                        dl_url = '{0}/{1}'.format(
                            repo_info['info']['url'],
                            repo_info['packages'][package]['filename']
                        )
                        out_file = os.path.join(
                            cache_path,
                            repo_info['packages'][package]['filename']
                        )
                        dl_list[package] = {
                            'version': repo_ver,
                            'release': repo_rel,
                            'source': dl_url,
                            'dest_dir': cache_path,
                            'dest_file': out_file,
                        }
    for package in dl_list:
        dl_url = dl_list[package]['source']
        cache_path = dl_list[package]['dest_dir']
        out_file = dl_list[package]['dest_file']
        # Make sure download directory exists
        if not os.path.exists(cache_path):
            os.makedirs(cache_path)
        # Download the package
        if dl_url.startswith('file://'):
            dl_url = dl_url.replace('file://', '')
            shutil.copyfile(dl_url, out_file)
        else:
            with salt.utils.files.fopen(out_file, 'w') as outf:
                outf.write(self._query_http(dl_url, repo_info['info']))
    # First we download everything, then we install
    for package in dl_list:
        out_file = dl_list[package]['dest_file']
        # Kick off the install
        self._install_indv_pkg(package, out_file)
    return
def set_attributes(self, doc, fields, parent_type=None):
    """
    See :meth:`Composite.set_attributes` for parameter definitions.

    Additionally initializes subtype bookkeeping and, when a parent
    struct is given, registers this struct as one of its subtypes.
    """
    if parent_type:
        # Structs can only extend other structs.
        assert isinstance(parent_type, Struct)
    self.subtypes = []
    # These are only set if this struct enumerates subtypes.
    self._enumerated_subtypes = None  # Optional[List[Tuple[str, DataType]]]
    self._is_catch_all = None  # Optional[Bool]
    super(Struct, self).set_attributes(doc, fields, parent_type)
    if self.parent_type:
        # Register with the parent so it can enumerate its subtypes.
        self.parent_type.subtypes.append(self)
def has_field(self, dotted_name):
    """
    Checks whether the layer has the given field name.

    Accepts a dotted name, i.e. layer.sublayer.subsublayer.field.
    """
    current = self
    for segment in dotted_name.split('.'):
        if segment not in current.field_names:
            return False
        current = current.get_field(segment)
    return True
def prepend_status(func):
    """Prepends the output of `func` with the status."""
    @ft.wraps(func)
    def wrapper(self, *args, **kwargs):
        """Wrapper stub."""
        text = func(self, *args, **kwargs)
        if self.status is StepResult.UNSET:
            return text
        return "[{status}]".format(status=self.status.name) + text
    return wrapper
def render_code(self):
    """ Try to load the previous code (if we had a crash or something).

    Falls back to DEFAULT_CODE when the temp file is missing or cannot
    be read. I should allow saving.
    """
    tmp_dir = os.environ.get('TMP', '')
    view_code = os.path.join(tmp_dir, 'view.enaml')
    if os.path.exists(view_code):
        try:
            with open(view_code) as f:
                return f.read()
        except (OSError, IOError, UnicodeDecodeError):
            # Fix: catch only read failures instead of a bare `except:`
            # that would also swallow KeyboardInterrupt/SystemExit.
            pass
    return DEFAULT_CODE
def _connection(self):
    '''
    Return a connection from the pool, creating a brand-new CQL
    connection when the pool is empty.
    '''
    try:
        # Non-blocking get: fall through to creating a fresh connection
        # instead of waiting for one to be returned to the pool.
        return self._pool.get(False)
    except Empty:
        args = [
            self._host, self._port, self._keyspace
        ]
        kwargs = {
            'user' : None,
            'password' : None,
            'cql_version' : self._cql_version,
            'compression' : self._compression,
            'consistency_level' : self._consistency_level,
            'transport' : self._transport,
        }
        if self._credentials:
            # Only set credentials when they were configured.
            kwargs['user'] = self._credentials['user']
            kwargs['password'] = self._credentials['password']
        return cql.connect(*args, **kwargs)
def get_or_none(cls, **filter_kwargs):
    """
    Returns a video or None.
    """
    try:
        return cls.objects.get(**filter_kwargs)
    except cls.DoesNotExist:
        return None
def upload(self):
    """
    Upload all po files to GDocs ignoring conflicts.

    Collects every msgid from the po files into a single ods document
    and sends it to the GDocs spreadsheet.
    """
    ods_path = os.path.join(self.temp_path, LOCAL_ODS)
    try:
        po_to_ods(self.languages, self.locale_root,
                  self.po_files_path, ods_path)
    except (IOError, OSError) as exc:
        raise PODocsError(exc)
    self._upload_file_to_gdoc(ods_path)
    self._clear_temp()
def start(self):
    """ Starts the agent. Among other things, this means connecting
    to the master agent, if configured that way.

    Raises netsnmpAgentException when the connection to snmpd fails.
    """
    # Only (re)connect when not already connected or reconnecting.
    if self._status != netsnmpAgentStatus.CONNECTED \
    and self._status != netsnmpAgentStatus.RECONNECTING:
        self._status = netsnmpAgentStatus.FIRSTCONNECT
        # init_snmp triggers the connection attempt; _status is updated
        # by net-snmp callbacks before this call returns.
        libnsa.init_snmp(b(self.AgentName))
        if self._status == netsnmpAgentStatus.CONNECTFAILED:
            msg = "Error connecting to snmpd instance at \"{0}\" -- " \
                  "incorrect \"MasterSocket\" or snmpd not running?"
            msg = msg.format(self.MasterSocket)
            raise netsnmpAgentException(msg)
def __execute_str(self, instr):
    """Execute STR instruction.

    Copies the value of the first operand into the third operand.
    """
    value = self.read_operand(instr.operands[0])
    self.write_operand(instr.operands[2], value)
    return None
def status(cwd, opts=None, user=None):
    '''
    Show changed files of the given repository

    cwd
        The path to the Mercurial repository

    opts : None
        Any additional options to add to the command line

    user : None
        Run hg as a user other than what the minion runs as

    CLI Example:

    .. code-block:: bash

        salt '*' hg.status /path/to/repo
    '''
    def _status(cwd):
        # Run `hg status` in one repo and group the files by change type.
        cmd = ['hg', 'status']
        if opts:
            for opt in opts.split():
                cmd.append('{0}'.format(opt))
        out = __salt__['cmd.run_stdout'](
            cmd, cwd=cwd, runas=user, python_shell=False)
        # Map hg's single-character status codes to readable names.
        types = {
            'M': 'modified',
            'A': 'added',
            'R': 'removed',
            'C': 'clean',
            '!': 'missing',
            '?': 'not tracked',
            'I': 'ignored',
            ' ': 'origin of the previous file',
        }
        ret = {}
        for line in out.splitlines():
            # Each line is '<code> <filename>'.
            t, f = types[line[0]], line[2:]
            if t not in ret:
                ret[t] = []
            ret[t].append(f)
        return ret
    if salt.utils.data.is_iter(cwd):
        # Multiple repos passed: return a dict keyed by repo path.
        return dict((cwd, _status(cwd)) for cwd in cwd)
    else:
        return _status(cwd)
def remove_mea(mea_name):
    '''Removes the mea design with the given name from the install folder.

    Parameters
    ----------
    mea_name : str
        name of the electrode (yaml file stem) to remove

    Returns
    -------
    None
    '''
    this_dir, this_filename = os.path.split(__file__)
    electrode_dir = os.path.join(this_dir, "electrodes")
    electrodes = [f for f in os.listdir(electrode_dir)]
    for e in electrodes:
        if mea_name in e:
            yaml_path = os.path.join(electrode_dir, mea_name + '.yaml')
            yml_path = os.path.join(electrode_dir, mea_name + '.yml')
            if os.path.isfile(yaml_path):
                os.remove(yaml_path)
                print("Removed: ", yaml_path)
            elif os.path.isfile(yml_path):
                os.remove(yml_path)
                print("Removed: ", yml_path)
    # BUG FIX: previously stripped a hard-coded 5 characters (f[:-5]),
    # which mangled '.yml' names; strip the real extension instead.
    electrodes = [os.path.splitext(f)[0] for f in os.listdir(electrode_dir)]
    print('Available MEA: \n', electrodes)
    return
def get_catalog(self, query):
    """Fetch a parsed THREDDS catalog from the radar server.

    Requests a catalog of radar data files data from the radar server given the
    parameters in `query` and returns a :class:`~siphon.catalog.TDSCatalog` instance.

    Parameters
    ----------
    query : RadarQuery
        The parameters to send to the radar server

    Returns
    -------
    catalog : TDSCatalog
        The catalog of matching data files

    Raises
    ------
    :class:`~siphon.http_util.BadQueryError`
        When the query cannot be handled by the server

    See Also
    --------
    get_catalog_raw
    """
    # TODO: Refactor TDSCatalog so we don't need two requests, or to do URL munging
    try:
        base = self._base[:-1] if self._base[-1] == '/' else self._base
        full_url = '{0}?{1}'.format(base, query)
        return TDSCatalog(full_url)
    except ET.ParseError:
        raise BadQueryError(self.get_catalog_raw(query))
def list(self):
    '''
    Create a dictionary with the details for the updates in the collection.

    Returns:

        dict: Details about each update, keyed by update GUID:

        .. code-block:: cfg

            {'<GUID>': {'Title': <title>,
                        'Type': <update type>,
                        'Description': <description>,
                        'Downloaded': <has the update been downloaded>,
                        'Installed': <has the update been installed>,
                        'Mandatory': <is the update mandatory>,
                        'UserInput': <is user input required>,
                        'EULAAccepted': <has the EULA been accepted>,
                        'Severity': <update severity>,
                        'NeedsReboot': <is the update awaiting reboot>,
                        'RebootBehavior': <will the update require a reboot>,
                        'KBs': ['<KB article ids>'],
                        'Categories': ['<category 1>', ...]}}

        Returns the string 'Nothing to return' when the collection is empty.

    Code Example:

    .. code-block:: python

        import salt.utils.win_update
        updates = salt.utils.win_update.Updates()
        updates.list()
    '''
    # https://msdn.microsoft.com/en-us/library/windows/desktop/aa386099(v=vs.85).aspx
    if self.count() == 0:
        return 'Nothing to return'
    log.debug('Building a detailed report of the results.')
    # Build a dictionary containing details for each update
    results = {}
    for update in self.updates:
        results[update.Identity.UpdateID] = {
            'guid': update.Identity.UpdateID,
            'Title': six.text_type(update.Title),
            'Type': self.update_types[update.Type],
            'Description': update.Description,
            'Downloaded': bool(update.IsDownloaded),
            'Installed': bool(update.IsInstalled),
            'Mandatory': bool(update.IsMandatory),
            'EULAAccepted': bool(update.EulaAccepted),
            'NeedsReboot': bool(update.RebootRequired),
            'Severity': six.text_type(update.MsrcSeverity),
            'UserInput':
                bool(update.InstallationBehavior.CanRequestUserInput),
            'RebootBehavior':
                self.reboot_behavior[
                    update.InstallationBehavior.RebootBehavior],
            'KBs': ['KB' + item for item in update.KBArticleIDs],
            'Categories': [item.Name for item in update.Categories]
        }
    return results
def pop_option(ident, argv=None):
    """A lame routine for grabbing command-line arguments. Returns a boolean
    indicating whether the option was present. If it was, it's removed from
    the argument string. Because of the lame behavior, options can't be
    combined, and non-boolean options aren't supported. Operates on sys.argv
    by default.

    Note that this will proceed merrily if argv[0] matches your option.
    """
    if argv is None:
        from sys import argv
    # Single-character idents become short options, others long options.
    flag = ('-' if len(ident) == 1 else '--') + ident
    present = flag in argv
    if present:
        argv.remove(flag)
    return present
def int_to_var_bytes(x):
    """Converts an integer to a bitcoin variable length integer as a bytearray

    :param x: the integer to convert
    """
    # CompactSize encoding: one byte up to 0xfc, then a marker byte
    # (253/254/255) followed by a little-endian 2/4/8-byte value.
    if x < 0xfd:
        return intbytes.to_bytes(x, 1)
    if x <= 0xffff:
        return bytearray([253]) + intbytes.to_bytes(x, 2)[::-1]
    if x <= 0xffffffff:
        return bytearray([254]) + intbytes.to_bytes(x, 4)[::-1]
    return bytearray([255]) + intbytes.to_bytes(x, 8)[::-1]
def firstChild(self):
    '''
    firstChild - property, Get the first child block, text or tag.

    @return <str/AdvancedTag/None> - The first child block, or None if no child blocks
    '''
    blocks = object.__getattribute__(self, 'blocks')
    # First block is empty string for indent, but don't hardcode incase that changes
    if blocks[0] == '':
        firstIdx = 1
    else:
        firstIdx = 0
    if len(blocks) == firstIdx:
        # No first child
        return None
    # BUG FIX: previously returned blocks[1] unconditionally, which skipped
    # the real first child whenever blocks[0] was not the indent string.
    return blocks[firstIdx]
def handle_response(self, response, **kwargs):
    """Takes the given response and tries kerberos-auth, as needed.

    Retries 401 responses at most twice by running the Negotiate handshake
    (``handle_401``) and re-inspecting the retried response recursively;
    non-401 responses are passed to ``handle_other``.

    :param response: the HTTP response to inspect.
    :param kwargs: forwarded to the retried request; the internal
        ``num_401s`` key tracks how many 401s this retry chain has seen.
    :returns: the final response.
    """
    num_401s = kwargs.pop('num_401s', 0)

    # Check if we have already tried to get the CBT data value
    if not self.cbt_binding_tried and self.send_cbt:
        # If we haven't tried, try getting it now
        cbt_application_data = _get_channel_bindings_application_data(response)
        if cbt_application_data:
            # Only the latest version of pykerberos has this method available
            try:
                self.cbt_struct = kerberos.channelBindings(application_data=cbt_application_data)
            except AttributeError:
                # Using older version set to None
                self.cbt_struct = None
        # Regardless of the result, set tried to True so we don't waste time next time
        self.cbt_binding_tried = True

    if self.pos is not None:
        # Rewind the file position indicator of the body to where
        # it was to resend the request.
        response.request.body.seek(self.pos)
    if response.status_code == 401 and num_401s < 2:
        # 401 Unauthorized. Handle it, and if it still comes back as 401,
        # that means authentication failed.
        _r = self.handle_401(response, **kwargs)
        log.debug("handle_response(): returning %s", _r)
        log.debug("handle_response() has seen %d 401 responses", num_401s)
        num_401s += 1
        # Recurse on the retried response; num_401s caps the depth at two.
        return self.handle_response(_r, num_401s=num_401s, **kwargs)
    elif response.status_code == 401 and num_401s >= 2:
        # Still receiving 401 responses after attempting to handle them.
        # Authentication has failed. Return the 401 response.
        log.debug("handle_response(): returning 401 %s", response)
        return response
    else:
        _r = self.handle_other(response)
        log.debug("handle_response(): returning %s", _r)
        return _r
def security_rule_delete(security_rule, security_group, resource_group,
                         **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Delete a security rule within a specified security group.

    :param security_rule: The name of the security rule to delete.

    :param security_group: The network security group containing the
        security rule.

    :param resource_group: The resource group name assigned to the
        network security group.

    Returns True on success, False on failure (the Azure error is logged).

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_network.security_rule_delete testrule1 testnsg testgroup

    '''
    result = False
    netconn = __utils__['azurearm.get_client']('network', **kwargs)
    try:
        secrule = netconn.security_rules.delete(
            network_security_group_name=security_group,
            resource_group_name=resource_group,
            security_rule_name=security_rule
        )
        # delete() returns a poller; block until the long-running operation
        # completes before reporting success.
        secrule.wait()
        result = True
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
    return result
def image_files_list(self, limit=-1, offset=-1):
    """Retrieve list of all images in the data store.

    Parameters
    ----------
    limit : int
        Limit number of results in returned object listing
    offset : int
        Set offset in list (order as defined by object store)

    Returns
    -------
    ObjectListing
        Listing of image handles
    """
    # Delegate straight to the underlying image object store.
    listing = self.images.list_objects(limit=limit, offset=offset)
    return listing
def run_top_task(self, task_name=None, sort=None, **kwargs):
    """Finds and runs a pending task that in the first of the sorting list.

    Atomically claims the best-matching pending task (status -> 'running'),
    injects its hyper parameters into ``globals()``, executes the stored
    Python script, saves the requested result keys back into the task
    document and marks it 'finished'.  On failure the status is reset to
    'pending' so another worker can retry.

    Parameters
    -----------
    task_name : str
        The task name.
    sort : List of tuple
        PyMongo sort comment, search "PyMongo find one sorting" and `collection level operations <http://api.mongodb.com/python/current/api/pymongo/collection.html>`__ for more details.
    kwargs : other parameters
        Users customized parameters such as description, version number.

    Examples
    ---------
    Monitors the database and pull tasks to run
    >>> while True:
    >>>     print("waiting task from distributor")
    >>>     db.run_top_task(task_name='mnist', sort=[("time", -1)])
    >>>     time.sleep(1)

    Returns
    --------
    boolean : True for success, False for fail.
    """
    if not isinstance(task_name, str):  # is None:
        raise Exception("task_name should be string")
    self._fill_project_info(kwargs)
    kwargs.update({'status': 'pending'})

    # Atomically claim the task (find + set status to 'running') so that
    # concurrent workers cannot pick up the same task.
    task = self.db.Task.find_one_and_update(kwargs, {'$set': {'status': 'running'}}, sort=sort)

    try:
        # get task info e.g. hyper parameters, python script
        if task is None:
            logging.info("[Database] Find Task FAIL: key: {} sort: {}".format(task_name, sort))
            return False
        else:
            logging.info("[Database] Find Task SUCCESS: key: {} sort: {}".format(task_name, sort))
            _datetime = task['time']
            _script = task['script']
            _id = task['_id']
            _hyper_parameters = task['hyper_parameters']
            _saved_result_keys = task['saved_result_keys']

            logging.info("  hyper parameters:")
            for key in _hyper_parameters:
                # Hyper parameters are injected as module-level globals so
                # the exec'd script can read them by name.
                globals()[key] = _hyper_parameters[key]
                logging.info("    {}: {}".format(key, _hyper_parameters[key]))

            # run task
            s = time.time()
            logging.info("[Database] Start Task: key: {} sort: {} push time: {}".format(task_name, sort, _datetime))
            _script = _script.decode('utf-8')
            # SECURITY NOTE(review): exec runs arbitrary Python fetched from
            # the database — only safe when the database is fully trusted.
            with tf.Graph().as_default():  # as graph: # clear all TF graphs
                exec(_script, globals())

            # set status to finished
            _ = self.db.Task.find_one_and_update({'_id': _id}, {'$set': {'status': 'finished'}})

            # return results
            __result = {}
            for _key in _saved_result_keys:
                logging.info("  result: {}={} {}".format(_key, globals()[_key], type(globals()[_key])))
                __result.update({"%s" % _key: globals()[_key]})
            _ = self.db.Task.find_one_and_update(
                {
                    '_id': _id
                }, {'$set': {
                    'result': __result
                }}, return_document=pymongo.ReturnDocument.AFTER
            )
            logging.info(
                "[Database] Finished Task: task_name - {} sort: {} push time: {} took: {}s".
                format(task_name, sort, _datetime,
                       time.time() - s)
            )
            return True
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        logging.info("{} {} {} {} {}".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e))
        logging.info("[Database] Fail to run task")
        # if fail, set status back to pending
        # NOTE(review): if the failure happened before ``_id`` was assigned
        # (e.g. in find_one_and_update itself), this line raises NameError.
        _ = self.db.Task.find_one_and_update({'_id': _id}, {'$set': {'status': 'pending'}})
        return False
def fix(self):
    """
    Fix the parameters with the coordinates (either ra,dec or l,b depending on how the class
    has been instanced)
    """
    # Freeze whichever coordinate pair this instance was built with.
    if self._coord_type == 'equatorial':
        coordinates = (self.ra, self.dec)
    else:
        coordinates = (self.l, self.b)
    for parameter in coordinates:
        parameter.fix = True
def pretty(d, indent=0):
    """Recursively print a nested dict, one key per line, indented one space
    per nesting level; leaf values appear one level deeper than their key.

    Fix: leaf values were previously written to stderr while keys went to
    stdout, interleaving two streams unpredictably; everything now goes to
    stdout as the docstring ("print") implies.
    """
    for key, value in d.items():
        print(' ' * indent + str(key))
        if isinstance(value, dict):
            pretty(value, indent + 1)
        else:
            print(' ' * (indent + 1) + str(value))
def input_list_parser(infile_list):
    """Always return a list of files with varying input.

    >>> input_list_parser(['/path/to/folder/'])
    ['/path/to/folder/file1.txt', '/path/to/folder/file2.txt', '/path/to/folder/file3.txt']
    >>> input_list_parser(['/path/to/file.txt'])
    ['/path/to/file.txt']
    >>> input_list_parser(['file1.txt'])
    ['file1.txt']

    Args:
        infile_list: List of arguments

    Returns:
        list: Standardized list of files

    """
    final_list_of_files = []

    for x in infile_list:
        # If the input is a folder, expand it to its contents.
        # BUG FIX: previously this os.chdir()'d into the folder and globbed
        # '*', which mutated the process working directory (breaking later
        # relative-path inputs) and returned bare file names instead of the
        # full paths promised by the docstring.
        if op.isdir(x):
            final_list_of_files.extend(glob.glob(op.join(x, '*')))

        # If the input is a file, keep it as-is.
        if op.isfile(x):
            final_list_of_files.append(x)

    return final_list_of_files
def _validate_targets(self, targets):
    """turn any valid targets argument into a list of integer ids

    Accepts None (meaning all engines), a single int/str identity, or a
    list of them; raw string identities are mapped to integer ids via
    ``self.by_ident``.  Raises IndexError for unknown engines or when no
    engines are registered.

    NOTE(review): relies on the Python 2 ``unicode`` builtin and a
    ``cast_bytes`` helper from elsewhere in the file — not Python 3
    compatible as written.
    """
    if targets is None:
        # default to all
        return self.ids

    if isinstance(targets, (int,str,unicode)):
        # only one target specified
        targets = [targets]
    _targets = []
    for t in targets:
        # map raw identities to ids
        if isinstance(t, (str,unicode)):
            # unknown identities fall through unchanged and are caught by
            # the bad_targets check below
            t = self.by_ident.get(cast_bytes(t), t)
        _targets.append(t)
    targets = _targets
    bad_targets = [ t for t in targets if t not in self.ids ]
    if bad_targets:
        raise IndexError("No Such Engine: %r" % bad_targets)
    if not targets:
        raise IndexError("No Engines Registered")
    return targets
def _getMultiClassMap(self):
""" Relief algorithms handle the scoring updates a little differently for data with multiclass outcomes. In ReBATE we implement multiclass scoring in line with
the strategy described by Kononenko 1994 within the RELIEF-F variant which was suggested to outperform the RELIEF-E multiclass variant. This strategy weights
score updates derived from misses of different classes by the class frequency observed in the training data. 'The idea is that the algorithm should estimate the
ability of attributes to separate each pair of classes regardless of which two classes are closest to each other'. In this method we prepare for this normalization
by creating a class dictionary, and storing respective class frequencies. This is needed for ReliefF multiclass score update normalizations. """
mcmap = dict()
for i in range(self._datalen):
if(self._y[i] not in mcmap):
mcmap[self._y[i]] = 0
else:
mcmap[self._y[i]] += 1
for each in self._label_list:
mcmap[each] = mcmap[each]/float(self._datalen)
return mcmap | 0.005952 |
def geo_lines(self, edges):
    """
    Utility function to convert a list of edges into a list of
    :class:`openquake.hazardlib.geo.Line` instances.

    :param edges: a list of nodes, each describing an edge
    """
    lines = []
    for edge_node in edges:
        # context() ties any parsing error back to the source file/node.
        with context(self.fname, edge_node):
            triples = split_coords_3d(~edge_node.LineString.posList)
            points = [geo.Point(*xyz) for xyz in triples]
            lines.append(geo.Line(points))
    return lines
def reconstructed_data_vector_from_blurred_mapping_matrix_and_solution_vector(blurred_mapping_matrix, solution_vector):
    """ Compute the reconstructed hyper vector from the blurred mapping matrix *f* and solution vector *S*.

    The reconstruction is the matrix-vector product f.S: each datum value is
    the sum over pixelization pixels of the solution value weighted by how
    strongly that pixel maps onto the datum.

    Parameters
    -----------
    blurred_mapping_matrix : ndarray
        The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels.
    solution_vector : ndarray
        The inversion's solution vector, one value per pixelization pixel.
    """
    # PERFORMANCE: replaces an O(rows*cols) explicit Python double loop with
    # the equivalent BLAS-backed matrix-vector product.
    return np.dot(blurred_mapping_matrix, solution_vector)
def next_state(self):
    """This is a method that will be called when the time remaining ends.
    The current state can be: roasting, cooling, idle, sleeping, connecting,
    or unkown."""
    state = self.roaster.get_roaster_state()
    if state == 'roasting':
        # Roast finished: switch to a fixed 20-second cool-down phase.
        self.roaster.time_remaining = 20
        self.roaster.cool()
    elif state == 'cooling':
        # Cool-down finished: return the roaster to idle.
        self.roaster.idle()
def get_relative_path(self, domain, locale):
    """
    Gets the relative file path using the template.

    @type domain: str
    @param domain: The domain
    @type locale: str
    @param locale: The locale
    @rtype: string
    @return: The relative file path
    """
    template = self.relative_path_template
    # The template provides {domain}, {locale} and {extension} fields.
    return template.format(domain=domain,
                           locale=locale,
                           extension=self.get_extension())
def buildcontent(self):
    """build HTML content only, no header or body tags

    Serializes the chart's options and data to JSON (via the custom
    HighchartsEncoder) and renders the content template into
    ``self._htmlcontent`` as utf-8 encoded bytes.
    """
    self.buildcontainer()
    # NOTE(review): HighchartsEncoder presumably handles Highcharts-specific
    # values that plain json cannot (e.g. JS function literals) — confirm.
    self.option = json.dumps(self.options, cls = HighchartsEncoder)
    self.setoption = json.dumps(self.setOptions, cls = HighchartsEncoder)
    self.data = json.dumps(self.data_temp, cls = HighchartsEncoder)

    # DEM 2017/04/25: Make 'data' available as an array
    # ... this permits jinja2 array access to each data definition
    # ... which is useful for looping over multiple data sources
    self.data_list = [json.dumps(x, cls = HighchartsEncoder) for x in self.data_temp]

    if self.drilldown_flag:
        self.drilldown_data = json.dumps(self.drilldown_data_temp, cls = HighchartsEncoder)
    self._htmlcontent = self.template_content_highcharts.render(chart=self).encode('utf-8')
def readLocationElement(self, locationElement):
    """ Format 0 location reader.

    Parses a location element's <dimension> children into a Location
    mapping: dimension name -> xvalue, or (xvalue, yvalue) when a yvalue
    attribute is present.  Unparsable yvalues are silently ignored.
    """
    loc = Location()
    for dimensionElement in locationElement.findall(".dimension"):
        dimName = dimensionElement.attrib.get("name")
        xValue = yValue = None
        try:
            xValue = dimensionElement.attrib.get('xvalue')
            xValue = float(xValue)
        except ValueError:
            if self.logger:
                # BUG FIX: this logged "KeyError ... %3.3f" — but the
                # exception is a ValueError and xValue is still a string
                # here, so the %f placeholder broke the log formatting.
                self.logger.info("ValueError in readLocation xValue %r", xValue)
        try:
            yValue = dimensionElement.attrib.get('yvalue')
            if yValue is not None:
                yValue = float(yValue)
        except ValueError:
            # yvalue is optional; ignore values that fail to parse.
            pass
        if yValue is not None:
            loc[dimName] = (xValue, yValue)
        else:
            loc[dimName] = xValue
    return loc
def search(self, search_term, num_results, **kwargs):
    """Gets x number of Google image result urls for
    a given search term.

    Pages through the Custom Search API (which caps items per call) until at
    least ``num_results`` links are collected, then truncates to exactly
    ``num_results``.  Raises ZeroResultsException if nothing was found.

    Arguments
        search_term: str
            tearm to search for
        num_results: int
            number of url results to return

    return ['url','url']
    """
    results = []
    # The CSE 'start' parameter is the 1-based index of the first result.
    start = 1
    try:
        # BUG FIX: the loop condition was `<=` with the truncation INSIDE
        # the loop, so len(results) could never exceed num_results and the
        # loop never terminated on success.
        while len(results) < num_results:
            search_results = self.service.cse().list(
                q=search_term,
                cx=self.cse_id,
                searchType="image",
                fileType=self.file_type,
                start=start,
                **kwargs).execute()
            items = search_results['items']
            results.extend(r['link'] for r in items)
            # BUG FIX: advance by the number of items returned, not by
            # len(search_results) — the number of KEYS in the response
            # dict — which skipped/overlapped result pages.
            start += len(items)
    except KeyError as e:
        # Missing 'items'/'link' means the API ran out of results.
        logger.warning('Exception: %s', e)
    if len(results) == 0:
        raise ZeroResultsException("No images found")
    return results[:num_results]
def syscall_direct(*events):
    '''
    Directly process these events. This should never be used for normal events.
    '''
    def _syscall(scheduler, processor):
        # Feed each pre-supplied event straight to the processor, bypassing
        # normal scheduling (the scheduler argument is intentionally unused).
        for event in events:
            processor(event)
    return _syscall
def create_video(video_data):
    """
    Called on to create Video objects in the database

    create_video is used to create Video objects whose children are EncodedVideo
    objects which are linked to Profile objects. This is an alternative to the HTTP
    requests so it can be used internally. The VideoSerializer is used to
    deserialize this object. If there are duplicate profile_names, the entire
    creation will be rejected. If the profile is not found in the database, the
    video will not be created.

    Args:
        video_data (dict):
         {
                url: api url to the video
                edx_video_id: ID of the video
                duration: Length of video in seconds
                client_video_id: client ID of video
                encoded_video: a list of EncodedVideo dicts
                    url: url of the video
                    file_size: size of the video in bytes
                    profile: ID of the profile
                courses: Courses associated with this video
                image: poster image file name for a particular course
        }

    Raises:
        Raises ValCannotCreateError if the video cannot be created.

    Returns the successfully created Video object
    """
    serializer = VideoSerializer(data=video_data)
    # Guard clause: reject invalid payloads before touching the database.
    if not serializer.is_valid():
        raise ValCannotCreateError(serializer.errors)
    serializer.save()
    return video_data.get("edx_video_id")
def image(self, well_row, well_column, field_row, field_column):
    """Get path of specified image.

    Parameters
    ----------
    well_row : int
        Starts at 0. Same as --U in files.
    well_column : int
        Starts at 0. Same as --V in files.
    field_row : int
        Starts at 0. Same as --Y in files.
    field_column : int
        Starts at 0. Same as --X in files.

    Returns
    -------
    string
        Path to image or empty string if image is not found.
    """
    # Attributes: u/v hold well column/row, x/y hold field column/row.
    for candidate in self.images:
        if (attribute(candidate, 'u') == well_column
                and attribute(candidate, 'v') == well_row
                and attribute(candidate, 'x') == field_column
                and attribute(candidate, 'y') == field_row):
            return candidate
    return ''
def from_xdr_object(cls, op_xdr_object):
    """Creates a :class:`PathPayment` object from an XDR Operation
    object.

    Decodes the optional per-operation source account, the destination
    account, the send/destination assets and amounts, and the intermediate
    asset path from the ``pathPaymentOp`` body.
    """
    # sourceAccount is an optional XDR field (a 0/1-element list);
    # absent means no per-operation source override.
    if not op_xdr_object.sourceAccount:
        source = None
    else:
        source = encode_check(
            'account', op_xdr_object.sourceAccount[0].ed25519).decode()
    destination = encode_check(
        'account',
        op_xdr_object.body.pathPaymentOp.destination.ed25519).decode()
    send_asset = Asset.from_xdr_object(
        op_xdr_object.body.pathPaymentOp.sendAsset)
    dest_asset = Asset.from_xdr_object(
        op_xdr_object.body.pathPaymentOp.destAsset)
    # Amounts pass through Operation.from_xdr_amount — presumably converting
    # from XDR's scaled-integer representation; confirm in Operation.
    send_max = Operation.from_xdr_amount(
        op_xdr_object.body.pathPaymentOp.sendMax)
    dest_amount = Operation.from_xdr_amount(
        op_xdr_object.body.pathPaymentOp.destAmount)
    path = []
    if op_xdr_object.body.pathPaymentOp.path:
        for x in op_xdr_object.body.pathPaymentOp.path:
            path.append(Asset.from_xdr_object(x))
    return cls(
        source=source,
        destination=destination,
        send_asset=send_asset,
        send_max=send_max,
        dest_asset=dest_asset,
        dest_amount=dest_amount,
        path=path)
def get_interfaces_status():
    """Get the status of all the interfaces.

    Instantiates each driver class in ``CLASSES`` and maps its name to the
    result of its ``get_status()`` call.  Any driver error propagates to the
    caller unchanged.

    :returns: dict mapping driver name to its status.
    """
    # The previous ``try: ... except Exception: raise`` wrapper was a no-op
    # (it re-raised everything unmodified), so it has been removed.
    status = {}
    for driver_class in CLASSES:
        instance = driver_class()
        status[instance.get_name()] = instance.get_status()
    return status
def fork_exec(cmd_list, input_data=None):
    """
    Like the subprocess.check_*() helper functions, but tailored to bang.

    ``cmd_list`` is the command to run, and its arguments as a list of strings.

    ``input_data`` is the optional data to pass to the command's stdin.

    On success, returns the output (i.e. stdout) of the remote command.

    On failure, raises BangError with the command's stderr.
    """
    proc = subprocess.Popen(
            cmd_list,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            )
    stdout, stderr = proc.communicate(input_data)
    if proc.returncode != 0:
        raise bang.BangError('ret: %d, stdout: ^%s^, stderr: ^%s^' %
                (proc.returncode, stdout, stderr))
    return stdout
def get_child_by_name(self, inst_name):
    """
    Returns an immediate child :class:`~Node` whose instance name matches ``inst_name``

    Returns ``None`` if ``inst_name`` does not match

    Parameters
    ----------
    inst_name: str
        Name of immediate child to get

    Returns
    -------
    :class:`~Node` or None
        Child Node. None if not found.
    """
    match = self.inst.get_child_by_name(inst_name)
    # Wrap the raw component instance in a Node, or propagate the miss.
    return None if match is None else Node._factory(match, self.env, self)
def create_shipfile(target, source, env):
    """Create a .ship file with all dependencies.

    SCons builder action: loads the recipe described by ``source[0]``
    (plus any custom steps from ``env['CUSTOM_STEPS']``) and archives it
    into ``target[0]``.
    """
    source_dir = os.path.dirname(str(source[0]))
    # Strips the last 5 characters — assumes a 5-char extension such as
    # ".yaml"; TODO confirm against the builder's source suffix.
    recipe_name = os.path.basename(str(source[0]))[:-5]

    resman = RecipeManager()
    resman.add_recipe_actions(env['CUSTOM_STEPS'])
    # Whitelist so only this one recipe file is loaded from the folder.
    resman.add_recipe_folder(source_dir, whitelist=[os.path.basename(str(source[0]))])

    recipe = resman.get_recipe(recipe_name)
    recipe.archive(str(target[0]))
def value(self):
    """ returns the class as a dictionary

    Only keys whitelisted in ``__allowed_keys`` are exported; each maps to
    the instance attribute stored under a leading-underscore name.
    Attributes that are None are treated as unset and omitted.
    """
    val = {}
    # NOTE: __allowed_keys is name-mangled, so it resolves against the
    # defining class's private attribute.
    for k in self.__allowed_keys:
        v = getattr(self, "_" + k)
        if v is not None:
            val[k] = v
    return val
def line_props(event):
    """
    Get information for a pick event on a Line2D artist (as created with
    ``plot``.)

    This will yield x and y values that are interpolated between vertices
    (instead of just being the position of the mouse) or snapped to the nearest
    vertex if only the vertices are drawn.

    Parameters
    -----------
    event : PickEvent
        The pick event to process

    Returns
    --------
    props : dict
        A dict with keys: x & y
    """
    xclick, yclick = event.mouseevent.xdata, event.mouseevent.ydata
    i = event.ind[0]
    xorig, yorig = event.artist.get_xydata().T

    # For points-only lines, snap to the nearest point.
    linestyle = event.artist.get_linestyle()
    if linestyle in ['none', ' ', '', None, 'None']:
        return dict(x=xorig[i], y=yorig[i])

    # ax.step is actually implemented as a Line2D with a different drawstyle...
    # Take the picked vertex plus its neighbors, then expand them into the
    # points actually drawn for the artist's step drawstyle.
    xs_data = xorig[max(i - 1, 0) : i + 2]
    ys_data = yorig[max(i - 1, 0) : i + 2]

    drawstyle = event.artist.drawStyles[event.artist.get_drawstyle()]
    if drawstyle == "_draw_lines":
        pass
    elif drawstyle == "_draw_steps_pre":
        xs_data = _interleave(xs_data, xs_data[:-1])
        ys_data = _interleave(ys_data, ys_data[1:])
    elif drawstyle == "_draw_steps_post":
        xs_data = _interleave(xs_data, xs_data[1:])
        ys_data = _interleave(ys_data, ys_data[:-1])
    elif drawstyle == "_draw_steps_mid":
        mid_xs = (xs_data[:-1] + xs_data[1:]) / 2
        xs_data = _interleave(xs_data, np.column_stack([mid_xs, mid_xs]))
        ys_data = _interleave(
            ys_data, np.column_stack([ys_data[:-1], ys_data[1:]]))
    else:
        raise ValueError(
            "Unknown drawstyle: {}".format(event.artist.get_drawstyle()))

    # The artist transform may be different from the axes transform (e.g.,
    # axvline/axhline)
    # Interpolate in screen space so the snapping reflects what the user
    # sees, then map the result back to data coordinates.
    artist_transform = event.artist.get_transform()
    axes_transform = event.artist.axes.transData
    xs_screen, ys_screen = (
        artist_transform.transform(
            np.column_stack([xs_data, ys_data]))).T
    xclick_screen, yclick_screen = (
        axes_transform.transform([xclick, yclick]))
    x_screen, y_screen = _interpolate_line(xs_screen, ys_screen,
                                           xclick_screen, yclick_screen)
    x, y = axes_transform.inverted().transform([x_screen, y_screen])

    return dict(x=x, y=y)
def validate_column_specs(events, next_value_columns, previous_value_columns):
    """
    Verify that the columns of ``events`` can be used by an EventsLoader to
    serve the BoundColumns described by ``next_value_columns`` and
    ``previous_value_columns``.

    Raises ValueError listing the missing columns if any are absent.
    """
    expected = required_event_fields(next_value_columns,
                                     previous_value_columns)
    actual = set(events.columns)
    missing = expected - actual
    if not missing:
        return
    raise ValueError(
        "EventsLoader missing required columns {missing}.\n"
        "Got Columns: {received}\n"
        "Expected Columns: {required}".format(
            missing=sorted(missing),
            received=sorted(actual),
            required=sorted(expected),
        )
    )
def setData(self, index, value, role=QtCore.Qt.EditRole):
    """Reimplemented from QtCore.QAbstractItemModel

    You can only set the value.

    Only edits on column 1 with the EditRole are accepted; the value is
    stringified and re-parsed through the configobj so list values etc.
    are stored in their parsed form (any inline comment is discarded).

    :param index: the index to edit, column should be 1.
    :type index: :class:`PySide.QtCore.QModelIndex`
    :param value: the new value for the configobj
    :type value: object
    :param role: Optional - the ItemDataRole. Default is QtCore.Qt.EditRole
    :type role: QtCore.Qt.ItemDataRole
    :returns: True if index was edited, False if index could not be edited.
    :rtype: bool
    :raises: None
    """
    if index.isValid():
        if role == QtCore.Qt.EditRole:
            if index.column() == 1:
                p = index.internalPointer()
                k = self.get_key(p, index.row())
                # we could just set the value
                # BUT for listvalues etc it will not work
                strval = self._val_to_str(value)
                # _handle_value will parse it correctly
                # comments gets lost
                (parsedval, comment) = self._conf._handle_value(strval)
                p[k] = parsedval
                self.dataChanged.emit(index, index)
                return True
    return False
def QA_fetch_get_hkfund_list(ip=None, port=None):
    """Fetch the list of Hong Kong funds (extension market code 49).

    Lazily populates the module-level ``extension_market_list`` cache on
    first call, then filters it to market 49.

    Keyword Arguments:
        ip {[type]} -- server ip (default: {None})  # NOTE(review): unused in this body — confirm kept for API symmetry
        port {[type]} -- server port (default: {None})  # NOTE(review): unused in this body

    # Hong Kong markets (HKMARKET) code table:
    #   27  5  Hong Kong indices        FH
    #   31  2  Hong Kong main board     KH
    #   48  2  Hong Kong GEM board      KG
    #   49  2  Hong Kong funds          KT
    #   43  1  B-share to H-share       HB
    """
    global extension_market_list
    extension_market_list = QA_fetch_get_extensionmarket_list(
    ) if extension_market_list is None else extension_market_list
    return extension_market_list.query('market==49')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.