code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def load_adjusted_array(self, domain, columns, dates, sids, mask):
    """Load data from our stored baseline.

    Only a single column may be loaded; raises ValueError otherwise.
    """
    if len(columns) != 1:
        raise ValueError(
            "Can't load multiple columns with DataFrameLoader"
        )
    column = columns[0]
    self._validate_input_column(column)
    # Map requested dates/sids onto our stored index; -1 marks requested
    # labels that are not present in the baseline.
    date_indexer = self.dates.get_indexer(dates)
    assets_indexer = self.assets.get_indexer(sids)
    good_dates = (date_indexer != -1)
    good_assets = (assets_indexer != -1)
    data = self.baseline[ix_(date_indexer, assets_indexer)]
    mask = (good_assets & as_column(good_dates)) & mask
    # Requested-but-unknown (date, asset) pairs get the column's missing value.
    data[~mask] = column.missing_value
    return {
        column: AdjustedArray(
            data=data,
            adjustments=self.format_adjustments(dates, sids),
            missing_value=column.missing_value,
        ),
    }
def template_str(tem, queue=False, **kwargs):
    """Execute the information stored in a string from an sls template.

    CLI Example:

    .. code-block:: bash

        salt '*' state.template_str '<Template String>'
    """
    conflict = _check_queue(queue, kwargs)
    if conflict is not None:
        return conflict
    opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
    try:
        # Proxy minions expose __proxy__; on regular minions the name is
        # undefined and raises NameError, handled below.
        st_ = salt.state.State(opts,
                               proxy=__proxy__,
                               initial_pillar=_get_initial_pillar(opts))
    except NameError:
        st_ = salt.state.State(opts, initial_pillar=_get_initial_pillar(opts))
    ret = st_.call_template_str(tem)
    _set_retcode(ret)
    return ret
def pack(o, default=encode,
         encoding='utf-8', unicode_errors='strict', use_single_float=False,
         autoreset=1, use_bin_type=1):
    """Pack an object and return the packed bytes."""
    packer = Packer(
        default=default,
        encoding=encoding,
        unicode_errors=unicode_errors,
        use_single_float=use_single_float,
        autoreset=autoreset,
        use_bin_type=use_bin_type,
    )
    return packer.pack(o)
def update_global_variables_list_store(self):
    """Update the global-variable list store.

    Triggered after creation or deletion of a variable has taken place.
    """
    self.list_store_iterators = {}
    self.list_store.clear()
    # Hoist the repeated attribute chain out of the loop.
    gvm = self.model.global_variable_manager
    keys = gvm.get_all_keys()
    keys.sort()
    for key in keys:
        # `row_iter` instead of `iter`: don't shadow the builtin.
        row_iter = self.list_store.append([
            key,
            gvm.get_data_type(key).__name__,
            str(gvm.get_representation(key)),
            str(gvm.is_locked(key)),
        ])
        self.list_store_iterators[key] = row_iter
def main():
    """Main method of the script."""
    parser = __build_option_parser()
    args = parser.parse_args()
    analyze_ws = AnalyzeWS(args)
    try:
        analyze_ws.set_file(args.file_[0])
    except IOError:
        # print() call form: the original used the Python 2 print statement,
        # which is a SyntaxError under Python 3.
        print('IOError raised while reading file. Exiting!')
        sys.exit(3)
    if args.to_file or args.to_browser:
        analyze_ws.to_file_mode()
        if args.to_browser:
            analyze_ws.to_browser_mode()
    else:
        analyze_ws.interactive_mode()
def Process(self, parser_mediator, **kwargs):
    """Evaluate if this is the correct plugin and process data accordingly.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        kwargs (dict[str, object]): plugin-specific evaluation arguments.

    Raises:
        ValueError: when there are unused keyword arguments.
    """
    if not kwargs:
        return
    unused = ', '.join(kwargs.keys())
    raise ValueError('Unused keyword arguments: {0:s}.'.format(unused))
def add(ctx, alias, mapping, backend):
    """Add a new alias to your configuration file."""
    if not backend:
        # With several configured backends we cannot guess which one the
        # alias belongs to; force the user to pick one.
        backends_list = ctx.obj['settings'].get_backends()
        if len(backends_list) > 1:
            choices = ", ".join(dict(backends_list).keys())
            raise click.UsageError(
                "You're using more than 1 backend. Please set the backend to "
                "add the alias to with the --backend option (choices are %s)" %
                choices
            )
    add_mapping(ctx, alias, mapping, backend)
def recompile(self, nick=None, new_nick=None, **kw):
    """Recompile regexps when the bot's own nick changes."""
    if nick.nick != self.bot.nick:
        return
    self.bot.config['nick'] = new_nick
    self.bot.recompile()
def _get_account_number(self, token, uuid):
    """Get the Fido account number for the given session token and uuid.

    Raises:
        PyFidoError: when the request fails or the response JSON does not
            contain an account number.
    """
    data = {"accessToken": token,
            "uuid": uuid}
    # Old-style asyncio coroutine (yield from).
    try:
        raw_res = yield from self._session.post(ACCOUNT_URL,
                                                data=data,
                                                headers=self._headers,
                                                timeout=self._timeout)
    except OSError:
        raise PyFidoError("Can not get account number")
    try:
        json_content = yield from raw_res.json()
        # Drill into the nested payload defensively; a missing key yields
        # None rather than raising.
        account_number = json_content\
            .get('getCustomerAccounts', {})\
            .get('accounts', [{}])[0]\
            .get('accountNumber')
    except (OSError, ValueError):
        raise PyFidoError("Bad json getting account number")
    if account_number is None:
        raise PyFidoError("Can not get account number")
    return account_number
def dump_as_json(record, output_file):
    """Dump a relation record to *output_file* as one JSON value.

    Arguments:
        record -- A RelationRecord instance to dump.
        output_file -- A file to output.
    """
    def default_func(value):
        # frozensets are not JSON serializable; emit them as sorted lists.
        if isinstance(value, frozenset):
            return sorted(value)
        raise TypeError(repr(value) + " is not JSON serializable")

    stats = [stat._asdict() for stat in record.ordered_statistics]
    converted_record = record._replace(ordered_statistics=stats)
    json.dump(converted_record._asdict(), output_file,
              default=default_func, ensure_ascii=False)
    output_file.write(os.linesep)
def _PrintAnalysisStatusUpdateLinear(self, processing_status):
    """Prints an analysis status update in linear mode.

    Args:
        processing_status (ProcessingStatus): processing status.
    """
    for worker_status in processing_status.workers_status:
        # "running" is derived from the absence of an error status.
        status_line = (
            '{0:s} (PID: {1:d}) - events consumed: {2:d} - running: '
            '{3!s}\n').format(
                worker_status.identifier, worker_status.pid,
                worker_status.number_of_consumed_events,
                worker_status.status not in definitions.ERROR_STATUS_INDICATORS)
        self._output_writer.Write(status_line)
def _get_id(self, file_path):
    """Resolve a Drive file or folder path to its id.

    Walks the path one segment at a time, constraining each lookup to the
    previously found parent.

    :param file_path: OS-style path of the file/folder to find.
    :return: (file_id, parent_id) tuple; both empty strings if not found.
    :raises DriveConnectionError: if a Drive API call fails.
    """
    title = '%s._get_id' % self.__class__.__name__
    list_kwargs = {
        'spaces': self.drive_space,
        'fields': 'files(id, parents)'
    }
    path_segments = file_path.split(os.sep)
    parent_id = ''
    empty_string = ''
    while path_segments:
        walk_query = "name = '%s'" % path_segments.pop(0)
        if parent_id:
            # A leading space is required here; without it the two clauses
            # fused into e.g. "name = 'x'and '...' in parents", which is a
            # malformed Drive query.
            walk_query += " and '%s' in parents" % parent_id
        list_kwargs['q'] = walk_query
        try:
            response = self.drive.list(**list_kwargs).execute()
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            raise DriveConnectionError(title)
        file_list = response.get('files', [])
        if not file_list:
            return empty_string, empty_string
        if path_segments:
            parent_id = file_list[0].get('id')
        else:
            file_id = file_list[0].get('id')
            return file_id, parent_id
def input_flush():
    """Flush pending keyboard input on both POSIX and Windows."""
    try:
        import sys, termios
    except ImportError:
        # No termios available: assume Windows and drain the console
        # buffer with msvcrt instead.
        import msvcrt
        while msvcrt.kbhit():
            msvcrt.getch()
    else:
        termios.tcflush(sys.stdin, termios.TCIFLUSH)
def extract_and_process(path_in, path_out):
    """Run Eidos on a set of text files and process output with INDRA.

    The output is produced in the specified output folder but the output
    files aren't processed by this function.

    Parameters
    ----------
    path_in : str
        Path to an input folder with some text files
    path_out : str
        Path to an output folder in which Eidos places the output
        JSON-LD files

    Returns
    -------
    stmts : list[indra.statements.Statements]
        A list of INDRA Statements
    """
    path_in = os.path.realpath(os.path.expanduser(path_in))
    path_out = os.path.realpath(os.path.expanduser(path_out))
    extract_from_directory(path_in, path_out)
    json_files = glob.glob(os.path.join(path_out, '*.jsonld'))
    logger.info('Found %d JSON-LD files to process in %s' %
                (len(json_files), path_out))
    stmts = []
    # Loop variable renamed from `json` so it cannot shadow the json module
    # if that is imported at module scope.
    for json_file in json_files:
        ep = process_json_file(json_file)
        if ep:
            stmts += ep.statements
    return stmts
def send_remote_port(self):
    """Send the remote TCP port mapped for this connection.

    This port is surprisingly often the same as the locally bound port
    for an endpoint because a lot of NAT types preserve the port.
    """
    peer_port = self.transport.getPeer().port
    self.send_line("REMOTE TCP %s" % str(peer_port))
def register_introspection_functions(self):
    """Register the XML-RPC introspection methods in the ``system``
    namespace.

    See http://xmlrpc.usefulinc.com/doc/reserved.html
    """
    introspection = {
        'system.listMethods': self.system_listMethods,
        'system.methodSignature': self.system_methodSignature,
        'system.methodHelp': self.system_methodHelp,
    }
    self.funcs.update(introspection)
def get_networks(self):
    """Get the networks associated with the resource description.

    Returns:
        list of (roles, network) tuples; entries without a concrete
        network are skipped.
    """
    result = []
    for net in self.c_resources["networks"]:
        concrete = net.get("_c_network")
        if concrete is None:
            continue
        result.append((utils.get_roles_as_list(net), concrete))
    return result
def _set_batch(self, batch, fg, bg, bgblend=1, nullChar=False):
for (x, y), char in batch:
self._set_char(x, y, char, fg, bg, bgblend) | Try to perform a batch operation otherwise fall back to _set_char.
If fg and bg are defined then this is faster but not by very
much.
if any character is None then nullChar is True
batch is a iterable of [(x, y), ch] items |
def make_opfields(cls):
    """Calculate the virtualchain-required opfields dict, keyed by opcode."""
    return {
        NAME_OPCODES[opname]: fields
        for opname, fields in SERIALIZE_FIELDS.items()
    }
def _render_timestep(self,
                     t: int,
                     s: Fluents, a: Fluents, f: Fluents,
                     r: np.float32) -> None:
    """Print fluents and reward for the given timestep *t*.

    Args:
        t (int): timestep
        s: State fluents.
        a: Action fluents.
        f: Interm state fluents.
        r (np.float32): Reward.
    """
    banner = "============================"
    print(banner)
    print("TIME = {}".format(t))
    print(banner)
    rddl = self._compiler.rddl
    # Render in the fixed order: actions, interms, states.
    sections = (
        ('action', a, rddl.action_fluent_variables),
        ('interms', f, rddl.interm_fluent_variables),
        ('states', s, rddl.state_fluent_variables),
    )
    for label, fluents, fluent_variables in sections:
        self._render_fluent_timestep(label, fluents, fluent_variables)
    self._render_reward(r)
def read_static_uplink(self):
    """Read the static uplink configuration for this host, if given."""
    if self.node_list is None or self.node_uplink_list is None:
        return
    nodes = self.node_list.split(',')
    ports = self.node_uplink_list.split(',')
    for node, port in zip(nodes, ports):
        if node.strip() != self.host_name:
            continue
        self.static_uplink = True
        self.static_uplink_port = port.strip()
        return
def _activity_helper(modifier: str, location=None):
    """Make an activity dictionary.

    :param str modifier:
    :param Optional[dict] location: An entity from :func:`pybel.dsl.entity`
    :rtype: dict
    """
    if not location:
        return {MODIFIER: modifier}
    return {MODIFIER: modifier, LOCATION: location}
def get_mentions(self, *args, **kwargs):
    """Return a get_content generator for username mentions.

    The additional parameters are passed directly into
    :meth:`.get_content`. Note: the `url` parameter cannot be altered.
    """
    mentions_url = self.config['mentions']
    return self.get_content(mentions_url, *args, **kwargs)
def directly_connected(self, ident):
    """Return a generator of labels *directly* connected to ``ident``.

    ``ident`` may be a ``content_id`` or a ``(content_id, subtopic_id)``
    pair. Yields nothing if no labels are defined for ``ident``; does not
    follow transitive relationships.

    :param ident: content id or (content id and subtopic id)
    :type ident: ``str`` or ``(str, str)``
    :rtype: generator of :class:`Label`
    """
    content_id, subtopic_id = normalize_ident(ident)
    return self.everything(
        include_deleted=False,
        content_id=content_id,
        subtopic_id=subtopic_id,
    )
def _has_terms(self):
    """bool, whether the instance has any sub-terms."""
    loc = self._super_get('_term_location')
    if not self._super_has(loc):
        return False
    terms = self._super_get(loc)
    return (isiterable(terms)
            and len(terms) > 0
            and all(isinstance(term, Term) for term in terms))
def shell_call(self, shellcmd):
    """Run *shellcmd* through the shell, prefixed with the required setup."""
    full_command = self.shellsetup + shellcmd
    return subprocess.call(full_command, shell=True)
def delete_stream(stream_name, region=None, key=None, keyid=None, profile=None):
    """Delete the stream with name stream_name. This cannot be undone!
    All data will be lost!!

    CLI example::

        salt myminion boto_kinesis.delete_stream my_stream region=us-east-1
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    response = _execute_with_retries(conn,
                                     "delete_stream",
                                     StreamName=stream_name)
    if 'error' not in response:
        response['result'] = True
    return response
def call(cmd_args, suppress_output=False):
    """Call an arbitrary command; return exit value, stdout and stderr.

    The command can be passed in as either a string or an iterable.

    >>> result = call('hatchery', suppress_output=True)
    >>> result.exitval
    0
    >>> result = call(['hatchery', 'notreal'])
    >>> result.exitval
    1
    """
    if not funcy.is_list(cmd_args) and not funcy.is_tuple(cmd_args):
        # Split a plain command string into argv the way a shell would.
        cmd_args = shlex.split(cmd_args)
    logger.info('executing `{}`'.format(' '.join(cmd_args)))
    call_request = CallRequest(cmd_args, suppress_output=suppress_output)
    call_result = call_request.run()
    if call_result.exitval:
        logger.error('`{}` returned error code {}'.format(' '.join(cmd_args), call_result.exitval))
    return call_result
def get_commands_in_namespace(namespace=None, level=1):
    """Get commands in a namespace.

    Args:
        namespace (dict|module): Typically a module. If not passed, the
            globals from the call site will be used.
        level (int): If not called from the global scope, set this
            appropriately to account for the call stack.

    Returns:
        OrderedDict: The commands found in the namespace, ordered by name.

    Can be used to create ``__all__`` lists::

        __all__ = list(get_commands_in_namespace())
    """
    from ..command import Command
    commands = {}
    if namespace is None:
        # Grab the caller's globals `level` frames up the stack.
        frame = inspect.stack()[level][0]
        namespace = frame.f_globals
    elif inspect.ismodule(namespace):
        namespace = vars(namespace)
    for name in namespace:
        obj = namespace[name]
        if isinstance(obj, Command):
            commands[name] = obj
    return OrderedDict((name, commands[name]) for name in sorted(commands))
def reduce_list_of_bags_of_words(list_of_keyword_sets):
    """Reduce a number of keyword sets to a bag-of-words.

    Input:  - list_of_keyword_sets: a python list of sets of strings.
    Output: - bag_of_words: the corresponding multi-set (bag-of-words) as a
              python dictionary mapping keyword -> count.
    """
    bag_of_words = dict()
    for keyword_set in list_of_keyword_sets:
        for keyword in keyword_set:
            # O(1) lookup on the dict itself; the original rebuilt the
            # keys view for every single keyword.
            bag_of_words[keyword] = bag_of_words.get(keyword, 0) + 1
    return bag_of_words
def add_metadata_query_properties(self, meta_constraints, id_table, id_column):
    """Construct WHERE clauses from a list of MetaConstraint objects,
    adding them to the query state.

    :param meta_constraints:
        A list of MetaConstraint objects, each of which defines a condition
        over metadata which must be satisfied for results to be included in
        the overall query.
    :raises:
        ValueError if an unknown meta constraint type is encountered.
    """
    for mc in meta_constraints:
        meta_key = str(mc.key)
        ct = mc.constraint_type
        # NOTE(review): the SQL template literal is missing here (likely
        # lost in extraction); as written, the next line is a syntax error.
        # The .format(...) calls below expect placeholders for the id table,
        # id column, value column and comparison operator.
        # TODO: restore the original template string.
        sql_template =
        self.sql_args.append(SQLBuilder.map_value(mc.value))
        self.sql_args.append(meta_key)
        if ct == 'less':
            self.where_clauses.append(sql_template.format(id_table, id_column, 'floatValue', '<='))
        elif ct == 'greater':
            self.where_clauses.append(sql_template.format(id_table, id_column, 'floatValue', '>='))
        elif ct == 'number_equals':
            self.where_clauses.append(sql_template.format(id_table, id_column, 'floatValue', '='))
        elif ct == 'string_equals':
            self.where_clauses.append(sql_template.format(id_table, id_column, 'stringValue', '='))
        else:
            raise ValueError("Unknown meta constraint type!")
def contains_circle(self, pt, radius):
    """Return True if the circle (pt, radius) lies entirely inside this rect."""
    inside_x = self.l < pt.x - radius and self.r > pt.x + radius
    inside_y = self.t < pt.y - radius and self.b > pt.y + radius
    return inside_x and inside_y
def read_sps(path):
    """Read a LibSVM file line-by-line.

    Args:
        path (str): A path to the LibSVM file to read.

    Yields:
        (features, target) where features is the list of feature tokens
        and target is the leading integer label.
    """
    # Use a context manager so the handle is closed deterministically; the
    # original left the file open until garbage collection.
    with open(path) as handle:
        for line in handle:
            tokens = line.rstrip().split(' ')
            yield tokens[1:], int(tokens[0])
def format_unencoded(self, tokensource, outfile):
    """Format ``tokensource`` and write the result to ``outfile``.

    The formatting process uses several nested generators; which of them
    are used is determined by the user's options.

    Each generator should take at least one argument, ``inner``, and wrap
    the pieces of text generated by this.

    Always yield 2-tuples: (code, text). If "code" is 1, the text is part
    of the original tokensource being highlighted; if it's 0, the text is
    some piece of wrapping. This makes it possible to use several
    different wrappers that process the original source linewise, e.g.
    line number generators.
    """
    source = self._format_lines(tokensource)
    if self.hl_lines:
        source = self._highlight_lines(source)
    if not self.nowrap:
        # linenos == 2 means inline line numbers; 1 means a table layout.
        if self.linenos == 2:
            source = self._wrap_inlinelinenos(source)
        if self.lineanchors:
            source = self._wrap_lineanchors(source)
        if self.linespans:
            source = self._wrap_linespans(source)
        source = self.wrap(source, outfile)
        if self.linenos == 1:
            source = self._wrap_tablelinenos(source)
        if self.full:
            source = self._wrap_full(source, outfile)
    for t, piece in source:
        outfile.write(piece)
def is_node(objecttype):
    """Check if the given objecttype has Node as an interface."""
    if not (isclass(objecttype) and issubclass(objecttype, ObjectType)):
        return False
    return any(issubclass(interface, Node)
               for interface in objecttype._meta.interfaces)
def includeme(config):
    """Connect a view to a route that catches all URIs like
    'something,something,...'.
    """
    root = config.get_root_resource()
    # The {collections:.+,.+} pattern matches two or more comma-separated
    # collection names.
    root.add('nef_polymorphic', '{collections:.+,.+}',
             view=PolymorphicESView,
             factory=PolymorphicACL)
def _order_pases(self, passes):
    """Topologically sort optimization passes.

    This ensures that the resulting passes are run in order respecting
    before/after constraints.

    Args:
        passes (iterable): An iterable of pass names that should be
            included in the optimization passes run.
    """
    selected = set(passes)
    dependencies = {}
    for opt in selected:
        _, before, after = self._known_passes[opt]
        # Each pass depends on everything declared to run after... it.
        dependencies.setdefault(opt, set()).update(after)
        # A "before other" declaration makes `other` depend on this pass,
        # but only if `other` is actually part of this run.
        for other in before:
            if other in selected:
                dependencies.setdefault(other, set()).add(opt)
    return toposort_flatten(dependencies)
def _execute_cmd(plugin, args='', run_type='cmd.retcode'):
    """Execute a nagios plugin from PLUGINDIR with the given salt run type.

    Returns an empty dict when the plugin is not installed.
    """
    data = {}
    all_plugins = list_plugins()
    if plugin in all_plugins:
        # python_shell=False: the command line is not run through a shell.
        data = __salt__[run_type](
            '{0}{1} {2}'.format(PLUGINDIR, plugin, args),
            python_shell=False)
    return data
def get_proxies(self, url):
    """Get the proxies applicable to a given URL, according to the PAC file.

    :param str url: The URL for which to find appropriate proxies.
    :return: All the proxies that apply to the given URL.
        Can be empty, which means to abort the request.
    :rtype: list[str]
    """
    hostname = urlparse(url).hostname
    if hostname is None:
        hostname = ""
    # Evaluate the PAC JavaScript once and reuse the result; the original
    # called find_proxy_for_url a second time on every cache miss.
    value_from_js_func = self.pac.find_proxy_for_url(url, hostname)
    if value_from_js_func in self._cache:
        return self._cache[value_from_js_func]
    config_values = parse_pac_value(value_from_js_func, self.socks_scheme)
    if self._proxy_auth:
        config_values = [add_proxy_auth(value, self._proxy_auth) for value in config_values]
    self._cache[value_from_js_func] = config_values
    return config_values
def ApproximateDistanceBetweenPoints(pa, pb):
    """Finds the distance between two points on the Earth's surface.

    This is an approximate distance based on assuming that the Earth is a
    sphere. The points are specified by their latitude and longitude.

    Args:
        pa: the first (lat, lon) point tuple
        pb: the second (lat, lon) point tuple

    Returns:
        The distance as a float in metres.
    """
    alat, alon = pa
    blat, blon = pb
    # Wrap the raw coordinates in transitfeed Stop objects so we can reuse
    # the library's great-circle helper.
    sa = transitfeed.Stop(lat=alat, lng=alon)
    sb = transitfeed.Stop(lat=blat, lng=blon)
    return transitfeed.ApproximateDistanceBetweenStops(sa, sb)
def img_binary_list():
    """Gets the QEMU-img binaries list available on the host.

    :returns: Array of dictionary {"path": Qemu-img binary path,
        "version": version of Qemu-img}
    """
    qemu_imgs = []
    for path in Qemu.paths_list():
        try:
            for f in os.listdir(path):
                # Accept only executable regular files named qemu-img(.exe).
                if (f == "qemu-img" or f == "qemu-img.exe") and \
                        os.access(os.path.join(path, f), os.X_OK) and \
                        os.path.isfile(os.path.join(path, f)):
                    qemu_path = os.path.join(path, f)
                    version = yield from Qemu._get_qemu_img_version(qemu_path)
                    qemu_imgs.append({"path": qemu_path, "version": version})
        except OSError:
            # Unreadable directory: skip it.
            continue
    return qemu_imgs
def frameify(self, state, data):
    """Split data into a sequence of lines, buffering any trailing partial
    line in ``state.recv_buf``.
    """
    data = state.recv_buf + data
    while data:
        line, sep, rest = data.partition('\n')
        if sep != '\n':
            break
        data = rest
        # endswith() is safe on empty lines; the original indexed line[-1]
        # and raised IndexError for a bare '\n'.
        if self.carriage_return and line.endswith('\r'):
            line = line[:-1]
        try:
            yield line
        except FrameSwitch:
            break
    state.recv_buf = data
def get_mutations_size(self):
    """Gets the total mutations size for the current row.

    For example:

    .. literalinclude:: snippets_table.py
        :start-after: [START bigtable_row_get_mutations_size]
        :end-before: [END bigtable_row_get_mutations_size]
    """
    return sum(mutation.ByteSize() for mutation in self._get_mutations())
def within_depth_range(self, lower_depth=None, upper_depth=None):
    """Selects events within a specified depth range.

    :param float lower_depth:
        Lower depth for consideration
    :param float upper_depth:
        Upper depth for consideration
    :returns:
        Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
        containing only selected events
    """
    # Explicit `is None` tests: the original used truthiness, which
    # treated a legitimate depth of 0.0 as "not supplied".
    if lower_depth is None:
        if upper_depth is None:
            # No bounds at all: return the full catalogue untouched.
            return self.catalogue
        lower_depth = np.inf
    if upper_depth is None:
        upper_depth = 0.0
    is_valid = np.logical_and(self.catalogue.data['depth'] >= upper_depth,
                              self.catalogue.data['depth'] < lower_depth)
    return self.select_catalogue(is_valid)
def save_excel(self, fd):
    """Saves the case as an Excel spreadsheet."""
    # Imported lazily so pylon's Excel support is only required when used.
    from pylon.io.excel import ExcelWriter
    ExcelWriter(self).write(fd)
def confusion_matrix(
    gold, pred, null_pred=False, null_gold=False, normalize=False, pretty_print=True
):
    """A shortcut method for building a confusion matrix all at once.

    Args:
        gold: an array-like of gold labels (ints)
        pred: an array-like of predictions (ints)
        null_pred: If True, include the row corresponding to null predictions
        null_gold: If True, include the col corresponding to null gold labels
        normalize: if True, divide counts by the total number of items
        pretty_print: if True, pretty-print the matrix before returning
    """
    conf = ConfusionMatrix(null_pred=null_pred, null_gold=null_gold)
    gold = arraylike_to_numpy(gold)
    pred = arraylike_to_numpy(pred)
    conf.add(gold, pred)
    mat = conf.compile()
    if normalize:
        mat = mat / len(gold)
    if pretty_print:
        conf.display(normalize=normalize)
    return mat
def full_name_natural_split(full_name):
    """Split a full name into a natural first name, last name and
    middle initials.
    """
    parts = full_name.strip().split(' ')
    first_name = ""
    if parts:
        first_name = parts.pop(0)
        # Keep particles such as "El Hadji" together as the first name.
        if first_name.lower() == "el" and parts:
            first_name += " " + parts.pop(0)
    last_name = ""
    if parts:
        last_name = parts.pop()
        # Fold generational suffixes (I, II, III) into the last name.
        # The original chained `or`s so `and parts` bound only to the 'iii'
        # clause; a name ending in a bare "I"/"II" with nothing left to pop
        # raised IndexError.
        if last_name.lower() in ('i', 'ii', 'iii') and parts:
            last_name = parts.pop() + " " + last_name
    middle_initials = ""
    for middle_name in parts:
        if middle_name:
            middle_initials += middle_name[0]
    return first_name, middle_initials, last_name
def open_browser(url: str, browser: str = None) -> None:
    """Open *url* in a web browser."""
    # NOTE(review): this only strips the flag from sys.argv; the browser is
    # opened regardless of whether --open-browser was passed. Confirm this
    # is intentional.
    if '--open-browser' in sys.argv:
        sys.argv.remove('--open-browser')
    if browser is None:
        browser = config.browser
    if browser in _browsers:
        webbrowser.get(browser).open(url)
    else:
        # Unknown browser name: fall back to the system default.
        webbrowser.open(url)
def get_curve(self, mnemonic, alias=None):
    """Wraps get_mnemonic.

    Instead of picking curves by name directly from the data dict, you can
    pick them up with this method, which takes account of the alias dict
    you pass it. If you do not pass an alias dict, then you get the curve
    you asked for, if it exists, or None. NB Wells do not have alias
    dicts, but Projects do.

    Args:
        mnemonic (str): the name of the curve you want.
        alias (dict): an alias dictionary, mapping mnemonics to lists of
            mnemonics.

    Returns:
        Curve, or None if not present.
    """
    key = self.get_mnemonic(mnemonic, alias=alias)
    return self.data.get(key, None)
def calculate_iI_correspondence(omega):
    r"""Get the correspondence between degenerate and nondegenerate schemes."""
    Ne = len(omega[0])
    om = omega[0][0]
    correspondence = []
    I = 0
    # Group states with equal omega[i][0] under a common nondegenerate
    # index; both indices stored 1-based.
    for i in range(Ne):
        if omega[i][0] != om:
            om = omega[i][0]
            I += 1
        correspondence += [(i+1, I+1)]
    Nnd = I+1
    def I_nd(i):
        # Nondegenerate index for degenerate state i (1-based).
        return correspondence[i-1][1]
    def i_d(I):
        # First degenerate state mapped to nondegenerate index I (1-based).
        for i in range(Ne):
            if correspondence[i][1] == I:
                return correspondence[i][0]
    return i_d, I_nd, Nnd
def set(self, key, value):
    """Set a key's value regardless of whether a change is seen."""
    # force=True bypasses the change detection in __setitem__.
    return self.__setitem__(key, value, force=True)
def _create_feed_dict(self, data):
return {
self.input_data: data,
self.hrand: np.random.rand(data.shape[0], self.num_hidden),
self.vrand: np.random.rand(data.shape[0], data.shape[1])
} | Create the dictionary of data to feed to tf session during training.
:param data: training/validation set batch
:return: dictionary(self.input_data: data, self.hrand: random_uniform,
self.vrand: random_uniform) |
def expected_counts(p0, T, N):
    r"""Compute expected transition counts for a Markov chain after N steps.

    Expected counts are computed according to

    .. math::
        E[C_{ij}^{(n)}]=\sum_{k=0}^{N-1} (p_0^T T^{k})_{i} p_{ij}

    Parameters
    ----------
    p0 : (M,) ndarray
        Starting (probability) vector of the chain.
    T : (M, M) sparse matrix
        Transition matrix of the chain.
    N : int
        Number of steps to take from initial state.

    Returns
    -------
    EC : (M, M) sparse matrix
        Expected value for transition counts after N steps.
    """
    # The original body began with a stray bare `r` (raw-docstring artifact
    # from extraction) that raised NameError at runtime; removed.
    if N <= 0:
        # No steps: the expected count matrix is all zeros.
        return coo_matrix(T.shape, dtype=float)
    p_k = 1.0 * p0
    p_sum = 1.0 * p_k
    Tt = T.transpose()
    # Accumulate p_0^T T^k for k = 0 .. N-1.
    for _ in np.arange(N - 1):
        p_k = Tt.dot(p_k)
        p_sum += p_k
    D_psum = diags(p_sum, 0)
    return D_psum.dot(T)
def _add_colorbar(ax: Axes, cmap: colors.Colormap, cmap_data: np.ndarray, norm: colors.Normalize):
    """Show a colorbar right of the plot."""
    fig = ax.get_figure()
    # A ScalarMappable carrying the cmap/norm lets us add a colorbar that
    # is not tied to a specific artist.
    mappable = cm.ScalarMappable(cmap=cmap, norm=norm)
    mappable.set_array(cmap_data)
    fig.colorbar(mappable, ax=ax)
def visit_tryfinally(self, node, parent):
    """Visit a TryFinally node by returning a fresh instance of it."""
    newnode = nodes.TryFinally(node.lineno, node.col_offset, parent)
    # Recursively visit body and finalbody with the new node as parent.
    newnode.postinit(
        [self.visit(child, newnode) for child in node.body],
        [self.visit(n, newnode) for n in node.finalbody],
    )
    return newnode
def raw_mode():
    """Enables terminal raw mode during the context.

    Note: Currently noop for Windows systems.

    Usage: ::

        with raw_mode():
            do_some_stuff()
    """
    if WIN:
        yield
    else:
        import tty
        import termios
        if not isatty(sys.stdin):
            # stdin is redirected; talk to the controlling terminal instead.
            f = open("/dev/tty")
            fd = f.fileno()
        else:
            fd = sys.stdin.fileno()
            f = None
        # Guard: tcgetattr below may fail (e.g. no usable tty). The original
        # then referenced old_settings in the finally block and raised
        # NameError instead of restoring cleanly.
        old_settings = None
        try:
            old_settings = termios.tcgetattr(fd)
            tty.setraw(fd)
        except termios.error:
            pass
        try:
            yield
        finally:
            try:
                if old_settings is not None:
                    termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
            except termios.error:
                pass
            finally:
                # Always close the /dev/tty handle; the original leaked it
                # when restoring the settings failed.
                if f is not None:
                    f.close()
def _printTraceback(self, test, err):
    """Print a nicely formatted traceback.

    :arg test: the test that precipitated this call
    :arg err: exc_info()-style traceback triple
    """
    exception_type, exception_value = err[:2]
    # Trim frames that belong to the test framework rather than the test.
    extracted_tb = extract_relevant_tb(
        err[2],
        exception_type,
        exception_type is test.failureException)
    test_frame_index = index_of_test_frame(
        extracted_tb,
        exception_type,
        exception_value,
        test)
    if test_frame_index:
        extracted_tb = extracted_tb[test_frame_index:]
    # Dodge the progress bar while writing so output isn't interleaved.
    with self.bar.dodging():
        self.stream.write(''.join(
            format_traceback(
                extracted_tb,
                exception_type,
                exception_value,
                self._cwd,
                self._term,
                self._options.function_color,
                self._options.dim_color,
                self._options.editor,
                self._options.editor_shortcut_template)))
def amplification_circuit(algorithm: Program, oracle: Program,
                          qubits: List[int],
                          num_iter: int,
                          decompose_diffusion: bool = False) -> Program:
    """Returns a program that does ``num_iter`` rounds of amplification,
    given a measurement-less algorithm, an oracle, and a list of qubits to
    operate on.

    :param algorithm: A program representing a measurement-less algorithm
        run on qubits.
    :param oracle: An oracle maps any basis vector ``|psi>`` to either
        ``+|psi>`` or ``-|psi>`` depending on whether ``|psi>`` is in the
        desirable or the undesirable subspace.
    :param qubits: the qubits to operate on
    :param num_iter: number of iterations of amplifications to run
    :param decompose_diffusion: If True, decompose the Grover diffusion
        gate into two qubit gates. If False, use a defgate to define the
        gate.
    :return: The amplified algorithm.
    """
    program = Program()
    # Start from the uniform superposition over all qubits.
    uniform_superimposer = Program().inst([H(qubit) for qubit in qubits])
    program += uniform_superimposer
    if decompose_diffusion:
        diffusion = decomposed_diffusion_program(qubits)
    else:
        diffusion = diffusion_program(qubits)
    defined_gates = oracle.defined_gates + algorithm.defined_gates + diffusion.defined_gates
    # Each Grover iteration: oracle, then A^dagger . diffusion . A.
    for _ in range(num_iter):
        program += (oracle.instructions
                    + algorithm.dagger().instructions
                    + diffusion.instructions
                    + algorithm.instructions)
    for gate in defined_gates:
        program.defgate(gate.name, gate.matrix)
    return program
def _waitForSSHPort(self):
    """Wait until the instance represented by this box is accessible via
    SSH.

    :return: the number of unsuccessful attempts to connect to the port
        before the first success
    """
    logger.debug('Waiting for ssh port to open...')
    for i in count():
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.settimeout(a_short_time)
            s.connect((self.effectiveIP, 22))
            logger.debug('...ssh port open')
            return i
        except socket.error:
            # Port not open yet; retry with a fresh socket.
            pass
        finally:
            s.close()
def remap_overlapping_column_names(table_op, root_table, data_columns):
    """Return an ``OrderedDict`` mapping possibly suffixed column names to
    column names without suffixes.

    Parameters
    ----------
    table_op : TableNode
        The ``TableNode`` we're selecting from.
    root_table : TableNode
        The root table of the expression we're selecting from.
    data_columns : set or frozenset
        The available columns to select from.

    Returns
    -------
    mapping : OrderedDict[str, str] or None
        A map from possibly-suffixed column names to column names without
        suffixes; None when *table_op* is not a join.
    """
    if not isinstance(table_op, ops.Join):
        return None
    left_root, right_root = ops.distinct_roots(table_op.left, table_op.right)
    suffixes = {
        left_root: constants.LEFT_JOIN_SUFFIX,
        right_root: constants.RIGHT_JOIN_SUFFIX,
    }
    # For each column of the root table, find whichever of {name,
    # name+suffix} actually exists in data_columns.
    column_names = [
        ({name, name + suffixes[root_table]} & data_columns, name)
        for name in root_table.schema.names
    ]
    mapping = OrderedDict(
        (first(col_name), final_name)
        for col_name, final_name in column_names
        if col_name
    )
    return mapping
def write_molecule(filename, format=None):
    """Write the system currently displayed to a file as a molecule."""
    datafile(filename, format=format,
             mode='w').write('molecule',current_system())
def mime_type(self, path):
    """Look up the mime-type for *path* from its file extension."""
    extension = os.path.splitext(path)[1]
    return MIME_TYPES[extension]
def check_verifier(self, verifier):
    """Check that the verifier contains only safe characters and that its
    length is within the configured [lower, upper] bounds."""
    lower, upper = self.verifier_length
    if not set(verifier) <= self.safe_characters:
        return False
    return lower <= len(verifier) <= upper
def get_export_configuration(self, config_id):
    """Retrieve the ExportConfiguration with the given ID.

    :param string config_id:
        ID for which to search
    :return:
        a :class:`meteorpi_model.ExportConfiguration`, or None if no match
        was found.
    """
    sql = (
        'SELECT uid, exportConfigId, exportType, searchString, targetURL, '
        'targetUser, targetPassword, exportName, description, active '
        'FROM archive_exportConfig WHERE exportConfigId = %s')
    # first_from_generator returns None when the generator is empty.
    return first_from_generator(
        self.generators.export_configuration_generator(sql=sql, sql_args=(config_id,)))
def received_message(self, m):
    """Push an upstream websocket message to the downstream client."""
    m = str(m)
    logger.debug("Incoming upstream WS: %s", m)
    uwsgi.websocket_send(m)
    logger.debug("Send ok")
def determine_emitter(cls, request):
    """Pick the emitter to use for *request*.

    Falls back to the first configured emitter when there is no request or
    no specific ``Accept`` header; OPTIONS preflight requests always get
    the JSON emitter.

    :return emitter: an adrest.utils.emitters.BaseEmitter subclass
    """
    fallback = cls._meta.emitters[0]
    if not request:
        return fallback
    if request.method == 'OPTIONS':
        return JSONEmitter
    accept_header = request.META.get('HTTP_ACCEPT', '*/*')
    if accept_header == '*/*':
        return fallback
    matched = mimeparse.best_match(cls._meta.emitters_dict.keys(),
                                   accept_header)
    return cls._meta.emitters_dict.get(matched, fallback)
:return emitter: Instance of adrest.utils.emitters.BaseEmitter |
def add_list(self, bl):
    """Add an outline/list block to the current text frame.

    Note: this pushes ``cur_element`` but does not pop it; the caller is
    responsible for popping when finished with the list.
    """
    if self.cur_element is None:
        self.add_text_frame()
    self.push_element()
    self.cur_element._text_box.append(bl.node)
    # Register the list's style with the presentation once per style name.
    style_name = bl.style_name
    if style_name not in self._preso._styles_added:
        self._preso._styles_added[style_name] = 1
        self._preso._auto_styles.append(bl.default_styles_root()[0])
    self.cur_element = bl
You'll need to do that |
def _update_card_file_location(self, card_name, new_directory):
    """Move the file referenced by a project card into the new GSSHA
    working directory and re-point the card at the file's base name.

    A failed move is logged as a warning, not raised.
    """
    with tmp_chdir(self.gssha_directory):
        card = self.project_manager.getCard(card_name)
        if not card or not card.value:
            return
        original_location = card.value.strip("'").strip('"')
        base_name = os.path.basename(original_location)
        new_location = os.path.join(new_directory, base_name)
        card.value = '"{0}"'.format(base_name)
        try:
            move(original_location, new_location)
        except OSError as ex:
            log.warning(ex)
def iter_generic_bases(type_):
    """Iterate over all generics *type_* derives from, including origins.

    Needed because in typing 3.5.0 a generic does not appear in the bases
    of a parameterized version of itself (fixed upstream in aab2c59); with
    a newer typing, iterating the MRO alone would suffice.
    NOTE(review): relies on ``typing.GenericMeta``, which was removed in
    Python 3.7 — confirm the supported interpreter range.
    """
    for base in type_.__mro__:
        if not isinstance(base, typing.GenericMeta):
            continue
        current = base
        while current:
            yield current
            current = current.__origin__
This function is only necessary because, in typing 3.5.0, a generic doesn't
get included in the list of bases when it constructs a parameterized version
of itself. This was fixed in aab2c59; now it would be enough to just iterate
over the MRO. |
def resolve_deps(self, obj):
    """Return the list of resolved dependencies for *obj*.

    :param obj: object to look up dependencies for
    :type obj: object
    :return: resolved dependencies
    :rtype: list
    """
    return list(self.iresolve(*self.get_deps(obj)))
:param obj: Object to lookup dependencies for
:type obj: object
:return: Resolved dependencies
:rtype: list |
def authenticate(self, username, password):
    """Authenticate the user with a simple bind against the LDAP server.

    Returns False outright for missing credentials or for usernames with
    characters outside ``[A-Za-z0-9_-]`` (guards the DN construction).
    """
    if username is None or password is None:
        return False
    if re.match("^[A-Za-z0-9_-]*$", username) is None:
        return False
    bind_dn = self.get_user_dn(username)
    server = ldap3.Server(self.uri, use_ssl=self.use_ssl)
    conn = ldap3.Connection(server, user=bind_dn, password=password)
    return conn.bind()
def class_name_to_resource_name(class_name: str) -> str:
    """Convert a camel-case class name to a resource name with spaces.

    >>> class_name_to_resource_name('FooBarObject')
    'Foo Bar Object'

    :param class_name: The name to convert.
    :returns: The resource name.
    """
    word_break = re.compile('(.)([A-Z][a-z]+)')
    tail_break = re.compile('([a-z0-9])([A-Z])')
    return tail_break.sub(r'\1 \2', word_break.sub(r'\1 \2', class_name))
>>> class_name_to_resource_name('FooBarObject')
'Foo Bar Object'
:param class_name: The name to convert.
:returns: The resource name. |
def _run_argparser(self, argv):
    """Parse *argv* with the link's argparse parser and absorb the result
    via ``update_args``.

    :raises ValueError: if the link was constructed without a parser.
    :return: the parsed ``argparse.Namespace``.
    """
    if self._parser is None:
        raise ValueError('Link was not given a parser on initialization')
    parsed = self._parser.parse_args(argv)
    self.update_args(vars(parsed))
    return parsed
def secret_absent(name, namespace='default', **kwargs):
    """Ensure the named Kubernetes secret is absent from *namespace*.

    Returns a standard Salt state dict (name/changes/result/comment).
    In ``test=True`` runs nothing is deleted and ``result`` is ``None``.
    """
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}
    secret = __salt__['kubernetes.show_secret'](name, namespace, **kwargs)
    if secret is None:
        # Already absent: success; in test mode report None per Salt convention.
        ret['result'] = True if not __opts__['test'] else None
        ret['comment'] = 'The secret does not exist'
        return ret
    if __opts__['test']:
        ret['comment'] = 'The secret is going to be deleted'
        ret['result'] = None
        return ret
    __salt__['kubernetes.delete_secret'](name, namespace, **kwargs)
    # NOTE(review): the delete call's return value is not checked; success
    # is assumed once the call returns without raising.
    ret['result'] = True
    ret['changes'] = {
        'kubernetes.secret': {
            'new': 'absent', 'old': 'present'}}
    ret['comment'] = 'Secret deleted'
    return ret | Ensures that the named secret is absent from the given namespace.
name
The name of the secret
namespace
The name of the namespace |
def from_raw_message(cls, rawmessage):
    """Create a user data instance from a raw byte stream."""
    template = cls.create_empty(0x00)
    return Userdata(cls.normalize(template, rawmessage))
def update(self, batch_size, ignore_stale_grad=False):
    """Make one step of parameter update (gradients already allreduced).

    Call after ``autograd.backward()`` and outside of ``record()`` scope.
    ``batch_size`` normalizes the gradient by ``1/batch_size``; pass 1 when
    the loss was averaged manually. With ``ignore_stale_grad=True``,
    parameters whose gradient was not refreshed by ``backward`` since the
    last step are skipped.
    """
    # Lazily set up the kvstore and any deferred parameter initialization.
    if not self._kv_initialized:
        self._init_kvstore()
    if self._params_to_init:
        self._init_params()
    # Manual update() is incompatible with updates performed on the kvstore.
    assert not (self._kvstore and self._update_on_kvstore), \
        'update() when parameters are updated on kvstore ' \
        'is not supported. Try setting `update_on_kvstore` ' \
        'to False when creating trainer.'
    self._check_and_rescale_grad(self._scale / batch_size)
    self._update(ignore_stale_grad) | Makes one step of parameter update.
Should be called after `autograd.backward()` and outside of `record()` scope,
and after `trainer.update()`.
For normal parameter updates, `step()` should be used, which internally calls
`allreduce_grads()` and then `update()`. However, if you need to get the reduced
gradients to perform certain transformation, such as in gradient clipping, then
you may want to manually call `allreduce_grads()` and `update()` separately.
Parameters
----------
batch_size : int
Batch size of data processed. Gradient will be normalized by `1/batch_size`.
Set this to 1 if you normalized loss manually with `loss = mean(loss)`.
ignore_stale_grad : bool, optional, default=False
If true, ignores Parameters with stale gradient (gradient that has not
been updated by `backward` after last step) and skip update. |
def get_entry(user, identifier=None, cmd=None):
    """Return the matching entry from *user*'s crontab, or False if absent.

    ``identifier`` is preferred when given; otherwise the lookup falls back
    to matching ``cmd``. One of the two should be specified.

    CLI Example:

    .. code-block:: bash

        salt '*' cron.get_entry root identifier=task1
    """
    # Default to an empty list: the previous default of False made the
    # loop below raise TypeError whenever the crontab had no 'crons' key.
    cron_entries = list_tab(user).get('crons', [])
    for cron_entry in cron_entries:
        if identifier and cron_entry.get('identifier') == identifier:
            return cron_entry
        if cmd and cron_entry.get('cmd') == cmd:
            return cron_entry
    return False
identifier will be used if specified, otherwise will lookup cmd
Either identifier or cmd should be specified.
user:
User's crontab to query
identifier:
Search for line with identifier
cmd:
Search for cron line with cmd
CLI Example:
.. code-block:: bash
salt '*' cron.identifier_exists root identifier=task1 |
def get_render_language(contentitem):
    """Tell which language should be used to render the content item."""
    plugin = contentitem.plugin
    caches_per_language = (plugin.cache_output
                           and plugin.cache_output_per_language)
    if plugin.render_ignore_item_language or caches_per_language:
        # Render in the active request language instead of the item's own.
        return get_language()
    return contentitem.language_code
def get_action_cache(self, action_key):
    """Fetch the needs/excludes for an action from the cache.

    :param action_key: The unique action name.
    :returns: The cached action data, or ``None`` when no cache backend is
        configured or the key is missing.
    """
    if not self.cache:
        return None
    prefix = self.app.config['ACCESS_ACTION_CACHE_PREFIX']
    return self.cache.get(prefix + action_key)
.. note:: It returns the action if a cache system is defined.
:param action_key: The unique action name.
:returns: The action stored in cache or ``None``. |
def _disabled(funs):
    """Build warning messages for the state functions in *funs* that are
    disabled via the ``state_runs_disabled`` grain.

    Disabled entries may be exact state names or glob-style ``module.*``
    patterns; each match yields a message telling how to re-enable it.
    """
    ret = []
    _disabled = __salt__['grains.get']('state_runs_disabled')
    for state in funs:
        for _state in _disabled:
            if '.*' in _state:
                # Glob-style entry: match anything under "<module>.".
                target_state = _state.split('.')[0]
                target_state = target_state + '.' if not target_state.endswith('.') else target_state
                if state.startswith(target_state):
                    err = (
                        'The state file "{0}" is currently disabled by "{1}", '
                        'to re-enable, run state.enable {1}.'
                    ).format(
                        state,
                        _state,
                    )
                    ret.append(err)
                    continue
            else:
                # Exact-name entry.
                if _state == state:
                    err = (
                        'The state file "{0}" is currently disabled, '
                        'to re-enable, run state.enable {0}.'
                    ).format(
                        _state,
                    )
                    ret.append(err)
                    continue
    return ret | Return messages for disabled states
that match state functions in funs. |
def authenticate_request(self, method, bucket='', key='', headers=None):
    """Build an HTTP request for bucket/key and fill in its Authorization
    header.

    :param method: HTTP method (e.g. GET, PUT, POST)
    :param bucket: name of the bucket.
    :param key: name of key within bucket.
    :param headers: dictionary of additional HTTP headers.
    :return: boto.connection.HTTPRequest with Authorization filled in (boto
        also adds Date and User-Agent fields when missing).
    """
    calling_format = self.conn.calling_format
    request = boto.connection.AWSAuthConnection.build_base_http_request(
        self.conn,
        method,
        calling_format.build_path_base(bucket, key),
        calling_format.build_auth_path(bucket, key),
        {},
        headers,
    )
    request.authorize(connection=self.conn)
    return request
:param method: HTTP method (e.g. GET, PUT, POST)
:param bucket: name of the bucket.
:param key: name of key within bucket.
:param headers: dictionary of additional HTTP headers.
:return: boto.connection.HTTPRequest object with Authorization header
filled (NB: will also have a Date field if none before and a User-Agent
field will be set to Boto). |
def filenames(self):
    """List of file names the data is originally being read from.

    Returns
    -------
    names : list of str
        file names at the beginning of the input chain.
    """
    if not self._is_reader:
        # Not a reader itself: delegate to the upstream producer.
        return self.data_producer.filenames
    assert self._filenames is not None
    return self._filenames
Returns
-------
names : list of str
list of file names at the beginning of the input chain. |
def index():
    """Render the crawler overview page with per-crawler op counts."""
    crawler_infos = []
    for crawler in manager:
        info = Event.get_counts(crawler)
        info['last_active'] = crawler.last_run
        info['total_ops'] = crawler.op_count
        info['running'] = crawler.is_running
        info['crawler'] = crawler
        crawler_infos.append(info)
    return render_template('index.html', crawlers=crawler_infos)
def keys_to_snake_case(camel_case_dict):
    """Return a copy of *camel_case_dict* with every key converted to snake
    case via ``to_snake_case``; values are left untouched.

    :param camel_case_dict: Dictionary with the keys to convert.
    :type camel_case_dict: Dictionary.
    :return: Dictionary with the keys converted to snake case.
    """
    return {to_snake_case(key): value
            for key, value in camel_case_dict.items()}
each of the keys in the dictionary and returns a new dictionary.
:param camel_case_dict: Dictionary with the keys to convert.
:type camel_case_dict: Dictionary.
:return: Dictionary with the keys converted to snake case. |
def _process_m2m_through(self, obj, action):
    """Translate an action on a custom M2M "through" instance into a
    ``process_m2m`` call on the relation's source side."""
    # Resolve the two FK sides of the through model for this relation.
    source = getattr(obj, self.field.rel.field.m2m_field_name())
    target = getattr(obj, self.field.rel.field.m2m_reverse_field_name())
    pk_set = set()
    if target:
        pk_set.add(target.pk)
    self.process_m2m(source, pk_set, action=action, reverse=False, cache_key=obj) | Process custom M2M through model actions.
def convert_money(amount, currency_from, currency_to):
    """Convert *amount* from *currency_from* to *currency_to* and wrap the
    converted value in a ``moneyed.Money`` instance."""
    converted = base_convert_money(amount, currency_from, currency_to)
    return moneyed.Money(converted, currency_to)
instance of the converted amount. |
def pull(self, key, default=None):
    """Remove *key* from the collection and return its value.

    :param key: The key
    :type key: mixed
    :param default: Value returned when the key is absent
    :type default: mixed
    :rtype: mixed
    """
    value = self.get(key, default)
    self.forget(key)
    return value
:param key: The key
:type key: mixed
:param default: The default value
:type default: mixed
:rtype: mixed |
def get_queryset(self):
    """Retrieve the author by username (404 on miss) and build a queryset
    of his published entries."""
    lookup = {Author.USERNAME_FIELD: self.kwargs['username']}
    self.author = get_object_or_404(Author, **lookup)
    return self.author.entries_published()
build a queryset of his published entries. |
def f(field: str, kwargs: Dict[str, Any],
      default: Optional[Any] = None) -> str:
    """Fetch *field* from *kwargs* as a string (alias for more readable
    command construction).

    With a non-None *default* the lookup cannot fail; without one a
    missing field raises ``KeyError``.
    """
    if default is None:
        return str(kwargs[field])
    return str(kwargs.get(field, default))
def prt_results(self, goea_results):
    """Print GOEA results to the screen (no outfile set) or to file(s)."""
    if self.args.outfile is None:
        self._prt_results(goea_results)
        return
    outfiles = self.args.outfile.split(",")
    # Grouped output only when a grouping preparation object exists.
    grpwr = self.prepgrp.get_objgrpwr(goea_results) if self.prepgrp else None
    if grpwr is None:
        self.prt_outfiles_flat(goea_results, outfiles)
    else:
        grpwr.prt_outfiles_grouped(outfiles)
def _send_request(self, operation, url, payload, desc):
    """Send an HTTP request to DCNM, logging in before and out after.

    :param operation: HTTP verb ('GET', 'POST', 'PUT', 'DELETE')
    :param url: full request URL
    :param payload: body object to JSON-encode, or ''/None for no body
    :param desc: human-readable prefix used in the log message
    :returns: the ``requests`` response object, or None on failure
    :raises dexc.DfaClientRequestFailed: on HTTP/timeout/connection errors
    """
    res = None
    try:
        payload_json = None
        if payload and payload != '':
            payload_json = jsonutils.dumps(payload)
        # Each request runs inside its own login/logout session.
        self._login()
        desc_lookup = {'POST': ' creation', 'PUT': ' update',
                       'DELETE': ' deletion', 'GET': ' get'}
        # NOTE(review): verify=False disables TLS certificate checking.
        res = requests.request(operation, url, data=payload_json,
                               headers=self._req_headers,
                               timeout=self.timeout_resp, verify=False)
        desc += desc_lookup.get(operation, operation.lower())
        LOG.info("DCNM-send_request: %(desc)s %(url)s %(pld)s",
                 {'desc': desc, 'url': url, 'pld': payload})
        self._logout()
    except (requests.HTTPError, requests.Timeout,
            requests.ConnectionError) as exc:
        LOG.exception('Error during request: %s', exc)
        raise dexc.DfaClientRequestFailed(reason=exc)
    return res | Send request to DCNM.
def filter_regex(names, regex):
    """Return a tuple of the strings in *names* matched (via ``search``) by
    the compiled regular expression *regex*."""
    matching = [name for name in names if regex.search(name) is not None]
    return tuple(matching)
def del_host(self, mac):
    """Delete the DHCP host object with the given MAC address.

    Opens the host object via OMAPI, validates the server's handle, then
    issues a delete for that handle.

    @type mac: str
    @raises OmapiErrorNotFound: if no host object with the given MAC exists
    @raises OmapiError: on an invalid handle or a failed delete
    @raises socket.error:
    """
    msg = OmapiMessage.open(b"host")
    msg.obj.append((b"hardware-address", pack_mac(mac)))
    # hardware-type 1 == Ethernet.
    msg.obj.append((b"hardware-type", struct.pack("!I", 1)))
    response = self.query_server(msg)
    if response.opcode != OMAPI_OP_UPDATE:
        raise OmapiErrorNotFound()
    if response.handle == 0:
        raise OmapiError("received invalid handle from server")
    response = self.query_server(OmapiMessage.delete(response.handle))
    if response.opcode != OMAPI_OP_STATUS:
        raise OmapiError("delete failed") | Delete a host object with with given mac address.
@type mac: str
@raises ValueError:
@raises OmapiError:
@raises OmapiErrorNotFound: if no lease object with the given
mac address could be found
@raises socket.error: |
def keyring_auth(username=None, region=None, authenticate=True):
    """Authenticate with a password pulled from the system keyring.

    Uses *username* if given, otherwise the ``keyring_username`` config
    setting. Raises if the keyring module is missing, no username is
    configured, or no password is stored for the user. If *region* is
    passed it is forwarded to ``set_credentials``.
    """
    if not keyring:
        # `keyring` is an optional import; falsy means it failed to import.
        raise exc.KeyringModuleNotInstalled("The 'keyring' Python module is "
                "not installed on this system.")
    if username is None:
        username = settings.get("keyring_username")
    if not username:
        raise exc.KeyringUsernameMissing("No username specified for keyring "
                "authentication.")
    password = keyring.get_password("pyrax", username)
    if password is None:
        raise exc.KeyringPasswordNotFound("No password was found for the "
                "username '%s'." % username)
    set_credentials(username, password, region=region,
            authenticate=authenticate) | Use the password stored within the keyring to authenticate. If a username
is supplied, that name is used; otherwise, the keyring_username value
from the config file is used.
If there is no username defined, or if the keyring module is not installed,
or there is no password set for the given username, the appropriate errors
will be raised.
If the region is passed, it will authenticate against the proper endpoint
for that region, and set the default region for connections. |
def set_color_temp(self, color_temp):
    """Set the device's color temperature via the Abode integrations API.

    Returns True when a request was sent and local state was updated from
    the response; False when the device exposes no control URL.
    Raises AbodeException if the response is for a different device.
    """
    if self._json_state['control_url']:
        url = CONST.INTEGRATIONS_URL + self._device_uuid
        color_data = {
            'action': 'setcolortemperature',
            'colorTemperature': int(color_temp)
        }
        response = self._abode.send_request("post", url, data=color_data)
        response_object = json.loads(response.text)
        _LOGGER.debug("Set Color Temp Response: %s", response.text)
        # The panel must echo back the same device id; anything else means
        # the status update targeted the wrong device.
        if response_object['idForPanel'] != self.device_id:
            raise AbodeException((ERROR.SET_STATUS_DEV_ID))
        # A mismatched value is only logged; the response value still wins.
        if response_object['colorTemperature'] != int(color_temp):
            _LOGGER.warning(
                ("Set color temp mismatch for device %s. "
                 "Request val: %s, Response val: %s "),
                self.device_id, color_temp,
                response_object['colorTemperature'])
        self.update(response_object)
        _LOGGER.info("Set device %s color_temp to: %s",
                     self.device_id, color_temp)
        return True
    return False | Set device color.
def infer_from_frame_stack(self, ob_stack):
    """Run the policy network on a stack of observations.

    Args:
      ob_stack: array of shape (1, frame_stack_size, height, width, channels)

    Returns:
      (logits, value_function) as produced by the session run.
    """
    fetches = [self.logits_t, self.value_function_t]
    logits, vf = self.sess.run(fetches, feed_dict={self.obs_t: ob_stack})
    return logits, vf
Args:
ob_stack: array of shape (1, frame_stack_size, height, width, channels)
Returns:
logits and vf. |
def _is_gpg2(version):
    """Return True if *version* denotes GnuPG version 2.x.

    :param version: version value understood by ``_match_version_string``,
        which yields a (major, minor, micro) tuple.
    """
    major, _minor, _micro = _match_version_string(version)
    # Idiomatic boolean return instead of `if ...: return True / return False`.
    return major == 2
:param tuple version: A tuple of three integers indication major, minor,
and micro version numbers. |
def apply_patches(self):
    """Apply every registered patch in sorted order.

    :return: Method success — True only if *all* patches applied
        successfully.
    :rtype: bool
    """
    success = True
    for name, patch in sorted(self):
        # Accumulate instead of overwrite: previously an earlier failure
        # was forgotten as soon as a later patch succeeded.
        if not self.apply_patch(patch):
            success = False
    return success
:return: Method success.
:rtype: bool |
def get_xy_name(self, yidx, xidx=0):
    """Return display names for the given x/y variable indices.

    :param yidx: int or list of ints selecting the y variables
    :param xidx: int selecting the x variable (0 == time)
    :return: (xname, yname) where each is [plain_names, formatted_names]
    """
    assert isinstance(xidx, int)
    indices = [yidx] if isinstance(yidx, int) else yidx
    # Index 0 is always the time axis, followed by the variable names.
    plain = ['Time [s]'] + self.uname
    formatted = ['$Time\\ [s]$'] + self.fname
    xname = [plain[xidx], formatted[xidx]]
    yname = [[plain[i] for i in indices],
             [formatted[i] for i in indices]]
    return xname, yname
:param yidx:
:param xidx:
:return: |
def validate(document, spec):
    """Validate that *document* meets *spec*.

    Returns True on success; otherwise raises ValueError listing the
    missing and/or invalid keys. Field values present in the document are
    normalized in place via each field's ``validate``. An empty/falsy spec
    accepts any document.
    """
    if not spec:
        return True
    # Python 3 fix: dict.iteritems() no longer exists; items() behaves the
    # same here (the spec dict is not mutated while iterating).
    missing = [key for key, field in spec.items()
               if field.required and key not in document]
    failed = []
    for key, field in spec.items():
        if key in document:
            try:
                document[key] = field.validate(document[key])
            except ValueError:
                failed.append(key)
    if missing or failed:
        if missing and not failed:
            raise ValueError("Required fields missing: %s" % (missing))
        if failed and not missing:
            raise ValueError("Keys did not match spec: %s" % (failed))
        raise ValueError("Missing fields: %s, Invalid fields: %s" % (missing, failed))
    return True
validation was successful, but otherwise raises a ValueError. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.