| code | docstring |
|---|---|
def load_config(self, config_path=None):
    """Load application configuration from a file and merge it with the
    default configuration.

    If the ``FEDORA_MESSAGING_CONF`` environment variable is set to a
    filesystem path, the configuration will be loaded from that location.
    Otherwise, the path defaults to ``/etc/fedora-messaging/config.toml``.

    :param config_path: optional explicit path to the TOML config file.
    :returns: self, with the merged configuration applied.
    :raises exceptions.ConfigurationException: if the file cannot be parsed.
    """
    self.loaded = True
    config = copy.deepcopy(DEFAULTS)
    if config_path is None:
        # Environment variable takes precedence over the system-wide default.
        config_path = os.environ.get(
            "FEDORA_MESSAGING_CONF", "/etc/fedora-messaging/config.toml"
        )
    if os.path.exists(config_path):
        _log.info("Loading configuration from {}".format(config_path))
        with open(config_path) as fd:
            try:
                file_config = toml.load(fd)
                for key in file_config:
                    # Keys are normalized to lower case when merging.
                    config[key.lower()] = file_config[key]
            except toml.TomlDecodeError as e:
                msg = "Failed to parse {}: error at line {}, column {}: {}".format(
                    config_path, e.lineno, e.colno, e.msg
                )
                # Chain the original error so the parse traceback is kept.
                raise exceptions.ConfigurationException(msg) from e
    else:
        _log.info("The configuration file, {}, does not exist.".format(config_path))
    self.update(config)
    self._validate()
    return self
def get_density(self, compound='', element=''):
    """Return the density of the given element of the given compound.

    Parameters:
    ===========
    compound: string (default is empty). If empty, the density of every
        element of every compound in the stack is returned as a nested dict.
    element: string (default is same as compound).

    Raises:
    =======
    ValueError if the compound or element is not defined in the stack
    """
    _stack = self.stack
    if compound == '':
        # No compound given: recurse over the whole stack and build
        # {compound: {element: density}}.
        list_all_dict = {}
        for _compound in _stack.keys():
            list_all_dict[_compound] = {
                _element: self.get_density(compound=_compound,
                                           element=_element)
                for _element in _stack[_compound]['elements']
            }
        return list_all_dict
    if compound not in _stack.keys():
        list_compounds_joined = ', '.join(_stack.keys())
        # Bug fix: the message previously formatted the builtin ``compile``
        # instead of the requested compound name.
        raise ValueError("Compound '{}' could not be found in {}".format(
            compound, list_compounds_joined))
    if element == '':
        # Single-element compound: element name defaults to the compound name.
        element = compound
    list_element = _stack[compound].keys()
    if element not in list_element:
        list_element_joined = ', '.join(list_element)
        raise ValueError("Element '{}' should be any of those elements: {}".format(
            element, list_element_joined))
    return _stack[compound][element]['density']['value']
def connect(self, host, port):
    """Connect to a host and port.

    Writes the AMQP protocol header after establishing the transport
    connection and, when configured for synchronous connects, blocks
    reading frames until the connection handshake completes.
    """
    self._connected = False
    self._host = "%s:%d" % (host, port)
    self._closed = False
    # Pre-populate close info so a failed connect reports a useful reason.
    self._close_info = {
        'reply_code': 0,
        'reply_text': 'failed to connect to %s' % (self._host),
        'class_id': 0,
        'method_id': 0
    }
    self._transport.connect((host, port))
    self._transport.write(PROTOCOL_HEADER)
    self._last_octet_time = time.time()
    if self._synchronous_connect:
        # Channel 0 carries the connection-level handshake; block until done.
        self._channels[0].add_synchronous_cb(self._channels[0]._recv_start)
        while not self._connected:
            self.read_frames()
def getReaderNames(self):
    """Returns the list of PCSC readers on which to wait for cards.

    When ``readersAsked`` is set, only those requested readers that are
    actually present are returned; otherwise all detected readers are.
    """
    hresult, pcscreaders = SCardListReaders(self.hcontext, [])
    if hresult != 0 and hresult != SCARD_E_NO_READERS_AVAILABLE:
        raise ListReadersException(hresult)
    if self.readersAsked is None:
        return pcscreaders
    readers = []
    for requested in self.readersAsked:
        # Non-string reader specs are coerced to their string name.
        if not isinstance(requested, type("")):
            requested = str(requested)
        if requested in pcscreaders:
            readers.append(requested)
    return readers
def _prep_smooth(t, y, dy, span, t_out, span_out, period):
    """Private function to prepare & check variables for smooth utilities.

    Wraps times onto ``[0, period)`` when a period is given, validates and
    sorts the inputs by time, and resolves the mutually-exclusive ``span`` /
    ``span_out`` arguments.

    Returns (t, y, dy, span, t_out, span_out, indices) where ``indices`` is
    the insertion position of each ``t_out`` within the sorted ``t`` (or
    None when ``span_out`` is not used).
    """
    if period:
        # Fold times into one period so neighbors are found cyclically.
        t = t % period
        if t_out is not None:
            t_out = t_out % period
    t, y, dy = validate_inputs(t, y, dy, sort_by=t)
    if span_out is not None:
        if t_out is None:
            raise ValueError("Must specify t_out when span_out is given")
        if span is not None:
            raise ValueError("Must specify only one of span, span_out")
        span, t_out = np.broadcast_arrays(span_out, t_out)
        indices = np.searchsorted(t, t_out)
    elif span is None:
        raise ValueError("Must specify either span_out or span")
    else:
        indices = None
    return t, y, dy, span, t_out, span_out, indices
def cleanup():
    """Cleanup the output directory.

    Removes the module-level output directory if it exists. During a dry
    run only the warning is logged and nothing is deleted.
    """
    if _output_dir and os.path.exists(_output_dir):
        log.msg_warn("Cleaning up output directory at '{output_dir}' ..."
                     .format(output_dir=_output_dir))
        if not _dry_run:
            shutil.rmtree(_output_dir)
def _get_level_values(self, level, unique=False):
    """Return vector of label values for requested level,
    equal to the length of the index.

    **this is an internal method**

    Parameters
    ----------
    level : int level
    unique : bool, default False
        if True, drop duplicated values

    Returns
    -------
    values : ndarray
    """
    values = self.levels[level]
    level_codes = self.codes[level]
    if unique:
        level_codes = algos.unique(level_codes)
    # Map codes back to level values; missing codes become the level's
    # NA value via fill_value.
    filled = algos.take_1d(values._values, level_codes,
                           fill_value=values._na_value)
    values = values._shallow_copy(filled)
    return values
def encode_events(self, duration, events, values, dtype=bool):
    """Encode labeled events as a time-series matrix.

    Parameters
    ----------
    duration : number
        The duration of the track
    events : ndarray, shape=(n,)
        Time index of the events
    values : ndarray, shape=(n, m)
        Values array. Must have the same first index as `events`.
    dtype : numpy data type

    Returns
    -------
    target : ndarray, shape=(n_frames, n_values)
    """
    # Bug fix: the default was ``np.bool``, an alias removed in NumPy 1.24,
    # which made this ``def`` line fail at import time; the builtin ``bool``
    # is the documented, behaviorally identical replacement.
    frames = time_to_frames(events, sr=self.sr,
                            hop_length=self.hop_length)
    n_total = int(time_to_frames(duration, sr=self.sr,
                                 hop_length=self.hop_length))
    # Allocate enough rows for events past the nominal duration; the
    # overflow is trimmed before returning.
    n_alloc = n_total
    if np.any(frames):
        n_alloc = max(n_total, 1 + int(frames.max()))
    target = np.empty((n_alloc, values.shape[1]),
                      dtype=dtype)
    target.fill(fill_value(dtype))
    values = values.astype(dtype)
    for column, event in zip(values, frames):
        # Accumulate events that land on the same frame.
        target[event] += column
    return target[:n_total]
def downgrade(engine, desired_version):
    """Downgrades the assets db at the given engine to the desired version.

    Parameters
    ----------
    engine : Engine
        An SQLAlchemy engine to the assets database.
    desired_version : int
        The desired resulting version for the assets database.
    """
    # All work happens in one transaction so a failed downgrade rolls back.
    with engine.begin() as conn:
        metadata = sa.MetaData(conn)
        metadata.reflect()
        version_info_table = metadata.tables['version_info']
        starting_version = sa.select((version_info_table.c.version,)).scalar()
        if starting_version < desired_version:
            # Cannot "downgrade" forward.
            raise AssetDBImpossibleDowngrade(db_version=starting_version,
                                             desired_version=desired_version)
        if starting_version == desired_version:
            # Already at the requested version; nothing to do.
            return
        ctx = MigrationContext.configure(conn)
        op = Operations(ctx)
        # Apply the downgrade methods from newest to oldest.
        downgrade_keys = range(desired_version, starting_version)[::-1]
        # Foreign-key checks are disabled while tables are rewritten.
        _pragma_foreign_keys(conn, False)
        for downgrade_key in downgrade_keys:
            _downgrade_methods[downgrade_key](op, conn, version_info_table)
        _pragma_foreign_keys(conn, True)
def run_linter(self, linter) -> None:
    """Run a checker class.

    Skips the linter when it is not enabled in the ``[all]`` section of
    the config or requires a newer Python than the running interpreter.

    :raises ModuleNotInstalled: if a module the linter needs is missing.
    """
    self.current = linter.name
    if (linter.name not in self.parser["all"].as_list("linters")
            or linter.base_pyversion > sys.version_info):
        return
    if any(x not in self.installed for x in linter.requires_install):
        raise ModuleNotInstalled(linter.requires_install)
    linter.add_output_hook(self.out_func)
    linter.set_config(self.fn, self.parser[linter.name])
    linter.run(self.files)
    # Keep a previously-set non-zero status code.
    self.status_code = self.status_code or linter.status_code
def on_menu(self, event):
    """Handle menu selections.

    Ignores events that do not resolve to a menu item; otherwise runs the
    item's handler and forwards the item to the child process pipe.
    """
    state = self.state
    ret = self.menu.find_selected(event)
    if ret is None:
        return
    ret.call_handler()
    state.child_pipe_send.send(ret)
def accumulate(a_generator, cooperator=None):
    """Start a Deferred whose callback arg is a deque of the accumulation
    of the values yielded from a_generator.

    :param a_generator: An iterator which yields some not None values.
    :param cooperator: optional Twisted cooperator object; defaults to the
        module-level ``cooperate`` function.
    :return: A Deferred to which the next callback will be called with
        the yielded contents of the generator function.
    """
    if cooperator:
        own_cooperate = cooperator.cooperate
    else:
        own_cooperate = cooperate
    # The bucket captures values as the cooperator drains the generator.
    spigot = ValueBucket()
    items = stream_tap((spigot,), a_generator)
    d = own_cooperate(items).whenDone()
    d.addCallback(accumulation_handler, spigot)
    return d
def uniform_crossover(random, mom, dad, args):
    """Return the offspring of uniform crossover on the candidates.

    This function performs uniform crossover (UX). For each element
    of the parents, a biased coin is flipped to determine whether
    the first offspring gets the 'mom' or the 'dad' element. An
    optional keyword argument in args, ``ux_bias``, determines the bias.

    .. Arguments:
       random -- the random number generator object
       mom -- the first parent candidate
       dad -- the second parent candidate
       args -- a dictionary of keyword arguments

    Optional keyword arguments in args:

    - *crossover_rate* -- the rate at which crossover is performed
      (default 1.0)
    - *ux_bias* -- the bias toward the first candidate in the crossover
      (default 0.5)
    """
    ux_bias = args.setdefault('ux_bias', 0.5)
    crossover_rate = args.setdefault('crossover_rate', 1.0)
    if random.random() >= crossover_rate:
        # No crossover this time: the children are the parents themselves.
        return [mom, dad]
    bro, sis = copy.copy(dad), copy.copy(mom)
    for index, (mom_gene, dad_gene) in enumerate(zip(mom, dad)):
        # Biased coin decides whether this gene is swapped between offspring.
        if random.random() < ux_bias:
            bro[index] = mom_gene
            sis[index] = dad_gene
    return [bro, sis]
def _initialize_applicationUiFile():
    """Initializes the Application ui file.

    Resolves the ui resource path into ``RuntimeGlobals.ui_file`` and
    raises if the resource is missing, since the application cannot run
    without it.
    """
    RuntimeGlobals.ui_file = umbra.ui.common.get_resource_path(UiConstants.ui_file)
    if not foundations.common.path_exists(RuntimeGlobals.ui_file):
        raise foundations.exceptions.FileExistsError("'{0}' ui file is not available, {1} will now close!".format(
            UiConstants.ui_file, Constants.application_name))
async def run_async(self):
    """Starts the run loop and manages exceptions and cleanup.

    A failure in the run loop is logged, after which all pumps are shut
    down. A failure during shutdown is re-raised with the cleanup error
    chained so its traceback is preserved.
    """
    try:
        await self.run_loop_async()
    except Exception as err:
        _logger.error("Run loop failed %r", err)
    try:
        _logger.info("Shutting down all pumps %r", self.host.guid)
        await self.remove_all_pumps_async("Shutdown")
    except Exception as cleanup_err:
        # Bug fix: the inner handler previously rebound ``err`` (shadowing
        # the run-loop error) and raised without chaining.
        raise Exception(
            "Failed to remove all pumps {!r}".format(cleanup_err)
        ) from cleanup_err
def create(cls, job_id, spider, workflow, results=None,
           logs=None, status=JobStatus.PENDING):
    """Create a new entry for a scheduled crawler job.

    The new object is added to the current database session but not
    committed; the caller is responsible for committing.
    """
    obj = cls(
        job_id=job_id,
        spider=spider,
        workflow=workflow,
        results=results,
        logs=logs,
        status=status,
    )
    db.session.add(obj)
    return obj
def reparse(self, unsaved_files=None, options=0):
    """Reparse an already parsed translation unit.

    In-memory contents for files can be provided by passing a list of pairs
    as unsaved_files, the first items should be the filenames to be mapped
    and the second should be the contents to be substituted for the
    file. The contents may be passed as strings or file objects.

    :raises TypeError: if an unsaved file's contents is neither a string
        nor a file-like object whose ``read()`` returns a string.
    """
    if unsaved_files is None:
        unsaved_files = []
    unsaved_files_array = 0
    if len(unsaved_files):
        unsaved_files_array = (_CXUnsavedFile * len(unsaved_files))()
        for i, (name, value) in enumerate(unsaved_files):
            if not isinstance(value, str):
                # Allow file-like objects by reading their contents.
                # Bug fix: a stray debug ``print(value)`` has been removed.
                value = value.read()
            if not isinstance(value, str):
                raise TypeError('Unexpected unsaved file contents.')
            unsaved_files_array[i].name = name
            unsaved_files_array[i].contents = value
            unsaved_files_array[i].length = len(value)
    ptr = conf.lib.clang_reparseTranslationUnit(self, len(unsaved_files),
                                                unsaved_files_array, options)
def create_pairwise_bilateral(sdims, schan, img, chdim=-1):
    """Util function that create pairwise bilateral potentials. This works for
    all image dimensions. For the 2D case does the same as
    `DenseCRF2D.addPairwiseBilateral`.

    Parameters
    ----------
    sdims: list or tuple
        The scaling factors per dimension. This is referred to `sxy` in
        `DenseCRF2D.addPairwiseBilateral`.
    schan: list or tuple
        The scaling factors per channel in the image. This is referred to
        `srgb` in `DenseCRF2D.addPairwiseBilateral`.
    img: numpy.array
        The input image.
    chdim: int, optional
        This specifies where the channel dimension is in the image. For
        example `chdim=2` for a RGB image of size (240, 300, 3). If the
        image has no channel dimension (e.g. it has only one channel) use
        `chdim=-1`.
    """
    # Bring the channel axis to the front (adding one when there is none).
    if chdim == -1:
        im_feat = img[np.newaxis].astype(np.float32)
    else:
        im_feat = np.rollaxis(img, chdim).astype(np.float32)
    # Scale each channel by its factor (a single Number scales all of them).
    if isinstance(schan, Number):
        im_feat /= schan
    else:
        for channel, factor in enumerate(schan):
            im_feat[channel] /= factor
    # Build the spatial coordinate grid and scale each axis.
    cord_range = [range(extent) for extent in im_feat.shape[1:]]
    mesh = np.array(np.meshgrid(*cord_range, indexing='ij'), dtype=np.float32)
    for axis, factor in enumerate(sdims):
        mesh[axis] /= factor
    # Stack spatial and appearance features; one column per pixel.
    feats = np.concatenate([mesh, im_feat])
    return feats.reshape([feats.shape[0], -1])
def GetValidHostsForCert(cert):
    """Returns a list of valid host globs for an SSL certificate.

    Args:
      cert: A dictionary representing an SSL certificate.
    Returns:
      list: A list of valid host globs.
    """
    # subjectAltName DNS entries take precedence over the subject CN.
    if 'subjectAltName' in cert:
        san = cert['subjectAltName']
        return [entry[1] for entry in san if entry[0].lower() == 'dns']
    return [field[0][1] for field in cert['subject']
            if field[0][0].lower() == 'commonname']
def configure(self, options=None, attribute_options=None):
    """Configures the options and attribute options of the mapping associated
    with this representer with the given dictionaries.

    :param dict options: configuration options for the mapping associated
        with this representer.
    :param dict attribute_options: attribute options for the mapping
        associated with this representer.
    """
    self._mapping.update(options=options,
                         attribute_options=attribute_options)
def xor_app(parser, cmd, args):
    """Xor a value with a key.

    Registers ``-d``/``-x`` options controlling how the key string is
    interpreted (decimal or hexadecimal integer), parses the arguments,
    and xors the key with the value (read from stdin when omitted).
    """
    parser.add_argument(
        '-d', '--dec',
        help='interpret the key as a decimal integer',
        dest='type',
        action='store_const',
        const=int
    )
    parser.add_argument(
        '-x', '--hex',
        help='interpret the key as an hexadecimal integer',
        dest='type',
        action='store_const',
        const=lambda v: int(v, 16)
    )
    parser.add_argument('key', help='the key to xor the value with')
    parser.add_argument('value', help='the value to xor, read from stdin if omitted', nargs='?')
    args = parser.parse_args(args)
    if args.type is not None:
        # Convert the key using whichever interpretation was selected.
        args.key = args.type(args.key)
    return xor(args.key, pwnypack.main.binary_value_or_stdin(args.value))
def make(class_name, base, schema):
    """Create a new schema aware type.

    Builds a subclass of *base* named *class_name* whose ``SCHEMA``
    attribute is set to *schema*.
    """
    namespace = {'SCHEMA': schema}
    return type(class_name, (base,), namespace)
def iterpws(self, n):
    """Returns passwords in order of their frequencies.

    @n: The number of passwords to return
    Return: pwid, password, frequency

    Every password is assigned a uniq id, for efficient access.
    """
    # Ids of the n most frequent passwords, most frequent first.
    top_ids = np.argsort(self._freq_list)[::-1][:n]
    for pwid in top_ids:
        password = self._T.restore_key(pwid)
        # Skip passwords outside the configured length window.
        if self._min_pass_len <= len(password) <= self._max_pass_len:
            yield pwid, password, self._freq_list[pwid]
def InitializeUpload(self, http_request, http=None, client=None):
    """Initialize this upload from the given http_request.

    For resumable uploads this performs the initial request to obtain the
    upload URL and chunk granularity; non-resumable strategies return
    immediately. When ``auto_transfer`` is set, the upload is streamed
    right away and the result of ``StreamInChunks`` is returned instead
    of the initial response.

    :raises exceptions.UserError: if no strategy is configured, or neither
        ``http`` nor ``client`` is given.
    :raises exceptions.HttpError: if the initial request does not return 200.
    """
    if self.strategy is None:
        raise exceptions.UserError(
            'No upload strategy set; did you call ConfigureRequest?')
    if http is None and client is None:
        raise exceptions.UserError('Must provide client or http.')
    if self.strategy != RESUMABLE_UPLOAD:
        return
    http = http or client.http
    if client is not None:
        http_request.url = client.FinalizeTransferUrl(http_request.url)
    self.EnsureUninitialized()
    http_response = http_wrapper.MakeRequest(http, http_request,
                                             retries=self.num_retries)
    if http_response.status_code != http_client.OK:
        raise exceptions.HttpError.FromResponse(http_response)
    # The server may dictate the chunk granularity for subsequent requests.
    self.__server_chunk_granularity = http_response.info.get(
        'X-Goog-Upload-Chunk-Granularity')
    url = http_response.info['location']
    if client is not None:
        url = client.FinalizeTransferUrl(url)
    self._Initialize(http, url)
    if self.auto_transfer:
        return self.StreamInChunks()
    return http_response
def parse(cls, backend, ik, spk, spk_signature, otpks):
    """Use this method when creating a bundle from data you retrieved directly
    from some PEP node. This method applies an additional decoding step to
    the public keys in the bundle. Pass the same structure as the
    constructor expects.
    """
    def _decode(wire):
        # The backend returns a tuple; only the key object is needed.
        return backend.decodePublicKey(wire)[0]

    ik = _decode(ik)
    spk["key"] = _decode(spk["key"])
    otpks = [{"key": _decode(otpk["key"]), "id": otpk["id"]}
             for otpk in otpks]
    return cls(ik, spk, spk_signature, otpks)
def update_state(self, state):
    """Updates the VM state.

    This operation will also update the settings file with the correct
    information about the saved state file and delete this file from disk
    when appropriate.

    :param state: the new state, of type :class:`MachineState`.
    :raises TypeError: if ``state`` is not a MachineState.
    """
    if not isinstance(state, MachineState):
        raise TypeError("state can only be an instance of type MachineState")
    self._call("updateState",
               in_p=[state])
def full_rule(self):
    """The full url rule for this route, including any blueprint prefix."""
    return join(self.bp_prefix, self.rule, trailing_slash=self.rule.endswith('/'))
def collect(self):
    """Publish all mdstat metrics.

    Flattens the nested dict returned by ``_parse_mdstat`` into dotted
    metric names and publishes one gauge per leaf value.
    """
    def traverse(d, metric_name=''):
        # Recursively walk the nested dict, building dotted metric names.
        # Bug fix: ``iteritems()`` is Python 2 only and raises
        # AttributeError on Python 3; ``items()`` works on both.
        for key, value in d.items():
            if isinstance(value, dict):
                if metric_name == '':
                    metric_name_next = key
                else:
                    metric_name_next = metric_name + '.' + key
                traverse(value, metric_name_next)
            else:
                metric_name_finished = metric_name + '.' + key
                self.publish_gauge(
                    name=metric_name_finished,
                    value=value,
                    precision=1
                )
    md_state = self._parse_mdstat()
    traverse(md_state, '')
def convert_to_str(d):
    """Recursively convert all values in a dictionary to strings.

    This is required because setup() does not like unicode in
    the values it is supplied.

    :param d: mapping whose keys and values should be stringified.
    :returns: a new dict; lists/tuples become lists of strings and nested
        dicts are converted recursively.
    """
    d2 = {}
    for k, v in d.items():
        k = str(k)
        # Improvement: isinstance also accepts subclasses (OrderedDict,
        # defaultdict, namedtuple, ...) which ``type(v) in [...]`` rejected.
        if isinstance(v, (list, tuple)):
            d2[k] = [str(a) for a in v]
        elif isinstance(v, dict):
            d2[k] = convert_to_str(v)
        else:
            d2[k] = str(v)
    return d2
def add_force_flaky_options(add_option):
    """Add options to the test runner that force all tests to be flaky.

    :param add_option:
        A function that can add an option to the test runner.
        Its argspec should equal that of argparse.add_option.
    :type add_option:
        `callable`
    """
    add_option(
        '--force-flaky',
        action="store_true",
        dest="force_flaky",
        default=False,
        help="If this option is specified, we will treat all tests as "
             "flaky."
    )
    # The two numeric knobs share the same shape; declare them as data.
    for flag, dest, default, adverb in (
        ('--max-runs', 'max_runs', 2, 'most'),
        ('--min-passes', 'min_passes', 1, 'least'),
    ):
        add_option(
            flag,
            action="store",
            dest=dest,
            type=int,
            default=default,
            help="If --force-flaky is specified, we will run each test at "
                 + adverb + " this many times (unless the test has its own flaky "
                 "decorator)."
        )
def get_calling_namespaces():
    """Return the locals and globals for the function that called
    into this module in the current call stack.
    """
    # Raise and catch a throwaway exception to obtain a frame object for
    # the current call stack without importing inspect.
    try: 1//0
    except ZeroDivisionError:
        frame = sys.exc_info()[2].tb_frame.f_back
    # Walk outward past any frames belonging to this module itself.
    while frame.f_globals.get("__name__") == __name__:
        frame = frame.f_back
    return frame.f_locals, frame.f_globals
def all(self, archived=False, limit=None, page=None):
    """Get all adapter data.

    :param archived: when True, fetch archived entries instead of active ones.
    :param limit: optional page-size limit passed through to the request.
    :param page: optional page number passed through to the request.
    """
    # Bug fix: a dead ``path = partial(_path, self.adapter)`` assignment
    # (immediately overwritten in both branches) has been removed.
    if archived:
        path = _path(self.adapter, 'archived')
    else:
        path = _path(self.adapter)
    return self._get(path, limit=limit, page=page)
def render_crispy_form(form, helper=None, context=None):
    """Renders a form and returns its HTML output.

    This function wraps the template logic in a function easy to use in a
    Django view.
    """
    from crispy_forms.templatetags.crispy_forms_tags import CrispyFormNode
    # Only resolve the 'helper' template variable when a helper was given.
    if helper is not None:
        node = CrispyFormNode('form', 'helper')
    else:
        node = CrispyFormNode('form', None)
    node_context = Context(context)
    node_context.update({
        'form': form,
        'helper': helper
    })
    return node.render(node_context)
def streamify(self, state, frame):
    """Prepare frame for output as a byte-stuffed stream.

    Any occurrence of the prefix inside the frame is escaped with the NOP
    code, and the result is wrapped in begin/end markers.
    """
    escaped = (self.prefix + self.nop).join(frame.split(self.prefix))
    return ''.join([self.prefix, self.begin, escaped, self.prefix, self.end])
def _stop(self):
    """Stops recording and destroys the recorder process.

    NOTE(review): the original docstring claimed this returns the recorded
    data and timestamps, but the method returns nothing — confirm against
    callers.
    """
    self._pause()
    self._cmds_q.put(("stop",))
    try:
        self._recorder.terminate()
    except Exception:
        # Best-effort: the recorder process may already be gone.
        pass
    self._recording = False
def get_connection(self, command, args=()):
    """Get free connection from pool.

    Pub/sub commands are pinned to a single dedicated connection, which is
    removed from the free pool while in use.

    Returns (connection, address), or (None, self._address) when no
    suitable free connection is available.
    """
    command = command.upper().strip()
    is_pubsub = command in _PUBSUB_COMMANDS
    if is_pubsub and self._pubsub_conn:
        if not self._pubsub_conn.closed:
            return self._pubsub_conn, self._pubsub_conn.address
        # Cached pub/sub connection died; fall through to pick a new one.
        self._pubsub_conn = None
    for i in range(self.freesize):
        conn = self._pool[0]
        # Rotate so repeated calls cycle through the pool fairly.
        self._pool.rotate(1)
        if conn.closed:
            continue
        if conn.in_pubsub:
            continue
        if is_pubsub:
            # Reserve this connection exclusively for pub/sub use.
            self._pubsub_conn = conn
            self._pool.remove(conn)
            self._used.add(conn)
        return conn, conn.address
    return None, self._address
def setup_benchbuild():
    """Setup benchbuild inside a container.

    This will query for an existing installation of benchbuild and
    try to upgrade it to the latest version, if possible.
    """
    LOG.debug("Setting up Benchbuild...")
    venv_dir = local.path("/benchbuild")
    # Register the virtualenv directory as a container prefix.
    prefixes = CFG["container"]["prefixes"].value
    prefixes.append(venv_dir)
    CFG["container"]["prefixes"] = prefixes
    src_dir = str(CFG["source_dir"])
    # NOTE(review): ``src_dir`` is the result of str(), which is never
    # None — this check may intend to compare against the string "None".
    have_src = src_dir is not None
    if have_src:
        __mount_source(src_dir)
    benchbuild = find_benchbuild()
    if benchbuild and not requires_update(benchbuild):
        # Existing install is current; only refresh from source if mounted.
        if have_src:
            __upgrade_from_source(venv_dir, with_deps=False)
        return
    setup_virtualenv(venv_dir)
    if have_src:
        __upgrade_from_source(venv_dir)
    else:
        __upgrade_from_pip(venv_dir)
def api_headers_tween_factory(handler, registry):
    """This tween provides necessary API headers.

    Wraps the next handler so every response is stamped with the API
    version and a per-request GUID header.
    """
    def api_headers_tween(request):
        # Delegate to the next handler, then decorate the response.
        response = handler(request)
        set_version(request, response)
        set_req_guid(request, response)
        return response
    return api_headers_tween
def unrecord(plugin_or_specs, filename):
    """Remove a database migration record.

    \b
    A record can be expressed with the following syntaxes:
    - plugin filename
    - plugin filename.js
    - plugin:filename
    - plugin:filename.js
    """
    plugin, filename = normalize_migration(plugin_or_specs, filename)
    migration = get_migration(plugin, filename)
    if migration:
        log.info('Removing migration %s:%s', plugin, filename)
        db = get_db()
        db.eval(UNRECORD_WRAPPER, migration['_id'])
    else:
        log.error('Migration not found %s:%s', plugin, filename)
def load_diagram_from_csv(filepath, bpmn_diagram):
    """Reads a CSV file from given filepath and maps it into inner
    representation of BPMN diagram.

    The given ``bpmn_diagram`` instance is populated in place.

    :param filepath: string with the input CSV filepath,
    :param bpmn_diagram: an instance of BpmnDiagramGraph class.
    """
    sequence_flows = bpmn_diagram.sequence_flows
    process_elements_dict = bpmn_diagram.process_elements
    diagram_attributes = bpmn_diagram.diagram_attributes
    plane_attributes = bpmn_diagram.plane_attributes
    process_dict = BpmnDiagramGraphCSVImport.import_csv_file_as_dict(filepath)
    BpmnDiagramGraphCSVImport.populate_diagram_elements_dict(diagram_attributes)
    BpmnDiagramGraphCSVImport.populate_process_elements_dict(process_elements_dict, process_dict)
    BpmnDiagramGraphCSVImport.populate_plane_elements_dict(plane_attributes)
    BpmnDiagramGraphCSVImport.import_nodes(process_dict, bpmn_diagram, sequence_flows)
    BpmnDiagramGraphCSVImport.representation_adjustment(process_dict, bpmn_diagram, sequence_flows)
def frontiers_style():
    """Figure styles for frontiers."""
    cm_per_inch = 2.54
    width_cm = 8.5
    text_pt = 5
    title_pt = 7
    width_in = width_cm / cm_per_inch
    # Reset to defaults first so repeated calls are idempotent.
    plt.rcdefaults()
    params = {
        'figure.figsize': [width_in, width_in],
        'figure.dpi': 160,
        'xtick.labelsize': text_pt,
        'ytick.labelsize': text_pt,
        'font.size': text_pt,
        'axes.labelsize': text_pt,
        'axes.titlesize': title_pt,
        'axes.linewidth': 0.75,
        'lines.linewidth': 0.75,
        'legend.fontsize': text_pt,
    }
    plt.rcParams.update(params)
    return None
def absolute_urls(html):
    """Converts relative URLs into absolute URLs. Used for RSS feeds to
    provide more complete HTML for item descriptions, but could also
    be used as a general richtext filter.
    """
    from bs4 import BeautifulSoup
    from yacms.core.request import current_request
    request = current_request()
    # Without a current request there is no host to resolve against.
    if request is not None:
        dom = BeautifulSoup(html, "html.parser")
        # ABSOLUTE_URL_TAGS maps tag name -> URL-bearing attribute.
        for tag, attr in ABSOLUTE_URL_TAGS.items():
            for node in dom.findAll(tag):
                url = node.get(attr, "")
                if url:
                    node[attr] = request.build_absolute_uri(url)
        html = str(dom)
    return html
def realtime_comment_classifier(sender, instance, created, **kwargs):
    """Classifies a comment after it has been created.

    This behaviour is configurable by the REALTIME_CLASSIFICATION MODERATOR
    setting; default behaviour is to classify (True).
    """
    if not created:
        return
    moderator_settings = getattr(settings, 'MODERATOR', None)
    if moderator_settings and 'REALTIME_CLASSIFICATION' in moderator_settings:
        if not moderator_settings['REALTIME_CLASSIFICATION']:
            # Real-time classification explicitly disabled.
            return
    if getattr(instance, 'is_reply_comment', False):
        # Replies are not classified.
        return
    from moderator.utils import classify_comment
    classify_comment(instance)
def assoc(objects, sitecol, assoc_dist, mode, asset_refs=()):
    """Associate geographic objects to a site collection.

    :param objects:
        something with .lons, .lats or ['lon'] ['lat'], or a list of lists
        of objects with a .location attribute (i.e. assets_by_site)
    :param assoc_dist:
        the maximum distance for association
    :param mode:
        if 'strict' fail if at least one site is not associated
        if 'error' fail if all sites are not associated
    :returns: (filtered site collection, filtered objects)
    """
    if isinstance(objects, numpy.ndarray) or hasattr(objects, 'lons'):
        # Coordinate arrays / meshes: associate sites to the objects.
        return _GeographicObjects(objects).assoc(sitecol, assoc_dist, mode)
    else:
        # Lists of assets per site: associate the other way around.
        return _GeographicObjects(sitecol).assoc2(
            objects, assoc_dist, mode, asset_refs)
def prj_remove_user(self, *args, **kwargs):
    """Remove the, in the user table view selected, user.

    :returns: None
    :rtype: None
    :raises: None
    """
    if not self.cur_prj:
        return
    i = self.prj_user_tablev.currentIndex()
    item = i.internalPointer()
    if item:
        user = item.internal_data()
        log.debug("Removing user %s.", user.username)
        # Detach from the model first, then from the project itself.
        item.set_parent(None)
        self.cur_prj.users.remove(user)
def get_bpf_pointer(tcpdump_lines):
    """Create a BPF Pointer for TCPDump filter."""
    if conf.use_pypy:
        # ctypes structs are slow on PyPy; use the legacy path instead.
        return _legacy_bpf_pointer(tcpdump_lines)
    # The first line holds the instruction count; the rest are instructions
    # in "code jt jf k" form as produced by `tcpdump -dd`.
    size = int(tcpdump_lines[0])
    bip = (bpf_insn * size)()
    for i, line in enumerate(tcpdump_lines[1:]):
        fields = [int(v) for v in line.split()]
        bip[i].code = c_ushort(fields[0])
        bip[i].jt = c_ubyte(fields[1])
        bip[i].jf = c_ubyte(fields[2])
        bip[i].k = c_uint(fields[3])
    return bpf_program(size, bip)
def wait(self, timeout=None):
    """Wait for the barrier.

    When the specified number of threads have started waiting, they are all
    simultaneously awoken. If an 'action' was provided for the barrier, one
    of the threads will have executed that callback prior to returning.
    Returns an individual index number from 0 to 'parties-1'.
    """
    if timeout is None:
        timeout = self._timeout
    with self._cond:
        self._enter()  # Block while the barrier drains from a prior cycle.
        index = self._count
        self._count += 1
        try:
            if index + 1 == self._parties:
                # Last thread to arrive releases everyone.
                self._release()
            else:
                self._wait(timeout)
            return index
        finally:
            self._count -= 1
            self._exit()
def save_xml(self, doc, element):
    """Save this target execution context into an xml.dom.Element
    object.
    """
    super(TargetExecutionContext, self).save_xml(doc, element)
    element.setAttributeNS(RTS_NS, RTS_NS_S + 'id', self.id)
def request_slot(client,
                 service: JID,
                 filename: str,
                 size: int,
                 content_type: str):
    """Request an HTTP upload slot.

    :param client: The client to request the slot with.
    :type client: :class:`aioxmpp.Client`
    :param service: Address of the HTTP upload service.
    :type service: :class:`~aioxmpp.JID`
    :param filename: Name of the file (without path), may be used by the
        server to generate the URL.
    :type filename: :class:`str`
    :param size: Size of the file in bytes
    :type size: :class:`int`
    :param content_type: The MIME type of the file
    :type content_type: :class:`str`
    :return: The assigned upload slot.
    :rtype: :class:`.xso.Slot`

    Sends a :xep:`363` slot request to the XMPP service to obtain HTTP
    PUT and GET URLs for a file upload.
    The upload slot is returned as a :class:`~.xso.Slot` object.
    """
    payload = Request(filename, size, content_type)
    return (yield from client.send(IQ(
        type_=IQType.GET,
        to=service,
        payload=payload
    )))
def _handle_calls(self, alts, format_, format_str, arr):
    """Handle FORMAT and calls columns, factored out of parse_line.

    :param alts: alternative alleles of the record (used for validation).
    :param format_: parsed FORMAT keys for this line.
    :param format_str: raw FORMAT column text, used as a cache key.
    :param arr: the split VCF line; sample columns start at index 9.
    :returns: list of Call / UnparsedCall objects, one per sample.
    """
    if format_str not in self._format_cache:
        # Cache per-FORMAT field info; many lines share the same FORMAT.
        self._format_cache[format_str] = list(map(self.header.get_format_field_info, format_))
    calls = []
    for sample, raw_data in zip(self.samples.names, arr[9:]):
        if self.samples.is_parsed(sample):
            data = self._parse_calls_data(format_, self._format_cache[format_str], raw_data)
            call = record.Call(sample, data)
            self._format_checker.run(call, len(alts))
            self._check_filters(call.data.get("FT"), "FORMAT/FT", call.sample)
            calls.append(call)
        else:
            # Sample not selected for parsing; keep the raw column text.
            calls.append(record.UnparsedCall(sample, raw_data))
    return calls
def check(self, action, page=None, lang=None, method=None):
    """Return ``True`` if the current user has permission on the page.

    Superusers are always allowed; otherwise the answer depends on the
    requested ``action`` ('change', 'delete', 'add', 'freeze', 'publish').
    Unknown actions are denied.
    """
    if self.user.is_superuser:
        return True
    if action == 'change':
        return self.has_change_permission(page, lang, method)
    if action == 'delete':
        return bool(self.delete_page())
    if action == 'add':
        return bool(self.add_page())
    if action == 'freeze':
        return bool(self.user.has_perm('pages.can_freeze'))
    if action == 'publish':
        return bool(self.user.has_perm('pages.can_publish'))
    return False
def _get_cl_dependency_code(self):
code = ''
for d in self._dependencies:
code += d.get_cl_code() + "\n"
return code | Get the CL code for all the CL code for all the dependencies.
Returns:
str: The CL code with the actual code. |
def warning(self):
    """Checks Stimulus for any warning conditions.

    :returns: str -- warning message, if any, 0 otherwise
    """
    signals, docs, overs = self.expandedStim()
    if np.any(np.array(overs) > 0):
        # Bug fix: the message was built with backslash continuations that
        # embedded the source indentation (long runs of spaces) into the
        # user-visible text; build it with adjacent literals instead.
        msg = ('Stimuli in this test are over the maximum allowable '
               'voltage output. They will be rescaled with a maximum '
               'undesired attenuation of {:.2f}dB.'.format(np.amax(overs)))
        return msg
    return 0
def openall(self, title=None):
    """Opens all available spreadsheets.

    :param title: (optional) If specified can be used to filter
        spreadsheets by title.
    :type title: str
    :returns: a list of :class:`~gspread.models.Spreadsheet` instances.
    """
    spreadsheet_files = self.list_spreadsheet_files()
    if title is not None:
        # Bug fix: ``title`` was documented as a filter but was previously
        # accepted and silently ignored.
        spreadsheet_files = [
            x for x in spreadsheet_files if x['name'] == title
        ]
    return [
        Spreadsheet(self, dict(title=x['name'], **x))
        for x in spreadsheet_files
    ]
def enterabs(self, time, priority, action, argument=(), kwargs=_sentinel):
    """Enter a new event in the queue at an absolute time.

    Returns an ID for the event which can be used to remove it,
    if necessary.
    """
    # The sentinel distinguishes "no kwargs" from an explicit empty dict.
    event = Event(time, priority, action, argument,
                  {} if kwargs is _sentinel else kwargs)
    with self._lock:
        heapq.heappush(self._queue, event)
    return event
def from_json(cls, json_data):
    """Converts json data to a new document instance.

    Nested document fields are deserialized recursively; plain fields use
    their own ``from_json``; fields absent from ``json_data`` or of
    unknown type are left untouched.
    """
    new_instance = cls()
    for field_name, field_obj in cls._get_fields().items():
        if isinstance(field_obj, NestedDocumentField):
            if field_name in json_data:
                # Reuse an already-set nested document when present.
                nested_field = field_obj.__get__(new_instance, new_instance.__class__)
                if not nested_field:
                    nested_field = field_obj.nested_klass()
                nested_document = nested_field.from_json(json_data[field_name])
                field_obj.__set__(new_instance, nested_document)
        elif isinstance(field_obj, BaseField):
            if field_name in json_data:
                value = field_obj.from_json(json_data[field_name])
                field_obj.__set__(new_instance, value)
        else:
            continue
    return new_instance
def min_weighted_vertex_cover(G, weight=None, sampler=None, **sampler_args):
    """Returns an approximate minimum weighted vertex cover.

    Defines a QUBO with ground states corresponding to a minimum weighted
    vertex cover and uses the sampler to sample from it.

    A vertex cover is a set of vertices such that each edge of the graph
    is incident with at least one vertex in the set. A minimum weighted
    vertex cover is the vertex cover of minimum total node weight.

    Parameters
    ----------
    G : NetworkX graph

    weight : string, optional (default None)
        If None, every node has equal weight. If a string, use this node
        attribute as the node weight. A node without this attribute is
        assumed to have max weight.

    sampler
        A binary quadratic model sampler. A sampler is a process that
        samples from low energy states in models defined by an Ising
        equation or a Quadratic Unconstrained Binary Optimization
        Problem (QUBO). A sampler is expected to have a 'sample_qubo'
        and 'sample_ising' method. A sampler is expected to return an
        iterable of samples, in order of increasing energy. If no
        sampler is provided, one must be provided using the
        `set_default_sampler` function.

    sampler_args
        Additional keyword parameters are passed to the sampler.

    Returns
    -------
    vertex_cover : list
        List of nodes that form the minimum weighted vertex cover, as
        determined by the given sampler.

    Notes
    -----
    Samplers by their nature may not return the optimal solution. This
    function does not attempt to confirm the quality of the returned
    sample.

    https://en.wikipedia.org/wiki/Vertex_cover
    https://en.wikipedia.org/wiki/Quadratic_unconstrained_binary_optimization

    References
    ----------
    Based on the formulation presented in [AL]_
    """
    # The complement of a maximum-weight independent set is a
    # minimum-weight vertex cover.
    independent = set(
        maximum_weighted_independent_set(G, weight, sampler, **sampler_args))
    return [node for node in G if node not in independent]
def _fit_m(D, a0, logp, tol=1e-7, maxiter=1000):
    """With fixed precision s, maximize mean m.

    Fixed-point iteration on Dirichlet parameters: the concentration
    s = a0.sum() is held constant while the mean a0 / s is updated.

    Args:
        D: (N, K) data matrix (only its shape is read here).
        a0: initial parameter vector of length K.
        logp: expected log-probabilities, length K.
        tol: convergence threshold on the change of the parameter vector.
        maxiter: maximum number of fixed-point iterations.

    Returns:
        Converged parameter vector with the same sum s as a0.

    Raises:
        Exception: if no convergence within maxiter iterations.
    """
    N, K = D.shape
    s = a0.sum()
    # FIX: `xrange` is Python-2-only and raises NameError on Python 3.
    for _ in range(maxiter):
        m = a0 / s
        # Fixed-point update: invert the digamma at the weighted target.
        a1 = _ipsi(logp + (m * (psi(a0) - logp)).sum())
        # Rescale so the precision s stays fixed.
        a1 = a1 / a1.sum() * s
        if norm(a1 - a0) < tol:
            return a1
        a0 = a1
    raise Exception('Failed to converge after {} iterations, s is {}'
                    .format(maxiter, s))
def isosurface_from_data(data, isolevel, origin, spacing):
    """Small wrapper to get directly vertices and faces to feed into programs.

    Runs marching cubes on `data` at `isolevel` and converts the triangle
    list into a (verts, faces) pair positioned in world coordinates.

    Args:
        data: 3D scalar field (numpy array).
        isolevel: iso-surface level; for negative levels both data and level
            are negated so marching cubes always sees a positive level.
        origin: world-space coordinates of the grid origin.
        spacing: per-axis grid spacing (array-like).

    Returns:
        (verts, faces): vertex coordinate array and triangle index array.
    """
    # BUG FIX: the original body began with
    #   spacing = np.array(extent/resolution)
    # which referenced the undefined names `extent`/`resolution` (NameError)
    # and clobbered the caller-supplied `spacing`; the parameter is now used.
    if isolevel >= 0:
        triangles = marching_cubes(data, isolevel)
    else:
        triangles = marching_cubes(-data, -isolevel)
    faces = []
    verts = []
    for i, t in enumerate(triangles):
        # Each triangle contributes three consecutive vertices.
        faces.append([i * 3, i * 3 + 1, i * 3 + 2])
        verts.extend(t)
    faces = np.array(faces)
    # Shift by half a cell so vertices sit at voxel centers.
    verts = origin + spacing / 2 + np.array(verts) * spacing
    return verts, faces
def get_guts(self, last_build, missing='missing or bad'):
    """Return cached guts, or None if they have changed and a rebuild is needed.

    Args:
        last_build: timestamp of the previous build, handed to each GUTS
            comparison function.
        missing: reason text for the log message when loading fails.

    Returns:
        The loaded data tuple when everything is up to date, else None.
    """
    try:
        data = _load_data(self.out)
    # FIX: was a bare `except:` that also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    except Exception:
        logger.info("building because %s %s", os.path.basename(self.out), missing)
        return None
    # A length mismatch means the cache was written by a different version.
    if len(data) != len(self.GUTS):
        logger.info("building because %s is bad", self.outnm)
        return None
    for i, (attr, func) in enumerate(self.GUTS):
        if func is None:
            # No comparison function: entry only needs to be stored.
            continue
        # A truthy result signals "changed" -> rebuild required.
        if func(attr, data[i], getattr(self, attr), last_build):
            return None
    return data
def itemgetter_handle(tokens):
    """Process implicit itemgetter partials."""
    internal_assert(len(tokens) == 2, "invalid implicit itemgetter args", tokens)
    op, args = tokens
    if op == "$[":
        return "_coconut.functools.partial(_coconut_igetitem, index=" + args + ")"
    if op == "[":
        return "_coconut.operator.itemgetter(" + args + ")"
    raise CoconutInternalException("invalid implicit itemgetter type", op)
def plot(self, xmin, xmax, idx_input=0, idx_output=0, points=100,
         **kwargs) -> None:
    """Call method |anntools.ANN.plot| of all |anntools.ANN| objects
    handled by the actual |anntools.SeasonalANN| object."""
    for toy, network in self:
        # Each sub-network is labelled with its time-of-year key.
        network.plot(xmin, xmax,
                     idx_input=idx_input,
                     idx_output=idx_output,
                     points=points,
                     label=str(toy),
                     **kwargs)
    pyplot.legend()
handled by the actual |anntools.SeasonalANN| object. |
def release(self, connection):
    """Releases the connection back to the pool."""
    self._checkpid()
    # Connections created by a different process are simply discarded.
    if connection.pid != self.pid:
        return
    try:
        self.pool.put_nowait(connection)
    except Full:
        # Pool already holds its maximum number of connections; drop it.
        pass
def generated_passphrase_entropy(self) -> float:
    """Calculate the entropy of a passphrase that would be generated.

    Combines the entropy contributed by `amount_w` words and `amount_n`
    numbers via `calc_passphrase_entropy`.

    Returns:
        float: entropy, in bits, of the would-be-generated passphrase.

    Raises:
        ValueError: if the wordlist is empty or amount_n/amount_w are unset.
    """
    if (
        self.amount_w is None
        or self.amount_n is None
        or not self.wordlist
    ):
        raise ValueError("Can't calculate the passphrase entropy: "
                         "wordlist is empty or amount_n or "
                         "amount_w isn't set")
    # Nothing to generate -> zero entropy by definition.
    if self.amount_n == 0 and self.amount_w == 0:
        return 0.0
    # Entropy of one number drawn from [randnum_min, randnum_max].
    entropy_n = self.entropy_bits((self.randnum_min, self.randnum_max))
    # Prefer a precomputed wordlist entropy when one is available.
    entropy_w = self._wordlist_entropy_bits \
        if self._wordlist_entropy_bits \
        else self.entropy_bits(self.wordlist)
    return calc_passphrase_entropy(
        self.amount_w,
        entropy_w,
        entropy_n,
        self.amount_n
    ) | Calculate the entropy of a passphrase that would be generated.
def less(environment, opts):
    """Recompiles less files in an environment.

    Usage:
    datacats less [ENVIRONMENT]

    ENVIRONMENT may be an environment name or a path to an environment
    directory. Default: '.'
    """
    require_extra_image(LESSC_IMAGE)
    # FIX: Python-2 print statements are SyntaxErrors on Python 3; the
    # parenthesized call form behaves identically on both.
    print('Converting .less files to .css...')
    for log in environment.compile_less():
        print(log)
Usage:
datacats less [ENVIRONMENT]
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.' |
def panic(self, *args):
    """Creates a fatal error and exits the process (unless test-errors mode)."""
    self._err("fatal", *args)
    # In test-errors mode (test_errs_mode is False means "real run"),
    # skip the hard exit so errors can be inspected.
    if self.test_errs_mode is not False:
        return
    sys.exit(1)
def iter_work_specs(self, limit=None, start=None):
    """Yield work spec dicts, paging through list_work_specs.

    Args:
        limit: maximum number of specs to yield (None for unlimited).
        start: pagination cursor passed through to list_work_specs.

    Yields:
        The work-spec dict from each (name, spec) pair.
    """
    count = 0
    ws_list, start = self.list_work_specs(limit, start)
    while True:
        for name_spec in ws_list:
            yield name_spec[1]
            count += 1
            # BUG FIX: the original only broke the inner loop here, then
            # decremented `limit` while `count` kept accumulating, so the
            # limit check double-counted on later pages and the generator
            # could under-yield or keep fetching after the limit was hit.
            if limit is not None and count >= limit:
                return
        if not start:
            return
        # Ask the backend only for the remainder still needed.
        remaining = None if limit is None else limit - count
        ws_list, start = self.list_work_specs(remaining, start)
def set_defaults(self):
    """Set defaults for fields needed to write the header if they have
    defaults (record-level fields first, then signal-level fields)."""
    record_fields, signal_fields = self.get_write_fields()
    for group in (record_fields, signal_fields):
        for field in group:
            self.set_default(field)
defaults.
Notes
-----
- This is NOT called by `rdheader`. It is only automatically
called by the gateway `wrsamp` for convenience.
- This is also not called by `wrheader` since it is supposed to
be an explicit function.
- This is not responsible for initializing the attributes. That
is done by the constructor.
See also `set_p_features` and `set_d_features`. |
def gregorian_to_julian(day):
    """Convert a datetime.date object to its corresponding Julian day.

    :param day: The datetime.date to convert to a Julian day
    :returns: A Julian day, as an integer
    """
    # Treat Jan/Feb as months 13/14 of the previous year, per the
    # standard Julian-day-number month shift.
    shift = 1 if day.month < MARCH else 0
    month_index = day.month + MONTHS_PER_YEAR * shift - MARCH
    years_elapsed = day.year - JULIAN_START_YEAR - shift
    days_from_months = (153 * month_index + 2) // 5
    days_from_years = 365 * years_elapsed
    leap_days = (years_elapsed // 4
                 - years_elapsed // 100
                 + years_elapsed // 400)
    # -32045 anchors the count to the Julian-day epoch.
    return day.day + days_from_months + days_from_years + leap_days - 32045
:param day: The datetime.date to convert to a Julian day
:returns: A Julian day, as an integer |
async def enable_analog_reporting(self, pin):
    """Enables analog reporting for a single analog pin.

    :param pin: Analog pin number. For example for A0, the number is 0.
    :returns: No return value
    """
    report_command = [
        PrivateConstants.REPORT_ANALOG + pin,
        PrivateConstants.REPORTING_ENABLE,
    ]
    await self._send_command(report_command)
:param pin: Analog pin number. For example for A0, the number is 0.
:returns: No return value |
def get_module_classes(node):
    """Return classes associated with a given module.

    Walks the whole AST rooted at *node* and collects every ClassDef.
    """
    found = []
    for descendant in ast.walk(node):
        if isinstance(descendant, ast.ClassDef):
            found.append(descendant)
    return found
def write8(self, offset, value):
    """Write 8 bits to `offset` (in bytes, relative to the MMIO base address).

    Args:
        offset (int, long): offset from base physical address, in bytes.
        value (int, long): 8-bit value to write.

    Raises:
        TypeError: if `offset` or `value` type are invalid.
        ValueError: if `offset` or `value` are out of bounds.
    """
    # NOTE(review): `long` exists only on Python 2 — this code assumes a
    # Python 2 runtime (or a compatibility shim defining `long`); confirm.
    if not isinstance(offset, (int, long)):
        raise TypeError("Invalid offset type, should be integer.")
    if not isinstance(value, (int, long)):
        raise TypeError("Invalid value type, should be integer.")
    if value < 0 or value > 0xff:
        raise ValueError("Value out of bounds.")
    # Translate the caller-relative offset and bounds-check a 1-byte access.
    offset = self._adjust_offset(offset)
    self._validate_offset(offset, 1)
    self.mapping[offset:offset + 1] = struct.pack("B", value) | Write 8-bits to the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
value (int, long): 8-bit value to write.
Raises:
TypeError: if `offset` or `value` type are invalid.
ValueError: if `offset` or `value` are out of bounds. |
def delete_job(name=None):
    """Delete the named Jenkins job.

    :param name: The name of the job to delete.
    :return: Return true if job is deleted successfully.

    CLI Example:

    .. code-block:: bash

        salt '*' jenkins.delete_job jobname
    """
    if not name:
        raise SaltInvocationError('Required parameter \'name\' is missing')
    server = _connect()
    if not job_exists(name):
        raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
    try:
        server.delete_job(name)
    except jenkins.JenkinsException as err:
        raise CommandExecutionError(
            'Encountered error deleting job \'{0}\': {1}'.format(name, err)
        )
    return True
:param name: The name of the job to delete.
:return: Return true if job is deleted successfully.
CLI Example:
.. code-block:: bash
salt '*' jenkins.delete_job jobname |
def iters(cls, batch_size=32, bptt_len=35, device=0, root='.data',
          vectors=None, **kwargs):
    """Create iterator objects for splits of the WikiText-2 dataset.

    Simplest entry point: builds a default text field and vocabulary, then
    returns BPTT iterators over the (train, val, test) splits.
    """
    text_field = data.Field()
    train, val, test = cls.splits(text_field, root=root, **kwargs)
    text_field.build_vocab(train, vectors=vectors)
    return data.BPTTIterator.splits(
        (train, val, test),
        batch_size=batch_size,
        bptt_len=bptt_len,
        device=device)
This is the simplest way to use the dataset, and assumes common
defaults for field, vocabulary, and iterator parameters.
Arguments:
batch_size: Batch size.
bptt_len: Length of sequences for backpropagation through time.
device: Device to create batches on. Use -1 for CPU and None for
the currently active GPU device.
root: The root directory that the dataset's zip archive will be
expanded into; therefore the directory in whose wikitext-2
subdirectory the data files will be stored.
wv_dir, wv_type, wv_dim: Passed to the Vocab constructor for the
text field. The word vectors are accessible as
train.dataset.fields['text'].vocab.vectors.
Remaining keyword arguments: Passed to the splits method. |
def get_postgame(self):
    """Get postgame structure.

    The parse result is memoized in self._cache['postgame']; the first
    failure stores False, so subsequent calls return False (not None)
    without retrying the parse.
    """
    if self._cache['postgame'] is not None:
        return self._cache['postgame']
    # Postgame data is parsed starting from the beginning of the file.
    self._handle.seek(0)
    try:
        self._cache['postgame'] = parse_postgame(self._handle, self.size)
        return self._cache['postgame']
    except IOError:
        # Cache the failure (as False) so we do not retry on every call.
        self._cache['postgame'] = False
        return None
    finally:
        # Always restore the read position to the body for other readers.
        self._handle.seek(self.body_position) | Get postgame structure.
def versions(self):
    """Return all version changes, collapsing consecutive repeats."""
    changes = []
    for version, _ in self.restarts:
        # Only record a version when it differs from the previous one.
        if not changes or changes[-1] != version:
            changes.append(version)
    return changes
def applyReq(self, request: Request, cons_time: int):
    """Apply request to appropriate ledger and state.

    `cons_time` is the UTC epoch at which consensus was reached.
    """
    # Hooks fire before and after application so plugins can observe both.
    self.execute_hook(NodeHooks.PRE_REQUEST_APPLICATION, request=request,
                      cons_time=cons_time)
    # Dispatch on the transaction type declared in the request operation.
    req_handler = self.get_req_handler(txn_type=request.operation[TXN_TYPE])
    seq_no, txn = req_handler.apply(request, cons_time)
    ledger_id = self.ledger_id_for_request(request)
    self.execute_hook(NodeHooks.POST_REQUEST_APPLICATION, request=request,
                      cons_time=cons_time, ledger_id=ledger_id,
                      seq_no=seq_no, txn=txn) | Apply request to appropriate ledger and state. `cons_time` is the
UTC epoch at which consensus was reached. |
def nova_services_up(self):
    """Checks if required Nova services are up and running.

    returns: True if all needed Nova services are up, False otherwise
    """
    needed = {'nova-conductor', 'nova-cert', 'nova-scheduler',
              'nova-compute'}
    try:
        service_list = self._nclient.services.list()
    except Exception as exc:
        LOG.error('Failure determining running Nova services: %s', exc)
        return False
    running = set(svc.binary for svc in service_list
                  if svc.status == 'enabled' and svc.state == 'up')
    return needed.issubset(running)
returns: True if all needed Nova services are up, False otherwise |
def warn(self, cmd, desc=''):
    """Style for warning message: render cmd/desc using the warn colour."""
    return self._label_desc(cmd, desc, self.warn_color) | Style for warning message.
def triangle_area(e1, e2, e3):
    """Get the area of the triangle formed by three edge vectors.

    Parameters are three-dimensional numpy arrays representing the
    triangle's edge vectors in Cartesian space. Uses Heron's formula,
    see http://mathworld.wolfram.com/HeronsFormula.html.

    :returns:
        Float (or array, one dimension less than the edges) area of the
        triangle in squared units of coordinates.
    """
    lengths = [numpy.sqrt(numpy.sum(edge * edge, axis=-1))
               for edge in (e1, e2, e3)]
    half_perimeter = (lengths[0] + lengths[1] + lengths[2]) / 2.0
    product = half_perimeter
    for length in lengths:
        product = product * (half_perimeter - length)
    return numpy.sqrt(product)
Parameters are three three-dimensional numpy arrays representing
vectors of triangle's edges in Cartesian space.
:returns:
Float number, the area of the triangle in squared units of coordinates,
or numpy array of shape of edges with one dimension less.
Uses Heron formula, see http://mathworld.wolfram.com/HeronsFormula.html. |
def _temp_filename(contents):
    """Make a temporary file with `contents`.

    The file will be cleaned up on exit (registered in _files_to_cleanup).
    """
    handle = tempfile.NamedTemporaryFile(prefix='codequalitytmp',
                                         delete=False)
    path = handle.name
    handle.write(contents)
    handle.close()
    _files_to_cleanup.append(path)
    return path
The file will be cleaned up on exit. |
def yield_for_all_futures(result):
    """Converts result into a Future by collapsing any futures inside result.

    If result is a Future we yield until it's done, then if the value inside
    the Future is another Future we yield until it's done as well, and so on.
    """
    while True:
        if result is None:
            break
        try:
            # Raises BadYieldError when `result` is not a yieldable object.
            future = gen.convert_yielded(result)
        except gen.BadYieldError:
            # Not future-like: we have reached the final, plain value.
            break
        else:
            result = yield future
    # Tornado-style coroutine return (works on Pythons without `return value`
    # in generators).
    raise gen.Return(result) | Converts result into a Future by collapsing any futures inside result.
If result is a Future we yield until it's done, then if the value inside
the Future is another Future we yield until it's done as well, and so on. |
def collections(self, values):
    """Set list of collections (stored in the cache when one is configured)."""
    if not self.cache:
        return
    cache_key = self.app.config['COLLECTIONS_CACHE_KEY']
    self.cache.set(cache_key, values)
def _bse_cli_list_ref_formats(args):
    """Handles the list-ref-formats subcommand."""
    all_refformats = api.get_reference_formats()
    # With --no-description only the format keys are printed; otherwise
    # keys and descriptions are laid out in columns.
    lines = (all_refformats.keys()
             if args.no_description
             else format_columns(all_refformats.items()))
    return '\n'.join(lines)
def dump_part(part, total_segments=None):
    """Dump one table part/segment to a file (gzipped when configured).

    'part' may be the hash_key if we are dumping just a few hash_keys -
    else it will be the segment number.

    Progress (item counts, then 'complete') is reported via config['queue'].
    NOTE(review): the broad except only prints — presumably because this
    runs in a worker process where exceptions would otherwise be lost;
    confirm before narrowing it.
    """
    try:
        connection = Connection(host=config['host'], region=config['region'])
        filename = ".".join([config['table_name'], str(part), "dump"])
        if config['compress']:
            opener = gzip.GzipFile
            filename += ".gz"
        else:
            opener = open
        dumper = BatchDumper(connection, config['table_name'], config['capacity'], part, total_segments)
        with opener(filename, 'w') as output:
            while dumper.has_items:
                items = dumper.get_items()
                for item in items:
                    # One JSON document per line.
                    output.write(json.dumps(item))
                    output.write("\n")
                output.flush()
                # Report the number of items dumped for progress accounting.
                config['queue'].put(len(items))
        config['queue'].put('complete')
    except Exception as e:
        print('Unhandled exception: {0}'.format(e)) | 'part' may be the hash_key if we are dumping just a few hash_keys - else
it will be the segment number |
def all_editable_exts():
    """Return a list of all editable extensions, each prefixed with a dot."""
    exts = []
    # Iterate values() directly: the language key was unpacked but never
    # used (Perflint PERF102); the redundant list() wrapper is also dropped.
    for extensions in languages.ALL_LANGUAGES.values():
        exts.extend(extensions)
    return ['.' + ext for ext in exts]
def is_running(self) -> bool:
    """Specifies whether or not the thread is running.

    Also reports True for a 0.5-second grace period after completion.
    NOTE(review): `and` binds tighter than `or`, so this evaluates as
    `(started and alive) or completed_at is None or recently-completed`,
    which returns True even before the thread has started whenever
    `completed_at` is None — confirm that is intended.
    """
    return (
        self._has_started and
        self.is_alive() or
        self.completed_at is None or
        (datetime.utcnow() - self.completed_at).total_seconds() < 0.5
    ) | Specifies whether or not the thread is running
def create_snapshot(self, snapshot_size, snapshot_suffix):
    """Create snapshot for this logical volume.

    :param snapshot_size: size of the new snapshot volume, as a fraction
        of this logical volume's space
    :param snapshot_suffix: suffix appended to this volume's name to form
        the snapshot volume name
    :return: WLogicalVolume
    """
    # Round extents up so the snapshot is never smaller than requested.
    size_extent = math.ceil(self.extents_count() * snapshot_size)
    size_kb = self.volume_group().extent_size() * size_extent
    snapshot_name = self.volume_name() + snapshot_suffix
    # Prefix with sudo only when the LVM command layer is configured for it.
    lvcreate_cmd = ['sudo'] if self.lvm_command().sudo() is True else []
    # '-s' makes a snapshot; '-p r' marks it read-only.
    lvcreate_cmd.extend([
        'lvcreate', '-L', '%iK' % size_kb, '-s', '-n', snapshot_name, '-p', 'r', self.volume_path()
    ])
    subprocess.check_output(lvcreate_cmd, timeout=self.__class__.__lvm_snapshot_create_cmd_timeout__)
    return WLogicalVolume(self.volume_path() + snapshot_suffix, sudo=self.lvm_command().sudo()) | Create snapshot for this logical volume.
:param snapshot_size: size of newly created snapshot volume. This size is a fraction of the source \
logical volume space (of this logical volume)
:param snapshot_suffix: suffix for logical volume name (base part is the same as the original volume \
name)
:return: WLogicalVolume |
def unsubscribe_all(self):
    """Unsubscribe from all topics."""
    # Snapshot the keys first: unsubscribe() mutates self.topics.
    for topic in list(self.topics):
        self.unsubscribe(topic)
def transpose_list(list_of_dicts):
    """Transpose a list of dicts to a dict of lists.

    :param list_of_dicts: to transpose, as in the output from a parse call
    :return: Dict of lists
    """
    transposed = {}
    for mapping in list_of_dicts:
        for key, value in mapping.items():
            transposed.setdefault(key, []).append(value)
    return transposed
:param list_of_dicts: to transpose, as in the output from a parse call
:return: Dict of lists |
def firmware_version(self):
    """Returns a firmware identification string of the connected J-Link.

    It consists of the product name (e.g. J-Link), the string "compiled",
    the compile date and time, and optional additional information.

    Returns:
        Firmware identification string.
    """
    # Fixed-size C buffer; the DLL writes a NUL-terminated string into it.
    buf = (ctypes.c_char * self.MAX_BUF_SIZE)()
    self._dll.JLINKARM_GetFirmwareString(buf, self.MAX_BUF_SIZE)
    # string_at stops at the NUL terminator; decode bytes -> str.
    return ctypes.string_at(buf).decode() | Returns a firmware identification string of the connected J-Link.
It consists of the following:
- Product Name (e.g. J-Link)
- The string: compiled
- Compile data and time.
- Optional additional information.
Args:
self (JLink): the ``JLink`` instance
Returns:
Firmware identification string. |
def _get_num_pass(data, n):
    """Calculate the number of samples needed to pass ensemble calling.

    Precedence: explicit `numpass` config value, then `trusted_pct`
    (a fraction of the `n` callers, rounded up), then a default of 2.
    """
    numpass = tz.get_in(["config", "algorithm", "ensemble", "numpass"], data)
    if numpass:
        return int(numpass)
    trusted_pct = tz.get_in(["config", "algorithm", "ensemble", "trusted_pct"], data)
    if trusted_pct:
        # Fraction of the n callers, rounded up to a whole count.
        return int(math.ceil(float(trusted_pct) * n))
    return 2 | Calculate the number of samples needed to pass ensemble calling.
def _create_ip_report(self):
    """Write the obfuscated-IP -> original-IP mapping as a CSV report.

    Reads self.ip_db (obfuscated int -> original int), writes one CSV row
    per entry to '<report_dir>/<session>-ip.csv', and records the path in
    self.ip_report.

    Raises:
        Exception: wrapping any failure, after logging the traceback.
    """
    try:
        ip_report_name = os.path.join(self.report_dir, "%s-ip.csv" % self.session)
        self.logger.con_out('Creating IP Report - %s', ip_report_name)
        # FIX: use a context manager so the file handle is closed even when
        # a write raises (the original leaked the handle on failure).
        with open(ip_report_name, 'wt') as ip_report:
            ip_report.write('Obfuscated IP,Original IP\n')
            for k, v in self.ip_db.items():
                ip_report.write('%s,%s\n' % (self._int2ip(k), self._int2ip(v)))
        self.logger.info('Completed IP Report')
        self.ip_report = ip_report_name
    except Exception as e:
        self.logger.exception(e)
        raise Exception('CreateReport Error: Error Creating IP Report')
def namespace_for_prefix(self, prefix):
    """Get the namespace the given prefix maps to.

    Args:
        prefix (str): The prefix

    Returns:
        str: The namespace, or None if the prefix isn't mapped to
        anything in this set.
    """
    try:
        info = self.__lookup_prefix(prefix)
    except PrefixNotFoundError:
        return None
    return info.uri
return ni.uri | Get the namespace the given prefix maps to.
Args:
prefix (str): The prefix
Returns:
str: The namespace, or None if the prefix isn't mapped to
anything in this set. |
def generate(env):
    """Add Builders and construction variables for rpm to an Environment."""
    try:
        rpm_builder = env['BUILDERS']['Rpm']
    except KeyError:
        rpm_builder = RpmBuilder
    env['BUILDERS']['Rpm'] = rpm_builder
    env.SetDefault(RPM='LC_ALL=C rpmbuild')
    env.SetDefault(RPMFLAGS=SCons.Util.CLVar('-ta'))
    env.SetDefault(RPMCOM=rpmAction)
    env.SetDefault(RPMSUFFIX='.rpm')
def info(self, account_id, resource_id, parent_id):
    """Check if a resource is locked.

    If a resource has an explicit status we use that, else we defer to the
    parent resource lock status.

    NOTE(review): when a record exists whose id starts with neither 'vpc-'
    nor 'sg-', control falls off the end and None is returned — confirm no
    other resource types are expected here.
    """
    resource = self.record(account_id, resource_id)
    if resource is None and not parent_id:
        # No record and no parent to defer to: report unlocked.
        return {'ResourceId': resource_id,
                'LockStatus': self.STATE_UNLOCKED}
    elif resource is None:
        # No direct record: inherit the parent's lock status.
        parent = self.record(account_id, parent_id)
        if parent is None:
            return {'ResourceId': resource_id,
                    'ParentId': parent_id,
                    'LockStatus': self.STATE_UNLOCKED}
        parent['ResourceId'] = resource_id
        parent['ParentId'] = parent_id
        parent['LockType'] = 'parent'
        return parent
    if resource['ResourceId'].startswith('vpc-'):
        return resource
    if resource['ResourceId'].startswith('sg-'):
        return resource | Check if a resource is locked.
If a resource has an explicit status we use that, else
we defer to the parent resource lock status. |
def upload(self, filename, **kwargs):
    """Uploads a file for immediate indexing.

    **Note**: The file must be locally accessible from the server.

    :param filename: The name of the file to upload. The file can be a
        plain, compressed, or archived file.
    :param kwargs: Additional arguments (optional), passed through to the
        oneshot input endpoint.
    :return: The :class:`Index`.
    """
    kwargs['index'] = self.name
    self.service.post('data/inputs/oneshot', name=filename, **kwargs)
    return self
**Note**: The file must be locally accessible from the server.
:param filename: The name of the file to upload. The file can be a
plain, compressed, or archived file.
:type filename: ``string``
:param kwargs: Additional arguments (optional). For more about the
available parameters, see `Index parameters <http://dev.splunk.com/view/SP-CAAAEE6#indexparams>`_ on Splunk Developer Portal.
:type kwargs: ``dict``
:return: The :class:`Index`. |
def _require_bucket(self, bucket_name):
    """Return the bucket, also trying to create (claim) it if missing.

    Raises OFSException when the bucket neither exists nor can be claimed.
    """
    if self.exists(bucket_name) or self.claim_bucket(bucket_name):
        return self._get_bucket(bucket_name)
    raise OFSException("Invalid bucket: %s" % bucket_name)
def rescan_images(registry):
    """Update the kernel image metadata from all configured docker registries."""
    with Session() as session:
        try:
            result = session.Image.rescanImages(registry)
        except Exception as err:
            print_error(err)
            sys.exit(1)
        if not result['ok']:
            print("rescanning failed: {0}".format(result['msg']))
        else:
            print("kernel image metadata updated")
def auto_batch_size(sequence_length,
                    mesh_shape,
                    layout_rules,
                    tokens_per_split=2048):
    """Automatically compute batch size.

    Args:
        sequence_length: an integer
        mesh_shape: an input to mtf.convert_to_shape()
        layout_rules: an input to mtf.convert_to_layout_rules()
        tokens_per_split: an integer

    Returns:
        an integer
    """
    num_splits = mtf.tensor_dim_to_mesh_dim_size(
        layout_rules, mesh_shape, mtf.Dimension("batch", 0))
    # At least one sequence per split, scaled by the number of splits.
    per_split = max(1, tokens_per_split // sequence_length)
    batch_size = per_split * num_splits
    tf.logging.info(
        "AUTO_BATCH_SIZE tokens_per_split=%s num_splits=%s"
        " sequence_length=%s batch_size=%s"
        % (tokens_per_split, num_splits, sequence_length, batch_size))
    return batch_size
Args:
sequence_length: an integer
mesh_shape: an input to mtf.convert_to_shape()
layout_rules: an input to mtf.convert_to_layout_rules()
tokens_per_split: an integer
Returns:
an integer |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.