code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def token(name):
    """Marker for a token.

    Returns a decorator that registers the decorated function in the
    module-level ``tokenizers`` list under ``name``.

    :param str name: Name of tokenizer
    """
    def register(func):
        tokenizers.append((name, func))
        return func
    return register
def complement(color):
    r"""Calculates polar opposite of color.

    This isn't guaranteed to look good >_> (especially with brighter, higher
    intensity colors.) This will be replaced with a formula that produces
    better looking colors in the future.

    >>> complement('red')
    (0, 255, 76)
    >>> complement((0, 100, 175))
    (175, 101, 0)
    """
    red, green, blue = parse_color(color)
    gcolor = grapefruit.Color((red / 255.0, green / 255.0, blue / 255.0))
    opposite = gcolor.ComplementaryColor()
    return tuple(int(channel * 255.0) for channel in opposite.rgb)
def all(self, value, pos=None):
    """Return True if one or many bits are all set to value.

    value -- If value is True then checks for bits set to 1, otherwise
             checks for bits set to 0.
    pos -- An iterable of bit positions. Negative numbers are treated in
           the same way as slice indices. Defaults to the whole bitstring.

    :raises IndexError: if a position is out of range.
    """
    value = bool(value)
    length = self.len
    if pos is None:
        # Bug fix: xrange is Python 2 only; range is the Python 3 equivalent.
        pos = range(self.len)
    for p in pos:
        if p < 0:
            # Negative positions count from the end, like slice indices.
            p += length
        if not 0 <= p < length:
            raise IndexError("Bit position {0} out of range.".format(p))
        # NOTE(review): identity check assumes getbit() returns a bool.
        if not self._datastore.getbit(p) is value:
            return False
    return True
def toggle_bit(self, position: int):
    """Toggles the value at position.

    :param position: integer between 0 and the bit width - 1, inclusive
    :return: None
    :raises ValueError: if position is at or beyond the bit width
    """
    if position >= self._bit_width:
        raise ValueError('position greater than the bit width')
    self._value ^= 1 << position
    self._text_update()
def get_tmp_file(dir=None):
    """Create and return a tmp filename, optionally at a specific path.

    `os.remove` when done with it.
    """
    with tempfile.NamedTemporaryFile(delete=False, dir=dir) as tmp:
        return tmp.name
def create(cls, zmq_context, endpoint):
    """Create new client transport.

    Instead of creating the socket yourself, you can call this function and
    merely pass the :py:class:`zmq.core.context.Context` instance.
    By passing a context imported from :py:mod:`zmq.green`, you can use
    green (gevent) 0mq sockets as well.

    :param zmq_context: A 0mq context.
    :param endpoint: The endpoint the server is bound to.
    """
    sock = zmq_context.socket(zmq.REQ)
    sock.connect(endpoint)
    return cls(sock)
def wait_all(jobs, timeout=None):
    """Return when all of the specified jobs have completed or timeout expires.

    Args:
        jobs: a Job or list of Jobs to wait on.
        timeout: a timeout in seconds to wait for. None (the default) means
            no timeout.
    Returns:
        A list of the jobs that have now completed or None if there were no jobs.
    """
    mode = concurrent.futures.ALL_COMPLETED
    return Job._wait(jobs, timeout, mode)
async def cursor_async(self):
    """Acquire async cursor.

    Reuses the transaction connection when inside a transaction; on any
    failure the async connection is closed before the error propagates.
    """
    await self.connect_async(loop=self._loop)
    if self.transaction_depth_async() > 0:
        conn = self.transaction_conn_async()
    else:
        conn = None
    try:
        return await self._async_conn.cursor(conn=conn)
    except:  # close on *any* error, then re-raise (intentional catch-all)
        await self.close_async()
        raise
def on_excepthandler(self, node):
    """Exception handler: evaluate the handler's type expression and return
    it with the handler's name and body."""
    handler_type = self.run(node.type)
    return (handler_type, node.name, node.body)
def rename(self, new_name, range=None):
    """Request a rename to the server."""
    self.log.debug('rename: in')
    if not new_name:
        # No name supplied: ask the user interactively.
        new_name = self.editor.ask_input("Rename to:")
    self.editor.write(noautocmd=True)
    start, end = self.editor.word_under_cursor_pos()
    current_file = self.editor.path()
    self.editor.raw_message(current_file)
    payload = {
        "typehint": "RenameRefactorDesc",
        "newName": new_name,
        "start": self.get_position(start[0], start[1]),
        "end": self.get_position(end[0], end[1]) + 1,
        "file": current_file,
    }
    self.send_refactor_request("RefactorReq", payload, {"interactive": False})
def _get_upserts_distinct(queryset, model_objs_updated, model_objs_created, unique_fields):
    """Given a list of model objects that were updated and model objects that
    were created, fetch the pks of the newly created models and return the two
    lists in a tuple.
    """
    created_models = []
    if model_objs_created:
        where_sql = '({unique_fields_sql}) in %s'.format(
            unique_fields_sql=', '.join(unique_fields))
        created_values = tuple(
            tuple(getattr(model_obj, field) for field in unique_fields)
            for model_obj in model_objs_created
        )
        created_models.extend(
            queryset.extra(where=[where_sql], params=[created_values]))
    return model_objs_updated, created_models
def _initialize_application():
    """Initializes the Application."""
    application = umbra.ui.common.get_application_instance()
    RuntimeGlobals.application = application
    umbra.ui.common.set_window_default_icon(application)
    RuntimeGlobals.reporter = umbra.reporter.install_exception_reporter()
def delete(self, port, qos_policy=None):
    """Remove QoS rules from port.

    :param port: port object.
    :param qos_policy: the QoS policy to be removed from port.
    """
    LOG.info("Deleting QoS policy %(qos_policy)s on port %(port)s",
             {'qos_policy': qos_policy, 'port': port})
    self._utils.remove_port_qos_rule(port["port_id"])
def get_attention(config: AttentionConfig, max_seq_len: int, prefix: str = C.ATTENTION_PREFIX) -> 'Attention':
    """Returns an Attention instance based on attention_type.

    :param config: Attention configuration.
    :param max_seq_len: Maximum length of source sequences.
    :param prefix: Name prefix.
    :return: Instance of Attention.
    """
    att_cls = Attention.get_attention_cls(config.type)
    params = dict(config.__dict__)
    del params['_frozen']
    params['max_seq_len'] = max_seq_len
    params['prefix'] = prefix
    return _instantiate(att_cls, params)
def imagedatadict_to_ndarray(imdict):
    """Converts the ImageData dictionary, imdict, to an nd image."""
    arr = imdict['Data']
    im = None
    if isinstance(arr, parse_dm3.array.array):
        im = numpy.asarray(arr, dtype=arr.typecode)
    elif isinstance(arr, parse_dm3.structarray):
        typecodes = tuple(arr.typecodes)
        im = numpy.frombuffer(arr.raw_data,
                              dtype=structarray_to_np_map[typecodes])
    # Sanity-check the decoded dtype against the declared metadata.
    assert dm_image_dtypes[imdict["DataType"]][1] == im.dtype
    assert imdict['PixelDepth'] == im.dtype.itemsize
    # DM stores dimensions slowest-last; numpy wants slowest-first.
    im = im.reshape(imdict['Dimensions'][::-1])
    if imdict["DataType"] == 23:
        # Presumably RGBA packed data: drop the trailing channel — TODO confirm.
        im = im.view(numpy.uint8).reshape(im.shape + (-1, ))[..., :-1]
    return im
def read_feather(path, columns=None, use_threads=True):
    """Load a feather-format object from the file path.

    .. versionadded 0.20.0

    Parameters
    ----------
    path : string file path, or file-like object
    columns : sequence, default None
        If not provided, all columns are read.
    use_threads : bool, default True
        Whether to parallelize reading using multiple threads.
        On pyarrow < 0.11.0 this is translated to the legacy
        ``nthreads`` argument (minimum 1).

    Returns
    -------
    type of object stored in file
    """
    feather, pyarrow = _try_import()
    path = _stringify_path(path)
    if LooseVersion(pyarrow.__version__) < LooseVersion('0.11.0'):
        # Legacy API: clamp to at least one thread.
        nthreads = max(int(use_threads), 1)
        return feather.read_feather(path, columns=columns, nthreads=nthreads)
    return feather.read_feather(path, columns=columns,
                                use_threads=bool(use_threads))
def _remove_data_dir_path(self, inp=None):
    """Remove the data directory path from filenames.

    Returns None when ``inp`` is None.
    """
    if inp is None:
        return None
    prefix = os.path.join(self.data_path, '')
    return inp.apply(lambda name: name.split(prefix)[-1])
def rand_block(minimum, scale, maximum=1):
    """Block the current thread for a random pareto-distributed time capped
    at ``maximum`` and return the sleep time in seconds.

    :param minimum: minimum of the pareto distribution
    :param scale: scale of the pareto distribution
    :param maximum: upper bound for the sleep duration
    :return: the number of seconds actually slept
    """
    duration = min(rand_pareto_float(minimum, scale), maximum)
    time.sleep(duration)
    return duration
def has_chosen(state, correct, msgs):
    """Verify exercises of the type MultipleChoiceExercise.

    Args:
        state: State instance describing student and solution code.
            Can be omitted if used with Ex().
        correct: index of correct option, where 1 is the first option.
        msgs: list of feedback messages corresponding to each option.

    :Example:
        The following SCT is for a multiple choice exercise with 2 options,
        the first of which is correct.::

            Ex().has_chosen(1, ['Correct!', 'Incorrect. Try again!'])
    """
    ctxt = {}
    # NOTE(review): executes student-submitted code — trusted sandbox assumed.
    exec(state.student_code, globals(), ctxt)
    selected = ctxt["selected_option"]
    if selected == correct:
        state.reporter.success_msg = msgs[correct - 1]
    else:
        state.report(Feedback(msgs[selected - 1]))
    return state
def main():
    """Command line entry point for dcal."""
    args = sys.argv
    if "--help" in args or "-h" in args or len(args) > 3:
        raise SystemExit(__doc__)
    try:
        discordian_calendar(*args[1:])
    except ValueError as error:
        raise SystemExit("Error: {}".format("\n".join(error.args)))
def from_taxtable(cls, taxtable_fp):
    """Generate a node from an open handle to a taxtable, as generated by
    ``taxit taxtable``.
    """
    reader = csv.reader(taxtable_fp)
    headers = next(reader)
    rows = (collections.OrderedDict(zip(headers, fields)) for fields in reader)
    first = next(rows)
    root = cls(rank=first['rank'], tax_id=first['tax_id'],
               name=first['tax_name'])
    path_root = headers.index('root')
    root.ranks = headers[path_root:]
    for row in rows:
        rank, tax_id, name = row['rank'], row['tax_id'], row['tax_name']
        # Drop empty path components before resolving the parent node.
        path = [part for part in list(row.values())[path_root:] if part]
        parent = root.path(path[:-1])
        parent.add_child(cls(rank, tax_id, name=name))
    return root
async def save(self, db=None):
    """Save the document.

    If object has _id, then object will be created or fully rewritten.
    If not, object will be inserted and _id will be assigned.
    """
    self._db = db or self.db
    data = self.prepare_data()
    self.validate()
    for attempt in self.connection_retries():
        try:
            created = '_id' not in data
            result = await self.db[self.get_collection_name()].insert_one(data)
            self._id = result.inserted_id
            # Fire the post-save signal without awaiting it.
            asyncio.ensure_future(post_save.send(
                sender=self.__class__,
                db=self.db,
                instance=self,
                created=created))
            break
        except ConnectionFailure as ex:
            exceed = await self.check_reconnect_tries_and_wait(attempt, 'save')
            if exceed:
                raise ex
def _do_help(self, cmd, args):
    """Display doc strings of the shell and its commands."""
    print(self.doc_string())
    print()
    rows = []
    cls = self.__class__
    for attr_name in dir(cls):
        obj = getattr(cls, attr_name)
        if not iscommand(obj):
            continue
        # Renamed loop variable: the original reused the `cmd` parameter here.
        names = [alias for alias in getcommands(obj)]
        cmd_str = ','.join(sorted(names))
        if obj.__doc__:
            doc_str = textwrap.dedent(obj.__doc__).strip()
        else:
            doc_str = '(no doc string available)'
        rows.append([cmd_str, doc_str])
    rows.sort(key=lambda row: row[0])
    data = [['COMMANDS', 'DOC STRING']] + rows
    table = terminaltables.SingleTable(data, 'List of Available Commands')
    table.inner_row_border = True
    table.inner_heading_row_border = True
    print(table.table)
def order_target_value(self,
                       asset,
                       target,
                       limit_price=None,
                       stop_price=None,
                       style=None):
    """Place an order to adjust a position to a target value.

    If the position doesn't already exist, this is equivalent to placing a
    new order. If the position does exist, this is equivalent to placing an
    order for the difference between the target value and the current value.
    If the Asset being ordered is a Future, the 'target value' calculated is
    actually the target exposure, as Futures have no 'value'.

    Parameters
    ----------
    asset : Asset
        The asset that this order is for.
    target : float
        The desired total value of ``asset``.
    limit_price : float, optional
        The limit price for the order.
    stop_price : float, optional
        The stop price for the order.
    style : ExecutionStyle
        The execution style for the order.

    Returns
    -------
    order_id : str
        The unique identifier for this order.

    Notes
    -----
    ``order_target_value`` does not take into account any open orders:
    two consecutive ``order_target_value(sid(0), 10)`` calls will result in
    20 dollars of ``sid(0)`` because the first call will not have been
    filled when the second call is made.

    See :func:`zipline.api.order` for more information about
    ``limit_price``, ``stop_price``, and ``style``.
    """
    if not self._can_order_asset(asset):
        return None
    target_amount = self._calculate_order_value_amount(asset, target)
    amount = self._calculate_order_target_amount(asset, target_amount)
    return self.order(asset, amount,
                      limit_price=limit_price,
                      stop_price=stop_price,
                      style=style)
def export_xlsx(self, key):
    """Download xlsx version of spreadsheet."""
    spreadsheet_meta = self.client.files().get(fileId=key).execute()
    export_links = spreadsheet_meta.get('exportLinks')
    download_url = export_links.get(
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    _resp, content = self.client._http.request(download_url)
    return content
def _get_ssh_config(config_path='~/.ssh/config'):
    """Extract the configuration located at ``config_path``.

    Returns:
        paramiko.SSHConfig: the configuration instance (empty when the
        file cannot be read).
    """
    ssh_config = paramiko.SSHConfig()
    real_path = os.path.realpath(os.path.expanduser(config_path))
    try:
        with open(real_path) as handle:
            ssh_config.parse(handle)
    except IOError:
        # Missing/unreadable config is treated as an empty configuration.
        pass
    return ssh_config
def load_gffutils_db(f):
    """Load database for gffutils.

    Parameters
    ----------
    f : str
        Path to database.

    Returns
    -------
    db : gffutils.FeatureDB
        gffutils feature database.
    """
    import gffutils
    return gffutils.FeatureDB(f, keep_order=True)
async def write(self, data):
    """Writes a chunk of data to the streaming response.

    Non-bytes payloads are encoded first; the chunk is framed using
    HTTP chunked transfer encoding (hex length, CRLF, body, CRLF).

    :param data: bytes-ish data to be written.
    """
    # Idiom fix: isinstance instead of exact type comparison, so bytes
    # subclasses are passed through unchanged.
    if not isinstance(data, bytes):
        data = self._encode_body(data)
    self.protocol.push_data(b"%x\r\n%b\r\n" % (len(data), data))
    await self.protocol.drain()
def sanitize_html(value, valid_tags=VALID_TAGS, strip=True):
    """Strips unwanted markup out of HTML.

    :param value: HTML string to clean.
    :param valid_tags: mapping of allowed tag -> allowed attributes.
        Bug fix: this parameter was previously ignored in favor of the
        module-level ``VALID_TAGS``.
    :param strip: whether disallowed tags are stripped (True) or escaped.
    """
    return bleach.clean(value, tags=list(valid_tags.keys()),
                        attributes=valid_tags, strip=strip)
def escape(text, newline=False):
    """Escape special html characters.

    Non-string input is returned unchanged.  Bug fixes: ``basestring`` is
    Python 2 only (the whole function raised NameError on Python 3), and
    single quotes were wrongly escaped to ``&quot;`` — they now become
    ``&#39;``.

    :param text: value to escape (only strings are processed).
    :param newline: when True, also convert newlines to ``<br>``.
    """
    if isinstance(text, str):
        # Ampersand must be replaced first so entities aren't double-escaped.
        if '&' in text:
            text = text.replace('&', '&amp;')
        if '>' in text:
            text = text.replace('>', '&gt;')
        if '<' in text:
            text = text.replace('<', '&lt;')
        if '\"' in text:
            text = text.replace('\"', '&quot;')
        if '\'' in text:
            text = text.replace('\'', '&#39;')
        if newline and '\n' in text:
            text = text.replace('\n', '<br>')
    return text
def cancelled(self):
    """Return whether this future was successfully cancelled."""
    if self._state != self.S_EXCEPTION:
        return False
    return isinstance(self._result, Cancelled)
def _padding(self, image, geometry, options):
    """Pads the image to ``geometry``, centered, using the configured
    padding color."""
    opts = image['options']
    opts['background'] = options.get('padding_color')
    opts['gravity'] = 'center'
    opts['extent'] = '%sx%s' % (geometry[0], geometry[1])
    return image
def __execute_bisz(self, instr):
    """Execute BISZ instruction: write 1 to operand 2 iff operand 0 is
    zero, else write 0."""
    src_val = self.read_operand(instr.operands[0])
    result = 1 if src_val == 0 else 0
    self.write_operand(instr.operands[2], result)
    return None
def getJson(cls, url, method='GET', headers={}, data=None, socket=None, timeout=120):
    """Fetch a JSON result via HTTP.

    NOTE(review): the mutable default ``headers={}`` is preserved for
    interface compatibility; callers mutating it share state.
    """
    headers.setdefault('Content-Type', ['application/json'])
    body = yield cls().getBody(url, method, headers, data, socket, timeout)
    defer.returnValue(json.loads(body))
def set_PLOS_2column_fig_style(self, ratio=1):
    """Set the default figure size corresponding to PLOS two columns."""
    width = self.PLOSwidth2Col
    plt.rcParams.update({'figure.figsize': [width, width * ratio]})
def queue(p_queue, host=None):
    """Construct a path to the queue dir for a queue."""
    if host is None:
        return _path(p_queue, _c.FSQ_QUEUE)
    return _path(_c.FSQ_QUEUE, root=_path(host, root=hosts(p_queue)))
def __expr_str(cls, expr, level):
    """Returns string representing the expression, indented by ``level``."""
    indent = ' ' * level * 4
    if isinstance(expr, tuple):
        return '{}{}'.format(indent, str(expr))
    if expr.etype[0] in ('pvar', 'constant'):
        # Leaf expressions are rendered on one line.
        return '{}Expression(etype={}, args={})'.format(
            indent, expr.etype, expr.args)
    if not isinstance(expr, Expression):
        return '{}{}'.format(indent, str(expr))
    rendered = [cls.__expr_str(arg, level + 1) for arg in expr.args]
    return '{}Expression(etype={}, args=\n{})'.format(
        indent, expr.etype, '\n'.join(rendered))
def next_frame_l2():
    """Basic conv model with L2 modality."""
    hparams = next_frame_basic_deterministic()
    hparams.video_modality_loss_cutoff = 2.4
    hparams.loss["targets"] = modalities.video_l2_loss
    hparams.top["targets"] = modalities.video_l1_top
    return hparams
def _dedup_index(self, df_a):
    """Build an index for deduplicating a dataset.

    Parameters
    ----------
    df_a : (tuple of) pandas.Series
        The data of the DataFrame to build the index with.

    Returns
    -------
    pandas.MultiIndex
        A pandas.MultiIndex with record pairs. Each record pair contains
        the index values of two records, sampled from the lower triangular
        part of the matrix.
    """
    candidate_pairs = self._link_index(df_a, df_a)
    # Keep only the lower triangle to avoid self-pairs and mirrored pairs.
    mask = candidate_pairs.labels[0] > candidate_pairs.labels[1]
    return candidate_pairs[mask]
def join_sources(source_module: DeploymentModule, contract_name: str):
    """Use join-contracts.py to concatenate all imported Solidity files.

    Args:
        source_module: a module name to look up contracts_source_path()
        contract_name: 'TokenNetworkRegistry', 'SecretRegistry' etc.
    """
    joined_file = Path(__file__).parent.joinpath('joined.sol')
    remapping = {module: str(path)
                 for module, path in contracts_source_path().items()}
    source_file = contracts_source_path_of_deployment_module(
        source_module).joinpath(contract_name + '.sol')
    command = [
        './utils/join-contracts.py',
        '--import-map',
        json.dumps(remapping),
        str(source_file),
        str(joined_file),
    ]
    working_dir = Path(__file__).parent.parent
    try:
        subprocess.check_call(command, cwd=working_dir)
    except subprocess.CalledProcessError as ex:
        print(f'cd {str(working_dir)}; {subprocess.list2cmdline(command)} failed.')
        raise ex
    return joined_file.read_text()
def debug(method):
    """Decorator to debug the given method under pudb (falling back to pdb).

    A quit from the debugger exits the process cleanly.
    """
    def new_method(*args, **kwargs):
        import pdb
        try:
            import pudb
        except ImportError:
            pudb = pdb
        try:
            # Bug fix: propagate the wrapped method's return value, which
            # was previously discarded.
            return pudb.runcall(method, *args, **kwargs)
        except pdb.bdb.BdbQuit:
            sys.exit('Normal quit from debugger')
    new_method.__doc__ = method.__doc__
    new_method.__name__ = 'debug(%s)' % method.__name__
    return new_method
def memoize(fun):
    """Memoizes return values of the decorated function.

    Similar to l0cache, but the cache persists for the duration of the
    process, unless clear_cache() is called on the function.
    """
    argspec = inspect2.getfullargspec(fun)
    arg_names = argspec.args + argspec.kwonlyargs
    kwargs_defaults = get_kwargs_defaults(argspec)

    @functools.wraps(fun)
    def new_fun(*args, **kwargs):
        key = get_args_tuple(args, kwargs, arg_names, kwargs_defaults)
        try:
            return new_fun.__cache[key]
        except KeyError:
            result = fun(*args, **kwargs)
            new_fun.__cache[key] = result
            return result

    def clear_cache():
        """Removes all cached return values."""
        new_fun.__cache.clear()

    new_fun.__cache = {}
    new_fun.clear_cache = clear_cache
    return new_fun
def delete(ids, yes):
    """Delete datasets.

    Exits with status 1 if any requested id could not be deleted.
    """
    failures = False
    for id in ids:
        data_source = get_data_object(id, use_data_config=True)
        if not data_source:
            failures = True
            continue
        data_name = normalize_data_name(data_source.name)
        suffix = data_name.split('/')[-1]
        if not suffix.isdigit():
            failures = True
            floyd_logger.error('%s is not a dataset, skipped.', id)
            if suffix == 'output':
                floyd_logger.error('To delete job output, please delete the job itself.')
            # Bug fix: non-dataset ids were reported as "skipped" but then
            # fell through to the confirmation/deletion below.
            continue
        if not yes and not click.confirm("Delete Data: {}?".format(data_name),
                                         abort=False,
                                         default=False):
            floyd_logger.info("Data %s: Skipped", data_name)
            continue
        if not DataClient().delete(data_source.id):
            failures = True
        else:
            floyd_logger.info("Data %s: Deleted", data_name)
    if failures:
        sys.exit(1)
def extract_formats(config_handle):
    """Get application formats.

    See :class:`gogoutils.Formats` for available options.

    Args:
        config_handle (configparser.ConfigParser): Instance of configurations.

    Returns:
        dict: Formats in ``{$format_type: $format_pattern}``.
    """
    sections = dict(config_handle)
    return dict(sections.get('formats', {}))
def get(self, slug):
    """Get KnwKB.

    Url parameters:
        - from: filter "mappings from"
        - to: filter "mappings to"
        - page
        - per_page
        - match_type: s=substring, e=exact, sw=startswith
        - sortby: 'from' or 'to'
    """
    kb = api.get_kb_by_slug(slug)
    check_knowledge_access(kb)
    parser = reqparse.RequestParser()
    for arg_name, arg_type, arg_help in (
            ('from', str, "Return only entries where key matches this."),
            ('to', str, "Return only entries where value matches this."),
            ('page', int, "Require a specific page"),
            ('per_page', int, "Set how much result per page"),
            ('match_type', str, "s=substring, e=exact, sw=startswith"),
            ('sortby', str, "the sorting criteria ('from' or 'to')")):
        parser.add_argument(arg_name, type=arg_type, help=arg_help)
    args = parser.parse_args()
    kb_dict = kb.to_dict()
    kb_dict['mappings'] = KnwKBMappingsResource.search_mappings(
        kb=kb, key=args['from'], value=args['to'],
        match_type=args['match_type'], sortby=args['sortby'],
        page=args['page'], per_page=args['per_page'])
    return kb_dict
def create_shot(self, sequence):
    """Create and return a new shot.

    :param sequence: the sequence for the shot
    :type sequence: :class:`jukeboxcore.djadapter.models.Sequence`
    :returns: The created shot or None
    :rtype: None | :class:`jukeboxcore.djadapter.models.Shot`
    :raises: None
    """
    dialog = ShotCreatorDialog(sequence=sequence, parent=self)
    dialog.exec_()
    return dialog.shot
def from_charmm(cls, path, positions=None, forcefield=None, strict=True, **kwargs):
    """Loads PSF Charmm structure from `path`. Requires `charmm_parameters`.

    Parameters
    ----------
    path : str
        Path to PSF file
    forcefield : list of str
        Paths to Charmm parameters files, such as *.par or *.str. REQUIRED

    Returns
    -------
    psf : SystemHandler
        SystemHandler with topology. Charmm parameters are embedded in
        the `master` attribute.
    """
    psf = CharmmPsfFile(path)
    if strict:
        if forcefield is None:
            raise ValueError('PSF files require key `forcefield`.')
        if positions is None:
            raise ValueError('PSF files require key `positions`.')
    psf.parmset = CharmmParameterSet(*forcefield)
    psf.loadParameters(psf.parmset)
    return cls(master=psf, topology=psf.topology, positions=positions,
               path=path, **kwargs)
def rawselect(message: Text,
              choices: List[Union[Text, Choice, Dict[Text, Any]]],
              default: Optional[Text] = None,
              qmark: Text = DEFAULT_QUESTION_PREFIX,
              style: Optional[Style] = None,
              **kwargs: Any) -> Question:
    """Ask the user to select one item from a list of choices using shortcuts.

    The user can only select one option.

    Args:
        message: Question text.
        choices: Items shown in the selection; may contain `Choice` or
            `Separator` objects or simple items as strings.
        default: Default return value (single value).
        qmark: Question prefix displayed in front of the question
            (by default a `?`).
        style: A custom color and style for the question parts.

    Returns:
        Question: Question instance, ready to be prompted (using `.ask()`).
    """
    return select.select(message, choices, default, qmark, style,
                         use_shortcuts=True, **kwargs)
def parse_date(value):
    """Attempts to parse `value` into an instance of ``datetime.date``. If
    `value` is ``None`` (or otherwise falsy), this function returns ``None``.

    Args:
        value: A timestamp. This can be a string, datetime.date, or
            datetime.datetime value.
    """
    if not value:
        return None
    # Bug fix: datetime.datetime is a subclass of datetime.date, so the old
    # single isinstance(value, datetime.date) check returned full datetime
    # objects unconverted.
    if isinstance(value, datetime.datetime):
        return value.date()
    if isinstance(value, datetime.date):
        return value
    return parse_datetime(value).date()
def configure_error_handlers(app):
    """Configure application error handlers."""
    def render_error(error):
        # Render the matching error template; the tuple's second element is
        # the HTTP status code returned to the client.
        return (render_template('errors/%s.html' % error.code,
                                title=error_messages[error.code],
                                code=error.code),
                error.code)

    # Bug fix: dict.iteritems() is Python 2 only; items() works on both.
    for (errcode, title) in error_messages.items():
        app.errorhandler(errcode)(render_error)
def get_renderers(self):
    """Optionally block Browsable API rendering."""
    renderers = super(WithDynamicViewSetMixin, self).get_renderers()
    if settings.ENABLE_BROWSABLE_API is False:
        renderers = [renderer for renderer in renderers
                     if not isinstance(renderer, BrowsableAPIRenderer)]
    return renderers
def create_account(self, **kwargs):
    """Create a new root account.

    :calls: `POST /api/v1/accounts \
    <https://canvas.instructure.com/doc/api/accounts.html#method.accounts.create>`_

    :rtype: :class:`canvasapi.account.Account`
    """
    response = self.__requester.request(
        'POST', 'accounts', _kwargs=combine_kwargs(**kwargs))
    return Account(self.__requester, response.json())
def pull(self):
    """Pull from the origin."""
    pull_from_origin(join(settings.REPO_ROOT, self.name))
def check_smart_storage_config_ids(self):
    """Check SmartStorageConfig controllers is there in hardware.

    :raises: IloError, on an error from iLO.
    """
    if self.smart_storage_config_identities is not None:
        return
    msg = ('The Redfish controller failed to get the '
           'SmartStorageConfig controller configurations.')
    LOG.debug(msg)
    raise exception.IloError(msg)
def detect_interval(
        self,
        min_head_length=None,
        max_head_length=None,
        min_tail_length=None,
        max_tail_length=None
):
    """Detect the interval of the audio file containing the fragments
    in the text file.

    Return the audio interval as a tuple of two
    :class:`~aeneas.exacttiming.TimeValue` objects, representing the begin
    and end time, in seconds, with respect to the full wave duration.
    If one of the parameters is ``None``, the default value
    (``0.0`` for min, ``10.0`` for max) will be used.

    :param min_head_length: estimated minimum head length
    :param max_head_length: estimated maximum head length
    :param min_tail_length: estimated minimum tail length
    :param max_tail_length: estimated maximum tail length
    :rtype: (:class:`~aeneas.exacttiming.TimeValue`, :class:`~aeneas.exacttiming.TimeValue`)
    :raises: TypeError: if one of the parameters is not ``None`` or a number
    :raises: ValueError: if one of the parameters is negative
    """
    head = self.detect_head(min_head_length, max_head_length)
    tail = self.detect_tail(min_tail_length, max_tail_length)
    begin = head
    end = self.real_wave_mfcc.audio_length - tail
    self.log([u"Audio length: %.3f", self.real_wave_mfcc.audio_length])
    self.log([u"Head length: %.3f", head])
    self.log([u"Tail length: %.3f", tail])
    self.log([u"Begin: %.3f", begin])
    self.log([u"End: %.3f", end])
    if (begin >= TimeValue("0.000")) and (end > begin):
        self.log([u"Returning %.3f %.3f", begin, end])
        return (begin, end)
    # Degenerate interval: fall back to the empty (0, 0) interval.
    self.log(u"Returning (0.000, 0.000)")
    return (TimeValue("0.000"), TimeValue("0.000"))
def login_required(func):
    """When applied to a view function, any unauthenticated requests will
    be redirected to the view named in LDAP_LOGIN_VIEW. Authenticated
    requests do NOT require membership from a specific group.

    The login view is responsible for asking for credentials, checking
    them, and setting ``flask.g.user`` to the name of the authenticated
    user if the credentials are acceptable.

    :param func: The view function to decorate.
    """
    @wraps(func)
    def wrapped(*args, **kwargs):
        if g.user is not None:
            return func(*args, **kwargs)
        login_view = current_app.config['LDAP_LOGIN_VIEW']
        return redirect(url_for(login_view, next=request.path))
    return wrapped
def rsync_git(local_path, remote_path, exclude=None, extra_opts=None,
              version_file='version.txt'):
    """Rsync deploy a git repo. Write and compare version.txt."""
    with settings(hide('output', 'running'), warn_only=True):
        remote_version = run('cat ' + '{}/{}'.format(
            remote_path, version_file)).strip()
        print(green('Version On Server: ' + remote_version))
        local_version = write_version(join(local_path, version_file))
        print(green('Now Deploying Version ' + local_version))
        rsync(local_path, remote_path, exclude, extra_opts)
def wrap(tensor, books=None, tensor_shape=None):
    """Creates an input layer representing the given tensor.

    Args:
        tensor: The tensor.
        books: The bookkeeper; this is usually not required unless you are
            building multiple `tf.Graphs.`
        tensor_shape: An optional shape that will be set on the Tensor or
            verified to match the tensor.
    Returns:
        A layer.
    """
    if books is None:
        books = bookkeeper.for_default_graph()
    if isinstance(tensor, PrettyTensor):
        return tensor.as_layer()
    if isinstance(tensor, UnboundVariable):
        def set_input_from_unbound_var(data):
            # Defer wrapping until data is actually bound.
            return wrap(data, books) if data is not None else None
        return _DeferredLayer(books, set_input_from_unbound_var, [tensor], {})
    tensor = tf.convert_to_tensor(tensor, name='input')
    if tensor_shape:
        _set_shape_on_tensor(tensor, tensor_shape)
    return Layer(books, tensor=tensor, name=tensor.name)
def _build_schema(self, s):
    """Recursive schema builder, called by `json_schema`."""
    kind = self._whatis(s)
    if kind == self.IS_LIST:
        item_kind = self._whatis(s[0])
        js = {"type": "array",
              "items": {"type": self._jstype(item_kind, s[0])}}
    elif kind == self.IS_DICT:
        js = {"type": "object",
              "properties": {key: self._build_schema(val)
                             for key, val in s.items()}}
        required = [key for key, val in s.items() if not val.is_optional]
        if required:
            js["required"] = required
    else:
        js = {"type": self._jstype(kind, s)}
    # Apply defaults without clobbering keys that were just built.
    for key, default in self._json_schema_keys.items():
        js.setdefault(key, default)
    return js
def make_python_name(self, name):
    """Transforms an USR into a valid python name."""
    replacements = (('<', '_'), ('>', '_'), ('::', '__'), (',', ''), (' ', ''),
                    ("$", "DOLLAR"), (".", "DOT"), ("@", "_"), (":", "_"),
                    ('-', '_'))
    for old, new in replacements:
        if old in name:
            name = name.replace(old, new)
    if name.startswith("__"):
        # Avoid producing dunder-looking names.
        return "_X" + name
    if name and name[0] in "01234567879":
        # Identifiers cannot start with a digit.
        return "_" + name
    return name
def zip_currentdir(self) -> None:
    """Pack the current working directory in a `zip` file.

    |FileManager| subclasses allow for manual packing and automatic
    unpacking of working directories.  The only supported format is `zip`.
    Only the contained files are archived, not the directory itself.
    To avoid possible inconsistencies, the origin directory is removed
    after packing (deleting the `currentdir` attribute triggers the
    cleanup); the zip file is unpacked again as soon as the directory
    becomes the current working directory once more.
    """
    archive_path = f'{self.currentpath}.zip'
    with zipfile.ZipFile(archive_path, 'w') as archive:
        for source_path, arc_name in zip(self.filepaths, self.filenames):
            archive.write(filename=source_path, arcname=arc_name)
    del self.currentdir
def parse_year_days(year_info):
    """Parse year days from a year info.

    Each of the 12 regular months contributes 29 days plus one extra day
    when its bit in ``year_info`` is set; leap-month days are added on top.
    """
    leap_month, leap_days = _parse_leap(year_info)
    days = leap_days
    days += sum((year_info >> (16 - month)) % 2 + 29
                for month in range(1, 13))
    return days
def interactive_server(port=27017, verbose=True, all_ok=False, name='MockupDB',
                       ssl=False, uds_path=None):
    """Create a `MockupDB` that the mongo shell can connect to.

    Call `~.MockupDB.run` on the returned server, and clean it up with
    `~.MockupDB.stop`.

    When ``uds_path`` is given, the TCP port is disabled in favor of the
    Unix domain socket.  If ``all_ok`` is True, replies ``{ok: 1}`` to
    anything unmatched by a specific responder.
    """
    server = MockupDB(
        port=None if uds_path is not None else port,
        verbose=verbose,
        request_timeout=int(1e6),
        ssl=ssl,
        auto_ismaster=True,
        uds_path=uds_path,
    )
    if all_ok:
        server.append_responder({})
    # Canned replies for the commands a shell issues on connect.
    server.autoresponds('whatsmyuri', you='localhost:12345')
    server.autoresponds({'getLog': 'startupWarnings'},
                        log=['hello from %s!' % name])
    server.autoresponds(OpMsg('buildInfo'), version='MockupDB ' + __version__)
    server.autoresponds(OpMsg('listCollections'))
    server.autoresponds('replSetGetStatus', ok=0)
    server.autoresponds('getFreeMonitoringStatus', ok=0)
    return server
Call `~.MockupDB.run` on the returned server, and clean it up with
`~.MockupDB.stop`.
If ``all_ok`` is True, replies {ok: 1} to anything unmatched by a specific
responder. |
def to_shcoeffs(self, itaper, normalization='4pi', csphase=1):
    """Return the spherical harmonic coefficients of taper *itaper* as an
    SHCoeffs class instance.

    Parameters
    ----------
    itaper : int
        Taper number, where itaper=0 is the best concentrated.
    normalization : str, optional, default = '4pi'
        Output normalization: '4pi', 'ortho' or 'schmidt'.
    csphase : int, optional, default = 1
        Condon-Shortley phase convention: 1 to exclude the phase factor,
        or -1 to include it.

    Returns
    -------
    clm : SHCoeffs class instance
    """
    if type(normalization) != str:
        raise ValueError('normalization must be a string. ' +
                         'Input type was {:s}'
                         .format(str(type(normalization))))
    norm = normalization.lower()
    if norm not in ('4pi', 'ortho', 'schmidt'):
        raise ValueError(
            "normalization must be '4pi', 'ortho' " +
            "or 'schmidt'. Provided value was {:s}"
            .format(repr(normalization))
            )
    if csphase != 1 and csphase != -1:
        raise ValueError(
            "csphase must be 1 or -1. Input value was {:s}"
            .format(repr(csphase))
            )
    coeffs = self.to_array(itaper, normalization=norm, csphase=csphase)
    return SHCoeffs.from_array(coeffs, normalization=norm,
                               csphase=csphase, copy=False)
class instance.
Usage
-----
clm = x.to_shcoeffs(itaper, [normalization, csphase])
Returns
-------
clm : SHCoeffs class instance
Parameters
----------
itaper : int
Taper number, where itaper=0 is the best concentrated.
normalization : str, optional, default = '4pi'
Normalization of the output class: '4pi', 'ortho' or 'schmidt' for
geodesy 4pi-normalized, orthonormalized, or Schmidt semi-normalized
coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it. |
def finish(self):
    """Parse the buffered CSS body, rewrite its URLs via ``self._replace``,
    write the result to the wrapped request, and finish the request."""
    sheet = CSSParser().parseString(''.join(self._buffer))
    replaceUrls(sheet, self._replace)
    self.request.write(sheet.cssText)
    return self.request.finish()
the wrapped request, and finish the wrapped request. |
def send_confirmation_email(self):
    """Send two emails confirming a pending email-address change.

    One message goes to the new (unconfirmed) address and carries the
    ``email_confirmation_key`` used to verify the address with
    ``User.objects.confirm_email``; the other goes to the current
    address to notify the user that a change was requested.
    """
    context = {'user': self,
               'new_email': self.email_unconfirmed,
               'protocol': get_protocol(),
               'confirmation_key': self.email_confirmation_key,
               'site': Site.objects.get_current()}
    # The two messages differ only by template suffix and recipient, so
    # render/send them in one loop instead of duplicating the logic.
    targets = (('old', [self.email]),
               ('new', [self.email_unconfirmed]))
    for suffix, recipients in targets:
        # Subjects must be single-line; strip any newlines the template
        # renders.
        subject = ''.join(render_to_string(
            'accounts/emails/confirmation_email_subject_{0}.txt'.format(suffix),
            context).splitlines())
        message = render_to_string(
            'accounts/emails/confirmation_email_message_{0}.txt'.format(suffix),
            context)
        send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, recipients)
This method sends out two emails. One to the new email address that
contains the ``email_confirmation_key`` which is used to verify
this email address with :func:`User.objects.confirm_email`.
The other email is to the old email address to let the user know that
a request is made to change this email address. |
def from_moment_relative_to_crystal_axes(cls, moment, lattice):
    """Build a Magmom from a moment given relative to crystal axes.

    Used for obtaining moments from magCIF files.

    :param moment: length-3 sequence of floats (crystal-axis components)
    :param lattice: Lattice whose axes define the reference frame
    :return: Magmom
    """
    # Normalize each lattice vector to unit length, then project the
    # crystal-frame moment into Cartesian coordinates.
    axis_lengths = np.linalg.norm(lattice.matrix, axis=1)
    unit_axes = lattice.matrix / axis_lengths[:, None]
    cart = np.matmul(list(moment), unit_axes)
    # Round numerical noise down to exactly zero.
    cart[np.abs(cart) < 1e-8] = 0
    return cls(cart)
relative to crystal axes.
Used for obtaining moments from magCIF file.
:param moment: list of floats specifying the vector magnetic moment
:param lattice: Lattice
:return: Magmom |
def data_to_imagesurface(data, **kwargs):
    """Turn arbitrary data values into a Cairo ImageSurface.

    Same method and arguments as data_to_argb32, except the array is
    treated as 2D; higher dimensionalities are rejected.  Combined with
    ImageSurface.write_to_png(), this is a quick way to visualize 2D data.
    """
    import cairo

    arr = np.atleast_2d(data)
    if arr.ndim != 2:
        raise ValueError('input array may not have more than 2 dimensions')
    argb32 = data_to_argb32(arr, **kwargs)
    fmt = cairo.FORMAT_ARGB32
    height, width = argb32.shape
    expected_stride = cairo.ImageSurface.format_stride_for_width(fmt, width)
    # Cairo requires a specific row stride; the converted array must match.
    if argb32.strides[0] != expected_stride:
        raise ValueError('stride of data array not compatible with ARGB32')
    return cairo.ImageSurface.create_for_data(argb32, fmt, width, height,
                                              expected_stride)
The method and arguments are the same as data_to_argb32, except that the
data array will be treated as 2D, and higher dimensionalities are not
allowed. The return value is a Cairo ImageSurface object.
Combined with the write_to_png() method on ImageSurfaces, this is an easy
way to quickly visualize 2D data. |
def blueprint(self) -> Optional[str]:
    """Name of the blueprint the matched endpoint belongs to.

    ``None`` when the request has not been matched or the endpoint is
    not namespaced with a blueprint.
    """
    endpoint = self.endpoint
    if endpoint is None or '.' not in endpoint:
        return None
    return endpoint.rsplit('.', 1)[0]
This can be None if the request has not been matched or the
endpoint is not in a blueprint. |
def is_third_friday(day=None):
    """Check whether *day* is the month's third Friday.

    A Thursday at or after 17:00 is treated as a de-facto Friday.

    :param day: datetime to test; defaults to ``datetime.datetime.now()``.
    :return: True if *day* falls on (or effectively on) the third Friday.
    """
    if day is None:
        day = datetime.datetime.now()
    # BUG FIX: ``hour`` is an int attribute, not a method; the original
    # ``day.hour()`` raised TypeError on every Thursday evaluation.
    defacto_friday = (day.weekday() == 4) or (
        day.weekday() == 3 and day.hour >= 17)
    # The third Friday of any month always has day-of-month 15..21.
    return defacto_friday and 14 < day.day < 22
def get_zorder(self, overlay, key, el):
    """Compute the z-order of *el* in the NdOverlay, accounting for
    possible batching of elements."""
    return self.ordering.index(util.get_overlay_spec(overlay, key, el))
taking into account possible batching of elements. |
def parseLines(self):
    """Build an AST from the buffered source lines and visit it to
    produce the rewritten output."""
    tree = parse(''.join(self.lines), self.inFilename)
    self.visit(tree)
def truncated_normal_log_likelihood(params, low, high, data):
    """Negative log likelihood of *data* under a truncated normal.

    Args:
        params: tuple (mean, std) to evaluate the model under.
        low (float): lower truncation bound.
        high (float): upper truncation bound.
        data (ndarray): 1D array of observations.

    Returns:
        float: negative log likelihood (np.inf for degenerate sigma).
        Meant for use in minimization routines.
    """
    mu, sigma = params[0], params[1]
    # Guard degenerate scales: the original only rejected sigma == 0,
    # letting sigma < 0 silently produce NaNs from the normal pdf/cdf.
    if sigma <= 0:
        return np.inf
    ll = np.sum(norm.logpdf(data, mu, sigma))
    ll -= len(data) * np.log(norm.cdf(high, mu, sigma) - norm.cdf(low, mu, sigma))
    return -ll
Args:
params: tuple with (mean, std), the parameters under which we evaluate the model
low (float): the lower truncation bound
high (float): the upper truncation bound
data (ndarray): the one dimension list of data points for which we want to calculate the likelihood
Returns:
float: the negative log likelihood of observing the given data under the given parameters.
This is meant to be used in minimization routines. |
def _heartbeat_loop(self):
    """Main heartbeat thread: periodically report liveness until closed."""
    self.logger.debug("running main heartbeat thread")
    while not self.closed:
        # Sleep first so a freshly started service does not report
        # immediately on startup.
        time.sleep(self.settings['SLEEP_TIME'])
        self._report_self()
def _get_atomsection(mol2_lst):
started = False
for idx, s in enumerate(mol2_lst):
if s.startswith('@<TRIPOS>ATOM'):
first_idx = idx + 1
started = True
elif started and s.startswith('@<TRIPOS>'):
last_idx_plus1 = idx
break
return mol2_lst[first_idx:last_idx_plus1] | Returns atom section from mol2 provided as list of strings |
def partition_list(pred, iterable):
    """Split *iterable* by *pred* into two lists.

    Thin list-materializing wrapper around ``partition_iter``; prefer
    that function when lazy iteration suffices.

    :returns: a tuple (satisfiers, unsatisfiers).
    """
    satisfiers, unsatisfiers = partition_iter(pred, iterable)
    return list(satisfiers), list(unsatisfiers)
the predicate and one with elements that do not satisfy it.
.. note:: this just converts the results of partition_iter to lists for you so that you don't
   have to; in most cases, using `partition_iter` directly is a better option.
:returns: a tuple (satisfiers, unsatisfiers). |
def upload_file_and_send_file_offer(self, file_name, user_id, data=None, input_file_path=None,
                                    content_type='application/octet-stream', auto_open=False,
                                    prevent_share=False, scope='content/send'):
    """Upload a file of any type to store; return a FileId once the file
    offer has been sent.  No user authentication required.

    Either *data* (raw bytes) or *input_file_path* must be provided.
    """
    if input_file_path:
        with open(input_file_path, 'rb') as fh:
            data = fh.read()
    if not data:
        raise ValueError('Either the data of a file or the path to a file must be provided')
    query = urllib.urlencode({
        'fileName': file_name,
        'userId': user_id,
        'autoOpen': 'true' if auto_open else 'false',
        'preventShare': 'true' if prevent_share else 'false',
    })
    return _post(
        token=self.oauth.get_app_token(scope),
        uri='/user/media/file/send?' + query,
        data=data,
        content_type=content_type
    )
No user authentication required |
def task_annotate(self, task, annotation):
    """Add *annotation* to *task* and return the re-fetched task."""
    self._execute(task['uuid'], 'annotate', '--', annotation)
    _task_id, annotated = self.get_task(uuid=task[six.u('uuid')])
    return annotated
def colors(self, color_code):
    """Change the foreground/background colors for subsequent output.

    ``None`` resets both colors to their original values.  Windows
    console attributes merge foreground and background into one value,
    so a partial reset must combine a default half with the current
    other half before calling SetConsoleTextAttribute.

    :param int color_code: Color code from WINDOWS_CODES.
    """
    if color_code is None:
        color_code = WINDOWS_CODES['/all']
    current_fg, current_bg = self.colors
    if color_code == WINDOWS_CODES['/fg']:
        # Reset only the foreground; keep the current background.
        merged = self.default_fg | current_bg
    elif color_code == WINDOWS_CODES['/bg']:
        # Reset only the background; keep the current foreground.
        merged = current_fg | self.default_bg
    elif color_code == WINDOWS_CODES['/all']:
        merged = self.default_fg | self.default_bg
    elif color_code == WINDOWS_CODES['bgblack']:
        merged = current_fg
    else:
        # A concrete color: merge it with whichever half it doesn't set.
        if color_code in self.ALL_BG_CODES:
            merged = color_code | current_fg
        else:
            merged = color_code | current_bg
    self._kernel32.SetConsoleTextAttribute(self._stream_handle, merged)
None resets colors to their original values (when class was instantiated).
Since setting a color requires including both foreground and background codes (merged), setting just the
foreground color resets the background color to black, and vice versa.
This function first gets the current background and foreground colors, merges in the requested color code, and
sets the result.
However if we need to remove just the foreground color but leave the background color the same (or vice versa)
such as when {/red} is used, we must merge the default foreground color with the current background color. This
is the reason for those negative values.
:param int color_code: Color code from WINDOWS_CODES. |
def process_nxml_file(file_name, citation=None, offline=False,
                      output_fname=default_output_fname):
    """Return a ReachProcessor by processing the given NXML file.

    NXML is the format used by PubmedCentral for papers in the open
    access subset.

    Parameters
    ----------
    file_name : str
        The name of the NXML file to be processed.
    citation : Optional[str]
        A PubMed ID used in the evidence for the extracted Statements.
    offline : Optional[bool]
        If True, run the REACH system offline; otherwise call the web
        service.  Default: False.
    output_fname : Optional[str]
        File to write the REACH JSON output to.

    Returns
    -------
    rp : ReachProcessor
        Processor containing the extracted Statements in rp.statements.
    """
    with open(file_name, 'rb') as f:
        nxml_str = f.read().decode('utf-8')
    # BUG FIX: forward the caller's ``offline`` flag; it was previously
    # hard-coded to False, so offline processing could never be selected.
    return process_nxml_str(nxml_str, citation, offline, output_fname)
NXML is the format used by PubmedCentral for papers in the open
access subset.
Parameters
----------
file_name : str
The name of the NXML file to be processed.
citation : Optional[str]
A PubMed ID passed to be used in the evidence for the extracted INDRA
Statements. Default: None
offline : Optional[bool]
If set to True, the REACH system is ran offline. Otherwise (by default)
the web service is called. Default: False
output_fname : Optional[str]
The file to output the REACH JSON output to.
Defaults to reach_output.json in current working directory.
Returns
-------
rp : ReachProcessor
A ReachProcessor containing the extracted INDRA Statements
in rp.statements. |
def f1_score(df, col_true=None, col_pred='prediction_result', pos_label=1, average=None):
    r"""Compute the f-1 score of a predicted DataFrame.

    .. math::
        \frac{2 \cdot precision \cdot recall}{precision + recall}

    :param df: predicted data frame
    :param col_true: column name of the true label
    :param col_pred: column name of the predicted label,
        'prediction_result' by default
    :param pos_label: desired class label when ``average`` == 'binary'
    :param average: averaging mode (None, 'binary', 'micro', 'macro' or
        'weighted')
    :return: float | numpy.array[float]
    """
    # BUG FIX: the default column name was misspelled 'precision_result';
    # the documented (and conventional) name is 'prediction_result'.  A
    # stray bare ``r`` expression (a leftover raw-string prefix) was also
    # removed -- it raised NameError at call time.
    if not col_pred:
        col_pred = get_field_name_by_role(df, FieldRole.PREDICTED_CLASS)
    return fbeta_score(df, col_true, col_pred, pos_label=pos_label, average=average)
Compute f-1 score of a predicted DataFrame. f-1 is defined as
.. math::
\frac{2 \cdot precision \cdot recall}{precision + recall}
:Parameters:
- **df** - predicted data frame
- **col_true** - column name of true label
- **col_pred** - column name of predicted label, 'prediction_result' by default.
- **pos_label** - denote the desired class label when ``average`` == `binary`
- **average** - denote the method to compute average.
:Returns:
Recall score
:Return type:
float | numpy.array[float]
The parameter ``average`` controls the behavior of the function.
* When ``average`` == None (by default), f-1 of every class is given as a list.
* When ``average`` == 'binary', f-1 of class specified in ``pos_label`` is given.
* When ``average`` == 'micro', f-1 of overall precision and recall is given, where overall precision and recall are computed in micro-average mode.
* When ``average`` == 'macro', average f-1 of all the class is given.
* When ``average`` == `weighted`, average f-1 of all the class weighted by support of every true classes is given.
:Example:
Assume we have a table named 'predicted' as follows:
======== ===================
label prediction_result
======== ===================
0 1
1 2
2 1
1 1
1 0
2 2
======== ===================
Different options of ``average`` parameter outputs different values:
.. code-block:: python
>>> f1_score(predicted, 'label', average=None)
array([ 0. , 0.33333333, 0.5 ])
>>> f1_score(predicted, 'label', average='macro')
0.27
>>> f1_score(predicted, 'label', average='micro')
0.33
>>> f1_score(predicted, 'label', average='weighted')
0.33 |
def pasa(args):
    """%prog pasa pasa_db fastafile

    Run EVM in TIGR-only mode.
    """
    # NOTE: the docstring above is the OptionParser usage string; keep it.
    parser = OptionParser(pasa.__doc__)
    opts, args = parser.parse_args(args)
    if len(args) != 2:
        sys.exit(not parser.print_help())
    pasa_db, fastafile = args
    termexons = "pasa.terminal_exons.gff3"
    if need_update(fastafile, termexons):
        # Extract the PASA training set from the TIGR MySQL database.
        train_cmd = ("$ANNOT_DEVEL/PASA2/scripts/pasa_asmbls_to_training_set.dbi"
                     ' -M "{0}:mysql.tigr.org" -p "access:access"'
                     ' -g {1}').format(pasa_db, fastafile)
        sh(train_cmd)
        # Pull the terminal CDS exons out of the training candidates.
        exon_cmd = ("$EVM/PasaUtils/retrieve_terminal_CDS_exons.pl"
                    " trainingSetCandidates.fasta trainingSetCandidates.gff")
        sh(exon_cmd, outfile=termexons)
    return termexons
Run EVM in TIGR-only mode. |
def _show_stat(self):
    """Delegate to the static multi-progress stat printer with this
    instance's state."""
    _show_stat_wrapper_multi_Progress(
        self.count, self.last_count, self.start_time, self.max_count,
        self.speed_calc_cycles, self.width, self.q, self.last_speed,
        self.prepend, self.show_stat, self.len, self.add_args,
        self.lock, self.info_line, no_move_up=True)
the given class members |
def check_script(vouts):
    """Return the ``op_return`` verb from a transaction's outputs.

    Scans OP_RETURN outputs (hex starting with '6a') from last to first
    and returns the first decoded verb whose action is supported.

    Args:
        vouts (list): List of outputs of a transaction.
    Returns:
        str: String representation of the ``op_return``.
    Raises:
        Exception: If no output with a supported verb is found.
    """
    op_return_vouts = [v for v in reversed(vouts) if v['hex'].startswith('6a')]
    for vout in op_return_vouts:
        verb = BlockchainSpider.decode_op_return(vout['hex'])
        if Spoolverb.from_verb(verb).action in Spoolverb.supported_actions:
            return verb
    raise Exception("Invalid ascribe transaction")
and returns the ``op_return`` if one exists.
Args;
vouts (list): List of outputs of a transaction.
Returns:
str: String representation of the ``op_return``.
Raises:
Exception: If no ``vout`` having a supported
verb (:attr:`supported_actions`) is found. |
def if_sqlserver_disable_constraints_triggers(session: SqlASession,
                                              tablename: str) -> None:
    """Context-manager body: under SQL Server, disable constraints AND
    triggers for *tablename* while the resource is held (no-op on other
    backends).

    Args:
        session: SQLAlchemy :class:`Session`
        tablename: table name
    """
    with if_sqlserver_disable_constraints(session, tablename), \
            if_sqlserver_disable_triggers(session, tablename):
        yield
specified table while the resource is held.
Args:
session: SQLAlchemy :class:`Session`
tablename: table name |
def set(self, key, val, time=0, min_compress_len=0):
    # Unconditional store: delegate to the generic _set helper with the
    # memcached "set" command verb.  Returns nonzero on success.
    return self._set("set", key, val, time, min_compress_len)
The C{key} can optionally be an tuple, with the first element
being the server hash value and the second being the key.
If you want to avoid making this module calculate a hash value.
You may prefer, for example, to keep all of a given user's objects
on the same memcache server, so you could use the user's unique
id as the hash value.
@return: Nonzero on success.
@rtype: int
@param time: Tells memcached the time which this value should expire, either
as a delta number of seconds, or an absolute unix time-since-the-epoch
value. See the memcached protocol docs section "Storage Commands"
for more info on <exptime>. We default to 0 == cache forever.
@param min_compress_len: The threshold length to kick in auto-compression
of the value using the zlib.compress() routine. If the value being cached is
a string, then the length of the string is measured, else if the value is an
object, then the length of the pickle result is measured. If the resulting
attempt at compression yields a larger string than the input, then it is
discarded. For backwards compatibility, this parameter defaults to 0,
indicating don't ever try to compress. |
def _restore_backup(self):
    """Restore the configured database from its backup file, optionally
    decrypting and/or uncompressing it first."""
    input_filename, input_file = self._get_backup_file(database=self.database_name,
                                                       servername=self.servername)
    self.logger.info("Restoring backup for database '%s' and server '%s'",
                     self.database_name, self.servername)
    self.logger.info("Restoring: %s" % input_filename)
    if self.decrypt:
        # Decrypt into a fresh temp file, close the encrypted handle, and
        # swap so the rest of the pipeline sees plaintext.
        unencrypted_file, input_filename = utils.unencrypt_file(input_file, input_filename,
                                                                self.passphrase)
        input_file.close()
        input_file = unencrypted_file
    if self.uncompress:
        # Same handle-swap for decompression (runs after decryption).
        uncompressed_file, input_filename = utils.uncompress_file(input_file, input_filename)
        input_file.close()
        input_file = uncompressed_file
    self.logger.info("Restore tempfile created: %s", utils.handle_size(input_file))
    if self.interactive:
        self._ask_confirmation()
    # Rewind before handing the stream to the database connector.
    input_file.seek(0)
    self.connector = get_connector(self.database_name)
    self.connector.restore_dump(input_file)
def email(anon, obj, field, val):
    # Anonymizer hook generating a random email address.  ``obj`` and
    # ``val`` are required by the anonymizer signature but unused here.
    return anon.faker.email(field=field)
def _parse_dav_element(self, dav_response):
    """Parse a single DAV response element.

    :param dav_response: DAV response element
    :returns: :class:`FileInfo`
    """
    raw_href = dav_response.find('{DAV:}href').text
    href = parse.unquote(self._strip_dav_path(raw_href))
    if six.PY2:
        href = href.decode('utf-8')
    # A trailing slash marks a collection (directory).
    file_type = 'dir' if href[-1] == '/' else 'file'
    prop = dav_response.find('{DAV:}propstat').find('{DAV:}prop')
    file_attrs = {attr.tag: attr.text for attr in prop}
    return FileInfo(href, file_type, file_attrs)
:param dav_response: DAV response
:returns :class:`FileInfo` |
def pelix_services(self):
    """Map service ID to a description dict for every registered service."""
    services = {}
    for svc_ref in self.__context.get_all_service_references(None):
        bundle = svc_ref.get_bundle()
        services[svc_ref.get_property(pelix.constants.SERVICE_ID)] = {
            "specifications": svc_ref.get_property(pelix.constants.OBJECTCLASS),
            "ranking": svc_ref.get_property(pelix.constants.SERVICE_RANKING),
            "properties": svc_ref.get_properties(),
            "bundle.id": bundle.get_bundle_id(),
            "bundle.name": bundle.get_symbolic_name(),
        }
    return services
def gameloop(self):
    """Run the main game loop until the user presses Ctrl-C.

    Each iteration handles input events, updates game state, then
    renders a frame.
    """
    try:
        while True:
            self.handle_events()
            self.update()
            self.render()
    except KeyboardInterrupt:
        # Ctrl-C is the expected way to quit; swallow it for a clean exit.
        pass
def _make_mask(self, data, lon_str=LON_STR, lat_str=LAT_STR):
    """Construct the boolean mask defining this object's region(s) on the
    grid of *data*.

    Each (west, east, south, north) tuple in ``self.mask_bounds``
    contributes one rectangle; rectangles are OR-ed together.  A pair
    with west >= east is interpreted as wrapping across the longitude
    seam.
    """
    lons = data[lon_str]
    lats = data[lat_str]
    mask = False
    for west, east, south, north in self.mask_bounds:
        if west < east:
            in_lon = (lons > west) & (lons < east)
        else:
            # Region wraps the longitude seam: keep everything outside
            # the (east, west) gap.
            in_lon = (lons < west) | (lons > east)
        in_lat = (lats > south) & (lats < north)
        mask = mask | (in_lon & in_lat)
    return mask
def get_database_configuration():
    """Return the database configuration as a dictionary, or None when no
    configuration file is available."""
    db_config = get_database_config_file()
    if db_config is None:
        return None
    with open(db_config, 'r') as ymlfile:
        # safe_load only constructs plain Python objects; yaml.load
        # without an explicit Loader is deprecated and can execute
        # arbitrary object construction on untrusted input.
        return yaml.safe_load(ymlfile)
def _construct(self, context):
    """Construct this layer by calling its deferred method.

    Assumes all unbound vars have been specified in *context*; if this
    layer was already computed in this context, the previously
    constructed value is returned.

    Args:
        context: A dict of UnboundVariables/_DeferredLayers to their values.
    Returns:
        The result of calling the deferred method on this layer.
    """
    with self.g.as_default():
        if self._pass_through:
            return self._pass_through._construct(context)
        current_value = context.get(self, None)
        # _unspecified is a sentinel marking "construction in progress";
        # seeing it here means we re-entered while building ourselves.
        assert current_value is not _unspecified, 'Circular dependency'
        if current_value is not None:
            return current_value
        # Mark in-progress BEFORE resolving args so cycles are detected.
        context[self] = _unspecified
        method_args = self._replace_deferred(self._method_args, context)
        method_kwargs = self._replace_deferred(self._method_kwargs, context)
        result = self._method(*method_args, **method_kwargs)
        _strip_unnecessary_contents_from_stack(result, set())
        # Cache so later references in the same context reuse this result.
        context[self] = result
        return result
This assumes that all unbound_vars have been specified in context and if
this layer has already been computed in this context, then the previously
constructed value will be returned.
Args:
context: A dict of UnboundVariables/_DeferredLayers to their values.
Returns:
The result of calling the given method on this layer. |
def get_qualification_type_by_name(self, name):
    """Return a Qualification Type by name.

    If the fuzzy search yields several matches, return the one whose
    name equals *name* (case-insensitively); raise
    MTurkServiceException when none matches exactly.  Returns None when
    nothing is found within ``self.max_wait_secs``.
    """
    query = name.upper()
    search_args = {
        "Query": query,
        "MustBeRequestable": False,
        "MustBeOwnedByCaller": True,
        "MaxResults": 100,  # max fuzzy matches to check
    }
    deadline = time.time() + self.max_wait_secs
    results = self.mturk.list_qualification_types(**search_args)["QualificationTypes"]
    # MTurk search is eventually consistent; poll until the deadline.
    while not results and time.time() < deadline:
        time.sleep(1)
        results = self.mturk.list_qualification_types(**search_args)["QualificationTypes"]
    if not results:
        return None
    qualifications = [self._translate_qtype(r) for r in results]
    if len(qualifications) == 1:
        return qualifications[0]
    for qualification in qualifications:
        if qualification["name"].upper() == query:
            return qualification
    raise MTurkServiceException("{} was not a unique name".format(query))
more than one Qualification, check to see if any of the results
match the provided name exactly. If there's an exact match, return
that Qualification. Otherwise, raise an exception. |
def get_list_w_id2nts(ids, id2nts, flds, dflt_null=""):
    """Return a list of namedtuples combining several id->namedtuple dicts.

    For each id in *ids*, pull the matching entries from every dict in
    *id2nts*, merge their *flds* values (missing -> *dflt_null*) and
    wrap the result in a fresh namedtuple.
    """
    ntobj = cx.namedtuple("Nt", " ".join(flds))
    return [
        ntobj._make(_combine_nt_vals([id2nt.get(item_id) for id2nt in id2nts],
                                     flds, dflt_null))
        for item_id in ids
    ]
def get_pending_domain_join():
    """Determine whether there is a pending domain join action that
    requires a reboot.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if there is a pending domain join action, otherwise
        ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_domain_join
    """
    base_key = r'SYSTEM\CurrentControlSet\Services\Netlogon'
    # Either registry key indicates a pending join; check in order.
    pending_keys = (r'{0}\AvoidSpnSet'.format(base_key),
                    r'{0}\JoinDomain'.format(base_key))
    for key in pending_keys:
        if __utils__['reg.key_exists']('HKLM', key):
            log.debug('Key exists: %s', key)
            return True
        log.debug('Key does not exist: %s', key)
    return False
reboot.
.. versionadded:: 2016.11.0
Returns:
bool: ``True`` if there is a pending domain join action, otherwise
``False``
CLI Example:
.. code-block:: bash
salt '*' system.get_pending_domain_join |
def check_dupl_sources(self):
    """Extract duplicated sources, i.e. sources with the same source_id in
    different source groups.

    Sources sharing an ID must be equal; ``_assert_equal_sources`` raises
    otherwise.

    :returns: a list of lists of sources, ordered by source_id
    """
    by_id = collections.defaultdict(list)
    for src_group in self.src_groups:
        for src in src_group:
            try:
                srcid = src.source_id
            except AttributeError:
                # Sources may also be plain dict-like records.
                srcid = src['id']
            by_id[srcid].append(src)
    dupl = []
    for _srcid, srcs in sorted(by_id.items()):
        if len(srcs) > 1:
            _assert_equal_sources(srcs)
            dupl.append(srcs)
    return dupl
different source groups. Raise an exception if there are sources with
the same ID which are not duplicated.
:returns: a list of list of sources, ordered by source_id |
def write(self):
    """Serialize the in-memory log dict to ``self.log_path`` as JSON."""
    with open(self.log_path, "w") as handle:
        json.dump(self.log_dict, handle, indent=1)
def has_hlu(self, lun_or_snap, cg_member=None):
    """Return True if *lun_or_snap* is attached to the host.

    :param lun_or_snap: lun, lun snap, cg snap or a member snap of a cg
        snap.
    :param cg_member: the member lun of the cg when *lun_or_snap* is a
        cg snap.
    :return: True if attached, otherwise False.
    """
    return self.get_hlu(lun_or_snap, cg_member=cg_member) is not None
:param lun_or_snap: can be lun, lun snap, cg snap or a member snap of
cg snap.
:param cg_member: the member lun of cg if `lun_or_snap` is cg snap.
:return: True - if `lun_or_snap` is attached, otherwise False. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.