code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def to_json(self, *args, **kwargs):
    """Generate a schema and serialize it directly to a JSON string.

    :rtype: ``str``
    """
    schema = self.to_schema()
    return json.dumps(schema, *args, **kwargs)
def SetLowerTimestamp(cls, timestamp):
    """Set the lower bound timestamp, keeping the minimum value seen so far."""
    # First call initializes the bound; later calls only lower it.
    if not hasattr(cls, '_lower') or timestamp < cls._lower:
        cls._lower = timestamp
def visit_rule(self, node, rule):
    """Assign the rule's label as the Expression's name and return it."""
    label = rule[0]
    expression = rule[2]
    expression.name = label
    return expression
def _write_to_uaa_cache(self, new_item):
    """Cache the client details into a cache file on disk.

    Replaces any existing entry with the same ``id`` and prunes entries
    whose ``expires`` timestamp has already passed, then appends
    *new_item* and rewrites the JSON cache file.
    """
    data = self._read_uaa_cache()
    clients = data.setdefault(self.uri, [])

    def _keep(client):
        # Drop the entry that is being replaced.
        if new_item['id'] == client['id']:
            return False
        # Drop expired entries.
        if 'expires' in client:
            expires = dateutil.parser.parse(client['expires'])
            if expires < datetime.datetime.now():
                return False
        return True

    # Build a new list instead of removing elements while iterating:
    # the original mutated the list during iteration, which can skip
    # the element following each removal.
    data[self.uri] = [client for client in clients if _keep(client)]
    data[self.uri].append(new_item)
    with open(self._cache_path, 'w') as output:
        output.write(json.dumps(data, sort_keys=True, indent=4))
def _warning(self, msg, duration=None, results=None):
    """Log a warning message tagged with this object's logging id."""
    message = "{} ({})".format(msg, self._get_logging_id())
    extra = self._get_logging_extra(duration=duration, results=results)
    logger.warning(message, extra=extra)
def AddLabel(self, label):
    """Adds a label to the event tag.

    Args:
      label (str): label.

    Raises:
      TypeError: if the label provided is not a string.
      ValueError: if a label is malformed.
    """
    if not isinstance(label, py2to3.STRING_TYPES):
        raise TypeError('label is not a string type. Is {0:s}'.format(
            type(label)))

    if not self._VALID_LABEL_REGEX.match(label):
        raise ValueError((
            'Unsupported label: "{0:s}". A label must only consist of '
            'alphanumeric characters or underscores.').format(label))

    # Guard clause keeps labels unique without nesting the append.
    if label in self.labels:
        return
    self.labels.append(label)
def from_file(cls, path, directory=None, modules=None, active=None):
    """Instantiate a REPP from a `.rpp` file.

    *path* points to the top-level module; submodules are loaded from
    *directory* (defaulting to the directory part of *path*) unless
    already mapped by name in *modules*.

    Args:
        path (str): the path to the base REPP file to load
        directory (str, optional): directory searched for submodules
        modules (dict, optional): mapping from identifiers to REPP modules
        active (iterable, optional): default module activations
    """
    name = basename(path)
    if name.endswith('.rpp'):
        name = name[:-len('.rpp')]
    if directory is None:
        directory = dirname(path)
    repp = cls(name=name, modules=modules, active=active)
    _parse_repp(_repp_lines(path), repp, directory)
    return repp
def open_console(self, client=None):
    """Open an IPython console for the given client or the current one.

    If the notebook has no associated kernel, an error dialog is shown
    and nothing is opened.
    """
    if not client:
        client = self.get_current_client()
    if self.ipyconsole is not None:
        kernel_id = client.get_kernel_id()
        if not kernel_id:
            QMessageBox.critical(
                self, _('Error opening console'),
                _('There is no kernel associated to this notebook.'))
            return
        # Attach a new console client to the notebook's existing kernel.
        self.ipyconsole._create_client_for_kernel(kernel_id, None, None,
                                                  None)
        ipyclient = self.ipyconsole.get_current_client()
        # Lock the tab name so it stays in sync with the notebook name.
        ipyclient.allow_rename = False
        self.ipyconsole.rename_client_tab(ipyclient,
                                          client.get_short_name())
def renamecol(self, old, new):
    """Rename column or color in-place.

    Method wraps::

        tabular.spreadsheet.renamecol(self, old, new)
    """
    spreadsheet.renamecol(self, old, new)
    # Keep the coloring bookkeeping consistent with the renamed column.
    for cols in self.coloring.values():
        if old in cols:
            cols[cols.index(old)] = new
def P(value, bits=None, endian=None, target=None):
    """Pack an unsigned pointer for a given target.

    Args:
        value(int): The value to pack.
        bits: Override the default word size. If ``None`` it will look
            at the word size of ``target``.
        endian: Override the default byte order of the target.
        target: Override the global default target.
    """
    # Dispatch to the width-specific packer (e.g. P32/P64) resolved from
    # the module namespace based on the effective word size.
    return globals()['P%d' % _get_bits(bits, target)](value, endian=endian, target=target)
def _target_id_from_htm(self, line):
m = re.search("\\?code=([a-fA-F0-9]+)", line)
if m:
result = m.groups()[0]
return result
m = re.search("\\?auth=([a-fA-F0-9]+)", line)
if m:
result = m.groups()[0]
return result
return None | ! Extract Target id from htm line.
@return Target id or None |
def decorate_class(self, klass, *decorator_args, **decorator_kwargs):
    """Override this in a child class with your own logic; it must return
    a function that returns klass or the like.

    :param klass: the class object that is being decorated
    :param decorator_args: tuple -- positional args passed to the decorator
    :param decorator_kwargs: dict -- named args passed to the decorator
    :returns: the wrapped class
    :raises RuntimeError: always, in this base implementation
    """
    # The original had an unreachable `return klass` after this raise;
    # it has been removed.
    raise RuntimeError("decorator {} does not support class decoration".format(self.__class__.__name__))
def dom_id(self):
    """A dict of CLBs with DOM ID as key."""
    key = 'DOMID'
    if key not in self._by:
        # Populate the lookup table lazily on first access.
        self._populate(by=key)
    return self._by[key]
def _evaluate(self):
    """Lazily retrieve and paginate report results, yielding parsed elements.

    The first full iteration fetches pages until a short page or the
    configured limit ends the stream, caching parsed elements in
    ``self._elements``; subsequent iterations replay the cache.
    """
    if self._elements:
        # Already fetched: replay cached elements without refetching.
        for element in self._elements:
            yield element
    else:
        for page in itertools.count():
            raw_elements = self._retrieve_raw_elements(page)
            for raw_element in raw_elements:
                element = self._parse_raw_element(raw_element)
                self._elements.append(element)
                yield element
                # Stop mid-page as soon as the limit is reached.
                if self.__limit and len(self._elements) >= self.__limit:
                    break
            # A short page means the server has no more results.
            if any([
                len(raw_elements) < self.page_size,
                (self.__limit and len(self._elements) >= self.__limit)
            ]):
                break
def clear_optimizer(self):
    """Clean the query optimizer state, recursively for nested scopes."""
    self._optimized = False
    # Reset every per-type lookup cache to an empty mapping.
    for cache_attr in ('_type2decls', '_type2name2decls',
                       '_type2decls_nr', '_type2name2decls_nr'):
        setattr(self, cache_attr, {})
    self._all_decls = None
    self._all_decls_not_recursive = None
    # Propagate the reset into child scopes.
    for declaration in self.declarations:
        if isinstance(declaration, scopedef_t):
            declaration.clear_optimizer()
def declallfuncs(self):
    """Generator over all function declarations in the body."""
    for node in self.body:
        ctype = getattr(node, '_ctype', None)
        if ctype is not None and isinstance(ctype, FuncType):
            yield node
def fit(self, train_X, train_Y, val_X=None, val_Y=None, graph=None):
    """Fit the model to the data.

    Parameters
    ----------
    train_X : array_like, shape (n_samples, n_features)
        Training data.
    train_Y : array_like, shape (n_samples, n_classes)
        Training labels; must be one-hot encoded (2-D).
    val_X : array_like, optional (default = None)
        Validation data.
    val_Y : array_like, optional (default = None)
        Validation labels.
    graph : tf.Graph, optional (default = None)
        TensorFlow Graph object; falls back to ``self.tf_graph``.
    """
    # One-hot labels are 2-D; a 1-D label vector is rejected.
    if len(train_Y.shape) != 1:
        num_classes = train_Y.shape[1]
    else:
        raise Exception("Please convert the labels with one-hot encoding.")
    g = graph if graph is not None else self.tf_graph
    with g.as_default():
        self.build_model(train_X.shape[1], num_classes)
        with tf.Session() as self.tf_session:
            # init_tf_ops returns (merged summaries, summary writer, saver).
            summary_objs = tf_utils.init_tf_ops(self.tf_session)
            self.tf_merged_summaries = summary_objs[0]
            self.tf_summary_writer = summary_objs[1]
            self.tf_saver = summary_objs[2]
            self._train_model(train_X, train_Y, val_X, val_Y)
            # Persist the trained model to self.model_path.
            self.tf_saver.save(self.tf_session, self.model_path)
def add_input_variable(self, var):
    """Add *var* to this node's list of input variables.

    Args:
        var: the variable to register; must be a Variable instance.
    """
    # NOTE(review): `assert` is stripped under -O; raise TypeError if
    # this validation must survive optimized runs.
    assert(isinstance(var, Variable))
    self.input_variable_list.append(var)
def chomp_protocol(url):
    """Return clean VCS url from RFC-style url.

    :param url: url
    :type url: str
    :rtype: str
    :returns: url as VCS software would accept it
    :seealso: #14
    """
    # Strip a VCS prefix such as "git+" before the real scheme.
    if '+' in url:
        url = url.split('+', 1)[1]
    scheme, netloc, path, query, frag = urlparse.urlsplit(url)
    rev = None
    # A trailing "@rev" selects a revision; drop it from the path.
    if '@' in path:
        path, rev = path.rsplit('@', 1)
    url = urlparse.urlunsplit((scheme, netloc, path, query, ''))
    if url.startswith('ssh://git@github.com/'):
        url = url.replace('ssh://', 'git+ssh://')
    elif '://' not in url:
        assert 'file:' not in url
        url = url.replace('git+', 'git+ssh://')
        url = url.replace('ssh://', '')
    return url
def main():
    """Main entry-point for oz's cli.

    Runs the registered actions and converts their return value into a
    process exit code.
    """
    sys.path.append(".")
    oz.initialize()
    retr = optfn.run(list(oz._actions.values()))
    if retr == optfn.ERROR_RETURN_CODE:
        sys.exit(-1)
    elif retr is None:  # fixed: was `retr == None`; identity test is correct
        sys.exit(0)
    elif isinstance(retr, int):
        sys.exit(retr)
    else:
        raise Exception("Unexpected return value from action: %s" % retr)
def command(self, ns, raw, **kw):
    """Execute a replicated database command on the destination.

    Example oplog entry::

        { "op" : "c",
          "ns" : "testdb.$cmd",
          "o" : { "drop" : "fs.files"}
        }
    """
    try:
        # The database name is the namespace part before ".$cmd".
        dbname = raw['ns'].split('.', 1)[0]
        self.dest[dbname].command(raw['o'], check=True)
    # NOTE: Python 2 except syntax; failures are logged, not re-raised.
    except OperationFailure, e:
        logging.warning(e)
def init(opts):
    """Proxy startup hook: validate config and cache connection details.

    Returns False when no host is configured; otherwise records the host
    in the persistent DETAILS dict and resolves working credentials
    (primary first, then the fallback pair if present).
    """
    if 'host' not in opts['proxy']:
        log.critical('No "host" key found in pillar for this proxy')
        return False
    DETAILS['host'] = opts['proxy']['host']
    # NOTE(review): the returned pair is unused here — presumably
    # find_credentials() stores the working credentials in DETAILS;
    # confirm against its definition.
    (username, password) = find_credentials()
async def enable_user(self, username):
    """Re-enable a previously disabled user.

    :param username: name of the user to re-enable
    """
    user_facade = client.UserManagerFacade.from_connection(
        self.connection())
    # The facade API takes a list of user entities.
    entity = client.Entity(tag.user(username))
    return await user_facade.EnableUser([entity])
def get_unread_messages(self,
                        include_me=False,
                        include_notifications=False):
    """Fetch unread messages in this chat.

    :param include_me: if user's messages are to be included
    :type include_me: bool
    :param include_notifications: if events happening on chat are to be included
    :type include_notifications: bool
    :return: list of unread messages
    :rtype: list
    """
    messages = self.driver.get_unread_messages_in_chat(
        self.id, include_me, include_notifications)
    return list(messages)
def btc_tx_witness_strip(tx_serialized):
    """Strip the witness information from a serialized transaction."""
    if not btc_tx_is_segwit(tx_serialized):
        # Non-segwit transactions carry no witness data.
        return tx_serialized
    tx = btc_tx_deserialize(tx_serialized)
    for txin in tx['ins']:
        del txin['witness_script']
    return btc_tx_serialize(tx)
def search(self, **kwargs):
    """Search environments vip using extended search parameters.

    :param search: Dict containing QuerySets to find environments vip.
    :param include: Array containing fields to include on response.
    :param exclude: Array containing fields to exclude on response.
    :param fields: Array containing fields to override default fields.
    :param kind: 'detail' for detailed results or 'basic'.
    :return: Dict containing environments vip
    """
    # kwargs are serialized into the query string by prepare_url.
    return super(ApiEnvironmentVip, self).get(
        self.prepare_url('api/v3/environment-vip/', kwargs))
def _date_from_match(match_object):
year = int(match_object.group("year"))
month = int(match_object.group("month"))
day = int(match_object.group("day"))
return datetime.date(year, month, day) | Create a date object from a regular expression match.
The regular expression match is expected to be from _RE_DATE or
_RE_DATETIME.
@param match_object: The regular expression match.
@type match_object: B{re}.I{MatchObject}
@return: A date object.
@rtype: B{datetime}.I{date} |
def get_account(self, address, token_type):
    """Get the state of an account for a given token type.

    Args:
        address: account address to look up.
        token_type: token type whose balance/state is requested.
    """
    cur = self.db.cursor()
    return namedb_get_account(cur, address, token_type)
def make_spondaic(self, scansion: str) -> str:
    """If a pentameter line has 12 syllables, it must start with double spondees.

    :param scansion: a string of scansion patterns
    :return: a scansion pattern string starting with two spondees

    >>> print(PentameterScanner().make_spondaic("U U U U U U U U U U U U"))
    - - - - - - U U - U U U
    """
    # Remember where each mark sits so spacing can be reconstructed.
    mark_list = string_utils.mark_list(scansion)
    vals = list(scansion.replace(" ", ""))
    # Force the spondaic opening, keeping only the final original mark.
    new_vals = self.SPONDAIC_PENTAMETER[:-1] + vals[-1]
    corrected = "".join(new_vals)
    # Re-project the corrected marks onto the original character columns.
    new_line = list(" " * len(scansion))
    for idx, car in enumerate(corrected):
        new_line[mark_list[idx]] = car
    return "".join(new_line)
def imagej_metadata(self):
    """Return consolidated ImageJ metadata as dict, or None for non-ImageJ files."""
    if not self.is_imagej:
        return None
    page = self.pages[0]
    # Parse the ImageJ description string of the first page.
    result = imagej_description_metadata(page.is_imagej)
    if 'IJMetadata' in page.tags:
        # Merge the binary IJMetadata tag; best-effort, malformed tags
        # are deliberately ignored.
        try:
            result.update(page.tags['IJMetadata'].value)
        except Exception:
            pass
    return result
def _import(func):
    """Return the namespace path to *func* as resolvable from this module.

    Tries, in order: the bare function name, the fully qualified module
    path, the shortest importable submodule suffix, and finally any
    module alias bound in this module's globals.
    """
    func_name = func.__name__
    # Already visible by its bare name.
    if func_name in globals():
        return func_name
    module_name = func.__module__
    submodules = module_name.split('.')
    # Top-level package imported: use the full dotted path.
    if submodules[0] in globals():
        return module_name + '.' + func_name
    # Some suffix of the module path may be bound here.
    for i in range(len(submodules)):
        m = submodules[i]
        if m in globals():
            return '.'.join(submodules[i:]) + '.' + func_name
    # Fall back to scanning globals for an alias of the module object.
    module_ref = sys.modules[func.__module__]
    all_globals = globals()
    for n in all_globals:
        if all_globals[n] == module_ref:
            return n + '.' + func_name
    return func_name
def connect(dsn=None, turbodbc_options=None, connection_string=None, **kwargs):
    """Create a connection to the database identified by ``dsn`` or
    ``connection_string``.

    :param dsn: Data source name as given in the odbc.ini file or the
        ODBC Data Source Administrator tool.
    :param turbodbc_options: Options controlling turbodbc behavior;
        defaults to ``make_options()``.
    :param connection_string: Preformatted ODBC connection string.
        Specifying this together with dsn or kwargs raises ParameterError.
    :param \**kwargs: Additional options merged into the connection string.
    :return: A connection to your database
    """
    if turbodbc_options is None:
        turbodbc_options = make_options()

    have_dsn_or_kwargs = dsn is not None or len(kwargs) > 0
    if connection_string is not None and have_dsn_or_kwargs:
        raise ParameterError("Both connection_string and dsn or kwargs specified")

    if connection_string is None:
        connection_string = _make_connection_string(dsn, **kwargs)

    return Connection(intern_connect(connection_string, turbodbc_options))
def _send_and_receive(self, target, lun, netfn, cmdid, payload):
    """Send an IPMI request over the aardvark interface and return the reply.

    target: addressed IPMB target
    lun: responder LUN
    netfn: network function code
    cmdid: command id
    payload: IPMI message payload as bytestring

    Returns the received data as bytestring (header and checksum stripped).
    """
    self._inc_sequence_number()
    # Build the IPMB request header for this transaction.
    header = IpmbHeaderReq()
    header.netfn = netfn
    header.rs_lun = lun
    header.rs_sa = target.ipmb_address
    header.rq_seq = self.next_sequence_number
    header.rq_lun = 0
    header.rq_sa = self.slave_address
    header.cmd_id = cmdid
    # Retry the raw send/receive on timeout, up to max_retries times.
    retries = 0
    while retries < self.max_retries:
        try:
            self._send_raw(header, payload)
            rx_data = self._receive_raw(header)
            break
        except IpmiTimeoutError:
            # NOTE(review): trailing comma makes this statement a tuple;
            # harmless, but likely unintended.
            log().warning('I2C transaction timed out'),
            retries += 1
    else:
        # Loop exhausted without a successful receive.
        raise IpmiTimeoutError()
    # Strip the 5-byte response header and the trailing checksum byte.
    return rx_data.tostring()[5:-1]
def all_simple_bb_paths(self, start_address, end_address):
    """Return a generator of basic-block paths between two addresses."""
    source = self._find_basic_block(start_address).address
    target = self._find_basic_block(end_address).address
    addr_paths = networkx.all_simple_paths(self._graph, source=source, target=target)
    # Translate each address path back into basic-block objects.
    return ([self._bb_by_addr[addr] for addr in path] for path in addr_paths)
def _formatNumbers(self, line):
if sys.version_info < (2, 7):
return line
last_index = 0
try:
last_index = (line.rindex('}') + 1)
end = line[last_index:]
except ValueError:
return line
else:
splitted = re.split("(\d+)", end)
for index, val in enumerate(splitted):
converted = 0
try:
converted = int(val)
except ValueError:
pass
else:
if converted > 1000:
splitted[index] = format(converted, ",d")
return line[:last_index] + ("").join(splitted) | Format the numbers so that there are commas inserted.
For example: 1200300 becomes 1,200,300. |
def authorize_role(self, role, protocol, from_port, to_port, cidr_ip):
    """Authorize access to machines in a given role from a given network.

    Args:
        role: role whose security group is updated.
        protocol: 'tcp' or 'udp'; anything else raises RuntimeError.
        from_port/to_port: port range to open.
        cidr_ip: source network in CIDR notation.
    """
    if (protocol != 'tcp' and protocol != 'udp'):
        raise RuntimeError('error: expected protocol to be tcp or udp '\
            'but got %s' % (protocol))
    self._check_role_name(role)
    role_group_name = self._group_name_for_role(role)
    # Revoke first so re-authorizing an existing rule does not fail.
    self.ec2.revoke_security_group(role_group_name,
                                   ip_protocol=protocol,
                                   from_port=from_port,
                                   to_port=to_port, cidr_ip=cidr_ip)
    self.ec2.authorize_security_group(role_group_name,
                                      ip_protocol=protocol,
                                      from_port=from_port,
                                      to_port=to_port,
                                      cidr_ip=cidr_ip)
def comments(self, extra_params=None):
    """Return all comments in this ticket.

    Args:
        extra_params: optional dict of extra query parameters merged
            over the default pagination settings.
    """
    params = {
        'per_page': settings.MAX_PER_PAGE,
    }
    if extra_params:
        params.update(extra_params)
    # NOTE(review): `space=self` while the path comes from self.space —
    # looks intentional for this API wrapper, but confirm against callers.
    return self.api._get_json(
        TicketComment,
        space=self,
        rel_path=self.space._build_rel_path(
            'tickets/%s/ticket_comments' % self['number']
        ),
        extra_params=params,
        get_all=True,
    )
def print_virt_table(self, data):
    """Print a vertical pretty table built from *data*."""
    table = prettytable.PrettyTable()
    keys = sorted(data)
    table.add_column('Keys', keys)
    table.add_column('Values', [data.get(key) for key in keys])
    # Left-align every column for readability.
    for column in table.align:
        table.align[column] = 'l'
    self.printer(table)
def start(nick, host, port=6667, username=None, password=None, channels=None, use_ssl=False, use_sasl=False,
          char='!', allow_hosts=False, allow_nicks=False, disable_query=True):
    """IRC Bot engine for interacting with salt.

    nick: bot nickname.  host/port: irc server.  password: optional auth.
    channels: channels to join.  use_ssl/use_sasl: connection security.
    char: command prefix character (default '!').
    allow_hosts/allow_nicks: True, False, or list of regexes controlling
    who may issue commands.  disable_query: require commands in channels.
    """
    bot = IRCClient(nick, host, port, username, password, channels or [], use_ssl, use_sasl, char,
                    allow_hosts, allow_nicks, disable_query)
    # Blocks: runs the client's event loop until stopped.
    bot.io_loop.start()
def new(self, rr_name):
    """Create a new Rock Ridge Alternate Name record.

    Parameters:
     rr_name - The name for the new record.
    Returns:
     Nothing.
    """
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('NM record already initialized!')
    self.posix_name_flags = 0
    self.posix_name = rr_name
    self._initialized = True
def _scatter(sequence, n):
chunklen = int(math.ceil(float(len(sequence)) / float(n)))
return [
sequence[ i*chunklen : (i+1)*chunklen ] for i in range(n)
] | Scatters elements of ``sequence`` into ``n`` blocks. |
def _ancestors(
        self, qname: Union[QualName, bool] = None) -> List[InstanceNode]:
    """XPath - return the list of receiver's ancestors.

    Delegates to the parent node; filtering by *qname* happens there.
    """
    return self.up()._ancestors(qname)
def _update_loaded_modules(self):
system_modules = sys.modules.keys()
for module in list(self.loaded_modules):
if module not in system_modules:
self.processed_filepaths.pop(module)
self.loaded_modules.remove(module) | Updates the loaded modules by checking if they are still in sys.modules |
def generate_documentation(self, app_name, **kwargs):
    """Generate Markdown documentation for this specification.

    Args:
        app_name (str): The name of your application.

    Keyword Args:
        output_file_name (str): If provided, will write to this file.
        encoding (str): Encoding for the output file (default utf-8).

    Returns:
        A string representation of the documentation.
    """
    doc_string = generate_markdown_doc(app_name, self)
    output_file = kwargs.get('output_file_name')
    if output_file:
        encoding = kwargs.get('encoding', 'utf-8')
        with open(output_file, 'w', encoding=encoding) as doc_file:
            doc_file.write(doc_string)
    return doc_string
def list_orgs(self):
    """Return the orgs configured in the keychain, sorted by name."""
    return sorted(self.orgs)
def cli(ctx, board, scons, project_dir, sayyes):
    """Manage apio projects.

    Creates a SConstruct when *scons* is set, an apio.ini when *board*
    is given, and prints the command help otherwise.
    """
    if scons:
        Project().create_sconstruct(project_dir, sayyes)
    elif board:
        Project().create_ini(board, project_dir, sayyes)
    else:
        click.secho(ctx.get_help())
def extract_wav(datafile, target=None):
    """Get LPCM 16-bit audio stream from a media file.

    If `target` is a directory, create a .wav with the same basename as
    the input. If target is empty, write to the directory of the source
    file. Otherwise use it directly as the target filename. A .wav input
    is returned unchanged.
    """
    if datafile.endswith(".wav"):
        return datafile
    target = target or os.path.dirname(datafile)
    if os.path.isdir(target):
        base = os.path.splitext(os.path.basename(datafile))[0] + ".wav"
        target = os.path.join(target, base)
    if datafile.endswith(".flac"):
        cmd = [config.CMD_FLAC, "--silent", "--decode", "--force", "-o", target, datafile]
    else:
        cmd = [config.CMD_FFMPEG, "-v", "0", "-y", "-i", datafile, "-acodec", "pcm_s16le", "-ac", "2", target]
    # subprocess.DEVNULL replaces the original's leaked open(os.devnull)
    # file handle, which was never closed.
    subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
    return target
def wait(self, timeout=-1):
    """Wait for the child to exit.

    Wait for at most *timeout* seconds, or indefinitely if *timeout* is
    None; -1 means use the instance default. Return the value of the
    :attr:`returncode` attribute.
    """
    if self._process is None:
        raise RuntimeError('no child process')
    effective_timeout = self._timeout if timeout == -1 else timeout
    if not self._child_exited.wait(effective_timeout):
        raise Timeout('timeout waiting for child to exit')
    return self.returncode
def define_vol_xml_str(xml, **kwargs):
    """Define a volume based on the XML passed to the function.

    :param xml: libvirt XML definition of the storage volume
    :param connection: libvirt connection URI, overriding defaults
    :param username: username to connect with, overriding defaults
    :param password: password to connect with, overriding defaults

    The storage pool defaults to ``default`` and can be set via the
    ``virt:storagepool`` configuration key.

    CLI Example:

    .. code-block:: bash

        salt '*' virt.define_vol_xml_str <XML in string format>
    """
    # Prefer the legacy 'libvirt:storagepool' key if set, but warn that
    # it is deprecated in favor of 'virt:storagepool'.
    poolname = __salt__['config.get']('libvirt:storagepool', None)
    if poolname is not None:
        salt.utils.versions.warn_until(
            'Sodium',
            '\'libvirt:storagepool\' has been deprecated in favor of '
            '\'virt:storagepool\'. \'libvirt:storagepool\' will stop '
            'being used in {version}.'
        )
    else:
        poolname = __salt__['config.get']('virt:storagepool', 'default')
    conn = __get_conn(**kwargs)
    pool = conn.storagePoolLookupByName(six.text_type(poolname))
    # createXML returns the new volume object; success is non-None.
    ret = pool.createXML(xml, 0) is not None
    conn.close()
    return ret
def on_packet(self, packet_type):
    """Decorator: register a function as the handler for ``packet_type``."""
    def register(fn):
        return self.callbacks.register(packet_type, fn)
    return register
def ask(question):
    """Prompt in a loop until the user answers yes, no, or quit.

    Returns True for yes, False for no; 'q'/'quit' exits the script.
    """
    while True:
        ans = input(question)
        al = ans.lower()
        if match('^y(es)?$', al):
            return True
        elif match('^n(o)?$', al):
            return False
        elif match('^q(uit)?$', al):
            # Quit path: print a colored goodbye and terminate.
            stdout.write(CYAN)
            print("\nGoodbye.\n")
            stdout.write(RESET)
            quit()
        else:
            # Invalid input: warn in red and re-prompt.
            stdout.write(RED)
            print("%s is invalid. Enter (y)es, (n)o or (q)uit." % ans)
            stdout.write(RESET)
def auto_memoize(func):
    """Memoize an instance method for the lifetime of its object.

    Based on django.util.functional.memoize. Only works with methods
    taking non-keyword, hashable arguments; the first argument MUST be
    self (the cache is stored on the instance). Not suitable for plain
    functions or classmethods.
    """
    @wraps(func)
    def wrapper(*args):
        inst = args[0]
        try:
            cache = inst._memoized_values
        except AttributeError:
            cache = inst._memoized_values = {}
        key = (func, args[1:])
        try:
            return cache[key]
        except KeyError:
            cache[key] = func(*args)
            return cache[key]
    return wrapper
def is_pointer(self, address):
    """Determine if an address is a valid code or data pointer.

    That is, the address must be valid and must point to code or data
    in the target process.

    @param address: Memory address to query.
    @return: True if the address is a valid code or data pointer.
    @raise WindowsError: raised on errors other than invalid parameter.
    """
    try:
        mbi = self.mquery(address)
    except WindowsError:
        e = sys.exc_info()[1]
        # ERROR_INVALID_PARAMETER means the address is simply not mapped.
        if e.winerror == win32.ERROR_INVALID_PARAMETER:
            return False
        raise
    return mbi.has_content()
def _set_data(self, action):
    """Set member data from an API response for the given *action*.

    Updates both self.data and self.params with the extracted fields.
    """
    data = self._load_response(action)
    # Follow pagination continuations for category queries.
    self._handle_continuations(data, 'category')
    if action == 'category':
        members = data.get('query').get('categorymembers')
        if members:
            self._add_members(members)
    if action == 'random':
        # A random query returns one entry; normalize to pageid/title.
        rand = data['query']['random'][0]
        data = {'pageid': rand.get('id'),
                'title': rand.get('title')}
    self.data.update(data)
    self.params.update(data)
def save_ical(self, ical_location):
    """Save the calendar instance to a file.

    The bytes returned by ``to_ical()`` are written directly in binary
    mode. The original decoded to text and wrote with the platform
    default encoding, which corrupts non-ASCII output on some systems.
    """
    data = self.cal.to_ical()
    with open(ical_location, 'wb') as ical_file:
        ical_file.write(data)
def create_site(sitename):
    """Create a new site directory under CWD and initialize Yass in it.

    Prints an error and does nothing when the directory already exists.
    """
    sitepath = os.path.join(CWD, sitename)
    if os.path.isdir(sitepath):
        print("Site directory '%s' exists already!" % sitename)
    else:
        print("Creating site: %s..." % sitename)
        os.makedirs(sitepath)
        # Seed the new directory from the bundled skeleton resources.
        copy_resource("skel/", sitepath)
        stamp_yass_current_version(sitepath)
        print("Site created successfully!")
        print("CD into '%s' and run 'yass serve' to view the site" % sitename)
    footer()
def output(self):
    """Return the simulated model output (computing the simulation lazily).

    Only the columns listed in ``system.output_vars`` are returned; for
    network models the result is reshaped by the system.
    """
    if self._timeseries is None:
        # Run the simulation on first access.
        self.compute()
    output = self._timeseries[:, self.system.output_vars]
    if isinstance(self.system, NetworkModel):
        return self.system._reshape_output(output)
    else:
        return output
def rebase(upstream, branch=None):
    """Rebase *branch* (default: the current branch) onto *upstream*.

    Returns True when the rebase applied commits.
    """
    # Fixed idiom: `branch and branch or current_branch()` was the
    # pre-ternary and/or trick; a plain `or` is equivalent and clearer.
    rebase_branch = branch or current_branch()
    with git_continuer(run, 'rebase --continue', no_edit=True):
        stdout = run('rebase %s %s' % (upstream, rebase_branch))
    return 'Applying' in stdout
def num_plates_ET(q_plant, W_chan):
    """Return the number of plates in the entrance tank.

    This number minimizes the total length of the plate settler unit.

    Parameters
    ----------
    q_plant: float
        Plant flow rate
    W_chan: float
        Width of channel

    Returns
    -------
    float
        Number of plates (rounded up)

    Examples
    --------
    >>> from aguaclara.play import*
    >>> num_plates_ET(20*u.L/u.s,2*u.m)
    1.0
    """
    # Optimal plate count from capture velocity, plate spacing and angle;
    # magnitudes are used so the expression is unit-free.
    num_plates = np.ceil(np.sqrt(q_plant / (design.ent_tank.CENTER_PLATE_DIST.magnitude
                                 * W_chan * design.ent_tank.CAPTURE_BOD_VEL.magnitude * np.sin(
                                 design.ent_tank.PLATE_ANGLE.to(u.rad).magnitude))))
    return num_plates
def start_waiting(self):
    """Show the waiting progress bar until done_waiting is called.

    Has no effect when already in the waiting state.
    """
    if self.waiting:
        return
    self.waiting = True
    self.progress_bar.show_waiting(
        "Waiting for project to become ready for {}".format(self.msg_verb))
def shard_id(self):
    """Returns the shard ID for this guild if applicable."""
    count = self._state.shard_count
    # Discord derives the shard from the high bits of the snowflake id.
    return None if count is None else (self.id >> 22) % count
def add_op_create_erasure_pool(self, name, erasure_profile=None,
                               weight=None, group=None, app_name=None,
                               max_bytes=None, max_objects=None):
    """Add an operation to create an erasure coded pool.

    :param name: Name of pool to create
    :param erasure_profile: Erasure code profile; the ceph-mon unit
        supplies its default when unset.
    :param weight: Expected fraction (percent) of total OSD space.
    :param group: Group to add pool to.
    :param app_name: Optional application name tag (e.g. ``rbd``, ``rgw``).
    :param max_bytes: Maximum bytes quota to apply.
    :param max_objects: Maximum objects quota to apply.
    """
    op = {
        'op': 'create-pool',
        'name': name,
        'pool-type': 'erasure',
        'erasure-profile': erasure_profile,
        'weight': weight,
        'group': group,
        'app-name': app_name,
        'max-bytes': max_bytes,
        'max-objects': max_objects,
    }
    self.ops.append(op)
def stop(self):
    """Stop all tasks, and the local proxy server if it's running."""
    self._done()
    server = self._server
    if server:
        server.stop()
        self._server = None
    log.info('Stop!')
def get_var_properties(self):
    """Get some properties of the variables in the current namespace.

    Returns the repr of a dict mapping variable names to display hints
    (type flags, length, array shape), or repr(None) when the namespace
    view settings are unset.
    """
    from spyder_kernels.utils.nsview import get_remote_data
    settings = self.namespace_view_settings
    if settings:
        ns = self._get_current_namespace()
        data = get_remote_data(ns, settings, mode='editable',
                               more_excluded_names=EXCLUDED_NAMES)
        properties = {}
        for name, value in list(data.items()):
            # Per-variable flags the frontend uses to pick an editor/view.
            properties[name] = {
                'is_list': isinstance(value, (tuple, list)),
                'is_dict': isinstance(value, dict),
                'is_set': isinstance(value, set),
                'len': self._get_len(value),
                'is_array': self._is_array(value),
                'is_image': self._is_image(value),
                'is_data_frame': self._is_data_frame(value),
                'is_series': self._is_series(value),
                'array_shape': self._get_array_shape(value),
                'array_ndim': self._get_array_ndim(value)
            }
        # repr() because the result travels over the kernel wire protocol.
        return repr(properties)
    else:
        return repr(None)
def local_bind_hosts(self):
    """Return the list of IP addresses the tunnels are listening on."""
    self._check_is_started()
    return [srv.local_host for srv in self._server_list
            if srv.local_host is not None]
def get_signature_names(self):
    """Return a list of the signature file names, or None when absent."""
    pattern = re.compile("^(META-INF/)(.*)(\.RSA|\.EC|\.DSA)$")
    signatures = [name for name in self.get_files() if pattern.search(name)]
    return signatures if signatures else None
def batch_stream(buff, stream, size=DEFAULT_BATCH_SIZE):
    """Write a batch of up to `size` lines from *stream* into *buff*.

    Returns True when the stream has been exhausted, False otherwise.
    The buffer is truncated first and rewound to position 0 on return.
    """
    buff.truncate(0)
    # NOTE: xrange means this row is Python 2 code.
    for _ in xrange(size):
        if hasattr(stream, 'readline'):
            line = stream.readline()
        else:
            # Fall back to iterator protocol for non-file streams.
            try:
                line = next(stream)
            except StopIteration:
                line = ''
        if line == '':
            # Empty read signals end of stream.
            buff.seek(0)
            return True
        buff.write(line)
    buff.seek(0)
    return False
def _setattr_default(obj, attr, value, default):
if value is None:
setattr(obj, attr, default)
else:
setattr(obj, attr, value) | Set an attribute of an object to a value or default value. |
def replace_markdown_cells(src, dst):
    """Overwrite markdown cells in notebook object `dst` with corresponding
    cells in notebook object `src`.

    Args:
        src: Source notebook (a dict with a 'cells' list).
        dst: Destination notebook; its markdown cells are replaced in place.

    Raises:
        ValueError: if the notebooks differ in cell count, or if any pair
            of corresponding cells differs in type.
    """
    if len(src['cells']) != len(dst['cells']):
        raise ValueError('notebooks do not have the same number of cells')
    for n, (src_cell, dst_cell) in enumerate(zip(src['cells'], dst['cells'])):
        if src_cell['cell_type'] != dst_cell['cell_type']:
            # Bug fix: the original message contained a bare '%d' that was
            # never interpolated with the cell number.
            raise ValueError(
                'cell number %d of different type in src and dst' % n)
        if src_cell['cell_type'] == 'markdown':
            dst_cell['source'] = src_cell['source']
def transitive_reduction(G):
    """Returns a transitive reduction of a graph. The original graph
    is not modified.

    A transitive reduction H of G has a path from x to y if and
    only if there was a path from x to y in G. Deleting any edge
    of H destroys this property. A transitive reduction is not
    unique in general. A transitive reduction has the same
    transitive closure as the original graph.

    A transitive reduction of a complete graph is a tree. A
    transitive reduction of a tree is itself.

    >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 4)])
    >>> H = transitive_reduction(G)
    >>> H.edges()
    [(1, 2), (2, 3), (3, 4)]
    """
    # NOTE(review): edges_iter() and the positional attr-dict form of
    # add_edge() are networkx 1.x APIs; this function will not run on
    # networkx >= 2 without changes.
    H = G.copy()
    for a, b, w in G.edges_iter(data=True):
        # Tentatively drop the edge; keep it removed only if another path
        # still connects a to b (i.e. the edge was redundant).
        H.remove_edge(a, b)
        if not nx.has_path(H, a, b):
            H.add_edge(a, b, w)
    return H
def save_initial_state(self):
    """Save initial cursors and initial active widget."""
    self.initial_widget = self.get_widget()
    self.initial_cursors = {}
    for index, editor in enumerate(self.widgets):
        path = self.paths[index]
        if editor is self.initial_widget:
            self.initial_path = path
        try:
            # Widgets without a text cursor (non-editor panes) are skipped.
            self.initial_cursors[path] = editor.textCursor()
        except AttributeError:
            pass
def extract_backup_bundle(self, resource, timeout=-1):
    """Extracts the existing backup bundle on the appliance and creates all
    the artifacts.

    Args:
        resource (dict): Deployment Group to extract.
        timeout:
            Timeout in seconds. Waits for task completion by default. The
            timeout does not abort the operation in OneView, it just stops
            waiting for its completion.

    Returns:
        dict: A Deployment Group associated with the Artifact Bundle backup.
    """
    # Delegates to the REST client; the backup-archive URI selects the
    # extract operation on the server side.
    return self._client.update(resource, uri=self.BACKUP_ARCHIVE_PATH, timeout=timeout)
def _dispatch_handler(args, cell, parser, handler, cell_required=False,
cell_prohibited=False):
if cell_prohibited:
if cell and len(cell.strip()):
parser.print_help()
raise Exception(
'Additional data is not supported with the %s command.' % parser.prog)
return handler(args)
if cell_required and not cell:
parser.print_help()
raise Exception('The %s command requires additional data' % parser.prog)
return handler(args, cell) | Makes sure cell magics include cell and line magics don't, before
dispatching to handler.
Args:
args: the parsed arguments from the magic line.
cell: the contents of the cell, if any.
parser: the argument parser for <cmd>; used for error message.
handler: the handler to call if the cell present/absent check passes.
cell_required: True for cell magics, False for line magics that can't be
cell magics.
cell_prohibited: True for line magics, False for cell magics that can't be
line magics.
Returns:
The result of calling the handler.
Raises:
Exception if the invocation is not valid. |
def create(
    name,
    create_file,
    open_file,
    remove_file,
    create_directory,
    list_directory,
    remove_empty_directory,
    temporary_directory,
    stat,
    lstat,
    link,
    readlink,
    realpath=_realpath,
    remove=_recursive_remove,
):
    """Create a new kind of filesystem.

    Builds a filesystem class named ``name`` whose methods delegate to the
    supplied primitive operations, with the shared module-level helpers
    (``_get_contents``, ``_exists``, ``_touch``, ...) layered on top.

    Args:
        name (str): Name of the generated class.
        create_file, open_file, remove_file: File primitives.
        create_directory, list_directory, remove_empty_directory,
        temporary_directory: Directory primitives.
        stat, lstat, link, readlink: Metadata and link primitives.
        realpath: Path canonicalization; defaults to ``_realpath``.
        remove: Recursive removal; defaults to ``_recursive_remove``.

    Returns:
        type: An ``attr.s``-decorated class implementing the filesystem API.
    """
    methods = dict(
        create=create_file,
        # Wrap open_file so `mode` is optional and everything is passed
        # by keyword.
        open=lambda fs, path, mode="r": open_file(
            fs=fs, path=path, mode=mode,
        ),
        remove_file=remove_file,
        create_directory=create_directory,
        list_directory=list_directory,
        remove_empty_directory=remove_empty_directory,
        temporary_directory=temporary_directory,
        get_contents=_get_contents,
        set_contents=_set_contents,
        create_with_contents=_create_with_contents,
        remove=remove,
        removing=_removing,
        stat=stat,
        lstat=lstat,
        link=link,
        readlink=readlink,
        realpath=realpath,
        exists=_exists,
        is_dir=_is_dir,
        is_file=_is_file,
        is_link=_is_link,
        touch=_touch,
        children=_children,
        glob_children=_glob_children,
    )
    # Build the class dynamically, then decorate it with attr.s so that
    # instances behave as (hashable) attrs value objects.
    return attr.s(hash=True)(type(name, (object,), methods))
def vrfs_get(self, subcommand='routes', route_dist=None,
             route_family='all', format='json'):
    """This method returns the existing vrfs.

    ``subcommand`` specifies one of the following.

    - 'routes': shows routes present for vrf
    - 'summary': shows configuration and summary of vrf

    ``route_dist`` specifies a route distinguisher value.
    Required when ``route_family`` is not 'all'.

    ``route_family`` specifies route family of the VRF:
    'ipv4', 'ipv6', 'evpn' or 'all' (default).

    ``format`` specifies the format of the response:
    'json' (default) or 'cli'.
    """
    if route_family in SUPPORTED_VRF_RF:
        # A specific route family requires an explicit route distinguisher.
        assert route_dist is not None
        params = ['vrf', subcommand, route_dist, route_family]
    else:
        params = ['vrf', subcommand, 'all']
    return call('operator.show', format=format, params=params)
def _before_request(self):
    """A function to be run before each request.

    Starts a SERVER span for the incoming request, continuing any trace
    context propagated in the request headers, and records the HTTP method
    and URL as span attributes.

    See: http://flask.pocoo.org/docs/0.12/api/#flask.Flask.before_request
    """
    # Skip tracing for explicitly blacklisted paths (e.g. health checks).
    if utils.disable_tracing_url(flask.request.url, self.blacklist_paths):
        return
    try:
        # Continue a distributed trace if the caller sent span context.
        span_context = self.propagator.from_headers(flask.request.headers)
        tracer = tracer_module.Tracer(
            span_context=span_context,
            sampler=self.sampler,
            exporter=self.exporter,
            propagator=self.propagator)
        span = tracer.start_span()
        span.span_kind = span_module.SpanKind.SERVER
        span.name = '[{}]{}'.format(
            flask.request.method,
            flask.request.url)
        tracer.add_attribute_to_current_span(
            HTTP_METHOD, flask.request.method)
        tracer.add_attribute_to_current_span(
            HTTP_URL, str(flask.request.url))
        execution_context.set_opencensus_attr(
            'blacklist_hostnames',
            self.blacklist_hostnames)
    except Exception:
        # Tracing must never break request handling: log and continue.
        log.error('Failed to trace request', exc_info=True)
def atmospheric_station_pressure(self, value=999999):
    """Corresponds to IDD Field `atmospheric_station_pressure`

    Args:
        value (int): value for IDD Field `atmospheric_station_pressure`
            Unit: Pa
            value > 31000
            value < 120000
            Missing value: 999999
            if `value` is None or the missing value (999999) it is not
            checked against the specification

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type int '
                'for field `atmospheric_station_pressure`'.format(value))
        # Bug fix: 999999 is the documented "missing" sentinel (and the
        # default argument); it must bypass the range check, otherwise
        # calling the setter with the default always raised.
        if value != 999999:
            if value <= 31000:
                raise ValueError('value need to be greater 31000 '
                                 'for field `atmospheric_station_pressure`')
            if value >= 120000:
                raise ValueError('value need to be smaller 120000 '
                                 'for field `atmospheric_station_pressure`')
    self._atmospheric_station_pressure = value
def _loadSources(self):
    """Create a trigdict and populate it with data from
    self.authorityFiles.
    """
    self.confstems = {}
    self.sourceDict = newtrigdict.Trigdict()
    for file_name in self.authorityFiles:
        self._loadOneSource(file_name)
    for stem in self.sourceDict.values():
        # Register each stem under its normalized (punctuation-free,
        # upper-cased) form as well.
        self._addPub(stem, stem.replace(".", "").upper())
async def set_state(self, parameter):
    """Set switch to desired state.

    Sends the command to the device and, once acknowledged, records the
    new parameter and notifies observers.

    Raises:
        PyVLXException: if the device did not acknowledge the command.
    """
    command_send = CommandSend(pyvlx=self.pyvlx, node_id=self.node_id, parameter=parameter)
    await command_send.do_api_call()
    if not command_send.success:
        raise PyVLXException("Unable to send command")
    # Only record the new state after the device confirmed the command.
    self.parameter = parameter
    await self.after_update()
def get_handlerecord_indices_for_key(self, key, list_of_entries):
    """Finds the Handle entry indices of all entries that have a specific
    type.

    *Important:* It finds the Handle System indices! These are not the
    python indices of the list, so they can not be used for iteration.

    :param key: The key (Handle Record type).
    :param list_of_entries: A list of the existing entries in which to find
        the indices.
    :return: A list of strings, the indices of the entries of type "key" in
        the given handle record.
    """
    LOGGER.debug('get_handlerecord_indices_for_key...')
    return [entry['index'] for entry in list_of_entries
            if entry['type'] == key]
def exclude_package(cls, package_name=None, recursive=False):
    """Excludes the given fully qualified package name from shading.

    :param unicode package_name: A fully qualified package_name; eg:
        `org.pantsbuild`; `None` for the java default (root) package.
    :param bool recursive: `True` to exclude any package with
        `package_name` as a proper prefix; `False` by default.
    :returns: A `Shader.Rule` describing the shading exclusion.
    """
    if package_name:
        return Shading.create_exclude_package(package_name, recursive=recursive)
    # Root package: exclude everything at the top level (or, recursively,
    # everything).
    return Shading.create_exclude('**' if recursive else '*')
def du(*components, **kwargs):
    """Get the size of a file in bytes or as a human-readable string.

    Arguments:
        *components (str[]): Path to file.
        **kwargs: If "human_readable" is True, return a formatted string,
            e.g. "976.6 KiB" (default True).

    Returns:
        int or str: If "human_readable" kwarg is True, return str, else int.

    Raises:
        Error: if the file does not exist.
    """
    target = path(*components)
    if not exists(target):
        raise Error("file '{}' not found".format(target))
    byte_count = os.stat(target).st_size
    if kwargs.get("human_readable", True):
        return naturalsize(byte_count)
    return byte_count
def anchorCompute(self, anchorInput, learn):
    """Compute the
    "sensor's location relative to a specific object"
    from the feature-location pair.

    @param anchorInput (numpy array)
    Active cells in the feature-location pair layer

    @param learn (bool)
    If true, maintain current cell activity and learn this input on the
    currently active cells
    """
    if learn:
        self._anchorComputeLearningMode(anchorInput)
    else:
        # Inference: find segments whose overlap with the anchor input
        # reaches the activation threshold, then activate the (deduplicated)
        # cells those segments belong to.
        overlaps = self.anchorConnections.computeActivity(
            anchorInput, self.connectedPermanence)
        self.activeSegments = np.where(overlaps >= self.activationThreshold)[0]
        self.activeCells = np.unique(
            self.anchorConnections.mapSegmentsToCells(self.activeSegments))
def value_to_bytes(self, obj, value, default_endianness=DEFAULT_ENDIANNESS):
    """Converts the given value to an appropriately encoded string of bytes
    that represents it.

    :param obj: The parent :class:`.PebblePacket` of this field
    :type obj: .PebblePacket
    :param value: The python value to serialise.
    :param default_endianness: The default endianness of the value. Used if
        ``endianness`` was not passed to the :class:`Field` constructor.
    :type default_endianness: str
    :return: The serialised value
    :rtype: bytes
    """
    # Field-level endianness wins; otherwise fall back to the default.
    endianness = self.endianness or default_endianness
    return struct.pack(str(endianness) + self.struct_format, value)
def resolve(self, key, is_local):
    """Resolve a variable name in a possibly local context.

    Parameters
    ----------
    key : str
        A variable name
    is_local : bool
        Flag indicating whether the variable is local or not (prefixed with
        the '@' symbol)

    Returns
    -------
    value : object
        The value of a particular variable
    """
    # Pick the namespace to consult: local scope for '@'-prefixed names,
    # resolvers when available, otherwise the plain scope.
    if is_local:
        namespace = self.scope
    elif self.has_resolvers:
        namespace = self.resolvers
    else:
        namespace = self.scope
    try:
        return namespace[key]
    except KeyError:
        try:
            # Fall back to temporaries created during evaluation.
            return self.temps[key]
        except KeyError:
            raise compu.ops.UndefinedVariableError(key, is_local)
def _GetCurrentControlSet(self, key_path_suffix):
select_key_path = 'HKEY_LOCAL_MACHINE\\System\\Select'
select_key = self.GetKeyByPath(select_key_path)
if not select_key:
return None
control_set = None
for value_name in ('Current', 'Default', 'LastKnownGood'):
value = select_key.GetValueByName(value_name)
if not value or not value.DataIsInteger():
continue
control_set = value.GetDataAsObject()
if control_set > 0 or control_set <= 999:
break
if not control_set or control_set <= 0 or control_set > 999:
return None
control_set_path = 'HKEY_LOCAL_MACHINE\\System\\ControlSet{0:03d}'.format(
control_set)
key_path = ''.join([control_set_path, key_path_suffix])
return self.GetKeyByPath(key_path) | Virtual key callback to determine the current control set.
Args:
key_path_suffix (str): current control set Windows Registry key path
suffix with leading path separator.
Returns:
WinRegistryKey: the current control set Windows Registry key or None
if not available. |
def notebook_authenticate(cmd_args, force=False, silent=True):
    """Similar to authenticate but prints student emails after all calls and
    uses a different way to get codes.

    If SILENT is True, it will suppress the refresh error and retry with
    FORCE=True instead of raising.
    """
    server = server_url(cmd_args)
    network.check_ssl()
    access_token = None
    if not force:
        try:
            # Prefer a cached token so the user is not prompted again.
            access_token = refresh_local_token(server)
        except OAuthException as e:
            if not silent:
                raise e
            # Refresh failed: fall back to a full (forced) login; errors in
            # the retry are no longer suppressed.
            return notebook_authenticate(cmd_args, force=True, silent=False)
    if not access_token:
        # Interactive OAuth flow: the user pastes a code into the terminal.
        access_token = perform_oauth(
            get_code_via_terminal,
            cmd_args,
            copy_msg=NOTEBOOK_COPY_MESSAGE,
            paste_msg=NOTEBOOK_PASTE_MESSAGE)
    email = display_student_email(cmd_args, access_token)
    if email is None and not force:
        # The token may be stale; retry once with a forced re-login.
        return notebook_authenticate(cmd_args, force=True)
    elif email is None:
        log.warning('Could not get login email. You may have been logged out. '
                    ' Try logging in again.')
    return access_token
def strip_glob(string, split_str=' '):
    """Strip glob portion in `string`.

    >>> strip_glob('*glob*like')
    'glob like'
    >>> strip_glob('glob?')
    'glo'
    >>> strip_glob('glob[seq]')
    'glob'
    >>> strip_glob('glob[!seq]')
    'glob'

    :type string: str
    :rtype: str
    """
    return _GLOB_PORTION_RE.sub(split_str, string).strip()
def _add(self, name, *args, **kw):
argname = list(self.argdict)[self._argno]
if argname != name:
raise NameError(
'Setting argument %s, but it should be %s' % (name, argname))
self._group.add_argument(*args, **kw)
self.all_arguments.append((args, kw))
self.names.append(name)
self._argno += 1 | Add an argument to the underlying parser and grow the list
.all_arguments and the set .names |
def eval_facet_vars(data, vars, env):
    """Evaluate facet variables.

    Parameters
    ----------
    data : DataFrame
        Facet dataframe
    vars : list
        Facet variables
    env : environment
        Plot environment

    Returns
    -------
    facet_vals : DataFrame
        Facet values that correspond to the specified variables.
    """
    # Expose an identity helper so formulas like I(x*2) work in facet specs.
    env = env.with_outer_namespace({'I': lambda value: value})
    facet_vals = pd.DataFrame(index=data.index)

    for name in vars:
        if name in data:
            # Known column: use it directly.
            facet_vals[name] = data[name]
        elif str.isidentifier(name):
            # A bare identifier not present in the data: nothing to evaluate.
            continue
        else:
            # An expression: evaluate it in the plot environment, skipping
            # names that cannot be resolved.
            try:
                facet_vals[name] = env.eval(name, inner_namespace=data)
            except NameError:
                continue

    return facet_vals
def resize_to_contents(self):
    """Resize cells to contents."""
    # Show a busy cursor: fetching and resizing can take a while.
    QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
    self.resizeColumnsToContents()
    # Loading the remaining columns can change the required widths, so
    # resize again after fetching.
    self.model().fetch_more(columns=True)
    self.resizeColumnsToContents()
    QApplication.restoreOverrideCursor()
def random_dois(self, sample = 10, **kwargs):
    """Get a random set of DOIs.

    :param sample: [Fixnum] Number of random DOIs to return.
        Default: 10. Max: 100
    :param kwargs: additional named arguments passed on to `requests.get`,
        e.g., field queries
    :return: [Array] of DOIs

    Usage::

        from habanero import Crossref
        cr = Crossref()
        cr.random_dois(1)
        cr.random_dois(10)
        cr.random_dois(100)
    """
    # NOTE(review): the positional None slots presumably correspond to
    # unused query parameters of `request`; the lone True enables random
    # sampling — verify against `request`'s signature before changing.
    res = request(self.mailto, self.base_url, "/works/", None,
        None, None, None, None, sample, None,
        None, None, None, True, None, None, None, **kwargs)
    return [ z['DOI'] for z in res['message']['items'] ]
def readlink(path):
    """Return the path that a symlink points to.

    This is only supported on Windows Vista or later.

    Inline with Unix behavior, this function will raise an error if the
    path is not a symlink, however, the error raised will be a
    SaltInvocationError, not an OSError.

    Args:
        path (str): The path to the symlink

    Returns:
        str: The path that the symlink points to

    CLI Example:

    .. code-block:: bash

        salt '*' file.readlink /path/to/link
    """
    # Symlink support requires Windows Vista (NT 6.0) or later.
    if sys.getwindowsversion().major < 6:
        raise SaltInvocationError('Symlinks are only supported on Windows Vista or later.')
    try:
        return salt.utils.path.readlink(path)
    except OSError as exc:
        # EINVAL: the path exists but is not a symbolic link.
        if exc.errno == errno.EINVAL:
            raise CommandExecutionError('{0} is not a symbolic link'.format(path))
        raise CommandExecutionError(exc.__str__())
    except Exception as exc:
        # Translate any other failure into the module's execution error.
        raise CommandExecutionError(exc)
def check_schedule():
    """Helper routine to easily test if the schedule is valid."""
    schedule_items = prefetch_schedule_items()
    for validator, _type, _msg in SCHEDULE_ITEM_VALIDATORS:
        # A validator returning truthy means it found a problem.
        if validator(schedule_items):
            return False
    slots = prefetch_slots()
    return not any(validator(slots)
                   for validator, _type, _msg in SLOT_VALIDATORS)
def delist(values):
    """Reduce lists of zero or one elements to individual values."""
    assert isinstance(values, list)
    if len(values) > 1:
        return values
    if values:
        return values[0]
    return None
def render(self):
    """Render the form and all sections to HTML.

    Returns:
        Markup: the rendered form, safe for direct template inclusion.
    """
    return Markup(env.get_template('form.html').render(form=self,
        render_open_tag=True,
        render_close_tag=True,
        render_before=True,
        render_sections=True,
        render_after=True,
        # CSRF token generation is skipped when CSRF is disabled.
        generate_csrf_token=None if self.disable_csrf else _csrf_generation_function))
async def items(self):
    """Expose all grafts."""
    accumulator = Accumulator()
    # Kick off every graft; join() gathers all their results.
    for graft in load_grafts():
        accumulator.spawn(graft())
    return (await accumulator.join()).items()
def query(self, value):
    """Serialize an ORM query, Base-64 encode it and set it to the
    b64_query field.

    Args:
        value (Q): A Django ``Q`` object describing the query.

    Raises:
        TypeError: if `value` is not a Django ``Q`` object.
    """
    if not isinstance(value, Q):
        # TypeError is the idiomatic exception for a wrong argument type
        # (was a bare Exception); existing `except Exception` handlers
        # still catch it.
        raise TypeError('Must only be passed a Django (Q)uery object')
    serializer = QSerializer(base64=True)
    self.b64_query = serializer.dumps(value)
def power_chisq_at_points_from_precomputed(corr, snr, snr_norm, bins, indices):
    """Calculate the chisq timeseries from precomputed values for only
    select points.

    This function calculates the chisq at each point by explicitly time
    shifting and summing each bin. No FFT is involved.

    Parameters
    ----------
    corr: FrequencySeries
        The product of the template and data in the frequency domain.
    snr: numpy.ndarray
        The unnormalized array of snr values at only the selected points in
        `indices`.
    snr_norm: float
        The normalization of the snr.
    bins: List of integers
        The edges of the equal power bins.
    indices: Array
        The indices where we will calculate the chisq. These must be
        relative to the given `corr` series.

    Returns
    -------
    chisq: Array
        An array containing only the chisq at the selected points.
    """
    num_bins = len(bins) - 1
    # Per-bin time-shifted sums at the requested indices.
    chisq = shift_sum(corr, indices, bins)
    # (snr.conj() * snr).real is |snr|^2; snr_norm^2 converts the
    # unnormalized values to the normalized chisq statistic.
    return (chisq * num_bins - (snr.conj() * snr).real) * (snr_norm ** 2.0)
def end_output(self, **kwargs):
    """Write end of checking info as HTML."""
    # Emit each optional trailing section only when it is enabled.
    for part in ("stats", "outro"):
        if self.has_part(part):
            getattr(self, "write_" + part)()
    self.close_fileoutput()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.