| code (string, length 51–2.38k) | docstring (string, length 4–15.2k) |
|---|---|
def get_summary(session):
    """Get vehicle summary.

    Builds a dict with the account owner's user info and one entry per
    vehicle on the profile returned by ``get_profile``.
    """
    profile = get_profile(session)
    user_profile = profile['userProfile']
    vehicles = []
    for vehicle in profile['vehicles']:
        vehicles.append({
            'vin': vehicle['vin'],
            'year': vehicle['year'],
            'make': vehicle['make'],
            'model': _get_model(vehicle),
            'odometer': vehicle['odometerMileage'],
        })
    return {
        'user': {
            'email': user_profile['eMail'],
            'name': '{} {}'.format(user_profile['firstName'],
                                   user_profile['lastName']),
        },
        'vehicles': vehicles,
    }
def init(directory):
    """Init the config file.

    Interactively prompts for username, password and a log directory,
    then writes them as JSON to ``v2ex_config.json`` under *directory*.
    Exits the process if the log directory does not exist.
    """
    username = click.prompt("Input your username")
    password = click.prompt("Input your password", hide_input=True,
                            confirmation_prompt=True)
    log_directory = click.prompt("Input your log directory")
    if not path.exists(log_directory):
        sys.exit("Invalid log directory, please have a check.")
    config_file_path = path.join(directory, 'v2ex_config.json')
    config = {
        "username": username,
        "password": password,
        "log_directory": path.abspath(log_directory)
    }
    with open(config_file_path, 'w') as f:
        json.dump(config, f)
    click.echo("Init the config file at: {0}".format(config_file_path)) | Init the config fle. |
def select(table, cols="*", where=(), group="", order=(), limit=(), **kwargs):
    """Convenience wrapper for database SELECT.

    Extra keyword arguments are merged into *where* as additional
    column filters.
    """
    # Merge explicit where pairs with keyword filters into one sequence.
    where = dict(where, **kwargs).items()
    sql, args = makeSQL("SELECT", table, cols, where, group, order, limit)
    return execute(sql, args) | Convenience wrapper for database SELECT. |
def set_token(self, token):
    """Set the token for the v20 context.

    Args:
        token: The token used to access the v20 REST api
    """
    self.token = token
    auth_value = "Bearer {}".format(token)
    self.set_header('Authorization', auth_value)
Args:
token: The token used to access the v20 REST api |
def sg_input(shape=None, dtype=sg_floatx, name=None):
    r"""Creates a placeholder.

    Args:
      shape: A tuple/list of integers. If an integer is given, it will turn to a list.
      dtype: A data type. Default is float32.
      name: A name for the placeholder.

    Returns:
      A wrapped placeholder `Tensor`.
    """
    # BUG FIX: a stray bare `r` expression statement (the orphaned prefix of
    # the raw docstring) raised NameError at call time; restored the docstring.
    if shape is None:
        return tf.placeholder(dtype, shape=None, name=name)
    # Normalise a scalar shape to a list, then prepend None for the batch axis.
    if not isinstance(shape, (list, tuple)):
        shape = [shape]
    return tf.placeholder(dtype, shape=[None] + list(shape), name=name)
Args:
shape: A tuple/list of integers. If an integers is given, it will turn to a list.
dtype: A data type. Default is float32.
name: A name for the placeholder.
Returns:
A wrapped placeholder `Tensor`. |
def add_column(self, func, name=None, show=True):
    """Add a column function which takes an id as argument and
    returns a value.

    The function is registered under *name* (default: ``func.__name__``)
    and the table headers in the JS widget are refreshed.  Returns
    *func* so the method can be used as a decorator.
    """
    assert func
    name = name or func.__name__
    if name == '<lambda>':
        raise ValueError("Please provide a valid name for " + name)
    d = {'func': func,
         'show': show,
         }
    self._columns[name] = d
    # Push the updated header list to the JavaScript table widget.
    data = _create_json_dict(cols=self.column_names,
                             )
    self.eval_js('table.setHeaders({});'.format(data))
    return func | Add a column function which takes an id as argument and
returns a value. |
def makeCys(segID, N, CA, C, O, geo):
    """Creates a Cysteine residue.

    Builds CB and SG atoms from the backbone atoms N/CA/C using the bond
    lengths, angles and dihedrals in *geo*, then packs backbone plus the
    new atoms into a Residue named "CYS" (presumably Bio.PDB — confirm).
    """
    CA_CB_length=geo.CA_CB_length
    C_CA_CB_angle=geo.C_CA_CB_angle
    N_C_CA_CB_diangle=geo.N_C_CA_CB_diangle
    CB_SG_length= geo.CB_SG_length
    CA_CB_SG_angle= geo.CA_CB_SG_angle
    N_CA_CB_SG_diangle= geo.N_CA_CB_SG_diangle
    # Place CB relative to the backbone, then SG relative to CB.
    carbon_b= calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle)
    CB= Atom("CB", carbon_b, 0.0 , 1.0, " "," CB", 0,"C")
    sulfur_g= calculateCoordinates(N, CA, CB, CB_SG_length, CA_CB_SG_angle, N_CA_CB_SG_diangle)
    SG= Atom("SG", sulfur_g, 0.0, 1.0, " ", " SG", 0, "S")
    res= Residue((' ', segID, ' '), "CYS", ' ')
    res.add(N)
    res.add(CA)
    res.add(C)
    res.add(O)
    res.add(CB)
    res.add(SG)
    return res | Creates a Cysteine residue |
def get_brizo_url(config):
    """Return the Brizo component url.

    :param config: Config
    :return: Url, str
    """
    default_url = 'http://localhost:8030'
    url = default_url
    if config.has_option('resources', 'brizo.url'):
        # An empty option value falls back to the default.
        url = config.get('resources', 'brizo.url') or default_url
    return f'{url}/api/v1/brizo'
:param config: Config
:return: Url, str |
def destroy(self):
    """Destroy the client.

    Detaches the native web view from the client before releasing it,
    then delegates to the superclass teardown.
    """
    if self.client:
        self.client.setWebView(self.widget, None)
        # NOTE(review): deleting the attribute means a second destroy()
        # would raise AttributeError — presumably called once; confirm.
        del self.client
    super(AndroidWebView, self).destroy() | Destroy the client |
def transform_aglistener_output(result):
    """Transforms the result of Availability Group Listener to eliminate unnecessary parameters."""
    from collections import OrderedDict
    from msrestazure.tools import parse_resource_id
    try:
        # Prefer an explicit resource_group attribute; otherwise parse it
        # out of the resource id.
        resource_group = getattr(result, 'resource_group', None) or parse_resource_id(result.id)['resource_group']
        output = OrderedDict([('id', result.id),
                              ('name', result.name),
                              ('provisioningState', result.provisioning_state),
                              ('port', result.port),
                              ('resourceGroup', resource_group)])
        if result.load_balancer_configurations is not None:
            output['loadBalancerConfigurations'] = format_load_balancer_configuration_list(result.load_balancer_configurations)
        return output
    except AttributeError:
        # Return the raw result untouched if it lacks the expected fields.
        return result | Transforms the result of Availability Group Listener to eliminate unnecessary parameters. |
def dist(x1, x2=None, metric='sqeuclidean', to_numpy=True):
    """Compute distance between samples in x1 and x2 on gpu.

    Parameters
    ----------
    x1 : np.array (n1,d)
        matrix with n1 samples of size d
    x2 : np.array (n2,d), optional
        matrix with n2 samples of size d (if None then x2=x1)
    metric : str
        Metric from 'sqeuclidean', 'euclidean'.

    Returns
    -------
    M : np.array (n1,n2)
        distance matrix computed with given metric

    Raises
    ------
    NotImplementedError
        If *metric* is not one of the supported metrics.
    """
    if x2 is None:
        x2 = x1
    if metric == "sqeuclidean":
        return euclidean_distances(x1, x2, squared=True, to_numpy=to_numpy)
    if metric == "euclidean":
        return euclidean_distances(x1, x2, squared=False, to_numpy=to_numpy)
    # Name the offending metric instead of raising bare NotImplementedError.
    raise NotImplementedError("unknown metric {!r}".format(metric))
Parameters
----------
x1 : np.array (n1,d)
matrix with n1 samples of size d
x2 : np.array (n2,d), optional
matrix with n2 samples of size d (if None then x2=x1)
metric : str
Metric from 'sqeuclidean', 'euclidean',
Returns
-------
M : np.array (n1,n2)
distance matrix computed with given metric |
def get_network(self):
    """Get the network that this resource attribute is in.

    Resolves through the owner object indicated by ``ref_key``;
    PROJECT-scoped attributes (and unknown keys) yield None.
    """
    key = self.ref_key
    if key == 'NETWORK':
        return self.network
    if key == 'NODE':
        return self.node.network
    if key == 'LINK':
        return self.link.network
    if key == 'GROUP':
        return self.group.network
    if key == 'PROJECT':
        return None
def setup_address(self, name, address=default, transact=None):
    """Set up the name to point to the supplied address.

    The sender of the transaction must own the name, or its parent name.

    :param str name: ENS name to set up
    :param str address: name will point to this address, in checksum
        format. If ``None``, erase the record. If not specified, name
        will point to the owner's address.
    :param dict transact: the transaction configuration, like in
        :meth:`~web3.eth.Eth.sendTransaction`
    :raises ValueError: if ``address`` is not in checksum format
    """
    # BUG FIX: the mutable default `transact={}` was mutated below
    # ('from' key), leaking state between calls; copy the caller's dict.
    transact = {} if transact is None else dict(transact)
    owner = self.setup_owner(name, transact=transact)
    self._assert_control(owner, name)
    if is_none_or_zero_address(address):
        address = None
    elif address is default:
        # No address supplied: point the name at its owner.
        address = owner
    elif is_binary_address(address):
        address = to_checksum_address(address)
    elif not is_checksum_address(address):
        raise ValueError("You must supply the address in checksum format")
    if self.address(name) == address:
        return None
    if address is None:
        address = EMPTY_ADDR_HEX
    transact['from'] = owner
    resolver = self._set_resolver(name, transact=transact)
    return resolver.functions.setAddr(raw_name_to_hash(name), address).transact(transact)
The sender of the transaction must own the name, or
its parent name.
Example: If the caller owns ``parentname.eth`` with no subdomains
and calls this method with ``sub.parentname.eth``,
then ``sub`` will be created as part of this call.
:param str name: ENS name to set up
:param str address: name will point to this address, in checksum format. If ``None``,
erase the record. If not specified, name will point to the owner's address.
:param dict transact: the transaction configuration, like in
:meth:`~web3.eth.Eth.sendTransaction`
:raises InvalidName: if ``name`` has invalid syntax
:raises UnauthorizedError: if ``'from'`` in `transact` does not own `name` |
def plot_point(self, x, y, visible=True, color='black', size=5):
    """Places a single point on the grid.

    :param x: the x coordinate
    :param y: the y coordinate
    :param visible: True if the individual point should be visible
    :param color: the color of the point
    :param size: the point size in pixels
    :return: The absolute coordinates as a tuple
    """
    # Map data coordinates onto canvas pixels (50 px margin on each axis).
    px = 50 + (self.px_x * (x - self.x_min)) / self.x_tick
    py = 50 + (self.px_y * (self.y_max - y)) / self.y_tick
    if visible:
        radius = max(int(size / 2), 1)
        self.canvas.create_oval(
            px - radius, py - radius,
            px + radius, py + radius,
            fill=color
        )
    return px, py
:param x: the x coordinate
:param y: the y coordinate
:param visible: True if the individual point should be visible
:param color: the color of the point
:param size: the point size in pixels
:return: The absolute coordinates as a tuple |
def load_fileobj(fileobj, gz = None, xmldoc = None, contenthandler = None):
    """Parse fileobj as a LIGO Light Weight XML document.

    gz=None auto-detects gzip from the 2-byte magic number; True/False
    forces decompression on or off.  Returns (xmldoc, hex MD5 digest of
    the parsed bytestream).
    """
    # Wrap so every byte read is folded into an MD5 digest.
    fileobj = MD5File(fileobj)
    md5obj = fileobj.md5obj
    if gz or gz is None:
        # Peek at the first two bytes, then rewind for the real parse.
        fileobj = RewindableInputFile(fileobj)
        magic = fileobj.read(2)
        fileobj.seek(0, os.SEEK_SET)
        # NOTE(review): '\037\213' is a str comparison — Python 2 only;
        # under Python 3 read() returns bytes and this never matches.
        if gz or magic == '\037\213':
            fileobj = gzip.GzipFile(mode = "rb", fileobj = fileobj)
    if xmldoc is None:
        xmldoc = ligolw.Document()
    ligolw.make_parser(contenthandler(xmldoc)).parse(fileobj)
    return xmldoc, md5obj.hexdigest() | Parse the contents of the file object fileobj, and return the
contents as a LIGO Light Weight document tree. The file object
does not need to be seekable.
If the gz parameter is None (the default) then gzip compressed data
will be automatically detected and decompressed, otherwise
decompression can be forced on or off by setting gz to True or
False respectively.
If the optional xmldoc argument is provided and not None, the
parsed XML tree will be appended to that document, otherwise a new
document will be created. The return value is a tuple, the first
element of the tuple is the XML document and the second is a string
containing the MD5 digest in hex digits of the bytestream that was
parsed.
Example:
>>> from pycbc_glue.ligolw import ligolw
>>> import StringIO
>>> f = StringIO.StringIO('<?xml version="1.0" encoding="utf-8" ?><!DOCTYPE LIGO_LW SYSTEM "http://ldas-sw.ligo.caltech.edu/doc/ligolwAPI/html/ligolw_dtd.txt"><LIGO_LW><Table Name="demo:table"><Column Name="name" Type="lstring"/><Column Name="value" Type="real8"/><Stream Name="demo:table" Type="Local" Delimiter=",">"mass",0.5,"velocity",34</Stream></Table></LIGO_LW>')
>>> xmldoc, digest = load_fileobj(f, contenthandler = ligolw.LIGOLWContentHandler)
>>> digest
'6bdcc4726b892aad913531684024ed8e'
The contenthandler argument specifies the SAX content handler to
use when parsing the document. The contenthandler is a required
argument. See the pycbc_glue.ligolw package documentation for typical
parsing scenario involving a custom content handler. See
pycbc_glue.ligolw.ligolw.PartialLIGOLWContentHandler and
pycbc_glue.ligolw.ligolw.FilteringLIGOLWContentHandler for examples of
custom content handlers used to load subsets of documents into
memory. |
def _get_client_fqdn(self, client_info_contents):
    """Extracts a GRR client's FQDN from its client_info.yaml file.

    Args:
        client_info_contents: The contents of the client_info.yaml file.

    Returns:
        A (str, str) tuple representing client ID and client FQDN.
    """
    info = yaml.safe_load(client_info_contents)
    # The client id is the path segment after the first '/'.
    identifier = info['client_id'].split('/')[1]
    return identifier, info['system_info']['fqdn']
Args:
client_info_contents: The contents of the client_info.yaml file.
Returns:
A (str, str) tuple representing client ID and client FQDN. |
def set(self, instance, value, **kw):
    """Set the value of the uid reference field.

    Accepts a UID string, a content object, a dict with a "uid" key, or
    a (possibly mixed) list of those; normalises everything into a list
    before delegating to ``_set``.

    :raises ValueError: if multiple values are given for a single
        valued field.
    """
    ref = []
    if api.is_uid(value):
        ref.append(value)
    if u.is_dict(value):
        # BUG FIX: `ref = ref.append(...)` rebound ref to None (append
        # returns None), discarding collected values; append in place.
        ref.append(value.get("uid"))
    if api.is_at_content(value):
        ref.append(value)
    if u.is_list(value):
        for item in value:
            if api.is_uid(item):
                ref.append(item)
            elif u.is_dict(item):
                uid = item.get('uid', None)
                if uid:
                    ref.append(uid)
    if not self.multi_valued:
        if len(ref) > 1:
            raise ValueError("Multiple values given for single valued "
                             "field {}".format(repr(self.field)))
        else:
            ref = ref[0]
    return self._set(instance, ref, **kw)
def scale(cls, *scaling):
    """Create a scaling transform from a scalar or vector.

    :param scaling: The scaling factor. A scalar value will
        scale in both dimensions equally. A vector scaling
        value scales the dimensions independently.
    :type scaling: float or sequence
    :rtype: Affine
    """
    if len(scaling) == 1:
        factor = float(scaling[0])
        sx, sy = factor, factor
    else:
        sx, sy = scaling
    # Row-major 3x3 affine matrix with sx/sy on the diagonal.
    return tuple.__new__(cls, (sx, 0.0, 0.0, 0.0, sy, 0.0, 0.0, 0.0, 1.0))
:param scaling: The scaling factor. A scalar value will
scale in both dimensions equally. A vector scaling
value scales the dimensions independently.
:type scaling: float or sequence
:rtype: Affine |
def certs(self):
    """List of the certificates contained in the structure."""
    certstack = libcrypto.CMS_get1_certs(self.ptr)
    if certstack is None:
        raise CMSError("getting certs")
    # disposable=True: per OpenSSL naming, "get1" returns a copy that
    # the caller owns and must free.
    return StackOfX509(ptr=certstack, disposable=True) | List of the certificates contained in the structure |
def find_multiplex_by_name(self, multiplex_name: str) -> Multiplex:
    """Find and return a multiplex in the influence graph with the given name.

    Raise an AttributeError if there is no multiplex in the graph with
    the given name.
    """
    found = next((m for m in self.multiplexes if m.name == multiplex_name), None)
    if found is None:
        raise AttributeError(f'multiplex "{multiplex_name}" does not exist')
    return found
Raise an AttributeError if there is no multiplex in the graph with the given name. |
def check_data(self):
    """Check that definitions file is present, and that faces file is readable."""
    assert os.path.exists(self.data_fp)
    if gis:
        # Only exercise the faces checks when GIS support is available.
        with fiona.drivers():
            with fiona.open(self.faces_fp) as src:
                assert src.meta
        # Verify the faces file against the sha256 recorded in the
        # definitions file's metadata.
        gpkg_hash = json.load(open(self.data_fp))['metadata']['sha256']
        assert gpkg_hash == sha256(self.faces_fp) | Check that definitions file is present, and that faces file is readable. |
def read_raw_parser_conf(data: str) -> dict:
    """Parse an ini-style string and return its ``[commitizen]`` section.

    We expect to have a section like this::

        [commitizen]
        name = cz_jira
        files = [
            "commitizen/__version__.py",
            "pyproject.toml"
        ]

    A ``files`` value, when present, is decoded from JSON into a list.
    An empty dict is returned when the section is missing.
    """
    parser = configparser.ConfigParser(allow_no_value=True)
    parser.read_string(data)
    try:
        section: dict = dict(parser["commitizen"])
    except KeyError:
        return {}
    if "files" in section:
        # The raw value is a JSON array serialized into the ini file.
        section.update({"files": json.loads(section["files"])})
    return section
```
[commitizen]
name = cz_jira
files = [
"commitizen/__version__.py",
"pyproject.toml"
] # this tab at the end is important
``` |
def to_path_globs(self, relpath, conjunction):
    """Return a PathGlobs representing the included and excluded Files for these patterns.

    Each stored glob is re-rooted under *relpath* before being handed
    to PathGlobs.
    """
    return PathGlobs(
        include=tuple(os.path.join(relpath, glob) for glob in self._file_globs),
        exclude=tuple(os.path.join(relpath, exclude) for exclude in self._excluded_file_globs),
        conjunction=conjunction) | Return a PathGlobs representing the included and excluded Files for these patterns. |
def __create_image(self, inpt, hashfun):
    """Creates the avatar based on the input and the chosen hash function.

    Unknown hash functions fall back to ``DEFAULT_HASHFUN`` with a
    warning printed to stdout.
    """
    algo = hashfun
    if algo not in generator.HASHES:
        print("Unknown or unsupported hash function. Using default: %s"
              % self.DEFAULT_HASHFUN)
        algo = self.DEFAULT_HASHFUN
    return generator.generate(inpt, algo)
the chosen hash function. |
def abort(self, count=2, timeout=60):
    """Send an abort sequence using CAN bytes.

    :param count: number of CAN bytes to send.
    :param timeout: per-byte write timeout passed to ``putc``.
    """
    # NOTE(review): xrange is Python 2 only.
    for counter in xrange(0, count):
        self.putc(CAN, timeout) | Send an abort sequence using CAN bytes. |
def read_requirements():
    """Parses requirements from requirements.txt.

    NOTE(review): parse_requirements/PipSession are pip internals, not
    a stable API; this is known to break on newer pip releases.
    """
    reqs_path = os.path.join('.', 'requirements.txt')
    install_reqs = parse_requirements(reqs_path, session=PipSession())
    reqs = [str(ir.req) for ir in install_reqs]
    return reqs | parses requirements from requirements.txt |
def main():
    """Entry point for GNS3 server."""
    # Daemonizing is POSIX-only; the flag is ignored on Windows.
    if not sys.platform.startswith("win"):
        if "--daemon" in sys.argv:
            daemonize()
    # Imported lazily — presumably so daemonize() runs before the server
    # modules load; TODO confirm.
    from gns3server.run import run
    run() | Entry point for GNS3 server |
async def prover_get_credentials(wallet_handle: int,
                                 filter_json: str) -> str:
    """Gets human readable credentials according to the filter.

    NOTE: deprecated upstream in favour of prover_search_credentials,
    which fetches records in small batches.

    :param wallet_handle: wallet handler (created by open_wallet).
    :param filter_json: filter for credentials
    :return: credentials json
    """
    logger = logging.getLogger(__name__)
    logger.debug("prover_get_credentials: >>> wallet_handle: %r, filter_json: %r",
                 wallet_handle,
                 filter_json)
    # Create the ctypes callback once and cache it on the function object.
    if not hasattr(prover_get_credentials, "cb"):
        logger.debug("prover_get_credentials: Creating callback")
        prover_get_credentials.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
    c_wallet_handle = c_int32(wallet_handle)
    c_filter_json = c_char_p(filter_json.encode('utf-8'))
    credentials_json = await do_call('indy_prover_get_credentials',
                                     c_wallet_handle,
                                     c_filter_json,
                                     prover_get_credentials.cb)
    res = credentials_json.decode()
    logger.debug("prover_get_credentials: <<< res: %r", res)
    return res | Gets human readable credentials according to the filter.
If filter is NULL, then all credentials are returned.
Credentials can be filtered by tags created during saving of credential.
NOTE: This method is deprecated because immediately returns all fetched credentials.
Use <prover_search_credentials> to fetch records by small batches.
:param wallet_handle: wallet handler (created by open_wallet).
:param filter_json: filter for credentials
{
"schema_id": string, (Optional)
"schema_issuer_did": string, (Optional)
"schema_name": string, (Optional)
"schema_version": string, (Optional)
"issuer_did": string, (Optional)
"cred_def_id": string, (Optional)
}
:return: credentials json
[{
"referent": string, // cred_id in the wallet
"attrs": {"key1":"raw_value1", "key2":"raw_value2"},
"schema_id": string,
"cred_def_id": string,
"rev_reg_id": Optional<string>,
"cred_rev_id": Optional<string>
}] |
def looks_like_xml(text):
    """Check if a doctype exists or if we have some tags."""
    if xml_decl_re.match(text):
        return True
    key = hash(text)
    try:
        return _looks_like_xml_cache[key]
    except KeyError:
        pass
    if doctype_lookup_re.match(text) is not None:
        return True
    # Only scan the first 1000 characters for a tag, and cache the result.
    result = tag_re.search(text[:1000]) is not None
    _looks_like_xml_cache[key] = result
    return result
def _update_message_request(self, message):
    """Add row keys and row range to given request message.

    :type message: class:`data_messages_v2_pb2.ReadRowsRequest`
    :param message: The ``ReadRowsRequest`` protobuf
    """
    for each in self.row_keys:
        message.rows.row_keys.append(_to_bytes(each))
    for each in self.row_ranges:
        r_kwrags = each.get_range_kwargs()
        message.rows.row_ranges.add(**r_kwrags) | Add row keys and row range to given request message
:type message: class:`data_messages_v2_pb2.ReadRowsRequest`
:param message: The ``ReadRowsRequest`` protobuf |
def to_internal(self, attribute_profile, external_dict):
    """Converts the external data from "type" to internal.

    :type attribute_profile: str
    :type external_dict: dict[str, str]
    :rtype: dict[str, str]

    :param attribute_profile: From which external type to convert (ex: oidc, saml, ...)
    :param external_dict: Attributes in the external format
    :return: Attributes in the internal format
    """
    internal_dict = {}
    for internal_attribute_name, mapping in self.from_internal_attributes.items():
        if attribute_profile not in mapping:
            logger.debug("no attribute mapping found for internal attribute '%s' the attribute profile '%s'" % (
                internal_attribute_name, attribute_profile))
            continue
        external_attribute_name = mapping[attribute_profile]
        attribute_values = self._collate_attribute_values_by_priority_order(external_attribute_name,
                                                                            external_dict)
        if attribute_values:
            logger.debug("backend attribute '%s' mapped to %s" % (external_attribute_name,
                                                                  internal_attribute_name))
            internal_dict[internal_attribute_name] = attribute_values
        else:
            logger.debug("skipped backend attribute '%s': no value found", external_attribute_name)
    # Fill in any template-based attributes after the direct mapping pass.
    internal_dict = self._handle_template_attributes(attribute_profile, internal_dict)
    return internal_dict | Converts the external data from "type" to internal
:type attribute_profile: str
:type external_dict: dict[str, str]
:rtype: dict[str, str]
:param attribute_profile: From which external type to convert (ex: oidc, saml, ...)
:param external_dict: Attributes in the external format
:return: Attributes in the internal format |
def disconnect(self):
    """Disconnect from IB connection."""
    self.connState = Client.DISCONNECTED
    if self.conn is not None:
        self._logger.info('Disconnecting')
        self.conn.disconnect()
        # Notify the wrapper and reset client state after the close.
        self.wrapper.connectionClosed()
        self.reset() | Disconnect from IB connection. |
def get_client_settings_env(**_):
    """Retrieve client settings from environment settings.

    :param \\*\\*kwargs: Arguments that are passed into the client instance
    """
    env = os.environ.get
    return {
        'proxy': env('https_proxy'),
        'username': env('SL_USERNAME'),
        'api_key': env('SL_API_KEY'),
    }
:param \\*\\*kwargs: Arguments that are passed into the client instance |
def to_regular_array(self, A):
    """Converts from an array of type `self.dtype` to an array
    of type `int` with an additional index labeling the
    tuple indeces.

    :param np.ndarray A: An `np.array` of type `self.dtype`.
    :rtype: `np.ndarray`
    """
    n_fields = len(A.dtype.names)
    # Reinterpret the structured records as plain ints, then expose the
    # fields as a trailing axis.
    flat_view = A.view((int, n_fields))
    return flat_view.reshape(A.shape + (-1,))
of type `int` with an additional index labeling the
tuple indeces.
:param np.ndarray A: An `np.array` of type `self.dtype`.
:rtype: `np.ndarray` |
def save_grade_system(self, grade_system_form, *args, **kwargs):
    """Pass through to provider GradeSystemAdminSession.update_grade_system.

    Dispatches to update or create depending on whether the form was
    issued for an update.
    """
    if not grade_system_form.is_for_update():
        return self.create_grade_system(grade_system_form, *args, **kwargs)
    return self.update_grade_system(grade_system_form, *args, **kwargs)
def encipher_shift(plaintext, plain_vocab, shift):
    """Encrypt plain text with a single shift layer.

    Args:
        plaintext (list of list of Strings): a list of plain text to encrypt.
        plain_vocab (list of Integer): unique vocabularies being used.
        shift (Integer): number of shift, shift to the right if shift is positive.

    Returns:
        ciphertext (list of Strings): encrypted plain text.
    """
    ciphertext = []
    cipher = ShiftEncryptionLayer(plain_vocab, shift)
    # Encrypt sentence by sentence, character by character.
    for _, sentence in enumerate(plaintext):
        cipher_sentence = []
        for _, character in enumerate(sentence):
            encrypted_char = cipher.encrypt_character(character)
            cipher_sentence.append(encrypted_char)
        ciphertext.append(cipher_sentence)
    return ciphertext | Encrypt plain text with a single shift layer.
Args:
plaintext (list of list of Strings): a list of plain text to encrypt.
plain_vocab (list of Integer): unique vocabularies being used.
shift (Integer): number of shift, shift to the right if shift is positive.
Returns:
ciphertext (list of Strings): encrypted plain text. |
def escapePlaceholders(self, inputString):
    """This is an internal method that escapes all the placeholders
    defined in MapConstants.py."""
    # Order matters if any placeholder token is a substring of another;
    # this preserves the original replacement order.
    placeholders = (
        MapConstants.placeholder,
        MapConstants.placeholderFileName,
        MapConstants.placeholderPath,
        MapConstants.placeholderExtension,
        MapConstants.placeholderCounter,
    )
    escaped = inputString
    for token in placeholders:
        escaped = escaped.replace(token, '\\' + token)
    return escaped
defined in MapConstants.py. |
def serve_forever(self, poll_interval=0.5):
    """Handle one request at a time until shutdown.

    Polls for shutdown every poll_interval seconds. Ignores
    self.timeout. If you need to do periodic tasks, do them in
    another thread.
    """
    self.__is_shut_down.clear()
    try:
        while not self.__shutdown_request:
            # Retry select() if it is interrupted by a signal (EINTR).
            r, w, e = _eintr_retry(select.select, [self], [], [], poll_interval)
            if self in r:
                self._handle_request_noblock()
    finally:
        self.__shutdown_request = False
        self.__is_shut_down.set() | Handle one request at a time until shutdown.
Polls for shutdown every poll_interval seconds. Ignores
self.timeout. If you need to do periodic tasks, do them in
another thread. |
def txinfo(self, txid: str) -> dict:
    """Returns information about given transaction."""
    # cast() only narrows the static type; no runtime conversion happens.
    return cast(dict, self.ext_fetch('txinfo/' + txid)) | Returns information about given transaction. |
def adjust_for_triggers(self):
    """Remove trigger-related plugins when needed.

    If there are no triggers defined, all trigger-related plugins are
    removed.  If triggers are defined but the build uses a custom base
    image or builds FROM scratch, the triggers themselves are deleted
    as well, since rebuild triggering does not apply there.
    """
    triggers = self.template['spec'].get('triggers', [])
    remove_plugins = [
        ("prebuild_plugins", "check_and_set_rebuild"),
        ("prebuild_plugins", "stop_autorebuild_if_disabled"),
    ]
    should_remove = False
    if triggers and (self.is_custom_base_image() or self.is_from_scratch_image()):
        if self.is_custom_base_image():
            msg = "removing %s from request because custom base image"
        elif self.is_from_scratch_image():
            msg = 'removing %s from request because FROM scratch image'
        del self.template['spec']['triggers']
        should_remove = True
    elif not triggers:
        msg = "removing %s from request because there are no triggers"
        should_remove = True
    if should_remove:
        for when, which in remove_plugins:
            logger.info(msg, which)
            self.dj.remove_plugin(when, which) | Remove trigger-related plugins when needed
If there are no triggers defined, it's assumed the
feature is disabled and all trigger-related plugins
are removed.
If there are triggers defined, and this is a custom
base image, some trigger-related plugins do not apply.
Additionally, this method ensures that custom base
images never have triggers since triggering a base
image rebuild is not a valid scenario. |
def execute_cmd(self, userid, cmdStr):
    """Execute commands on the guest vm."""
    LOG.debug("executing cmd: %s", cmdStr)
    return self._smtclient.execute_cmd(userid, cmdStr) | Execute commands on the guest vm. |
def config_mode(self, config_command="configure", pattern=r"\[edit\]"):
    """Enter configuration mode.

    :param config_command: command used to enter configuration mode.
    :param pattern: regex confirming the device entered config mode.
    """
    # BUG FIX: the previous default, r"[edit]", is a regex character
    # class matching any single one of 'e', 'd', 'i', 't'; the intent
    # is to match the literal "[edit]" banner VyOS prints in config mode.
    return super(VyOSSSH, self).config_mode(
        config_command=config_command, pattern=pattern
    )
def _alpha2rho0(self, theta_Rs, Rs):
rho0 = theta_Rs / (4. * Rs ** 2 * (1. + np.log(1. / 2.)))
return rho0 | convert angle at Rs into rho0 |
def remove_line_breaks(text):
    """Remove line breaks from input.

    Including unicode 'line separator' (U+2028), 'paragraph separator'
    (U+2029), and 'next line' (U+0085) characters.

    Accepts UTF-8 bytes (returned as UTF-8 bytes, as before) and, as a
    generalisation, already-decoded strings (returned as str).
    """
    # BUG FIX: the original replaced u'\xe2\x80\xa8' / u'\xe2\x80\xa9' /
    # u'\xc2\x85' — UTF-8 *byte* sequences spelled as code points, which
    # can never occur in correctly decoded text.  After decoding, the
    # characters are U+2028, U+2029 and U+0085.
    is_bytes = isinstance(text, bytes)
    decoded = text.decode('utf-8') if is_bytes else text
    for linebreak in ('\f', '\n', '\r', '\u2028', '\u2029', '\x85'):
        decoded = decoded.replace(linebreak, '')
    return decoded.encode('utf-8') if is_bytes else decoded
Including unicode 'line separator', 'paragraph separator',
and 'next line' characters. |
def stop(self):
    """Stop listening for keyboard input events.

    Disables the record context and releases the keyboard grab on both
    display connections.
    """
    self.state = False
    with display_manager(self.display) as d:
        d.record_disable_context(self.ctx)
        d.ungrab_keyboard(X.CurrentTime)
    # BUG FIX: the second block opened display2 but kept operating on the
    # first display's handle `d`; bind the managed connection so the
    # disable/ungrab actually happen on display2.
    with display_manager(self.display2) as d:
        d.record_disable_context(self.ctx)
        d.ungrab_keyboard(X.CurrentTime)
def add_section(self, section):
    """Add a new Section object to the config. Should be a subclass of
    _AbstractSection.

    Returns the section so calls can be chained.
    """
    if not issubclass(section.__class__, _AbstractSection):
        raise TypeError("argument should be a subclass of Section")
    key = section.get_key_name()
    self.sections[key] = section
    return section
_AbstractSection. |
def real(self):
    """Element-wise real part.

    Raises:
        NoConjugateMatrix: if entries have no `conjugate` method and no
            other way to determine the real part
    """
    def re(val):
        # Try, in order: a native .real, a symbolic as_real_imag(),
        # and finally (conjugate + val) / 2.
        if hasattr(val, 'real'):
            return val.real
        elif hasattr(val, 'as_real_imag'):
            return val.as_real_imag()[0]
        elif hasattr(val, 'conjugate'):
            return (val.conjugate() + val) / 2
        else:
            raise NoConjugateMatrix(
                "Matrix entry %s contains has no defined "
                "conjugate" % str(val))
    return self.element_wise(re) | Element-wise real part
Raises:
NoConjugateMatrix: if entries have no `conjugate` method and no
other way to determine the real part
Note:
A mathematically equivalent way to obtain a real matrix from a
complex matrix ``M`` is::
(M.conjugate() + M) / 2
However, the result may not be identical to ``M.real``, as the
latter tries to convert elements of the matrix to real values
directly, if possible, and only uses the conjugate as a fall-back |
def assignParameters(self, solution_next, DiscFac, LivPrb, CRRA, Rfree, PermGroFac):
    """Saves necessary parameters as attributes of self for use by other methods.

    Parameters
    ----------
    solution_next : ConsumerSolution
        The solution to next period's one period problem.
    DiscFac : float
        Intertemporal discount factor for future utility.
    LivPrb : float
        Survival probability; likelihood of being alive at the beginning
        of the succeeding period.
    CRRA : float
        Coefficient of relative risk aversion.
    Rfree : float
        Risk free interest factor on end-of-period assets.
    PermGroFac : float
        Expected permanent income growth factor at the end of this period.

    Returns
    -------
    none
    """
    parameters = {
        'solution_next': solution_next,
        'DiscFac': DiscFac,
        'LivPrb': LivPrb,
        'CRRA': CRRA,
        'Rfree': Rfree,
        'PermGroFac': PermGroFac,
    }
    for attr_name, attr_value in parameters.items():
        setattr(self, attr_name, attr_value)
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
DiscFac : float
Intertemporal discount factor for future utility.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
Returns
-------
none |
def re_run_file(self):
    """Re-run last script."""
    if self.get_option('save_all_before_run'):
        self.save_all()
    if self.__last_ec_exec is None:
        # Nothing has been executed yet.
        return
    # Replay the exact configuration captured by the previous run.
    (fname, wdir, args, interact, debug,
     python, python_args, current, systerm,
     post_mortem, clear_namespace) = self.__last_ec_exec
    if not systerm:
        self.run_in_current_ipyclient.emit(fname, wdir, args,
                                           debug, post_mortem,
                                           current, clear_namespace)
    else:
        self.main.open_external_console(fname, wdir, args, interact,
                                        debug, python, python_args,
                                        systerm, post_mortem) | Re-run last script |
def merge_entity(self, table_name, entity, if_match='*', timeout=None):
    """Updates an existing entity by merging the entity's properties.
    Throws if the entity does not exist.

    Unlike update_entity, properties absent from *entity* are left in
    place; null-valued properties are ignored.

    :param str table_name: The name of the table containing the entity.
    :param entity: dict or Entity; must contain PartitionKey and RowKey.
    :param str if_match: ETag for optimistic concurrency ('*' forces an
        unconditional merge).
    :param int timeout: The server timeout, expressed in seconds.
    :return: The etag of the entity.
    :rtype: str
    """
    _validate_not_none('table_name', table_name)
    request = _merge_entity(entity, if_match, self.require_encryption,
                            self.key_encryption_key)
    request.host_locations = self._get_host_locations()
    request.query['timeout'] = _int_to_str(timeout)
    request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey'])
    return self._perform_request(request, _extract_etag) | Updates an existing entity by merging the entity's properties. Throws
if the entity does not exist.
This operation does not replace the existing entity as the update_entity
operation does. A property cannot be removed with merge_entity.
Any properties with null values are ignored. All other properties will be
updated or added.
:param str table_name:
The name of the table containing the entity to merge.
:param entity:
The entity to merge. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: dict or :class:`~azure.storage.table.models.Entity`
:param str if_match:
The client may specify the ETag for the entity on the
request in order to compare to the ETag maintained by the service
for the purpose of optimistic concurrency. The merge operation
will be performed only if the ETag sent by the client matches the
value maintained by the server, indicating that the entity has
not been modified since it was retrieved by the client. To force
an unconditional merge, set If-Match to the wildcard character (*).
:param int timeout:
The server timeout, expressed in seconds.
:return: The etag of the entity.
:rtype: str |
def _generate_rpc_method(self, method):
    """Generate a function that performs rpc call.

    :param method: method name
    :return: rpc function
    """
    def _(**kwargs):
        # Each call builds a fresh JSON-RPC 2.0 envelope with a unique id.
        msg_id = self.get_unique_msg_id()
        params = encode_data(kwargs)
        payload = {
            'method': method,
            'params': params,
            'jsonrpc': '2.0',
            'id': msg_id
        }
        response = requests.post(self.url, data=json.dumps(payload), headers=self.headers).json()
        if ('error' in response):
            # A "no result" error code is treated as a normal empty reply.
            if response['error']['code'] == JSONRPC_NO_RESULT:
                return None
            raise Exception('Got error from RPC server when called "%s" error: %s' % (method, response['error']))
        if 'result' in response:
            result = decode_data(response['result'])
            return result
    return _ | Generate a function that performs rpc call
:param method: method name
:return: rpc function |
def _archive_single_dir(archive):
    """Check if all members of the archive are in a single top-level directory.

    :param archive:
        An archive from _open_archive()

    :return:
        None if not a single top level directory in archive, otherwise a
        unicode string of the top level directory name
    """
    common_root = None
    for info in _list_archive_members(archive):
        member_name = _info_name(info)
        if member_name in ('.', '/'):
            continue
        # Accept either separator style; entries without a separator are
        # themselves top-level.
        if '/' in member_name:
            top_level = member_name.split('/', 1)[0]
        elif '\\' in member_name:
            top_level = member_name.split('\\', 1)[0]
        else:
            top_level = member_name
        if common_root is None:
            common_root = top_level
        elif common_root != top_level:
            return None
    return common_root
:param archive:
An archive from _open_archive()
:return:
None if not a single top level directory in archive, otherwise a
unicode string of the top level directory name |
def add_handler(cls, level, fmt, colorful, **kwargs):
    """Add a configured handler to the global logger.

    *level* may be a logging constant or a level name string; unknown
    names fall back to DEBUG.  Extra kwargs are passed to the handler
    class constructor.
    """
    global g_logger
    if isinstance(level, str):
        level = getattr(logging, level.upper(), logging.DEBUG)
    handler = cls(**kwargs)
    handler.setLevel(level)
    if colorful:
        formatter = ColoredFormatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')
    else:
        formatter = logging.Formatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')
    handler.setFormatter(formatter)
    g_logger.addHandler(handler)
    return handler | Add a configured handler to the global logger. |
def _get_load_ramping_construct(self):
    """Returns a construct for an array of load ramping data."""
    bus_no = integer.setResultsName("bus_no")
    s_rating = real.setResultsName("s_rating")
    up_rate = real.setResultsName("up_rate")
    down_rate = real.setResultsName("down_rate")
    min_up_time = real.setResultsName("min_up_time")
    min_down_time = real.setResultsName("min_down_time")
    n_period_up = integer.setResultsName("n_period_up")
    n_period_down = integer.setResultsName("n_period_down")
    status = boolean.setResultsName("status")
    # One semicolon-terminated row of the Rmpl.con matrix.
    l_ramp_data = bus_no + s_rating + up_rate + down_rate + \
        min_up_time + min_down_time + n_period_up + \
        n_period_down + status + scolon
    l_ramp_array = Literal("Rmpl.con") + "=" + "[" + \
        ZeroOrMore(l_ramp_data + Optional("]" + scolon))
    return l_ramp_array | Returns a construct for an array of load ramping data. |
def _find_files(root, includes, excludes, follow_symlinks):
    """List files inside a directory based on include and exclude rules.

    Args:
        root (str): base directory to list files from.
        includes (list[str]): inclusion patterns.
        excludes (list[str]): exclusion patterns; take precedence.
        follow_symlinks (bool): if true, symlinks are included.

    Yields:
        str: a file name relative to the root.
    """
    root = os.path.abspath(root)
    file_set = formic.FileSet(
        directory=root, include=includes,
        exclude=excludes, symlinks=follow_symlinks,
    )
    for filename in file_set.qualified_files(absolute=False):
        yield filename | List files inside a directory based on include and exclude rules.
yield filename | List files inside a directory based on include and exclude rules.
This is a more advanced version of `glob.glob`, that accepts multiple
complex patterns.
Args:
root (str): base directory to list files from.
includes (list[str]): inclusion patterns. Only files matching those
patterns will be included in the result.
excludes (list[str]): exclusion patterns. Files matching those
patterns will be excluded from the result. Exclusions take
precedence over inclusions.
follow_symlinks (bool): If true, symlinks will be included in the
resulting zip file
Yields:
str: a file name relative to the root.
Note:
Documentation for the patterns can be found at
http://www.aviser.asia/formic/doc/index.html |
def _apply_xheaders(self, headers: httputil.HTTPHeaders) -> None:
    """Rewrite the ``remote_ip`` and ``protocol`` fields."""
    ip = headers.get("X-Forwarded-For", self.remote_ip)
    # Walk the proxy chain right-to-left until the first address that is
    # not one of our trusted downstream proxies.
    for ip in (cand.strip() for cand in reversed(ip.split(","))):
        if ip not in self.trusted_downstream:
            break
    # X-Real-Ip, when present, overrides the X-Forwarded-For result.
    ip = headers.get("X-Real-Ip", ip)
    if netutil.is_valid_ip(ip):
        self.remote_ip = ip
    proto_header = headers.get(
        "X-Scheme", headers.get("X-Forwarded-Proto", self.protocol)
    )
    if proto_header:
        # Use only the last proto entry if there is more than one.
        proto_header = proto_header.split(",")[-1].strip()
        if proto_header in ("http", "https"):
            self.protocol = proto_header | Rewrite the ``remote_ip`` and ``protocol`` fields. |
def get_alt_description(self):
    """Returns the alternate description of a parameter.

    Only pipeline prompt-when-run parameters can have alternate names
    and alternate descriptions; returns None when absent or blank.
    """
    attrs = self.attributes
    if 'altDescription' not in attrs:
        return None
    if not attrs['altDescription'].strip():
        return None
    return attrs['altDescription']
Only pipeline prompt-when-run parameters
can have alternate names and alternate descriptions |
def rename_to_tmp_name(self):
    """Rename the container to a hopefully unique temporary container
    name by prepending the short id."""
    tmp_name = '%s_%s' % (self.short_id, self.name)
    self.client.rename(self.id, tmp_name)
by prepending the short id. |
def wait_for_element_by_selector(self, selector, seconds):
    """Assert an element exists matching the given selector within the
    given time period."""
    def assert_element_present():
        if not find_elements_by_jquery(world.browser, selector):
            raise AssertionError("Expected a matching element.")
    # Retry the assertion until it passes or the timeout elapses.
    wait_for(assert_element_present)(timeout=int(seconds)) | Assert an element exists matching the given selector within the given time
period. |
def parse_services(rule):
    """Parse the services line."""
    tokens = shlex.split(rule)
    # Drop the first token (the rule keyword); the rest are options.
    del tokens[0]
    parser = argparse.ArgumentParser()
    parser.add_argument('--disabled', dest='disabled', action='store')
    parser.add_argument('--enabled', dest='enabled', action='store')
    parsed = clean_args(vars(parser.parse_args(tokens)))
    return parsed
def replace_variables(sentence: List[str],
                      sentence_variables: Dict[str, str]) -> Tuple[List[str], List[str]]:
    """Replace abstract variables in text with their concrete counterparts.

    Tokens naming a variable are expanded into the words of their
    concrete value and tagged with the variable name; every other token
    is kept as-is and tagged ``"O"``.

    Returns:
        A ``(tokens, tags)`` pair of equal-length lists.
    """
    tokens: List[str] = []
    tags: List[str] = []
    for token in sentence:
        if token in sentence_variables:
            expansion = sentence_variables[token].split()
            tokens.extend(expansion)
            tags.extend([token] * len(expansion))
        else:
            tokens.append(token)
            tags.append("O")
    return tokens, tags
def get_local_environnement(self):
    """Build a local environment dict: a copy of ``os.environ`` overlaid
    with this object's own environment variables.

    We cannot simply update the global ``os.environ`` because that would
    leak into every other check.

    :return: local environment variables
    :rtype: dict
    """
    merged = dict(os.environ)
    merged.update(self.env)
    return merged
return local_env | Mix the environment and the environment variables into a new local
environment dictionary
Note: We cannot just update the global os.environ because this
would affect all other checks.
:return: local environment variables
:rtype: dict |
def add_behave_arguments(parser):
    """Mirror behave's own command line options onto ``parser``.

    Options that clash with this tool's flags are re-prefixed as
    ``--behave-<name>``; a ``paths`` positional is always added.
    """
    # Options that conflict with our own flags and must be renamed.
    conflicts = [
        '--no-color',
        '--version',
        '-c',
        '-k',
        '-v',
        '-S',
        '--simple',
    ]
    parser.add_argument(
        'paths',
        action='store',
        nargs='*',
        help="Feature directory, file or file location (FILE:LINE)."
    )
    for fixed, keywords in behave_options:
        keywords = keywords.copy()  # don't mutate the shared behave_options table
        if not fixed:
            continue
        option_strings = []
        for option in fixed:
            if option in conflicts:
                # Re-prefix clashing options as --behave-<name>.
                prefix = '--' if option.startswith('--') else '-'
                option = option.replace(prefix, '--behave-', 1)
            option_strings.append(option)
        if 'config_help' in keywords:
            keywords['help'] = keywords['config_help']
            del keywords['config_help']
        parser.add_argument(*option_strings, **keywords) | Additional command line arguments extracted directly from behave
def sget_steptime(self, cycle, step, dataset_number=None):
    """Return the step-time series for the given cycle and step.

    Convenience for::

        dfdata[(dfdata[cycle_index_header] == cycle) &
               (dfdata[step_index_header] == step)][step_time_header]

    Args:
        cycle: cycle number
        step: step number (an int; a list/tuple falls back to its first item)
        dataset_number: the dataset number (automatic selection if None)

    Returns:
        pandas.Series, or None if the dataset is missing or the
        selection is empty.
    """
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    cycle_index_header = self.headers_normal.cycle_index_txt
    step_time_header = self.headers_normal.step_time_txt
    step_index_header = self.headers_normal.step_index_txt
    test = self.datasets[dataset_number].dfdata
    if isinstance(step, (list, tuple)):
        # Fix: typo ("varialbe") and missing separators in the warning
        # text, which previously rendered as one run-together sentence.
        warnings.warn(
            f"The variable step is a list. Should be an integer. {step}"
        )
        step = step[0]
    c = test.loc[
        (test[cycle_index_header] == cycle) &
        (test[step_index_header] == step), :
    ]
    if not self.is_empty(c):
        t = c[step_time_header]
        return t
    else:
        return None
Convenience function; same as issuing
dfdata[(dfdata[cycle_index_header] == cycle) &
(dfdata[step_index_header] == step)][step_time_header]
Args:
cycle: cycle number
step: step number
dataset_number: the dataset number (automatic selection if None)
Returns:
pandas.Series or None if empty |
def _get_jwt_for_audience(self, audience):
    """Return a cached JWT for ``audience``, minting a new one when the
    cache entry is missing or expired.

    Args:
        audience (str): The intended audience.

    Returns:
        bytes: The encoded JWT.
    """
    token, expiry = self._cache.get(audience, (None, None))
    if token is None or expiry < _helpers.utcnow():
        token, expiry = self._make_jwt_for_audience(audience)
        self._cache[audience] = token, expiry
    return token | Get a JWT For a given audience.
If there is already an existing, non-expired token in the cache for
the audience, that token is used. Otherwise, a new token will be
created.
Args:
audience (str): The intended audience.
Returns:
bytes: The encoded JWT. |
def subvolume_find_new(name, last_gen):
    """List files modified in subvolume ``name`` since transid marker
    ``last_gen`` (wraps ``btrfs subvolume find-new``).

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.subvolume_find_new /var/volumes/tmp 1024
    """
    cmd = ['btrfs', 'subvolume', 'find-new', name, last_gen]
    res = __salt__['cmd.run_all'](cmd)
    salt.utils.fsutils._verify_run(res)
    lines = res['stdout'].splitlines()
    # "inode ..." lines carry the file path as the last field.
    files = [l.split()[-1] for l in lines if l.startswith('inode')]
    # The final output line reports the current transid marker.
    transid = lines[-1].split()[-1]
    return {
        'files': files,
        'transid': transid,
    } | List the recently modified files in a subvolume
} | List the recently modified files in a subvolume
name
Name of the subvolume
last_gen
Last transid marker from where to compare
CLI Example:
.. code-block:: bash
salt '*' btrfs.subvolume_find_new /var/volumes/tmp 1024 |
def _apply_over_vars_with_dim(func, self, dim=None, **kwargs):
    """Dataset wrapper: apply ``func`` to every data variable that has
    dimension ``dim``; other variables are copied through unchanged."""
    ds = type(self)(coords=self.coords, attrs=self.attrs)
    for name, var in self.data_vars.items():
        if dim in var.dims:
            ds[name] = func(var, dim=dim, **kwargs)
        else:
            ds[name] = var
    return ds | wrapper for datasets
def substring_search(query, list_of_strings, limit_results=DEFAULT_LIMIT):
    """Search ``list_of_strings`` for entries matching the query words.

    Query words are tried longest-first; at most ``limit_results``
    matches are returned, in input order.
    """
    query_words = sorted(query.split(' '), key=len, reverse=True)
    matches = []
    for candidate in list_of_strings:
        if anyword_substring_search(candidate.split(' '), query_words):
            matches.append(candidate)
            if len(matches) == limit_results:
                break
    return matches
def use_comparative_sequence_rule_enabler_view(self):
    """Pass through to provider
    SequenceRuleEnablerLookupSession.use_comparative_sequence_rule_enabler_view."""
    self._object_views['sequence_rule_enabler'] = COMPARATIVE
    for session in self._get_provider_sessions():
        try:
            session.use_comparative_sequence_rule_enabler_view()
        except AttributeError:
            # Not every provider session supports this view; skip those.
            pass | Pass through to provider SequenceRuleEnablerLookupSession.use_comparative_sequence_rule_enabler_view
def _get_event_cls(view_obj, events_map):
request = view_obj.request
view_method = getattr(view_obj, request.action)
event_action = (
getattr(view_method, '_event_action', None) or
request.action)
return events_map[event_action] | Helper function to get event class.
:param view_obj: Instance of View that processes the request.
:param events_map: Map of events from which event class should be
picked.
:returns: Found event class. |
def is_cleanly_mergable(*dicts: Dict[Any, Any]) -> bool:
    """Check that nothing would be overwritten if the given mappings were
    merged with ``deep_merge``.

    Two values are compatible when every key they share maps to values
    that are themselves cleanly mergable (nested mappings all the way
    down every shared path). Zero or one argument is trivially mergable;
    more than two arguments are checked pairwise.
    """
    if len(dicts) > 2:
        return all(
            is_cleanly_mergable(left, right)
            for left, right in itertools.combinations(dicts, 2)
        )
    if len(dicts) < 2:
        return True
    first, second = dicts
    if not (isinstance(first, Mapping) and isinstance(second, Mapping)):
        return False
    common = set(first.keys()) & set(second.keys())
    return all(is_cleanly_mergable(first[key], second[key]) for key in common)
Examples:
>>> is_cleanly_mergable({"a": 1}, {"b": 2}, {"c": 3})
True
>>> is_cleanly_mergable({"a": 1}, {"b": 2}, {"a": 0, "c": 3})
False
>>> is_cleanly_mergable({"a": 1, "b": {"ba": 2}}, {"c": 3, "b": {"bb": 4}})
True
>>> is_cleanly_mergable({"a": 1, "b": {"ba": 2}}, {"b": {"ba": 4}})
False |
def _dedent(text):
lines = text.split('\n')
if len(lines) == 1:
indent = 0
elif lines[0].strip():
raise ValueError('when multiple lines, first line must be blank')
elif lines[-1].strip():
raise ValueError('last line must only contain indent whitespace')
else:
indent = len(lines[-1])
if any(line[:indent].strip() for line in lines):
raise ValueError(
'indents must equal or exceed indent in last line')
lines = [line[indent:] for line in lines][1:-1]
return indent, '\n'.join(lines) | Remove common indentation from each line in a text block.
When text block is a single line, return text block. Otherwise
determine common indentation from last line, strip common
indentation from each line, and return text block consisting of
inner lines (don't include first and last lines since they either
empty or contain whitespace and are present in baselined
string to make them pretty and delineate the common indentation).
:param str text: text block
:returns: text block with common indentation removed
:rtype: str
:raises ValueError: when text block violates whitespace rules |
def revnet(inputs, hparams, reuse=None):
    """Build a Tensor2Tensor memory-optimized RevNet.

    Args:
        inputs: [NxHxWx3] tensor of input images.
        hparams: HParams carrying per-block depths, layer counts,
            batch-norm flags, strides, the bottleneck flag and the
            initial-block settings (see the accompanying docs).
        reuse: Whether to reuse the default variable scope.

    Returns:
        [batch_size, hidden_dim] pre-logits tensor.
    """
    training = hparams.mode == tf.estimator.ModeKeys.TRAIN
    with tf.variable_scope('RevNet', reuse=reuse):
        # Initial stem splits the activations into the two RevNet streams.
        x1, x2 = init(inputs,
                      num_channels=hparams.num_channels_init_block,
                      dim=hparams.dim,
                      kernel_size=hparams.init_kernel_size,
                      maxpool=hparams.init_maxpool,
                      stride=hparams.init_stride,
                      training=training)
        for block_num in range(len(hparams.num_layers_per_block)):
            block = {'depth': hparams.num_channels[block_num],
                     'num_layers': hparams.num_layers_per_block[block_num],
                     'first_batch_norm': hparams.first_batch_norm[block_num],
                     'stride': hparams.strides[block_num],
                     'bottleneck': hparams.bottleneck}
            x1, x2 = unit(x1, x2, block_num, dim=hparams.dim, training=training,
                          **block)
        # Merge the two streams into the final pre-logit features.
        pre_logits = final_block(x1, x2, dim=hparams.dim, training=training)
    return pre_logits | Uses Tensor2Tensor memory optimized RevNet block to build a RevNet.
Args:
inputs: [NxHxWx3] tensor of input images to the model.
hparams: HParams object that contains the following parameters,
in addition to the parameters contained in the basic_params1() object in
the common_hparams module:
num_channels_first - A Python list where each element represents the
depth of the first and third convolutional layers in the bottleneck
residual unit for a given block.
num_channels_second - A Python list where each element represents the
depth of the second convolutional layer in the bottleneck residual
unit for a given block.
num_layers_per_block - A Python list containing the number of RevNet
layers for each block.
first_batch_norm - A Python list containing booleans representing the
presence of a batch norm layer at the beginning of a given block.
strides - A Python list containing integers representing the stride of
the residual function for each block.
num_channels_init_block - An integer representing the number of channels
for the convolutional layer in the initial block.
dimension - A string (either "2d" or "3d") that decides if the RevNet is
2-dimensional or 3-dimensional.
reuse: Whether to reuse the default variable scope.
Returns:
[batch_size, hidden_dim] pre-logits tensor from the bottleneck RevNet. |
def _json_safe(cls, value):
if type(value) == date:
return str(value)
elif type(value) == datetime:
return value.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(value, ObjectId):
return str(value)
elif isinstance(value, _BaseFrame):
return value.to_json_type()
elif isinstance(value, (list, tuple)):
return [cls._json_safe(v) for v in value]
elif isinstance(value, dict):
return {k:cls._json_safe(v) for k, v in value.items()}
return value | Return a JSON safe value |
def _package_conf_file_to_dir(file_name):
    """Convert a supported config file into a config directory of the
    same name, preserving an existing flat file as ``<dir>/tmp``.

    Returns True when a conversion happened, False when the path was
    already a directory. (Unsupported names fall through, returning None.)
    """
    if file_name in SUPPORTED_CONFS:
        path = BASE_PATH.format(file_name)
        if os.path.exists(path):
            if os.path.isdir(path):
                # Already a directory; nothing to convert.
                return False
            else:
                # Move the flat file aside, create the directory, then
                # park the old file inside it as "tmp".
                os.rename(path, path + '.tmpbak')
                os.mkdir(path, 0o755)
                os.rename(path + '.tmpbak', os.path.join(path, 'tmp'))
                return True
        else:
            os.mkdir(path, 0o755)
            return True | Convert a config file to a config directory.
def from_representation(self, data):
    """Convert a representation value to ``bool``.

    :raises ValueError: when ``data`` is not one of the accepted
        true/false spellings.
    """
    for accepted, result in ((self._TRUE_VALUES, True),
                             (self._FALSE_VALUES, False)):
        if data in accepted:
            return result
    raise ValueError(
        "{type} type value must be one of {values}".format(
            type=self.type,
            values=self._TRUE_VALUES.union(self._FALSE_VALUES)
        )
    )
def add_term(self, t):
    """Add root-level term ``t`` to this section and take ownership.

    Should only be used on root-level terms.

    :raises GenerateError: when ``t``'s parent is not 'root'.
    """
    if t not in self.terms:
        if t.parent_term_lc == 'root':
            self.terms.append(t)
            self.doc.add_term(t, add_section=False)
            t.set_ownership()
        else:
            raise GenerateError("Can only add or move root-level terms. Term '{}' parent is '{}' "
                                .format(t, t.parent_term_lc))
    assert t.section or t.join_lc == 'root.root', t | Add a term to this section and set it's ownership. Should only be used on root level terms
def isom(self,coolingFactor=None,EdgeAttribute=None,initialAdaptation=None,\
    maxEpoch=None,minAdaptation=None,minRadius=None,network=None,NodeAttribute=None,\
    nodeList=None,radius=None,radiusConstantTime=None,singlePartition=None,\
    sizeFactor=None,verbose=None):
    """Execute the Inverted Self-Organizing Map layout on a network via
    the CyREST ``/isom`` endpoint and return the REST response.

    All parameters are optional ISOM tuning knobs (numeric values passed
    as strings); ``network`` selects the target network by name, by
    ``SUID:`` prefix, or CURRENT/blank for the current network.
    """
    network=check_network(self,network,verbose=verbose)
    PARAMS=set_param(['coolingFactor','EdgeAttribute','initialAdaptation',\
        'maxEpoch','minAdaptation','minRadius','network','NodeAttribute','nodeList',\
        'radius','radiusConstantTime','singlePartition','sizeFactor'],[coolingFactor,\
        EdgeAttribute,initialAdaptation,maxEpoch,minAdaptation,minRadius,network,\
        NodeAttribute,nodeList,radius,radiusConstantTime,singlePartition,sizeFactor])
    response=api(url=self.__url+"/isom", PARAMS=PARAMS, method="POST", verbose=verbose)
    return response | Execute the Inverted Self-Organizing Map Layout on a network.
:param coolingFactor (string, optional): Cooling factor, in numeric value
:param EdgeAttribute (string, optional): The name of the edge column contai
ning numeric values that will be used as weights in the layout algor
ithm. Only columns containing numeric values are shown
:param initialAdaptation (string, optional): Initial adaptation, in numeric
value
:param maxEpoch (string, optional): Number of iterations, in numeric value
:param minAdaptation (string, optional): Minimum adaptation value, in numer
ic value
:param minRadius (string, optional): Minimum radius, in numeric value
:param network (string, optional): Specifies a network by name, or by SUID
if the prefix SUID: is used. The keyword CURRENT, or a blank value c
an also be used to specify the current network.
:param NodeAttribute (string, optional): The name of the node column contai
ning numeric values that will be used as weights in the layout algor
ithm. Only columns containing numeric values are shown
:param nodeList (string, optional): Specifies a list of nodes. The keywords
all, selected, or unselected can be used to specify nodes by their
selection state. The pattern COLUMN:VALUE sets this parameter to any
rows that contain the specified column value; if the COLUMN prefix
is not used, the NAME column is matched by default. A list of COLUMN
:VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be
used to match multiple values.
:param radius (string, optional): Radius, in numeric value
:param radiusConstantTime (string, optional): Radius constant, in numeric v
alue
:param singlePartition (string, optional): Don't partition graph before lay
out; boolean values only, true or false; defaults to false
:param sizeFactor (string, optional): Size factor, in numeric value |
def _should_set(self, key, mode):
if mode is None or mode not in ["nx", "xx"]:
return True
if mode == "nx":
if key in self.redis:
return False
elif key not in self.redis:
return False
return True | Determine if it is okay to set a key.
If the mode is None, returns True; otherwise, returns True or False based on
the value of ``key`` and the ``mode`` (nx | xx). |
def decorate_event_js(js_code):
    """Decorator factory that marks a method as an event and attaches
    the client-side javascript snippet used to emit it.

    Args:
        js_code (str): javascript code generated for the event on the
            client side.
    """
    def mark_as_event(method):
        # Tag the method in place; the callable itself is unchanged.
        setattr(method, "__is_event", True)
        setattr(method, "_js_code", js_code)
        return method
    return mark_as_event
Args:
js_code (str): javascript code to generate the event client-side.
js_code is added to the widget html as
widget.attributes['onclick'] = js_code%{'emitter_identifier':widget.identifier, 'event_name':'onclick'} |
def get_position_searchable(self):
    """Return a dict mapping each keyring item id to the item's
    'searchable' attribute string."""
    ids = gkr.list_item_ids_sync(self.keyring)
    position_searchable = {}
    for i in ids:
        item_attrs = gkr.item_get_attributes_sync(self.keyring, i)
        position_searchable[i] = item_attrs['searchable']
    return position_searchable | Return dict of the position and corrasponding searchable str
def _clear(self, pipe=None):
redis = self.redis if pipe is None else pipe
redis.delete(self.key) | Helper for clear operations.
:param pipe: Redis pipe in case update is performed as a part
of transaction.
:type pipe: :class:`redis.client.StrictPipeline` or
:class:`redis.client.StrictRedis` |
def discharge_coefficient_to_K(D, Do, C):
    r'''Convert a discharge coefficient to a standard loss coefficient,
    for use in computing the actual pressure drop of an orifice or
    other device.

    .. math::
        K = \left[\frac{\sqrt{1-\beta^4(1-C^2)}}{C\beta^2} - 1\right]^2

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    C : float
        Coefficient of discharge of the orifice, [-]

    Returns
    -------
    K : float
        Loss coefficient with respect to the velocity and density of
        the fluid just upstream of the orifice, [-]

    Examples
    --------
    >>> discharge_coefficient_to_K(D=0.07366, Do=0.05, C=0.61512)
    5.2314291729754
    '''
    # Fix: the body previously began with a stray bare `r` (residue of a
    # stripped raw docstring) that raised NameError on every call.
    beta = Do/D
    beta2 = beta*beta
    beta4 = beta2*beta2
    return ((1.0 - beta4*(1.0 - C*C))**0.5/(C*beta2) - 1.0)**2
return ((1.0 - beta4*(1.0 - C*C))**0.5/(C*beta2) - 1.0)**2 | r'''Converts a discharge coefficient to a standard loss coefficient,
for use in computation of the actual pressure drop of an orifice or other
device.
.. math::
K = \left[\frac{\sqrt{1-\beta^4(1-C^2)}}{C\beta^2} - 1\right]^2
Parameters
----------
D : float
Upstream internal pipe diameter, [m]
Do : float
Diameter of orifice at flow conditions, [m]
C : float
Coefficient of discharge of the orifice, [-]
Returns
-------
K : float
Loss coefficient with respect to the velocity and density of the fluid
just upstream of the orifice, [-]
Notes
-----
If expansibility is used in the orifice calculation, the result will not
match with the specified pressure drop formula in [1]_; it can almost
be matched by dividing the calculated mass flow by the expansibility factor
and using that mass flow with the loss coefficient.
Examples
--------
>>> discharge_coefficient_to_K(D=0.07366, Do=0.05, C=0.61512)
5.2314291729754
References
----------
.. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
.. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
Differential Devices Inserted in Circular Cross-Section Conduits Running
Full -- Part 2: Orifice Plates. |
def get_table_info(self, tablename):
    """Return information about the fields of ``tablename``.

    Returns: OrderedDict(("fieldname", MyDBRow), ...). Each row gains
    "caption" and "tooltip" entries, filled from ``self.gui_info`` when
    that mapping has data for the table/field (None otherwise).

    :raises RuntimeError: when no info can be obtained for the table.
    """
    conn = self.__get_conn()
    ret = a99.get_table_info(conn, tablename)
    if len(ret) == 0:
        raise RuntimeError("Cannot get info for table '{}'".format(tablename))
    more = self.gui_info.get(tablename)
    for row in ret.values():
        caption, tooltip = None, None
        if more:
            info = more.get(row["name"])
            if info:
                caption, tooltip = info
        row["caption"] = caption
        row["tooltip"] = tooltip
    return ret | Returns information about fields of a specific table
def train(self, conversation):
    """Train the chat bot on ``conversation``, an ordered list of
    statement texts forming a single dialog.

    Each statement is linked to the previous one via ``in_response_to``
    and all statements are persisted in one bulk ``create_many`` call.
    """
    previous_statement_text = None
    previous_statement_search_text = ''
    statements_to_create = []
    for conversation_count, text in enumerate(conversation):
        if self.show_training_progress:
            utils.print_progress_bar(
                'List Trainer',
                conversation_count + 1, len(conversation)
            )
        statement_search_text = self.chatbot.storage.tagger.get_bigram_pair_string(text)
        statement = self.get_preprocessed_statement(
            Statement(
                text=text,
                search_text=statement_search_text,
                in_response_to=previous_statement_text,
                search_in_response_to=previous_statement_search_text,
                conversation='training'
            )
        )
        # The current statement becomes the context for the next one.
        previous_statement_text = statement.text
        previous_statement_search_text = statement_search_text
        statements_to_create.append(statement)
    self.chatbot.storage.create_many(statements_to_create) | Train the chat bot based on the provided list of
statements that represents a single conversation. |
def cancel_expired_invitations(invitations=None):
    """Mark pending invitations older than the configured lifetime as
    expired.

    The lifetime comes from Waldur Core settings ("INVITATION_LIFETIME");
    any invitation created before ``now - INVITATION_LIFETIME`` is
    switched to the EXPIRED state.

    :param invitations: optional queryset restricting the operation;
        defaults to all pending invitations.
    """
    expiration_date = timezone.now() - settings.WALDUR_CORE['INVITATION_LIFETIME']
    if invitations is None:
        # Fix: `if not invitations` evaluated the queryset (an extra DB
        # query) and wrongly replaced an explicitly-passed *empty*
        # queryset with all pending invitations.
        invitations = models.Invitation.objects.filter(state=models.Invitation.State.PENDING)
    invitations = invitations.filter(created__lte=expiration_date)
    invitations.update(state=models.Invitation.State.EXPIRED)
invitations.update(state=models.Invitation.State.EXPIRED) | Invitation lifetime must be specified in Waldur Core settings with parameter
"INVITATION_LIFETIME". Invitations created before the expiration cut-off are marked as expired. |
def copyNode(node, children=False, parent=False):
    """Copy an XML node into the TEI namespace.

    :param node: Etree node to copy
    :param children: Also copy the node's text and child nodes when True
    :param parent: Append the copy to this parent element when given
    :return: New Element
    """
    if parent is not False:
        element = SubElement(
            parent,
            node.tag,
            attrib=node.attrib,
            nsmap={None: "http://www.tei-c.org/ns/1.0"}
        )
    else:
        element = Element(
            node.tag,
            attrib=node.attrib,
            nsmap={None: "http://www.tei-c.org/ns/1.0"}
        )
    if children:
        if node.text:
            element._setText(node.text)
        for child in xmliter(node):
            element.append(copy(child))
    return element | Copy an XML Node
:param node: Etree Node
:param children: Copy children nodes is set to True
:param parent: Append copied node to parent if given
:return: New Element |
def week_to_datetime(iso_year, iso_week):
    """Return a datetime for the start (midnight of day 0) of the given
    ISO year and week."""
    start_of_week = iso_to_gregorian(iso_year, iso_week, 0)
    midnight = datetime.time(0)
    return datetime.datetime.combine(start_of_week, midnight)
def LDAP_search(pattern_search, attribute):
    """Run an LDAP search for ``pattern_search`` fetching ``attribute``,
    and return the raw connection response."""
    connection, ldap_base = _get_LDAP_connection()
    connection.search(
        search_base=ldap_base,
        search_filter=pattern_search,
        attributes=[attribute]
    )
    return connection.response | Do a LDAP search
def _clean_streams(repo, mapped_streams):
    """Clean mapped stdout/stderr capture files: delete files that are
    not tracked in the git index, restore tracked ones from their
    indexed blobs."""
    for stream_name in ('stdout', 'stderr'):
        stream = mapped_streams.get(stream_name)
        if not stream:
            continue
        path = os.path.relpath(stream, start=repo.working_dir)
        if (path, 0) not in repo.index.entries:
            # Untracked capture file: just delete it.
            os.remove(stream)
        else:
            # Tracked: overwrite with the indexed blob contents.
            blob = repo.index.entries[(path, 0)].to_blob(repo)
            with open(path, 'wb') as fp:
                fp.write(blob.data_stream.read()) | Clean mapped standard streams.
def put_logging(Bucket,
                TargetBucket=None, TargetPrefix=None, TargetGrants=None,
                region=None, key=None, keyid=None, profile=None):
    """Given a valid config, update the logging parameters for a bucket.

    Returns {'updated': True, 'name': bucket} when the parameters were
    updated and {'updated': False, 'error': ...} when they were not.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_s3_bucket.put_logging my_bucket log_bucket '[{...}]' prefix
    """
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        # Fix: decode TargetGrants *before* assembling the payload; the
        # previous code parsed it afterwards, so the raw JSON string
        # (not the decoded structure) was sent to AWS.
        if TargetGrants is not None and isinstance(TargetGrants, six.string_types):
            TargetGrants = salt.utils.json.loads(TargetGrants)
        logstate = {}
        targets = {'TargetBucket': TargetBucket,
                   'TargetGrants': TargetGrants,
                   'TargetPrefix': TargetPrefix}
        # Loop variable renamed so it no longer shadows the `key`
        # AWS credential parameter.
        for opt, val in six.iteritems(targets):
            if val is not None:
                logstate[opt] = val
        logstatus = {'LoggingEnabled': logstate} if logstate else {}
        conn.put_bucket_logging(Bucket=Bucket, BucketLoggingStatus=logstatus)
        return {'updated': True, 'name': Bucket}
    except ClientError as e:
        return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
Returns {updated: true} if parameters were updated and returns
{updated: False} if parameters were not updated.
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.put_logging my_bucket log_bucket '[{...}]' prefix |
def removeLayer(self, layer):
    """Remove ``layer`` from this glyph.

    ``layer`` may be a glyph-layer object (BaseGlyph) or a string layer
    name.

        >>> glyph.removeLayer("background")
    """
    if isinstance(layer, BaseGlyph):
        layer = layer.layer.name
    layerName = layer
    layerName = normalizers.normalizeLayerName(layerName)
    if self._getLayer(layerName).layer.name == layerName:
        self._removeLayer(layerName) | Remove ``layer`` from this glyph.
representing a layer name. |
def getStartTag(self):
    """getStartTag - Return the start tag (with attributes) as HTML.

    @return - String of start tag with attributes
    """
    attributeStrings = []
    for name, val in self._attributes.items():
        if val:
            val = tostr(val)
        if val or name not in TAG_ITEM_BINARY_ATTRIBUTES:
            val = escapeQuotes(val)
            attributeStrings.append('%s="%s"' %(name, val) )
        else:
            # Binary attribute with an empty value renders as a bare name.
            attributeStrings.append(name)
    if attributeStrings:
        attributeString = ' ' + ' '.join(attributeStrings)
    else:
        attributeString = ''
    if self.isSelfClosing is False:
        return "%s<%s%s >" %(self._indent, self.tagName, attributeString)
    else:
        return "%s<%s%s />" %(self._indent, self.tagName, attributeString) | getStartTag - Returns the start tag represented as HTML
@return - String of start tag with attributes |
def _try_dump_cnt(self):
now = time.time()
if now - self._last_dump_cnt > 60:
self._last_dump_cnt = now
self._dump_cnt()
self._print_counter_log() | Dump counters every 60 seconds |
def put(self, device_id: int) -> Device:
    """Update the Device resource identified by ``device_id`` and
    persist the change (aborts when the device does not exist).

    :returns: the updated Device
    """
    device = self._get_or_abort(device_id)
    self.update(device)
    session.commit()
    session.add(device)
    return device | Updates the Device Resource with the
def make(self):
    """Turn the fetched files into a local git repo and add the
    auxiliary files (readme, contributing, license, cover, metadata)."""
    logger.debug("preparing to add all git files")
    num_added = self.local_repo.add_all_files()
    if num_added:
        self.local_repo.commit("Initial import from Project Gutenberg")
    file_handler = NewFilesHandler(self)
    file_handler.add_new_files()
    # Second pass commits the auxiliary files generated above.
    num_added = self.local_repo.add_all_files()
    if num_added:
        self.local_repo.commit(
            "Updates Readme, contributing, license files, cover, metadata."
        ) | turn fetched files into a local repo, make auxiliary files
def get_s2_pixel_cloud_detector(threshold=0.4, average_over=4, dilation_size=2, all_bands=True):
    """Convenience wrapper constructing a pixel-based Sentinel-2 cloud
    detector (``S2PixelCloudDetector``) with the given settings."""
    return S2PixelCloudDetector(threshold=threshold,
                                average_over=average_over,
                                dilation_size=dilation_size,
                                all_bands=all_bands) | Wrapper function for pixel-based S2 cloud detector `S2PixelCloudDetector` |
def chdir(path):
    """Change the working directory to `path` for the duration of this
    context manager, restoring the previous directory on exit (even
    when the body raises).

    :param str path: The path to change to
    """
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        # Always restore, even if the body raised.
        os.chdir(previous)
def inside_softimage():
    """Return True when running inside Softimage's embedded Python,
    False otherwise (including when running inside Maya)."""
    try:
        import maya  # probe only: if Maya is importable we are in Maya
        return False
    except ImportError:
        pass
    try:
        from win32com.client import Dispatch as disp
        disp('XSI.Application')
        return True
    except Exception:
        # Fix: narrowed the bare `except:` (which also swallowed
        # SystemExit/KeyboardInterrupt) to `except Exception:`.
        return False
def constructor(
        self,
        name=None,
        function=None,
        return_type=None,
        arg_types=None,
        header_dir=None,
        header_file=None,
        recursive=None):
    """Return a reference to the constructor declaration matching the
    defined criteria (name, custom predicate, signature, location).

    Delegates to ``_find_single`` with the constructor matcher and
    declaration type (single-result lookup).
    """
    return (
        self._find_single(
            self._impl_matchers[scopedef_t.constructor],
            name=name,
            function=function,
            decl_type=self._impl_decl_types[
                scopedef_t.constructor],
            return_type=return_type,
            arg_types=arg_types,
            header_dir=header_dir,
            header_file=header_file,
            recursive=recursive)
    ) | returns reference to constructor declaration, that is matched
defined criteria |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.