def set_shell(self, svc_ref):
"""
Binds the given shell service.
:param svc_ref: A service reference
"""
if svc_ref is None:
return
with self._lock:
# Get the service
self._shell_ref = svc_ref
self._shell = self._context.get_service(self._shell_ref)
# Set the readline completer
if readline is not None:
readline.set_completer(self.readline_completer)
# Set the flag
self._shell_event.set()
def extract_keys(self, key_list):
    """Extract the subset of this mapping whose keys appear in `key_list`.
    `key_list` may be a comma-delimited string or a sequence of keys;
    missing keys are silently skipped. Usage (on an instance of this class):
    >>> d = {'a': 1, 'b': 2, 'c': 3}
    >>> d.extract_keys('b,c,d')
    {'b': 2, 'c': 3}
    >>> d.extract_keys(['b', 'c', 'd'])
    {'b': 2, 'c': 3}
    """
    if isinstance(key_list, str):
        key_list = key_list.split(',')
    return type(self)([(k, self[k]) for k in key_list if k in self])
def _validate_action_parameters(func, params):
""" Verifies that the parameters specified are actual parameters for the
function `func`, and that the field types are FIELD_* types in fields.
"""
if params is not None:
# Verify field name is valid
valid_fields = [getattr(fields, f) for f in dir(fields) \
if f.startswith("FIELD_")]
for param in params:
param_name, field_type = param['name'], param['fieldType']
if param_name not in func.__code__.co_varnames:
raise AssertionError("Unknown parameter name {0} specified for"\
" action {1}".format(
param_name, func.__name__))
if field_type not in valid_fields:
raise AssertionError("Unknown field type {0} specified for"\
" action {1} param {2}".format(
field_type, func.__name__, param_name))
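A minimal usage sketch of the shape this check expects; the `fields` stand-in and the action function below are hypothetical, not the real module:

class fields:  # hypothetical stand-in for the real fields module
    FIELD_TEXT = 'text'
    FIELD_NUMERIC = 'numeric'

def give_discount(amount):  # hypothetical action function
    return amount

# Each param entry names a function argument and one of the FIELD_* types:
params = [{'name': 'amount', 'fieldType': fields.FIELD_NUMERIC}]
# _validate_action_parameters(give_discount, params) passes silently;
# an unknown name or field type raises AssertionError.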
def _migrate_subresource(subresource, parent, migrations):
"""
Migrate a resource's subresource
:param subresource: the perch.SubResource instance
:param parent: the parent perch.Document instance
:param migrations: the migrations for a resource
"""
for key, doc in getattr(parent, subresource.parent_key, {}).items():
for migration in migrations['migrations']:
instance = migration(subresource(id=key, **doc))
parent._resource['doc_version'] = unicode(migration.version)
instance = _migrate_subresources(
instance,
migrations['subresources']
)
doc = instance._resource
doc.pop('id', None)
doc.pop(instance.resource_type + '_id', None)
getattr(parent, subresource.parent_key)[key] = doc
return parent
def _make_request_with_auth_fallback(self, url, headers=None, params=None):
"""
Generic request handler for OpenStack API requests
Raises specialized Exceptions for commonly encountered error codes
"""
self.log.debug("Request URL and Params: %s, %s", url, params)
try:
resp = requests.get(
url,
headers=headers,
verify=self._ssl_verify,
params=params,
timeout=DEFAULT_API_REQUEST_TIMEOUT,
proxies=self.proxy_config,
)
resp.raise_for_status()
except requests.exceptions.HTTPError as e:
self.log.debug("Error contacting openstack endpoint: %s", e)
if resp.status_code == 401:
self.log.info('Need to reauthenticate before next check')
# Delete the scope, we'll populate a new one on the next run for this instance
self.delete_current_scope()
elif resp.status_code == 409:
raise InstancePowerOffFailure()
elif resp.status_code == 404:
raise e
else:
raise
return resp.json()
def _pretend_to_run(self, migration, method):
"""
Pretend to run the migration.
:param migration: The migration
:type migration: eloquent.migrations.migration.Migration
:param method: The method to execute
:type method: str
"""
for query in self._get_queries(migration, method):
name = migration.__class__.__name__
self._note('<info>%s:</info> <comment>%s</comment>' % (name, query))
def send_command(self, command, as_list=False):
"""Send a :class:`~panoramisk.actions.Command` to the server::
manager = Manager()
resp = manager.send_command('http show status')
Return a response :class:`~panoramisk.message.Message`.
See https://wiki.asterisk.org/wiki/display/AST/ManagerAction_Command
"""
action = actions.Action({'Command': command, 'Action': 'Command'},
as_list=as_list)
return self.send_action(action)
def port_bindings(val, **kwargs):
'''
On the CLI, these are passed as multiple instances of a given CLI option.
In Salt, we accept these as a comma-delimited list but the API expects a
Python dictionary mapping ports to their bindings. The format the API
expects is complicated depending on whether or not the external port maps
to a different internal port, or if the port binding is for UDP instead of
TCP (the default). For reference, see the "Port bindings" section in the
docker-py documentation at the following URL:
http://docker-py.readthedocs.io/en/stable/api.html
'''
validate_ip_addrs = kwargs.get('validate_ip_addrs', True)
if not isinstance(val, dict):
if not isinstance(val, list):
try:
val = helpers.split(val)
except AttributeError:
val = helpers.split(six.text_type(val))
for idx in range(len(val)):
if not isinstance(val[idx], six.string_types):
val[idx] = six.text_type(val[idx])
def _format_port(port_num, proto):
return six.text_type(port_num) + '/udp' if proto.lower() == 'udp' else port_num
bindings = {}
for binding in val:
bind_parts = helpers.split(binding, ':')
num_bind_parts = len(bind_parts)
if num_bind_parts == 1:
# Single port or port range being passed through (no
# special mapping)
container_port = six.text_type(bind_parts[0])
if container_port == '':
raise SaltInvocationError(
'Empty port binding definition found'
)
container_port, _, proto = container_port.partition('/')
try:
start, end = helpers.get_port_range(container_port)
except ValueError as exc:
# Using __str__() to avoid deprecation warning for using
# the message attribute of the ValueError.
raise SaltInvocationError(exc.__str__())
bind_vals = [
(_format_port(port_num, proto), None)
for port_num in range(start, end + 1)
]
elif num_bind_parts == 2:
if bind_parts[0] == '':
raise SaltInvocationError(
'Empty host port in port binding definition '
'\'{0}\''.format(binding)
)
if bind_parts[1] == '':
raise SaltInvocationError(
'Empty container port in port binding definition '
'\'{0}\''.format(binding)
)
container_port, _, proto = bind_parts[1].partition('/')
try:
cport_start, cport_end = \
helpers.get_port_range(container_port)
hport_start, hport_end = \
helpers.get_port_range(bind_parts[0])
except ValueError as exc:
# Using __str__() to avoid deprecation warning for
# using the message attribute of the ValueError.
raise SaltInvocationError(exc.__str__())
if (hport_end - hport_start) != (cport_end - cport_start):
# Port range is mismatched
raise SaltInvocationError(
'Host port range ({0}) does not have the same '
'number of ports as the container port range '
'({1})'.format(bind_parts[0], container_port)
)
cport_list = list(range(cport_start, cport_end + 1))
hport_list = list(range(hport_start, hport_end + 1))
bind_vals = [
(_format_port(cport_list[x], proto), hport_list[x])
for x in range(len(cport_list))
]
elif num_bind_parts == 3:
host_ip, host_port = bind_parts[0:2]
if validate_ip_addrs:
helpers.validate_ip(host_ip)
container_port, _, proto = bind_parts[2].partition('/')
try:
cport_start, cport_end = \
helpers.get_port_range(container_port)
except ValueError as exc:
# Using __str__() to avoid deprecation warning for
# using the message attribute of the ValueError.
raise SaltInvocationError(exc.__str__())
cport_list = list(range(cport_start, cport_end + 1))
if host_port == '':
hport_list = [None] * len(cport_list)
else:
try:
hport_start, hport_end = \
helpers.get_port_range(host_port)
except ValueError as exc:
# Using __str__() to avoid deprecation warning for
# using the message attribute of the ValueError.
raise SaltInvocationError(exc.__str__())
hport_list = list(range(hport_start, hport_end + 1))
if (hport_end - hport_start) != (cport_end - cport_start):
# Port range is mismatched
raise SaltInvocationError(
'Host port range ({0}) does not have the same '
'number of ports as the container port range '
'({1})'.format(host_port, container_port)
)
bind_vals = [(
_format_port(val, proto),
(host_ip,) if hport_list[idx] is None
else (host_ip, hport_list[idx])
) for idx, val in enumerate(cport_list)]
else:
raise SaltInvocationError(
'\'{0}\' is an invalid port binding definition (at most '
'3 components are allowed, found {1})'.format(
binding, num_bind_parts
)
)
for cport, bind_def in bind_vals:
if cport not in bindings:
bindings[cport] = bind_def
else:
if isinstance(bindings[cport], list):
# Append to existing list of bindings for this
# container port.
bindings[cport].append(bind_def)
else:
bindings[cport] = [bindings[cport], bind_def]
for idx in range(len(bindings[cport])):
if bindings[cport][idx] is None:
# Now that we are adding multiple
# bindings
try:
# Convert 1234/udp to 1234
bindings[cport][idx] = int(cport.split('/')[0])
except AttributeError:
# Port was tcp, the AttributeError
# signifies that the split failed
# because the port number was
# already defined as an integer.
# Just use the cport.
bindings[cport][idx] = cport
val = bindings
return val
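The mapping this produces follows the docker-py "Port bindings" shapes referenced in the docstring. An illustrative sketch of those value shapes (the numbers are arbitrary):

# Container ports map to an int, a (host_ip, host_port) tuple, a
# (host_ip,) tuple (ephemeral port on that IP), None (ephemeral port),
# or a list of these when one container port has multiple bindings.
example_bindings = {
    80: 8080,                # tcp container port 80 -> host port 8080
    '81/udp': 8081,          # udp ports carry a '/udp' suffix in the key
    82: ('10.0.0.1', 8082),  # bind to a specific host IP
    83: ('10.0.0.1',),       # specific host IP, ephemeral host port
    84: [8084, 8085],        # multiple bindings for one container port
}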
def run_command(self, cmd, new_prompt=True):
"""Run command in interpreter"""
if cmd == 'exit()':
self.exit_flag = True
self.write('\n')
return
# -- Special commands type I
# (transformed into commands executed in the interpreter)
# ? command
special_pattern = r"^%s (?:r\')?(?:u\')?\"?\'?([a-zA-Z0-9_\.]+)"
run_match = re.match(special_pattern % 'run', cmd)
help_match = re.match(r'^([a-zA-Z0-9_\.]+)\?$', cmd)
cd_match = re.match(r"^\!cd \"?\'?([a-zA-Z0-9_ \.]+)", cmd)
if help_match:
cmd = 'help(%s)' % help_match.group(1)
# run command
elif run_match:
filename = guess_filename(run_match.groups()[0])
cmd = "runfile('%s', args=None)" % remove_backslashes(filename)
# !cd system command
elif cd_match:
cmd = 'import os; os.chdir(r"%s")' % cd_match.groups()[0].strip()
# -- End of Special commands type I
# -- Special commands type II
# (don't need code execution in interpreter)
xedit_match = re.match(special_pattern % 'xedit', cmd)
edit_match = re.match(special_pattern % 'edit', cmd)
clear_match = re.match(r"^clear ([a-zA-Z0-9_, ]+)", cmd)
# (external) edit command
if xedit_match:
filename = guess_filename(xedit_match.groups()[0])
self.widget_proxy.edit(filename, external_editor=True)
# local edit command
elif edit_match:
filename = guess_filename(edit_match.groups()[0])
if osp.isfile(filename):
self.widget_proxy.edit(filename)
else:
self.stderr_write.write(
"No such file or directory: %s\n" % filename)
# remove reference (equivalent to MATLAB's clear command)
elif clear_match:
varnames = clear_match.groups()[0].replace(' ', '').split(',')
for varname in varnames:
try:
self.namespace.pop(varname)
except KeyError:
pass
# Execute command
elif cmd.startswith('!'):
# System ! command
pipe = programs.run_shell_command(cmd[1:])
txt_out = encoding.transcode( pipe.stdout.read().decode() )
txt_err = encoding.transcode( pipe.stderr.read().decode().rstrip() )
if txt_err:
self.stderr_write.write(txt_err)
if txt_out:
self.stdout_write.write(txt_out)
self.stdout_write.write('\n')
self.more = False
# -- End of Special commands type II
else:
# Command executed in the interpreter
# self.widget_proxy.set_readonly(True)
self.more = self.push(cmd)
# self.widget_proxy.set_readonly(False)
if new_prompt:
self.widget_proxy.new_prompt(self.p2 if self.more else self.p1)
if not self.more:
self.resetbuffer()
def wrap_as_node(self, func):
'wrap a function as a node'
name = self.get_name(func)
@wraps(func)
def wrapped(*args, **kwargs):
'wrapped version of func'
message = self.get_message_from_call(*args, **kwargs)
self.logger.info('calling "%s" with %r', name, message)
result = func(message)
# functions can return multiple values ("emit" multiple times)
# by yielding instead of returning. Handle this case by making
# a list of the results and processing them all after the
# generator successfully exits. If we were to process them as
# they came out of the generator, we might get a partially
# processed input sent down the graph. This may be possible in
# the future via a flag.
if isinstance(result, GeneratorType):
results = [
self.wrap_result(name, item)
for item in result
if item is not NoResult
]
self.logger.debug(
'%s returned generator yielding %d items', func, len(results)
)
            for item in results:
                self.route(name, item)
return tuple(results)
# the case of a direct return is simpler. wrap, route, and
# return the value.
else:
if result is NoResult:
return result
result = self.wrap_result(name, result)
self.logger.debug(
'%s returned single value %s', func, result
)
self.route(name, result)
return result
return wrapped
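The generator branch above deliberately drains the generator before routing anything. A minimal standalone sketch of that pattern (names here are illustrative, not part of the library):

from types import GeneratorType

def collect_emissions(func, *args, **kwargs):
    result = func(*args, **kwargs)
    if isinstance(result, GeneratorType):
        # Materialize every item first, so a failure mid-generator can
        # never leave a partially processed batch downstream.
        return tuple(result)
    return (result,)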
def iso8601_datetime(d):
    """
    Return a string representation of a date that the Twilio API understands
    Format is YYYY-MM-DDTHH:MM:SSZ. Returns None if d is not a string, datetime, or date
    """
    if d == values.unset:
        return d
    elif isinstance(d, (datetime.datetime, datetime.date)):
        return d.strftime('%Y-%m-%dT%H:%M:%SZ')
    elif isinstance(d, str):
        return d
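For example, the strftime pattern above serializes a datetime like this (strings pass through unchanged):

import datetime

assert (datetime.datetime(2019, 1, 2, 3, 4, 5).strftime('%Y-%m-%dT%H:%M:%SZ')
        == '2019-01-02T03:04:05Z')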
def __get_sigmas(self):
"""will populate the stack_sigma dictionary with the energy and sigma array
for all the compound/element and isotopes"""
stack_sigma = {}
_stack = self.stack
_file_path = os.path.abspath(os.path.dirname(__file__))
_database_folder = os.path.join(_file_path, 'reference_data', self.database)
_list_compounds = _stack.keys()
for _compound in _list_compounds:
_list_element = _stack[_compound]['elements']
stack_sigma[_compound] = {}
for _element in _list_element:
stack_sigma[_compound][_element] = {}
_list_isotopes = _stack[_compound][_element]['isotopes']['list']
_list_file_names = _stack[_compound][_element]['isotopes']['file_names']
_list_isotopic_ratio = _stack[_compound][_element]['isotopes']['isotopic_ratio']
_iso_file_ratio = zip(_list_isotopes, _list_file_names, _list_isotopic_ratio)
stack_sigma[_compound][_element]['isotopic_ratio'] = _list_isotopic_ratio
                _sigma_all_isotopes = 0
                _energy_all_isotopes = 0
for _iso, _file, _ratio in _iso_file_ratio:
stack_sigma[_compound][_element][_iso] = {}
_file = os.path.join(_database_folder, _file)
_dict = _utilities.get_sigma(database_file_name=_file,
e_min=self.energy_min,
e_max=self.energy_max,
e_step=self.energy_step)
stack_sigma[_compound][_element][_iso]['energy_eV'] = _dict['energy_eV']
stack_sigma[_compound][_element][_iso]['sigma_b'] = _dict['sigma_b'] * _ratio
stack_sigma[_compound][_element][_iso]['sigma_b_raw'] = _dict['sigma_b']
# sigma for all isotopes with their isotopic ratio
_sigma_all_isotopes += _dict['sigma_b'] * _ratio
                    _energy_all_isotopes += _dict['energy_eV']
# energy axis (x-axis) is averaged to take into account differences between x-axis of isotopes
                _mean_energy_all_isotopes = _energy_all_isotopes / len(_list_isotopes)
stack_sigma[_compound][_element]['energy_eV'] = _mean_energy_all_isotopes
stack_sigma[_compound][_element]['sigma_b'] = _sigma_all_isotopes
self.stack_sigma = stack_sigma
def pagination_calc(items_count, page_size, cur_page=1, nearby=2):
"""
:param nearby:
:param items_count: count of all items
:param page_size: size of one page
:param cur_page: current page number, accept string digit
:return: num of pages, an iterator
"""
if type(cur_page) == str:
# noinspection PyUnresolvedReferences
cur_page = int(cur_page) if cur_page.isdigit() else 1
elif type(cur_page) == int:
if cur_page <= 0:
cur_page = 1
else:
cur_page = 1
page_count = 1 if page_size == -1 else int(math.ceil(items_count / page_size))
items_length = nearby * 2 + 1
# if first page in page items, first_page is None,
# it means the "go to first page" button should not be available.
first_page = None
last_page = None
prev_page = cur_page - 1 if cur_page != 1 else None
next_page = cur_page + 1 if cur_page != page_count else None
if page_count <= items_length:
items = range(1, page_count + 1)
elif cur_page <= nearby:
# start of items
items = range(1, items_length + 1)
last_page = True
elif cur_page >= page_count - nearby:
# end of items
items = range(page_count - items_length + 1, page_count + 1)
first_page = True
else:
items = range(cur_page - nearby, cur_page + nearby + 1)
first_page, last_page = True, True
if first_page:
first_page = 1
if last_page:
last_page = page_count
return {
'cur_page': cur_page,
'prev_page': prev_page,
'next_page': next_page,
'first_page': first_page,
'last_page': last_page,
'page_numbers': list(items),
'info': {
'page_size': page_size,
'page_count': page_count,
'items_count': items_count,
}
}
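A usage sketch: 95 items at 10 per page, viewed from page 5 with the default nearby=2 (assumes `math` is imported in the module):

page = pagination_calc(95, 10, cur_page=5)
# page_count = ceil(95 / 10) = 10; the window spans cur_page +/- nearby:
assert page['page_numbers'] == [3, 4, 5, 6, 7]
assert page['prev_page'] == 4 and page['next_page'] == 6
assert page['first_page'] == 1 and page['last_page'] == 10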
def _array(group_idx, a, size, fill_value, dtype=None):
"""groups a into separate arrays, keeping the order intact."""
if fill_value is not None and not (np.isscalar(fill_value) or
len(fill_value) == 0):
raise ValueError("fill_value must be None, a scalar or an empty "
"sequence")
order_group_idx = np.argsort(group_idx, kind='mergesort')
counts = np.bincount(group_idx, minlength=size)
ret = np.split(a[order_group_idx], np.cumsum(counts)[:-1])
ret = np.asanyarray(ret)
if fill_value is None or np.isscalar(fill_value):
_fill_untouched(group_idx, ret, fill_value)
return ret
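The stable argsort/bincount/split idiom at the heart of the function, shown standalone:

import numpy as np

group_idx = np.array([1, 0, 1, 2])
a = np.array([10, 20, 30, 40])
order = np.argsort(group_idx, kind='mergesort')  # stable sort keeps order within groups
counts = np.bincount(group_idx, minlength=3)     # [1, 2, 1]
groups = np.split(a[order], np.cumsum(counts)[:-1])
# groups -> [array([20]), array([10, 30]), array([40])]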
def process(self):
"""
This method handles the actual processing of Modules and Transforms
"""
self.modules.sort(key=lambda x: x.priority)
for module in self.modules:
transforms = module.transform(self.data)
transforms.sort(key=lambda x: x.linenum, reverse=True)
for transform in transforms:
linenum = transform.linenum
if isinstance(transform.data, basestring):
transform.data = [transform.data]
if transform.oper == "prepend":
self.data[linenum:linenum] = transform.data
elif transform.oper == "append":
self.data[linenum+1:linenum+1] = transform.data
elif transform.oper == "swap":
self.data[linenum:linenum+1] = transform.data
elif transform.oper == "drop":
self.data[linenum:linenum+1] = []
elif transform.oper == "noop":
pass
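Each operation reduces to a plain list-slice assignment; the four idioms on a toy list:

data = ['a', 'b', 'c']
data[1:1] = ['X']   # prepend at line 1       -> ['a', 'X', 'b', 'c']
data[4:4] = ['Y']   # append after line 3     -> ['a', 'X', 'b', 'c', 'Y']
data[0:1] = ['Z']   # swap line 0             -> ['Z', 'X', 'b', 'c', 'Y']
data[1:2] = []      # drop line 1             -> ['Z', 'b', 'c', 'Y']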
def shared(self, value, name=None):
"""
Create a shared theano scalar value.
"""
        if type(value) is int:
            final_value = np.array(value, dtype="int32")
        elif type(value) is float:
final_value = np.array(value, dtype=env.FLOATX)
else:
final_value = value
return theano.shared(final_value, name=name)
def get_most_distinct_words(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n=None):
"""
Order the words from `vocab` by "distinctiveness score" (Chuang et al. 2012) from most to least distinctive.
Optionally only return the `n` most distinctive words.
J. Chuang, C. Manning, J. Heer 2012: "Termite: Visualization Techniques for Assessing Textual Topic Models"
"""
return _words_by_distinctiveness_score(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n)
def add_portal(self, origin, destination, symmetrical=False, **kwargs):
"""Connect the origin to the destination with a :class:`Portal`.
Keyword arguments are the :class:`Portal`'s
attributes. Exception: if keyword ``symmetrical`` == ``True``,
a mirror-:class:`Portal` will be placed in the opposite
direction between the same nodes. It will always appear to
have the placed :class:`Portal`'s stats, and any change to the
mirror :class:`Portal`'s stats will affect the placed
:class:`Portal`.
"""
if isinstance(origin, Node):
origin = origin.name
if isinstance(destination, Node):
destination = destination.name
super().add_edge(origin, destination, **kwargs)
if symmetrical:
self.add_portal(destination, origin, is_mirror=True)
def mark_dead(self, proxy, _time=None):
""" Mark a proxy as dead """
        if proxy not in self.proxies:
            logger.warning("Proxy <%s> was not found in proxies list", proxy)
            return
        if proxy in self.good:
            logger.debug("GOOD proxy became DEAD: <%s>", proxy)
        else:
            logger.debug("Proxy <%s> is DEAD", proxy)
self.unchecked.discard(proxy)
self.good.discard(proxy)
self.dead.add(proxy)
now = _time or time.time()
state = self.proxies[proxy]
state.backoff_time = self.backoff(state.failed_attempts)
state.next_check = now + state.backoff_time
state.failed_attempts += 1
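The `backoff` callable used above is injected elsewhere; a common choice is full-jitter exponential backoff, sketched here as an assumption rather than the library's actual implementation:

import random

def exp_backoff_full_jitter(attempts, base=300.0, cap=3600.0):
    # Wait a random time between 0 and min(cap, base * 2 ** attempts) seconds.
    return random.uniform(0, min(cap, base * 2 ** attempts))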
def get_group_hidden(self):
"""Determine if the entire group of elements is hidden
(decide whether to hide the entire group).
"""
        # Loop through all the elements in the group.
        for element in self.group_list:
            # The group is visible as soon as any element's form is visible.
            if element.form.view_type != 'none':
                return False
            # Check the element's children for visible forms as well.
            for child_element in element.children:
                if child_element.form.view_type != 'none':
                    return False
return True
def execute(self, resource, **kw):
"""
Execute the task and return a TaskOperationPoller.
:rtype: TaskOperationPoller
"""
params = kw.pop('params', {})
json = kw.pop('json', None)
task = self.make_request(
TaskRunFailed,
method='create',
params=params,
json=json,
resource=resource)
timeout = kw.pop('timeout', 5)
wait_for_finish = kw.pop('wait_for_finish', True)
return TaskOperationPoller(
task=task, timeout=timeout,
wait_for_finish=wait_for_finish,
**kw)
def api_reference(root_url, service, version):
"""Generate URL for a Taskcluster api reference."""
root_url = root_url.rstrip('/')
if root_url == OLD_ROOT_URL:
return 'https://references.taskcluster.net/{}/{}/api.json'.format(service, version)
else:
return '{}/references/{}/{}/api.json'.format(root_url, service, version)
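A usage sketch (assuming `OLD_ROOT_URL` is the legacy `https://taskcluster.net` root, so any other root builds the URL from the deployment itself):

assert (api_reference('https://tc.example.com/', 'queue', 'v1')
        == 'https://tc.example.com/references/queue/v1/api.json')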
def get_oauth_access_token(url_base, client_id, client_secret, company_id, user_id, user_type):
""" Retrieves OAuth 2.0 access token using the client credentials grant.
Args:
url_base (str): Oauth2 access token endpoint
client_id (str): client ID
client_secret (str): client secret
company_id (str): SAP company ID
user_id (str): SAP user ID
user_type (str): type of SAP user (admin or user)
Returns:
tuple: Tuple containing access token string and expiration datetime.
Raises:
HTTPError: If we received a failure response code from SAP SuccessFactors.
RequestException: If an unexpected response format was received that we could not parse.
"""
SAPSuccessFactorsGlobalConfiguration = apps.get_model( # pylint: disable=invalid-name
'sap_success_factors',
'SAPSuccessFactorsGlobalConfiguration'
)
global_sap_config = SAPSuccessFactorsGlobalConfiguration.current()
url = url_base + global_sap_config.oauth_api_path
response = requests.post(
url,
json={
'grant_type': 'client_credentials',
'scope': {
'userId': user_id,
'companyId': company_id,
'userType': user_type,
'resourceType': 'learning_public_api',
}
},
auth=(client_id, client_secret),
headers={'content-type': 'application/json'}
)
response.raise_for_status()
data = response.json()
try:
return data['access_token'], datetime.datetime.utcfromtimestamp(data['expires_in'] + int(time.time()))
except KeyError:
raise requests.RequestException(response=response)
def print_clusters(fastas, info, ANI):
"""
    choose representative genome and
print cluster information
*if ggKbase table is provided, use SCG info to choose best genome
"""
header = ['#cluster', 'num. genomes', 'rep.', 'genome', '#SCGs', '#SCG duplicates', \
'genome size (bp)', 'fragments', 'list']
yield header
in_cluster = []
for cluster_num, cluster in enumerate(connected_components(ANI)):
cluster = sorted([genome_info(genome, info[genome]) \
for genome in cluster], \
key = lambda x: x[0:], reverse = True)
rep = cluster[0][-1]
cluster = [i[-1] for i in cluster]
size = len(cluster)
for genome in cluster:
in_cluster.append(genome)
try:
stats = [size, rep, genome, \
info[genome]['#SCGs'], info[genome]['#SCG duplicates'], \
info[genome]['genome size (bp)'], info[genome]['# contigs'], cluster]
            except KeyError:
stats = [size, rep, genome, \
'n/a', 'n/a', \
info[genome]['genome size (bp)'], info[genome]['# contigs'], cluster]
if rep == genome:
stats = ['*%s' % (cluster_num)] + stats
else:
stats = [cluster_num] + stats
yield stats
# print singletons
try:
start = cluster_num + 1
except:
start = 0
fastas = set([i.rsplit('.', 1)[0].rsplit('/', 1)[-1].rsplit('.contigs')[0] for i in fastas])
for cluster_num, genome in \
enumerate(fastas.difference(set(in_cluster)), start):
try:
stats = ['*%s' % (cluster_num), 1, genome, genome, \
info[genome]['#SCGs'], info[genome]['#SCG duplicates'], \
info[genome]['genome size (bp)'], info[genome]['# contigs'], [genome]]
        except KeyError:
stats = ['*%s' % (cluster_num), 1, genome, genome, \
'n/a', 'n/a', \
info[genome]['genome size (bp)'], info[genome]['# contigs'], [genome]]
yield stats
def _populate_unknown_statuses(set_tasks):
"""
    Add the "upstream_*" and "not_run" statuses by mutating set_tasks.
"""
visited = set()
for task in set_tasks["still_pending_not_ext"]:
_depth_first_search(set_tasks, task, visited)
def get_request_params(self) -> List[ExtensionParameter]:
"""
Build request parameters.
"""
return _build_parameters(
self.server_no_context_takeover,
self.client_no_context_takeover,
self.server_max_window_bits,
self.client_max_window_bits,
)
def load(self, key, noexpire=None):
'''Lookup an item in the cache and return the raw content of
the file as a string.'''
with self.load_fd(key, noexpire=noexpire) as fd:
return fd.read()
def get_indexed_slices(self, column_parent, index_clause, column_predicate, consistency_level):
"""
Returns the subset of columns specified in SlicePredicate for the rows matching the IndexClause
@deprecated use get_range_slices instead with range.row_filter specified
Parameters:
- column_parent
- index_clause
- column_predicate
- consistency_level
"""
self._seqid += 1
d = self._reqs[self._seqid] = defer.Deferred()
self.send_get_indexed_slices(column_parent, index_clause, column_predicate, consistency_level)
return d
def inactive_response(self, request):
"""
Return an inactive message.
"""
inactive_url = getattr(settings, 'LOGIN_INACTIVE_REDIRECT_URL', '')
if inactive_url:
return HttpResponseRedirect(inactive_url)
else:
return self.error_to_response(request, {'error': _("This user account is marked as inactive.")})
def _construct_state_machines(self):
""" :return: dict in format <state_machine_common_name: instance_of_the_state_machine> """
state_machines = dict()
for state_machine in [StateMachineRecomputing(self.logger, self),
StateMachineContinuous(self.logger, self),
StateMachineDiscrete(self.logger, self),
StateMachineFreerun(self.logger)]:
state_machines[state_machine.name] = state_machine
return state_machines
def apply(self, df):
"""Takes a pd.DataFrame and returns the newly defined column, i.e.
a pd.Series that has the same index as `df`.
"""
        if callable(self.definition):
r = self.definition(df)
elif self.definition in df.columns:
r = df[self.definition]
elif not isinstance(self.definition, string_types):
r = pd.Series(self.definition, index=df.index)
else:
raise ValueError("Invalid column definition: %s" % str(self.definition))
return r.astype(self.astype) if self.astype else r
def unique(iterable, key=identity):
"""Yields all the unique values in an iterable maintaining order"""
seen = set()
for item in iterable:
item_key = key(item)
if item_key not in seen:
seen.add(item_key)
yield item
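Usage, assuming `identity` is the usual pass-through default (`lambda x: x`):

assert list(unique([1, 2, 1, 3, 2])) == [1, 2, 3]
# With a key function, the first item seen per key wins:
assert list(unique(['a', 'A', 'b'], key=str.lower)) == ['a', 'b']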
def calendar(self, val):
"""
    Update ``self._calendar_i`` if ``self.calendar`` changes.
"""
self._calendar = val
if val is not None and not val.empty:
self._calendar_i = self._calendar.set_index("service_id")
else:
self._calendar_i = None
def load_from_db(self, cache=False):
"""Return a dictionary of preferences by section directly from DB"""
a = {}
db_prefs = {p.preference.identifier(): p for p in self.queryset}
for preference in self.registry.preferences():
try:
db_pref = db_prefs[preference.identifier()]
except KeyError:
db_pref = self.create_db_pref(
section=preference.section.name,
name=preference.name,
value=preference.get('default'))
else:
# cache if create_db_pref() hasn't already done so
if cache:
self.to_cache(db_pref)
a[preference.identifier()] = db_pref.value
return a
def get_all_targets(self):
"""Returns all targets for all batches of this Executor."""
result = []
for batch in self.batches:
result.extend(batch.targets)
return result
def put(self, url: StrOrURL,
*, data: Any=None, **kwargs: Any) -> '_RequestContextManager':
"""Perform HTTP PUT request."""
return _RequestContextManager(
self._request(hdrs.METH_PUT, url,
data=data,
**kwargs))
def tag_array(events):
    """
    Return a numpy array mapping events to tags
    - Rows correspond to events
    - Columns correspond to tags (sorted alphabetically)
    """
    all_tags = sorted(set(tag for event in events for tag in event.tags))
    tag_index = {tag: col for col, tag in enumerate(all_tags)}  # O(1) column lookups
    array = np.zeros((len(events), len(all_tags)))
    for row, event in enumerate(events):
        for tag in event.tags:
            array[row, tag_index[tag]] = 1
    return array
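A usage sketch with a stand-in event type (hypothetical; any object with a `.tags` iterable works):

from collections import namedtuple

Event = namedtuple('Event', 'tags')
events = [Event(tags=['a', 'c']), Event(tags=['b'])]
# tag_array(events) -> columns are the sorted tags 'a', 'b', 'c':
# array([[1., 0., 1.],
#        [0., 1., 0.]])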
def _neg_bounded_fun(fun, bounds, x, args=()):
"""
Wrapper for bounding and taking the negative of `fun` for the
Nelder-Mead algorithm. JIT-compiled in `nopython` mode using Numba.
Parameters
----------
fun : callable
The objective function to be minimized.
`fun(x, *args) -> float`
        where x is a 1-D array with shape (n,) and args is a tuple of the
fixed parameters needed to completely specify the function. This
function must be JIT-compiled in `nopython` mode using Numba.
bounds: ndarray(float, ndim=2)
Sequence of (min, max) pairs for each element in x.
x : ndarray(float, ndim=1)
1-D array with shape (n,) of independent variables at which `fun` is
to be evaluated.
args : tuple, optional
Extra arguments passed to the objective function.
Returns
    -------
scalar
`-fun(x, *args)` if x is within `bounds`, `np.inf` otherwise.
"""
if _check_bounds(x, bounds):
return -fun(x, *args)
else:
return np.inf
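`_check_bounds` is defined elsewhere in the module; a minimal sketch consistent with the docstring's (min, max) pairs:

import numpy as np

def _check_bounds_sketch(x, bounds):
    # bounds[:, 0] holds the lower bounds, bounds[:, 1] the upper bounds.
    return bool(np.all(bounds[:, 0] <= x) and np.all(x <= bounds[:, 1]))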
def load_metadata_csv(input_filepath):
"""
Return dict of metadata.
Format is either dict (filenames are keys) or dict-of-dicts (project member
IDs as top level keys, then filenames as keys).
:param input_filepath: This field is the filepath of the csv file.
"""
with open(input_filepath) as f:
csv_in = csv.reader(f)
header = next(csv_in)
if 'tags' in header:
tags_idx = header.index('tags')
else:
raise ValueError('"tags" is a compulsory column in metadata file.')
if header[0] == 'project_member_id':
if header[1] == 'filename':
metadata = load_metadata_csv_multi_user(csv_in, header,
tags_idx)
else:
raise ValueError('The second column must be "filename"')
elif header[0] == 'filename':
metadata = load_metadata_csv_single_user(csv_in, header, tags_idx)
else:
        raise ValueError('Incorrect formatting of metadata. The first'
                         ' column for single user upload should be'
                         ' "filename". For multiuser uploads the first'
                         ' column should be "project_member_id" and the'
                         ' second column should be "filename"')
return metadata
def _validate(self, validator, data, key, position=None, includes=None):
"""
Run through a schema and a data structure,
validating along the way.
Ignores fields that are in the data structure, but not in the schema.
Returns an array of errors.
"""
errors = []
if position:
position = '%s.%s' % (position, key)
else:
position = key
try: # Pull value out of data. Data can be a map or a list/sequence
data_item = util.get_value(data, key)
except KeyError: # Oops, that field didn't exist.
if validator.is_optional: # Optional? Who cares.
return errors
            # SHUT DOWN EVERYTHING
errors.append('%s: Required field missing' % position)
return errors
return self._validate_item(validator, data_item, position, includes)
def __start_waiting_for_events(self):
'''
This waits until the whole chain of callback methods triggered by
"trigger_connection_to_rabbit_etc()" has finished, and then starts
waiting for publications.
This is done by starting the ioloop.
Note: In the pika usage example, these things are both called inside the run()
method, so I wonder if this check-and-wait here is necessary. Maybe not.
But the usage example does not implement a Thread, so it probably blocks during
the opening of the connection. Here, as it is a different thread, the run()
might get called before the __init__ has finished? I'd rather stay on the
safe side, as my experience of threading in Python is limited.
'''
# Start ioloop if connection object ready:
if self.thread._connection is not None:
try:
logdebug(LOGGER, 'Starting ioloop...')
logtrace(LOGGER, 'ioloop is owned by connection %s...', self.thread._connection)
# Tell the main thread that we're now open for events.
# As soon as the thread._connection object is not None anymore, it
# can receive events.
self.thread.tell_publisher_to_stop_waiting_for_thread_to_accept_events()
self.thread.continue_gently_closing_if_applicable()
self.thread._connection.ioloop.start()
except PIDServerException as e:
raise e
# It seems that some connection problems do not cause
# RabbitMQ to call any callback (on_connection_closed
# or on_connection_error) - it just silently swallows the
# problem.
# So we need to manually trigger reconnection to the next
# host here, which we do by manually calling the callback.
# We start the ioloop, so it can handle the reconnection events,
# or also receive events from the publisher in the meantime.
except Exception as e:
# This catches any error during connection startup and during the entire
# time the ioloop runs, blocks and waits for events.
time_passed = datetime.datetime.now() - self.__start_connect_time
time_passed_seconds = time_passed.total_seconds()
# Some pika errors:
if isinstance(e, pika.exceptions.ProbableAuthenticationError):
errorname = self.__make_error_name(e, 'e.g. wrong user or password')
elif isinstance(e, pika.exceptions.ProbableAccessDeniedError):
errorname = self.__make_error_name(e, 'e.g. wrong virtual host name')
elif isinstance(e, pika.exceptions.IncompatibleProtocolError):
errorname = self.__make_error_name(e, 'e.g. trying TLS/SSL on wrong port')
# Other errors:
else:
errorname = self.__make_error_name(e)
logdebug(LOGGER, 'Unexpected error during event listener\'s lifetime (after %s seconds): %s', time_passed_seconds, errorname)
# Now trigger reconnection:
self.statemachine.set_to_waiting_to_be_available()
self.on_connection_error(self.thread._connection, errorname)
self.thread._connection.ioloop.start()
else:
# I'm quite sure that this cannot happen, as the connection object
# is created in "trigger_connection_...()" and thus exists, no matter
# if the actual connection to RabbitMQ succeeded (yet) or not.
logdebug(LOGGER, 'This cannot happen: Connection object is not ready.')
logerror(LOGGER, 'Cannot happen. Cannot properly start the thread. Connection object is not ready.')
async def shutdown(self, container, force=False):
'''
Shutdown all connections. Exclusive connections created by get_connection will shutdown after release()
'''
p = self._connpool
self._connpool = []
self._shutdown = True
if self._defaultconn:
p.append(self._defaultconn)
self._defaultconn = None
if self._subscribeconn:
p.append(self._subscribeconn)
self._subscribeconn = None
await container.execute_all([self._shutdown_conn(container, o, force)
for o in p])
def toIndex(self, value):
'''
toIndex - An optional method which will return the value prepped for index.
By default, "toStorage" will be called. If you provide "hashIndex=True" on the constructor,
the field will be md5summed for indexing purposes. This is useful for large strings, etc.
'''
if self._isIrNull(value):
ret = IR_NULL_STR
else:
ret = self._toIndex(value)
if self.isIndexHashed is False:
return ret
return md5(tobytes(ret)).hexdigest()
def status_for_all_orders_in_a_stock(self, stock):
"""Status for all orders in a stock
https://starfighter.readme.io/docs/status-for-all-orders-in-a-stock
"""
url_fragment = 'venues/{venue}/accounts/{account}/stocks/{stock}/orders'.format(
stock=stock,
venue=self.venue,
account=self.account,
)
url = urljoin(self.base_url, url_fragment)
return self.session.get(url).json()
def predict(self, choosers, alternatives, debug=False):
"""
Choose from among alternatives for a group of agents.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
debug : bool
If debug is set to true, will set the variable "sim_pdf" on
the object to store the probabilities for mapping of the
outcome.
Returns
-------
choices : pandas.Series
Mapping of chooser ID to alternative ID. Some choosers
will map to a nan value when there are not enough alternatives
for all the choosers.
"""
self.assert_fitted()
logger.debug('start: predict LCM model {}'.format(self.name))
choosers, alternatives = self.apply_predict_filters(
choosers, alternatives)
if len(choosers) == 0:
return pd.Series()
if len(alternatives) == 0:
return pd.Series(index=choosers.index)
probabilities = self.probabilities(
choosers, alternatives, filter_tables=False)
if debug:
self.sim_pdf = probabilities
if self.choice_mode == 'aggregate':
choices = unit_choice(
choosers.index.values,
probabilities.index.get_level_values('alternative_id').values,
probabilities.values)
elif self.choice_mode == 'individual':
def mkchoice(probs):
probs.reset_index(0, drop=True, inplace=True)
return np.random.choice(
probs.index.values, p=probs.values / probs.sum())
choices = probabilities.groupby(level='chooser_id', sort=False)\
.apply(mkchoice)
else:
raise ValueError(
'Unrecognized choice_mode option: {}'.format(self.choice_mode))
logger.debug('finish: predict LCM model {}'.format(self.name))
return choices
def _get_or_create_uaa(self, uaa):
"""
Returns a valid UAA instance for performing administrative functions
on services.
"""
if isinstance(uaa, predix.admin.uaa.UserAccountAuthentication):
return uaa
logging.debug("Initializing a new UAA")
return predix.admin.uaa.UserAccountAuthentication()
def ccor(alt, r, h1, zh):
"""
/* CHEMISTRY/DISSOCIATION CORRECTION FOR MSIS MODELS
* ALT - altitude
* R - target ratio
* H1 - transition scale length
* ZH - altitude of 1/2 R
*/
"""
    e = (alt - zh) / h1
    if e > 70.0:
        return 1.0  # exp(0)  # pragma: no cover
    elif e < -70.0:
        return exp(r)
    ex = exp(e)
    e = r / (1.0 + ex)
    return exp(e)
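Away from the two overflow clamps, the function is the closed form below; a reference sketch for sanity-checking values:

from math import exp

def ccor_reference(alt, r, h1, zh):
    # ccor(alt, r, h1, zh) == exp(r / (1 + exp((alt - zh) / h1)))
    return exp(r / (1.0 + exp((alt - zh) / h1)))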
def get_default_config_file(rootdir=None):
"""Search for configuration file."""
if rootdir is None:
return DEFAULT_CONFIG_FILE
for path in CONFIG_FILES:
path = os.path.join(rootdir, path)
if os.path.isfile(path) and os.access(path, os.R_OK):
return path
def _learn(# mutated args
permanences, rng,
# activity
activeCells, activeInput, growthCandidateInput,
# configuration
sampleSize, initialPermanence, permanenceIncrement,
permanenceDecrement, connectedPermanence):
"""
For each active cell, reinforce active synapses, punish inactive synapses,
and grow new synapses to a subset of the active input bits that the cell
isn't already connected to.
Parameters:
----------------------------
@param permanences (SparseMatrix)
Matrix of permanences, with cells as rows and inputs as columns
@param rng (Random)
Random number generator
@param activeCells (sorted sequence)
Sorted list of the cells that are learning
@param activeInput (sorted sequence)
Sorted list of active bits in the input
@param growthCandidateInput (sorted sequence)
Sorted list of active bits in the input that the activeCells may
grow new synapses to
For remaining parameters, see the __init__ docstring.
"""
permanences.incrementNonZerosOnOuter(
activeCells, activeInput, permanenceIncrement)
permanences.incrementNonZerosOnRowsExcludingCols(
activeCells, activeInput, -permanenceDecrement)
permanences.clipRowsBelowAndAbove(
activeCells, 0.0, 1.0)
if sampleSize == -1:
permanences.setZerosOnOuter(
activeCells, activeInput, initialPermanence)
else:
existingSynapseCounts = permanences.nNonZerosPerRowOnCols(
activeCells, activeInput)
maxNewByCell = numpy.empty(len(activeCells), dtype="int32")
numpy.subtract(sampleSize, existingSynapseCounts, out=maxNewByCell)
permanences.setRandomZerosOnOuter(
activeCells, growthCandidateInput, maxNewByCell, initialPermanence, rng)
def dereference(self, data, host=None):
"""Dereferences RefObjects stuck in the hierarchy. This is a bit
of an ugly hack."""
return self.deep_decode(self.deep_encode(data, host), deref=True)
def _apply_replace_backrefs(m, repl=None, flags=0):
"""Expand with either the `ReplaceTemplate` or compile on the fly, or return None."""
if m is None:
raise ValueError("Match is None!")
else:
if isinstance(repl, ReplaceTemplate):
return repl.expand(m)
elif isinstance(repl, (str, bytes)):
return _bregex_parse._ReplaceParser().parse(m.re, repl, bool(flags & FORMAT)).expand(m)
def get_lm_challenge_response(self):
"""
[MS-NLMP] v28.0 2016-07-14
3.3.1 - NTLM v1 Authentication
3.3.2 - NTLM v2 Authentication
This method returns the LmChallengeResponse key based on the ntlm_compatibility chosen
and the target_info supplied by the CHALLENGE_MESSAGE. It is quite different from what
is set in the document as it combines the NTLMv1, NTLM2 and NTLMv2 methods into one
and calls separate methods based on the ntlm_compatibility flag chosen.
:return: response (LmChallengeResponse) - The LM response to the server challenge. Computed by the client
"""
if self._negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY and self._ntlm_compatibility < 3:
response = ComputeResponse._get_LMv1_with_session_security_response(self._client_challenge)
elif 0 <= self._ntlm_compatibility <= 1:
response = ComputeResponse._get_LMv1_response(self._password, self._server_challenge)
elif self._ntlm_compatibility == 2:
# Based on the compatibility level we don't want to use LM responses, ignore the session_base_key as it is returned in nt
response, ignore_key = ComputeResponse._get_NTLMv1_response(self._password, self._server_challenge)
else:
"""
[MS-NLMP] v28.0 page 45 - 2016-07-14
3.1.5.12 Client Received a CHALLENGE_MESSAGE from the Server
If NTLMv2 authentication is used and the CHALLENGE_MESSAGE TargetInfo field has an MsvAvTimestamp present,
the client SHOULD NOT send the LmChallengeResponse and SHOULD send Z(24) instead.
"""
response = ComputeResponse._get_LMv2_response(self._user_name, self._password, self._domain_name,
self._server_challenge,
self._client_challenge)
if self._server_target_info is not None:
timestamp = self._server_target_info[TargetInfo.MSV_AV_TIMESTAMP]
if timestamp is not None:
response = b'\0' * 24
return response
def load_and_assign_npz_dict(name='model.npz', sess=None):
"""Restore the parameters saved by ``tl.files.save_npz_dict()``.
Parameters
----------
name : str
The name of the `.npz` file.
sess : Session
TensorFlow Session.
"""
if sess is None:
raise ValueError("session is None.")
if not os.path.exists(name):
logging.error("file {} doesn't exist.".format(name))
return False
params = np.load(name)
if len(params.keys()) != len(set(params.keys())):
raise Exception("Duplication in model npz_dict %s" % name)
ops = list()
for key in params.keys():
try:
# tensor = tf.get_default_graph().get_tensor_by_name(key)
# varlist = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=key)
varlist = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=key)
if len(varlist) > 1:
raise Exception("[!] Multiple candidate variables to be assigned for name %s" % key)
elif len(varlist) == 0:
raise KeyError
else:
ops.append(varlist[0].assign(params[key]))
logging.info("[*] params restored: %s" % key)
except KeyError:
logging.info("[!] Warning: Tensor named %s not found in network." % key)
sess.run(ops)
logging.info("[*] Model restored from npz_dict %s" % name)
def rename(name, new_name, root=None):
'''
Change the username for a named user
name
User to modify
new_name
New value of the login name
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.rename name new_name
'''
if info(new_name, root=root):
raise CommandExecutionError('User \'{0}\' already exists'.format(new_name))
return _chattrib(name, 'name', new_name, '-l', root=root)
def map(self, func):
"""
Return a dictionary of the results of func applied to each
of the segmentlist objects in self.
Example:
>>> x = segmentlistdict()
>>> x["H1"] = segmentlist([segment(0, 10)])
>>> x["H2"] = segmentlist([segment(5, 15)])
>>> x.map(lambda l: 12 in l)
{'H2': True, 'H1': False}
"""
return dict((key, func(value)) for key, value in self.iteritems())
def list_tables(self, limit=None, start_table=None):
"""
Return a list of the names of all Tables associated with the
current account and region.
TODO - Layer2 should probably automatically handle pagination.
:type limit: int
:param limit: The maximum number of tables to return.
:type start_table: str
        :param start_table: The name of the table that starts the
list. If you ran a previous list_tables and not
all results were returned, the response dict would
include a LastEvaluatedTableName attribute. Use
that value here to continue the listing.
"""
result = self.layer1.list_tables(limit, start_table)
return result['TableNames']
def create(self, serviceBinding):
"""
Create a new external service.
The service must include all of the details required to connect
and authenticate to the external service in the credentials property.
        Parameters (fields of the ``serviceBinding`` argument):
        - serviceName (string) - Name of the service
        - serviceType (string) - must be either eventstreams or cloudant
        - credentials (json object) - Should have a valid structure for the service type.
        - description (string) - description of the service
        Throws ApiException on failure.
"""
if not isinstance(serviceBinding, ServiceBindingCreateRequest):
if serviceBinding["type"] == "cloudant":
serviceBinding = CloudantServiceBindingCreateRequest(**serviceBinding)
elif serviceBinding["type"] == "eventstreams":
serviceBinding = EventStreamsServiceBindingCreateRequest(**serviceBinding)
else:
raise Exception("Unsupported service binding type")
url = "api/v0002/s2s/services"
r = self._apiClient.post(url, data=serviceBinding)
if r.status_code == 201:
return ServiceBinding(**r.json())
else:
raise ApiException(r)
def getDynDnsClientForConfig(config, plugins=None):
"""Instantiate and return a complete and working dyndns client.
:param config: a dictionary with configuration keys
:param plugins: an object that implements PluginManager
"""
initparams = {}
if "interval" in config:
initparams["detect_interval"] = config["interval"]
if plugins is not None:
initparams["plugins"] = plugins
if "updater" in config:
for updater_name, updater_options in config["updater"]:
initparams["updater"] = get_updater_class(updater_name)(**updater_options)
# find class and instantiate the detector:
if "detector" in config:
detector_name, detector_opts = config["detector"][-1]
try:
klass = get_detector_class(detector_name)
except KeyError as exc:
LOG.warning("Invalid change detector configuration: '%s'",
detector_name, exc_info=exc)
return None
thedetector = klass(**detector_opts)
initparams["detector"] = thedetector
return DynDnsClient(**initparams)
|
Instantiate and return a complete and working dyndns client.
:param config: a dictionary with configuration keys
:param plugins: an object that implements PluginManager
|
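An illustrative ``config`` for this factory; the updater and detector names are placeholders that would have to exist in the get_updater_class/get_detector_class registries. Note that only the last entry of each list takes effect:

config = {
    "interval": 300,                 # becomes detect_interval
    "updater": [("dummy", {})],      # (name, options) pairs; last one wins
    "detector": [("webcheck", {})],  # only the last entry is used
}
client = getDynDnsClientForConfig(config)  # None on a bad detector name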
def loaded_ret(ret, loaded, test, debug, compliance_report=False, opts=None):
'''
Return the final state output.
ret
The initial state output structure.
loaded
        The loaded dictionary.
    test
        Dry run: when changes would be applied, ``result`` is set to ``None``.
    debug
        When set, include the loaded config in the returned changes.
    compliance_report
        When set, include the compliance report in the returned changes.
    opts
        The salt options dictionary, used to format the compliance report.
    '''
# Always get the comment
changes = {}
ret['comment'] = loaded['comment']
if 'diff' in loaded:
changes['diff'] = loaded['diff']
if 'commit_id' in loaded:
changes['commit_id'] = loaded['commit_id']
if 'compliance_report' in loaded:
if compliance_report:
changes['compliance_report'] = loaded['compliance_report']
if debug and 'loaded_config' in loaded:
changes['loaded_config'] = loaded['loaded_config']
if changes.get('diff'):
ret['comment'] = '{comment_base}\n\nConfiguration diff:\n\n{diff}'.format(comment_base=ret['comment'],
diff=changes['diff'])
if changes.get('loaded_config'):
ret['comment'] = '{comment_base}\n\nLoaded config:\n\n{loaded_cfg}'.format(
comment_base=ret['comment'],
loaded_cfg=changes['loaded_config'])
if changes.get('compliance_report'):
ret['comment'] = '{comment_base}\n\nCompliance report:\n\n{compliance}'.format(
comment_base=ret['comment'],
compliance=salt.output.string_format(changes['compliance_report'], 'nested', opts=opts))
if not loaded.get('result', False):
# Failure of some sort
return ret
if not loaded.get('already_configured', True):
# We're making changes
if test:
ret['result'] = None
return ret
# Not test, changes were applied
ret.update({
'result': True,
'changes': changes,
'comment': "Configuration changed!\n{}".format(loaded['comment'])
})
return ret
# No changes
ret.update({
'result': True,
'changes': {}
})
return ret
|
Return the final state output.
ret
The initial state output structure.
loaded
The loaded dictionary.
test
    Dry run: when changes would be applied, ``result`` is set to ``None``.
debug
    When set, include the loaded config in the returned changes.
compliance_report
    When set, include the compliance report in the returned changes.
opts
    The salt options dictionary, used to format the compliance report.
|
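A sketch of typical input and output, assuming the caller follows the usual salt state return structure (the diff is illustrative):

ret = {'name': 'netconfig', 'result': False, 'changes': {}, 'comment': ''}
loaded = {
    'result': True,
    'already_configured': False,
    'comment': 'Configuration loaded.',
    'diff': '+ set system ntp server 10.0.0.1',
}
ret = loaded_ret(ret, loaded, test=False, debug=False)
# ret['result'] is True and ret['changes']['diff'] holds the diff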
def get_root_families(self):
"""Gets the root families in the family hierarchy.
A node with no parents is an orphan. While all family ``Ids``
are known to the hierarchy, an orphan does not appear in the
hierarchy unless explicitly added as a root node or child of
another node.
return: (osid.relationship.FamilyList) - the root families
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
        *compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_root_bins
if self._catalog_session is not None:
return self._catalog_session.get_root_catalogs()
return FamilyLookupSession(
self._proxy,
self._runtime).get_families_by_ids(list(self.get_root_family_ids()))
|
Gets the root families in the family hierarchy.
A node with no parents is an orphan. While all family ``Ids``
are known to the hierarchy, an orphan does not appear in the
hierarchy unless explicitly added as a root node or child of
another node.
return: (osid.relationship.FamilyList) - the root families
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
|
def get_flux(self, reaction):
"""Get resulting flux value for reaction."""
return self._prob.result.get_value(self._v(reaction))
|
Get resulting flux value for reaction.
|
def create_powerflow_problem(timerange, components):
"""
Create PyPSA network object and fill with data
Parameters
----------
timerange: Pandas DatetimeIndex
Time range to be analyzed by PF
    components: dict
        Components to add to the network, keyed by PyPSA component name
    Returns
    -------
    network: PyPSA powerflow problem object
    snapshots: Snapshots of the network derived from the given timerange
"""
# initialize powerflow problem
network, snapshots = init_pypsa_network(timerange)
# add components to network
for component in components.keys():
network.import_components_from_dataframe(components[component],
component)
return network, snapshots
|
Create PyPSA network object and fill with data
Parameters
----------
timerange: Pandas DatetimeIndex
Time range to be analyzed by PF
components: dict
    Components to add to the network, keyed by PyPSA component name
Returns
-------
network: PyPSA powerflow problem object
snapshots: Snapshots of the network derived from the given timerange
|
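A usage sketch; bus_df and line_df stand in for component DataFrames prepared elsewhere:

import pandas as pd

timerange = pd.date_range('2019-01-01', periods=24, freq='H')
components = {'Bus': bus_df, 'Line': line_df}  # assumed to exist
network, snapshots = create_powerflow_problem(timerange, components)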
def process_amqp_msgs(self):
"""Process AMQP queue messages.
It connects to AMQP server and calls callbacks to process DCNM events,
i.e. routing key containing '.cisco.dcnm.', once they arrive in the
queue.
"""
LOG.info('Starting process_amqp_msgs...')
while True:
(mtd_fr, hdr_fr, body) = (None, None, None)
try:
if self.consume_channel:
(mtd_fr, hdr_fr, body) = self.consume_channel.basic_get(
self._dcnm_queue_name)
if mtd_fr:
# Queue has messages.
LOG.info('RX message: %s', body)
self._cb_dcnm_msg(mtd_fr, body)
self.consume_channel.basic_ack(mtd_fr.delivery_tag)
else:
# Queue is empty.
try:
self._conn.sleep(1)
except AttributeError:
time.sleep(1)
except Exception:
exc_type, exc_value, exc_tb = sys.exc_info()
tb_str = traceback.format_exception(exc_type,
exc_value, exc_tb)
LOG.exception("Failed to read from queue: %(queue)s "
"%(exc_type)s, %(exc_value)s, %(exc_tb)s.", {
'queue': self._dcnm_queue_name,
'exc_type': exc_type,
'exc_value': exc_value,
'exc_tb': tb_str})
|
Process AMQP queue messages.
It connects to AMQP server and calls callbacks to process DCNM events,
i.e. routing key containing '.cisco.dcnm.', once they arrive in the
queue.
|
def make_rendition(self, width, height):
'''build a rendition
0 x 0 -> will give master URL
        only width -> will make a rendition with the master's aspect ratio
        width x height -> will make an image potentially cropped
'''
image = Image.open(self.master)
format = image.format
target_w = float(width)
target_h = float(height)
if (target_w == 0):
target_w = self.master_width
if (target_h == 0):
target_h = self.master_height
rendition_key = '%dx%d' % (target_w, target_h)
if rendition_key in self.renditions:
return self.renditions[rendition_key]
if (target_w != self.master_width or target_h != self.master_height):
r = target_w / target_h
R = float(self.master_width) / self.master_height
if r != R:
if r > R:
crop_w = self.master_width
crop_h = crop_w / r
x = 0
y = int(self.master_height - crop_h) >> 1
else:
crop_h = self.master_height
crop_w = crop_h * r
x = int(self.master_width - crop_w) >> 1
y = 0
image = image.crop((x, y, int(crop_w + x), int(crop_h + y)))
image.thumbnail((int(target_w), int(target_h)), Image.ANTIALIAS)
filename, ext = os.path.splitext(self.get_master_filename())
rendition_name = '%s/%s_%s%s' % (
IMAGE_DIRECTORY,
filename,
rendition_key,
ext
)
fd = BytesIO()
image.save(fd, format)
default_storage.save(rendition_name, fd)
self.renditions[rendition_key] = rendition_name
self.save()
return rendition_name
return self.master.name
|
build a rendition
0 x 0 -> will give master URL
only width -> will make a rendition with the master's aspect ratio
width x height -> will make an image potentially cropped
|
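Example calls for the cases above, assuming an instance with a stored master image:

image.make_rendition(0, 0)      # no resize: returns the master's name
image.make_rendition(200, 100)  # 200x100, center-cropped to 2:1 if needed
# a repeated call with the same size returns the cached rendition name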
def add_precip_file(self, precip_file_path, interpolation_type=None):
"""
Adds a precip file to project with interpolation_type
"""
# precip file read in
self._update_card('PRECIP_FILE', precip_file_path, True)
if interpolation_type is None:
# check if precip type exists already in card
if not self.project_manager.getCard('RAIN_INV_DISTANCE') \
and not self.project_manager.getCard('RAIN_THIESSEN'):
                # if no type exists, then make it thiessen
self._update_card('RAIN_THIESSEN', '')
else:
if interpolation_type.upper() not in self.PRECIP_INTERP_TYPES:
raise IndexError("Invalid interpolation_type {0}".format(interpolation_type))
interpolation_type = interpolation_type.upper()
if interpolation_type == "INV_DISTANCE":
self._update_card('RAIN_INV_DISTANCE', '')
self.project_manager.deleteCard('RAIN_THIESSEN', self.db_session)
else:
self._update_card('RAIN_THIESSEN', '')
self.project_manager.deleteCard('RAIN_INV_DISTANCE', self.db_session)
|
Adds a precip file to project with interpolation_type
|
def info():
"""
Generate information for a bug report.
Based on the requests package help utility module.
"""
try:
platform_info = {"system": platform.system(), "release": platform.release()}
except IOError:
platform_info = {"system": "Unknown", "release": "Unknown"}
implementation = platform.python_implementation()
if implementation == "CPython":
implementation_version = platform.python_version()
elif implementation == "PyPy":
implementation_version = "%s.%s.%s" % (
sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro,
)
if sys.pypy_version_info.releaselevel != "final":
implementation_version = "".join(
[implementation_version, sys.pypy_version_info.releaselevel]
)
else:
implementation_version = "Unknown"
return {
"platform": platform_info,
"implementation": {"name": implementation, "version": implementation_version},
"cryptography": {"version": getattr(cryptography, "__version__", "")},
"pyjwt": {"version": pyjwt_version},
}
|
Generate information for a bug report.
Based on the requests package help utility module.
|
def _check_uuid_fmt(self):
"""Checks .uuid_fmt, and raises an exception if it is not valid."""
if self.uuid_fmt not in UUIDField.FORMATS:
raise FieldValueRangeException(
"Unsupported uuid_fmt ({})".format(self.uuid_fmt))
|
Checks .uuid_fmt, and raises an exception if it is not valid.
|
def release(ctx, version):
"""
``version`` should be a string like '0.4' or '1.0'.
"""
invoke.run("git tag -s {0} -m '{0} release'".format(version))
invoke.run("git push --tags")
invoke.run("python setup.py sdist")
invoke.run("twine upload -s dist/PyNaCl-{0}* ".format(version))
session = requests.Session()
token = getpass.getpass("Input the Jenkins token: ")
response = session.post(
"{0}/build".format(JENKINS_URL),
params={
"cause": "Building wheels for {0}".format(version),
"token": token
}
)
response.raise_for_status()
wait_for_build_completed(session)
paths = download_artifacts(session)
invoke.run("twine upload {0}".format(" ".join(paths)))
|
``version`` should be a string like '0.4' or '1.0'.
|
def fromCSV(csvfile,out=None,fieldnames=None,fmtparams=None,conv_func={},
empty_to_None=[]):
"""Conversion from CSV to PyDbLite
csvfile : name of the CSV file in the file system
out : path for the new PyDbLite base in the file system
fieldnames : list of field names. If set to None, the field names must
be present in the first line of the CSV file
fmtparams : the format parameters for the CSV file, as described in
the csv module of the standard distribution
conv_func is a dictionary mapping a field name to the function used to
convert the string read in the CSV to the appropriate Python type. For
instance if field "age" must be converted to an integer :
conv_func["age"] = int
empty_to_None is a list of the fields such that when the value read in
the CSV file is the empty string, the field value is set to None
"""
import csv
import time
import datetime
if out is None:
out = os.path.splitext(csvfile)[0]+".pdl"
    # the first line holds the field names only when they were not supplied
    first_line_is_header = fieldnames is None
    if fieldnames is None:
        # read field names in the first line of CSV file
        reader = csv.reader(open(csvfile))
        fieldnames = reader.next()
    reader = csv.DictReader(open(csvfile),fieldnames,fmtparams)
    if first_line_is_header:
        reader.next() # skip the field names line
db = PyDbLite.Base(out)
conv_func.update({"__id__":int})
auto_id = not "__id__" in fieldnames
fieldnames = [ f for f in fieldnames if not f in ("__id__") ]
kw = {"mode":"override"}
db.create(*fieldnames,**kw)
print db.fields
next_id = 0
records = {}
while True:
try:
record = reader.next()
except StopIteration:
break
if auto_id:
record["__id__"] = next_id
next_id += 1
# replace empty strings by None
for field in empty_to_None:
if not record[field]:
record[field] = None
# type conversion
for field in conv_func:
if not isinstance(conv_func[field],(tuple,list)):
record[field] = conv_func[field](record[field])
else:
# date or datetime
date_class,date_fmt = conv_func[field]
if not record[field]:
record[field] = None
else:
time_tuple = time.strptime(record[field],date_fmt)
if date_class is datetime.date:
time_tuple = time_tuple[:3]
record[field] = date_class(*time_tuple)
records[record["__id__"]] = record
db.records = records
db.commit()
print len(db)
return db
|
Conversion from CSV to PyDbLite
csvfile : name of the CSV file in the file system
out : path for the new PyDbLite base in the file system
fieldnames : list of field names. If set to None, the field names must
be present in the first line of the CSV file
fmtparams : the format parameters for the CSV file, as described in
the csv module of the standard distribution
conv_func is a dictionary mapping a field name to the function used to
convert the string read in the CSV to the appropriate Python type. For
instance if field "age" must be converted to an integer :
conv_func["age"] = int
empty_to_None is a list of the fields such that when the value read in
the CSV file is the empty string, the field value is set to None
|
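A usage sketch with type conversion and date parsing; the file and field names are placeholders:

import datetime

db = fromCSV("people.csv",
             conv_func={"age": int,
                        "birth": (datetime.date, "%Y-%m-%d")},
             empty_to_None=["nickname"])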
def index():
"""Display the Scout dashboard."""
    accessible_institutes = current_user.institutes
    if 'admin' not in current_user.roles:
        accessible_institutes = current_user.institutes
        if not accessible_institutes:
            flash('Not allowed to see information - please visit the dashboard later!')
            return redirect(url_for('cases.dashboard_general.html'))
    LOG.debug('User accessible institutes: {}'.format(accessible_institutes))
    institutes = [inst for inst in store.institutes(accessible_institutes)]
    # Insert an entry that displays all institutes at the beginning of the array
    institutes.insert(0, {'_id': None, 'display_name': 'All institutes'})
    institute_id = None
    slice_query = None
    panel = 1
    if request.method == 'POST':
        institute_id = request.form.get('institute')
        slice_query = request.form.get('query')
        panel = request.form.get('pane_id')
    elif request.method == 'GET':
        institute_id = request.args.get('institute')
        slice_query = request.args.get('query')
    # The user should be restricted to their own institutes:
    # 1) to their default institute when the page is first loaded
    # 2) if they ask for an institute that they don't belong to
    # 3) if they want to perform a query on all institutes
    if not institute_id:
        institute_id = accessible_institutes[0]
    elif (not current_user.is_admin) and (slice_query and institute_id == 'None'):
        institute_id = accessible_institutes[0]
    elif (institute_id not in accessible_institutes) and not (institute_id == 'None'):
        institute_id = accessible_institutes[0]
LOG.info("Fetch all cases with institute: %s", institute_id)
data = get_dashboard_info(store, institute_id, slice_query)
data['institutes'] = institutes
data['choice'] = institute_id
total_cases = data['total_cases']
LOG.info("Found %s cases", total_cases)
if total_cases == 0:
flash('no cases found for institute {} (with that query) - please visit the dashboard later!'.format(institute_id), 'info')
# return redirect(url_for('cases.index'))
return render_template(
'dashboard/dashboard_general.html', institute=institute_id, query=slice_query, panel=panel, **data)
|
Display the Scout dashboard.
|
def refresh(self, only_closed=False):
"""refresh ports status
Args:
only_closed - check status only for closed ports
"""
if only_closed:
opened = filter(self.__check_port, self.__closed)
self.__closed = self.__closed.difference(opened)
self.__ports = self.__ports.union(opened)
else:
ports = self.__closed.union(self.__ports)
self.__ports = set(filter(self.__check_port, ports))
self.__closed = ports.difference(self.__ports)
|
refresh ports status
Args:
only_closed - check status only for closed ports
|
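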
def _summarize_combined(samples, vkey):
"""Prepare summarized CSV and plot files for samples to combine together.
Helps handle cases where we want to summarize over multiple samples.
"""
validate_dir = utils.safe_makedir(os.path.join(samples[0]["dirs"]["work"], vkey))
combined, _ = _group_validate_samples(samples, vkey, [["metadata", "validate_combine"]])
for vname, vitems in combined.items():
if vname:
cur_combined = collections.defaultdict(int)
for data in sorted(vitems, key=lambda x: x.get("lane", dd.get_sample_name(x))):
validations = [variant.get(vkey) for variant in data.get("variants", [])]
validations = [v for v in validations if v]
if len(validations) == 0 and vkey in data:
validations = [data.get(vkey)]
for validate in validations:
with open(validate["summary"]) as in_handle:
reader = csv.reader(in_handle)
next(reader) # header
for _, caller, vtype, metric, value in reader:
cur_combined[(caller, vtype, metric)] += int(value)
out_csv = os.path.join(validate_dir, "grading-summary-%s.csv" % vname)
with open(out_csv, "w") as out_handle:
writer = csv.writer(out_handle)
header = ["sample", "caller", "vtype", "metric", "value"]
writer.writerow(header)
for (caller, variant_type, category), val in cur_combined.items():
writer.writerow(["combined-%s" % vname, caller, variant_type, category, val])
plots = validateplot.classifyplot_from_valfile(out_csv)
|
Prepare summarized CSV and plot files for samples to combine together.
Helps handle cases where we want to summarize over multiple samples.
|
def cleanup(self):
"""
Attempt to set a new current symlink if it is broken. If no other
prefixes exist and the workdir is empty, try to delete the entire
workdir.
Raises:
:exc:`~MalformedWorkdir`: if no prefixes were found, but the
workdir is not empty.
"""
current = self.join('current')
if not os.path.exists(current):
LOGGER.debug('found broken current symlink, removing: %s', current)
os.unlink(self.join('current'))
self.current = None
try:
self._update_current()
except PrefixNotFound:
if not os.listdir(self.path):
LOGGER.debug('workdir is empty, removing %s', self.path)
os.rmdir(self.path)
else:
raise MalformedWorkdir(
(
'Unable to find any prefixes in {0}, '
'but the directory looks malformed. '
'Try deleting it manually.'
).format(self.path)
)
|
Attempt to set a new current symlink if it is broken. If no other
prefixes exist and the workdir is empty, try to delete the entire
workdir.
Raises:
:exc:`~MalformedWorkdir`: if no prefixes were found, but the
workdir is not empty.
|
def upgrade(*pkgs):
'''
Runs an update operation on the specified packages, or all packages if none is specified.
:type pkgs: list(str)
:param pkgs:
List of packages to update
:return: The upgraded packages. Example element: ``['libxslt-1.1.0', 'libxslt-1.1.10']``
    :rtype: list(list(str))
    .. code-block:: bash
        salt '*' nix.upgrade
        salt '*' nix.upgrade pkgs=one,two
'''
cmd = _quietnix()
cmd.append('--upgrade')
cmd.extend(pkgs)
out = _run(cmd)
upgrades = [_format_upgrade(s.split(maxsplit=1)[1])
for s in out['stderr'].splitlines()
if s.startswith('upgrading')]
return [[_strip_quotes(s_) for s_ in s]
for s in upgrades]
|
Runs an update operation on the specified packages, or all packages if none is specified.
:type pkgs: list(str)
:param pkgs:
List of packages to update
:return: The upgraded packages. Example element: ``['libxslt-1.1.0', 'libxslt-1.1.10']``
:rtype: list(list(str))
.. code-block:: bash
    salt '*' nix.upgrade
    salt '*' nix.upgrade pkgs=one,two
|
def interface_lookup(interfaces, hwaddr, address_type):
"""Search the address within the interface list."""
for interface in interfaces.values():
if interface.get('hwaddr') == hwaddr:
for address in interface.get('addrs'):
if address.get('type') == address_type:
return address.get('addr')
|
Search the address within the interface list.
|
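An illustrative ``interfaces`` mapping this helper expects (all values are examples):

interfaces = {
    'eth0': {
        'hwaddr': '52:54:00:12:34:56',
        'addrs': [{'type': 'ipv4', 'addr': '192.168.1.10'},
                  {'type': 'ipv6', 'addr': 'fe80::1'}],
    },
}
interface_lookup(interfaces, '52:54:00:12:34:56', 'ipv4')
# -> '192.168.1.10' (None when no interface or address matches)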
def _check_perpendicular_r2_axis(self, axis):
"""
Checks for R2 axes perpendicular to unique axis. For handling
symmetric top molecules.
"""
min_set = self._get_smallest_set_not_on_axis(axis)
for s1, s2 in itertools.combinations(min_set, 2):
test_axis = np.cross(s1.coords - s2.coords, axis)
if np.linalg.norm(test_axis) > self.tol:
op = SymmOp.from_axis_angle_and_translation(test_axis, 180)
r2present = self.is_valid_op(op)
if r2present:
self.symmops.append(op)
self.rot_sym.append((test_axis, 2))
return True
|
Checks for R2 axes perpendicular to unique axis. For handling
symmetric top molecules.
|
def call_runtime(self):
'''
Execute the runtime
'''
cache = self.gather_cache()
chunks = self.get_chunks()
interval = self.opts['thorium_interval']
recompile = self.opts.get('thorium_recompile', 300)
r_start = time.time()
while True:
events = self.get_events()
if not events:
time.sleep(interval)
continue
start = time.time()
self.state.inject_globals['__events__'] = events
self.state.call_chunks(chunks)
elapsed = time.time() - start
left = interval - elapsed
if left > 0:
time.sleep(left)
self.state.reset_run_num()
if (start - r_start) > recompile:
cache = self.gather_cache()
chunks = self.get_chunks()
if self.reg_ret is not None:
self.returners['{0}.save_reg'.format(self.reg_ret)](chunks)
r_start = time.time()
|
Execute the runtime
|
def dataframe(self, spark, group_by='greedy', limit=None, sample=1, seed=42, decode=None, summaries=None, schema=None, table_name=None):
"""Convert RDD returned from records function to a dataframe
:param spark: a SparkSession object
        :param group_by: specifies a partition strategy for the objects
:param limit: maximum number of objects to retrieve
:param decode: an optional transformation to apply to the objects retrieved
:param sample: percentage of results to return. Useful to return a sample
of the dataset. This parameter is ignored when 'limit' is set.
:param seed: initialize internal state of the random number generator (42 by default).
            This is used to make the dataset sampling reproducible. It can be set to None to obtain
different samples.
:param summaries: an iterable containing the summary for each item in the dataset. If None, it
will compute calling the summaries dataset.
:param schema: a Spark schema that overrides automatic conversion to a dataframe
:param table_name: allows resulting dataframe to easily be queried using SparkSQL
:return: a Spark DataFrame
"""
rdd = self.records(spark.sparkContext, group_by, limit, sample, seed, decode, summaries)
if not schema:
df = rdd.map(lambda d: Row(**d)).toDF()
else:
df = spark.createDataFrame(rdd, schema=schema)
if table_name:
df.createOrReplaceTempView(table_name)
return df
|
Convert RDD returned from records function to a dataframe
:param spark: a SparkSession object
:param group_by: specifies a partition strategy for the objects
:param limit: maximum number of objects to retrieve
:param decode: an optional transformation to apply to the objects retrieved
:param sample: percentage of results to return. Useful to return a sample
of the dataset. This parameter is ignored when 'limit' is set.
:param seed: initialize internal state of the random number generator (42 by default).
This is used to make the dataset sampling reproducible. It can be set to None to obtain
different samples.
:param summaries: an iterable containing the summary for each item in the dataset. If None, it
will compute calling the summaries dataset.
:param schema: a Spark schema that overrides automatic conversion to a dataframe
:param table_name: allows resulting dataframe to easily be queried using SparkSQL
:return: a Spark DataFrame
|
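A usage sketch, assuming ``dataset`` is an object exposing this method and a SparkSession is available:

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = dataset.dataframe(spark, limit=1000, table_name='pings')
spark.sql('SELECT COUNT(*) FROM pings').show()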
def _validate_type(cls, typeobj):
"""
Validate that all required type methods are implemented.
At minimum a type must have:
- a convert() or convert_binary() function
- a default_formatter() function
Raises an ArgumentError if the type is not valid
"""
if not (hasattr(typeobj, "convert") or hasattr(typeobj, "convert_binary")):
raise ArgumentError("type is invalid, does not have convert or convert_binary function", type=typeobj, methods=dir(typeobj))
if not hasattr(typeobj, "default_formatter"):
raise ArgumentError("type is invalid, does not have default_formatter function", type=typeobj, methods=dir(typeobj))
|
Validate that all required type methods are implemented.
At minimum a type must have:
- a convert() or convert_binary() function
- a default_formatter() function
Raises an ArgumentError if the type is not valid
|
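A minimal type object that would pass this validation (a sketch; a real type would do meaningful conversion and formatting, and ``SomeRegistry`` stands in for the class defining _validate_type):

class BoolType(object):
    @staticmethod
    def convert(value):
        return bool(value)

    @staticmethod
    def default_formatter(value):
        return 'true' if value else 'false'

SomeRegistry._validate_type(BoolType)  # raises nothing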
def _create_event(instance, action):
"""
    Create a new event, getting the user if django-cuser is available.
"""
user = None
user_repr = repr(user)
if CUSER:
user = CuserMiddleware.get_user()
user_repr = repr(user)
if user is not None and user.is_anonymous:
user = None
return TrackingEvent.objects.create(
action=action,
object=instance,
object_repr=repr(instance),
user=user,
user_repr=user_repr,
)
|
Create a new event, getting the user if django-cuser is available.
|
def decode_body(cls, header, f):
"""Generates a `MqttPingresp` packet given a
`MqttFixedHeader`. This method asserts that header.packet_type
is `pingresp`.
Parameters
----------
header: MqttFixedHeader
f: file
Object with a read method.
Raises
------
DecodeError
When there are extra bytes at the end of the packet.
Returns
-------
int
Number of bytes consumed from ``f``.
MqttPingresp
Object extracted from ``f``.
"""
assert header.packet_type == MqttControlPacketType.pingresp
if header.remaining_len != 0:
raise DecodeError('Extra bytes at end of packet.')
return 0, MqttPingresp()
|
Generates a `MqttPingresp` packet given a
`MqttFixedHeader`. This method asserts that header.packet_type
is `pingresp`.
Parameters
----------
header: MqttFixedHeader
f: file
Object with a read method.
Raises
------
DecodeError
When there are extra bytes at the end of the packet.
Returns
-------
int
Number of bytes consumed from ``f``.
MqttPingresp
Object extracted from ``f``.
|
def validate_token(self, request, consumer, token):
"""
Check the token and raise an `oauth.Error` exception if invalid.
"""
oauth_server, oauth_request = oauth_provider.utils.initialize_server_request(request)
oauth_server.verify_request(oauth_request, consumer, token)
|
Check the token and raise an `oauth.Error` exception if invalid.
|
def temp_url(self, duration=120):
"""Returns a temporary URL for the given key."""
return self.bucket._boto_s3.meta.client.generate_presigned_url(
'get_object',
Params={'Bucket': self.bucket.name, 'Key': self.name},
ExpiresIn=duration
)
|
Returns a temporary URL for the given key.
|
def _init(self):
"""Read the b"\\r\\n" at the end of the message."""
read_values = []
read = self._file.read
last = read(1)
current = read(1)
while last != b'' and current != b'' and not \
(last == b'\r' and current == b'\n'):
read_values.append(last)
last = current
current = read(1)
if current == b'' and last != b'\r':
read_values.append(last)
self._bytes = b''.join(read_values)
|
Read the b"\\r\\n" at the end of the message.
|
def searchFilesIndex(self, nameData, fileData, fileIndex, searchString, category="", math=False, game=False, extension=""):
"""Search the files index using the namedata and returns the filedata"""
try:
fileFile = open(fileIndex, 'rt')
except IOError:
self.repo.printd("Error: Unable to read index file " + self.fileIndex)
return None, None
count = 1
for line in fileFile:
count += 1
try:
                if nameData[count] is not None:
#category argument
if category in line:
fileData[count] = line[:len(line) - 1]
else:
nameData[count] = None
fileData[count] = None
#extension argument
if extension in line:
fileData[count] = line[:len(line) - 1]
else:
nameData[count] = None
fileData[count] = None
#Both game and math
if (game and math):
if ("/games/" in line or "/math/" in line or "/science" in line):
nameData[count] = line[:len(line) - 1]
else:
nameData[count] = None
#game option switch
elif game:
if "/games/" in line:
fileData[count] = line[:len(line) - 1]
else:
nameData[count] = None
fileData[count] = None
#math option switch
elif math:
if ("/math/" in line or "/science/" in line):
fileData[count] = line[:len(line) - 1]
else:
nameData[count] = None
fileData[count] = None
            except (KeyError, IndexError):
                pass
#Close the file and return
fileFile.close()
return fileData, nameData
|
Search the files index using the name data and return the file data.
|
def getAsKmlGridAnimation(self, session, projectFile=None, path=None, documentName=None, colorRamp=None, alpha=1.0, noDataValue=0.0):
"""
Retrieve the WMS dataset as a gridded time stamped KML string.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs.
path (str, optional): Path to file where KML file will be written. Defaults to None.
documentName (str, optional): Name of the KML document. This will be the name that appears in the legend.
                Defaults to the file extension of the WMS dataset.
colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a
default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color
ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the
'interpolatedPoints' must be an integer representing the number of points to interpolate between each
color given in the colors list.
alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100%
opaque and 0.0 is 100% transparent. Defaults to 1.0.
noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters.
Defaults to 0.0.
Returns:
str: KML string
"""
# Prepare rasters
timeStampedRasters = self._assembleRasterParams(projectFile, self.rasters)
# Create a raster converter
converter = RasterConverter(sqlAlchemyEngineOrSession=session)
# Configure color ramp
if isinstance(colorRamp, dict):
converter.setCustomColorRamp(colorRamp['colors'], colorRamp['interpolatedPoints'])
else:
converter.setDefaultColorRamp(colorRamp)
if documentName is None:
documentName = self.fileExtension
kmlString = converter.getAsKmlGridAnimation(tableName=WMSDatasetRaster.tableName,
timeStampedRasters=timeStampedRasters,
rasterIdFieldName='id',
rasterFieldName='raster',
documentName=documentName,
alpha=alpha,
noDataValue=noDataValue)
if path:
with open(path, 'w') as f:
f.write(kmlString)
return kmlString
|
Retrieve the WMS dataset as a gridded time stamped KML string.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs.
path (str, optional): Path to file where KML file will be written. Defaults to None.
documentName (str, optional): Name of the KML document. This will be the name that appears in the legend.
    Defaults to the file extension of the WMS dataset.
colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a
default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color
ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the
'interpolatedPoints' must be an integer representing the number of points to interpolate between each
color given in the colors list.
alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100%
opaque and 0.0 is 100% transparent. Defaults to 1.0.
noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters.
Defaults to 0.0.
Returns:
str: KML string
|
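A usage sketch; the session, project file, and color ramp below are assumptions standing in for real objects (COLOR_RAMP_HUE is one of mapkit's default ramps):

from mapkit.ColorRampGenerator import ColorRampEnum

kml_string = wms_dataset.getAsKmlGridAnimation(
    session,                      # SQLAlchemy session (assumed)
    projectFile=project_file,     # gsshapy ProjectFile (assumed)
    path='/tmp/animation.kml',
    documentName='Depth Animation',
    colorRamp=ColorRampEnum.COLOR_RAMP_HUE,
    alpha=0.8,
)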
def disable_snapshots(self, volume_id, schedule_type):
"""Disables snapshots for a specific block volume at a given schedule
:param integer volume_id: The id of the volume
:param string schedule_type: 'HOURLY'|'DAILY'|'WEEKLY'
:return: Returns whether successfully disabled or not
"""
return self.client.call('Network_Storage', 'disableSnapshots',
schedule_type, id=volume_id)
|
Disables snapshots for a specific block volume at a given schedule
:param integer volume_id: The id of the volume
:param string schedule_type: 'HOURLY'|'DAILY'|'WEEKLY'
:return: Returns whether successfully disabled or not
|
def normalize_weekly(data):
"""Normalization for dining menu data"""
if "tblMenu" not in data["result_data"]["Document"]:
data["result_data"]["Document"]["tblMenu"] = []
if isinstance(data["result_data"]["Document"]["tblMenu"], dict):
data["result_data"]["Document"]["tblMenu"] = [data["result_data"]["Document"]["tblMenu"]]
for day in data["result_data"]["Document"]["tblMenu"]:
if "tblDayPart" not in day:
continue
if isinstance(day["tblDayPart"], dict):
day["tblDayPart"] = [day["tblDayPart"]]
for meal in day["tblDayPart"]:
if isinstance(meal["tblStation"], dict):
meal["tblStation"] = [meal["tblStation"]]
for station in meal["tblStation"]:
if isinstance(station["tblItem"], dict):
station["tblItem"] = [station["tblItem"]]
return data
|
Normalization for dining menu data
|
def normalize_name(name):
"""
Given a key name (e.g. "LEFT CONTROL"), clean up the string and convert to
the canonical representation (e.g. "left ctrl") if one is known.
"""
if not name or not isinstance(name, basestring):
raise ValueError('Can only normalize non-empty string names. Unexpected '+ repr(name))
if len(name) > 1:
name = name.lower()
if name != '_' and '_' in name:
name = name.replace('_', ' ')
return canonical_names.get(name, name)
|
Given a key name (e.g. "LEFT CONTROL"), clean up the string and convert to
the canonical representation (e.g. "left ctrl") if one is known.
|
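For example (the first mapping relies on an entry in canonical_names):

normalize_name('LEFT CONTROL')  # -> 'left ctrl'
normalize_name('CAPS_LOCK')     # -> 'caps lock'
normalize_name('A')             # single characters pass through unchanged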
def nCr(n, r):
"""
Calculates nCr.
Args:
n (int): total number of items.
r (int): items to choose
Returns:
nCr.
"""
    f = math.factorial
    # use integer division so large inputs stay exact
    return f(n) // (f(r) * f(n - r))
|
Calculates nCr.
Args:
n (int): total number of items.
r (int): items to choose
Returns:
nCr.
|
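For example:

nCr(5, 2)   # -> 10
nCr(52, 5)  # -> 2598960, the number of five-card poker hands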
def repl_update(self, config):
"""Reconfig Replicaset with new config"""
cfg = config.copy()
cfg['version'] += 1
try:
result = self.run_command("replSetReconfig", cfg)
if int(result.get('ok', 0)) != 1:
return False
except pymongo.errors.AutoReconnect:
self.update_server_map(cfg) # use new server_map
self.waiting_member_state()
self.waiting_config_state()
return self.connection() and True
|
Reconfig Replicaset with new config
|
def _mean_prediction(self, mu, Y, h, t_z):
""" Creates a h-step ahead mean prediction
Parameters
----------
mu : np.ndarray
The past predicted values
Y : np.ndarray
The past data
h : int
How many steps ahead for the prediction
t_z : np.ndarray
A vector of (transformed) latent variables
Returns
----------
h-length vector of mean predictions
"""
        # Create arrays to iterate over
Y_exp = Y.copy()
# Loop over h time periods
for t in range(0,h):
if self.ar != 0:
Y_exp_normalized = (Y_exp[-self.ar:][::-1] - self._norm_mean) / self._norm_std
new_value = self.predict_new(np.append(1.0, Y_exp_normalized), self.latent_variables.get_z_values())
else:
new_value = self.predict_new(np.array([1.0]), self.latent_variables.get_z_values())
Y_exp = np.append(Y_exp, [self.link(new_value)])
return Y_exp
|
Creates a h-step ahead mean prediction
Parameters
----------
mu : np.ndarray
The past predicted values
Y : np.ndarray
The past data
h : int
How many steps ahead for the prediction
t_z : np.ndarray
A vector of (transformed) latent variables
Returns
----------
h-length vector of mean predictions
|
def get_authn_contexts(self):
"""
Gets the authentication contexts
:returns: The authentication classes for the SAML Response
:rtype: list
"""
authn_context_nodes = self.__query_assertion('/saml:AuthnStatement/saml:AuthnContext/saml:AuthnContextClassRef')
return [OneLogin_Saml2_Utils.element_text(node) for node in authn_context_nodes]
|
Gets the authentication contexts
:returns: The authentication classes for the SAML Response
:rtype: list
|
def _set_bgp_state(self, v, load=False):
"""
Setter method for bgp_state, mapped from YANG variable /bgp_state/neighbor/evpn/bgp_state (bgp-states)
If this variable is read-only (config: false) in the
source YANG file, then _set_bgp_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bgp_state() directly.
YANG Description: BGP state
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'established': {'value': 6}, u'open-sent': {'value': 4}, u'no-state': {'value': 0}, u'idle': {'value': 1}, u'active': {'value': 3}, u'connect-state': {'value': 2}, u'open-confirm': {'value': 5}},), is_leaf=True, yang_name="bgp-state", rest_name="bgp-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='bgp-states', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """bgp_state must be of a type compatible with bgp-states""",
'defined-type': "brocade-bgp-operational:bgp-states",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'established': {'value': 6}, u'open-sent': {'value': 4}, u'no-state': {'value': 0}, u'idle': {'value': 1}, u'active': {'value': 3}, u'connect-state': {'value': 2}, u'open-confirm': {'value': 5}},), is_leaf=True, yang_name="bgp-state", rest_name="bgp-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='bgp-states', is_config=False)""",
})
self.__bgp_state = t
if hasattr(self, '_set'):
self._set()
|
Setter method for bgp_state, mapped from YANG variable /bgp_state/neighbor/evpn/bgp_state (bgp-states)
If this variable is read-only (config: false) in the
source YANG file, then _set_bgp_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bgp_state() directly.
YANG Description: BGP state
|
def validateDocument(self, ctxt):
"""Try to validate the document instance basically it does
the all the checks described by the XML Rec i.e. validates
the internal and external subset (if present) and validate
the document tree. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlValidateDocument(ctxt__o, self._o)
return ret
|
Try to validate the document instance. Basically it does
all the checks described by the XML Rec, i.e. it validates
the internal and external subset (if present) and validates
the document tree.
|
def put_file(client, source_file, destination_file):
"""
Copy file to instance using Paramiko client connection.
"""
try:
sftp_client = client.open_sftp()
sftp_client.put(source_file, destination_file)
except Exception as error:
raise IpaUtilsException(
'Error copying file to instance: {0}.'.format(error)
)
finally:
with ignored(Exception):
sftp_client.close()
|
Copy file to instance using Paramiko client connection.
|
def generate_string_to_sign(date, region, canonical_request):
"""
Generate string to sign.
:param date: Date is input from :meth:`datetime.datetime`
:param region: Region should be set to bucket region.
:param canonical_request: Canonical request generated previously.
"""
formatted_date_time = date.strftime("%Y%m%dT%H%M%SZ")
canonical_request_hasher = hashlib.sha256()
canonical_request_hasher.update(canonical_request.encode('utf-8'))
canonical_request_sha256 = canonical_request_hasher.hexdigest()
scope = generate_scope_string(date, region)
return '\n'.join([_SIGN_V4_ALGORITHM,
formatted_date_time,
scope,
canonical_request_sha256])
|
Generate string to sign.
:param date: Date is input from :meth:`datetime.datetime`
:param region: Region should be set to bucket region.
:param canonical_request: Canonical request generated previously.
|
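The result is four newline-separated parts; illustratively (assuming the usual S3 signature v4 algorithm tag and scope layout, with the hash elided):

AWS4-HMAC-SHA256
20190101T000000Z
20190101/us-east-1/s3/aws4_request
<hex-encoded SHA-256 of the canonical request>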
def _get_ids_from_hostname(self, hostname):
"""List VS ids which match the given hostname."""
results = self.list_instances(hostname=hostname, mask="id")
return [result['id'] for result in results]
|
List VS ids which match the given hostname.
|
def _init_map(self):
"""stub"""
QuestionTextFormRecord._init_map(self)
QuestionFilesFormRecord._init_map(self)
super(QuestionTextAndFilesMixin, self)._init_map()
|
stub
|
def _check_import_source():
"""Check if tlgu imported, if not import it."""
path_rel = '~/cltk_data/greek/software/greek_software_tlgu/tlgu.h'
path = os.path.expanduser(path_rel)
if not os.path.isfile(path):
try:
corpus_importer = CorpusImporter('greek')
corpus_importer.import_corpus('greek_software_tlgu')
except Exception as exc:
logger.error('Failed to import TLGU: %s', exc)
raise
|
Check if tlgu imported, if not import it.
|