code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def open(self, options):
    """
    Open and import the referenced schema (runs at most once).

    @param options: An options dictionary.
    @type options: L{options.Options}
    @return: The referenced schema, or None when already opened or when
        the import target cannot be located or downloaded.
    @rtype: L{Schema}
    """
    if self.opened:
        return
    self.opened = True
    log.debug('%s, importing ns="%s", location="%s"',
        self.id,
        self.ns[1],
        self.location
    )
    schema = self.locate()
    if schema is None and self.location is not None:
        # Not found locally but a location is known: fetch it.
        schema = self.download(options)
    elif schema is None:
        log.debug('imported schema (%s) not-found', self.ns[1])
    log.debug('imported:\n%s', schema)
    return schema
|
Open and import the referenced schema.
@param options: An options dictionary.
@type options: L{options.Options}
@return: The referenced schema.
@rtype: L{Schema}
|
def nodeInLanguageStem(_: Context, n: Node, s: ShExJ.LanguageStem) -> bool:
    """ http://shex.io/shex-semantics/#values
    **nodeIn**: asserts that an RDF node n is equal to an RDF term s or is in a set defined by a
    :py:class:`ShExJ.IriStem`, :py:class:`LiteralStem` or :py:class:`LanguageStem`.
    The expression `nodeInLanguageStem(n, s)` is satisfied iff:
    #) `s` is a :py:class:`ShExJ.WildCard` or
    #) `n` is a language-tagged string and fn:starts-with(`n.language`, `s`)
    """
    if isinstance(s, ShExJ.Wildcard):
        return True
    # Language-tagged literal whose tag starts with the stem text.
    return isinstance(n, Literal) and n.language is not None \
        and str(n.language).startswith(str(s))
|
http://shex.io/shex-semantics/#values
**nodeIn**: asserts that an RDF node n is equal to an RDF term s or is in a set defined by a
:py:class:`ShExJ.IriStem`, :py:class:`LiteralStem` or :py:class:`LanguageStem`.
The expression `nodeInLanguageStem(n, s)` is satisfied iff:
#) `s` is a :py:class:`ShExJ.WildCard` or
#) `n` is a language-tagged string and fn:starts-with(`n.language`, `s`)
|
def handle_option_error(error):
    """Raise the matching exception for an option-command error message.

    As of tmux 2.4 there are three distinct option errors: unknown,
    invalid and ambiguous. All raised exceptions derive from
    :exc:`exc.OptionError`, so ``except exc.OptionError`` catches any of
    them.

    Parameters
    ----------
    error : str
        Error response from subprocess call.

    Raises
    ------
    :exc:`exc.OptionError`, :exc:`exc.UnknownOption`,
    :exc:`exc.InvalidOption`, :exc:`exc.AmbiguousOption`
    """
    # Checked in the same order as the original if/elif chain.
    dispatch = (
        ('unknown option', exc.UnknownOption),
        ('invalid option', exc.InvalidOption),
        ('ambiguous option', exc.AmbiguousOption),
    )
    for needle, exc_class in dispatch:
        if needle in error:
            raise exc_class(error)
    # Unrecognized message: fall back to the base option error.
    raise exc.OptionError(error)
|
Raises exception if error in option command found.
Purpose: As of tmux 2.4, there are now 3 different types of option errors:
- unknown option
- invalid option
- ambiguous option
Before 2.4, unknown option was the only error raised.
All errors raised will have the base error of :exc:`exc.OptionError`. So to
catch any option error, use ``except exc.OptionError``.
Parameters
----------
error : str
Error response from subprocess call.
Raises
------
:exc:`exc.OptionError`, :exc:`exc.UnknownOption`, :exc:`exc.InvalidOption`,
:exc:`exc.AmbiguousOption`
|
def _factorize_array(values, na_sentinel=-1, size_hint=None,
                     na_value=None):
    """Factorize an array-like to labels and uniques.

    No coercion of types or unboxing is performed before factorization.

    Parameters
    ----------
    values : ndarray
    na_sentinel : int, default -1
    size_hint : int, optional
        Passed through to the hashtable's 'get_labels' method
    na_value : object, optional
        A value in `values` to consider missing. Note: only use this
        parameter when you know that you don't have any values pandas would
        consider missing in the array (NaN for float data, iNaT for
        datetimes, etc.).

    Returns
    -------
    labels, uniques : ndarray
    """
    (hash_klass, _), values = _get_data_algo(values, _hashtables)
    # Size the hashtable up front when a hint is available.
    hashtable = hash_klass(size_hint or len(values))
    uniques, labels = hashtable.factorize(
        values, na_sentinel=na_sentinel, na_value=na_value)
    return ensure_platform_int(labels), uniques
|
Factorize an array-like to labels and uniques.
This doesn't do any coercion of types or unboxing before factorization.
Parameters
----------
values : ndarray
na_sentinel : int, default -1
size_hint : int, optional
Passed through to the hashtable's 'get_labels' method
na_value : object, optional
A value in `values` to consider missing. Note: only use this
parameter when you know that you don't have any values pandas would
consider missing in the array (NaN for float data, iNaT for
datetimes, etc.).
Returns
-------
labels, uniques : ndarray
|
def igetattr(self, name, context=None):
    """Inferred getattr: return an iterator of inferred statements for
    attribute ``name``, converting attribute-inference failures into
    :exc:`exceptions.InferenceError`."""
    try:
        found = self.getattr(name, context)
        return bases._infer_stmts(found, context, frame=self)
    except exceptions.AttributeInferenceError as error:
        # Surface the failure as an inference error, keeping the cause.
        raise exceptions.InferenceError(
            error.message, target=self, attribute=name, context=context
        ) from error
|
Inferred getattr, which returns an iterator of inferred statements.
|
def deserialize(data):
    """
    Create an instance from serialized data.

    :param data: mapping with a ``'class'`` entry
        (``{'module': ..., 'name': ...}``) and a ``'params'`` entry
        mapping parameter names to serialized values.
    :return: a new instance of the referenced class built from the
        deserialized parameters.
    :raises ImportError: if the module cannot be imported or does not
        contain the named class.
    """
    # Hoist the repeated nested lookups (the original re-read
    # data['class'][...] in every error path).
    class_info = data.get('class')
    module_name = class_info.get('module')
    class_name = class_info.get('name')
    # Import module, chaining the original failure for debuggability
    # (the original re-raised without "from e", losing the cause).
    try:
        module = import_module(module_name)
    except ImportError as e:
        raise ImportError("No module named: %r" % module_name) from e
    # Resolve the class separately so an AttributeError from deep inside
    # import_module can no longer be misreported as a missing class.
    try:
        cls = getattr(module, class_name)
    except AttributeError as e:
        raise ImportError("module %r does not contain class %r" % (
            module_name,
            class_name
        )) from e
    # Deserialize parameters using the class' declared parameter specs.
    class_params = cls.class_params(hidden=True)
    params = {
        name: class_params[name].deserialize(value)
        for name, value in data.get('params').items()
    }
    # Instantiate new instance
    return cls(**params)
|
Create instance from serial data
|
def setEventCallback(self, event, callback):
    """
    Register ``callback`` to be invoked for ``event``.

    event must be one of:
        TRANSFER_COMPLETED, TRANSFER_ERROR, TRANSFER_TIMED_OUT,
        TRANSFER_CANCELLED, TRANSFER_STALL, TRANSFER_NO_DEVICE,
        TRANSFER_OVERFLOW
    Raises ValueError for any other event value.
    """
    if event in EVENT_CALLBACK_SET:
        self.__event_callback_dict[event] = callback
    else:
        raise ValueError('Unknown event %r.' % (event, ))
|
Set a function to call for a given event.
event must be one of:
TRANSFER_COMPLETED
TRANSFER_ERROR
TRANSFER_TIMED_OUT
TRANSFER_CANCELLED
TRANSFER_STALL
TRANSFER_NO_DEVICE
TRANSFER_OVERFLOW
|
def uuid4(self, cast_to=str):
    """
    Generate a random UUID4 and cast it with ``cast_to``.

    :param cast_to: callable applied to the generated ``uuid.UUID``;
        default is `str`.
    :type cast_to: callable
    """
    # 128 random bits from the shared generator keep results reproducible
    # under seeding. Based on http://stackoverflow.com/q/41186818
    random_bits = self.generator.random.getrandbits(128)
    return cast_to(uuid.UUID(int=random_bits, version=4))
|
Generates a random UUID4 string.
:param cast_to: Specify what type the UUID should be cast to. Default is `str`
:type cast_to: callable
|
def normalize_linefeeds(self, a_string):
    r"""Convert `\r\r\r\n`, `\r\r\n`, `\r\n`, `\n\r` to `self.RESPONSE_RETURN`.

    :param a_string: A string that may have non-normalized line feeds
        i.e. output returned from device, or a device prompt
    :type a_string: str
    :return: the string with line feeds normalized
    :rtype: str
    """
    newline = re.compile(r"(\r\r\r\n|\r\r\n|\r\n|\n\r)")
    a_string = newline.sub(self.RESPONSE_RETURN, a_string)
    if self.RESPONSE_RETURN == "\n":
        # Convert any remaining \r to \n
        a_string = re.sub(r"\r", self.RESPONSE_RETURN, a_string)
    # BUG FIX: the original only returned inside the "\n" branch, so any
    # other RESPONSE_RETURN value made the function return None.
    return a_string
|
Convert `\r\r\n`, `\r\n`, `\n\r` to `\n`.
:param a_string: A string that may have non-normalized line feeds
i.e. output returned from device, or a device prompt
:type a_string: str
|
def get_data(self, query, fields_convert_map, encoding='utf-8', auto_convert=True,
             include_hidden=False, header=None):
    """
    Yield one display-ready row per record of `query`, plus an optional
    trailing totals row produced by self._get_sum().

    :param query: a Select statement (executed via do_()) or an iterable
        of records.
    :param fields_convert_map: per-field converter overrides, merged over
        self.fields_convert_map; when auto_convert=True these are applied
        while rendering each field.
    :param encoding: encoding used when coercing totals-row strings.
    :param include_hidden: when False, fields flagged 'hidden' in
        self.table_info['fields_list'] are skipped.
    :param header: optional column names; when given each yielded row is
        a dict keyed by them instead of a plain list.
    """
    # Merge caller-supplied converters over the instance defaults.
    fields_convert_map = fields_convert_map or {}
    d = self.fields_convert_map.copy()
    d.update(fields_convert_map)
    if isinstance(query, Select):
        query = do_(query)
    # def get_value(name, value, record):
    # convert = d.get(name)
    # if convert:
    # value = convert(value, record)
    # return safe_unicode(value, encoding)
    for record in query:
        # Accumulate running totals before the record is transformed.
        self._cal_sum(record)
        row = []
        record = self._get_record(record)
        if self.before_record_render:
            self.before_record_render(record)
        # Only orm.Model records carry field metadata for rendering.
        if isinstance(record, orm.Model):
            model = record.__class__
        else:
            model = None
        for i, x in enumerate(self.table_info['fields_list']):
            field = get_field(x['name'], model)
            if not field:
                field = {'name':x['name']}
            else:
                field = {'name':x['name'], 'prop':field}
            if not include_hidden and x.get('hidden'):
                continue
            # Non-model records must pass the raw value explicitly.
            if isinstance(record, orm.Model):
                v = make_view_field(field, record, fields_convert_map=d,
                                    auto_convert=auto_convert)
            else:
                v = make_view_field(field, record, fields_convert_map=d,
                                    auto_convert=auto_convert, value=record[x['name']])
            value = v['display']
            #value = safe_unicode(v['display'], encoding)
            row.append(value)
        if header:
            ret = dict(zip(header, row))
        else:
            ret = row
        yield ret
    # Emit the totals row last, when any totals were accumulated.
    total = self._get_sum()
    if total:
        row = []
        for x in total:
            v = x
            if isinstance(x, str):
                v = safe_unicode(x, encoding)
            row.append(v)
        if header:
            ret = dict(zip(header, row))
        else:
            ret = row
        yield ret
|
If convert=True, will convert field value
|
def mcmc_emcee(self, n_walkers, n_run, n_burn, mean_start, sigma_start):
    """
    Run an emcee MCMC analysis of the parameter space and return the
    collected samples.

    :param n_walkers: number of ensemble walkers
    :param n_run: number of sampling iterations (after burn-in)
    :param n_burn: number of burn-in iterations (discarded)
    :param mean_start: mean of the walkers' start positions
    :param sigma_start: spread of the walkers' start positions
    """
    sampler = emcee.EnsembleSampler(
        n_walkers, self.cosmoParam.numParam, self.chain.likelihood)
    start_positions = emcee.utils.sample_ball(mean_start, sigma_start, n_walkers)
    # Burn-in phase: keep only the final walker positions.
    burned_in_pos, _, _, _ = sampler.run_mcmc(start_positions, n_burn)
    sampler.reset()
    store = InMemoryStorageUtil()
    for walker_pos, walker_prob, _, _ in sampler.sample(
            burned_in_pos, iterations=n_run):
        store.persistSamplingValues(walker_pos, walker_prob, None)
    return store.samples
|
returns the mcmc analysis of the parameter space
|
def delete_pool(hostname, username, password, name):
    '''
    Delete an existing pool.
    hostname
        The host/address of the bigip device
    username
        The iControl REST username
    password
        The iControl REST password
    name
        The name of the pool which will be deleted
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    # Test mode: report what would happen without touching the device.
    if __opts__['test']:
        params = {
            'hostname': hostname,
            'username': username,
            'password': password,
            'name': name,
        }
        return _test_output(ret, 'delete', params=params)
    # Query the device for the current pool configuration.
    existing = __salt__['bigip.list_pool'](hostname, username, password, name)
    if existing['code'] == 404:
        # Already absent: nothing to do.
        ret['result'] = True
        ret['comment'] = 'This pool already does not exist. No changes made.'
        ret['changes']['old'] = {}
        ret['changes']['new'] = {}
    elif existing['code'] == 200:
        # Pool exists by name: attempt the deletion.
        deleted = __salt__['bigip.delete_pool'](hostname, username, password, name)
        if deleted['code'] == 200:
            ret['result'] = True
            ret['comment'] = 'Pool was successfully deleted.'
            ret['changes']['old'] = existing['content']
            ret['changes']['new'] = {}
        else:
            # Deletion call failed: surface the device's response.
            ret = _load_result(deleted, ret)
    else:
        # Listing itself failed: surface the device's response.
        ret = _load_result(existing, ret)
    return ret
|
Delete an existing pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool which will be deleted
|
def get_ansible_by_id(self, ansible_id):
    """Return the ansible host whose id equals ``ansible_id``, or None."""
    matches = (host for host in self.ansible_hosts if host.id == ansible_id)
    return next(matches, None)
|
Return a ansible with that id or None.
|
def rightsibling(node):
    """
    Return Right Sibling of `node`, or None when `node` is the last
    child or has no parent.
    >>> from anytree import Node
    >>> dan = Node("Dan")
    >>> jet = Node("Jet", parent=dan)
    >>> jan = Node("Jan", parent=dan)
    >>> joe = Node("Joe", parent=dan)
    >>> rightsibling(dan)
    >>> rightsibling(jet)
    Node('/Dan/Jan')
    >>> rightsibling(jan)
    Node('/Dan/Joe')
    >>> rightsibling(joe)
    """
    parent = node.parent
    if not parent:
        return None
    siblings = parent.children
    position = siblings.index(node)
    # Bounds check replaces the original try/except IndexError.
    if position + 1 < len(siblings):
        return siblings[position + 1]
    return None
|
Return Right Sibling of `node`.
>>> from anytree import Node
>>> dan = Node("Dan")
>>> jet = Node("Jet", parent=dan)
>>> jan = Node("Jan", parent=dan)
>>> joe = Node("Joe", parent=dan)
>>> rightsibling(dan)
>>> rightsibling(jet)
Node('/Dan/Jan')
>>> rightsibling(jan)
Node('/Dan/Joe')
>>> rightsibling(joe)
|
def _process_element(self, pos, e):
    """
    Parses an incoming HTML element/node for data.

    Acts as one step of a streaming state machine: sender, timestamp and
    text are accumulated on self until a message boundary (a new message
    <div> or the end of the thread <div>) flushes them as a ChatMessage.

    pos -- the part of the element being parsed (start/end)
    e -- the element being parsed
    Returns True when the end of the thread <div> was reached.
    """
    tag, class_attr = _tag_and_class_attr(e)
    start_of_message = tag == 'div' and class_attr == 'message' and pos == 'start'
    end_of_thread = tag == 'div' and 'thread' in class_attr and pos == 'end'
    if start_of_message and not self.messages_started:
        # First message div seen: start collecting messages.
        self.messages_started = True
    elif tag == "span" and pos == "end":
        if "user" in class_attr:
            self.current_sender = self.name_resolver.resolve(e.text)
        elif "meta" in class_attr:
            self.current_timestamp =\
                parse_timestamp(e.text, self.use_utc, self.timezone_hints)
    elif tag == 'p' and pos == 'end':
        # This is only necessary because of accidental double <p> nesting on
        # Facebook's end. Clearly, QA and testing is one of Facebook's strengths ;)
        if not self.current_text:
            self.current_text = e.text.strip() if e.text else ''
    elif tag == 'img' and pos == 'start':
        self.current_text = '(image reference: {})'.format(e.attrib['src'])
    elif (start_of_message or end_of_thread) and self.messages_started:
        # Message boundary: flush the accumulated state as one message.
        if not self.current_timestamp:
            # This is the typical error when the new Facebook format is
            # used with the legacy parser.
            raise UnsuitableParserError
        if not self.current_sender:
            # Warn once per run, then substitute a placeholder sender.
            if not self.no_sender_warning_status:
                sys.stderr.write(
                    "\rWARNING: The sender was missing in one or more parsed messages. "
                    "This is an error on Facebook's end that unfortunately cannot be "
                    "recovered from. Some or all messages in the output may show the "
                    "sender as 'Unknown' within each thread.\n")
                self.no_sender_warning_status = True
            self.current_sender = "Unknown"
        cm = ChatMessage(timestamp=self.current_timestamp,
                         sender=self.current_sender,
                         content=self.current_text or '',
                         seq_num=self.seq_num)
        self.messages += [cm]
        # seq_num decreases: messages appear newest-first in the export.
        self.seq_num -= 1
        self.current_sender, self.current_timestamp, self.current_text = None, None, None
    return end_of_thread
|
Parses an incoming HTML element/node for data.
pos -- the part of the element being parsed
(start/end)
e -- the element being parsed
|
def getWithPrompt(self):
    """Interactively prompt for parameter value.

    Builds a prompt string from the parameter's prompt text (or name),
    its choice list or min/max range, and the current value as a
    default; then loops reading input until self.set() accepts a value.
    Raises EOFError when EOF is received without an acceptable value.
    """
    if self.prompt:
        # Only the first line of the configured prompt text is shown.
        pstring = self.prompt.split("\n")[0].strip()
    else:
        pstring = self.name
    if self.choice:
        # Enumerate allowed choices: "name (a|b|c)".
        schoice = list(map(self.toString, self.choice))
        pstring = pstring + " (" + "|".join(schoice) + ")"
    elif self.min not in [None, INDEF] or \
            self.max not in [None, INDEF]:
        # Show the allowed range: "name (min:max)".
        pstring = pstring + " ("
        if self.min not in [None, INDEF]:
            pstring = pstring + self.toString(self.min)
        pstring = pstring + ":"
        if self.max not in [None, INDEF]:
            pstring = pstring + self.toString(self.max)
        pstring = pstring + ")"
    # add current value as default
    if self.value is not None:
        pstring = pstring + " (" + self.toString(self.value,quoted=1) + ")"
    pstring = pstring + ": "
    # don't redirect stdin/out unless redirected filehandles are also ttys
    # or unless originals are NOT ttys
    stdout = sys.__stdout__
    try:
        if sys.stdout.isatty() or not stdout.isatty():
            stdout = sys.stdout
    except AttributeError:
        pass
    stdin = sys.__stdin__
    try:
        if sys.stdin.isatty() or not stdin.isatty():
            stdin = sys.stdin
    except AttributeError:
        pass
    # print prompt, suppressing both newline and following space
    stdout.write(pstring)
    stdout.flush()
    ovalue = irafutils.tkreadline(stdin)
    value = ovalue.strip()
    # loop until we get an acceptable value
    while (1):
        try:
            # null input usually means use current value as default
            # check it anyway since it might not be acceptable
            if value == "": value = self._nullPrompt()
            self.set(value)
            # None (no value) is not acceptable value after prompt
            if self.value is not None: return
            # if not EOF, keep looping
            if ovalue == "":
                stdout.flush()
                raise EOFError("EOF on parameter prompt")
            print("Error: specify a value for the parameter")
        except ValueError as e:
            print(str(e))
        # re-prompt and read the next attempt
        stdout.write(pstring)
        stdout.flush()
        ovalue = irafutils.tkreadline(stdin)
        value = ovalue.strip()
|
Interactively prompt for parameter value
|
def set_rendering_intent(self, rendering_intent):
    """Set rendering intent variant for the sRGB chunk.

    :param rendering_intent: one of None, PERCEPTUAL,
        RELATIVE_COLORIMETRIC, SATURATION, ABSOLUTE_COLORIMETRIC
    :raises FormatError: if the value is not a recognised intent
    """
    if rendering_intent not in (None,
                                PERCEPTUAL,
                                RELATIVE_COLORIMETRIC,
                                SATURATION,
                                ABSOLUTE_COLORIMETRIC):
        # BUG FIX: corrected typo in the error message
        # ("redering" -> "rendering").
        raise FormatError('Unknown rendering intent')
    self.rendering_intent = rendering_intent
|
Set rendering intent variant for sRGB chunk
|
def get_position(self, dt):
    """Given dt in [0, 1], return the (x, y) position of the tile."""
    x = self.sx + self.dx * dt
    y = self.sy + self.dy * dt
    return x, y
|
Given dt in [0, 1], return the current position of the tile.
|
def porttree_matches(name):
    '''
    Returns a list containing the matches for a given package name from the
    portage tree. Note that the specific version of the package will not be
    provided for packages that have several versions in the portage tree, but
    rather the name of the package (i.e. "dev-python/paramiko").
    '''
    # PERF FIX: the original called _porttree() up to twice per category;
    # hoist the loop-invariant dbapi lookup out of the loop.
    dbapi = _porttree().dbapi
    return [
        category + "/" + name
        for category in dbapi.categories
        if dbapi.cp_list(category + "/" + name)
    ]
|
Returns a list containing the matches for a given package name from the
portage tree. Note that the specific version of the package will not be
provided for packages that have several versions in the portage tree, but
rather the name of the package (i.e. "dev-python/paramiko").
|
def render(value):
    """
    Finish building a URL pattern: ensure it starts with the beginning
    marker (^) and ends with the end marker.
    :param value: naive URL value
    :return: raw string
    """
    # Empty urls (use case: wild card imports) match only the empty path.
    if not value:
        return r'^$'
    prefixed = value if value[0] == beginning else beginning + value
    return prefixed if prefixed[-1] == end else prefixed + end
|
This function finishes the url pattern creation by adding starting
character ^ end possibly by adding end character at the end
:param value: naive URL value
:return: raw string
|
def selected(self, request, tag):
    """
    Add a ``selected`` attribute to ``tag`` when the wrapped L{Option}
    is selected; always return the tag.
    """
    if not self.option.selected:
        return tag
    tag(selected='selected')
    return tag
|
Render a selected attribute on the given tag if the wrapped L{Option}
instance is selected.
|
def create_groups(iam_client, groups):
    """
    Create a number of IAM groups, silently handling the exception raised
    when an entity already exists.

    :param iam_client: AWS API client for IAM
    :param groups: Name of IAM group(s) to be created (str or list).
    :return: list of {'groupname': ..., 'errors': [...]} dicts, one per
        group (the original docstring incorrectly said None).
    """
    groups_data = []
    # isinstance() instead of the type(x) != list anti-pattern.
    if not isinstance(groups, list):
        groups = [groups]
    for group in groups:
        errors = []
        try:
            printInfo('Creating group %s...' % group)
            iam_client.create_group(GroupName=group)
        except Exception as e:
            # EntityAlreadyExists is expected and ignored; anything else
            # is reported and recorded as an error for this group.
            if e.response['Error']['Code'] != 'EntityAlreadyExists':
                printException(e)
                errors.append('iam:creategroup')
        groups_data.append({'groupname': group, 'errors': errors})
    return groups_data
|
Create a number of IAM groups, silently handling exceptions when the entity already exists.
:param iam_client: AWS API client for IAM
:param groups: Name of IAM groups to be created.
:return: List of per-group creation results and errors
|
def _adjusted_script_code(self, script):
    '''
    Return the script code passed to the sighash function,
    length-prepending it when it is not already.
    This will break if there's a redeem script that's just a pushdata
    (won't happen in practice).
    Args:
        script (bytes): the spend script
    Returns:
        (bytes): the length-prepended script (if necessary)
    '''
    # Already length-prepended: first byte equals the remaining length.
    if script[0] == len(script) - 1:
        return script
    prefixed = ByteData()
    prefixed += VarInt(len(script))
    prefixed += script
    return prefixed
|
Checks if the script code passed in to the sighash function is already
length-prepended
This will break if there's a redeem script that's just a pushdata
That won't happen in practice
Args:
script (bytes): the spend script
Returns:
(bytes): the length-prepended script (if necessary)
|
def parse_model_group(path, group):
    """Yield reaction IDs from a structured model group (as obtained
    from a YAML file).

    Path can be given as a string or a context.
    """
    context = FilePathContext(path)
    # Direct reactions first, then any nested subgroups.
    for rid in group.get('reactions', []):
        yield rid
    for rid in parse_model_group_list(context, group.get('groups', [])):
        yield rid
|
Parse a structured model group as obtained from a YAML file
Path can be given as a string or a context.
|
def _Decode(self, codec_name, data):
"""Decode data with the given codec name."""
try:
return data.decode(codec_name, "replace")
except LookupError:
raise RuntimeError("Codec could not be found.")
except AssertionError:
raise RuntimeError("Codec failed to decode")
|
Decode data with the given codec name.
|
def writeList(self, register, data):
    """Write bytes to the specified register.

    :param register: register address byte sent before the payload
    :param data: list of byte values to write
    Raises if the device fails to ACK any written byte
    (via _verify_acks).
    """
    self._idle()
    self._transaction_start()
    self._i2c_start()
    # Address byte with the write flag, then register, then payload.
    self._i2c_write_bytes([self._address_byte(False), register] + data)
    self._i2c_stop()
    response = self._transaction_end()
    # Every written byte must have been ACKed by the device.
    self._verify_acks(response)
|
Write bytes to the specified register.
|
def dev():
    """Define dev stage: populate the fabric env for the dev deployment."""
    # Both the web and load-balancer roles run on the same vagrant box.
    env.roledefs = {
        'web': ['192.168.1.2'],
        'lb': ['192.168.1.2'],
    }
    env.user = 'vagrant'
    env.backends = env.roledefs['web']
    env.server_name = 'django_search_model-dev.net'
    env.short_server_name = 'django_search_model-dev'
    env.static_folder = '/site_media/'
    env.server_ip = '192.168.1.2'
    env.no_shared_sessions = False
    env.server_ssl_on = False
    env.goal = 'dev'
    env.socket_port = '8001'
    env.map_settings = {}
    # Derive the remaining environment from the settings above.
    execute(build_env)
Define dev stage
|
def backup_db(
        aws_access_key_id,
        aws_secret_access_key,
        bucket_name,
        s3_folder,
        database,
        mysql_host,
        mysql_port,
        db_user,
        db_pass,
        db_backups_dir,
        backup_aging_time):
    """
    Dump a database into ``db_backups_dir``, upload the compressed dump
    to S3, then delete backups older than ``backup_aging_time``.
    fab -f ./fabfile.py backup_dbs
    :param aws_access_key_id: AWS access key for the S3 bucket
    :param aws_secret_access_key: AWS secret key for the S3 bucket
    :param bucket_name: S3 bucket receiving the dump
    :param s3_folder: key prefix (folder) inside the bucket
    :param database: name of the database to dump
    :param mysql_host: MySQL server host
    :param mysql_port: MySQL server port
    :param db_user: MySQL user
    :param db_pass: MySQL password
    :param db_backups_dir: local directory where dumps are written
    :param backup_aging_time: age threshold for expiring old backups
    :return: None
    """
    # Connect to the bucket
    bucket = s3_bucket(aws_access_key_id, aws_secret_access_key, bucket_name)
    key = boto.s3.key.Key(bucket)
    bucketlist = bucket.list()
    pat = "[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-[0-9][0-9]-[0-9][0-9]-[0-9][0-9]-%s.sql.bz2" % database
    sql_file = '%s-%s.sql' % (dt.now().strftime(TIMESTAMP_FORMAT), database)
    print('Dumping database %s to %s.bz2' % (database, sql_file))
    sql_full_target = os.path.join(db_backups_dir, sql_file)
    # NOTE(review): the password appears on the mysqldump command line and
    # is printed below; consider a defaults file for credentials.
    cmd = '/usr/bin/mysqldump -h%s -P%s -u%s -p%s %s ' % (mysql_host, mysql_port, db_user, db_pass, database)
    print(cmd)
    # BUG FIX: the dump file was opened and never closed; use a context
    # manager so it is flushed/closed before bzip2 runs.
    with open(sql_full_target, "wb") as f:
        subprocess.call(cmd.split(), stdout=f)
    cmd = 'bzip2 %s' % sql_full_target
    print(cmd)
    subprocess.call(cmd.split())
    sql_local_full_target = sql_full_target
    # append '.bz2'
    key.key = os.path.join(s3_folder, '%s.bz2' % sql_file)
    print('STARTING upload of %s to %s: %s' % (sql_file, key.key, dt.now()))
    try:
        # BUG FIX: the original joined db_backups_dir onto a path that was
        # already joined with it; upload the compressed dump path directly.
        key.set_contents_from_filename('%s.bz2' % sql_full_target)
        print('Upload of %s FINISHED: %s' % (sql_local_full_target, dt.now()))
    finally:
        # Expire old backups both remotely and locally even if the upload
        # fails.
        delete_expired_backups_in_bucket(bucket, bucketlist, pat, backup_aging_time=backup_aging_time)
        delete_local_db_backups(pat, db_backups_dir, backup_aging_time)
|
dumps databases into /backups, uploads to s3, deletes backups older than a month
fab -f ./fabfile.py backup_dbs
:param aws_access_key_id:
:param aws_secret_access_key:
:param bucket_name:
:param database:
:param mysql_host:
:param mysql_port:
:param db_pass:
:param db_backups_dir:
:param backup_aging_time:
:return:
|
def breakRankTies(self, oldsym, newsym):
    """Break rank ties in ``oldsym`` using ``newsym``, rewriting
    ``newsym`` in place with dense integer ranks.

    Entries are ranked in the stable order (old value, new value,
    original index); equal old values get distinct ranks whenever their
    new values differ.

    Example:
        old = [4, 2, 4, 7, 8]      (one tie: the two 4s)
        new = [60, 2, 61, 90, 99]
        after the call: newsym == [1, 0, 2, 3, 4]
        (the 4/4 tie is broken because 60 != 61)
    """
    # BUG FIX: sorted(zip(...)) replaces the Python-2-only
    # map(None, ...) idiom (TypeError on Python 3) and produces the
    # identical tuple ordering on both Python versions.
    stable_sort = sorted(zip(oldsym, newsym, range(len(oldsym))))
    last_old, last_new = None, None
    rank = -1
    for old, new, index in stable_sort:
        if old != last_old:
            rank += 1
            # the last old value changed, so update both
            last_old = old
            last_new = new
        elif new != last_new:
            # break the tie based on the new info (update last_new)
            rank += 1
            last_new = new
        newsym[index] = rank
|
break Ties to form a new list with the same integer ordering
from high to low
Example
old = [ 4, 2, 4, 7, 8] (Two ties, 4 and 4)
new = [60, 2 61,90,99]
res = [ 4, 0, 3, 1, 2]
* * This tie is broken in this case
|
def getAngle(self, mode='deg'):
    """ Return the bend (deflecting) angle.

    :param mode: 'deg' or 'rad'
    :return: the angle in the requested unit, 0 when this element is not
        a bend, or None (after a printed hint) when the matrix has not
        been computed yet
    """
    if self.refresh is True:
        self.getMatrix()
    try:
        if not self.mflag:
            return 0
        if mode == 'deg':
            return self.bangle / np.pi * 180
        # any other mode is treated as radians
        return self.bangle
    except AttributeError:
        print("Please execute getMatrix() first.")
|
return bend angle
:param mode: 'deg' or 'rad'
:return: deflecting angle in RAD
|
def _ConvertDictToObject(self, json_dict):
    """Converts a JSON dict into a path specification object.

    The dictionary of the JSON serialized objects consists of:
    {
        '__type__': 'PathSpec'
        'type_indicator': 'OS'
        'parent': { ... }
        ...
    }
    Here '__type__' indicates the object base type, which must be
    'PathSpec'. The remaining entries become the path specification
    properties. Note that json_dict is a dict of dicts and this method
    is called for every dict, which is how parent path specifications
    are created.

    Args:
        json_dict (dict[str, object]): JSON serialized objects.
    Returns:
        PathSpec: a path specification.
    Raises:
        TypeError: if the JSON serialized object does not contain a
            '__type__' attribute that contains 'PathSpec'.
    """
    # __type__ indicates the object class type and must be known.
    class_type = json_dict.get('__type__', None)
    if class_type not in self._CLASS_TYPES:
        raise TypeError('Missing path specification object type.')
    # Strip bookkeeping keys that NewPathSpec() does not accept.
    del json_dict['__type__']
    type_indicator = json_dict.get('type_indicator', None)
    if type_indicator:
        del json_dict['type_indicator']
    # row_condition was serialized as a list; restore the tuple.
    if 'row_condition' in json_dict:
        json_dict['row_condition'] = tuple(json_dict['row_condition'])
    return path_spec_factory.Factory.NewPathSpec(type_indicator, **json_dict)
|
Converts a JSON dict into a path specification object.
The dictionary of the JSON serialized objects consists of:
{
'__type__': 'PathSpec'
'type_indicator': 'OS'
'parent': { ... }
...
}
Here '__type__' indicates the object base type in this case this should
be 'PathSpec'. The rest of the elements of the dictionary make up the
path specification object properties. Note that json_dict is a dict of
dicts and the _ConvertDictToObject method will be called for every dict.
That is how the path specification parent objects are created.
Args:
json_dict (dict[str, object]): JSON serialized objects.
Returns:
PathSpec: a path specification.
Raises:
TypeError: if the JSON serialized object does not contain a '__type__'
attribute that contains 'PathSpec'.
|
def most_similar(self, keyword, num):
    """
    Return the top ``num`` most similar terms for ``keyword`` in JSON
    format.

    :return: {'key': ..., 'value': [(term, score), ...], 'similarity': s}
        where s is 1 for an exact vocabulary hit, the n-gram similarity
        for a fuzzy hit, and 0 when nothing matches.
    """
    try:
        neighbours = self.model.most_similar(keyword, topn=num)
        return {'key': keyword, 'value': neighbours, 'similarity': 1}
    except KeyError:
        # Unknown keyword: fall back to the closest term by n-gram match.
        fallback = self.kemNgram.find(keyword)
        if not fallback:
            return {'key': keyword, 'value': [], 'similarity': 0}
        neighbours = self.model.most_similar(fallback, topn=num)
        return {'key': fallback, 'value': neighbours,
                'similarity': self.kemNgram.compare(fallback, keyword)}
|
input: keyword term of top n
output: keyword result in json format
|
def get_asset_notification_session(self, asset_receiver, proxy):
    """Gets the notification session for notifications pertaining to
    asset changes.
    arg: asset_receiver (osid.repository.AssetReceiver): the
        notification callback
    arg: proxy (osid.proxy.Proxy): a proxy
    return: (osid.repository.AssetNotificationSession) - an
        AssetNotificationSession
    raise: NullArgument - asset_receiver is null
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - supports_asset_notification() is false
    compliance: optional - This method must be implemented if
        supports_asset_notification() is true.
    """
    if asset_receiver is None:
        raise NullArgument()
    if not self.supports_asset_notification():
        raise Unimplemented()
    # Import lazily so an unsupported deployment fails here, not at load.
    try:
        from . import sessions
    except ImportError:
        raise  # OperationFailed()
    proxy = self._convert_proxy(proxy)
    try:
        session = sessions.AssetNotificationSession(asset_receiver, proxy, runtime=self._runtime)
    except AttributeError:
        raise  # OperationFailed()
    return session
|
Gets the notification session for notifications pertaining to
asset changes.
arg: asset_receiver (osid.repository.AssetReceiver): the
notification callback
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetNotificationSession) - an
AssetNotificationSession
raise: NullArgument - asset_receiver is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_notification() is false
compliance: optional - This method must be implemented if
supports_asset_notification() is true.
|
def get_set(self, flag, new):
    """
    Return the previous boolean value of 'flag', updating the flag
    first when 'new' is exactly True or False; any other value of
    'new' leaves the flag untouched (read-only).
    """
    previous = self._is_set(flag)
    # Identity checks are deliberate: only the True/False singletons
    # trigger an update (e.g. new=None means "just read").
    if new is True:
        self._set(flag)
    elif new is False:
        self._clear(flag)
    return previous
|
Return the boolean value of 'flag'. If 'new' is set,
the flag is updated, and the value before update is
returned.
|
def remove_trailing(needle, haystack):
    """Remove trailing needle string (if exists).

    >>> remove_trailing('Test', 'ThisAndThatTest')
    'ThisAndThat'
    >>> remove_trailing('Test', 'ArbitraryName')
    'ArbitraryName'
    """
    # str.endswith is clearer than the slice comparison; the truthiness
    # guard preserves the original behavior for an empty needle
    # (haystack[-0:] compared the whole string, so '' was a no-op).
    if needle and haystack.endswith(needle):
        return haystack[:-len(needle)]
    return haystack
|
Remove trailing needle string (if exists).
>>> remove_trailing('Test', 'ThisAndThatTest')
'ThisAndThat'
>>> remove_trailing('Test', 'ArbitraryName')
'ArbitraryName'
|
def bibitems(self):
    """List of bibitem strings appearing in the document.

    Scans ``self.text`` line by line; every line starting with
    ``\\bibitem`` opens an entry, and following lines are concatenated
    onto it until the next ``\\bibitem`` line, an ``\\end{document}``
    marker, or the end of the text.
    """
    bibitems = []
    lines = self.text.split('\n')
    for i, line in enumerate(lines):
        if not line.lstrip().startswith(u'\\bibitem'):
            continue
        # accept this line; check if following lines are also part of
        # the bibitem
        # FIXME ugh, re-write
        j = 1
        while True:
            try:
                if (lines[i + j].startswith(u'\\bibitem') is False) \
                        and (lines[i + j] != '\n'):
                    line += lines[i + j]
                elif "\end{document}" in lines[i + j]:
                    break
                else:
                    break
            except IndexError:
                break
            else:
                # BUG FIX: 'print line' / 'print "finished", line' were
                # Python-2-only statements (SyntaxError on Python 3);
                # converted to print() calls with identical output.
                print(line)
                j += 1
            print("finished", line)
        bibitems.append(line)
    return bibitems
|
List of bibitem strings appearing in the document.
|
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
    """Extracts events from a Windows Registry key.

    The 'UpdateKey' value is parsed separately; all other non-empty
    values are collected into a single registry event keyed on the
    key's last-written time.

    Args:
        parser_mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and dfvfs.
        registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
    """
    values_dict = {}
    for registry_value in registry_key.GetValues():
        # Skip unnamed or empty values.
        if not registry_value.name or not registry_value.data:
            continue
        if registry_value.name == 'UpdateKey':
            # UpdateKey gets its own dedicated event.
            self._ParseUpdateKeyValue(
                parser_mediator, registry_value, registry_key.path)
        else:
            values_dict[registry_value.name] = registry_value.GetDataAsObject()
    event_data = windows_events.WindowsRegistryEventData()
    event_data.key_path = registry_key.path
    event_data.offset = registry_key.offset
    event_data.regvalue = values_dict
    event_data.source_append = self._SOURCE_APPEND
    event_data.urls = self.URLS
    # Timestamp the event with the key's last-written time.
    event = time_events.DateTimeValuesEvent(
        registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
    parser_mediator.ProduceEventWithEventData(event, event_data)
|
Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
|
def _set_access_log(self, config, level):
    """Configure the CherryPy access log handler and its level.

    The handler type is taken from the ``log.access_handler`` option of the
    ``global`` config section: 'syslog' (default), 'stdout', 'file' (keep
    CherryPy's default handler) or 'none' (silence the log).
    """
    handler_kind = self._get_param(
        'global',
        'log.access_handler',
        config,
        'syslog',
    )
    access_log = cherrypy.log.access_log
    if handler_kind == 'syslog':
        # Route access records to the local syslog daemon.
        access_log.handlers = []
        syslog_handler = logging.handlers.SysLogHandler(
            address='/dev/log',
            facility='user',
        )
        syslog_handler.setFormatter(logging.Formatter(
            "ldapcherry[%(process)d]: %(message)s"
        ))
        access_log.addHandler(syslog_handler)
    elif handler_kind == 'stdout':
        # Write access records to standard output.
        access_log.handlers = []
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setFormatter(logging.Formatter(
            'ldapcherry.access - %(levelname)s - %(message)s'
        ))
        access_log.addHandler(stream_handler)
    elif handler_kind == 'file':
        # Keep CherryPy's default file-based handler untouched.
        pass
    elif handler_kind == 'none':
        # Discard all access records.
        access_log.handlers = []
        access_log.addHandler(logging.NullHandler())
    access_log.setLevel(level)
|
Configure access logs
|
def classify(self, dataset, missing_value_action='auto'):
    """
    Return predictions for ``dataset``, using the trained supervised_learning
    model. Predictions are generated as class labels (0 or 1).

    Parameters
    ----------
    dataset: SFrame
        Dataset of new observations. Must include columns with the same
        names as the features used for model training, but does not require
        a target column. Additional columns are ignored.

    missing_value_action: str, optional
        Action to perform when missing values are encountered. One of:
        - 'auto': Choose model dependent missing value action
        - 'impute': Fill missing values with the mean of the training data.
        - 'error': Terminate with an error message.

    Returns
    -------
    out : SFrame
        An SFrame with model predictions.
    """
    if missing_value_action == 'auto':
        missing_value_action = select_default_missing_value_policy(
            self, 'classify')

    proxy = self.__proxy__
    # A single dict or a list of dicts takes the low-latency path.
    if isinstance(dataset, dict):
        return proxy.fast_classify([dataset], missing_value_action)
    if isinstance(dataset, list):
        return proxy.fast_classify(dataset, missing_value_action)

    _raise_error_if_not_sframe(dataset, "dataset")
    return proxy.classify(dataset, missing_value_action)
|
Return predictions for ``dataset``, using the trained supervised_learning
model. Predictions are generated as class labels (0 or
1).
Parameters
----------
dataset: SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action: str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Choose model dependent missing value action
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with prediction and terminate with
an error message.
Returns
-------
out : SFrame
An SFrame with model predictions.
|
def integrate(self, rate, timestep):
    """Advance this time-varying quaternion by ``timestep`` in the future.

    The quaternion is modified in place and is guaranteed to remain a unit
    quaternion.  The solution is closed form under the assumption that
    ``rate`` is constant over the interval of length ``timestep``.

    Params:
        rate: 3-array (or array-like) of rotation rates about the global
            x, y and z axes respectively.
        timestep: interval over which to integrate into the future;
            smaller intervals are more accurate when ``rate`` varies.
    """
    self._fast_normalise()
    omega = self._validate_number_sequence(rate, 3)
    rotation_vector = omega * timestep
    angle = np.linalg.norm(rotation_vector)
    if angle > 0:
        # Build the delta rotation from the axis-angle form and compose it.
        delta = Quaternion(axis=rotation_vector / angle, angle=angle)
        self.q = (self * delta).q
        self._fast_normalise()
|
Advance a time varying quaternion to its value at a time `timestep` in the future.
The Quaternion object will be modified to its future value.
It is guaranteed to remain a unit quaternion.
Params:
rate: numpy 3-array (or array-like) describing rotation rates about the
global x, y and z axes respectively.
timestep: interval over which to integrate into the future.
Assuming *now* is `T=0`, the integration occurs over the interval
`T=0` to `T=timestep`. Smaller intervals are more accurate when
`rate` changes over time.
Note:
The solution is closed form given the assumption that `rate` is constant
over the interval of length `timestep`.
|
def tree_to_nodes(tree, context=None, metadata=None):
    """Assembles ``tree`` nodes into object models.

    If ``context`` is supplied, it will be used to contextualize
    the contents of the nodes. Metadata will pass non-node identifying
    values down to child nodes, if not overridden (license, timestamps, etc)
    """
    nodes = []
    for item in tree['contents']:
        if 'contents' in item:
            # Sub-collection: recurse first with the inherited metadata.
            sub_nodes = tree_to_nodes(item, context=context, metadata=metadata)
            # NOTE(review): 'metadata' is rebound here, so the modified copy
            # leaks into subsequent loop iterations and recursive calls for
            # later siblings — confirm this is intentional.
            if metadata is None:
                metadata = {}
            else:
                metadata = metadata.copy()
            # Strip identifying keys inherited from the parent.
            for key in ('title', 'id', 'shortid',
                        'cnx-archive-uri', 'cnx-archive-shortid'):
                if key in metadata:
                    metadata.pop(key)
            # Stamp this item's own identifying fields into the metadata.
            for key in ('title', 'id', 'shortId'):
                if item.get(key):
                    metadata[key] = item[key]
                    if item[key] != 'subcol':
                        if key == 'id':
                            metadata['cnx-archive-uri'] = item[key]
                        elif key == 'shortId':
                            metadata['cnx-archive-shortid'] = item[key]
            titles = _title_overrides_from_tree(item)
            # Items with a real id become Binders; anonymous sub-trees
            # become TranslucentBinders.
            if item.get('id') is not None:
                tbinder = cnxepub.Binder(item.get('id'),
                                         sub_nodes,
                                         metadata=metadata,
                                         title_overrides=titles)
            else:
                tbinder = cnxepub.TranslucentBinder(sub_nodes,
                                                    metadata=metadata,
                                                    title_overrides=titles)
            nodes.append(tbinder)
        else:
            # Leaf item: build a document and stamp identifying metadata.
            doc = document_factory(item['id'], context=context)
            for key in ('title', 'id', 'shortId'):
                if item.get(key):
                    doc.metadata[key] = item[key]
                    if key == 'id':
                        doc.metadata['cnx-archive-uri'] = item[key]
                    elif key == 'shortId':
                        doc.metadata['cnx-archive-shortid'] = item[key]
            nodes.append(doc)
    return nodes
|
Assembles ``tree`` nodes into object models.
If ``context`` is supplied, it will be used to contextualize
the contents of the nodes. Metadata will pass non-node identifying
values down to child nodes, if not overridden (license, timestamps, etc)
|
def _grid_widgets(self):
    """
    Grid the optional header label and the four font-style buttons
    :return: None
    """
    if self.__label:
        self._header_label.grid(row=0, column=1, columnspan=3, sticky="nw",
                                padx=5, pady=(5, 0))
    # All style buttons share row 1 and differ only in column and padx.
    button_layout = (
        (self._bold_button, 1, 5),
        (self._italic_button, 2, (0, 5)),
        (self._underline_button, 3, (0, 5)),
        (self._overstrike_button, 4, (0, 5)),
    )
    for widget, column, padx in button_layout:
        widget.grid(row=1, column=column, sticky="nswe", padx=padx, pady=2)
|
Place the widgets in the correct positions
:return: None
|
def set_mem_per_proc(self, mem_mb):
    """Set the memory per process in megabytes."""
    # Let the parent class store/normalize the value first, then mirror
    # the stored self.mem_per_proc into the queue-submission parameters.
    super().set_mem_per_proc(mem_mb)
    self.qparams["mem_per_cpu"] = self.mem_per_proc
|
Set the memory per process in megabytes
|
def setup_seq_signals(self, ):
    """Setup the signals for the sequence page

    Wires the sequence-page buttons and the description editor to their
    handler slots.

    :returns: None
    :rtype: None
    :raises: None
    """
    log.debug("Setting up sequence page signals.")
    # Buttons: open the project view, open the shot view, create a shot.
    self.seq_prj_view_pb.clicked.connect(self.seq_view_prj)
    self.seq_shot_view_pb.clicked.connect(self.seq_view_shot)
    self.seq_shot_create_pb.clicked.connect(self.seq_create_shot)
    # Persist the description whenever its text changes.
    self.seq_desc_pte.textChanged.connect(self.seq_save)
|
Setup the signals for the sequence page
:returns: None
:rtype: None
:raises: None
|
def get_file_from_s3(job, s3_url, encryption_key=None, per_file_encryption=True,
                     write_to_jobstore=True):
    """
    Download a supplied URL that points to a file on Amazon S3. If the file is encrypted using
    sse-c (with the user-provided key or with a hash of the user provided master key) then the
    encryption keys will be used when downloading. The file is downloaded and written to the
    jobstore if requested.

    The download is delegated to the external ``s3am`` tool; its stderr is
    captured to a temp file and parsed to diagnose failures.  Undiagnosed
    failures are retried up to 3 times.

    :param str s3_url: URL for the file (can be s3, S3 or https)
    :param str encryption_key: Path to the master key
    :param bool per_file_encryption: If encrypted, was the file encrypted using the per-file method?
    :param bool write_to_jobstore: Should the file be written to the job store?
    :return: Path to the downloaded file or fsID (if write_to_jobstore was True)
    :rtype: str|toil.fileStore.FileID
    """
    work_dir = job.fileStore.getLocalTempDir()
    parsed_url = urlparse(s3_url)
    if parsed_url.scheme == 'https':
        # Rewrite an https URL into the S3 scheme s3am expects.
        download_url = 'S3:/' + parsed_url.path  # path contains the second /
    elif parsed_url.scheme in ('s3', 'S3'):
        download_url = s3_url
    else:
        raise RuntimeError('Unexpected url scheme: %s' % s3_url)
    filename = '/'.join([work_dir, os.path.basename(s3_url)])
    # This is common to encrypted and unencrypted downloads
    download_call = ['s3am', 'download', '--download-exists', 'resume']
    # If an encryption key was provided, use it.
    if encryption_key:
        download_call.extend(['--sse-key-file', encryption_key])
        if per_file_encryption:
            download_call.append('--sse-key-is-master')
    # This is also common to both types of downloads
    download_call.extend([download_url, filename])
    attempt = 0
    exception = ''
    while True:
        try:
            with open(work_dir + '/stderr', 'w') as stderr_file:
                subprocess.check_call(download_call, stderr=stderr_file)
        except subprocess.CalledProcessError:
            # The last line of the stderr will have the error
            with open(stderr_file.name) as stderr_file:
                for line in stderr_file:
                    line = line.strip()
                    if line:
                        exception = line
            if exception.startswith('boto'):
                # boto errors carry an HTTP status in their last component.
                exception = exception.split(': ')
                if exception[-1].startswith('403'):
                    raise RuntimeError('s3am failed with a "403 Forbidden" error while obtaining '
                                       '(%s). Did you use the correct credentials?' % s3_url)
                elif exception[-1].startswith('400'):
                    raise RuntimeError('s3am failed with a "400 Bad Request" error while obtaining '
                                       '(%s). Are you trying to download an encrypted file without '
                                       'a key, or an unencrypted file with one?' % s3_url)
                else:
                    raise RuntimeError('s3am failed with (%s) while downloading (%s)' %
                                       (': '.join(exception), s3_url))
            elif exception.startswith('AttributeError'):
                # A NoneType AttributeError from s3am typically means the
                # object does not exist.
                exception = exception.split(': ')
                if exception[-1].startswith("'NoneType'"):
                    raise RuntimeError('Does (%s) exist on s3?' % s3_url)
                else:
                    raise RuntimeError('s3am failed with (%s) while downloading (%s)' %
                                       (': '.join(exception), s3_url))
            else:
                # Unrecognised error text: retry a few times before giving up.
                if attempt < 3:
                    attempt += 1
                    continue
                else:
                    raise RuntimeError('Could not diagnose the error while downloading (%s)' %
                                       s3_url)
        except OSError:
            # The s3am executable itself could not be launched.
            raise RuntimeError('Failed to find "s3am". Install via "apt-get install --pre s3am"')
        else:
            break
        finally:
            os.remove(stderr_file.name)
    assert os.path.exists(filename)
    if write_to_jobstore:
        filename = job.fileStore.writeGlobalFile(filename)
    return filename
|
Download a supplied URL that points to a file on Amazon S3. If the file is encrypted using
sse-c (with the user-provided key or with a hash of the usesr provided master key) then the
encryption keys will be used when downloading. The file is downloaded and written to the
jobstore if requested.
:param str s3_url: URL for the file (can be s3, S3 or https)
:param str encryption_key: Path to the master key
:param bool per_file_encryption: If encrypted, was the file encrypted using the per-file method?
:param bool write_to_jobstore: Should the file be written to the job store?
:return: Path to the downloaded file or fsID (if write_to_jobstore was True)
:rtype: str|toil.fileStore.FileID
|
def gdal2np_dtype(b):
    """
    Get NumPy datatype that corresponds with GDAL RasterBand datatype

    Input can be filename, GDAL Dataset, GDAL RasterBand, or GDAL integer
    dtype; the input is progressively unwrapped down to the integer code
    before the lookup in ``gdal_array.codes``.
    """
    value = b
    if isinstance(value, str):
        # Filename: open it as a dataset first.
        value = gdal.Open(value)
    if isinstance(value, gdal.Dataset):
        # Dataset: use its first band.
        value = value.GetRasterBand(1)
    if isinstance(value, gdal.Band):
        value = value.DataType
    if not isinstance(value, int):
        print("Input must be GDAL Dataset or RasterBand object")
        return None
    return gdal_array.codes[value]
|
Get NumPy datatype that corresponds with GDAL RasterBand datatype
Input can be filename, GDAL Dataset, GDAL RasterBand, or GDAL integer dtype
|
def cluster_coincs(stat, time1, time2, timeslide_id, slide, window, argmax=numpy.argmax):
    """Cluster coincident events for each timeslide separately, across
    templates, based on the ranking statistic

    Parameters
    ----------
    stat: numpy.ndarray
        vector of ranking values to maximize
    time1: numpy.ndarray
        first time vector
    time2: numpy.ndarray
        second time vector
    timeslide_id: numpy.ndarray
        vector that determines the timeslide offset
    slide: float
        length of the timeslides offset interval
    window: float
        length to cluster over

    Returns
    -------
    cindex: numpy.ndarray
        The set of indices corresponding to the surviving coincidences.
    """
    logging.info('clustering coinc triggers over %ss window' % window)

    if len(time1) == 0 or len(time2) == 0:
        logging.info('No coinc triggers in one, or both, ifos.')
        return numpy.array([])

    if numpy.isfinite(slide):
        # For a time-shifted coinc, time1 leads time2 by approximately
        # timeslide_id * slide; adding that offset centres the mean coinc
        # time around time1.
        coinc_time = (time1 + time2 + timeslide_id * slide) / 2
    else:
        coinc_time = 0.5 * (time2 + time1)

    # Space the slides far apart along the time axis so that clustering
    # never spans a slide boundary.
    offsets = timeslide_id.astype(numpy.float128)
    coinc_time = coinc_time.astype(numpy.float128)
    span = (coinc_time.max() - coinc_time.min()) + window * 10
    coinc_time = coinc_time + span * offsets
    return cluster_over_time(stat, coinc_time, window, argmax)
|
Cluster coincident events for each timeslide separately, across
templates, based on the ranking statistic
Parameters
----------
stat: numpy.ndarray
vector of ranking values to maximize
time1: numpy.ndarray
first time vector
time2: numpy.ndarray
second time vector
timeslide_id: numpy.ndarray
vector that determines the timeslide offset
slide: float
length of the timeslides offset interval
window: float
length to cluster over
Returns
-------
cindex: numpy.ndarray
The set of indices corresponding to the surviving coincidences.
|
def compat_string(value):
    """
    Provide a python2/3 compatible string representation of the value.

    Bytes are decoded as UTF-8; any other object is passed through ``str``.
    """
    if not isinstance(value, bytes):
        return str(value)
    return value.decode(encoding='utf-8')
|
Provide a python2/3 compatible string representation of the value
:type value:
:rtype :
|
def chat_post_message(self, channel, text, **params):
    """Post a message to a channel (Slack ``chat.postMessage``).

    https://api.slack.com/methods/chat.postMessage
    """
    # channel/text override any identically named extra parameters.
    payload = dict(params, channel=channel, text=text)
    return self._make_request('chat.postMessage', payload)
|
chat.postMessage
This method posts a message to a channel.
https://api.slack.com/methods/chat.postMessage
|
def redirect_legacy_content(request):
    """Redirect from legacy /content/id/version to new /contents/uuid@version.

    Handles collection context (book) as well.  The redirect itself is
    delivered by raising an HTTP exception.
    """
    routing_args = request.matchdict
    objid = routing_args['objid']
    objver = routing_args.get('objver')
    filename = routing_args.get('filename')
    id, version = _convert_legacy_id(objid, objver)
    if not id:
        raise httpexceptions.HTTPNotFound()
    # We always use 301 redirects (HTTPMovedPermanently) here
    # because we want search engines to move to the newer links
    # We cache these redirects only briefly because, even when versioned,
    # legacy collection versions don't include the minor version,
    # so the latest archive url could change
    if filename:
        # A resource file was requested: redirect to its hash-based URL.
        with db_connect() as db_connection:
            with db_connection.cursor() as cursor:
                args = dict(id=id, version=version, filename=filename)
                cursor.execute(SQL['get-resourceid-by-filename'], args)
                try:
                    res = cursor.fetchone()
                    # res is None when no row matched; res[0] then raises
                    # TypeError, handled below as a 404.
                    resourceid = res[0]
                    raise httpexceptions.HTTPMovedPermanently(
                        request.route_path('resource', hash=resourceid,
                                           ignore=u'/{}'.format(filename)),
                        headers=[("Cache-Control", "max-age=60, public")])
                except TypeError:  # None returned
                    raise httpexceptions.HTTPNotFound()
    ident_hash = join_ident_hash(id, version)
    params = request.params
    if params.get('collection'):  # page in book
        # Resolve the page within its containing book, if the book exists.
        objid, objver = split_legacy_hash(params['collection'])
        book_uuid, book_version = _convert_legacy_id(objid, objver)
        if book_uuid:
            id, ident_hash = \
                _get_page_in_book(id, version, book_uuid, book_version)
    raise httpexceptions.HTTPMovedPermanently(
        request.route_path('content', ident_hash=ident_hash),
        headers=[("Cache-Control", "max-age=60, public")])
|
Redirect from legacy /content/id/version to new /contents/uuid@version.
Handles collection context (book) as well.
|
def _transschema(x):
    """
    Transform a schema, once loaded from its YAML representation, to its
    final internal representation.

    Recursively walks the structure: tuples are rebuilt with only their
    first element transformed, dict keys are qualified via ``_qualify_map``,
    and lists are transformed element-wise.

    NOTE(review): Python 2 only (``iteritems`` and a list-returning ``map``).
    """
    if isinstance(x, tuple):
        # Rebuild with the same (sub)class; only the first element is a
        # schema payload, the remaining elements pass through untouched.
        return x.__class__(_transschema(x[0]), *x[1:])
    elif isinstance(x, dict):
        return dict((_qualify_map(key, _transschema(val)) for key, val in x.iteritems()))
    elif isinstance(x, list):
        return map(_transschema, x)
    else:
        # Scalars (and anything unrecognised) pass through unchanged.
        return x
|
Transform a schema, once loaded from its YAML representation, to its
final internal representation
|
def make_context(self, info_name, args, parent=None, **extra):
    """Create a new :class:`Context` and parse ``args`` into it.

    The command callback is not invoked.

    :param info_name: the info name for this invokation. Generally this
                      is the most descriptive name for the script or
                      command. For the toplevel script it's usually
                      the name of the script, for commands below it it's
                      the name of the script.
    :param args: the arguments to parse as list of strings.
    :param parent: the parent context if available.
    :param extra: extra keyword arguments forwarded to the context
                  constructor.
    """
    # Fill in context defaults only where the caller did not override them.
    for key, value in iteritems(self.context_settings):
        extra.setdefault(key, value)
    ctx = Context(self, info_name=info_name, parent=parent, **extra)
    self.parse_args(ctx, args)
    return ctx
|
This function when given an info name and arguments will kick
off the parsing and create a new :class:`Context`. It does not
invoke the actual command callback though.
:param info_name: the info name for this invokation. Generally this
is the most descriptive name for the script or
command. For the toplevel script it's usually
the name of the script, for commands below it it's
the name of the script.
:param args: the arguments to parse as list of strings.
:param parent: the parent context if available.
:param extra: extra keyword arguments forwarded to the context
constructor.
|
def H3(self):
    """Correlation (Haralick feature 3)."""
    # Outer product of the 1-based level values: entry (i, j) is
    # (level_i + 1) * (level_j + 1).
    shifted = self.levels + 1
    level_products = np.outer(shifted, shifted)
    # Weight each level pair by its co-occurrence probability and sum over
    # both level axes, once per object.
    tiled = np.tile(level_products[np.newaxis], (self.nobjects, 1, 1))
    totals = (tiled * self.P).sum(axis=(1, 2))
    result = (totals - self.mux * self.muy) / (self.sigmax * self.sigmay)
    # Zero-variance objects divide by zero; zero them out.
    result[np.isinf(result)] = 0
    return result
|
Correlation.
|
def errors_to_json(errors):
    """Convert the errors to JSON.

    Each error is a tuple of (check, message, line, column, start, end,
    extent, severity, replacements); line/column/start/end are converted
    from 0-based to 1-based positions.
    """
    serialized = [
        {
            "check": err[0],
            "message": err[1],
            "line": 1 + err[2],
            "column": 1 + err[3],
            "start": 1 + err[4],
            "end": 1 + err[5],
            "extent": err[6],
            "severity": err[7],
            "replacements": err[8],
        }
        for err in errors
    ]
    return json.dumps(
        dict(status="success", data={"errors": serialized}), sort_keys=True)
|
Convert the errors to JSON.
|
def normalize(symbol_string, strict=False):
    """Normalize an encoded symbol string.

    Normalization provides error correction and prepares the string for
    decoding.  These transformations are applied:

       1. Hyphens are removed
       2. 'I', 'i', 'L' or 'l' are converted to '1'
       3. 'O' or 'o' are converted to '0'
       4. All characters are converted to uppercase

    A TypeError is raised for an invalid string type; a ValueError if the
    normalized string contains invalid characters, or — with ``strict`` —
    if any transformation was needed at all.  Returns the normalized string.
    """
    if not isinstance(symbol_string, string_types):
        raise TypeError("string is of invalid type %s" %
                        symbol_string.__class__.__name__)
    if not PY3:
        # On Python 2, reject non-ASCII input up front.
        try:
            symbol_string = symbol_string.encode('ascii')
        except UnicodeEncodeError:
            raise ValueError("string should only contain ASCII characters")
    stripped = symbol_string.replace('-', '')
    norm_string = stripped.translate(normalize_symbols).upper()
    if not valid_symbols.match(norm_string):
        raise ValueError("string '%s' contains invalid characters" % norm_string)
    if strict and norm_string != symbol_string:
        raise ValueError("string '%s' requires normalization" % symbol_string)
    return norm_string
|
Normalize an encoded symbol string.
Normalization provides error correction and prepares the
string for decoding. These transformations are applied:
1. Hyphens are removed
2. 'I', 'i', 'L' or 'l' are converted to '1'
3. 'O' or 'o' are converted to '0'
4. All characters are converted to uppercase
A TypeError is raised if an invalid string type is provided.
A ValueError is raised if the normalized string contains
invalid characters.
If the strict parameter is set to True, a ValueError is raised
if any of the above transformations are applied.
The normalized string is returned.
|
def decompress(obj, return_type="bytes"):
    """
    De-compress it to it's original.

    :param obj: Compressed object, could be bytes or str (str is assumed
        to be base64-encoded).
    :param return_type: "bytes" returns the raw bytes; "str" decodes them
        as UTF-8; "obj" unpickles them back into a Python object.
    """
    if isinstance(obj, binary_type):
        raw = zlib.decompress(obj)
    elif isinstance(obj, string_types):
        raw = zlib.decompress(base64.b64decode(obj.encode("utf-8")))
    else:
        raise TypeError("input cannot be anything other than str and bytes!")

    if return_type == "bytes":
        return raw
    if return_type == "str":
        return raw.decode("utf-8")
    if return_type == "obj":
        return pickle.loads(raw)
    raise ValueError(
        "'return_type' has to be one of 'bytes', 'str' or 'obj'!")
|
De-compress it to it's original.
:param obj: Compressed object, could be bytes or str.
:param return_type: if bytes, then return bytes; if str, then use
base64.b64decode; if obj, then use pickle.loads return an object.
|
def status(self,verbose=0):
    """Print a status of all jobs currently being managed."""
    # NOTE(review): 'verbose' is currently unused — presumably kept for
    # API compatibility; confirm before removing.
    # Refresh job states before reporting.
    self._update_status()
    self._group_report(self.running,'Running')
    self._group_report(self.completed,'Completed')
    self._group_report(self.dead,'Dead')
    # Also flush the report queues
    self._comp_report[:] = []
    self._dead_report[:] = []
|
Print a status of all jobs currently being managed.
|
def created_by_column(self, obj):
    """Return user who first created an item in Django admin.

    Looks up the ADDITION log entry for *obj*; returns None when no such
    entry exists.
    """
    try:
        creation_entry = admin.models.LogEntry.objects.filter(
            object_id=obj.pk,
            content_type_id=self._get_obj_ct(obj).pk,
            action_flag=admin.models.ADDITION,
        ).get()
        return creation_entry.user
    except admin.models.LogEntry.DoesNotExist:
        return None
|
Return user who first created an item in Django admin
|
def delete_zone(server, token, domain):
    """Delete specific zone.

    Argument:
        server: TonicDNS API server
        token:  TonicDNS API authentication token
        domain: Specify domain name

    x-authentication-token: token
    """
    # Issue DELETE /zone/<domain> against the API server.
    endpoint = 'https://' + server + '/zone/' + domain
    connect.tonicdns_client(endpoint, 'DELETE', token, data=False)
|
Delete specific zone.
Argument:
server: TonicDNS API server
token: TonicDNS API authentication token
domain: Specify domain name
x-authentication-token: token
|
def how_many(self):
    """
    Ascertain where to start downloading, and how many entries.

    Returns a (currentdate, stop) pair: ``currentdate`` is the newest known
    entry date (or a distant-past placeholder on first sync) and ``stop``
    is the maximum number of entries to download.
    """
    if self.linkdates != []:
        # We have seen entries before: resume from the newest one.  Guard
        # against entries dated in the future by clamping to "now".
        newest = max(self.linkdates)
        now = list(time.localtime())
        if newest <= now:
            currentdate = newest
        else:
            currentdate = now
            print(("This entry has its date set in the future. "
                   "I will use your current local time as its date "
                   "instead."),
                  file=sys.stderr, flush=True)
        return currentdate, sys.maxsize

    # First sync: start from a distant past date and honour 'firstsync'.
    currentdate = [1, 1, 1, 0, 0]
    firstsync = self.retrieve_config('firstsync', '1')
    stop = sys.maxsize if firstsync == 'all' else int(firstsync)
    return currentdate, stop
|
Ascertain where to start downloading, and how many entries.
|
def common_vector_root(vec1, vec2):
    """
    Return common root of the two vectors.

    Args:
        vec1 (list/tuple): First vector.
        vec2 (list/tuple): Second vector.

    Usage example::
        >>> common_vector_root([1, 2, 3, 4, 5], [1, 2, 8, 9, 0])
        [1, 2]

    Returns:
        list: Common part of two vectors or blank list.
    """
    prefix = []
    for left, right in zip(vec1, vec2):
        if left != right:
            break
        prefix.append(left)
    return prefix
|
Return common root of the two vectors.
Args:
vec1 (list/tuple): First vector.
vec2 (list/tuple): Second vector.
Usage example::
>>> common_vector_root([1, 2, 3, 4, 5], [1, 2, 8, 9, 0])
[1, 2]
Returns:
list: Common part of two vectors or blank list.
|
def radec2azel(ra_deg: float, dec_deg: float,
               lat_deg: float, lon_deg: float,
               time: datetime, usevallado: bool = False) -> Tuple[float, float]:
    """
    Convert sky coordinates (ra, dec) to a viewing angle (az, el).

    Parameters
    ----------
    ra_deg : float or numpy.ndarray of float
        ecliptic right ascension (degrees)
    dec_deg : float or numpy.ndarray of float
        ecliptic declination (degrees)
    lat_deg : float
        observer latitude [-90, 90]
    lon_deg : float
        observer longitude [-180, 180] (degrees)
    time : datetime.datetime
        time of observation
    usevallado : bool, optional
        default use astropy. If true, use Vallado algorithm

    Returns
    -------
    az_deg : float or numpy.ndarray of float
        azimuth [degrees clockwize from North]
    el_deg : float or numpy.ndarray of float
        elevation [degrees above horizon (neglecting aberration)]
    """
    if usevallado or Time is None:
        # Pure-python fallback, used on request or when astropy is absent.
        return vradec2azel(ra_deg, dec_deg, lat_deg, lon_deg, time)

    observer = EarthLocation(lat=np.atleast_1d(lat_deg) * u.deg,
                             lon=np.atleast_1d(lon_deg) * u.deg)
    targets = SkyCoord(Angle(np.atleast_1d(ra_deg), unit=u.deg),
                       Angle(np.atleast_1d(dec_deg), unit=u.deg),
                       equinox='J2000.0')
    horizontal = targets.transform_to(
        AltAz(location=observer, obstime=Time(str2dt(time))))
    return horizontal.az.degree, horizontal.alt.degree
|
sky coordinates (ra, dec) to viewing angle (az, el)
Parameters
----------
ra_deg : float or numpy.ndarray of float
ecliptic right ascension (degress)
dec_deg : float or numpy.ndarray of float
ecliptic declination (degrees)
lat_deg : float
observer latitude [-90, 90]
lon_deg : float
observer longitude [-180, 180] (degrees)
time : datetime.datetime
time of observation
usevallado : bool, optional
default use astropy. If true, use Vallado algorithm
Returns
-------
az_deg : float or numpy.ndarray of float
azimuth [degrees clockwize from North]
el_deg : float or numpy.ndarray of float
elevation [degrees above horizon (neglecting aberration)]
|
def _func_addrs_from_prologues(self):
    """
    Scan the entire program image for function prologues, and start code scanning at those positions

    :return: A list of possible function addresses
    """
    # Pre-compile all regexes once; each is applied to every memory backer.
    regexes = [re.compile(ins_regex)
               for ins_regex in self.project.arch.function_prologs]
    # EDG says: I challenge anyone bothering to read this to come up with a better
    # way to handle CPU modes that affect instruction decoding.
    # Since the only one we care about is ARM/Thumb right now
    # we have this gross hack. Sorry about that.
    thumb_regexes = []
    if hasattr(self.project.arch, 'thumb_prologs'):
        # Thumb prologues are found at even addrs, but their actual addr is odd!
        # Isn't that great?
        thumb_regexes = [re.compile(ins_regex)
                         for ins_regex in self.project.arch.thumb_prologs]

    unassured_functions = [ ]

    def _scan(bytes_, start_, regex_list, addr_delta):
        # Collect aligned, executable matches of every regex within one
        # memory backer, offsetting each hit by addr_delta (1 for Thumb).
        for regex in regex_list:
            for mo in regex.finditer(bytes_):
                position = mo.start() + start_
                if position % self.project.arch.instruction_alignment != 0:
                    continue
                mapped_position = AT.from_rva(position, self._binary).to_mva()
                if self._addr_in_exec_memory_regions(mapped_position):
                    unassured_functions.append(mapped_position + addr_delta)

    for start_, bytes_ in self._binary.memory.backers():
        _scan(bytes_, start_, regexes, 0)
        # HACK part 2: Thumb hits are recorded with the +1 (mode) bit set.
        _scan(bytes_, start_, thumb_regexes, 1)

    l.info("Found %d functions with prologue scanning.", len(unassured_functions))
    return unassured_functions
|
Scan the entire program image for function prologues, and start code scanning at those positions
:return: A list of possible function addresses
|
def group_singles2array(input, **params):
    """
    Creates array of strings or ints from objects' fields.

    Without a ``field.key`` parameter, collects every object's
    ``field.single`` value into one dict ``{field.array: [...]}``.
    With ``field.key``, groups values per key and returns a list of
    ``{field.key: key, field.array: [values]}`` dicts.

    :param input: list of objects
    :param params: 'field.key' (optional), 'field.array', 'field.single'
    :return: dict or list
    """
    key_field = params.get('field.key')
    array_field = params.get('field.array')
    single_field = params.get('field.single')

    if not key_field:
        return {array_field: [item[single_field] for item in input]}

    grouped = {}
    for row in input:
        grouped.setdefault(row[key_field], []).append(row[single_field])
    return [{key_field: key, array_field: values}
            for key, values in grouped.items()]
|
Creates array of strings or ints from objects' fields
:param input: list of objects
:param params:
:return: list
|
async def _on_receive_array(self, array):
    """Parse channel array and call the appropriate events."""
    if array[0] == 'noop':
        pass  # This is just a keep-alive, ignore it.
    else:
        wrapper = json.loads(array[0]['p'])
        # Wrapper appears to be a Protocol Buffer message, but encoded via
        # field numbers as dictionary keys. Since we don't have a parser
        # for that, parse it ad-hoc here.
        if '3' in wrapper:
            # This is a new client_id.
            self._client_id = wrapper['3']['2']
            logger.info('Received new client_id: %r', self._client_id)
            # Once client_id is received, the channel is ready to have
            # services added.
            await self._add_channel_services()
        if '2' in wrapper:
            # Field '2' carries an embedded pblite payload as a JSON string.
            pblite_message = json.loads(wrapper['2']['2'])
            if pblite_message[0] == 'cbu':
                # This is a (Client)BatchUpdate containing StateUpdate
                # messages.
                batch_update = hangouts_pb2.BatchUpdate()
                pblite.decode(batch_update, pblite_message,
                              ignore_first_item=True)
                # Fire on_state_update for every contained StateUpdate,
                # tracking the active-client state from each header.
                for state_update in batch_update.state_update:
                    logger.debug('Received StateUpdate:\n%s', state_update)
                    header = state_update.state_update_header
                    self._active_client_state = header.active_client_state
                    await self.on_state_update.fire(state_update)
            else:
                logger.info('Ignoring message: %r', pblite_message[0])
|
Parse channel array and call the appropriate events.
|
def parse_object_type_definition(lexer: Lexer) -> ObjectTypeDefinitionNode:
    """Parse an ObjectTypeDefinition from the current lexer position."""
    start_token = lexer.token
    # Ordering matters: each step below consumes tokens from the lexer.
    node_description = parse_description(lexer)
    expect_keyword(lexer, "type")
    node_name = parse_name(lexer)
    node_interfaces = parse_implements_interfaces(lexer)
    node_directives = parse_directives(lexer, True)
    node_fields = parse_fields_definition(lexer)
    return ObjectTypeDefinitionNode(
        description=node_description,
        name=node_name,
        interfaces=node_interfaces,
        directives=node_directives,
        fields=node_fields,
        loc=loc(lexer, start_token),
    )
|
ObjectTypeDefinition
|
def migrate_case(adapter: MongoAdapter, scout_case: dict, archive_data: dict):
    """Migrate case information from archive.

    Merges collaborators, assignees, suspected/causative variants, and the
    synopsis from ``archive_data`` into ``scout_case``, persists the updated
    case, and re-adds archived phenotype terms/groups through the adapter.

    Args:
        adapter: Scout database adapter used for lookups and persistence.
        scout_case: Existing Scout case document (mutated in place).
        archive_data: Case information exported from the archive.
    """
    # update collaborators
    collaborators = list(set(scout_case['collaborators'] + archive_data['collaborators']))
    if collaborators != scout_case['collaborators']:
        LOG.info(f"set collaborators: {', '.join(collaborators)}")
        scout_case['collaborators'] = collaborators
    # update assignees, but only when nobody is assigned already
    if len(scout_case.get('assignees', [])) == 0:
        scout_user = adapter.user(archive_data['assignee'])
        if scout_user:
            scout_case['assignees'] = [archive_data['assignee']]
        else:
            LOG.warning(f"{archive_data['assignee']}: unable to find assigned user")
    # add/update suspected/causative variants
    for key in ['suspects', 'causatives']:
        scout_case[key] = scout_case.get(key, [])
        for archive_variant in archive_data[key]:
            variant_id = get_variantid(archive_variant, scout_case['_id'])
            scout_variant = adapter.variant(variant_id)
            if scout_variant:
                if scout_variant['_id'] in scout_case[key]:
                    LOG.info(f"{scout_variant['_id']}: variant already in {key}")
                else:
                    LOG.info(f"{scout_variant['_id']}: add to {key}")
                    # BUGFIX: append to the case document, not to the
                    # variant document as the original did.
                    scout_case[key].append(scout_variant['_id'])
            else:
                # BUGFIX: scout_variant is None in this branch, so use the
                # computed variant_id instead of dereferencing None, and
                # record the id on the case rather than on the variant.
                LOG.warning(f"{variant_id}: unable to find variant ({key})")
                scout_case[key].append(variant_id)
    if not scout_case.get('synopsis'):
        # update synopsis
        scout_case['synopsis'] = archive_data['synopsis']
    scout_case['is_migrated'] = True
    adapter.case_collection.find_one_and_replace(
        {'_id': scout_case['_id']},
        scout_case,
    )
    # add/update phenotype groups/terms
    scout_institute = adapter.institute(scout_case['owner'])
    scout_user = adapter.user('mans.magnusson@scilifelab.se')
    for key in ['phenotype_terms', 'phenotype_groups']:
        for archive_term in archive_data[key]:
            adapter.add_phenotype(
                institute=scout_institute,
                case=scout_case,
                user=scout_user,
                link=f"/{scout_case['owner']}/{scout_case['display_name']}",
                hpo_term=archive_term['phenotype_id'],
                is_group=key == 'phenotype_groups',
            )
|
Migrate case information from archive.
|
def select_dict(conn, query: str, params=None, name=None, itersize=5000):
    """Yield the rows of a select statement as dictionaries.

    Parameters
    ----------
    conn : database connection
    query : select query string
    params : query parameters.
    name : server side cursor name. defaults to client side.
    itersize : number of records fetched by server.
    """
    dict_cursor = conn.cursor(name, cursor_factory=RealDictCursor)
    with dict_cursor:
        dict_cursor.itersize = itersize
        dict_cursor.execute(query, params)
        yield from dict_cursor
|
Return a select statement's results as dictionary.
Parameters
----------
conn : database connection
query : select query string
params : query parameters.
name : server side cursor name. defaults to client side.
itersize : number of records fetched by server.
|
def VGGFace(include_top=True, model='vgg16', weights='vggface',
            input_tensor=None, input_shape=None,
            pooling=None,
            classes=None):
    """Instantiate one of the VGGFace architectures.

    Optionally loads weights pre-trained on the VGGFace datasets. When using
    TensorFlow, set `image_data_format="channels_last"` in your Keras config
    at ~/.keras/keras.json for best performance. The model and the weights
    are compatible with both TensorFlow and Theano; the data format
    convention used is the one specified in your Keras config file.

    # Arguments
        include_top: whether to include the 3 fully-connected
            layers at the top of the network.
        model: selects one of the available architectures:
            'vgg16', 'resnet50' or 'senet50' (default is 'vgg16').
        weights: one of `None` (random initialization)
            or "vggface" (pre-training on VGGFace datasets).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` with `channels_last` data format
            or `(3, 224, 224)` with `channels_first` data format).
            It should have exactly 3 input channels, and width and
            height should be no smaller than 48.
            E.g. `(200, 200, 3)` would be one valid value.
        pooling: optional pooling mode for feature extraction
            when `include_top` is `False`:
            - `None`: the output of the model is the 4D tensor output
              of the last convolutional layer.
            - `avg`: global average pooling is applied to the output of
              the last convolutional layer, giving a 2D tensor.
            - `max`: global max pooling is applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance (or `None` for an unrecognized `model` name,
        mirroring the original fall-through behaviour).

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """
    if weights not in {'vggface', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `vggface`'
                         '(pre-training on VGGFace Datasets).')

    # Table of architecture builders and their pre-trained class counts.
    architectures = {
        'vgg16': (VGG16, 2622),
        'resnet50': (RESNET50, 8631),
        'senet50': (SENET50, 8631),
    }
    if model not in architectures:
        return None
    builder, pretrained_classes = architectures[model]

    if classes is None:
        classes = pretrained_classes
    if weights == 'vggface' and include_top and classes != pretrained_classes:
        raise ValueError(
            'If using `weights` as vggface original with `include_top`'
            ' as true, `classes` should be ' + str(pretrained_classes))

    return builder(include_top=include_top, input_tensor=input_tensor,
                   input_shape=input_shape, pooling=pooling,
                   weights=weights,
                   classes=classes)
|
Instantiates the VGGFace architectures.
Optionally loads weights pre-trained
on VGGFace datasets. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "vggface" (pre-training on VGGFACE datasets).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
model: selects the one of the available architectures
vgg16, resnet50 or senet50 default is vgg16.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
        or `(3, 224, 224)` (with `channels_first` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 48.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
|
def commit_and_quit(self):
    """
    Commit and close the currently open configuration. Saves a step by not
    needing to manually close the config after committing.

    Example:

    .. code-block:: python

        from pyJunosManager import JunosDevice

        dev = JunosDevice(host="1.2.3.4", username="root", password="Juniper")
        dev.open()
        dev.load_config_template("system{ host-name {{ hostname }};}", hostname="foo")
        dev.commit_and_quit()
        dev.close()
    """
    try:
        self.dev.rpc.commit_configuration()
        self.close_config()
    except Exception as err:
        # BUGFIX: `print err` is Python 2-only syntax and a SyntaxError on
        # Python 3 (the surrounding file uses Python 3 features elsewhere);
        # use the print() function, which works on both.
        print(err)
|
Commits and closes the currently open configuration. Saves a step by not needing to manually close the config.
Example:
.. code-block:: python
from pyJunosManager import JunosDevice
dev = JunosDevice(host="1.2.3.4",username="root",password="Juniper")
dev.open()
dev.load_config_template("system{ host-name {{ hostname }};}",hostname="foo")
dev.commit_and_quit()
dev.close()
|
def handle(self, line_info):
    """Try to get some help for the object.

    obj? or ?obj   -> basic information.
    obj?? or ??obj -> more details.

    Lines that compile as valid Python (checked via codeop) are delegated
    to the 'normal' prefilter handler; only lines that fail to compile
    are treated as help requests.
    """
    normal_handler = self.prefilter_manager.get_handler_by_name('normal')
    line = line_info.line
    # We need to make sure that we don't process lines which would be
    # otherwise valid python, such as "x=1 # what?"
    try:
        codeop.compile_command(line)
    except SyntaxError:
        # We should only handle as help stuff which is NOT valid syntax
        # Strip the help escape character from whichever end it is on.
        if line[0]==ESC_HELP:
            line = line[1:]
        elif line[-1]==ESC_HELP:
            line = line[:-1]
        if line:
            #print 'line:<%r>' % line # dbg
            self.shell.magic('pinfo %s' % line_info.ifun)
        else:
            # Nothing left after stripping '?': show general usage help.
            self.shell.show_usage()
        return '' # Empty string is needed here!
    except:
        raise
        # NOTE(review): unreachable -- the bare `raise` above re-raises
        # first, so non-SyntaxError exceptions are never passed to the
        # normal handler as the comment below claims. Confirm intent.
        # Pass any other exceptions through to the normal handler
        return normal_handler.handle(line_info)
    else:
        # If the code compiles ok, we should handle it normally
        return normal_handler.handle(line_info)
|
Try to get some help for the object.
obj? or ?obj -> basic information.
obj?? or ??obj -> more details.
|
def getServiceDependencies(self):
    """Return the analysis services this service depends on.

    :return: a flat list of analysis service objects (empty when the
        service has no calculation assigned).
    """
    calculation = self.getCalculation()
    if not calculation:
        return []
    return calculation.getCalculationDependencies(flat=True)
|
This method returns a list with the analysis service dependencies.
:return: a list of analysis service objects.
|
def list_plugins(path, user):
    '''
    List plugins in an installed wordpress path

    path
        path to wordpress install location

    user
        user to run the command as

    CLI Example:

    .. code-block:: bash

        salt '*' wordpress.list_plugins /var/www/html apache
    '''
    cmd = 'wp --path={0} plugin list'.format(path)
    output = __salt__['cmd.shell'](cmd, runas=user)
    # The first line is the column header; every following line is one
    # tab-separated plugin row.
    rows = [line.split('\t') for line in output.split('\n')[1:]]
    return [plugin.__dict__ for plugin in map(_get_plugins, rows)]
|
List plugins in an installed wordpress path
path
path to wordpress install location
user
user to run the command as
CLI Example:
.. code-block:: bash
salt '*' wordpress.list_plugins /var/www/html apache
|
def get_server_setting(settings, server=_DEFAULT_SERVER):
    '''
    Get the value of the setting for the SMTP virtual server.

    :param str settings: A list of the setting names.
    :param str server: The SMTP server name.

    :return: A dictionary of the provided settings and their values.
    :rtype: dict

    CLI Example:

    .. code-block:: bash

        salt '*' win_smtp_server.get_server_setting settings="['MaxRecipients']"
    '''
    results = {}
    if not settings:
        _LOG.warning('No settings provided.')
        return results
    with salt.utils.winapi.Com():
        try:
            wmi_connection = wmi.WMI(namespace=_WMI_NAMESPACE)
            server_config = wmi_connection.IIsSmtpServerSetting(settings, Name=server)[0]
            for setting_name in settings:
                results[setting_name] = six.text_type(getattr(server_config, setting_name))
        except wmi.x_wmi as err:
            _LOG.error('Encountered WMI error: %s', err.com_error)
        except (AttributeError, IndexError) as err:
            _LOG.error('Error getting IIsSmtpServerSetting: %s', err)
    return results
|
Get the value of the setting for the SMTP virtual server.
:param str settings: A list of the setting names.
:param str server: The SMTP server name.
:return: A dictionary of the provided settings and their values.
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' win_smtp_server.get_server_setting settings="['MaxRecipients']"
|
def init():
    """Reset this module's global assembler state to its defaults."""
    global ORG, LEXER, MEMORY, INITS, AUTORUN_ADDR, NAMESPACE
    ORG = 0  # Origin of CODE
    INITS = []
    MEMORY = None  # Will hold a Memory() instance for emitted instructions
    AUTORUN_ADDR = None  # Address where execution starts automatically
    # Current namespace (defaults to ''); prefix added to each global label.
    NAMESPACE = GLOBAL_NAMESPACE
    gl.has_errors = 0
    gl.error_msg_cache.clear()
|
Initializes this module
|
def rule_operation(self, **kwargs):
    """Build the brocade-aaa <rule>/<operation> config element.

    Pops 'index' and 'operation' from kwargs to populate the XML, then
    hands the document to the 'callback' kwarg (default: self._callback).
    """
    callback = kwargs.pop('callback', self._callback)
    config = ET.Element("config")
    rule = ET.SubElement(config, "rule",
                        xmlns="urn:brocade.com:mgmt:brocade-aaa")
    ET.SubElement(rule, "index").text = kwargs.pop('index')
    ET.SubElement(rule, "operation").text = kwargs.pop('operation')
    return callback(config)
|
Auto Generated Code
|
def deferral():
    """Defer function calls until the block exits, like Go's ``defer``.

    ::

       with deferral() as defer:
           sys.setprofile(f)
           defer(sys.setprofile, None)
           # do something.
    """
    pending = []

    def defer(func, *args, **kwargs):
        pending.append((func, args, kwargs))

    try:
        yield defer
    finally:
        # Run deferred calls in LIFO order, like a stack of Go defers.
        while pending:
            func, args, kwargs = pending.pop()
            func(*args, **kwargs)
|
Defers a function call when it is being required like Go.
::
with deferral() as defer:
sys.setprofile(f)
defer(sys.setprofile, None)
# do something.
|
def url(self, name):
    """Ask the blobstore API for a URL that directly serves the file."""
    gs_key = blobstore.create_gs_key('/gs' + name)
    serving_url = images.get_serving_url(gs_key)
    return serving_url
|
Ask blobstore api for an url to directly serve the file
|
def append(self, name, value):
    """
    Append the string ``value`` to the value stored at ``key``, creating
    the key with value ``value`` if it does not already exist.

    Returns the new length of the value at ``key``.

    :param name: str the name of the redis key
    :param value: str
    :return: Future()
    """
    with self.pipe as pipe:
        key = self.redis_key(name)
        encoded = self.valueparse.encode(value)
        return pipe.append(key, encoded)
|
Appends the string ``value`` to the value at ``key``. If ``key``
doesn't already exist, create it with a value of ``value``.
Returns the new length of the value at ``key``.
:param name: str the name of the redis key
:param value: str
:return: Future()
|
def addworkdays(self, date, offset):
    """
    Add work days to a given date, ignoring holidays.

    Note:
        By definition, a zero offset causes the function to return the
        initial date, even it is not a work date. An offset of 1
        represents the next work date, regardless of date being a work
        date or not.

    Args:
        date (date, datetime or str): Date to be incremented.
        offset (integer): Number of work days to add. Positive values move
            the date forward and negative values move the date back.

    Returns:
        datetime: New incremented date.
    """
    date = parsefun(date)
    if offset == 0:
        return date
    # Select the direction-dependent weekdaymap field indices; the
    # "*_other" pair points the opposite way and is only used for the
    # initial snap onto a workday below.
    if offset > 0:
        direction = 1
        idx_offset = Calendar._idx_offsetnext
        idx_next = Calendar._idx_nextworkday
        idx_offset_other = Calendar._idx_offsetprev
        idx_next_other = Calendar._idx_prevworkday
    else:
        direction = -1
        idx_offset = Calendar._idx_offsetprev
        idx_next = Calendar._idx_prevworkday
        idx_offset_other = Calendar._idx_offsetnext
        idx_next_other = Calendar._idx_nextworkday
    # adjust date to first work day before/after so counting always
    # starts from a workday
    weekdaymap = self.weekdaymap # speed up
    datewk = date.weekday()
    if not weekdaymap[datewk].isworkday:
        date += datetime.timedelta(days=\
            weekdaymap[datewk][idx_offset_other])
        datewk = weekdaymap[datewk][idx_next_other]
    # Whole weeks contribute 7 calendar days each; the remaining nd
    # workdays are walked one at a time, accumulating the calendar-day
    # gaps between consecutive workdays.
    nw, nd = divmod(abs(offset), len(self.workdays))
    ndays = nw * 7
    while nd > 0:
        ndays += abs(weekdaymap[datewk][idx_offset])
        datewk = weekdaymap[datewk][idx_next]
        nd -= 1
    date += datetime.timedelta(days=ndays*direction)
    return date
|
Add work days to a given date, ignoring holidays.
Note:
By definition, a zero offset causes the function to return the
initial date, even it is not a work date. An offset of 1
represents the next work date, regardless of date being a work
date or not.
Args:
date (date, datetime or str): Date to be incremented.
offset (integer): Number of work days to add. Positive values move
the date forward and negative values move the date back.
Returns:
datetime: New incremented date.
|
def write_patch_file(self, patch_file, lines_to_write):
    """Write ``lines_to_write`` to the file named ``patch_file``.

    :param patch_file: file name of the patch to generate
    :param lines_to_write: lines to write to the file - they should be \n terminated
    :type lines_to_write: list[str]
    :return: None
    """
    with open(patch_file, 'w') as out_file:
        for line in lines_to_write:
            out_file.write(line)
|
Write lines_to_write to the file called patch_file
:param patch_file: file name of the patch to generate
:param lines_to_write: lines to write to the file - they should be \n terminated
:type lines_to_write: list[str]
:return: None
|
def backwardeuler(dfun, xzero, timerange, timestep):
    '''Backward Euler method integration. This function wraps BackwardEuler.

    :param dfun:
        Derivative function of the system.
        The differential system arranged as a series of first-order
        equations: \dot{X} = dfun(t, x)
    :param xzero:
        The initial condition of the system.
    :param timerange:
        The start and end times as (starttime, endtime).
    :param timestep:
        The timestep.
    :returns: t, x:
        as lists.

    Note:
        Unlike the underlying BackwardEuler iterator, this wrapper does
        not expose the implicit-solver options (convergence threshold,
        maximum iterations); the BackwardEuler defaults are used.
    '''
    # NOTE(review): on Python 3 zip() returns an iterator of tuples, so the
    # "as lists" claim above is Python 2 era -- confirm intended return type.
    return zip(*list(BackwardEuler(dfun, xzero, timerange, timestep)))
|
Backward Euler method integration. This function wraps BackwardEuler.
:param dfun:
Derivative function of the system.
The differential system arranged as a series of first-order
equations: \dot{X} = dfun(t, x)
:param xzero:
The initial condition of the system.
:param vzero:
    The initial condition of first derivative of the system
    (an option of the underlying BackwardEuler iterator; it is not a
    parameter of this wrapper's signature).
:param timerange:
    The start and end times as (starttime, endtime).
:param timestep:
    The timestep.
:param convergencethreshold:
    Each step requires an iterative solution of an implicit equation.
    This is the threshold of convergence (underlying-iterator option,
    not accepted by this wrapper).
:param maxiterations:
    Maximum iterations of the implicit equation before raising
    an exception (underlying-iterator option, not accepted by this wrapper).
:returns: t, x:
as lists.
|
async def call_async(self, method_name: str, *args, rpc_timeout: float = None, **kwargs):
    """
    Send a JSON RPC request to a backend socket and await the reply.

    :param method_name: Method name
    :param args: Args that will be passed to the remote function
    :param float rpc_timeout: Timeout in seconds for Server response; set to
        None to disable the timeout. Falls back to the client's own
        ``rpc_timeout`` attribute when not given.
    :param kwargs: Keyword args that will be passed to the remote function
    """
    # If an rpc_timeout override is not specified, use the one configured
    # on the Client instance.
    effective_timeout = self.rpc_timeout if rpc_timeout is None else rpc_timeout
    call = self._call_async(method_name, *args, **kwargs)
    if not effective_timeout:
        return await call
    # Wrap the call in a timeout and convert asyncio's TimeoutError to the
    # built-in TimeoutError.
    try:
        return await asyncio.wait_for(call, timeout=effective_timeout)
    except asyncio.TimeoutError:
        raise TimeoutError(f"Timeout on client {self.endpoint}, method name {method_name}, class info: {self}")
|
Send JSON RPC request to a backend socket and receive reply (asynchronously)
:param method_name: Method name
:param args: Args that will be passed to the remote function
:param float rpc_timeout: Timeout in seconds for Server response, set to None to disable the timeout
:param kwargs: Keyword args that will be passed to the remote function
|
def set_rotation(self, rotation):
    """Set the rotation of the stereonet in degrees clockwise from North."""
    self._rotation = np.radians(rotation)
    # Offset the polar overlay's zero so it tracks the new rotation.
    theta_offset = self._rotation + np.pi / 2.0
    self._polar.set_theta_offset(theta_offset)
    # Invalidate the cached transforms and rebuild them.
    for transform in (self.transData, self.transAxes):
        transform.invalidate()
    self._set_lim_and_transforms()
|
Set the rotation of the stereonet in degrees clockwise from North.
|
def get_features_by_ids(self, ids=None, threshold=0.0001, func=np.mean,
                        get_weights=False):
    ''' Returns features for which the aggregated loading (default: mean)
    across all specified studies (in ids) is >= threshold.

    Args:
        ids: Sequence of study identifiers (row labels of ``self.data``).
        threshold: Minimum aggregated loading a feature must reach.
        func: Aggregation function applied column-wise over the selected
            rows (default: np.mean).
        get_weights: If True, return the Series of feature weights above
            threshold; otherwise return a list of feature names.
    '''
    # BUGFIX: DataFrame.ix was deprecated in pandas 0.20 and removed in
    # pandas 1.0; label-based row selection now uses .loc. The positional
    # second argument to apply() is also made explicit as axis=0.
    weights = self.data.loc[ids].apply(func, axis=0)
    above_thresh = weights[weights >= threshold]
    return above_thresh if get_weights else list(above_thresh.index)
|
Returns features for which the mean loading across all specified
studies (in ids) is >= threshold.
|
def sigPerms(s):
    """Generate all signatures reachable from ``s`` by upcasting.

    Each numeric type code may be replaced by itself or any code to its
    right in 'bilfdc'; 's' (string) is never cast; any other character
    passes the remaining signature through unchanged.
    """
    codes = 'bilfdc'
    if not s:
        yield ''
        return
    head, tail = s[0], s[1:]
    if head in codes:
        # Upcast the leading code to itself or any wider numeric code.
        for replacement in codes[codes.index(head):]:
            for rest in sigPerms(tail):
                yield replacement + rest
    elif head == 's':  # numbers shall not be cast to strings
        for rest in sigPerms(tail):
            yield 's' + rest
    else:
        yield s
|
Generate all possible signatures derived by upcasting the given
signature.
|
def locateChild(self, context, segments):
    """
    Return a statically defined child or a child defined by a sessionless
    site root plugin or an avatar from guard.
    """
    # Statically defined children: child_<name> methods win outright.
    static_factory = getattr(self, 'child_' + segments[0], None)
    if static_factory:
        resource = static_factory(context)
        if resource is not None:
            return resource, segments[1:]
    # Give each sessionless site-root plugin a chance to produce the child.
    request = IRequest(context)
    for plugin in self.siteStore.powerupsFor(ISessionlessSiteRootPlugin):
        produce = getattr(plugin, 'sessionlessProduceResource', None)
        if produce is not None:
            result = produce(request, segments)
        else:
            # Fall back to the older plugin API.
            result = plugin.resourceFactory(segments)
        if result is not None:
            return result
    # Finally, defer to guard, which handles avatars/sessions.
    return self.guardedRoot.locateChild(context, segments)
|
Return a statically defined child or a child defined by a sessionless
site root plugin or an avatar from guard.
|
def stubs_clustering(network, use_reduced_coordinates=True, line_length_factor=1.0):
    """Cluster a network by sequentially reducing stubs and stubby trees
    (i.e. dead-ends).

    Parameters
    ----------
    network : pypsa.Network
    use_reduced_coordinates : boolean
        If True, do not average clusters, but take coordinates from busmap.
    line_length_factor : float
        Factor to multiply the crow-flies distance between new buses in order
        to get new line lengths.

    Returns
    -------
    Clustering : named tuple
        A named tuple containing network, busmap and linemap
    """
    busmap = busmap_by_stubs(network)
    if use_reduced_coordinates:
        # Reset coordinates to the reduced buses rather than averaging.
        # TODO: FIX THIS HACK THAT HAS UNEXPECTED SIDE-EFFECTS,
        # i.e. network is changed in place!!
        reduced_xy = network.buses.loc[busmap, ['x', 'y']].values
        network.buses.loc[busmap.index, ['x', 'y']] = reduced_xy
    return get_clustering_from_busmap(
        network, busmap, line_length_factor=line_length_factor)
|
Cluster network by reducing stubs and stubby trees
(i.e. sequentially reducing dead-ends).
Parameters
----------
network : pypsa.Network
use_reduced_coordinates : boolean
If True, do not average clusters, but take from busmap.
line_length_factor : float
Factor to multiply the crow-flies distance between new buses in order to get new
line lengths.
Returns
-------
Clustering : named tuple
A named tuple containing network, busmap and linemap
|
def _findAssociatedConfigSpecFile(self, cfgFileName):
    """ Given a config file, find its associated config-spec file, and
    return the full pathname of the file.

    Search order:
      1. a co-located .cfgspc in the current directory,
      2. the directory containing cfgFileName,
      3. the default save (resource) directory,
      4. the associated package, if one was set,
      5. a package importable under the task's own name.

    Raises NoCfgFileError when no candidate is found. """
    # Handle simplest 2 cases first: co-located or local .cfgspc file
    retval = "."+os.sep+self.__taskName+".cfgspc"
    if os.path.isfile(retval): return retval
    retval = os.path.dirname(cfgFileName)+os.sep+self.__taskName+".cfgspc"
    if os.path.isfile(retval): return retval
    # Also try the resource dir
    retval = self.getDefaultSaveFilename()+'spc' # .cfgspc
    if os.path.isfile(retval): return retval
    # Now try and see if there is a matching .cfgspc file in/under an
    # associated package, if one is defined.
    if self.__assocPkg is not None:
        x, theFile = findCfgFileForPkg(None, '.cfgspc',
                                       pkgObj = self.__assocPkg,
                                       taskName = self.__taskName)
        # NOTE(review): unlike the other branches, this result is returned
        # without an existence check -- confirm that is intentional.
        return theFile
    # Finally try to import the task name and see if there is a .cfgspc
    # file in that directory
    x, theFile = findCfgFileForPkg(self.__taskName, '.cfgspc',
                                   taskName = self.__taskName)
    if os.path.exists(theFile):
        return theFile
    # unfound
    raise NoCfgFileError('Unfound config-spec file for task: "'+ \
                         self.__taskName+'"')
|
Given a config file, find its associated config-spec file, and
return the full pathname of the file.
|
def importaccount(ctx, account, role):
    """ Import an account using an account password
    """
    from peerplaysbase.account import PasswordKey

    password = click.prompt("Account Passphrase", hide_input=True)
    account = Account(account, peerplays_instance=ctx.peerplays)
    imported = False

    if role in ("owner", "active"):
        # Owner/active keys are matched against the account's key_auths.
        candidate = PasswordKey(account["name"], password, role=role)
        pubkey = format(
            candidate.get_public_key(), ctx.peerplays.rpc.chain_params["prefix"]
        )
        authorized = [auth[0] for auth in account[role]["key_auths"]]
        if pubkey in authorized:
            click.echo("Importing {} key!".format(role))
            ctx.peerplays.wallet.addPrivateKey(candidate.get_private_key())
            imported = True
    elif role == "memo":
        # The memo key is matched against the single memo_key field.
        candidate = PasswordKey(account["name"], password, role=role)
        pubkey = format(
            candidate.get_public_key(), ctx.peerplays.rpc.chain_params["prefix"]
        )
        if pubkey == account["memo_key"]:
            click.echo("Importing memo key!")
            ctx.peerplays.wallet.addPrivateKey(candidate.get_private_key())
            imported = True

    if not imported:
        click.echo("No matching key(s) found. Password correct?")
|
Import an account using an account password
|
def load_batch(self, fn_batch):
    """ Load a batch given (feature_path, target_path) prefix pairs.

    The prefixes are the full paths to the training examples minus the
    extension.
    """
    # TODO Assumes targets are available, which is how its distinct from
    # utils.load_batch_x(). These functions need to change names to be
    # clearer.
    feat_paths, target_paths = zip(*fn_batch)
    batch_inputs, batch_inputs_lens = utils.load_batch_x(feat_paths,
                                                         flatten=False)
    target_index_lists = []
    for target_path in target_paths:
        with open(target_path, encoding=ENCODING) as targets_f:
            tokens = targets_f.readline().split()
            target_index_lists.append(self.corpus.labels_to_indices(tokens))
    batch_targets = utils.target_list_to_sparse_tensor(target_index_lists)
    return batch_inputs, batch_inputs_lens, batch_targets
|
Loads a batch with the given prefixes. The prefixes is the full path to the
training example minus the extension.
|
def check_req(req):
    """Check whether ``req`` pins the latest available version.

    Returns None for non-Requirement input, for pinned version ranges, and
    for up-to-date requirements; otherwise a (name, current, newest) tuple.
    """
    if not isinstance(req, Requirement):
        return None
    newest_version = _get_newest_version(get_package_info(req.name))
    if _is_pinned(req) and _is_version_range(req):
        return None
    if req.specifier:
        current_version = next(iter(req.specifier)).version
    else:
        current_version = None
    if current_version != newest_version:
        return req.name, current_version, newest_version
|
Checks if a given req is the latest version available.
|
def detect_c3_function_shadowing(contract):
    """
    Detects and obtains functions which are indirectly shadowed via multiple inheritance by C3 linearization
    properties, despite not directly inheriting from each other.
    :param contract: The contract to check for potential C3 linearization shadowing within.
    :return: A list of list of tuples: (contract, function), where each inner list describes colliding functions.
    The later elements in the inner list overshadow the earlier ones. The contract-function pair's function does not
    need to be defined in its paired contract, it may have been inherited within it.
    """
    # Loop through all contracts, and all underlying functions.
    # results maps full_name -> matching list, so each colliding signature
    # is reported exactly once (keyed by its first occurrence).
    results = {}
    for i in range(0, len(contract.immediate_inheritance) - 1):
        inherited_contract1 = contract.immediate_inheritance[i]
        for function1 in inherited_contract1.functions_and_modifiers:
            # If this function has already be handled or is unimplemented, we skip it
            if function1.full_name in results or function1.is_constructor or not function1.is_implemented:
                continue
            # Define our list of function instances which overshadow each other.
            functions_matching = [(inherited_contract1, function1)]
            already_processed = set([function1])
            # Loop again through other contracts and functions to compare to.
            # Only later entries in immediate_inheritance are scanned, so the
            # inner list preserves linearization order (later overshadows earlier).
            for x in range(i + 1, len(contract.immediate_inheritance)):
                inherited_contract2 = contract.immediate_inheritance[x]
                # Loop for each function in this contract
                for function2 in inherited_contract2.functions_and_modifiers:
                    # Skip this function if it is the last function that was shadowed.
                    if function2 in already_processed or function2.is_constructor or not function2.is_implemented:
                        continue
                    # If this function does have the same full name, it is shadowing through C3 linearization.
                    if function1.full_name == function2.full_name:
                        functions_matching.append((inherited_contract2, function2))
                        already_processed.add(function2)
            # If we have more than one definition matching the same signature, we add it to the results.
            if len(functions_matching) > 1:
                results[function1.full_name] = functions_matching
    return list(results.values())
|
Detects and obtains functions which are indirectly shadowed via multiple inheritance by C3 linearization
properties, despite not directly inheriting from each other.
:param contract: The contract to check for potential C3 linearization shadowing within.
:return: A list of list of tuples: (contract, function), where each inner list describes colliding functions.
The later elements in the inner list overshadow the earlier ones. The contract-function pair's function does not
need to be defined in its paired contract, it may have been inherited within it.
|
def request_permissions(self, permissions):
    """ Return a future that resolves with the results
    of the permission requests

    :param permissions: iterable of Android permission name strings
    :return: a future resolving to a dict mapping each permission to a
        bool indicating whether it was granted
    """
    f = self.create_future()
    #: Old versions of android did permissions at install time
    if self.api_level < 23:
        f.set_result({p: True for p in permissions})
        return f
    w = self.widget
    # Each call gets its own request code to correlate the async result.
    request_code = self._permission_code
    self._permission_code += 1 #: So next call has a unique code
    #: On first request, setup our listener, and request the permission
    if request_code == 0:
        w.setPermissionResultListener(w.getId())
        w.onRequestPermissionsResult.connect(self._on_permission_result)
    # Closure capturing `f`; invoked when the result for this request
    # code arrives (the `code` argument itself is unused here).
    def on_results(code, perms, results):
        #: Check permissions
        f.set_result({p: r == Activity.PERMISSION_GRANTED
                      for (p, r) in zip(perms, results)})
    #: Save a reference
    self._permission_requests[request_code] = on_results
    #: Send out the request
    self.widget.requestPermissions(permissions, request_code)
    return f
|
Return a future that resolves with the results
of the permission requests
|
def matrix(mat):
    """Convert a ROOT TMatrix into a NumPy matrix.

    Parameters
    ----------
    mat : ROOT TMatrixT
        A ROOT TMatrixD or TMatrixF

    Returns
    -------
    mat : numpy.matrix
        A NumPy matrix

    Raises
    ------
    TypeError
        If ``mat`` is not a supported ROOT matrix type.

    Examples
    --------
    >>> from root_numpy import matrix
    >>> from ROOT import TMatrixD
    >>> a = TMatrixD(4, 4)
    >>> a[1][2] = 2
    >>> matrix(a)
    matrix([[ 0.,  0.,  0.,  0.],
            [ 0.,  0.,  2.,  0.],
            [ 0.,  0.,  0.,  0.],
            [ 0.,  0.,  0.,  0.]])
    """
    import ROOT

    # Dispatch on the ROOT matrix precision (double vs. float).
    if isinstance(mat, (ROOT.TMatrixD, ROOT.TMatrixDSym)):
        converter = _librootnumpy.matrix_d
    elif isinstance(mat, (ROOT.TMatrixF, ROOT.TMatrixFSym)):
        converter = _librootnumpy.matrix_f
    else:
        raise TypeError(
            "unable to convert object of type {0} "
            "into a numpy matrix".format(type(mat)))
    return converter(ROOT.AsCObject(mat))
|
Convert a ROOT TMatrix into a NumPy matrix.
Parameters
----------
mat : ROOT TMatrixT
A ROOT TMatrixD or TMatrixF
Returns
-------
mat : numpy.matrix
A NumPy matrix
Examples
--------
>>> from root_numpy import matrix
>>> from ROOT import TMatrixD
>>> a = TMatrixD(4, 4)
>>> a[1][2] = 2
>>> matrix(a)
matrix([[ 0., 0., 0., 0.],
[ 0., 0., 2., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]])
|
def build_query_fragment(query):
    """Wrap an XQuery string in a BaseX REST ``<query>`` document.

    Produces, e.g.::

        <query xmlns="http://basex.org/rest">
            <text><![CDATA[ (//city/name)[position() <= 5] ]]></text>
        </query>

    The query text is stripped of surrounding whitespace and embedded
    as CDATA, so it needs no XML escaping.
    """
    fragment = etree.Element('query', nsmap={None: 'http://basex.org/rest'})
    etree.SubElement(fragment, 'text').text = etree.CDATA(query.strip())
    return fragment
|
<query xmlns="http://basex.org/rest">
<text><![CDATA[ (//city/name)[position() <= 5] ]]></text>
</query>
|
def add_before(self, pipeline):
    """Add a Pipeline to be applied before this processing pipeline.

    Arguments:
        pipeline: The Pipeline or callable to apply before this
            Pipeline.

    Returns:
        This Pipeline, allowing calls to be chained.
    """
    # Wrap bare callables so everything in self.pipes is a Pipeline.
    if not isinstance(pipeline, Pipeline):
        pipeline = Pipeline(pipeline)
    # Concatenation already builds a fresh list, so no defensive
    # slice copies of either operand are needed.
    self.pipes = pipeline.pipes + self.pipes
    return self
|
Add a Pipeline to be applied before this processing pipeline.
Arguments:
pipeline: The Pipeline or callable to apply before this
Pipeline.
|
def send_sms(self, text, **kw):
    """
    Send an SMS. Since Free only allows us to send SMSes to ourselves you
    don't have to provide your phone number.

    Extra keyword arguments are forwarded to ``requests.get``;
    certificate verification defaults to off.
    """
    kw.setdefault("verify", False)
    if not kw["verify"]:
        # remove SSL warning
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    payload = {
        'user': self._user,
        'pass': self._passwd,
        'msg': text
    }
    response = requests.get(FreeClient.BASE_URL, params=payload, **kw)
    return FreeResponse(response.status_code)
|
Send an SMS. Since Free only allows us to send SMSes to ourselves you
don't have to provide your phone number.
|
def add_uid(self, uid, selfsign=True, **prefs):
    """
    Add a User ID to this key.
    :param uid: The user id to add
    :type uid: :py:obj:`~pgpy.PGPUID`
    :param selfsign: Whether or not to self-sign the user id before adding it
    :type selfsign: ``bool``
    Valid optional keyword arguments are identical to those of self-signatures for :py:meth:`PGPKey.certify`.
    Any such keyword arguments are ignored if selfsign is ``False``
    """
    # Attach this key as the uid's parent before signing/adding it.
    uid._parent = self
    if selfsign:
        # |= on the uid attaches the self-certification signature to it
        uid |= self.certify(uid, SignatureType.Positive_Cert, **prefs)
    # |= on the key adds the (possibly self-signed) uid to this key
    self |= uid
|
Add a User ID to this key.
:param uid: The user id to add
:type uid: :py:obj:`~pgpy.PGPUID`
:param selfsign: Whether or not to self-sign the user id before adding it
:type selfsign: ``bool``
Valid optional keyword arguments are identical to those of self-signatures for :py:meth:`PGPKey.certify`.
Any such keyword arguments are ignored if selfsign is ``False``
|
def Hakim_Steinberg_Stiel(T, Tc, Pc, omega, StielPolar=0):
    r'''Estimate the surface tension of a polar liquid with the
    Hakim-Steinberg-Stiel corresponding-states correlation [1]_.

    .. math::
        \sigma = 4.60104\times 10^{-7} P_c^{2/3}T_c^{1/3}Q_p \left(\frac{1-T_r}{0.4}\right)^m

        Q_p = 0.1574+0.359\omega-1.769\chi-13.69\chi^2-0.51\omega^2+1.298\omega\chi

        m = 1.21+0.5385\omega-14.61\chi-32.07\chi^2-1.65\omega^2+22.03\omega\chi

    Parameters
    ----------
    T : float
        Temperature of fluid [K]
    Tc : float
        Critical temperature of fluid [K]
    Pc : float
        Critical pressure of fluid [Pa]
    omega : float
        Acentric factor for fluid, [-]
    StielPolar : float, optional
        Stiel Polar Factor, [-]

    Returns
    -------
    sigma : float
        Liquid surface tension, N/m

    Notes
    -----
    The correlation is evaluated in its original units (atm for the
    critical pressure, mN/m for the surface tension) and the result is
    converted to N/m before being returned.

    Examples
    --------
    1-butanol, as compared to value in CRC Handbook of 0.02493.

    >>> Hakim_Steinberg_Stiel(298.15, 563.0, 4414000.0, 0.59, StielPolar=-0.07872)
    0.021907902575190447

    References
    ----------
    .. [1] Hakim, D. I., David Steinberg, and L. I. Stiel. "Generalized
       Relationship for the Surface Tension of Polar Fluids." Industrial &
       Engineering Chemistry Fundamentals 10, no. 1 (February 1, 1971): 174-75.
       doi:10.1021/i160037a032.
    '''
    # Polynomial fits in the acentric factor and the Stiel polar factor.
    Qp = (0.1574 + 0.359*omega - 1.769*StielPolar - 13.69*StielPolar**2
          - 0.510*omega**2 + 1.298*StielPolar*omega)
    exponent = (1.210 + 0.5385*omega - 14.61*StielPolar - 32.07*StielPolar**2
                - 1.656*omega**2 + 22.03*StielPolar*omega)
    reduced_T = T/Tc
    Pc_atm = Pc/101325.
    # The correlation yields mN/m with Pc expressed in atm.
    sigma_mNm = Pc_atm**(2/3.)*Tc**(1/3.)*Qp*((1 - reduced_T)/0.4)**exponent
    return sigma_mNm/1000.  # convert to N/m
|
r'''Calculates air-water surface tension using the reference fluids methods
of [1]_.
.. math::
\sigma = 4.60104\times 10^{-7} P_c^{2/3}T_c^{1/3}Q_p \left(\frac{1-T_r}{0.4}\right)^m
Q_p = 0.1574+0.359\omega-1.769\chi-13.69\chi^2-0.51\omega^2+1.298\omega\chi
m = 1.21+0.5385\omega-14.61\chi-32.07\chi^2-1.65\omega^2+22.03\omega\chi
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
Pc : float
Critical pressure of fluid [Pa]
omega : float
Acentric factor for fluid, [-]
StielPolar : float, optional
Stiel Polar Factor, [-]
Returns
-------
sigma : float
Liquid surface tension, N/m
Notes
-----
Original equation for m and Q are used. Internal units are atm and mN/m.
Examples
--------
1-butanol, as compared to value in CRC Handbook of 0.02493.
>>> Hakim_Steinberg_Stiel(298.15, 563.0, 4414000.0, 0.59, StielPolar=-0.07872)
0.021907902575190447
References
----------
.. [1] Hakim, D. I., David Steinberg, and L. I. Stiel. "Generalized
Relationship for the Surface Tension of Polar Fluids." Industrial &
Engineering Chemistry Fundamentals 10, no. 1 (February 1, 1971): 174-75.
doi:10.1021/i160037a032.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.