code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def is30(msg):
    """Check whether a message is likely to be BDS code 2,0.

    Args:
        msg (String): 28 bytes hexadecimal message string

    Returns:
        bool: True or False
    """
    if allzeros(msg):
        return False

    bits = hex2bin(data(msg))

    # the first data byte must carry the BDS 2,0 signature
    if bits[0:8] != '00110000':
        return False

    # threat type 3 not assigned
    if bits[28:30] == '11':
        return False

    # reserved for ACAS III, in far future
    if bin2int(bits[15:22]) >= 48:
        return False

    return True
|
Check if a message is likely to be BDS code 2,0
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
bool: True or False
|
def atlas_get_peer( peer_hostport, peer_table=None ):
    """
    Look up the info we hold for the given peer.
    Returns None when the peer is unknown.
    """
    with AtlasPeerTableLocked(peer_table) as ptbl:
        peer_info = ptbl.get(peer_hostport, None)
    return peer_info
|
Get the given peer's info
|
def set_cli_options(config, arguments=None):
    """Set any configuration options which have a CLI value set.

    Args:
        config (confpy.core.config.Configuration): A configuration object which
            has been initialized with options.
        arguments (iter of str): An iterable of strings which contains the CLI
            arguments passed. If nothing is given then sys.argv is used.

    Returns:
        confpy.core.config.Configuration: A configuration object with CLI
            values set.

    The pattern to follow when setting CLI values is:

        <section>_<option>

    Each value should be lower case and separated by underscores.
    """
    arguments = arguments or sys.argv[1:]
    parser = argparse.ArgumentParser()
    # Register one optional argument per (section, option) pair.
    for cli_name, _, _ in _iter_cli_option_names(config):
        parser.add_argument('--{0}'.format(cli_name))
    args, _ = parser.parse_known_args(arguments)
    args = vars(args)
    # argparse defaults unset options to None, so None means "not given on
    # the CLI". Checking "is not None" (rather than truthiness) allows the
    # user to explicitly set an option to an empty string.
    for cli_name, section, option_name in _iter_cli_option_names(config):
        value = args.get(cli_name)
        if value is not None:
            setattr(section, option_name, value)
    return config


def _iter_cli_option_names(config):
    """Yield (cli_name, section, option_name) for every config option."""
    for section_name, section in config:
        for option_name, _ in section:
            yield (
                '{0}_{1}'.format(section_name.lower(), option_name.lower()),
                section,
                option_name,
            )
|
Set any configuration options which have a CLI value set.
Args:
config (confpy.core.config.Configuration): A configuration object which
has been initialized with options.
arguments (iter of str): An iterable of strings which contains the CLI
arguments passed. If nothing is given then sys.argv is used.
Returns:
confpy.core.config.Configuration: A configuration object with CLI
values set.
The pattern to follow when setting CLI values is:
<section>_<option>
Each value should be lower case and separated by underscores.
|
def export_legacy_ldcoeffs(self, models, filename=None, photon_weighted=True):
    """
    Exports CK2004 limb darkening coefficients to a PHOEBE legacy
    compatible format.

    @models: the path (including the filename) of legacy's models.list
    @filename: output filename for storing the table; if None, the table
        is printed to stdout instead
    @photon_weighted: export the photon-weighted (True) or the
        energy-weighted (False) coefficients
    """
    if photon_weighted:
        grid = self._ck2004_ld_photon_grid
    else:
        grid = self._ck2004_ld_energy_grid

    f = None
    if filename is not None:
        import time
        f = open(filename, 'w')
        f.write('# PASS_SET %s\n' % self.pbset)
        f.write('# PASSBAND %s\n' % self.pbname)
        f.write('# VERSION 1.0\n\n')
        f.write('# Exported from PHOEBE-2 passband on %s\n' % (time.ctime()))
        f.write('# The coefficients are computed for the %s-weighted regime.\n\n' % ('photon' if photon_weighted else 'energy'))

    mods = np.loadtxt(models)
    for mod in mods:
        # Locate this model's (Teff, logg, abun) indices on the intensity
        # axes; models.list stores logg and abundance multiplied by 10.
        Tindex = np.argwhere(self._ck2004_intensity_axes[0] == mod[0])[0][0]
        lindex = np.argwhere(self._ck2004_intensity_axes[1] == mod[1]/10)[0][0]
        mindex = np.argwhere(self._ck2004_intensity_axes[2] == mod[2]/10)[0][0]
        if f is None:
            print('%6.3f '*11 % tuple(grid[Tindex, lindex, mindex].tolist()))
        else:
            # BUGFIX: write the selected grid; previously the photon-weighted
            # grid was always written, even when photon_weighted=False.
            f.write(('%6.3f '*11+'\n') % tuple(grid[Tindex, lindex, mindex].tolist()))

    if f is not None:
        f.close()
|
@models: the path (including the filename) of legacy's models.list
@filename: output filename for storing the table
Exports CK2004 limb darkening coefficients to a PHOEBE legacy
compatible format.
|
def set_index_edited(self, index, edited):
    """Mark the conf at *index* as edited or clean.

    Edited files will be displayed with a '*'.

    :param index: the index that was edited
    :type index: QModelIndex
    :param edited: True if the file was edited, False otherwise
    :type edited: bool
    :returns: None
    :rtype: None
    :raises: None
    """
    row = index.row()
    self.__edited[row] = edited
    # notify any attached views that this cell changed
    self.dataChanged.emit(index, index)
|
Set whether the conf was edited or not.
Edited files will be displayed with a \'*\'
:param index: the index that was edited
:type index: QModelIndex
:param edited: if the file was edited, set edited to True, else False
:type edited: bool
:returns: None
:rtype: None
:raises: None
|
def query(self,
          startTime=None,
          endTime=None,
          sinceServerStart=False,
          level="WARNING",
          services="*",
          machines="*",
          server="*",
          codes=None,
          processIds=None,
          export=False,
          exportType="CSV",  # CSV or TAB
          out_path=None
          ):
    """
    The query operation on the logs resource provides a way to
    aggregate, filter, and page through logs across the entire site.

    Inputs:
       startTime/endTime - optional datetime bounds for the query
       sinceServerStart - if True, only return logs since server start
       level - one of SEVERE, WARNING, INFO, FINE, VERBOSE, DEBUG
       services/machines/server - '*' for all, or a comma-separated list
       codes/processIds - optional lists used to narrow the filter
       export - if True (and out_path is given) write results to a file
       exportType - 'CSV' or 'TAB'
       out_path - destination path for the exported file
    """
    # BUGFIX: the defaults were mutable lists ([]) shared between calls;
    # use None sentinels instead.
    codes = codes if codes is not None else []
    processIds = processIds if processIds is not None else []
    allowed_levels = ("SEVERE", "WARNING", "INFO",
                      "FINE", "VERBOSE", "DEBUG")
    qFilter = {
        "services": "*",
        "machines": "*",
        "server": "*"
    }
    if len(processIds) > 0:
        qFilter['processIds'] = processIds
    if len(codes) > 0:
        qFilter['codes'] = codes
    params = {
        "f": "json",
        "sinceServerStart": sinceServerStart,
        "pageSize": 10000
    }
    # isinstance(None, datetime) is False, so the extra None check is
    # redundant and has been dropped.
    if isinstance(startTime, datetime):
        params['startTime'] = startTime.strftime("%Y-%m-%dT%H:%M:%S")
    if isinstance(endTime, datetime):
        params['endTime'] = endTime.strftime("%Y-%m-%dT%H:%M:%S")
    if level.upper() in allowed_levels:
        params['level'] = level
    if server != "*":
        qFilter['server'] = server.split(',')
    if services != "*":
        qFilter['services'] = services.split(',')
    if machines != "*":
        qFilter['machines'] = machines.split(",")
    params['filter'] = qFilter
    if export and out_path is not None:
        messages = self._post(self._url + "/query", params,
                              securityHandler=self._securityHandler,
                              proxy_url=self._proxy_url,
                              proxy_port=self._proxy_port)
        # BUGFIX: open() takes the path positionally (it has no 'name'
        # keyword in Python 3); newline='' is the csv-module convention
        # replacing the old Python 2 'wb' mode.
        with open(out_path, mode='w', newline='') as f:
            if exportType == "TAB":
                csvwriter = csv.writer(f, delimiter='\t')
            else:
                csvwriter = csv.writer(f)
            hasKeys = False
            for message in messages['logMessages']:
                if not hasKeys:
                    # write the header row once, using the first message's keys
                    csvwriter.writerow(message.keys())
                    hasKeys = True
                csvwriter.writerow(message.values())
        return out_path
    return self._post(self._url + "/query", params,
                      securityHandler=self._securityHandler,
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port)
|
The query operation on the logs resource provides a way to
aggregate, filter, and page through logs across the entire site.
Inputs:
|
def commit(self):
    """ Store metadata on this Job to the backend. """
    logger.debug('Committing job {0}'.format(self.name))
    payload = self._serialize()
    self.backend.commit_job(payload)
    # propagate the commit up the chain
    self.parent.commit()
|
Store metadata on this Job to the backend.
|
def mdr_mutual_information(X, Y, labels, base=2):
    """Calculates the MDR mutual information, I(XY;labels), in the given base.

    Features X and Y are first combined into a single MDR model; the mutual
    information is then computed between that model's predictions and the
    class labels, i.e. I(XY;labels) = H(labels) - H(labels|XY).

    Parameters
    ----------
    X: array-like (# samples)
        An array of values corresponding to one feature in the MDR model
    Y: array-like (# samples)
        An array of values corresponding to one feature in the MDR model
    labels: array-like (# samples)
        The class labels corresponding to features X and Y
    base: integer (default: 2)
        The base in which to calculate MDR mutual information

    Returns
    ----------
    mdr_mutual_information: float
        The MDR mutual information
    """
    mdr_predictions = _mdr_predict(X, Y, labels)
    return mutual_information(mdr_predictions, labels, base=base)
|
Calculates the MDR mutual information, I(XY;labels), in the given base
MDR mutual information is calculated by combining variables X and Y into a single MDR model then calculating
the mutual information between the resulting model's predictions and the labels.
Parameters
----------
X: array-like (# samples)
An array of values corresponding to one feature in the MDR model
Y: array-like (# samples)
An array of values corresponding to one feature in the MDR model
labels: array-like (# samples)
The class labels corresponding to features X and Y
base: integer (default: 2)
The base in which to calculate MDR mutual information
Returns
----------
mdr_mutual_information: float
The MDR mutual information calculated according to the equation I(XY;labels) = H(labels) - H(labels|XY)
|
def _update_metadata(self, kwargs=None):
'''Extract any additional attributes to hold with the instance
from kwargs
'''
# If not given metadata, use instance.list to get it for container
if kwargs == None and hasattr(self, 'name'):
kwargs = self._list(self.name, quiet=True, return_json=True)
# Add acceptable arguments
for arg in ['pid', 'name']:
# Skip over non-iterables:
if arg in kwargs:
setattr(self, arg, kwargs[arg])
if "image" in kwargs:
self._image = kwargs['image']
elif "container_image" in kwargs:
self._image = kwargs['container_image']
|
Extract any additional attributes to hold with the instance
from kwargs
|
def sel_list_pres(ds_sfc_x):
    '''
    Select the pressure levels appropriate for model level data download.

    The returned levels (in hPa, as strings) bracket the surface-pressure
    range found in ``ds_sfc_x.sp``.
    '''
    sp_min, sp_max = ds_sfc_x.sp.min().values, ds_sfc_x.sp.max().values
    # standard pressure levels available for download, in hPa
    std_levels_hpa = [
        '1', '2', '3', '5', '7', '10',
        '20', '30', '50', '70', '100', '125',
        '150', '175', '200', '225', '250', '300',
        '350', '400', '450', '500', '550', '600',
        '650', '700', '750', '775', '800', '825',
        '850', '875', '900', '925', '950', '975',
        '1000',
    ]
    ser_level_pa = pd.Series(std_levels_hpa).map(int) * 100
    # bracket [sp_min, sp_max]: one level above the maximum and one level
    # below the minimum surface pressure
    idx_hi = ser_level_pa[ser_level_pa > sp_max].idxmin()
    idx_lo = ser_level_pa[ser_level_pa < sp_min].idxmax()
    sel = ser_level_pa.loc[idx_lo:idx_hi] / 100
    return sel.map(int).map(str).to_list()
|
select proper levels for model level data download
|
def add_user(self, attrs):
    """Add a user to the LDAP directory.

    attrs is a dict of user attributes; the DN is built from
    self.dn_user_attr and self.userdn.

    Raises UserAlreadyExists if the DN is already taken.
    """
    ldap_client = self._bind()
    # encoding crap
    attrs_srt = self.attrs_pretreatment(attrs)
    attrs_srt[self._byte_p2('objectClass')] = self.objectclasses
    # construct the user's DN: <dn_user_attr>=<escaped value>,<userdn>
    dn = \
        self._byte_p2(self.dn_user_attr) + \
        self._byte_p2('=') + \
        self._byte_p2(ldap.dn.escape_dn_chars(
            attrs[self.dn_user_attr]
        )
        ) + \
        self._byte_p2(',') + \
        self._byte_p2(self.userdn)
    # gen the ldif first add_s and add the user
    ldif = modlist.addModlist(attrs_srt)
    try:
        ldap_client.add_s(dn, ldif)
    except ldap.ALREADY_EXISTS:
        # BUGFIX: release the connection before propagating; previously
        # this path leaked the bound ldap client.
        ldap_client.unbind_s()
        raise UserAlreadyExists(attrs[self.key], self.backend_name)
    except Exception as e:
        ldap_client.unbind_s()
        self._exception_handler(e)
    ldap_client.unbind_s()
|
add a user
|
def approx_aic(self, ts):
    """
    Return an approximation to the Akaike Information Criterion (AIC)
    for *ts* under the current model.

    This is an approximation because the conditional likelihood is used
    rather than the exact likelihood. Please see
    [[https://en.wikipedia.org/wiki/Akaike_information_criterion]] for more
    information on this measure.

    Parameters
    ----------
    ts:
        the timeseries to evaluate under the current model

    Returns an approximation to the AIC as a double.
    """
    java_ts = _py2java(self._ctx, Vectors.dense(ts))
    return self._jmodel.approxAIC(java_ts)
|
Calculates an approximation to the Akaike Information Criterion (AIC). This is an approximation
as we use the conditional likelihood, rather than the exact likelihood. Please see
[[https://en.wikipedia.org/wiki/Akaike_information_criterion]] for more information on this
measure.
Parameters
----------
ts:
the timeseries to evaluate under current model
Returns an approximation to the AIC under the current model as a double
|
def _interpret_regexp(self, string, flags):
    '''Perform string escape - for regexp literals.

    Rewrites a JavaScript regexp body so it can be handed to Python's
    ``re`` module: \\uXXXX / \\xXX escapes are expanded to the literal
    character, recognised escape sequences are kept, and the \\s / \\S
    shorthands are expanded into explicit character sets (their JS
    definitions differ from Python's).
    '''
    self.index = 0
    self.length = len(string)
    self.source = string
    self.lineNumber = 0
    self.lineStart = 0
    octal = False
    st = ''
    # inside_square tracks whether we are within a [...] character class;
    # shorthand expansions are bracketed only when OUTSIDE a class.
    inside_square = 0
    while (self.index < self.length):
        template = '[%s]' if not inside_square else '%s'
        ch = self.source[self.index]
        self.index += 1
        if ch == '\\':
            ch = self.source[self.index]
            self.index += 1
            if (not isLineTerminator(ch)):
                if ch == 'u':
                    # \uXXXX: expand to the literal character when exactly
                    # 4 hex digits follow, otherwise keep a bare 'u'.
                    digs = self.source[self.index:self.index + 4]
                    if len(digs) == 4 and all(isHexDigit(d) for d in digs):
                        st += template % unichr(int(digs, 16))
                        self.index += 4
                    else:
                        st += 'u'
                elif ch == 'x':
                    # \xXX: same treatment with 2 hex digits.
                    digs = self.source[self.index:self.index + 2]
                    if len(digs) == 2 and all(isHexDigit(d) for d in digs):
                        st += template % unichr(int(digs, 16))
                        self.index += 2
                    else:
                        st += 'x'
                # special meaning - single char.
                elif ch == '0':
                    st += '\\0'
                elif ch == 'n':
                    st += '\\n'
                elif ch == 'r':
                    st += '\\r'
                elif ch == 't':
                    st += '\\t'
                elif ch == 'f':
                    st += '\\f'
                elif ch == 'v':
                    st += '\\v'
                # unescape special single characters like . so that they are interpreted literally
                elif ch in REGEXP_SPECIAL_SINGLE:
                    st += '\\' + ch
                # character groups
                elif ch == 'b':
                    st += '\\b'
                elif ch == 'B':
                    st += '\\B'
                elif ch == 'w':
                    st += '\\w'
                elif ch == 'W':
                    st += '\\W'
                elif ch == 'd':
                    st += '\\d'
                elif ch == 'D':
                    st += '\\D'
                elif ch == 's':
                    # JS \s includes a wider set of whitespace than Python's
                    st += template % u' \f\n\r\t\v\u00a0\u1680\u180e\u2000-\u200a\u2028\u2029\u202f\u205f\u3000\ufeff'
                elif ch == 'S':
                    st += template % u'\u0000-\u0008\u000e-\u001f\u0021-\u009f\u00a1-\u167f\u1681-\u180d\u180f-\u1fff\u200b-\u2027\u202a-\u202e\u2030-\u205e\u2060-\u2fff\u3001-\ufefe\uff00-\uffff'
                else:
                    if isDecimalDigit(ch):
                        # backreference (or octal-like escape): keep the
                        # whole run of digits escaped
                        num = ch
                        while self.index < self.length and isDecimalDigit(
                                self.source[self.index]):
                            num += self.source[self.index]
                            self.index += 1
                        st += '\\' + num
                    else:
                        st += ch  # DONT ESCAPE!!!
            else:
                # escaped line terminator: track line bookkeeping (\r\n
                # counts as a single terminator)
                self.lineNumber += 1
                if (ch == '\r' and self.source[self.index] == '\n'):
                    self.index += 1
                self.lineStart = self.index
        else:
            if ch == '[':
                inside_square = True
            elif ch == ']':
                inside_square = False
            st += ch
    # print string, 'was transformed to', st
    return st
|
Perform string escape - for regexp literals
|
def returnDepositsWithdrawals(self, start=0, end=2**32-1):
    """Return your deposit and withdrawal history between *start* and
    *end*, both given as UNIX timestamps (sent as the "start" and "end"
    POST parameters)."""
    return self._private(
        'returnDepositsWithdrawals', start=start, end=end)
|
Returns your deposit and withdrawal history within a range,
specified by the "start" and "end" POST parameters, both of which
should be given as UNIX timestamps.
|
def get_destinations(self, ascii_listing):
    """
    Return the sorted line numbers that are used as jump targets
    anywhere in the listing.
    """
    self.destinations = set()

    def _collect(matchobj):
        numbers = matchobj.group("no")
        if numbers:
            self.destinations.update(
                n.strip() for n in numbers.split(","))

    # the regex substitution is only used to visit every match
    for line in self._iter_lines(ascii_listing):
        self.renum_regex.sub(_collect, line)

    return sorted(int(no) for no in self.destinations if no)
|
returns all line numbers that are used in a jump.
|
def update_metadata_filters(metadata, jupyter_md, cell_metadata):
    """Update or set the notebook and cell metadata filters"""
    cell_metadata = [key for key in cell_metadata
                     if key not in ('language', 'magic_args')]

    if 'cell_metadata_filter' in metadata.get('jupytext', {}):
        metadata_filter = metadata_filter_as_dict(
            metadata.get('jupytext', {})['cell_metadata_filter'])
        # drop excluded keys that are now present in the cell metadata
        if isinstance(metadata_filter.get('excluded'), list):
            metadata_filter['excluded'] = [
                key for key in metadata_filter['excluded']
                if key not in cell_metadata]
        metadata_filter.setdefault('additional', [])
        # make sure every cell metadata key is listed as additional
        if isinstance(metadata_filter.get('additional'), list):
            for key in cell_metadata:
                if key not in metadata_filter['additional']:
                    metadata_filter['additional'].append(key)
        metadata.setdefault('jupytext', {})['cell_metadata_filter'] = \
            metadata_filter_as_string(metadata_filter)

    if not jupyter_md:
        # Set a metadata filter equal to the current metadata in script
        cell_metadata = {'additional': cell_metadata, 'excluded': 'all'}
        metadata.setdefault('jupytext', {})['notebook_metadata_filter'] = '-all'
        metadata.setdefault('jupytext', {})['cell_metadata_filter'] = \
            metadata_filter_as_string(cell_metadata)
|
Update or set the notebook and cell metadata filters
|
def quick_summary(nml2_doc):
    '''
    Build a short textual summary of a NeuroML 2 document.

    Or better just use nml2_doc.summary(show_includes=False)
    '''
    info = 'Contents of NeuroML 2 document: %s\n'%nml2_doc.id
    for attr_name, attr_value in inspect.getmembers(nml2_doc):
        # only report non-empty list attributes, skipping private/dunder ones
        if (isinstance(attr_value, list) and len(attr_value) > 0
                and not attr_name.endswith('_')):
            info+='  %s:\n    ['%attr_name
            for entry in attr_value:
                # pick the most specific label available: id > href > name
                extra = '???'
                extra = entry.name if hasattr(entry,'name') else extra
                extra = entry.href if hasattr(entry,'href') else extra
                extra = entry.id if hasattr(entry,'id') else extra
                info+=" %s (%s),"%(entry, extra)
            info+=']\n'
    return info
|
Or better just use nml2_doc.summary(show_includes=False)
|
def set_extended_elements(self):
    """Parse and set the non-required elements, in a fixed order."""
    for setter in (self.set_creative_commons,
                   self.set_owner,
                   self.set_subtitle,
                   self.set_summary):
        setter()
|
Parses and sets non required elements
|
def protect_libraries_from_patching():
    """
    In this function we delete some modules from `sys.modules` dictionary and import them again inside
    `_pydev_saved_modules` in order to save their original copies there. After that we can use these
    saved modules within the debugger to protect them from patching by external libraries (e.g. gevent).
    """
    patched = ['threading', 'thread', '_thread', 'time', 'socket', 'Queue', 'queue', 'select',
               'xmlrpclib', 'SimpleXMLRPCServer', 'BaseHTTPServer', 'SocketServer',
               'xmlrpc.client', 'xmlrpc.server', 'http.server', 'socketserver']

    for name in patched:
        try:
            __import__(name)
        except:
            # Module not available on this Python version/platform; skip it.
            pass

    # Snapshot the currently-imported patched modules. A set gives O(1)
    # membership tests, and the dict comprehension replaces dict() over a
    # list comprehension.
    patched_names = set(patched)
    patched_modules = {name: module for name, module in sys.modules.items()
                       if name in patched_names}

    # Temporarily remove them so the import below re-imports fresh copies.
    for name in patched_modules:
        del sys.modules[name]

    # import for side effects: _pydev_saved_modules stores pristine copies
    import _pydev_imps._pydev_saved_modules

    # Restore the original modules for the rest of the program.
    for name in patched_modules:
        sys.modules[name] = patched_modules[name]
|
In this function we delete some modules from `sys.modules` dictionary and import them again inside
`_pydev_saved_modules` in order to save their original copies there. After that we can use these
saved modules within the debugger to protect them from patching by external libraries (e.g. gevent).
|
def dispatch(self, req):
    """
    Called by the Routes middleware to dispatch the request to the
    appropriate controller. If a webob exception is raised, it is
    returned; if some other exception is raised, the webob
    `HTTPInternalServerError` exception is raised. Otherwise, the
    return value of the controller is returned.

    :param req: the incoming webob request; routing results are expected
        in ``req.environ['wsgiorg.routing_args']``.
    """
    # Grab the request parameters
    params = req.environ['wsgiorg.routing_args'][1]
    # What controller is authoritative?
    controller = params.pop('controller')
    # Determine its name ("module:Class") for log messages
    cont_class = controller.__class__
    cont_name = "%s:%s" % (cont_class.__module__, cont_class.__name__)
    # Determine the origin of the request; include the authenticated
    # user when available
    origin = req.remote_addr if req.remote_addr else '[local]'
    if req.remote_user:
        origin = '%s (%s)' % (origin, req.remote_user)
    # Log that we're processing the request
    LOG.info("%s %s %s (controller %r)" %
             (origin, req.method, req.url, cont_name))
    # Call into that controller
    try:
        return controller(req, params)
    except webob.exc.HTTPException as e:
        # Return the HTTP exception directly
        return e
    except exceptions.AppathyResponse as e:
        # Return the webob.Response directly
        return e.response
    except Exception as e:
        # Log the controller exception
        LOG.exception("Exception occurred in controller %r" % cont_name)
        # These exceptions result in a 500. Note we're
        # intentionally not including the exception message, since
        # it could contain sensitive data.
        return webob.exc.HTTPInternalServerError()
|
Called by the Routes middleware to dispatch the request to the
appropriate controller. If a webob exception is raised, it is
returned; if some other exception is raised, the webob
`HTTPInternalServerError` exception is raised. Otherwise, the
return value of the controller is returned.
|
def netconf_session_start_session_id(self, **kwargs):
    """Auto Generated Code

    Builds a ``netconf-session-start`` notification element carrying the
    mandatory ``session-id`` leaf, then hands the config tree to the
    callback (``kwargs['callback']`` or ``self._callback``).
    """
    config = ET.Element("config")
    notification = ET.SubElement(
        config, "netconf-session-start",
        xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-notifications")
    leaf = ET.SubElement(notification, "session-id")
    leaf.text = kwargs.pop('session_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
Auto Generated Code
|
def _CreateAllTypes(self, enumTypes, dataTypes, managedTypes):
    """ Create pyVmomi types from pyVmomi type definitions """
    # Register each managed type's version together with its version parents.
    for typeInfo in managedTypes:
        name, version = typeInfo[0], typeInfo[3]
        VmomiSupport.AddVersion(version, '', '1.0', 0, name)
        VmomiSupport.AddVersionParent(version, 'vmodl.version.version0')
        VmomiSupport.AddVersionParent(version, 'vmodl.version.version1')
        VmomiSupport.AddVersionParent(version, version)

    # Create partial types
    creators = ((VmomiSupport.CreateEnumType, enumTypes),
                (VmomiSupport.CreateDataType, dataTypes),
                (VmomiSupport.CreateManagedType, managedTypes))
    for creator, infos in creators:
        for typeInfo in infos:
            try:
                creator(*typeInfo)
            except Exception:
                # Ignore errors due to duplicate importing
                pass
|
Create pyVmomi types from pyVmomi type definitions
|
def search_globs(path, patterns):
    # type: (str, List[str]) -> bool
    """ Test whether the given *path* matches any pattern in *patterns*.

    Args:
        path (str):
            A file path to test for matches.
        patterns (list[str]):
            A list of glob string patterns to test against. If *path*
            matches any of them, return True.

    Returns:
        bool: **True** if the ``path`` matches any pattern in *patterns*.
    """
    for pattern in patterns:
        if not pattern:
            continue
        if pattern.startswith('/'):
            # A leading slash anchors the pattern at the path root, so the
            # match has to start at position 0 of the root-relative path.
            regex = fnmatch.translate(pattern[1:]).replace('\\Z', '')
            relative = path[1:] if path.startswith('/') else path
            match = re.search(regex, relative)
            if match is not None and match.start() == 0:
                return True
        else:
            # Unanchored pattern: a match anywhere in the path counts.
            regex = fnmatch.translate(pattern).replace('\\Z', '')
            if re.search(regex, path) is not None:
                return True
    return False
|
Test whether the given *path* contains any patterns in *patterns*
Args:
path (str):
A file path to test for matches.
patterns (list[str]):
A list of glob string patterns to test against. If *path* matches
any of those patters, it will return True.
Returns:
bool: **True** if the ``path`` matches any pattern in *patterns*.
|
def remove_listener(registry, listener):
    """
    Removes a listener from the registry

    :param registry: A registry (a list)
    :param listener: The listener to remove
    :return: True if the listener was in the list
    """
    if listener is None:
        return False
    try:
        registry.remove(listener)
    except ValueError:
        # listener was not registered
        return False
    return True
|
Removes a listener from the registry
:param registry: A registry (a list)
:param listener: The listener to remove
:return: True if the listener was in the list
|
def parse_query(query):
    """
    Given a simplified XPath query string, returns an array of normalized
    query parts.

    Empty parts (from leading or doubled slashes) collapse into at most one
    single empty-string marker.
    """
    norm = []
    for part in query.split('/'):
        part = part.strip()
        if part:
            norm.append(part)
        elif '' not in norm:
            # keep a single empty marker only
            norm.append('')
    return norm
|
Given a simplified XPath query string, returns an array of normalized query parts.
|
def setHoverIcon( self, column, icon ):
    """
    Sets the icon to use when the user hovers over the item for the
    given column.  (A copy of *icon* is stored in the per-column
    hover-icon cache.)

    :param column | <int>
           icon   | <QtGui.QIcon>
    """
    self._hoverIcon[column] = QtGui.QIcon(icon)
|
Returns the icon to use when coloring when the user hovers over
the item for the given column.
:param column | <int>
icon | <QtGui.QIcon)
|
def integrateFullOrbit_c(pot,yo,t,int_method,rtol=None,atol=None,dt=None):
    """
    NAME:
       integrateFullOrbit_c
    PURPOSE:
       C integrate an ode for a FullOrbit
    INPUT:
       pot - Potential or list of such instances
       yo - initial condition [q,p]
       t - set of times at which one wants the result
       int_method= 'leapfrog_c', 'rk4_c', 'rk6_c', 'symplec4_c'
       rtol, atol
       dt= (None) force integrator to use this stepsize (default is to automatically determine one))
    OUTPUT:
       (y,err)
       y : array, shape (len(y0), len(t))
       Array containing the value of y for each desired time in t, \
       with the initial value y0 in the first row.
       err: error message, if not zero: 1 means maximum step reduction happened for adaptive integrators
    HISTORY:
       2011-11-13 - Written - Bovy (IAS)
    """
    rtol, atol= _parse_tol(rtol,atol)
    npot, pot_type, pot_args= _parse_pot(pot)
    int_method_c= _parse_integrator(int_method)
    if dt is None:
        # sentinel value the C integrator interprets as "choose the
        # stepsize automatically"
        dt= -9999.99
    #Set up result array: one 6-dim phase-space point per requested time
    result= nu.empty((len(t),6))
    err= ctypes.c_int(0)
    #Set up the C code: declare the argument types of the C entry point so
    #ctypes marshals the numpy arrays and scalars correctly
    ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
    integrationFunc= _lib.integrateFullOrbit
    integrationFunc.argtypes= [ndpointer(dtype=nu.float64,flags=ndarrayFlags),
                               ctypes.c_int,
                               ndpointer(dtype=nu.float64,flags=ndarrayFlags),
                               ctypes.c_int,
                               ndpointer(dtype=nu.int32,flags=ndarrayFlags),
                               ndpointer(dtype=nu.float64,flags=ndarrayFlags),
                               ctypes.c_double,
                               ctypes.c_double,
                               ctypes.c_double,
                               ndpointer(dtype=nu.float64,flags=ndarrayFlags),
                               ctypes.POINTER(ctypes.c_int),
                               ctypes.c_int]
    #Array requirements, first store old order so the caller's arrays can be
    #restored to Fortran order afterwards if that is how they came in
    f_cont= [yo.flags['F_CONTIGUOUS'],
             t.flags['F_CONTIGUOUS']]
    yo= nu.require(yo,dtype=nu.float64,requirements=['C','W'])
    t= nu.require(t,dtype=nu.float64,requirements=['C','W'])
    result= nu.require(result,dtype=nu.float64,requirements=['C','W'])
    #Run the C code
    integrationFunc(yo,
                    ctypes.c_int(len(t)),
                    t,
                    ctypes.c_int(npot),
                    pot_type,
                    pot_args,
                    ctypes.c_double(dt),
                    ctypes.c_double(rtol),ctypes.c_double(atol),
                    result,
                    ctypes.byref(err),
                    ctypes.c_int(int_method_c))
    if int(err.value) == -10: #pragma: no cover
        raise KeyboardInterrupt("Orbit integration interrupted by CTRL-C (SIGINT)")
    #Reset input arrays
    if f_cont[0]: yo= nu.asfortranarray(yo)
    if f_cont[1]: t= nu.asfortranarray(t)
    return (result,err.value)
|
NAME:
integrateFullOrbit_c
PURPOSE:
C integrate an ode for a FullOrbit
INPUT:
pot - Potential or list of such instances
yo - initial condition [q,p]
t - set of times at which one wants the result
int_method= 'leapfrog_c', 'rk4_c', 'rk6_c', 'symplec4_c'
rtol, atol
dt= (None) force integrator to use this stepsize (default is to automatically determine one))
OUTPUT:
(y,err)
y : array, shape (len(y0), len(t))
Array containing the value of y for each desired time in t, \
with the initial value y0 in the first row.
err: error message, if not zero: 1 means maximum step reduction happened for adaptive integrators
HISTORY:
2011-11-13 - Written - Bovy (IAS)
|
def get_meta(meta, name):
    """Retrieves the metadata variable 'name' from the 'meta' dict."""
    assert name in meta
    data = meta[name]
    meta_type = data['t']
    if meta_type in ('MetaString', 'MetaBool'):
        return data['c']
    if meta_type == 'MetaInlines':
        # Handle bug in pandoc 2.2.3 and 2.2.3.1: Return boolean value rather
        # than strings, as appropriate.
        content = data['c']
        if len(content) == 1 and content[0]['t'] == 'Str':
            if content[0]['c'] in ('true', 'True', 'TRUE'):
                return True
            if content[0]['c'] in ('false', 'False', 'FALSE'):
                return False
        return stringify(content)
    if meta_type == 'MetaList':
        return [stringify(v['c']) for v in data['c']]
    raise RuntimeError("Could not understand metadata variable '%s'." %
                       name)
|
Retrieves the metadata variable 'name' from the 'meta' dict.
|
def fileinfo(self, fid):
    """Ask lain about what he knows about the given file.

    If the given file exists in the file dict and is stale, it gets
    updated with the freshly fetched info."""
    if not isinstance(fid, str):
        raise TypeError("Your file ID must be a string")
    try:
        info = self.conn.make_call_with_cb("getFileinfo", fid).get(timeout=5)
    except queue.Empty as err:
        raise ValueError(
            "lain didn't produce a callback!\n"
            "Are you sure your query wasn't malformed?"
        ) from err
    if not info:
        warnings.warn(
            f"Your query for file with ID: '{fid}' failed.", RuntimeWarning
        )
    elif fid in self.__files and not self.__files[fid].updated:
        self.__files[fid].fileupdate(info)
    return info
|
Ask lain about what he knows about given file. If the given file
exists in the file dict, it will get updated.
|
def make_url(self, container=None, resource=None, query_items=None):
    """Create a URL from the specified parts."""
    segments = [self._base_url]
    if container:
        segments.append(container.strip('/'))
        # always add a trailing segment so the container ends with '/'
        segments.append(resource if resource else '')
    url = '/'.join(segments)
    if isinstance(query_items, (list, tuple, set)):
        # sequences are rendered by our own helper, not by PreparedRequest
        url += RestHttp._list_query_str(query_items)
        query_items = None
    prepared = requests.PreparedRequest()
    prepared.prepare_url(url, query_items)
    return prepared.url
|
Create a URL from the specified parts.
|
def list_files(self, offset=None, limit=None, api=None):
    """List the files contained in a folder.

    :param api: Api instance
    :param offset: Pagination offset
    :param limit: Pagination limit
    :return: List of files
    """
    api = api or self._API
    if not self.is_folder():
        # only folders can be listed
        raise SbgError('{name} is not a folder'.format(name=self.name))
    url = self._URL['list_folder'].format(id=self.id)
    return super(File, self.__class__)._query(
        api=api, url=url, offset=offset, limit=limit, fields='_all')
|
List files in a folder
:param api: Api instance
:param offset: Pagination offset
:param limit: Pagination limit
:return: List of files
|
def process_tick(self, tup):
    """Called every window_duration; processes and then acks the tuples
    buffered for the current window."""
    now = int(time.time())
    window_info = WindowContext(now - self.window_duration, now)
    self.processWindow(window_info, list(self.current_tuples))
    # ack each buffered tuple (loop variable renamed so it no longer
    # shadows the tick tuple parameter)
    for buffered_tup in self.current_tuples:
        self.ack(buffered_tup)
    self.current_tuples.clear()
|
Called every window_duration
|
def _dict_to_pio(d, class_=None):
    """
    Convert a single dictionary object to a Physical Information Object.

    :param d: Dictionary to convert.
    :param class_: Subclass of :class:`.Pio` to produce, if not unambiguous
    :return: Single object derived from :class:`.Pio`.
    """
    d = keys_to_snake_case(d)
    if class_:
        return class_(**d)
    if 'category' not in d:
        raise ValueError('Dictionary does not contains a category field: ' + ', '.join(d.keys()))
    # Dispatch on the category field; the alloy entries are legacy support.
    category_to_class = {
        'system': System,
        'system.chemical': ChemicalSystem,
        'system.chemical.alloy': Alloy,
        'system.chemical.alloy.phase': ChemicalSystem,
    }
    target = category_to_class.get(d['category'])
    if target is None:
        raise ValueError('Dictionary does not contain a valid top-level category: ' + str(d['category']))
    return target(**d)
|
Convert a single dictionary object to a Physical Information Object.
:param d: Dictionary to convert.
:param class_: Subclass of :class:`.Pio` to produce, if not unambiguous
:return: Single object derived from :class:`.Pio`.
|
def authorize_url(self, state=''):
    """ Return the user authorize url for the OAuth2 flow. """
    query = urlencode({
        'client_id': self.client_id,
        'response_type': 'code',
        'state': state,
        'redirect_uri': self.redirect_uri,
    })
    return 'https://openapi.youku.com/v2/oauth2/authorize?' + query
|
return user authorize url
|
def jcrop_css(css_url=None):
    """Load jcrop css file.

    :param css_url: The custom CSS URL; when None, serve the bundled copy
        locally or from the CDN depending on AVATARS_SERVE_LOCAL.
    """
    if css_url is None:
        serve_local = current_app.config['AVATARS_SERVE_LOCAL']
        if serve_local:
            css_url = url_for('avatars.static', filename='jcrop/css/jquery.Jcrop.min.css')
        else:
            css_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/css/jquery.Jcrop.min.css'
    return Markup('<link rel="stylesheet" href="%s">' % css_url)
|
Load jcrop css file.
:param css_url: The custom CSS URL.
|
def last_job_statuses(self) -> List[str]:
    """The last statuses of the jobs in this experiment (None entries
    are filtered out)."""
    return [status
            for status in self.jobs.values_list('status__status', flat=True)
            if status is not None]
|
The last statuses of the jobs in this experiment.
|
def set_logger(self, logger):
    """Subscribe *logger* to fortran log messages.

    If the underlying library does not export ``set_logger`` the request
    is logged as a warning and ignored.
    """
    # we don't expect anything back
    try:
        self.library.set_logger.restype = None
    except AttributeError:
        # logger.warn is deprecated; use logger.warning (lazy %-args)
        logger.warning("Tried to set logger but method is not implemented in %s", self.engine)
        return
    # as an argument we need a pointer to a fortran log func...
    self.library.set_logger.argtypes = [
        (fortran_log_functype)]
    self.library.set_logger(fortran_log_func)
|
subscribe to fortran log messages
|
def get_all_tasks(self, course):
    """
    Build the full task table for *course*.

    :return: a table containing taskid=>Task pairs

    Tasks that fail to load are skipped so that one broken task does not
    hide the rest of the course.
    """
    output = {}
    for taskid in self.get_readable_tasks(course):
        try:
            output[taskid] = self.get_task(course, taskid)
        except Exception:
            # A bare ``except`` would also swallow KeyboardInterrupt /
            # SystemExit; only broken tasks should be skipped.
            pass
    return output
|
:return: a table containing taskid=>Task pairs
|
def set_ylim(self, xlims, dx, xscale, reverse=False):
    """Set y limits for plot.

    This will set the limits for the y axis for the specific plot.

    Args:
        xlims (len-2 list of floats): The limits for the y axis.
        dx (float): Amount to increment by between the limits.
        xscale (str): Scale of the axis. Either `log` or `lin`.
        reverse (bool, optional): If True, reverse the axis tick marks.
            Default is False.
    """
    # NOTE(review): despite the x-style parameter names, this method
    # configures the *y* axis (first argument below is 'y').
    self._set_axis_limits('y', xlims, dx, xscale, reverse)
    return
|
Set y limits for plot.
This will set the limits for the y axis
for the specific plot.
Args:
ylims (len-2 list of floats): The limits for the axis.
dy (float): Amount to increment by between the limits.
yscale (str): Scale of the axis. Either `log` or `lin`.
reverse (bool, optional): If True, reverse the axis tick marks. Default is False.
|
def does_sqlatype_require_index_len(
        coltype: Union[TypeEngine, VisitableType]) -> bool:
    """
    Is the SQLAlchemy column type one that requires its indexes to have a
    length specified?

    (MySQL, at least, requires index length to be specified for ``BLOB`` and
    ``TEXT`` columns:
    http://dev.mysql.com/doc/refman/5.7/en/create-index.html.)
    """
    coltype = _coltype_to_typeengine(coltype)
    # Text and LargeBinary map to TEXT/BLOB, which need an index prefix length.
    return isinstance(coltype, (sqltypes.Text, sqltypes.LargeBinary))
|
Is the SQLAlchemy column type one that requires its indexes to have a
length specified?
(MySQL, at least, requires index length to be specified for ``BLOB`` and
``TEXT`` columns:
http://dev.mysql.com/doc/refman/5.7/en/create-index.html.)
|
def is_iterable(obj):
    """
    Are we being asked to look up a list of things, instead of a single thing?
    We check for the `__iter__` attribute so that this can cover types that
    don't have to be known by this module, such as NumPy arrays.

    Strings, however, should be considered as atomic values to look up, not
    iterables.

    We don't need to check for the Python 2 `unicode` type, because it doesn't
    have an `__iter__` attribute anyway.
    """
    if isinstance(obj, GeneratorType):
        return True
    return hasattr(obj, '__iter__') and not isinstance(obj, str)
|
Are we being asked to look up a list of things, instead of a single thing?
We check for the `__iter__` attribute so that this can cover types that
don't have to be known by this module, such as NumPy arrays.
Strings, however, should be considered as atomic values to look up, not
iterables.
We don't need to check for the Python 2 `unicode` type, because it doesn't
have an `__iter__` attribute anyway.
|
def recipients(messenger, addresses):
    """Structures recipients data.

    :param str|unicode, MessageBase messenger: MessengerBase heir
    :param list[str|unicode]|str|unicode addresses: recipients addresses or Django User
        model heir instances (NOTE: if supported by a messenger)
    :return: list of Recipient
    :rtype: list[Recipient]
    """
    # A string is treated as a registered messenger alias and resolved first.
    resolved = (
        get_registered_messenger_object(messenger)
        if isinstance(messenger, six.string_types)
        else messenger
    )
    return resolved._structure_recipients_data(addresses)
|
Structures recipients data.
:param str|unicode, MessageBase messenger: MessengerBase heir
:param list[str|unicode]|str|unicode addresses: recipients addresses or Django User
model heir instances (NOTE: if supported by a messenger)
:return: list of Recipient
:rtype: list[Recipient]
|
def request_vpc_peering_connection(requester_vpc_id=None, requester_vpc_name=None,
                                   peer_vpc_id=None, peer_vpc_name=None, name=None,
                                   peer_owner_id=None, peer_region=None, region=None,
                                   key=None, keyid=None, profile=None, dry_run=False):
    '''
    Request a VPC peering connection between two VPCs.

    .. versionadded:: 2016.11.0

    requester_vpc_id
        ID of the requesting VPC. Exclusive with requester_vpc_name.
    requester_vpc_name
        Name tag of the requesting VPC. Exclusive with requester_vpc_id.
    peer_vpc_id
        ID of the VPC to create VPC peering connection with. This can be a VPC in
        another account. Exclusive with peer_vpc_name.
    peer_vpc_name
        Name tag of the VPC to create VPC peering connection with. This can only
        be a VPC in the same account and same region, else resolving it into a
        vpc ID will almost certainly fail. Exclusive with peer_vpc_id.
    name
        The name to use for this VPC peering connection.
    peer_owner_id
        ID of the owner of the peer VPC. Defaults to your account ID, so a value
        is required if peering with a VPC in a different account.
    peer_region
        Region of peer VPC. For inter-region vpc peering connections. Not required
        for intra-region peering connections.
    region
        Region to connect to.
    key
        Secret key to be used.
    keyid
        Access key to be used.
    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    dry_run
        If True, skip application and return status.

    CLI Example:

    .. code-block:: bash

        # Create a named VPC peering connection
        salt myminion boto_vpc.request_vpc_peering_connection vpc-4a3e622e vpc-be82e9da name=my_vpc_connection
        # Without a name
        salt myminion boto_vpc.request_vpc_peering_connection vpc-4a3e622e vpc-be82e9da
        # Specify a region
        salt myminion boto_vpc.request_vpc_peering_connection vpc-4a3e622e vpc-be82e9da region=us-west-2
    '''
    conn = _get_conn3(region=region, key=key, keyid=keyid,
                      profile=profile)

    # Fail early if a peering connection already carries the requested name.
    if name and _vpc_peering_conn_id_for_name(name, conn):
        raise SaltInvocationError('A VPC peering connection with this name already '
                                  'exists! Please specify a different name.')

    # Each side of the connection must be identified by exactly one of ID/name.
    if not _exactly_one((requester_vpc_id, requester_vpc_name)):
        raise SaltInvocationError('Exactly one of requester_vpc_id or '
                                  'requester_vpc_name is required')
    if not _exactly_one((peer_vpc_id, peer_vpc_name)):
        raise SaltInvocationError('Exactly one of peer_vpc_id or '
                                  'peer_vpc_name is required.')

    # Resolve name tags to VPC IDs (only possible within this account/region).
    if requester_vpc_name:
        requester_vpc_id = _get_id(vpc_name=requester_vpc_name, region=region, key=key,
                                   keyid=keyid, profile=profile)
        if not requester_vpc_id:
            return {'error': 'Could not resolve VPC name {0} to an ID'.format(requester_vpc_name)}
    if peer_vpc_name:
        peer_vpc_id = _get_id(vpc_name=peer_vpc_name, region=region, key=key,
                              keyid=keyid, profile=profile)
        if not peer_vpc_id:
            return {'error': 'Could not resolve VPC name {0} to an ID'.format(peer_vpc_name)}

    peering_params = {"VpcId": requester_vpc_id, "PeerVpcId": peer_vpc_id, "DryRun": dry_run}
    # Optional cross-account / cross-region parameters.
    if peer_owner_id:
        peering_params.update({"PeerOwnerId": peer_owner_id})
    if peer_region:
        peering_params.update({"PeerRegion": peer_region})

    try:
        log.debug('Trying to request vpc peering connection')
        vpc_peering = conn.create_vpc_peering_connection(**peering_params)
        peering = vpc_peering.get('VpcPeeringConnection', {})
        peering_conn_id = peering.get('VpcPeeringConnectionId', 'ERROR')
        msg = 'VPC peering {0} requested.'.format(peering_conn_id)
        log.debug(msg)

        if name:
            # Tag the new connection so it can be found by name later.
            log.debug('Adding name tag to vpc peering connection')
            conn.create_tags(
                Resources=[peering_conn_id],
                Tags=[{'Key': 'Name', 'Value': name}]
            )
            log.debug('Applied name tag to vpc peering connection')
            msg += ' With name {0}.'.format(name)

        return {'msg': msg}
    except botocore.exceptions.ClientError as err:
        log.error('Got an error while trying to request vpc peering')
        return {'error': __utils__['boto.get_error'](err)}
|
Request a VPC peering connection between two VPCs.
.. versionadded:: 2016.11.0
requester_vpc_id
ID of the requesting VPC. Exclusive with requester_vpc_name.
requester_vpc_name
Name tag of the requesting VPC. Exclusive with requester_vpc_id.
peer_vpc_id
ID of the VPC to create VPC peering connection with. This can be a VPC in
another account. Exclusive with peer_vpc_name.
peer_vpc_name
Name tag of the VPC to create VPC peering connection with. This can only
be a VPC in the same account and same region, else resolving it into a
vpc ID will almost certainly fail. Exclusive with peer_vpc_id.
name
The name to use for this VPC peering connection.
peer_owner_id
ID of the owner of the peer VPC. Defaults to your account ID, so a value
is required if peering with a VPC in a different account.
peer_region
Region of peer VPC. For inter-region vpc peering connections. Not required
for intra-region peering connections.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
dry_run
If True, skip application and return status.
CLI Example:
.. code-block:: bash
# Create a named VPC peering connection
salt myminion boto_vpc.request_vpc_peering_connection vpc-4a3e622e vpc-be82e9da name=my_vpc_connection
# Without a name
salt myminion boto_vpc.request_vpc_peering_connection vpc-4a3e622e vpc-be82e9da
# Specify a region
salt myminion boto_vpc.request_vpc_peering_connection vpc-4a3e622e vpc-be82e9da region=us-west-2
|
def friendfeed_request(self, path, callback, access_token=None,
                       post_args=None, **args):
    """Fetches the given relative API path, e.g., "/bret/friends"

    If the request is a POST, post_args should be provided. Query
    string arguments should be given as keyword arguments.

    All the FriendFeed methods are documented at
    http://friendfeed.com/api/documentation.

    Many methods require an OAuth access token which you can obtain
    through authorize_redirect() and get_authenticated_user(). The
    user returned through that process includes an 'access_token'
    attribute that can be used to make authenticated requests via
    this method. Example usage::

        class MainHandler(tornado.web.RequestHandler,
                          tornado.auth.FriendFeedMixin):
            @tornado.web.authenticated
            @tornado.web.asynchronous
            def get(self):
                self.friendfeed_request(
                    "/entry",
                    post_args={"body": "Testing Tornado Web Server"},
                    access_token=self.current_user["access_token"],
                    callback=self.async_callback(self._on_post))

            def _on_post(self, new_entry):
                if not new_entry:
                    # Call failed; perhaps missing permission?
                    self.authorize_redirect()
                    return
                self.finish("Posted a message!")
    """
    # Add the OAuth resource request signature if we have credentials
    url = "http://friendfeed-api.com/v2" + path
    if access_token:
        # The OAuth signature must cover the union of the query-string and
        # POST parameters, so they are merged before signing.
        all_args = {}
        all_args.update(args)
        all_args.update(post_args or {})
        # NOTE(review): consumer_token is unused here; presumably
        # _oauth_request_parameters fetches it itself -- confirm.
        consumer_token = self._oauth_consumer_token()
        method = "POST" if post_args is not None else "GET"
        oauth = self._oauth_request_parameters(
            url, access_token, all_args, method=method)
        # The oauth_* parameters travel in the query string, not the body.
        args.update(oauth)
    if args: url += "?" + urllib.urlencode(args)
    callback = self.async_callback(self._on_friendfeed_request, callback)
    http = httpclient.AsyncHTTPClient()
    if post_args is not None:
        # POST: the application parameters go in the form-encoded body.
        http.fetch(url, method="POST", body=urllib.urlencode(post_args),
                   callback=callback)
    else:
        http.fetch(url, callback=callback)
|
Fetches the given relative API path, e.g., "/bret/friends"
If the request is a POST, post_args should be provided. Query
string arguments should be given as keyword arguments.
All the FriendFeed methods are documented at
http://friendfeed.com/api/documentation.
Many methods require an OAuth access token which you can obtain
through authorize_redirect() and get_authenticated_user(). The
user returned through that process includes an 'access_token'
attribute that can be used to make authenticated requests via
this method. Example usage::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FriendFeedMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
self.friendfeed_request(
"/entry",
post_args={"body": "Testing Tornado Web Server"},
access_token=self.current_user["access_token"],
callback=self.async_callback(self._on_post))
def _on_post(self, new_entry):
if not new_entry:
# Call failed; perhaps missing permission?
self.authorize_redirect()
return
self.finish("Posted a message!")
|
def generate_csr(self, basename='djangoafip'):
    """
    Create a CSR for this TaxPayer's key.

    Returns a file-like object containing the certificate signing request,
    which can be used to request a new certificate from AFIP.
    """
    buf = BytesIO()
    # A timestamp suffix keeps successive CSR common names unique.
    timestamp = int(datetime.now().timestamp())
    crypto.create_csr(
        self.key.file,
        self.name,
        '{}{}'.format(basename, timestamp),
        'CUIT {}'.format(self.cuit),
        buf,
    )
    buf.seek(0)
    return buf
|
Creates a CSR for this TaxPayer's key
Creates a file-like object that contains the CSR which can be used to
request a new certificate from AFIP.
|
def solve_let(expr, vars):
    """Solve a let-form by evaluating the RHS inside a nested scope."""
    lhs_value = solve(expr.lhs, vars).value
    # The LHS result becomes the scope the RHS is evaluated in, so it must
    # support structured member access.
    if isinstance(lhs_value, structured.IStructured):
        return solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value))
    raise errors.EfilterTypeError(
        root=expr.lhs, query=expr.original,
        message="The LHS of 'let' must evaluate to an IStructured. Got %r."
        % (lhs_value,))
|
Solves a let-form by calling RHS with nested scope.
|
def solve_tsp(V, c):
    """solve_tsp -- solve the traveling salesman problem
    - start with assignment model
    - check flow from a source to every other node;
    - if no flow, a sub-cycle has been found --> add cut
    - otherwise, the solution is optimal
    Parameters:
        - V: set/list of nodes in the graph
        - c[i,j]: cost for traversing edge (i,j)
    Returns the optimum objective value and the list of edges used.
    """
    def addcut(X):
        """Look for a violated cut-set inequality and add it to ``main``.

        Returns True when a cut was added (some sink is disconnected from
        V[0]), False when every sink already receives flow >= 2.
        """
        for sink in V[1:]:
            mflow = maxflow(V, X, V[0], sink)
            mflow.optimize()
            f, cons = mflow.data
            if mflow.ObjVal < 2 - EPS:  # no flow to sink, can add cut
                break
        else:
            return False

        # add a cut/constraint separating V[0]'s side (CutA) from the rest
        CutA = set([V[0]])
        for i in cons:
            if cons[i].Pi <= -1 + EPS:
                CutA.add(i)
        CutB = set(V) - CutA
        main.addCons(
            quicksum(x[i, j] for i in CutA for j in CutB if j > i) +
            quicksum(x[j, i] for i in CutA for j in CutB if j < i) >= 2)
        print("mflow:", mflow.getObjVal(), "cut:", CutA, "+", CutB, ">= 2")
        print("mflow:", mflow.getObjVal(), "cut:", [(i, j) for i in CutA for j in CutB if j > i], "+", [(j, i) for i in CutA for j in CutB if j < i], ">= 2")
        return True

    def isMIP(x):
        """Return True when every variable of *x* is already integer-typed."""
        # BUGFIX: iterate the variable objects; the original iterated the
        # dict, which yields the (i, j) key tuples, not the variables.
        for var in x.values():
            if var.vtype == "CONTINUOUS":
                return False
        return True

    # main part of the solution process:
    main = Model("tsp")
    x = {}
    # One (relaxed) edge variable per unordered node pair.
    for i in V:
        for j in V:
            if j > i:
                x[i, j] = main.addVar(ub=1, vtype="C", name="x(%s,%s)" % (i, j))
    # Degree-2 constraint on every node.
    for i in V:
        main.addCons(quicksum(x[j, i] for j in V if j < i) +
                     quicksum(x[i, j] for j in V if j > i) == 2, "Degree(%s)" % i)
    main.setObjective(quicksum(c[i, j] * x[i, j] for i in V for j in V if j > i), "minimize")

    while True:
        main.optimize()
        X = {}
        for (i, j) in x:
            if main.getVal(x[i, j]) > EPS:
                X[i, j] = main.getVal(x[i, j])

        if addcut(X) == False:  # i.e., components are connected
            # BUGFIX: the original called ``isMIP()`` with no argument,
            # raising TypeError as soon as the relaxation became connected.
            if isMIP(x):  # integer variables, components connected: done
                break
            for (i, j) in x:  # all components connected, switch to integer model
                main.chgVarType(x[i, j], "BINARY")

    # process solution
    edges = []
    for (i, j) in x:
        if main.getVal(x[i, j]) > EPS:
            edges.append((i, j))
    return main.getObjVal(), edges
|
solve_tsp -- solve the traveling salesman problem
- start with assignment model
- check flow from a source to every other node;
- if no flow, a sub-cycle has been found --> add cut
- otherwise, the solution is optimal
Parameters:
- V: set/list of nodes in the graph
- c[i,j]: cost for traversing edge (i,j)
Returns the optimum objective value and the list of edges used.
|
def get_email_link(email, value=None):
    """
    Returns a well-formed link to an email address. If email is None/empty,
    returns an empty string.

    :param email: email address
    :param value: text to be displayed. If None/empty, the email itself is used
    :return: a well-formatted html anchor
    """
    if not email:
        return ""
    mailto = 'mailto:{}'.format(email)
    # ``value or email`` replaces the legacy ``value and value or email``
    # idiom; both fall back to the address when *value* is falsy.
    return get_link(mailto, value or email)
|
Returns a well-formed link to an email address. If email is None/empty,
returns an empty string
:param email: email address
:param link_text: text to be displayed. If None, the email itself is used
:return: a well-formatted html anchor
|
def _validate_entity_cls(self, entity_cls):
    """Ensure *entity_cls* is a concrete subclass of ``Entity``.

    Raises ``AssertionError`` for non-subclasses and ``NotSupportedError``
    when the entity class is marked abstract.
    """
    # Imported lazily to avoid a cyclic dependency with the entity module.
    from protean.core.entity import Entity

    is_entity = issubclass(entity_cls, Entity)
    if not is_entity:
        raise AssertionError(
            f'Entity {entity_cls.__name__} must be subclass of `Entity`')

    if entity_cls.meta_.abstract is True:
        raise NotSupportedError(
            f'{entity_cls.__name__} class has been marked abstract'
            f' and cannot be instantiated')
|
Validate that Entity is a valid class
|
def to_array(self):
    """
    Serializes this InlineQueryResultMpeg4Gif to a dictionary.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    array = super(InlineQueryResultMpeg4Gif, self).to_array()

    # Mandatory fields (always serialized as strings).
    array['type'] = u(self.type)  # py2: type unicode, py3: type str
    array['id'] = u(self.id)  # py2: type unicode, py3: type str
    array['mpeg4_url'] = u(self.mpeg4_url)  # py2: type unicode, py3: type str
    array['thumb_url'] = u(self.thumb_url)  # py2: type unicode, py3: type str

    # Optional integer fields, emitted only when set.
    for int_field in ('mpeg4_width', 'mpeg4_height', 'mpeg4_duration'):
        int_value = getattr(self, int_field)
        if int_value is not None:
            array[int_field] = int(int_value)

    # Optional string fields, emitted only when set.
    for str_field in ('title', 'caption', 'parse_mode'):
        str_value = getattr(self, str_field)
        if str_value is not None:
            array[str_field] = u(str_value)

    # Optional nested objects, serialized recursively.
    if self.reply_markup is not None:
        array['reply_markup'] = self.reply_markup.to_array()  # type InlineKeyboardMarkup
    if self.input_message_content is not None:
        array['input_message_content'] = self.input_message_content.to_array()  # type InputMessageContent
    return array
|
Serializes this InlineQueryResultMpeg4Gif to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
|
def is_bootstrapped(metadata):
    """Return True if cihai is correctly bootstrapped."""
    if TABLE_NAME not in metadata.tables.keys():
        return False
    # Bootstrapped means the table exists with exactly the expected columns.
    expected = set(UNIHAN_FIELDS + DEFAULT_COLUMNS)
    actual = {column.name for column in metadata.tables[TABLE_NAME].columns}
    return expected == actual
|
Return True if cihai is correctly bootstrapped.
|
def removeBiosample(self, biosample):
    """
    Removes the specified biosample from this repository.
    """
    delete_query = models.Biosample.delete().where(
        models.Biosample.id == biosample.getId())
    delete_query.execute()
|
Removes the specified biosample from this repository.
|
def main():
    """
    Simple command-line program for powering on virtual machines on a system.
    """
    args = GetArgs()
    # Prompt for the password only when it was not supplied on the CLI.
    if args.password:
        password = args.password
    else:
        password = getpass.getpass(prompt='Enter password for host %s and user %s: ' % (args.host,args.user))
    try:
        vmnames = args.vmname
        if not len(vmnames):
            print("No virtual machine specified for poweron")
            sys.exit()

        # Skip TLS certificate verification when the runtime supports it
        # (common for hosts with self-signed certificates).
        context = None
        if hasattr(ssl, '_create_unverified_context'):
            context = ssl._create_unverified_context()
        si = SmartConnect(host=args.host,
                          user=args.user,
                          pwd=password,
                          port=int(args.port),
                          sslContext=context)
        if not si:
            print("Cannot connect to specified host using specified username and password")
            sys.exit()
        # Ensure the session is closed on interpreter exit.
        atexit.register(Disconnect, si)

        # Retreive the list of Virtual Machines from the inventory objects
        # under the rootFolder
        content = si.content
        objView = content.viewManager.CreateContainerView(content.rootFolder,
                                                          [vim.VirtualMachine],
                                                          True)
        vmList = objView.view
        objView.Destroy()

        # Find the vm and power it on
        tasks = [vm.PowerOn() for vm in vmList if vm.name in vmnames]

        # Wait for power on to complete
        WaitForTasks(tasks, si)

        print("Virtual Machine(s) have been powered on successfully")
    except vmodl.MethodFault as e:
        print("Caught vmodl fault : " + e.msg)
    except Exception as e:
        # Top-level boundary: report anything unexpected instead of a traceback.
        print("Caught Exception : " + str(e))
|
Simple command-line program for powering on virtual machines on a system.
|
def t_op(self, s):
    r'\+=|-=|\*=|/=|%=|&=|\|=|^=|<<=|>>=|\*\*=|//=|//|==|<=|>=|<<|>>|[<>%^&+/=~-]'
    # NOTE: the docstring above is the token's regex pattern (scanner
    # convention) -- it is functional, not documentation, and must not change.
    # Operators need to be further classified since the grammar requires this
    if s in ('<', '>', '==', '>=', '<=', '<>', '!='):
        # Comparison operators.
        self.add_token('COMP_OP', s)
    elif s in ('+=', '-=', '*=', '/=', '%=', '&=', '|=', '^=', '<<=', '>>=', '**=',
               '//='):
        # Augmented-assignment operators.
        self.add_token('AUGASSIGN', s)
    elif s in self.UNOP2NAME.keys():
        # Operators that may be unary get their dedicated token names.
        self.add_token(self.UNOP2NAME[s], s)
    elif s in ('|', '^', '&', '<<', '>>', '**', '/', '%', '//'):
        # These are *ONLY* binary operators. Operators which are exclusively or
        # can be unary operators were handled previously
        self.add_token('BINOP', s)
    elif s == '=':
        self.add_token('EQUAL', s)
    else:
        # Should be unreachable: the pattern only matches classified operators.
        print("Internal error: Unknown operator %s" % s)
        raise SystemExit
|
r'\+=|-=|\*=|/=|%=|&=|\|=|^=|<<=|>>=|\*\*=|//=|//|==|<=|>=|<<|>>|[<>%^&+/=~-]
|
def whole_subnet_maker(ip_addr, cidr):
    """
    Function to return a whole subnet value from a IP address and CIDR pair.

    Args:
        ip_addr: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1
        cidr: CIDR value of 0 to 32

    Returns: returns the corrected whole subnet (network address) as a string
    """
    # ``not`` replaces the original ``== False`` comparisons (same helpers,
    # same validation semantics).
    if not ucast_ip(ip_addr, False) and not mcast_ip(ip_addr, False):
        LOGGER.critical('Function whole_subnet_maker ip_addr {item}'.format(item=ip_addr))
        raise ValueError("Not a good ipv4 address")
    if not cidr_check(cidr, False):
        LOGGER.critical('Function whole_subnet_maker cidr {item}'.format(item=cidr))
        raise ValueError("Not a good CIDR value should be 0 to 32")

    def subnet_corrector(octet, cidr):
        """Snap *octet* down to the start of its subnet block for this CIDR."""
        cidr_int = int(cidr)
        octet_int = int(octet)
        # Translate the CIDR length into the mask value of the octet the
        # prefix boundary falls in (values come from __mask_conversion).
        if cidr_int >= 24:
            cidr_int = __mask_conversion[cidr_int]["OCT4"]
        elif cidr_int >= 16:
            cidr_int = __mask_conversion[cidr_int]["OCT3"]
        elif cidr_int >= 8:
            cidr_int = __mask_conversion[cidr_int]["OCT2"]
        elif cidr_int >= 1:
            cidr_int = __mask_conversion[cidr_int]["OCT1"]
        # Flooring to the nearest multiple of the block size replaces the
        # original linear boundary scan and yields the same network octet.
        block_size = 256 - cidr_int
        return str((octet_int // block_size) * block_size)

    ip_addr_split = ip_addr.split(".")
    cidr_int = int(cidr)
    if cidr_int >= 24:
        return '.'.join([ip_addr_split[0], ip_addr_split[1], ip_addr_split[2],
                         subnet_corrector(ip_addr_split[3], cidr)])
    elif cidr_int >= 16:
        return '.'.join([ip_addr_split[0], ip_addr_split[1],
                         subnet_corrector(ip_addr_split[2], cidr), '0'])
    elif cidr_int >= 8:
        return '.'.join([ip_addr_split[0],
                         subnet_corrector(ip_addr_split[1], cidr), '0', '0'])
    elif cidr_int >= 1:
        return '.'.join([subnet_corrector(ip_addr_split[0], cidr), '0', '0', '0'])
    return "0.0.0.0"
|
Function to return a whole subnet value from a IP address and CIDR pair
Args:
ip_addr: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1
cidr: CIDR value of 0 to 32
Returns: returns the corrected whole subnet
|
def spdhg_generic(x, f, g, A, tau, sigma, niter, **kwargs):
    r"""Computes a saddle point with a stochastic PDHG.

    This means, a solution (x*, y*), y* = (y*_1, ..., y*_n) such that

    (x*, y*) in arg min_x max_y sum_i=1^n <y_i, A_i> - f*[i](y_i) + g(x)

    where g : X -> IR_infty and f[i] : Y[i] -> IR_infty are convex, l.s.c. and
    proper functionals. For this algorithm, they all may be non-smooth and no
    strong convexity is assumed.

    Parameters
    ----------
    x : primal variable
        This variable is both input and output of the method.
    f : functions
        Functionals Y[i] -> IR_infty that all have a convex conjugate with a
        proximal operator, i.e.
        f[i].convex_conj.proximal(sigma[i]) : Y[i] -> Y[i].
    g : function
        Functional X -> IR_infty that has a proximal operator, i.e.
        g.proximal(tau) : X -> X.
    A : functions
        Operators A[i] : X -> Y[i] that possess adjoints: A[i].adjoint
    tau : scalar / vector / matrix
        Step size for primal variable. Note that the proximal operator of g
        has to be well-defined for this input.
    sigma : scalar
        Scalar / vector / matrix used as step size for dual variable. Note that
        the proximal operator related to f (see above) has to be well-defined
        for this input.
    niter : int
        Number of iterations

    Other Parameters
    ----------------
    y : dual variable, optional
        Dual variable is part of a product space. By default equals 0.
    z : variable, optional
        Adjoint of dual variable, z = A^* y. By default equals 0 if y = 0.
    mu_g : scalar
        Strong convexity constant of g.
    theta : scalar
        Global extrapolation factor.
    extra: list
        List of local extrapolation paramters for every index i. By default
        extra_i = 1.
    fun_select : function
        Function that selects blocks at every iteration IN -> {1,...,n}. By
        default this is serial uniform sampling, fun_select(k) selects an index
        i \in {1,...,n} with probability 1/n.
    callback : callable, optional
        Function called with the current iterate after each iteration.

    References
    ----------
    [CERS2017] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb,
    *Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling
    and Imaging Applications*. ArXiv: http://arxiv.org/abs/1706.04957 (2017).

    [E+2017] M. J. Ehrhardt, P. J. Markiewicz, P. Richtarik, J. Schott,
    A. Chambolle and C.-B. Schoenlieb, *Faster PET reconstruction with a
    stochastic primal-dual hybrid gradient method*. Wavelets and Sparsity XVII,
    58 (2017) http://doi.org/10.1117/12.2272946.
    """
    # Callback object
    callback = kwargs.pop('callback', None)
    if callback is not None and not callable(callback):
        raise TypeError('`callback` {} is not callable'
                        ''.format(callback))

    # Dual variable
    y = kwargs.pop('y', None)
    if y is None:
        y = A.range.zero()

    # Adjoint of dual variable
    z = kwargs.pop('z', None)
    if z is None:
        if y.norm() == 0:
            z = A.domain.zero()
        else:
            z = A.adjoint(y)

    # Strong convexity of g
    mu_g = kwargs.pop('mu_g', None)
    if mu_g is None:
        update_proximal_primal = False
    else:
        update_proximal_primal = True

    # Global extrapolation factor theta
    theta = kwargs.pop('theta', 1)

    # Second extrapolation factor
    extra = kwargs.pop('extra', None)
    if extra is None:
        extra = [1] * len(sigma)

    # Selection function
    fun_select = kwargs.pop('fun_select', None)
    if fun_select is None:
        def fun_select(k):
            # BUGFIX: ``np.random.choice`` requires ``p`` to be a probability
            # *vector*; the original passed the scalar ``1 / len(A)``, which
            # raises on first use. Drawing without ``p`` is uniform, matching
            # the documented default of serial uniform sampling.
            return [int(np.random.choice(len(A)))]

    # Initialize variables
    z_relax = z.copy()
    dz = A.domain.element()
    y_old = A.range.element()

    # Save proximal operators
    proximal_dual_sigma = [fi.convex_conj.proximal(si)
                           for fi, si in zip(f, sigma)]
    proximal_primal_tau = g.proximal(tau)

    # run the iterations
    for k in range(niter):

        # select block
        selected = fun_select(k)

        # update primal variable
        # tmp = x - tau * z_relax; z_relax used as tmp variable
        z_relax.lincomb(1, x, -tau, z_relax)
        # x = prox(tmp)
        proximal_primal_tau(z_relax, out=x)

        # update extrapolation parameter theta
        if update_proximal_primal:
            theta = float(1 / np.sqrt(1 + 2 * mu_g * tau))

        # update dual variable and z, z_relax
        z_relax.assign(z)
        for i in selected:

            # save old yi
            y_old[i].assign(y[i])

            # tmp = Ai(x)
            A[i](x, out=y[i])

            # tmp = y_old + sigma_i * Ai(x)
            y[i].lincomb(1, y_old[i], sigma[i], y[i])

            # y[i]= prox(tmp)
            proximal_dual_sigma[i](y[i], out=y[i])

            # update adjoint of dual variable
            y_old[i].lincomb(-1, y_old[i], 1, y[i])
            A[i].adjoint(y_old[i], out=dz)
            z += dz

            # compute extrapolation
            z_relax.lincomb(1, z_relax, 1 + theta * extra[i], dz)

        # update the step sizes tau and sigma for acceleration
        if update_proximal_primal:
            for i in range(len(sigma)):
                sigma[i] /= theta
            tau *= theta

            proximal_dual_sigma = [fi.convex_conj.proximal(si)
                                   for fi, si in zip(f, sigma)]
            proximal_primal_tau = g.proximal(tau)

        if callback is not None:
            callback([x, y])
|
r"""Computes a saddle point with a stochastic PDHG.
This means, a solution (x*, y*), y* = (y*_1, ..., y*_n) such that
(x*, y*) in arg min_x max_y sum_i=1^n <y_i, A_i> - f*[i](y_i) + g(x)
where g : X -> IR_infty and f[i] : Y[i] -> IR_infty are convex, l.s.c. and
proper functionals. For this algorithm, they all may be non-smooth and no
strong convexity is assumed.
Parameters
----------
x : primal variable
This variable is both input and output of the method.
f : functions
Functionals Y[i] -> IR_infty that all have a convex conjugate with a
proximal operator, i.e.
f[i].convex_conj.proximal(sigma[i]) : Y[i] -> Y[i].
g : function
Functional X -> IR_infty that has a proximal operator, i.e.
g.proximal(tau) : X -> X.
A : functions
Operators A[i] : X -> Y[i] that possess adjoints: A[i].adjoint
tau : scalar / vector / matrix
Step size for primal variable. Note that the proximal operator of g
has to be well-defined for this input.
sigma : scalar
Scalar / vector / matrix used as step size for dual variable. Note that
the proximal operator related to f (see above) has to be well-defined
for this input.
niter : int
Number of iterations
Other Parameters
----------------
y : dual variable, optional
Dual variable is part of a product space. By default equals 0.
z : variable, optional
Adjoint of dual variable, z = A^* y. By default equals 0 if y = 0.
mu_g : scalar
Strong convexity constant of g.
theta : scalar
Global extrapolation factor.
extra: list
List of local extrapolation paramters for every index i. By default
extra_i = 1.
fun_select : function
Function that selects blocks at every iteration IN -> {1,...,n}. By
default this is serial uniform sampling, fun_select(k) selects an index
i \in {1,...,n} with probability 1/n.
callback : callable, optional
Function called with the current iterate after each iteration.
References
----------
[CERS2017] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb,
*Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling
and Imaging Applications*. ArXiv: http://arxiv.org/abs/1706.04957 (2017).
[E+2017] M. J. Ehrhardt, P. J. Markiewicz, P. Richtarik, J. Schott,
A. Chambolle and C.-B. Schoenlieb, *Faster PET reconstruction with a
stochastic primal-dual hybrid gradient method*. Wavelets and Sparsity XVII,
58 (2017) http://doi.org/10.1117/12.2272946.
|
def overlap_bbox_and_point(bbox, xp, yp):
    """Given a bbox that contains a given point, return the (x, y) displacement
    necessary to make the bbox not overlap the point."""
    cx, cy = get_midpoint(bbox)

    def axis_shift(center, point, lower, upper):
        # Push the box away from the point along one axis, depending on
        # which side of the box center the point lies on.
        direction = np.sign(center - point)
        if direction == -1:
            return point - upper
        if direction == 1:
            return point - lower
        return 0

    dx = axis_shift(cx, xp, bbox.xmin, bbox.xmax)
    dy = axis_shift(cy, yp, bbox.ymin, bbox.ymax)
    return dx, dy
|
Given a bbox that contains a given point, return the (x, y) displacement
necessary to make the bbox not overlap the point.
|
def envs(backend=None, sources=False):
    '''
    Return the available fileserver environments. If no backend is provided,
    then the environments for all configured backends will be returned.

    backend
        Narrow fileserver backends to a subset of the enabled ones.

        .. versionchanged:: 2015.5.0
            If all passed backends start with a minus sign (``-``), then these
            backends will be excluded from the enabled backends. However, if
            there is a mix of backends with and without a minus sign (ex:
            ``backend=-roots,git``) then the ones starting with a minus
            sign will be disregarded.

            Additionally, fileserver backends can now be passed as a
            comma-separated list. In earlier versions, they needed to be passed
            as a python list (ex: ``backend="['roots', 'git']"``)

    CLI Example:

    .. code-block:: bash

        salt-run fileserver.envs
        salt-run fileserver.envs backend=roots,git
        salt-run fileserver.envs git
    '''
    # Build the fileserver from the master opts and hand back a sorted view.
    fs = salt.fileserver.Fileserver(__opts__)
    return sorted(fs.envs(back=backend, sources=sources))
|
Return the available fileserver environments. If no backend is provided,
then the environments for all configured backends will be returned.
backend
Narrow fileserver backends to a subset of the enabled ones.
.. versionchanged:: 2015.5.0
If all passed backends start with a minus sign (``-``), then these
backends will be excluded from the enabled backends. However, if
there is a mix of backends with and without a minus sign (ex:
``backend=-roots,git``) then the ones starting with a minus
sign will be disregarded.
Additionally, fileserver backends can now be passed as a
comma-separated list. In earlier versions, they needed to be passed
as a python list (ex: ``backend="['roots', 'git']"``)
CLI Example:
.. code-block:: bash
salt-run fileserver.envs
salt-run fileserver.envs backend=roots,git
salt-run fileserver.envs git
|
def count(self):
    """
    Count the number of domains for each status and update the
    global PyFunceble counters accordingly.
    """
    if not self.status:
        # Nothing was parsed, so there is nothing to count.
        return
    # One more subject has been tested.
    PyFunceble.INTERN["counter"]["number"]["tested"] += 1
    status = self.status.lower()
    if (
        status in PyFunceble.STATUS["list"]["up"]
        or status in PyFunceble.STATUS["list"]["valid"]
    ):
        # Active or valid subjects count as "up".
        PyFunceble.INTERN["counter"]["number"]["up"] += 1
    elif status in PyFunceble.STATUS["list"]["down"]:
        # Inactive subjects count as "down".
        PyFunceble.INTERN["counter"]["number"]["down"] += 1
    else:
        # Anything else is considered invalid.
        PyFunceble.INTERN["counter"]["number"]["invalid"] += 1
|
Count the number of domains for each status.
|
def _ctorCmprRange(self, vals):
'''
Override default *range= handler to account for relative computation.
'''
if not isinstance(vals, (list, tuple)):
raise s_exc.BadCmprValu(valu=vals, cmpr='*range=')
if len(vals) != 2:
raise s_exc.BadCmprValu(valu=vals, cmpr='*range=')
tick, tock = self.getTickTock(vals)
if tick > tock:
# User input has requested a nullset
def cmpr(valu):
return False
return cmpr
def cmpr(valu):
return tick <= valu <= tock
return cmpr
|
Override default *range= handler to account for relative computation.
|
def create_superuser(self, email, password, **extra_fields):
    """Create and save a new User, forcing is_staff and is_superuser to True.

    Raises ValueError if the caller explicitly passes either flag as
    anything other than True.
    """
    for flag in ('is_staff', 'is_superuser'):
        # setdefault both applies the default and returns the effective value.
        if extra_fields.setdefault(flag, True) is not True:
            raise ValueError('Superuser must have {}=True.'.format(flag))
    return self._create_user(email, password, **extra_fields)
|
Save new User with is_staff and is_superuser set to True
|
def apply(self, spectrum, plot=False):
    """
    Apply the filter to the given [W, F], or [W, F, E] spectrum
    Parameters
    ----------
    spectrum: array-like
        The wavelength [um] and flux of the spectrum
        to apply the filter to; a third element, if present,
        is treated as the flux uncertainty
    plot: bool
        Plot the original and filtered spectrum
    Returns
    -------
    np.ndarray
        The filtered spectrum and error
    """
    # Convert to filter units if possible
    # NOTE(review): the elements of ``spectrum`` are reassigned below, so a
    # list passed by the caller is mutated in place -- confirm intentional.
    f_units = 1.
    if hasattr(spectrum[0], 'unit'):
        spectrum[0] = spectrum[0].to(self.wave_units)
    if hasattr(spectrum[1], 'unit'):
        spectrum[1] = spectrum[1].to(self.flux_units)
        # Remember the flux units so they can be reattached to the result
        f_units = self.flux_units
    if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
        spectrum[2] = spectrum[2].to(self.flux_units)
    # Make into iterable arrays
    wav, flx, *err = [np.asarray(i) for i in spectrum]
    # Check for error array
    if len(err) == 0:
        # No uncertainties supplied: use NaNs as placeholders
        err = np.ones_like(flx)*np.nan
        unc = False
    else:
        err = err[0]
        unc = True
    # Make flux 2D
    if len(flx.shape) == 1:
        flx = np.expand_dims(flx, axis=0)
        err = np.expand_dims(err, axis=0)
    # Make throughput 3D
    rsr = np.copy(self.rsr)
    # Make empty filtered arrays
    filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
    filtered_err = np.zeros_like(filtered_flx)
    # Rebin the input spectra to the filter wavelength array
    # and apply the RSR curve to the spectrum
    for i, bn in enumerate(rsr):
        for j, (f, e) in enumerate(zip(flx, err)):
            # Wavelengths outside the input range become NaN (left/right)
            filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
            filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]
    # Propagate the filter systematic uncertainties
    if unc:
        filtered_err += filtered_flx*self.systematics
    if plot:
        # Make the figure
        COLORS = color_gen('Category10')
        xlab = 'Wavelength [{}]'.format(self.wave_units)
        ylab = 'Flux Density [{}]'.format(self.flux_units)
        fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)
        # Plot the unfiltered spectrum
        fig.line(wav, flx[0], legend='Input spectrum', color='black')
        # Plot the uncertainties
        if unc:
            band_x = np.append(wav, wav[::-1])
            band_y = np.append(flx-err, (flx+err)[::-1])
            fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)
        # Plot each spectrum bin
        # NOTE(review): the loop variable rebinds ``wav``, shadowing the
        # input wavelength array -- harmless here since it is not used after,
        # but worth confirming.
        for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
            color = next(COLORS)
            fig.line(wav, bn[0], color=color)
            # Plot the uncertainties
            if unc:
                band_x = np.append(wav, wav[::-1])
                band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
                fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
        show(fig)
    return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units
|
Apply the filter to the given [W, F], or [W, F, E] spectrum
Parameters
----------
spectrum: array-like
The wavelength [um] and flux of the spectrum
to apply the filter to
plot: bool
Plot the original and filtered spectrum
Returns
-------
np.ndarray
The filtered spectrum and error
|
async def movehere(self, channel):
    """
    Moves the embed message to a new channel; can also be used to move the
    musicplayer to the front of the channel.
    The old message is deleted and a fresh embed (with reactions) is posted
    in the target channel.
    Args:
        channel (discord.Channel): The channel to move to
    """
    self.logger.debug("movehere command")
    # Delete the old message
    # NOTE(review): assumes self.embed has already been sent; confirm
    # delete() is safe when the embed was never posted.
    await self.embed.delete()
    # Set the channel to this channel
    self.embed.channel = channel
    # Send a new embed to the channel
    await self.embed.send()
    # Re-add the reactions (lost when the old message was deleted)
    await self.add_reactions()
    self.statuslog.info("Moved to front")
|
Moves the embed message to a new channel; can also be used to move the musicplayer to the front
Args:
channel (discord.Channel): The channel to move to
|
def norm_slash(name):
    """Normalize path slashes.

    On case-insensitive systems forward slashes are converted to
    backslashes; otherwise the path is returned untouched. Works on
    both ``str`` and ``bytes`` paths.
    """
    if is_case_sensitive():
        # Case-sensitive systems keep the path as-is.
        return name
    if isinstance(name, str):
        return name.replace('/', "\\")
    return name.replace(b'/', b"\\")
|
Normalize path slashes.
|
def transliterate(table, text):
    """
    Transliterate text according to one of the tables above.
    `table` looks like a language code but comes from a very
    restricted set:
    - 'sr-Latn': convert Serbian, which may be in Cyrillic, into
      the Latin alphabet.
    - 'az-Latn': the same for Azerbaijani Cyrillic to Latin.
    Any other value raises ``ValueError``.
    """
    if table == 'az-Latn':
        return text.translate(AZ_LATN_TABLE)
    if table == 'sr-Latn':
        return text.translate(SR_LATN_TABLE)
    raise ValueError("Unknown transliteration table: {!r}".format(table))
|
Transliterate text according to one of the tables above.
`table` chooses the table. It looks like a language code but comes from a
very restricted set:
- 'sr-Latn' means to convert Serbian, which may be in Cyrillic, into the
Latin alphabet.
- 'az-Latn' means the same for Azerbaijani Cyrillic to Latn.
|
def URIUnescapeString(str, len, target):
    """Unescaping routine that does not check the string is a URI.
    The output is a direct unsigned char translation of %XX values
    (no encoding), so the result can only be smaller than or equal
    in size to the input string. """
    # Thin wrapper over the libxml2 binding; no post-processing needed.
    return libxml2mod.xmlURIUnescapeString(str, len, target)
|
Unescaping routine, but does not check that the string is
an URI. The output is a direct unsigned char translation of
%XX values (no encoding) Note that the length of the result
can only be smaller or same size as the input string.
|
def normalize_name(decl):
    """
    Cached variant of normalize
    Args:
        decl (declaration.declaration_t): the declaration
    Returns:
        str: normalized name
    """
    cached = decl.cache.normalized_name
    if cached is None:
        # First access: compute once and memoize on the declaration cache.
        cached = normalize(decl.name)
        decl.cache.normalized_name = cached
    return cached
|
Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name
|
def legal_graph(graph):
    '''judge if a graph is legal or not.
    A graph is legal when its descriptor contains no duplicated
    skip connections.
    '''
    skips = graph.extract_descriptor().skip_connections
    # Duplicates collapse in the set, so unequal lengths mean illegal.
    return len(skips) == len(set(skips))
|
judge if a graph is legal or not.
|
def GetNBits(value, nbits):
    """
    Get the first `nbits` from `value`.
    :param value: Source value from which to extract
    :type value: int or long or BitVec
    :param int nbits: How many bits to extract
    :return: Low `nbits` bits of `value`.
    :rtype int or long or BitVec
    """
    # NOP if sizes are the same
    if isinstance(value, int):
        return Operators.EXTRACT(value, 0, nbits)
    elif isinstance(value, BitVec):
        # A BitVec narrower than nbits is zero-extended rather than truncated.
        if value.size < nbits:
            return Operators.ZEXTEND(value, nbits)
        else:
            return Operators.EXTRACT(value, 0, nbits)
    # NOTE(review): any other type falls through and implicitly returns
    # None -- confirm callers only ever pass int or BitVec.
|
Get the first `nbits` from `value`.
:param value: Source value from which to extract
:type value: int or long or BitVec
:param int nbits: How many bits to extract
:return: Low `nbits` bits of `value`.
:rtype int or long or BitVec
|
def state(self, context):
    """
    Get instance state.
    :param resort.engine.execution.Context context:
        Current execution context.
    :rtype:
        str
    :return:
        Instance state name.
    """
    result = None
    # Scan the status output; the last "state" row seen wins.
    for record in self.read(context, [
        "status",
        context.resolve(self.__name)
    ]):
        if record[2] == "state":
            result = record[3].strip()
    return result
|
Get instance state.
:param resort.engine.execution.Context context:
Current execution context.
:rtype:
str
:return:
Instance state name.
|
def _validate_compression_params(self, img_array, cparams, colorspace):
    """Check that the compression parameters are valid.
    Returns nothing on success; each helper raises on failure.
    Parameters
    ----------
    img_array : ndarray
        Image data to be written to file.
    cparams : CompressionParametersType(ctypes.Structure)
        Corresponds to cparameters_t type in openjp2 headers.
    colorspace :
        Colorspace value checked against the J2K constraints by
        _validate_j2k_colorspace.
    """
    # Order matters only in which validation error surfaces first.
    self._validate_j2k_colorspace(cparams, colorspace)
    self._validate_codeblock_size(cparams)
    self._validate_precinct_size(cparams)
    self._validate_image_rank(img_array)
    self._validate_image_datatype(img_array)
|
Check that the compression parameters are valid.
Parameters
----------
img_array : ndarray
Image data to be written to file.
cparams : CompressionParametersType(ctypes.Structure)
Corresponds to cparameters_t type in openjp2 headers.
|
def iterate(self, params, repetition, iteration):
    """
    Called once for each training iteration (== epoch here).

    Steps the LR scheduler (before training, or after validation for
    ReduceLROnPlateau), runs one training epoch, optionally runs the
    validation and noise tests, and returns a dict of metrics.

    :param dict params: experiment parameters; reads "lr_scheduler",
        "test_noise_every_epoch" and "iterations"
    :param repetition: repetition index (unused in this method)
    :param int iteration: 0-based epoch number
    :returns: dict with validation/noise results, elapsed time and
        the current learning rate
    :raises RuntimeError: wrapping any exception raised during the epoch
    """
    try:
        print("\nStarting iteration",iteration)
        t1 = time.time()
        ret = {}
        # Update learning rate using learning rate scheduler if configured
        if self.lr_scheduler is not None:
            # ReduceLROnPlateau lr_scheduler step should be called after validation,
            # all other lr_schedulers should be called before training
            if params["lr_scheduler"] != "ReduceLROnPlateau":
                self.lr_scheduler.step()
        self.train(params, epoch=iteration)
        # Run validation test
        if self.validation_loader is not None:
            validation = self.test(params, self.validation_loader)
            # ReduceLROnPlateau step should be called after validation
            if params["lr_scheduler"] == "ReduceLROnPlateau":
                self.lr_scheduler.step(validation["test_loss"])
            ret["validation"] = validation
            print("Validation: Test error=", validation["testerror"],
                  "entropy=", validation["entropy"])
        # Run noise test (every epoch if configured, always on the last one)
        if (params["test_noise_every_epoch"] or
                iteration == params["iterations"] - 1):
            ret.update(self.runNoiseTests(params))
            print("Noise test results: totalCorrect=", ret["totalCorrect"],
                  "Test error=", ret["testerror"], ", entropy=", ret["entropy"])
            # Highlight particularly strong results on the console
            if ret["totalCorrect"] > 100000 and ret["testerror"] > 98.3:
                print("*******")
                print(params)
        ret.update({"elapsedTime": time.time() - self.startTime})
        ret.update({"learningRate": self.learningRate if self.lr_scheduler is None
                    else self.lr_scheduler.get_lr()})
        print("Iteration time= {0:.3f} secs, "
              "total elapsed time= {1:.3f} mins".format(
                  time.time() - t1,ret["elapsedTime"]/60.0))
    except Exception as e:
        # Tracebacks are not printed if using multiprocessing so we do it here
        tb = sys.exc_info()[2]
        traceback.print_tb(tb)
        raise RuntimeError("Something went wrong in iterate", e)
    return ret
|
Called once for each training iteration (== epoch here).
|
def crud_mutation_name(action, model):
    """
    This function returns the name of a mutation that performs the specified
    crud action on the given model service
    """
    raw_name = get_model_string(model)
    # Uppercase only the first character so the rest keeps its casing
    # (camelCase-friendly, unlike str.capitalize()).
    camel_name = raw_name[0].upper() + raw_name[1:]
    return "{}{}".format(action, camel_name)
|
This function returns the name of a mutation that performs the specified
crud action on the given model service
|
def current(instance=True):
    """Returns the current thread's `IOLoop`.
    If an `IOLoop` is currently running or has been marked as
    current by `make_current`, returns that instance. If there is
    no current `IOLoop` and ``instance`` is true, creates one.
    .. versionchanged:: 4.1
       Added ``instance`` argument to control the fallback to
       `IOLoop.instance()`.
    .. versionchanged:: 5.0
       The ``instance`` argument now controls whether an `IOLoop`
       is created automatically when there is none, instead of
       whether we fall back to `IOLoop.instance()` (which is now
       an alias for this method)
    """
    current = getattr(IOLoop._current, "instance", None)
    if current is None and instance:
        current = None
        #if asyncio is not None:
        #    from tornado.platform.asyncio import AsyncIOLoop, AsyncIOMainLoop
        #    if IOLoop.configured_class() is AsyncIOLoop:
        #        current = AsyncIOMainLoop()
        if current is None:
            # Pick the platform-specific poller: kqueue on macOS,
            # epoll everywhere else.
            # NOTE(review): non-darwin, non-Linux platforms also take the
            # EPollIOLoop branch -- confirm that is intended.
            if sys.platform == 'darwin':
                from .platforms import KQueueIOLoop
                current = KQueueIOLoop()
            else:
                from .platforms import EPollIOLoop
                current = EPollIOLoop()
            # initialize() registers the new loop as the current one.
            current.initialize()
            #current = IOLoop()
        if IOLoop._current.instance is not current:
            raise RuntimeError("new IOLoop did not become current")
    return current
|
Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as
current by `make_current`, returns that instance. If there is
no current `IOLoop` and ``instance`` is true, creates one.
.. versionchanged:: 4.1
Added ``instance`` argument to control the fallback to
`IOLoop.instance()`.
.. versionchanged:: 5.0
The ``instance`` argument now controls whether an `IOLoop`
is created automatically when there is none, instead of
whether we fall back to `IOLoop.instance()` (which is now
an alias for this method)
|
def time_zone_by_name(self, hostname):
    """
    Returns time zone in tzdata format (e.g. America/New_York or Europe/Paris)
    :arg hostname: Hostname (e.g. example.com)
    """
    # Resolve the hostname to an address, then reuse the address lookup.
    return self.time_zone_by_addr(self._gethostbyname(hostname))
|
Returns time zone in tzdata format (e.g. America/New_York or Europe/Paris)
:arg hostname: Hostname (e.g. example.com)
|
def get_classification_node(self, project, structure_group, path=None, depth=None):
    """GetClassificationNode.
    Fetch the classification node (area or iteration) at a given path.
    :param str project: Project ID or project name
    :param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
    :param str path: Path of the classification node.
    :param int depth: Depth of children to fetch.
    :rtype: :class:`<WorkItemClassificationNode> <azure.devops.v5_0.work_item_tracking.models.WorkItemClassificationNode>`
    """
    # Serialize each non-None route segment in a fixed order.
    route_values = {}
    for route_key, param_name, value, type_name in (
            ('project', 'project', project, 'str'),
            ('structureGroup', 'structure_group', structure_group, 'TreeStructureGroup'),
            ('path', 'path', path, 'str')):
        if value is not None:
            route_values[route_key] = self._serialize.url(param_name, value, type_name)
    query_parameters = {}
    if depth is not None:
        query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
    response = self._send(http_method='GET',
                          location_id='5a172953-1b41-49d3-840a-33f79c3ce89f',
                          version='5.0',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('WorkItemClassificationNode', response)
|
GetClassificationNode.
Gets the classification node for a given node path.
:param str project: Project ID or project name
:param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
:param str path: Path of the classification node.
:param int depth: Depth of children to fetch.
:rtype: :class:`<WorkItemClassificationNode> <azure.devops.v5_0.work_item_tracking.models.WorkItemClassificationNode>`
|
def calc_geo_dist_vincenty(node_source, node_target):
    """ Calculates the geodesic distance between `node_source` and
    `node_target`, scaled by the detour factor configured in
    :file:`ding0/ding0/config/config_calc.cfg`.
    Parameters
    ----------
    node_source: LVStationDing0, GeneratorDing0, or CableDistributorDing0
        source node, member of GridDing0._graph
    node_target: LVStationDing0, GeneratorDing0, or CableDistributorDing0
        target node, member of GridDing0._graph
    Returns
    -------
    :any:`float`
        Distance in m (never 0; see note below)
    """
    detour_factor = cfg_ding0.get('assumptions', 'branch_detour_factor')
    # notice: vincenty takes (lat, lon)
    source_coords = (node_source.geo_data.y, node_source.geo_data.x)
    target_coords = (node_target.geo_data.y, node_target.geo_data.x)
    branch_length = detour_factor * vincenty(source_coords, target_coords).m
    # ========= BUG: LINE LENGTH=0 WHEN CONNECTING GENERATORS ===========
    # When importing generators, the geom_new field is used as position. If it is empty, EnergyMap's geom
    # is used and so there are a couple of generators at the same position => length of interconnecting
    # line is 0. See issue #76
    if branch_length == 0:
        branch_length = 1
        logger.warning('Geo distance is zero, check objects\' positions. '
                       'Distance is set to 1m')
    # ===================================================================
    return branch_length
|
Calculates the geodesic distance between `node_source` and `node_target`
incorporating the detour factor specified in :file:`ding0/ding0/config/config_calc.cfg`.
Parameters
----------
node_source: LVStationDing0, GeneratorDing0, or CableDistributorDing0
source node, member of GridDing0._graph
node_target: LVStationDing0, GeneratorDing0, or CableDistributorDing0
target node, member of GridDing0._graph
Returns
-------
:any:`float`
Distance in m
|
def extract_key_value(line, environ):
    """Return ``(key, value)`` parsed from the given line, or ``None`` if the
    line contains no ``=`` separator.

    Value handling (after stripping surrounding whitespace):
    - bare text passes through as-is (with spaces stripped)
    - '{foo}' in single quotes passes through literally (quotes removed)
    - "{foo}" in double quotes substitutes from ``environ``'s foo
    """
    segments = line.split("=", 1)
    if len(segments) < 2:
        # No '=' separator: nothing to extract.
        return None
    key, value = segments
    value = value.strip()
    # startswith() instead of value[0] so an empty value (e.g. "key=")
    # yields ('key', '') instead of raising IndexError.
    if value.startswith("'") and _SQUOTE_RE.match(value):
        # Single-quoted: strip the quotes, no substitution.
        value = value[1:-1]
    elif value.startswith('"') and _DQUOTE_RE.match(value):
        # Double-quoted: strip the quotes and substitute from environ.
        template = value[1:-1]
        value = template.format(**environ)
    key = key.strip()
    value = value.strip()
    return key, value
|
Return key, value from given line if present, else return None.
|
def _adjust_legend(self, overlay, axis):
    """
    Accumulate the legend handles and labels for all subplots
    and set up the legend.

    Hides any existing legend when there is nothing to show (or
    show_legend is False); otherwise builds a deduplicated
    handle->label mapping and installs a styled matplotlib legend,
    recording it in self.handles.
    """
    legend_data = []
    dimensions = overlay.kdims
    title = ', '.join([d.name for d in dimensions])
    for key, subplot in self.subplots.items():
        element = overlay.data.get(key, False)
        if not subplot.show_legend or not element: continue
        title = ', '.join([d.name for d in dimensions])
        # Collect every artist handle registered by the subplot tree
        handle = subplot.traverse(lambda p: p.handles['artist'],
                                  [lambda p: 'artist' in p.handles])
        if isinstance(overlay, NdOverlay):
            # Build the label from the overlay key values (with units)
            key = (dim.pprint_value(k) for k, dim in zip(key, dimensions))
            label = ','.join([str(k) + dim.unit if dim.unit else str(k) for dim, k in
                              zip(dimensions, key)])
            if handle:
                legend_data.append((handle, label))
        else:
            if isinstance(subplot, OverlayPlot):
                # Nested overlays already aggregated their own legend data
                legend_data += subplot.handles.get('legend_data', {}).items()
            elif element.label and handle:
                legend_data.append((handle, element.label))
    all_handles, all_labels = list(zip(*legend_data)) if legend_data else ([], [])
    data = OrderedDict()
    used_labels = []
    for handle, label in zip(all_handles, all_labels):
        # Ensure that artists with multiple handles are supported
        if isinstance(handle, list): handle = tuple(handle)
        # Deduplicate on both handle and label, keeping first occurrence
        if handle and (handle not in data) and label and label not in used_labels:
            data[handle] = label
            used_labels.append(label)
    if (not len(set(data.values())) > 0) or not self.show_legend:
        # Nothing to display: hide any legend a previous draw created
        legend = axis.get_legend()
        if legend:
            legend.set_visible(False)
    else:
        leg_spec = self.legend_specs[self.legend_position]
        if self.legend_cols: leg_spec['ncol'] = self.legend_cols
        leg = axis.legend(list(data.keys()), list(data.values()),
                          title=title, scatterpoints=1,
                          **dict(leg_spec, **self._fontsize('legend')))
        title_fontsize = self._fontsize('legend_title')
        if title_fontsize:
            leg.get_title().set_fontsize(title_fontsize['fontsize'])
        frame = leg.get_frame()
        frame.set_facecolor('1.0')
        frame.set_edgecolor('0.0')
        # NOTE(review): set_linewidth is given a string here; newer
        # matplotlib expects a float -- confirm against the pinned version.
        frame.set_linewidth('1.0')
        # Very high zorder keeps the legend above all plot artists
        leg.set_zorder(10e6)
        self.handles['legend'] = leg
        self.handles['bbox_extra_artists'].append(leg)
        self.handles['legend_data'] = data
|
Accumulate the legend handles and labels for all subplots
and set up the legend
|
def intervaljoin(left, right, lstart='start', lstop='stop', rstart='start',
                 rstop='stop', lkey=None, rkey=None, include_stop=False,
                 lprefix=None, rprefix=None):
    """
    Join two tables by overlapping intervals. E.g.::
        >>> import petl as etl
        >>> left = [['begin', 'end', 'quux'],
        ...         [1, 2, 'a'],
        ...         [2, 4, 'b'],
        ...         [2, 5, 'c'],
        ...         [9, 14, 'd'],
        ...         [1, 1, 'e'],
        ...         [10, 10, 'f']]
        >>> right = [['start', 'stop', 'value'],
        ...          [1, 4, 'foo'],
        ...          [3, 7, 'bar'],
        ...          [4, 9, 'baz']]
        >>> table1 = etl.intervaljoin(left, right,
        ...                           lstart='begin', lstop='end',
        ...                           rstart='start', rstop='stop')
        >>> table1.lookall()
        +-------+-----+------+-------+------+-------+
        | begin | end | quux | start | stop | value |
        +=======+=====+======+=======+======+=======+
        |     1 |   2 | 'a'  |     1 |    4 | 'foo' |
        +-------+-----+------+-------+------+-------+
        |     2 |   4 | 'b'  |     1 |    4 | 'foo' |
        +-------+-----+------+-------+------+-------+
        |     2 |   4 | 'b'  |     3 |    7 | 'bar' |
        +-------+-----+------+-------+------+-------+
        |     2 |   5 | 'c'  |     1 |    4 | 'foo' |
        +-------+-----+------+-------+------+-------+
        |     2 |   5 | 'c'  |     3 |    7 | 'bar' |
        +-------+-----+------+-------+------+-------+
        |     2 |   5 | 'c'  |     4 |    9 | 'baz' |
        +-------+-----+------+-------+------+-------+
        >>> # include stop coordinate in intervals
        ... table2 = etl.intervaljoin(left, right,
        ...                           lstart='begin', lstop='end',
        ...                           rstart='start', rstop='stop',
        ...                           include_stop=True)
        >>> table2.lookall()
        +-------+-----+------+-------+------+-------+
        | begin | end | quux | start | stop | value |
        +=======+=====+======+=======+======+=======+
        |     1 |   2 | 'a'  |     1 |    4 | 'foo' |
        +-------+-----+------+-------+------+-------+
        |     2 |   4 | 'b'  |     1 |    4 | 'foo' |
        +-------+-----+------+-------+------+-------+
        |     2 |   4 | 'b'  |     3 |    7 | 'bar' |
        +-------+-----+------+-------+------+-------+
        |     2 |   4 | 'b'  |     4 |    9 | 'baz' |
        +-------+-----+------+-------+------+-------+
        |     2 |   5 | 'c'  |     1 |    4 | 'foo' |
        +-------+-----+------+-------+------+-------+
        |     2 |   5 | 'c'  |     3 |    7 | 'bar' |
        +-------+-----+------+-------+------+-------+
        |     2 |   5 | 'c'  |     4 |    9 | 'baz' |
        +-------+-----+------+-------+------+-------+
        |     9 |  14 | 'd'  |     4 |    9 | 'baz' |
        +-------+-----+------+-------+------+-------+
        |     1 |   1 | 'e'  |     1 |    4 | 'foo' |
        +-------+-----+------+-------+------+-------+
    Note start coordinates are included and stop coordinates are excluded
    from the interval. Use the `include_stop` keyword argument to include the
    upper bound of the interval when finding overlaps.
    An additional key comparison can be made, e.g.::
        >>> import petl as etl
        >>> left = (('fruit', 'begin', 'end'),
        ...         ('apple', 1, 2),
        ...         ('apple', 2, 4),
        ...         ('apple', 2, 5),
        ...         ('orange', 2, 5),
        ...         ('orange', 9, 14),
        ...         ('orange', 19, 140),
        ...         ('apple', 1, 1))
        >>> right = (('type', 'start', 'stop', 'value'),
        ...          ('apple', 1, 4, 'foo'),
        ...          ('apple', 3, 7, 'bar'),
        ...          ('orange', 4, 9, 'baz'))
        >>> table3 = etl.intervaljoin(left, right,
        ...                           lstart='begin', lstop='end', lkey='fruit',
        ...                           rstart='start', rstop='stop', rkey='type')
        >>> table3.lookall()
        +----------+-------+-----+----------+-------+------+-------+
        | fruit    | begin | end | type     | start | stop | value |
        +==========+=======+=====+==========+=======+======+=======+
        | 'apple'  |     1 |   2 | 'apple'  |     1 |    4 | 'foo' |
        +----------+-------+-----+----------+-------+------+-------+
        | 'apple'  |     2 |   4 | 'apple'  |     1 |    4 | 'foo' |
        +----------+-------+-----+----------+-------+------+-------+
        | 'apple'  |     2 |   4 | 'apple'  |     3 |    7 | 'bar' |
        +----------+-------+-----+----------+-------+------+-------+
        | 'apple'  |     2 |   5 | 'apple'  |     1 |    4 | 'foo' |
        +----------+-------+-----+----------+-------+------+-------+
        | 'apple'  |     2 |   5 | 'apple'  |     3 |    7 | 'bar' |
        +----------+-------+-----+----------+-------+------+-------+
        | 'orange' |     2 |   5 | 'orange' |     4 |    9 | 'baz' |
        +----------+-------+-----+----------+-------+------+-------+
    """
    # Facet keys must be supplied in pairs (or not at all); a one-sided key
    # would silently join unrelated rows.
    assert (lkey is None) == (rkey is None), \
        'facet key field must be provided for both or neither table'
    # Lazy view: no work happens until the returned table is iterated.
    return IntervalJoinView(left, right, lstart=lstart, lstop=lstop,
                            rstart=rstart, rstop=rstop, lkey=lkey,
                            rkey=rkey, include_stop=include_stop,
                            lprefix=lprefix, rprefix=rprefix)
|
Join two tables by overlapping intervals. E.g.::
>>> import petl as etl
>>> left = [['begin', 'end', 'quux'],
... [1, 2, 'a'],
... [2, 4, 'b'],
... [2, 5, 'c'],
... [9, 14, 'd'],
... [1, 1, 'e'],
... [10, 10, 'f']]
>>> right = [['start', 'stop', 'value'],
... [1, 4, 'foo'],
... [3, 7, 'bar'],
... [4, 9, 'baz']]
>>> table1 = etl.intervaljoin(left, right,
... lstart='begin', lstop='end',
... rstart='start', rstop='stop')
>>> table1.lookall()
+-------+-----+------+-------+------+-------+
| begin | end | quux | start | stop | value |
+=======+=====+======+=======+======+=======+
| 1 | 2 | 'a' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 4 | 'b' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 4 | 'b' | 3 | 7 | 'bar' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 3 | 7 | 'bar' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 4 | 9 | 'baz' |
+-------+-----+------+-------+------+-------+
>>> # include stop coordinate in intervals
... table2 = etl.intervaljoin(left, right,
... lstart='begin', lstop='end',
... rstart='start', rstop='stop',
... include_stop=True)
>>> table2.lookall()
+-------+-----+------+-------+------+-------+
| begin | end | quux | start | stop | value |
+=======+=====+======+=======+======+=======+
| 1 | 2 | 'a' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 4 | 'b' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 4 | 'b' | 3 | 7 | 'bar' |
+-------+-----+------+-------+------+-------+
| 2 | 4 | 'b' | 4 | 9 | 'baz' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 3 | 7 | 'bar' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 4 | 9 | 'baz' |
+-------+-----+------+-------+------+-------+
| 9 | 14 | 'd' | 4 | 9 | 'baz' |
+-------+-----+------+-------+------+-------+
| 1 | 1 | 'e' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
Note start coordinates are included and stop coordinates are excluded
from the interval. Use the `include_stop` keyword argument to include the
upper bound of the interval when finding overlaps.
An additional key comparison can be made, e.g.::
>>> import petl as etl
>>> left = (('fruit', 'begin', 'end'),
... ('apple', 1, 2),
... ('apple', 2, 4),
... ('apple', 2, 5),
... ('orange', 2, 5),
... ('orange', 9, 14),
... ('orange', 19, 140),
... ('apple', 1, 1))
>>> right = (('type', 'start', 'stop', 'value'),
... ('apple', 1, 4, 'foo'),
... ('apple', 3, 7, 'bar'),
... ('orange', 4, 9, 'baz'))
>>> table3 = etl.intervaljoin(left, right,
... lstart='begin', lstop='end', lkey='fruit',
... rstart='start', rstop='stop', rkey='type')
>>> table3.lookall()
+----------+-------+-----+----------+-------+------+-------+
| fruit | begin | end | type | start | stop | value |
+==========+=======+=====+==========+=======+======+=======+
| 'apple' | 1 | 2 | 'apple' | 1 | 4 | 'foo' |
+----------+-------+-----+----------+-------+------+-------+
| 'apple' | 2 | 4 | 'apple' | 1 | 4 | 'foo' |
+----------+-------+-----+----------+-------+------+-------+
| 'apple' | 2 | 4 | 'apple' | 3 | 7 | 'bar' |
+----------+-------+-----+----------+-------+------+-------+
| 'apple' | 2 | 5 | 'apple' | 1 | 4 | 'foo' |
+----------+-------+-----+----------+-------+------+-------+
| 'apple' | 2 | 5 | 'apple' | 3 | 7 | 'bar' |
+----------+-------+-----+----------+-------+------+-------+
| 'orange' | 2 | 5 | 'orange' | 4 | 9 | 'baz' |
+----------+-------+-----+----------+-------+------+-------+
|
def parse(self, rrstr):
    # type: (bytes) -> None
    '''
    Parse a Rock Ridge Parent Link record out of a string.
    Parameters:
     rrstr - The string to parse the record out of.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('PL record already initialized!')
    # The caller is assumed to have validated su_entry_version, so it is
    # unpacked here only to be discarded.
    su_len, _, block_num_le, block_num_be = struct.unpack_from('=BBLL', rrstr[:12], 2)
    if su_len != RRPLRecord.length():
        raise pycdlibexception.PyCdlibInvalidISO('Invalid length on rock ridge extension')
    # The block number is stored both-endian; mismatch means corruption.
    if utils.swab_32bit(block_num_be) != block_num_le:
        raise pycdlibexception.PyCdlibInvalidISO('Little endian block num does not equal big endian; corrupt ISO')
    self.parent_log_block_num = block_num_le
    self._initialized = True
|
Parse a Rock Ridge Parent Link record out of a string.
Parameters:
rrstr - The string to parse the record out of.
Returns:
Nothing.
|
def promote(self, cart_name):
    """
    `name` - name of cart
    Promote a cart from its current environment to the next in the chain.

    If the source and destination environments are on the same Pulp host
    and have the same signing requirement, packages are promoted via
    remote associate calls (no re-upload). Otherwise the rpms are synced
    down, signed if required, and re-uploaded to the new environment.
    """
    cart = juicer.common.Cart.Cart(cart_name=cart_name, autoload=True, autosync=True)
    old_env = cart.current_env
    cart.current_env = juicer.utils.get_next_environment(cart.current_env)
    # figure out what needs to be done to promote packages. If
    # packages are going between environments that are on the same
    # host and we don't need to sign them just associate with both
    # repos.
    if juicer.utils.env_same_host(old_env, cart.current_env) and (self.connectors[old_env].requires_signature == self.connectors[cart.current_env].requires_signature):
        juicer.utils.Log.log_info("Envs %s and %s exist on the same host, calling remote associate action" % (old_env, cart.current_env))
        juicer.utils.Log.log_info("Promoting %s from %s to %s" %
                                  (cart_name, old_env, cart.current_env))
        # iterate through packages and associate to new repo
        for repo, items in cart.iterrepos():
            query = '/repositories/%s-%s/actions/associate/' % (repo, cart.current_env)
            for item in items:
                source_repo_id = '%s-%s' % (repo, old_env)
                # Pulp associate call: copy the unit matching this filename
                # from the source repo into the destination repo.
                data = {
                    'source_repo_id': str(source_repo_id),
                    'criteria': {
                        'type_ids': ['rpm'],
                        'filters': {
                            'unit': {
                                'filename': str(item.path.split('/')[-1])
                            }
                        }
                    }
                }
                _r = self.connectors[cart.current_env].post(query, data)
                if _r.status_code != Constants.PULP_POST_ACCEPTED:
                    raise JuicerPulpError("Package association call was not accepted. Terminating!")
                else:
                    # association was accepted so publish destination repo
                    con = self.connectors[cart.current_env]
                    con.post('/repositories/%s-%s/actions/publish/' % (repo, cart.current_env), {'id': 'yum_distributor'})
                    # also update the item's remote path
                    filename = item.path.split('/')[-1]
                    item.update('%s/%s' % (juicer.utils.pulp_repo_path(con, '%s-%s' % (repo, cart.current_env)), filename))
        # we didn't bomb out yet so let the user know what's up
        juicer.utils.Log.log_info("Package association calls were accepted. Trusting that your packages existed in %s" % old_env)
        # we can save and publish here because upload does this too...
        cart.save()
        self.publish(cart)
    else:
        # Different hosts or differing signature requirements: pull the
        # rpms locally, sign if needed, and re-upload to the new env.
        juicer.utils.Log.log_debug("Syncing down rpms...")
        cart.sync_remotes()
        self.sign_cart_for_env_maybe(cart, cart.current_env)
        juicer.utils.Log.log_info("Promoting %s from %s to %s" %
                                  (cart_name, old_env, cart.current_env))
        for repo in cart.repos():
            juicer.utils.Log.log_debug("Promoting %s to %s in %s" %
                                       (cart[repo], repo, cart.current_env))
        # reiterating that upload will save and publish the cart
        self.upload(cart.current_env, cart)
|
`name` - name of cart
Promote a cart from its current environment to the next in the chain.
|
def size_of_varint(value):
    """ Number of bytes needed to encode an integer in variable-length format.
    The value is zigzag-encoded first (sign bit moved to the LSB), then the
    byte count is ceil(bits / 7), capped at the 10-byte maximum.
    """
    encoded = (value << 1) ^ (value >> 63)
    if encoded <= 0x7f:
        return 1
    # Each varint byte carries 7 payload bits; 64-bit values top out at 10.
    return min((encoded.bit_length() + 6) // 7, 10)
|
Number of bytes needed to encode an integer in variable-length format.
|
def _get_linewise_report(self):
"""
Returns a report each line of which comprises a pair of an input line
and an error. Unlike in the standard report, errors will appear as many
times as they occur.
Helper for the get_report method.
"""
d = defaultdict(list) # line: [] of errors
for error, lines in self.errors.items():
for line_num in lines:
d[line_num].append(error)
return '\n'.join([
'{:>3} → {}'.format(line, error.string)
for line in sorted(d.keys())
for error in d[line]])
|
Returns a report each line of which comprises a pair of an input line
and an error. Unlike in the standard report, errors will appear as many
times as they occur.
Helper for the get_report method.
|
def frame_parser(version=None, kind=0, extensions=None, protocols=None):
    '''Create a new :class:`FrameParser` instance.

    :param version: protocol version, the default is 13
    :param kind: the kind of parser, and integer between 0 and 3 (check the
        :class:`FrameParser` documentation for details)
    :param extensions: not used at the moment
    :param protocols: not used at the moment
    '''
    # NOTE: the old docstring documented a ``pyparser`` parameter that does
    # not exist in the signature; it has been removed.
    version = get_version(version)
    # extensions and protocols are accepted for API compatibility but are
    # not forwarded to the parser yet.
    return FrameParser(version, kind, ProtocolError, close_codes=CLOSE_CODES)
|
Create a new :class:`FrameParser` instance.
:param version: protocol version, the default is 13
:param kind: the kind of parser, and integer between 0 and 3 (check the
:class:`FrameParser` documentation for details)
:param extensions: not used at the moment
:param protocols: not used at the moment
:param pyparser: if ``True`` (default ``False``) uses the python frame
parser implementation rather than the much faster cython
implementation.
|
def read_message_bytes_from_pipe(pipe_handle):
    """
    (coroutine)
    Read message from this pipe. Return bytes.
    """
    # Overlapped (asynchronous) read: the kernel signals hEvent when a
    # pending ReadFile completes.
    overlapped = OVERLAPPED()
    overlapped.hEvent = create_event()
    try:
        # +1 leaves room for the NUL terminator written below; ctypes
        # `.value` stops at the first NUL byte.
        buff = create_string_buffer(BUFSIZE + 1)
        c_read = DWORD()  # receives the number of bytes actually read
        success = windll.kernel32.ReadFile(
            pipe_handle,
            buff,
            DWORD(BUFSIZE),
            byref(c_read),
            byref(overlapped))
        if success:
            # Fast path: the read completed synchronously.
            buff[c_read.value] = b'\0'
            # Return(...) delivers the coroutine result (trollius-style
            # generator coroutine, matching the `yield From(...)` below).
            raise Return(buff.value)
        error_code = windll.kernel32.GetLastError()
        if error_code == ERROR_IO_PENDING:
            # Read is in flight: wait for completion, then collect result.
            yield From(wait_for_event(overlapped.hEvent))
            success = windll.kernel32.GetOverlappedResult(
                pipe_handle,
                byref(overlapped),
                byref(c_read),
                BOOL(False))
            if success:
                buff[c_read.value] = b'\0'
                raise Return(buff.value)
            else:
                error_code = windll.kernel32.GetLastError()
                if error_code == ERROR_BROKEN_PIPE:
                    # The other end closed the pipe.
                    raise BrokenPipeError
                elif error_code == ERROR_MORE_DATA:
                    # Message larger than BUFSIZE: recurse for the rest and
                    # concatenate.
                    # NOTE(review): `buff.value` truncates at the first NUL;
                    # this assumes messages contain no embedded NUL bytes —
                    # confirm against the wire protocol.
                    more_data = yield From(read_message_bytes_from_pipe(pipe_handle))
                    raise Return(buff.value + more_data)
                else:
                    raise Exception(
                        'reading overlapped IO failed. error_code=%r' % error_code)
        elif error_code == ERROR_BROKEN_PIPE:
            # The other end closed the pipe.
            raise BrokenPipeError
        elif error_code == ERROR_MORE_DATA:
            # Synchronous partial read of an oversized message; recurse.
            more_data = yield From(read_message_bytes_from_pipe(pipe_handle))
            raise Return(buff.value + more_data)
        else:
            raise Exception('Reading pipe failed, error_code=%s' % error_code)
    finally:
        # Always release the event handle created above.
        windll.kernel32.CloseHandle(overlapped.hEvent)
|
(coroutine)
Read message from this pipe. Return bytes.
|
def setDragData(self, data, x=None, y=None):
    """
    Stores *data* as the drag payload associated with the (x, y) position
    of this chart item.
    :param data | <QMimeData> || None
    """
    key = (x, y)
    self._dragData[key] = data
|
Sets the drag data for this chart item to the inputed data.
:param data | <QMimeData> || None
|
def GetRandomDatetime():
    """Return a random datetime within the next week, truncated to the minute."""
    week_seconds = 60 * 60 * 24 * 7
    offset = timedelta(seconds=random.randint(0, week_seconds))
    when = datetime.today() + offset
    return when.replace(second=0, microsecond=0)
|
Return a datetime in the next week.
|
def example(script, explain, contents, requirements, output, outputfmt, details):
    """Prints the example help for the script.

    Emits a colored header, the description/requirements/output sections,
    the output format, and then each (pre, code, post) example triple.
    """
    blank()
    cprint(script.upper(), "yellow")
    cprint("=" * 70 + '\n', "yellow")
    cprint("DETAILS", "blue")
    std(explain + '\n')
    cprint(requirements, "red")
    cprint(output, "green")
    blank()
    if details != "":
        std(details)
    blank()
    cprint("OUTPUT FORMAT", "blue")
    std(outputfmt)
    blank()
    cprint("EXAMPLES", "blue")
    for index, (pre, code, post) in enumerate(contents, start=1):
        std("{}) {}".format(index, pre))
        cprint(" " + code, "cyan")
        if post != "":
            std('\n' + post)
    blank()
|
Prints the example help for the script.
|
def connect_delete_namespaced_service_proxy(self, name, namespace, **kwargs):  # noqa: E501
    """connect_delete_namespaced_service_proxy  # noqa: E501

    connect DELETE requests to proxy of Service. This method makes a
    synchronous HTTP request by default; pass ``async_req=True`` to make an
    asynchronous request instead.

    >>> thread = api.connect_delete_namespaced_service_proxy(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the ServiceProxyOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
    :return: str
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the same *_with_http_info
    # call; when async_req is set, that call returns the request thread
    # instead of the response data.
    return self.connect_delete_namespaced_service_proxy_with_http_info(name, namespace, **kwargs)  # noqa: E501
|
connect_delete_namespaced_service_proxy # noqa: E501
connect DELETE requests to proxy of Service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_delete_namespaced_service_proxy(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ServiceProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
:return: str
If the method is called asynchronously,
returns the request thread.
|
def search_node_namespace_names(graph, query, namespace):
    """Search for nodes in the given namespace(s) whose names contain the query string(s).

    :param pybel.BELGraph graph: A BEL graph
    :param query: The search query
    :type query: str or iter[str]
    :param namespace: The namespace(s) to filter
    :type namespace: str or iter[str]
    :return: An iterator over nodes whose names match the search query
    :rtype: iter
    """
    # Nodes must satisfy both predicates: namespace membership and name match.
    return filter_nodes(graph, [
        namespace_inclusion_builder(namespace),
        build_node_name_search(query),
    ])
|
Search for nodes with the given namespace(s) and whose names containing a given string(s).
:param pybel.BELGraph graph: A BEL graph
:param query: The search query
:type query: str or iter[str]
:param namespace: The namespace(s) to filter
:type namespace: str or iter[str]
:return: An iterator over nodes whose names match the search query
:rtype: iter
|
def simxLoadModel(clientID, modelPathAndName, options, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    baseHandle = ct.c_int()
    # The remote-API C layer expects the model path as bytes on Python 3.
    if (sys.version_info[0] == 3) and (type(modelPathAndName) is str):
        modelPathAndName = modelPathAndName.encode('utf-8')
    ret_code = c_LoadModel(clientID, modelPathAndName, options,
                           ct.byref(baseHandle), operationMode)
    return ret_code, baseHandle.value
|
Please have a look at the function description/documentation in the V-REP user manual
|
def acl_show(self, msg, args):
    """Show current allow and deny blocks for the given acl."""
    name = args[0] if len(args) > 0 else None
    if name is None:
        # No acl requested: list everything we know about.
        return "%s: The following ACLs are defined: %s" % (
            msg.user, ', '.join(self._acl.keys()))
    if name not in self._acl:
        return "Sorry, couldn't find an acl named '%s'" % name
    entry = self._acl[name]
    lines = [
        "%s: ACL '%s' is defined as follows:" % (msg.user, name),
        "allow: %s" % ', '.join(entry['allow']),
        "deny: %s" % ', '.join(entry['deny']),
    ]
    return '\n'.join(lines)
|
Show current allow and deny blocks for the given acl.
|
def set_scale(self, xscale=None, yscale=None, zscale=None, reset_camera=True):
    """Scale all datasets in the active renderer's scene.

    Each axis is scaled independently; a scale of zero is illegal and will
    be replaced with one.

    Parameters
    ----------
    xscale : float, optional
        Scaling of the x axis. Must be greater than zero.
    yscale : float, optional
        Scaling of the y axis. Must be greater than zero.
    zscale : float, optional
        Scaling of the z axis. Must be greater than zero.
    reset_camera : bool, optional
        Resets camera so all actors can be seen.
    """
    # Delegate directly to the renderer, which owns the scaling logic.
    self.renderer.set_scale(xscale, yscale, zscale, reset_camera)
|
Scale all the datasets in the scene of the active renderer.
Scaling in performed independently on the X, Y and Z axis.
A scale of zero is illegal and will be replaced with one.
Parameters
----------
xscale : float, optional
Scaling of the x axis. Must be greater than zero.
yscale : float, optional
Scaling of the y axis. Must be greater than zero.
zscale : float, optional
Scaling of the z axis. Must be greater than zero.
reset_camera : bool, optional
Resets camera so all actors can be seen.
|
def generate_message_doc(message_descriptor, locations, path, name_prefix=''):
    """Generate docs for message and nested messages and enums.

    Args:
        message_descriptor: descriptor_pb2.DescriptorProto instance for message
            to generate docs for.
        locations: Dictionary of location paths tuples to
            descriptor_pb2.SourceCodeInfo.Location instances.
        path: Path tuple to the message definition.
        name_prefix: Optional prefix for this message's name.
    """
    # message_type is 4
    prefixed_name = name_prefix + message_descriptor.name
    print(make_subsection(prefixed_name))
    # Emit the comment attached above the message definition, if present.
    location = locations[path]
    if location.HasField('leading_comments'):
        print(textwrap.dedent(location.leading_comments))
    row_tuples = []
    for field_index, field in enumerate(message_descriptor.field):
        # Path component 2 is DescriptorProto's `field` tag number, so
        # this addresses the source location of each individual field.
        field_location = locations[path + (2, field_index)]
        # 11 (TYPE_MESSAGE) and 14 (TYPE_ENUM) reference named types
        # (per descriptor.proto); link to them instead of printing a
        # scalar type name.
        if field.type not in [11, 14]:
            type_str = TYPE_TO_STR[field.type]
        else:
            # type_name is fully qualified with a leading '.'; strip it
            # before building the link target.
            type_str = make_link(field.type_name.lstrip('.'))
        row_tuples.append((
            make_code(field.name),
            field.number,
            type_str,
            LABEL_TO_STR[field.label],
            # INFINITY is presumably a very large wrap width so the
            # description stays on a single table row — TODO confirm.
            textwrap.fill(get_comment_from_location(field_location), INFINITY),
        ))
    print_table(('Field', 'Number', 'Type', 'Label', 'Description'),
                row_tuples)
    # Generate nested messages
    # Path component 3 is DescriptorProto's `nested_type` tag number.
    nested_types = enumerate(message_descriptor.nested_type)
    for index, nested_message_desc in nested_types:
        generate_message_doc(nested_message_desc, locations,
                             path + (3, index),
                             name_prefix=prefixed_name + '.')
    # Generate nested enums
    # Path component 4 is DescriptorProto's `enum_type` tag number.
    for index, nested_enum_desc in enumerate(message_descriptor.enum_type):
        generate_enum_doc(nested_enum_desc, locations, path + (4, index),
                          name_prefix=prefixed_name + '.')
|
Generate docs for message and nested messages and enums.
Args:
message_descriptor: descriptor_pb2.DescriptorProto instance for message
to generate docs for.
locations: Dictionary of location paths tuples to
descriptor_pb2.SourceCodeInfo.Location instances.
path: Path tuple to the message definition.
name_prefix: Optional prefix for this message's name.
|
def check_application_state(self, request, callback):
    """Check the optional OAuth ``state`` parameter.

    Compares the state value stored in the session against the one returned
    by the provider (presumably CSRF protection for the redirect flow —
    confirm against the caller). Returns True only when both are present
    and match in constant time.
    """
    stored = request.session.get(self.session_key, None)
    returned = request.GET.get('state', None)
    if stored is None:
        # Fixed typo in the original log message ("sesssion").
        logger.error('No state stored in the session.')
        return False
    if returned is None:
        logger.error('No state parameter returned by the provider.')
        return False
    # Constant-time comparison avoids leaking match information via timing.
    return constant_time_compare(stored, returned)
|
Check optional state parameter.
|
def roll_out_and_store(self, batch_info):
        """ Roll out environment and store result in the replay buffer """
        # Experience is collected with the model in training mode.
        self.model.train()
        if self.env_roller.is_ready_for_sampling():
            # Replay buffer already holds enough data: one rollout suffices.
            rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps).to_device(self.device)
            # Store some information about the rollout, no training phase
            batch_info['frames'] = rollout.frames()
            batch_info['episode_infos'] = rollout.episode_information()
        else:
            # Buffer is below its initial size: keep rolling out until the
            # roller reports it is ready, showing progress with tqdm.
            frames = 0
            episode_infos = []
            with tqdm.tqdm(desc="Populating memory", total=self.env_roller.initial_memory_size_hint()) as pbar:
                while not self.env_roller.is_ready_for_sampling():
                    rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps).to_device(self.device)
                    new_frames = rollout.frames()
                    frames += new_frames
                    episode_infos.extend(rollout.episode_information())
                    pbar.update(new_frames)
            # Store some information about the rollout, no training phase
            batch_info['frames'] = frames
            batch_info['episode_infos'] = episode_infos
|
Roll out environment and store result in the replay buffer
|
def get_cookie_jar(self):
    """Return the LWP cookie jar backing this client, creating its file
    with owner-only permissions on first use."""
    path = self._get_cookie_file()
    jar = LWPCookieJar(path)
    if not os.path.exists(path):
        safe_mkdir_for(path)
        # Persist an empty jar first so the file permissions can be
        # tightened before any real cookie data is written to it.
        with self._lock:
            jar.save()
            os.chmod(path, 0o600)
    else:
        jar.load()
    return jar
|
Returns our cookie jar.
|
def save_to_internal(self, data):
    """Serialize ``data`` to ``self.location_internal``.

    Uses pickle or hickle depending on ``self.filetype``.

    :param data: object to serialize.
    :raises ValueError: if ``self.filetype`` is neither "pickle" nor
        "hickle".
    """
    # Compare with == rather than `is`: identity of equal string literals
    # is a CPython interning accident, not a language guarantee.
    if self.filetype == "pickle":
        # Context manager ensures the handle is closed (the original
        # open() result was never closed).
        with open(self.location_internal, "wb") as handle:
            pickle.dump(data, handle)
    elif self.filetype == "hickle":
        import hickle
        with open(self.location_internal, "wb") as handle:
            hickle.dump(data, handle)
    else:
        raise ValueError(
            "Invalid filetype {} (must be {} or {})".format(
                self.filetype, "pickle", "hickle"
            )
        )
|
save
|
def make_int(value, missing=-1):
    """Convert *value* to an int.

    Blank/whitespace-only strings and ``None`` map to *missing*
    (default -1); anything else is passed to ``int()`` and may raise
    ``ValueError``/``TypeError`` as usual.
    """
    # Replaced third-party six.string_types with stdlib str (identical on
    # Python 3, where six.string_types == (str,)).
    if isinstance(value, str):
        # Empty or whitespace-only strings stand for "no value".
        if not value.strip():
            return missing
    elif value is None:
        return missing
    return int(value)
|
Convert string value to long, '' to missing
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.