code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def _to_sqlite3_by_table(self, conn, table_name):
"""
Saves the sequence to the specified table of sqlite3 database.
Each element can be a dictionary, namedtuple, tuple or list.
Target table must be created in advance.
:param conn: path or sqlite connection, cursor
:param table_name: table name string
"""
def _insert_item(item):
if isinstance(item, dict):
cols = ', '.join(item.keys())
placeholders = ', '.join('?' * len(item))
sql = 'INSERT INTO {} ({}) VALUES ({})'.format(table_name, cols, placeholders)
conn.execute(sql, tuple(item.values()))
elif is_namedtuple(item):
cols = ', '.join(item._fields)
placeholders = ', '.join('?' * len(item))
sql = 'INSERT INTO {} ({}) VALUES ({})'.format(table_name, cols, placeholders)
conn.execute(sql, item)
elif isinstance(item, (list, tuple)):
placeholders = ', '.join('?' * len(item))
sql = 'INSERT INTO {} VALUES ({})'.format(table_name, placeholders)
conn.execute(sql, item)
else:
raise TypeError('item must be one of dict, namedtuple, tuple or list got {}'
.format(type(item)))
self.for_each(_insert_item)
|
Saves the sequence to the specified table of sqlite3 database.
Each element can be a dictionary, namedtuple, tuple or list.
Target table must be created in advance.
:param conn: path or sqlite connection, cursor
:param table_name: table name string
|
def add_to_recent(self, notebook):
    """
    Add an entry to the recent notebooks.

    Only the 20 most recent notebooks are kept; a notebook that is
    already present is left where it is.
    """
    if notebook in self.recent_notebooks:
        return
    self.recent_notebooks.insert(0, notebook)
    self.recent_notebooks = self.recent_notebooks[:20]
|
Add an entry to recent notebooks.
We only maintain the list of the 20 most recent notebooks.
|
def create(self, width, height):
    """Create a new blank image with this type's mode.

    Parameters
    ----------
    width: `int`
        Image width.
    height: `int`
        Image height.

    Returns
    -------
    `PIL.Image.Image`
    """
    size = (width, height)
    return Image.new(self.mode, size)
|
Create an image of type.
Parameters
----------
width: `int`
Image width.
height: `int`
Image height.
Returns
-------
`PIL.Image.Image`
|
def call_from_executor(self, callback, _max_postpone_until=None):
    """
    Schedule ``callback`` to be called in the main event loop.
    Similar to Twisted's ``callFromThread``.

    :param _max_postpone_until: `None` or `time.time` value. For internal
        use. If the eventloop is saturated, consider this task to be low
        priority and postpone maximum until this timestamp. (For instance,
        repaint is done using low priority.)
    """
    assert _max_postpone_until is None or isinstance(_max_postpone_until, float)
    self._calls_from_executor.append((callback, _max_postpone_until))

    if not self._schedule_pipe:
        return
    try:
        # Wake up the event loop by writing a byte into the pipe.
        os.write(self._schedule_pipe[1], b'x')
    except (AttributeError, IndexError, OSError):
        # Handle race condition. We're in a different thread:
        # - `_schedule_pipe` could have become None in the meantime.
        # - OSError (BrokenPipeError) if the main thread already closed
        #   the pipe.
        pass
|
Call this function in the main event loop.
Similar to Twisted's ``callFromThread``.
:param _max_postpone_until: `None` or `time.time` value. For internal
use. If the eventloop is saturated, consider this task to be low
priority and postpone maximum until this timestamp. (For instance,
repaint is done using low priority.)
|
def GetTransPosition(df, field, dic, refCol="transcript_id"):
    """
    Maps a genome position to a transcript position.

    :param df: a pandas row/Series (or mapping) holding the genomic position
    :param field: the header of the column containing the genomic position
    :param dic: a dictionary containing for each transcript the respective
        bases, eg. {'ENST23923910': '234,235,236,1021,..'}
    :param refCol: header of the reference column with IDs, eg. 'transcript_id'
    :returns: 1-based position on the transcript, or ``np.nan`` when the
        transcript is unknown or the position is not on it
    """
    try:
        gen = str(int(df[field]))
        transid = df[refCol]
        # dic.get() yields None for unknown transcripts -> AttributeError;
        # .index() raises ValueError when the base is absent.
        bases = dic.get(transid).split(",")
        bases = bases.index(gen) + 1
    except (KeyError, ValueError, TypeError, AttributeError):
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        bases = np.nan
    return bases
|
Maps a genome position to a transcript position.
:param df: a Pandas dataframe
:param field: the head of the column containing the genomic position
:param dic: a dictionary containing for each transcript the respective bases eg. {ENST23923910:'234,235,236,1021,..'}
:param refCol: header of the reference column with IDs, eg. 'transcript_id'
:returns: position on transcript
|
def onCancelButton(self, event):
    """
    Quit the grid, with a confirmation dialog if unsaved changes are present.

    :param event: wx event that triggered the handler (unused)
    """
    if self.grid.changes:
        dlg1 = wx.MessageDialog(self, caption="Message:",
                                message="Are you sure you want to exit this grid?\nYour changes will not be saved.\n ",
                                style=wx.OK|wx.CANCEL)
        result = dlg1.ShowModal()
        # BUG FIX: destroy the dialog on both paths; the original only
        # destroyed it when the user pressed OK, leaking it on Cancel.
        dlg1.Destroy()
        if result == wx.ID_OK:
            self.Destroy()
    else:
        self.Destroy()
    if self.main_frame:
        self.main_frame.Show()
        self.main_frame.Raise()
|
Quit grid with warning if unsaved changes present
|
def examples_section(doc, header_level):
    """
    Generate markdown for an Examples section.

    Parameters
    ----------
    doc : dict
        Dict from numpydoc.
    header_level : int
        Number of ``#`` used for the parent header; the section header
        uses one more.

    Returns
    -------
    list of str
        Markdown for the examples section (empty when there are none).
    """
    lines = []
    # Note: the original also built "\n".join(doc["Examples"]) into an
    # unused local; that dead work is removed.
    if "Examples" in doc and len(doc["Examples"]) > 0:
        lines.append(f"{'#'*(header_level+1)} Examples \n")
        lines += mangle_examples(doc["Examples"])
    return lines
|
Generate markdown for Examples section.
Parameters
----------
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
|
def get_inflators_cn_to_cn(target_year):
    '''
    Compute the ageing inflators from national-accounts (comptabilité
    nationale) aggregate masses, as the ratio of the target year's
    aggregates to the nearest available data year's aggregates.
    '''
    data_year = find_nearest_inferior(data_years, target_year)
    base_aggregates = get_cn_aggregates(
        data_year)['consoCN_COICOP_{}'.format(data_year)].to_dict()
    target_aggregates = get_cn_aggregates(
        target_year)['consoCN_COICOP_{}'.format(target_year)].to_dict()
    return {key: target_aggregates[key] / base_aggregates[key]
            for key in base_aggregates}
|
Calcule l'inflateur de vieillissement à partir des masses de comptabilité nationale.
|
def _get_body_instance(self):
    """Return an empty body instance matching ``self.multipart_type``."""
    # Normalize a raw UBInt16 into its enum member first.
    if isinstance(self.multipart_type, UBInt16):
        self.multipart_type = self.multipart_type.enum_ref(
            self.multipart_type.value)

    simple_body = {
        MultipartType.OFPMP_FLOW: FlowStatsRequest,
        MultipartType.OFPMP_AGGREGATE: AggregateStatsRequest,
        MultipartType.OFPMP_PORT_STATS: PortStatsRequest,
        MultipartType.OFPMP_QUEUE: QueueStatsRequest,
        MultipartType.OFPMP_GROUP: GroupStatsRequest,
        MultipartType.OFPMP_METER: MeterMultipartRequest,
        MultipartType.OFPMP_EXPERIMENTER: ExperimenterMultipartHeader
    }
    array_of_bodies = {MultipartType.OFPMP_TABLE_FEATURES: TableFeatures}

    pyof_class = simple_body.get(self.multipart_type)
    if pyof_class:
        return pyof_class()

    array_of_class = array_of_bodies.get(self.multipart_type)
    if array_of_class:
        return FixedTypeList(pyof_class=array_of_class)

    # Unknown multipart type: fall back to opaque binary data.
    return BinaryData(b'')
|
Return the body instance.
|
def _sumLists(a, b):
"""
Algorithm to check validity of NBI and NIF.
Receives string with a umber to validate.
"""
val = 0
for i in map(lambda a, b: a * b, a, b):
val += i
return val
|
Algorithm to check validity of NBI and NIF.
Receives a string with a number to validate.
|
def add_handler_spec(f, handler_spec, *, kwargs=None):
    """
    Attach a handler specification (see :class:`HandlerSpec`) to a function.

    :param f: Function to attach the handler specification to.
    :param handler_spec: Handler specification to attach to the function.
    :type handler_spec: :class:`HandlerSpec`
    :param kwargs: additional keyword arguments passed to the function
        carried in the handler spec.
    :type kwargs: :class:`dict`
    :raises ValueError: if the handler was registered with different
        `kwargs` before

    This uses a private attribute, whose exact name is an implementation
    detail. The `handler_spec` is stored in a :class:`dict` bound to the
    attribute.

    .. versionadded:: 0.11

       The `kwargs` argument. If two handlers with the same spec, but
       different arguments are registered for one function, an error
       will be raised. So you should always include all possible
       arguments, this is the responsibility of the calling decorator.
    """
    registry = automake_magic_attr(f)
    kwargs = {} if kwargs is None else kwargs
    # setdefault stores kwargs for a new spec and returns the previously
    # stored mapping for an existing one; a mismatch means conflicting
    # registrations.
    existing = registry.setdefault(handler_spec, kwargs)
    if existing != kwargs:
        raise ValueError(
            "The additional keyword arguments to the handler are incompatible")
|
Attach a handler specification (see :class:`HandlerSpec`) to a function.
:param f: Function to attach the handler specification to.
:param handler_spec: Handler specification to attach to the function.
:type handler_spec: :class:`HandlerSpec`
:param kwargs: additional keyword arguments passed to the function
carried in the handler spec.
:type kwargs: :class:`dict`
:raises ValueError: if the handler was registered with
different `kwargs` before
This uses a private attribute, whose exact name is an implementation
detail. The `handler_spec` is stored in a :class:`dict` bound to the
attribute.
.. versionadded:: 0.11
The `kwargs` argument. If two handlers with the same spec, but
different arguments are registered for one function, an error
will be raised. So you should always include all possible
arguments, this is the responsibility of the calling decorator.
|
def pick_four_unique_nodes_quickly(n, seed=None):
    '''
    Draw four pairwise-distinct node indices from ``range(n)``.

    This is equivalent to np.random.choice(n, 4, replace=False) but much
    faster: a single random integer in [0, n**4) encodes four candidate
    indices, and we resample on the (rare) collision.

    Parameters
    ----------
    n : int
        Number of nodes to pick from; must be at least 4.
    seed : None, int, or random-state object accepted by ``get_rng``

    Returns
    -------
    tuple of int
        Four distinct indices.
    '''
    rng = get_rng(seed)
    # Loop instead of recursing: the original recursion passed the live
    # RandomState back into the `seed` parameter, and recursion depth,
    # while astronomically unlikely to matter, is simply unnecessary.
    # Collisions are extremely rare unless n is tiny, and for tiny n a
    # redraw is cheap anyway.
    while True:
        k = rng.randint(n**4)
        a = k % n
        b = k // n % n
        c = k // n ** 2 % n
        d = k // n ** 3 % n
        if len({a, b, c, d}) == 4:
            return (a, b, c, d)
|
This is equivalent to np.random.choice(n, 4, replace=False)
Another fellow suggested np.random.random_sample(n).argpartition(4) which is
clever but still substantially slower.
|
def get_listeners(self, event_type: str) -> List[Callable]:
    """Return every listener registered for ``event_type``.

    :raises ValueError: if the event type is not known to the system.
    """
    if event_type in self.events:
        return self.events.get_listeners(event_type)
    raise ValueError(f'No event {event_type} in system.')
|
Get all listeners of a particular type of event.
|
def plotEzJz(self,*args,**kwargs):
    """
    NAME:
       plotEzJz
    PURPOSE:
       plot E_z(.)/sqrt(dens(R)) along the orbit
    INPUT:
       pot= Potential instance or list of instances in which the orbit was
            integrated
       d1= - plot Ez vs d1: e.g., 't', 'z', 'R', 'vR', 'vT', 'vz'
       +bovy_plot.bovy_plot inputs
    OUTPUT:
       figure to output device
    HISTORY:
       2010-08-08 - Written - Bovy (NYU)
    """
    # Axis labels for every supported abscissa choice.
    labeldict= {'t':r'$t$','R':r'$R$','vR':r'$v_R$','vT':r'$v_T$',
                'z':r'$z$','vz':r'$v_z$','phi':r'$\phi$',
                'x':r'$x$','y':r'$y$','vx':r'$v_x$','vy':r'$v_y$'}
    # Use the potential the orbit was integrated in unless pot= is given.
    if not 'pot' in kwargs:
        try:
            pot= self._pot
        except AttributeError:
            raise AttributeError("Integrate orbit first or specify pot=")
    else:
        pot= kwargs.pop('pot')
    d1= kwargs.pop('d1','t')
    # E_z = Phi(R,z) - Phi(R,0) + vz^2/2, normalized by sqrt(rho(R,0));
    # orbit columns used below: 0=R, 1=vR, 2=vT, 3=z, 4=vz, 5=phi.
    self.EzJz= [(evaluatePotentials(pot,self.orbit[ii,0],self.orbit[ii,3],
                                    t=self.t[ii],use_physical=False)-
                 evaluatePotentials(pot,self.orbit[ii,0],0.,
                                    phi= self.orbit[ii,5],t=self.t[ii],
                                    use_physical=False)+
                 self.orbit[ii,4]**2./2.)/\
                nu.sqrt(evaluateDensities(pot,self.orbit[ii,0],0.,
                                          phi=self.orbit[ii,5],
                                          t=self.t[ii],
                                          use_physical=False))\
                for ii in range(len(self.t))]
    if not 'xlabel' in kwargs:
        kwargs['xlabel']= labeldict[d1]
    if not 'ylabel' in kwargs:
        kwargs['ylabel']= r'$E_z/\sqrt{\rho}$'
    # Plot E_z normalized to its initial value against the chosen variable.
    if d1 == 't':
        return plot.bovy_plot(nu.array(self.t),
                              nu.array(self.EzJz)/self.EzJz[0],
                              *args,**kwargs)
    elif d1 == 'z':
        return plot.bovy_plot(self.orbit[:,3],
                              nu.array(self.EzJz)/self.EzJz[0],
                              *args,**kwargs)
    elif d1 == 'R':
        return plot.bovy_plot(self.orbit[:,0],
                              nu.array(self.EzJz)/self.EzJz[0],
                              *args,**kwargs)
    elif d1 == 'vR':
        return plot.bovy_plot(self.orbit[:,1],
                              nu.array(self.EzJz)/self.EzJz[0],
                              *args,**kwargs)
    elif d1 == 'vT':
        return plot.bovy_plot(self.orbit[:,2],
                              nu.array(self.EzJz)/self.EzJz[0],
                              *args,**kwargs)
    elif d1 == 'vz':
        return plot.bovy_plot(self.orbit[:,4],
                              nu.array(self.EzJz)/self.EzJz[0],
                              *args,**kwargs)
|
NAME:
plotEzJz
PURPOSE:
plot E_z(.)/sqrt(dens(R)) along the orbit
INPUT:
pot= Potential instance or list of instances in which the orbit was
integrated
d1= - plot Ez vs d1: e.g., 't', 'z', 'R', 'vR', 'vT', 'vz'
+bovy_plot.bovy_plot inputs
OUTPUT:
figure to output device
HISTORY:
2010-08-08 - Written - Bovy (NYU)
|
def get_adapter_for_persistent_model(self, persistent_model, rest_model=None):
    """
    Look up the model adapter registered for a persistent model.

    :param persistent_model: instance of persistent model
    :param rest_model: specific REST model, or None for the default adapter
    :return: the matching model adapter
    :rtype: ModelAdapter
    :raises TypeError: when no adapter is registered for the model
    """
    persistent_signature = self.generate_signature(persistent_model)
    sub_map = self._persistent_map.get(persistent_signature)
    if sub_map is not None:
        if rest_model is None:
            # No REST model specified: hand back the registered default.
            return sub_map[self.DEFAULT_REST_ADAPTER]
        rest_sig = self.generate_signature(rest_model)
        if rest_sig in sub_map:
            return sub_map[rest_sig]
    raise TypeError("No registered Data Adapter for class %s" % persistent_signature)
|
:param persistent_model: instance of persistent model
:param rest_model: specific REST model
:return: the matching model adapter
:rtype: ModelAdapter
|
async def expand_now(self, building: UnitTypeId=None, max_distance: Union[int, float]=10, location: Optional[Point2]=None):
    """Take a new expansion.

    Uses the race's townhall type when ``building`` is not given and
    builds it at ``location`` (or the next free expansion slot).
    """
    if not building:
        # self.race is never Race.Random
        townhalls = {
            Race.Protoss: UnitTypeId.NEXUS,
            Race.Terran: UnitTypeId.COMMANDCENTER,
            Race.Zerg: UnitTypeId.HATCHERY,
        }
        building = townhalls[self.race]
    assert isinstance(building, UnitTypeId)
    if not location:
        location = await self.get_next_expansion()
    if not location:
        # No free expansion slot found; nothing to build.
        return
    await self.build(building, near=location, max_distance=max_distance, random_alternative=False, placement_step=1)
|
Takes new expansion.
|
def uri_tree_encode(uri_tree, type_host = HOST_REG_NAME):
    """
    Percent/Query encode a raw URI tree.

    :param uri_tree: (scheme, authority, path, query, fragment) tuple,
        where authority is a (user, passwd, host, port) tuple or a false
        value, and query is an iterable of (key, value) pairs.
    :param type_host: host type; only reg-name hosts are percent-encoded,
        other host types (e.g. IP literals) are left untouched.
    :return: tuple of the same shape with each present component encoded.
    """
    scheme, authority, path, query, fragment = uri_tree
    if authority:
        user, passwd, host, port = authority
        if user:
            user = pct_encode(user, USER_ENCDCT)
        if passwd:
            passwd = pct_encode(passwd, PASSWD_ENCDCT)
        if host and type_host == HOST_REG_NAME:
            host = pct_encode(host, REG_NAME_ENCDCT)
        # NOTE: `long` implies this module targets Python 2.
        if isinstance(port, (int, long)):
            port = str(port)
        authority = (user, passwd, host, port)
    if path:
        path = pct_encode(path, P_ENCDCT)
        if (not authority) and (not scheme):
            # check for path-noscheme special case (RFC 3986): a ':' in the
            # first segment of a relative reference could be mistaken for a
            # scheme separator, so it has to be escaped.
            sppath = path.split('/', 1)
            if ':' in sppath[0]:
                sppath[0] = sppath[0].replace(':', '%3A')
                path = '/'.join(sppath)
    if query:
        query = tuple([(query_elt_encode(x, QUERY_KEY_ENCDCT),
                        query_elt_encode(y, QUERY_VAL_ENCDCT)) for (x, y) in query])
    if fragment:
        fragment = pct_encode(fragment, FRAG_ENCDCT)
    return (scheme, authority, path, query, fragment)
|
Percent/Query encode a raw URI tree.
|
def get_ip(request):
    """Return the client IP address taken from the ``HTTP_X_FORWARDED_FOR``
    header of `request`.

    The result can be overridden by the `LOCAL_GEOLOCATION_IP` variable in
    the `conf` module. Local IPs (starting with 10. or equal to 127.0.0.1)
    are skipped; ``UNKNOWN_IP`` is returned when nothing usable is found.
    """
    override = getsetting('LOCAL_GEOLOCATION_IP')
    if override:
        return override
    forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded_for:
        for candidate in forwarded_for.split(','):
            candidate = candidate.strip()
            if candidate != '127.0.0.1' and not candidate.startswith('10.'):
                return candidate
    return UNKNOWN_IP
|
Return the IP address inside the HTTP_X_FORWARDED_FOR var inside
the `request` object.
The return of this function can be overridden by the
`LOCAL_GEOLOCATION_IP` variable in the `conf` module.
This function will skip local IPs (starting with 10. and equals to
127.0.0.1).
|
def search_in_dirs(fname, search_dpaths=None, shortcircuit=True,
                   return_tried=False, strict=False):
    r"""
    Search a list of directories for a file name.

    Args:
        fname (str): file name
        search_dpaths (list): directories to search (default: none)
        shortcircuit (bool): return the first match instead of all matches
        return_tried (bool): also return the list of tried paths
        strict (bool): raise if nothing is found (default = False)

    Returns:
        fpath: first matching path or None when ``shortcircuit`` is True,
        otherwise the list of all matching paths; when ``return_tried``
        is True, a ``(result, tried_list)`` tuple is returned instead.

    Example:
        >>> # DISABLE_DOCTEST
        >>> import utool as ut
        >>> fname = 'Inno Setup 5\ISCC.exe'
        >>> search_dpaths = ut.get_install_dirs()
        >>> shortcircuit = True
        >>> fpath = ut.search_in_dirs(fname, search_dpaths, shortcircuit)
        >>> print(fpath)
    """
    # BUG FIX: the default was a mutable list literal; use None instead.
    if search_dpaths is None:
        search_dpaths = []
    fpath_list = []
    tried_list = []
    for dpath in search_dpaths:
        fpath = join(dpath, fname)
        if return_tried:
            tried_list.append(fpath)
        if exists(fpath):
            if shortcircuit:
                if return_tried:
                    return fpath, tried_list
                return fpath
            else:
                fpath_list.append(fpath)
    if strict and len(fpath_list) == 0:
        msg = ('Cannot find: fname=%r\n' % (fname,))
        if return_tried:
            msg += 'Tried: \n    ' + '\n    '.join(tried_list)
        raise Exception(msg)
    if shortcircuit:
        if return_tried:
            return None, tried_list
        return None
    else:
        if return_tried:
            return fpath_list, tried_list
        return fpath_list
|
search_in_dirs
Args:
fname (str): file name
search_dpaths (list):
shortcircuit (bool):
return_tried (bool): return tried paths
strict (bool): (default = False)
Returns:
fpath: None
Example:
>>> # DISABLE_DOCTEST
>>> import utool as ut
>>> fname = 'Inno Setup 5\ISCC.exe'
>>> search_dpaths = ut.get_install_dirs()
>>> shortcircuit = True
>>> fpath = ut.search_in_dirs(fname, search_dpaths, shortcircuit)
>>> print(fpath)
|
def psql_csv_run(sql_command, error_handler=None):
    """
    Runs psql and returns a CSVReader object from the query

    This CSVReader includes header names as the first record in all
    situations. The output is fully buffered into Python.

    :param sql_command: SQL query text (without a trailing semicolon)
    :param error_handler: optional callable invoked with the psql process
        on non-zero exit; it must raise. When omitted, a UserException is
        raised instead.
    :return: csv.reader over the buffered query output
    """
    csv_query = ('COPY ({query}) TO STDOUT WITH CSV HEADER;'
                 .format(query=sql_command))
    new_env = os.environ.copy()
    new_env.setdefault('PGOPTIONS', '')
    new_env["PGOPTIONS"] += ' --statement-timeout=0'
    psql_proc = popen_nonblock([PSQL_BIN, '-d', 'postgres', '--no-password',
                                '--no-psqlrc', '-c', csv_query],
                               stdout=PIPE,
                               env=new_env)
    stdout = psql_proc.communicate()[0].decode('utf-8')
    if psql_proc.returncode != 0:
        if error_handler is not None:
            error_handler(psql_proc)
        else:
            # BUG FIX: the original called .format(sql_command) with a
            # named {query} placeholder, raising KeyError instead of the
            # intended UserException.
            raise UserException(
                'could not csv-execute a query successfully via psql',
                'Query was "{query}".'.format(query=sql_command),
                'You may have to set some libpq environment '
                'variables if you are sure the server is running.')
        # Previous code must raise any desired exceptions for non-zero
        # exit codes
        assert psql_proc.returncode == 0
    # Fake enough iterator interface to get a CSV Reader object
    # that works.
    return csv.reader(iter(stdout.strip().split('\n')))
|
Runs psql and returns a CSVReader object from the query
This CSVReader includes header names as the first record in all
situations. The output is fully buffered into Python.
|
def release(self, tid, fh):
    """
    Close file. Descriptor is removed from ``self.fds``.

    Parameters
    ----------
    tid : str
        Path to file. Ignored.
    fh : int
        File descriptor to release.

    Returns
    -------
    int
        Always 0 on success.

    Raises
    ------
    FuseOSError
        With ``errno.EBADF`` when ``fh`` is not a known descriptor.
    """
    try:
        try:
            # Detach any handler attached to this descriptor; plain
            # descriptors without one raise AttributeError, which is fine.
            self.fds[fh].unregisterHandler(fh)
        except AttributeError:
            pass
        # An unknown fh surfaces as KeyError (either on the lookup above
        # or on this del) and is translated to EBADF below.
        del self.fds[fh]
    except KeyError:
        raise FuseOSError(errno.EBADF)
    return 0
|
Close file. Descriptor is removed from ``self.fds``.
Parameters
----------
tid : str
Path to file. Ignored.
fh : int
File descriptor to release.
|
def containsUid(self, uid):
    '''
    containsUid - Check whether #uid (unique internal ID) is this node's
      uid or appears anywhere in this node's subtree.

    @param uid <uuid.UUID> - uuid to check

    @return <bool> - True if #uid is this node's uid, or is the uid of any
      children at any level down
    '''
    if self.uid == uid:
        return True
    # Recurse into the children; any() stops at the first match.
    return any(child.containsUid(uid) for child in self.children)
|
containsUid - Check if the uid (unique internal ID) appears anywhere as a direct child to this node, or the node itself.
@param uid <uuid.UUID> - uuid to check
@return <bool> - True if #uid is this node's uid, or is the uid of any children at any level down
|
def _post_md5_skip_on_check(self, key, filename, size, md5_match):
    # type: (Downloader, str, str, int, bool) -> None
    """Perform post MD5 skip on check

    On an MD5 match the file is removed from the transfer bookkeeping
    (nothing to download); on a mismatch it is queued for download.

    :param Downloader self: this
    :param str key: md5 map key
    :param str filename: local filename
    :param int size: size of checked data
    :param bool md5_match: if MD5 matches
    """
    # Remove the entry from the MD5 map under its own lock; the returned
    # remote-file record is used for all bookkeeping below.
    with self._md5_meta_lock:
        rfile = self._md5_map.pop(key)
    lpath = pathlib.Path(filename)
    if md5_match:
        # Match: drop the transfer from the totals so progress reporting
        # stays accurate; fall back to the on-disk size when not given.
        if size is None:
            size = lpath.stat().st_size
        with self._transfer_lock:
            self._transfer_set.remove(
                blobxfer.operations.download.Downloader.
                create_unique_transfer_operation_id(rfile))
            self._download_total -= 1
            self._download_bytes_total -= size
        if self._general_options.dry_run:
            logger.info('[DRY RUN] MD5 match, skipping: {} -> {}'.format(
                rfile.path, lpath))
    else:
        if self._general_options.dry_run:
            # Dry run: log the would-be download but still undo the
            # bookkeeping since no transfer will actually happen.
            # NOTE(review): unlike the match branch, `size` is not
            # defaulted from stat() here — presumably size is never None
            # on a mismatch; confirm with callers.
            with self._transfer_lock:
                self._transfer_set.remove(
                    blobxfer.operations.download.Downloader.
                    create_unique_transfer_operation_id(rfile))
                self._download_total -= 1
                self._download_bytes_total -= size
            logger.info(
                '[DRY RUN] MD5 mismatch, download: {} -> {}'.format(
                    rfile.path, lpath))
        else:
            self._add_to_download_queue(lpath, rfile)
|
Perform post MD5 skip on check
:param Downloader self: this
:param str key: md5 map key
:param str filename: local filename
:param int size: size of checked data
:param bool md5_match: if MD5 matches
|
def updateEvolution(self):
    '''
    Update the "population punk proportion" evolution array.

    Fashion victims believe that the proportion of punks in the subsequent
    period is a linear function of the proportion of punks this period,
    subject to a uniform shock. Given attributes of self pNextIntercept,
    pNextSlope, pNextCount, pNextWidth, and pGrid, this method generates a
    new array for the attribute pEvolution, representing a discrete
    approximation of next period states for each current period state in
    pGrid.

    Parameters
    ----------
    none

    Returns
    -------
    none
    '''
    self.pEvolution = np.zeros((self.pCount, self.pNextCount))
    for j in range(self.pCount):
        pNow = self.pGrid[j]
        # Linear forecast of next period's punk proportion ...
        center = self.pNextIntercept + self.pNextSlope * pNow
        # ... discretized as a uniform distribution around the forecast.
        dist = approxUniform(N=self.pNextCount,
                             bot=center - self.pNextWidth,
                             top=center + self.pNextWidth)[1]
        self.pEvolution[j, :] = dist
|
Updates the "population punk proportion" evolution array. Fashion victims
believe that the proportion of punks in the subsequent period is a linear
function of the proportion of punks this period, subject to a uniform
shock. Given attributes of self pNextIntercept, pNextSlope, pNextCount,
pNextWidth, and pGrid, this method generates a new array for the attri-
bute pEvolution, representing a discrete approximation of next period
states for each current period state in pGrid.
Parameters
----------
none
Returns
-------
none
|
def login(context, request):
    """ Login Route

    Authenticate a user against Plone with the ``__ac_name`` /
    ``__ac_password`` credentials carried in the request.

    :param context: current Plone context
    :param request: request holding the credential fields
    :returns: the user record JSON (same format as the user route)
    """
    # extract the data
    __ac_name = request.get("__ac_name", None)
    __ac_password = request.get("__ac_password", None)

    logger.info("*** LOGIN %s ***" % __ac_name)

    if __ac_name is None:
        api.fail(400, "__ac_name is missing")
    if __ac_password is None:
        api.fail(400, "__ac_password is missing")

    acl_users = api.get_tool("acl_users")

    # XXX hard coded
    acl_users.credentials_cookie_auth.login()

    # XXX admin user won't be logged in if I use this approach
    # acl_users.login()
    # response = request.response
    # acl_users.updateCredentials(request, response, __ac_name, __ac_password)

    # If the credentials were invalid, we are still anonymous at this point.
    if api.is_anonymous():
        api.fail(401, "Invalid Credentials")

    # return the JSON in the same format like the user route
    return get(context, request, username=__ac_name)
|
Login Route
Login route to authenticate a user against Plone.
|
def pp_file_to_dataframe(pp_filename):
    """Read a pilot point file into a pandas DataFrame.

    Parameters
    ----------
    pp_filename : str
        pilot point file

    Returns
    -------
    df : pandas.DataFrame
        a dataframe with pp_utils.PP_NAMES for columns
    """
    df = pd.read_csv(pp_filename, delim_whitespace=True, header=None,
                     names=PP_NAMES, usecols=[0, 1, 2, 3, 4])
    # Normalize the name column: stringify and lowercase.
    df.loc[:, "name"] = df["name"].astype(str).str.lower()
    return df
|
read a pilot point file to a pandas Dataframe
Parameters
----------
pp_filename : str
pilot point file
Returns
-------
df : pandas.DataFrame
a dataframe with pp_utils.PP_NAMES for columns
|
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no such switch exists
    '''
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    # First switch whose name matches, or None.
    return next((item for item in container.view if item.name == dvs_name),
                None)
|
Return a reference to a Distributed Virtual Switch object.
:param service_instance: PyVmomi service instance
:param dvs_name: Name of DVS to return
:return: A PyVmomi DVS object
|
def example(self) -> str:
    """ Same as str(self), except the color codes are actually used. """
    # Build the ANSI escape for either truecolor (38;2) or 256-color (38;5).
    colorcode = (
        '\033[38;2;{};{};{}m'.format(*self.rgb)
        if self.rgb_mode
        else '\033[38;5;{}m'.format(self.code)
    )
    return '{}{}\033[0m'.format(colorcode, self)
|
Same as str(self), except the color codes are actually used.
|
def ajax_count_plus(self, slug):
    '''
    Increase the view count of the post identified by ``slug`` (AJAX endpoint).

    :param slug: slug of the post whose counter is incremented
    '''
    output = {
        # 1 when the counter was incremented, 0 otherwise.
        'status': 1 if MWiki.view_count_plus(slug) else 0,
    }
    # NOTE(review): json.dump() serializes through self's write() method
    # (self is passed as the file-like object) and returns None — this
    # looks like a Tornado-style handler; confirm json.dumps was not
    # intended here.
    return json.dump(output, self)
|
post count plus one via ajax.
|
def simBirth(self, which_agents):
    '''
    Create newly born agents at the given indices. Asset levels are drawn
    from a lognormal distribution parameterized by aLvlInitMean and
    aLvlInitStd; the time variables t_age and t_cycle are reset and the
    newborns start out employed.

    Parameters
    ----------
    which_agents : np.array(Bool)
        Boolean array of size self.AgentCount indicating which agents
        should be "born".

    Returns
    -------
    None
    '''
    # Number of new consumers to create
    N = np.sum(which_agents)
    # Initial asset levels for the newborns, seeded from the model RNG.
    self.aLvlNow[which_agents] = drawLognormal(
        N, mu=self.aLvlInitMean, sigma=self.aLvlInitStd,
        seed=self.RNG.randint(0, 2**31 - 1))
    self.eStateNow[which_agents] = 1.0  # agents are born employed
    self.t_age[which_agents] = 0        # periods since each agent was born
    self.t_cycle[which_agents] = 0      # current period within the cycle
    return None
|
Makes new consumers for the given indices. Initialized variables include aNrm, as
well as time variables t_age and t_cycle. Normalized assets are drawn from a lognormal
distributions given by aLvlInitMean and aLvlInitStd.
Parameters
----------
which_agents : np.array(Bool)
Boolean array of size self.AgentCount indicating which agents should be "born".
Returns
-------
None
|
def get_selinux_context(path):
    '''
    Get an SELinux context from a given path

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_selinux_context /etc/hosts
    '''
    out = __salt__['cmd.run'](['ls', '-Z', path], python_shell=False)
    # `ls -Z` embeds the context as four colon-separated fields.
    match = re.search(r'\w+:\w+:\w+:\w+', out)
    if match is None:
        return (
            'No selinux context information is available for {0}'.format(path)
        )
    return match.group(0)
|
Get an SELinux context from a given path
CLI Example:
.. code-block:: bash
salt '*' file.get_selinux_context /etc/hosts
|
def destroy_balancer(balancer_id, profile, **libcloud_kwargs):
    '''
    Destroy a load balancer

    :param balancer_id: LoadBalancer ID which should be used
    :type  balancer_id: ``str``

    :param profile: The profile key
    :type  profile: ``str``

    :param libcloud_kwargs: Extra arguments for the driver's destroy_balancer method
    :type  libcloud_kwargs: ``dict``

    :return: ``True`` if the destroy was successful, otherwise ``False``.
    :rtype: ``bool``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_storage.destroy_balancer balancer_1 profile1
    '''
    conn = _get_driver(profile=profile)
    # Strip salt-internal keys before forwarding to the driver.
    extra_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    balancer = conn.get_balancer(balancer_id)
    return conn.destroy_balancer(balancer, **extra_kwargs)
|
Destroy a load balancer
:param balancer_id: LoadBalancer ID which should be used
:type balancer_id: ``str``
:param profile: The profile key
:type profile: ``str``
:param libcloud_kwargs: Extra arguments for the driver's destroy_balancer method
:type libcloud_kwargs: ``dict``
:return: ``True`` if the destroy was successful, otherwise ``False``.
:rtype: ``bool``
CLI Example:
.. code-block:: bash
salt myminion libcloud_storage.destroy_balancer balancer_1 profile1
|
def _aggregate_one_result(
        self, sock_info, slave_ok, cmd, collation=None, session=None):
    """Run an aggregation expected to yield at most one document.

    Returns the single document from the cursor's first batch, or None
    when the pipeline produced no result.
    """
    reply = self._command(
        sock_info,
        cmd,
        slave_ok,
        codec_options=self.__write_response_codec_options,
        read_concern=self.read_concern,
        collation=collation,
        session=session)
    first_batch = reply['cursor']['firstBatch']
    if not first_batch:
        return None
    return first_batch[0]
|
Internal helper to run an aggregate that returns a single result.
|
def _dataframe_from_csv(reader, delimiter, with_header, skipspace):
"""Returns csv data as a pandas Dataframe object"""
sep = delimiter
header = 0
if not with_header:
header = None
return pd.read_csv(
reader,
header=header,
sep=sep,
skipinitialspace=skipspace,
encoding='utf-8-sig'
)
|
Returns csv data as a pandas Dataframe object
|
def template(self, key):
    """
    Return the template registered under ``key`` for this scaffold,
    falling back to the global ``Template.Plugins`` registry.

    :param key | <str>

    :return <projex.scaffold.Template> || None
    """
    local_templates = self._templates
    if key in local_templates:
        return local_templates[key]
    return Template.Plugins[key]
|
Returns the template associated with this scaffold.
:param key | <str>
:return <projex.scaffold.Template> || None
|
def load_parameter_definitions(self, sheet_name: str = None):
    """
    Load variable definition rows from the excel file.

    When no sheet name is given, all spreadsheets are loaded. A sheet is
    only considered when the first cell of its first row contains the
    keyword 'variable'. Title cells (rows with no associated values) are
    also returned, with None values for every header. For example, given:

        | variable | A | B |
        |----------|---|---|
        | Title    |   |   |
        | Entry    | 1 | 2 |

    the returned definitions are:

        [ { variable: 'Title', A: None, B: None }
        , { variable: 'Entry', A: 1   , B: 2    }
        ]

    :param sheet_name: sheet to load, or None for all sheets
    :return: list of dicts with {header col name : cell value} pairs
    """
    definitions = self.excel_handler.load_definitions(
        sheet_name, filename=self.filename)
    # Remember which file-format version produced these definitions.
    self.definition_version = self.excel_handler.version
    return definitions
|
Load variable text from rows in excel file.
If no spreadsheet arg is given, all spreadsheets are loaded.
The first cell in the first row in a spreadsheet must contain the keyword 'variable' or the sheet is ignored.
Any cells used as titles (with no associated value) are also added to the returned dictionary. However, the
values associated with each header will be None. For example, given the speadsheet:
| variable | A | B |
|----------|---|---|
| Title | | |
| Entry | 1 | 2 |
The following list of definitions would be returned:
[ { variable: 'Title', A: None, B: None }
, { variable: 'Entry', A: 1 , B: 2 }
]
:param sheet_name:
:return: list of dicts with {header col name : cell value} pairs
|
def _network_event_lifecycle_cb(conn, net, event, detail, opaque):
    '''
    Network lifecycle events handler: forward the libvirt event as a salt
    event payload.
    '''
    payload = {
        'network': {
            'name': net.name(),
            'uuid': net.UUIDString()
        },
        'event': _get_libvirt_enum_string('VIR_NETWORK_EVENT_', event),
        'detail': 'unknown'  # currently unused
    }
    _salt_send_event(opaque, conn, payload)
|
Network lifecycle events handler
|
def record_make_all_subfields_volatile(rec):
    """
    Prefix every subfield value in the record with "VOLATILE:" (values
    that already carry the prefix are left untouched).
    """
    for tag in rec.keys():
        for field_position, field in enumerate(rec[tag]):
            for subfield_position, subfield in enumerate(field[0]):
                code, value = subfield[0], subfield[1]
                if not value.startswith("VOLATILE:"):
                    record_modify_subfield(
                        rec, tag, code, "VOLATILE:" + value,
                        subfield_position,
                        field_position_local=field_position)
|
Turns all subfields to volatile
|
def _CreateReadAccessHelper(self):
    """Creates a CheckAccessHelper for controlling read access.
    This function and _CreateQueryAccessHelper essentially define GRR's ACL
    policy. Please refer to these 2 functions to either review or modify
    GRR's ACLs.
    Read access gives you the ability to open and read aff4 objects for which
    you already have the URN.
    Returns:
      CheckAccessHelper for controlling read access.
    """
    h = CheckAccessHelper("read")
    # Rules are registered in exactly the original declaration order; a None
    # precondition means unconditional read access to that namespace.
    rules = (
        ("aff4:/", None),
        # In order to open directories below aff4:/users, we have to have
        # access to aff4:/users directory itself.
        ("aff4:/users", None),
        # User is allowed to access anything in their home dir.
        ("aff4:/users/*", self._IsHomeDir),
        # Administrators are allowed to see current set of foreman rules.
        ("aff4:/foreman", self._UserHasAdminLabel),
        # Querying is not allowed for the blob namespace. Blobs are stored by
        # hashes as filename. If the user already knows the hash, they can
        # access the blob, however they must not be allowed to query for hashes.
        ("aff4:/blobs", None),
        ("aff4:/blobs/*", None),
        # The fingerprint namespace typically points to blobs. As such, it
        # follows the same rules.
        ("aff4:/FP", None),
        ("aff4:/FP/*", None),
        # The files namespace contains hash references to all files downloaded
        # with GRR, and is extensible via Filestore objects. Users can access
        # files for which they know the hash.
        # See lib/aff4_objects/filestore.py
        ("aff4:/files", None),
        ("aff4:/files/*", None),
        # Namespace for indexes. Client index is stored there.
        ("aff4:/index", None),
        ("aff4:/index/*", None),
        # Keyword-based Client index.
        ("aff4:/client_index", None),
        ("aff4:/client_index/*", None),
        # ACL namespace contains approval objects for accessing clients
        # and hunts.
        ("aff4:/ACL", None),
        ("aff4:/ACL/*", None),
        # stats namespace is for different statistics. For example,
        # ClientFleetStats object is stored there.
        ("aff4:/stats", None),
        ("aff4:/stats/*", None),
        # Configuration namespace used for reading drivers, python hacks etc.
        ("aff4:/config", None),
        ("aff4:/config/*", None),
        # Namespace for flows that run without a client. A lot of internal
        # utility flows and cron jobs' flows will end up here.
        ("aff4:/flows", None),
        ("aff4:/flows/*", None),
        # Namespace for hunts.
        ("aff4:/hunts", None),
        ("aff4:/hunts/*", None),
        # Namespace for cron jobs.
        ("aff4:/cron", None),
        ("aff4:/cron/*", None),
        # Namespace for audit data.
        ("aff4:/audit", None),
        ("aff4:/audit/*", None),
        ("aff4:/audit/logs", None),
        ("aff4:/audit/logs/*", None),
        # Namespace for clients.
        (self.CLIENT_URN_PATTERN, None),
        (self.CLIENT_URN_PATTERN + "/*", self._HasAccessToClient),
        # Allow everyone to read the artifact store.
        ("aff4:/artifact_store", None),
        ("aff4:/artifact_store/*", None),
    )
    for urn_pattern, precondition in rules:
        if precondition is None:
            h.Allow(urn_pattern)
        else:
            h.Allow(urn_pattern, precondition)
    return h
|
Creates a CheckAccessHelper for controlling read access.
This function and _CreateQueryAccessHelper essentially define GRR's ACL
policy. Please refer to these 2 functions to either review or modify
GRR's ACLs.
Read access gives you the ability to open and read aff4 objects for which
you already have the URN.
Returns:
CheckAccessHelper for controlling read access.
|
def fetch(self):
    """
    Fetch a TaskQueueInstance
    :returns: Fetched TaskQueueInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueInstance
    """
    # A plain fetch needs no query parameters.
    payload = self._version.fetch('GET', self._uri, params=values.of({}))
    workspace_sid = self._solution['workspace_sid']
    sid = self._solution['sid']
    return TaskQueueInstance(self._version, payload,
                             workspace_sid=workspace_sid, sid=sid)
|
Fetch a TaskQueueInstance
:returns: Fetched TaskQueueInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueInstance
|
def get_top(modality_type, value=None):
  """Gets default top transformation; if none available, return value."""
  # Ordered table of (modality types, default top transformation); scanned in
  # the same order as the original if/elif chain, with `value` as fallback.
  defaults = (
      ((ModalityType.AUDIO,
        ModalityType.AUDIO_SPECTRAL,
        ModalityType.GENERIC_L2_LOSS,
        ModalityType.IDENTITY,
        ModalityType.IDENTITY_SYMBOL,
        ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY,
        ModalityType.SPEECH_RECOGNITION,
        ModalityType.VIDEO_IDENTITY), identity_top),
      ((ModalityType.CLASS_LABEL,
        ModalityType.MULTI_LABEL,
        ModalityType.ONE_HOT_CLASS_LABEL,
        ModalityType.SIGMOID_CLASS_LABEL), class_label_top),
      ((ModalityType.CTC_SYMBOL,
        ModalityType.SYMBOL,
        ModalityType.SYMBOL_WEIGHTS_ALL), symbol_top),
      ((ModalityType.IMAGE,), image_top),
      ((ModalityType.IMAGE_CHANNEL_COMPRESS,), image_channel_compress_top),
      ((ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM,),
       image_channel_embeddings_top),
      ((ModalityType.REAL,
        ModalityType.REAL_L2_LOSS,
        ModalityType.REAL_LOG_POISSON_LOSS), real_top),
      ((ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL,),
       sigmoid_max_pooling_class_label_top),
      ((ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL,),
       softmax_average_pooling_class_label_top),
      ((ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL,),
       softmax_last_timestep_class_label_top),
      ((ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL,),
       softmax_max_pooling_class_label_top),
      ((ModalityType.SYMBOL_ONE_HOT,), symbol_one_hot_top),
      ((ModalityType.VIDEO,
        ModalityType.VIDEO_BITWISE,
        ModalityType.VIDEO_PIXEL_NOISE), video_top),
      ((ModalityType.VIDEO_L1,
        ModalityType.VIDEO_L2), video_l1_top),
      ((ModalityType.VIDEO_L1_RAW,
        ModalityType.VIDEO_L2_RAW), video_raw_top),
  )
  for modalities, top in defaults:
    if modality_type in modalities:
      return top
  return value
|
Gets default top transformation; if none available, return value.
|
def register_whoosheer(self, wh):
    """This will register the given whoosheer on `whoosheers`, create the
    necessary SQLAlchemy event listeners, replace the `query_class` with
    our own query class which will provide the search functionality
    and store the app on the whoosheer, so that we can always work
    with that.
    :param wh: The whoosheer which should be registered.
    """
    self.whoosheers.append(wh)
    listeners = (
        (INSERT_KWD, self.after_insert),
        (UPDATE_KWD, self.after_update),
        (DELETE_KWD, self.after_delete),
    )
    for model in wh.models:
        for keyword, callback in listeners:
            event.listen(model, 'after_{0}'.format(keyword), callback)
        query_class = getattr(model, 'query_class', None)
        if query_class is None or not isclass(query_class):
            model.query_class = self.query
        elif issubclass(query_class, self.query):
            # already a subclass, leave it alone
            pass
        elif query_class in (BaseQuery, SQLAQuery, WhoosheeQuery):
            model.query_class = self.query
        else:
            # Mix our query class in so the MRO stays stable.
            model.query_class = type(
                "Whooshee{}".format(query_class.__name__),
                (query_class, self.query), {}
            )
    if self.app:
        wh.app = self.app
    return wh
|
This will register the given whoosher on `whoosheers`, create the
necessary SQLAlchemy event listeners, replace the `query_class` with
our own query class which will provide the search functionality
and store the app on the whoosheer, so that we can always work
with that.
:param wh: The whoosher which should be registered.
|
def tag(self, path, name):
    '''
    Change name associated with path
    :param path: directory path; a trailing '/' is appended when missing
    :param name: new label for the folder
    :returns: the previous label
    :raises custom_errors.FileNotInConfig: if the path is not in the config
    '''
    # Normalize so lookups always use a trailing slash. str.endswith also
    # handles an empty path, which the previous path[len(path) - 1] check
    # would have turned into an IndexError.
    if not path.endswith('/'):
        path += '/'
    config = self.get_config()
    folder = self.find_folder({
        'path': path
    }, config)
    if not folder:
        raise custom_errors.FileNotInConfig(path)
    old_name = folder['label']
    folder['label'] = name
    # Keep the per-directory config in sync with the global config.
    dir_config = self.adapter.get_dir_config(path)
    dir_config['label'] = name
    self.adapter.set_dir_config(dir_config)
    self.set_config(config)
    # self.restart
    return old_name
|
Change name associated with path
|
def get_analysis_data_for(self, ar):
    """Return the Analysis data for this AR
    """
    # objectValues on the AR itself deliberately excludes analyses that
    # belong to children (partitions).
    data = []
    for analysis in ar.objectValues("Analysis"):
        info = self.get_base_info(analysis)
        info["service_uid"] = analysis.getServiceUID()
        data.append(info)
    return data
|
Return the Analysis data for this AR
|
def raw(self, query, settings=None, stream=False):
    '''
    Performs a query and returns its output as text.
    - `query`: the SQL query to execute.
    - `settings`: query settings to send as HTTP GET parameters
    - `stream`: if true, the HTTP response from ClickHouse will be streamed.
    '''
    substituted = self._substitute(query, None)
    response = self._send(substituted, settings=settings, stream=stream)
    return response.text
|
Performs a query and returns its output as text.
- `query`: the SQL query to execute.
- `settings`: query settings to send as HTTP GET parameters
- `stream`: if true, the HTTP response from ClickHouse will be streamed.
|
def calculate_perf_counter_100ns_queuelen_type(previous, current, property_name):
    """
    PERF_COUNTER_100NS_QUEUELEN_TYPE
    Average length of a queue to a resource over time in 100 nanosecond units.
    https://msdn.microsoft.com/en-us/library/aa392905(v=vs.85).aspx
    Formula (n1 - n0) / (d1 - d0)
    :param previous: earlier sample, a dict with `property_name` and
        "Timestamp_Sys100NS" keys
    :param current: later sample with the same keys
    :param property_name: name of the counter value to read from both samples
    :return: the computed average, or None if either sample value is missing
        or the two samples share the same timestamp (no elapsed time)
    """
    n0 = previous[property_name]
    n1 = current[property_name]
    d0 = previous["Timestamp_Sys100NS"]
    d1 = current["Timestamp_Sys100NS"]
    if n0 is None or n1 is None:
        return None
    # Identical timestamps would divide by zero; treat as "no data yet",
    # consistent with the missing-value case above.
    if d1 == d0:
        return None
    return (n1 - n0) / (d1 - d0)
|
PERF_COUNTER_100NS_QUEUELEN_TYPE
Average length of a queue to a resource over time in 100 nanosecond units.
https://msdn.microsoft.com/en-us/library/aa392905(v=vs.85).aspx
Formula (n1 - n0) / (d1 - d0)
|
def log_analyzer2(path):
    """This procedure considers every line which can't be parsed
    as a line with ERROR level.
    Installs a handler that reparses malformed log entries as ERROR
    lines (via the 'reparse' restart), then analyzes every log under
    `path`.
    """
    # `lambda c:` — the original `lambda (c):` used the Python 2-only
    # tuple-parameter form, which is a SyntaxError on Python 3.
    with handle(MalformedLogEntryError,
                lambda c: invoke_restart('reparse',
                                         'ERROR: ' + c.text)):
        for filename in find_all_logs(path):
            analyze_log(filename)
|
This procedure considers every line which can't be parsed
as a line with ERROR level.
|
def connect(self, *args, **kwargs):
    """Attempt to establish connection, see :meth:`.CMClient.connect`
    Seeds the CM server list from the local cache file first, then
    delegates to :meth:`.CMClient.connect` with the same arguments.
    """
    # Load previously discovered CM endpoints so connect() has candidate
    # servers even before a fresh list is received.
    self._bootstrap_cm_list_from_file()
    CMClient.connect(self, *args, **kwargs)
|
Attempt to establish connection, see :meth:`.CMClient.connect`
|
def get_vm(self, resource_group_name, vm_name):
    '''
    you need to retry this just in case the credentials token expires,
    that's where the decorator comes in
    this will return all the data about the virtual machine
    :param resource_group_name: Azure resource group containing the VM
    :param vm_name: name of the virtual machine
    :return: the VM object, including the instance view
    '''
    # expand='instanceView' asks Azure to include runtime status
    # (power state, provisioning state) in the response.
    return self.client.virtual_machines.get(
        resource_group_name, vm_name, expand='instanceView')
|
you need to retry this just in case the credentials token expires,
that's where the decorator comes in
this will return all the data about the virtual machine
|
def from_string(cls, string, format_=None, fps=None, **kwargs):
    """
    Load subtitle file from string.
    See :meth:`SSAFile.load()` for full description.
    Arguments:
        string (str): Subtitle file in a string. Note that the string
            must be Unicode (in Python 2).
    Returns:
        SSAFile
    Example:
        >>> text = '''
        ... 1
        ... 00:00:00,000 --> 00:00:05,000
        ... An example SubRip file.
        ... '''
        >>> subs = SSAFile.from_string(text)
    """
    # Wrap the text in a file-like object and reuse the file-based loader.
    return cls.from_file(io.StringIO(string), format_, fps=fps, **kwargs)
|
Load subtitle file from string.
See :meth:`SSAFile.load()` for full description.
Arguments:
string (str): Subtitle file in a string. Note that the string
must be Unicode (in Python 2).
Returns:
SSAFile
Example:
>>> text = '''
... 1
... 00:00:00,000 --> 00:00:05,000
... An example SubRip file.
... '''
>>> subs = SSAFile.from_string(text)
|
def unpack_rows(self, column_types, connection):
    """Unpack rows for data (from a select statement) from payload and yield a single row at a time.
    :param column_types: a tuple of column descriptors
        e.g. (<class 'pyhdb.protocol.types.String'>, <class 'pyhdb.protocol.types.ClobType'>)
    :param connection: a db connection object
    :returns: a generator object
    """
    for _ in iter_range(self.num_rows):
        # Each column type knows how to decode its own value from the payload.
        row = [col_type.from_resultset(self.payload, connection)
               for col_type in column_types]
        yield tuple(row)
|
Unpack rows for data (from a select statement) from payload and yield a single row at a time.
:param column_types: a tuple of column descriptors
e.g. (<class 'pyhdb.protocol.types.String'>, <class 'pyhdb.protocol.types.ClobType'>)
:param connection: a db connection object
:returns: a generator object
|
def delete_rrset(self, zone_name, rtype, owner_name):
    """Deletes an RRSet.
    Arguments:
    zone_name -- The zone containing the RRSet to be deleted. The trailing dot is optional.
    rtype -- The type of the RRSet. This can be numeric (1) or
             if a well-known name is defined for the type (A), you can use it instead.
    owner_name -- The owner name for the RRSet.
                  If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
                  If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
    """
    # str.format also stringifies numeric rtypes (the documented "numeric (1)"
    # form), which the previous "+" concatenation rejected with a TypeError.
    url = "/v1/zones/{0}/rrsets/{1}/{2}".format(zone_name, rtype, owner_name)
    return self.rest_api_connection.delete(url)
|
Deletes an RRSet.
Arguments:
zone_name -- The zone containing the RRSet to be deleted. The trailing dot is optional.
rtype -- The type of the RRSet. This can be numeric (1) or
if a well-known name is defined for the type (A), you can use it instead.
owner_name -- The owner name for the RRSet.
If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
|
def duplicate_files(self):
    '''
    Search for duplicates of submission file uploads for this assignment.
    This includes the search in other courses, whether inactive or not.
    Returns a list of [md5, file_list] pairs, where each file_list is a
    set of duplicate submissions with at least one of them belonging to
    this assignment.
    '''
    duplicates = []
    uploads = SubmissionFile.valid_ones.order_by('md5')
    # groupby requires md5-sorted input, which order_by('md5') guarantees.
    for md5, group in groupby(uploads, lambda f: f.md5):
        members = [upload for upload in group]
        if len(members) > 1:
            # Report the group only if it touches this assignment.
            for upload in members:
                if upload.submissions.filter(assignment=self).count() > 0:
                    duplicates.append([md5, members])
                    break
    return duplicates
|
Search for duplicates of submission file uploads for this assignment.
This includes the search in other courses, whether inactive or not.
Returns a list of lists, where each of the latter is a set of duplicate submissions
with at least one of them for this assignment
|
def insert_pattern(pattern, model, index=0):
    """
    Inserts given pattern into given Model.
    :param pattern: Pattern.
    :type pattern: unicode
    :param model: Model.
    :type model: PatternsModel
    :param index: Insertion index.
    :type index: int
    :return: Method success.
    :rtype: bool
    """
    # Empty / falsy patterns are rejected outright.
    if not pattern:
        return False
    # Normalize Qt paragraph separators to newlines, then keep only the
    # first line of the pattern.
    pattern = pattern.replace(QChar(QChar.ParagraphSeparator), QString("\n"))
    pattern = foundations.common.get_first_item(foundations.strings.to_string(pattern).split("\n"))
    model.insert_pattern(foundations.strings.to_string(pattern), index)
    return True
|
Inserts given pattern into given Model.
:param pattern: Pattern.
:type pattern: unicode
:param model: Model.
:type model: PatternsModel
:param index: Insertion index.
:type index: int
:return: Method success.
:rtype: bool
|
def url(context, view, subdomain=UNSET, *args, **kwargs):
    """
    Resolves a URL in a template, using subdomain-based URL resolution.
    If no subdomain is provided and a ``request`` is in the template context
    when rendering, the URL will be resolved relative to the current request's
    subdomain. If no ``request`` is provided, the URL will be resolved relative
    to current domain with the ``settings.ROOT_URLCONF``.
    Usage::
        {% load subdomainurls %}
        {% url 'view-name' subdomain='subdomain' %}
    .. note:: This tag uses the variable URL syntax introduced in Django
       1.3 as ``{% load url from future %}`` and was made the standard in Django
       1.5. If you are upgrading a legacy application from one of the previous
       template tag formats, make sure to quote your constant string URL names
       to avoid :exc:`~django.core.urlresolver.NoReverseMatch` errors during
       template rendering.
    """
    if subdomain is UNSET:
        # No explicit subdomain: fall back to the current request's, if any.
        request = context.get('request')
        if request is not None:
            subdomain = getattr(request, 'subdomain', None)
        else:
            subdomain = None
    elif subdomain == '':
        # An explicit empty string means "no subdomain". This was previously
        # `subdomain is ''`, which relied on CPython string interning and is
        # a SyntaxWarning on Python >= 3.8; equality is the correct check.
        subdomain = None
    return reverse(view, subdomain=subdomain, args=args, kwargs=kwargs)
|
Resolves a URL in a template, using subdomain-based URL resolution.
If no subdomain is provided and a ``request`` is in the template context
when rendering, the URL will be resolved relative to the current request's
subdomain. If no ``request`` is provided, the URL will be resolved relative
to current domain with the ``settings.ROOT_URLCONF``.
Usage::
{% load subdomainurls %}
{% url 'view-name' subdomain='subdomain' %}
.. note:: This tag uses the variable URL syntax introduced in Django
1.3 as ``{% load url from future %}`` and was made the standard in Django
1.5. If you are upgrading a legacy application from one of the previous
template tag formats, make sure to quote your constant string URL names
to avoid :exc:`~django.core.urlresolver.NoReverseMatch` errors during
template rendering.
|
def track_progress(
    measure: MeasureProgress,
    target: MetricProgress,
    interval_check: float,
    capture_maybe: Optional[CaptureProgress] = None
) -> None:
    """
    Tracks progress against a certain end condition of the simulation (for
    instance, a certain duration on the simulated clock), reporting this
    progress as the simulation chugs along, and stopping the simulation once
    every measured metric has reached its target. By default, progress is
    reported as printout on standard output, formatted for digital terminals.
    """
    report = capture_maybe or capture_print()
    started_at = now_real()
    while True:
        advance(interval_check)
        elapsed = now_real() - started_at
        # Pair each measured metric with its target value.
        progress = list(zip(measure(), target))
        min_ratio = min(measured / goal for measured, goal in progress)
        # Project the total run time from the slowest metric.
        if min_ratio == 0.0:
            projected_total = inf
        else:
            projected_total = elapsed / min_ratio
        report(min_ratio, projected_total - elapsed, progress)
        if all(measured >= goal for measured, goal in progress):
            stop()
            return
|
Tracks progress against a certain end condition of the simulation (for instance, a certain duration on the simulated
clock), reporting this progress as the simulation chugs along. Stops the simulation once the target has been
reached. By default, the progress is reported as printout on standard output, in a manner that works best for
digital terminals.
|
def intersects_segment(self, seg):
    """
    Returns True if any segmentlist in self intersects the
    segment, otherwise returns False.
    :param seg: the segment to test against every segmentlist in self
    """
    # any() short-circuits on the first intersecting segmentlist.
    # NOTE(review): itervalues() is the Python 2 dict API — presumably this
    # class mirrors it; confirm before porting to Python 3.
    return any(value.intersects_segment(seg) for value in self.itervalues())
|
Returns True if any segmentlist in self intersects the
segment, otherwise returns False.
|
def load_jupyter_server_extension(nb_server_app):
    """Called by Jupyter when this module is loaded as a server extension."""
    web_app = nb_server_app.web_app
    base_url = web_app.settings['base_url']
    # Register both the websocket tunnel endpoint and its diagnostics page.
    routes = [
        (utils.url_path_join(base_url, '/http_over_websocket'),
         handlers.HttpOverWebSocketHandler),
        (utils.url_path_join(base_url, '/http_over_websocket/diagnose'),
         handlers.HttpOverWebSocketDiagnosticHandler),
    ]
    web_app.add_handlers('.*$', routes)
    print('jupyter_http_over_ws extension initialized. Listening on '
          '/http_over_websocket')
|
Called by Jupyter when this module is loaded as a server extension.
|
def makeAnimation(self):
    """Use moviepy (`mpy`) to render (visual+audio)+text overlays.
    Attaches "sound.wav" as the audio track of the current clip and
    writes the combined result to "mixedVideo.webm" at 15 fps.
    """
    # Mux the pre-rendered audio onto the image sequence clip.
    aclip=mpy.AudioFileClip("sound.wav")
    self.iS=self.iS.set_audio(aclip)
    self.iS.write_videofile("mixedVideo.webm",15,audio=True)
    print("wrote "+"mixedVideo.webm")
|
Use moviepy to render (visual+audio)+text overlays.
|
def fetch(self, category, filter_classified=False, **kwargs):
    """Fetch items from the repository.
    The method retrieves items from a repository.
    To remove classified fields from the resulting items, set
    the parameter `filter_classified`. Take into account this
    parameter is incompatible with archiving items. Raw client
    data are archived before any other process. Therefore,
    classified data are stored within the archive. To prevent
    possible data leaks or security issues when users do
    not need these fields, archiving and filtering are not
    compatible.
    :param category: the category of the items fetched
    :param filter_classified: remove classified fields from the resulting items
    :param kwargs: a list of other parameters (e.g., from_date, offset, etc.
        specific for each backend)
    :returns: a generator of items
    :raises BackendError: either when the category is not valid or
        'filter_classified' and 'archive' are active at the same time.
    """
    if category not in self.categories:
        cause = "%s category not valid for %s" % (category, self.__class__.__name__)
        raise BackendError(cause=cause)
    # Raw data is archived before classified fields could be stripped,
    # so filtering and archiving together would leak classified data.
    if filter_classified and self.archive:
        cause = "classified fields filtering is not compatible with archiving items"
        raise BackendError(cause=cause)
    if self.archive:
        # Record fetch metadata so the archived raw data can be tied to
        # this particular retrieval (backend, version, category, params).
        self.archive.init_metadata(self.origin, self.__class__.__name__, self.version, category,
                                   kwargs)
    self.client = self._init_client()
    for item in self.fetch_items(category, **kwargs):
        if filter_classified:
            item = self.filter_classified_data(item)
        yield self.metadata(item, filter_classified=filter_classified)
|
Fetch items from the repository.
The method retrieves items from a repository.
To remove classified fields from the resulting items, set
the parameter `filter_classified`. Take into account this
parameter is incompatible with archiving items. Raw client
data are archived before any other process. Therefore,
classified data are stored within the archive. To prevent
from possible data leaks or security issues when users do
not need these fields, archiving and filtering are not
compatible.
:param category: the category of the items fetched
:param filter_classified: remove classified fields from the resulting items
:param kwargs: a list of other parameters (e.g., from_date, offset, etc.
specific for each backend)
:returns: a generator of items
:raises BackendError: either when the category is not valid or
'filter_classified' and 'archive' are active at the same time.
|
def swap(self, core, other):
    """Swap two Solr cores via the CoreAdmin SWAP action.
    http://wiki.apache.org/solr/CoreAdmin#head-928b872300f1b66748c85cebb12a59bb574e501b
    """
    return self._get_url(self.url, params=dict(action='SWAP', core=core, other=other))
|
http://wiki.apache.org/solr/CoreAdmin#head-928b872300f1b66748c85cebb12a59bb574e501b
|
def get_dataset(self, name, multi_instance=0):
    """ get a specific dataset.
    example:
        try:
            gyro_data = ulog.get_dataset('sensor_gyro')
        except (KeyError, IndexError, ValueError) as error:
            print(type(error), "(sensor_gyro):", error)
    :param name: name of the dataset
    :param multi_instance: the multi_id, defaults to the first
    :raises KeyError, IndexError, ValueError: if name or instance not found
    """
    # Indexing an empty match list raises IndexError, as documented.
    matches = [dataset for dataset in self._data_list
               if dataset.name == name and dataset.multi_id == multi_instance]
    return matches[0]
|
get a specific dataset.
example:
try:
gyro_data = ulog.get_dataset('sensor_gyro')
except (KeyError, IndexError, ValueError) as error:
print(type(error), "(sensor_gyro):", error)
:param name: name of the dataset
:param multi_instance: the multi_id, defaults to the first
:raises KeyError, IndexError, ValueError: if name or instance not found
|
def render(self, template, **kwargs):
    """ Render a route template and adds information to this route.
    :param template: Template name.
    :type template: str
    :param kwargs: dictionary of named arguments used to be passed to the template
    :type kwargs: dict
    :return: Http Response with rendered template
    :rtype: flask.Response
    """
    # NOTE: a former pre-assignment of kwargs["cache_key"] from
    # kwargs["url"].values() was dead code — it was unconditionally
    # overwritten by make_cache_keys() below — and has been removed.
    kwargs["lang"] = self.get_locale()
    kwargs["assets"] = self.assets
    kwargs["main_collections"] = self.main_collections(kwargs["lang"])
    kwargs["cache_active"] = self.cache is not None
    kwargs["cache_time"] = 0
    kwargs["cache_key"], kwargs["cache_key_i18n"] = self.make_cache_keys(request.endpoint, kwargs["url"])
    kwargs["template"] = template
    # Registered plugins may enrich or override the context (including
    # "template") before rendering.
    for plugin in self.__plugins_render_views__:
        kwargs.update(plugin.render(**kwargs))
    return render_template(kwargs["template"], **kwargs)
|
Render a route template and adds information to this route.
:param template: Template name.
:type template: str
:param kwargs: dictionary of named arguments used to be passed to the template
:type kwargs: dict
:return: Http Response with rendered template
:rtype: flask.Response
|
def scale_up(self, n, pods=None, **kwargs):
    """
    Make sure we have n dask-workers available for this cluster
    The requested count is capped by the ``kubernetes.count.max``
    configuration value. Pod creation is retried up to three times when
    the Kubernetes API answers with an HTTP 500 'ServerTimeout'.
    Examples
    --------
    >>> cluster.scale_up(20) # ask for twenty workers
    """
    maximum = dask.config.get('kubernetes.count.max')
    if maximum is not None and maximum < n:
        logger.info("Tried to scale beyond maximum number of workers %d > %d",
                    n, maximum)
        n = maximum
    # Count only live pods; terminated ones are cleaned up first.
    pods = pods or self._cleanup_terminated_pods(self.pods())
    to_create = n - len(pods)
    new_pods = []
    # Up to 3 attempts; each attempt resumes where the previous one failed
    # because to_create is decremented per successfully created pod.
    for i in range(3):
        try:
            for _ in range(to_create):
                new_pods.append(self.core_api.create_namespaced_pod(
                    self.namespace, self.pod_template))
                to_create -= 1
            break
        except kubernetes.client.rest.ApiException as e:
            if e.status == 500 and 'ServerTimeout' in e.body:
                logger.info("Server timeout, retry #%d", i + 1)
                time.sleep(1)
                last_exception = e
                continue
            else:
                raise
    else:
        # for-else: all 3 attempts hit the ServerTimeout branch, so
        # last_exception is guaranteed to be bound here.
        raise last_exception
    return new_pods
|
Make sure we have n dask-workers available for this cluster
Examples
--------
>>> cluster.scale_up(20) # ask for twenty workers
|
def validate_proof(proof: List[Keccak256], root: Keccak256, leaf_element: Keccak256) -> bool:
    """ Checks that `leaf_element` was contained in the tree represented by
    `merkleroot`.
    """
    # Fold the leaf up the tree, combining it with each sibling hash in turn.
    current = leaf_element
    for sibling in proof:
        current = hash_pair(current, sibling)
    # Arriving at the root proves membership.
    return current == root
|
Checks that `leaf_element` was contained in the tree represented by
`merkleroot`.
|
def get_database_columns(self, tables=None, database=None):
    """Retrieve a dictionary of columns."""
    # Fall back to this connection's defaults when not provided.
    source = database or self.database
    tables = tables or self.tables
    progress = tqdm(tables, total=len(tables),
                    desc='Getting {0} columns'.format(source))
    return {table: self.get_columns(table) for table in progress}
|
Retrieve a dictionary of columns.
|
def meta(self):
    """Value of the bound meta-property on the target."""
    # Both a configured property name and a target are required.
    if self._pv.meta_data_property and self._meta_target:
        return getattr(self._meta_target, self._pv.meta_data_property)
    return {}
|
Value of the bound meta-property on the target.
|
def vocab_account_type(instance):
    """Ensure a user-account objects' 'account-type' property is from the
    account-type-ov vocabulary.
    """
    for key, obj in instance['objects'].items():
        # Only user-account objects are subject to this check.
        if obj.get('type') != 'user-account':
            continue
        if 'account_type' not in obj:
            continue
        acct_type = obj['account_type']
        if acct_type not in enums.ACCOUNT_TYPE_OV:
            yield JSONError("Object '%s' is a User Account Object "
                            "with an 'account_type' of '%s', which is not a "
                            "value in the account-type-ov vocabulary."
                            % (key, acct_type), instance['id'], 'account-type')
|
Ensure a user-account objects' 'account-type' property is from the
account-type-ov vocabulary.
|
def get_code_language(self):
    """
    This is largely copied from bokeh.sphinxext.bokeh_plot.run
    Returns a [source, language] pair: the bare JS when "include_html"
    is off, otherwise the JS embedded in a full HTML page.
    """
    js_source = self.get_js_source()
    if not self.options.get("include_html", False):
        return [js_source, "javascript"]
    # Wrap the script in a complete HTML document with BokehJS resources.
    resources = get_sphinx_resources(include_bokehjs_api=True)
    html_source = BJS_HTML.render(
        css_files=resources.css_files,
        js_files=resources.js_files,
        bjs_script=js_source)
    return [html_source, "html"]
|
This is largely copied from bokeh.sphinxext.bokeh_plot.run
|
def _to_json(self, include_references=True):
    """Convert the model to JSON using the PotionJSONEncoder, automatically
    resolving the resource as needed (the `_properties` access handles this).
    When `include_references` is False, nested Resource values and
    "$"-prefixed keys are dropped before encoding.
    """
    properties = self._resource._properties
    if not include_references:
        properties = {
            key: value
            for key, value in properties.items()
            if not isinstance(value, Resource) and not key.startswith("$")
        }
    return json.dumps(properties, cls=PotionJSONEncoder)
|
Convert the model to JSON using the PotionJSONEncode and automatically
resolving the resource as needed (`_properties` call handles this).
|
def stream_events(self, filter: Callable[[Event], bool] = None, *, max_queue_size: int = 0):
    """Shortcut for calling :func:`stream_events` with this signal in the first argument.
    :param filter: optional predicate applied to each event
    :param max_queue_size: maximum size of the event queue
    """
    # Delegates to the module-level stream_events() with just this signal.
    return stream_events([self], filter, max_queue_size=max_queue_size)
|
Shortcut for calling :func:`stream_events` with this signal in the first argument.
|
def types(**args):
    """Specifies the types used for the arguments of a published service.
    @types(a=int, b = str)
    def f(a, b):
        pass
    """
    def decorate(func):
        # Merge into existing annotations; otherwise install ours wholesale.
        if hasattr(func, '__annotations__'):
            func.__annotations__.update(args)
        else:
            func.__annotations__ = args
        return func
    return decorate
|
Specifies the types used for the arguments of a published service.
@types(a=int, b = str)
def f(a, b):
pass
|
def list_commands(self, ctx):
    """
    list all commands exposed to engineer
    :param ctx: click context; connect() may attach a ``widget`` to it
    :return: list of command names
    """
    self.connect(ctx)
    # Without a widget only the base group's commands are available.
    if not hasattr(ctx, "widget"):
        return super(Engineer, self).list_commands(ctx)
    # Widget-provided commands first, then the base group's commands.
    return ctx.widget.engineer_list_commands() + super(Engineer, self).list_commands(ctx)
|
list all commands exposed to engineer
|
async def set_property_value(self, turn_context: 'TurnContext', property_name: str, value: object) -> None:
    """
    Sets a property's value in the state cache in the turn context.
    :param turn_context: The context object for this turn.
    :param property_name: The name of the property to set.
    :param value: The value to assign to the property.
    :return: None
    """
    # The docstring and error messages previously referenced
    # delete_property(); they now name this method correctly.
    if turn_context is None:
        raise TypeError('BotState.set_property_value(): turn_context cannot be None.')
    if not property_name:
        raise TypeError('BotState.set_property_value(): property_name cannot be None.')
    # Assumes load_state() already populated the cache for this turn;
    # otherwise cached_state is None and the assignment raises AttributeError.
    cached_state = turn_context.turn_state.get(self._context_service_key)
    cached_state.state[property_name] = value
|
Deletes a property from the state cache in the turn context.
:param turn_context: The context object for this turn.
:param property_name: The value to set on the property.
:return: None
|
def files(self):
    """Set of found binary files output by StagYY."""
    # Lazily populate the cached listing; UNDETERMINED is the
    # "not scanned yet" sentinel.
    if self._rundir['ls'] is UNDETERMINED:
        out_stem = pathlib.Path(self.par['ioin']['output_file_stem'] + '_')
        out_dir = self.path / out_stem.parent
        if out_dir.is_dir():
            self._rundir['ls'] = set(out_dir.iterdir())
        else:
            # No output directory yet: remember an empty listing.
            self._rundir['ls'] = set()
    return self._rundir['ls']
|
Set of found binary files output by StagYY.
|
def _rspiral(width, height):
"""Reversed spiral generator.
Parameters
----------
width : `int`
Spiral width.
height : `int`
Spiral height.
Returns
-------
`generator` of (`int`, `int`)
Points.
"""
x0 = 0
y0 = 0
x1 = width - 1
y1 = height - 1
while x0 < x1 and y0 < y1:
for x in range(x0, x1):
yield x, y0
for y in range(y0, y1):
yield x1, y
for x in range(x1, x0, -1):
yield x, y1
for y in range(y1, y0, -1):
yield x0, y
x0 += 1
y0 += 1
x1 -= 1
y1 -= 1
if x0 == x1:
for y in range(y0, y1 + 1):
yield x0, y
elif y0 == y1:
for x in range(x0, x1 + 1):
yield x, y0
|
Reversed spiral generator.
Parameters
----------
width : `int`
Spiral width.
height : `int`
Spiral height.
Returns
-------
`generator` of (`int`, `int`)
Points.
|
def main():
    """
    Parse command-line arguments and start the automated installation.
    Raises ValueError when neither a config file (-C) nor a host (-H) is
    supplied, or when host login credentials are incomplete; raises
    KeyError when the requested config section does not exist.
    """
    configure_obj = None
    parser = argparse.ArgumentParser(description='自动化安装')
    # Server alias.
    parser.add_argument(
        '-s', '--server',
        help='服务器代替名',
    )
    # Big-data service application alias.
    parser.add_argument(
        '-ba', '--bigdata-app',
        help='大数据服务应用替代名',
    )
    # Add a slave node server.
    parser.add_argument(
        '-add-slave', '--add-slave',
        help='添加一个子节点服务器',
    )
    # Skip the master server's installation step.
    parser.add_argument(
        '-skip-master', '--skip-master',
        dest='skip_master',
        action='store_true',
        help='跳过master服务器的安装过程',
    )
    # force to install packages without ask question
    parser.add_argument(
        '-f', '--force',
        dest='force',
        action='store_true',
        help='不询问是否安装',
    )
    parser.add_argument(
        '-nf', '--not-force',
        dest='force',
        action='store_false',
        help='询问是否安装',
    )
    parser.set_defaults(force=False)
    # login to remote mysql, default is false
    parser.add_argument(
        '-my', '--mysql',
        dest='login_mysql',
        action='store_true',
        help='登录到Mysql数据库',
    )
    parser.set_defaults(login_mysql=False)
    # login to server
    parser.add_argument(
        '-login', '--login',
        dest='login_server',
        action='store_true',
        help='登录到远程服务器',
    )
    parser.set_defaults(login_server=False)
    # Project name, defaults to "demo".
    parser.add_argument(
        '-p', '--project',
        help='项目名称,默认是demo',
        default='demo'
    )
    # Directory where the GitHub project is stored.
    parser.add_argument(
        '-gp', '--git-pull',
        help='Github项目的保存目录',
    )
    # Config file path plus the section name to use (two values).
    parser.add_argument(
        '-C', '--config',
        help='配置文件路径',
        nargs=2
    )
    # Direct connection details for the client machine.
    parser.add_argument(
        '-H', '--host',
        help='客户机地址',
    )
    parser.add_argument(
        '-U', '--user',
        help='客户机用户名',
    )
    parser.add_argument(
        '-P', '--passwd',
        help='客户机登录密码',
    )
    parser.add_argument(
        '-K', '--keyfile',
        help='客户机SSH KEY的路径',
    )
    parser.add_argument(
        '-ap', '--active-port',
        help='客户机已经开启的端口',
        action='store_true'
    )
    args = parser.parse_args()
    # if config file and host both not provide, throw a error
    if args.config is None and args.host is None:
        raise ValueError('缺少配置文件-C(--config)或客户机地址-H(--host)')
    # check user and passwd
    if args.host is not None:
        # Relies on argparse defaults being None: `and`/`or` propagate None
        # when user, or both passwd and keyfile, are missing.
        if (args.user and (args.passwd or args.keyfile)) is None:
            raise ValueError('缺少登录必要的信息')
    # if exist config file, read configuration from file
    configure = None
    if args.config is not None:
        # init configuration parser
        configure = configparser.ConfigParser()
        configure.read(args.config[0])
        config_sections = configure.sections()
        # check key exist
        if args.config[1] not in config_sections:
            raise KeyError('未找到与{0}对应的登录配置文件,存在的配置文件为{1}'
                           .format(args.config[1], config_sections))
        configure_obj = configure[args.config[1]]
    # init server
    ServerBase(
        args, configure_obj,
        configure=configure
    )
|
Check args
|
def get_item(self, table_name, key_dict,
             consistent_read=False,
             expression_attribute_names=None,
             projection_expression=None,
             return_consumed_capacity=None):
    """
    Invoke the `GetItem`_ function.

    :param str table_name: table to retrieve the item from
    :param dict key_dict: key to use for retrieval. This will be
        marshalled for you so a native :class:`dict` works.
    :param bool consistent_read: Determines the read consistency model:
        if set to :py:data:`True`, the operation uses strongly
        consistent reads; otherwise it uses eventually consistent
        reads.
    :param dict expression_attribute_names: One or more substitution
        tokens for attribute names in an expression.
    :param str projection_expression: A string identifying one or more
        attributes to retrieve from the table (scalars, sets, or
        elements of a JSON document), separated by commas. When no
        attribute names are specified, all attributes are returned;
        requested attributes that are not found simply do not appear in
        the result.
    :param str return_consumed_capacity: Level of detail about
        provisioned throughput consumption returned in the response:

        - ``INDEXES``: aggregate consumed capacity for the operation,
          plus consumed capacity per table and secondary index
          accessed. Operations such as *GetItem* and *BatchGetItem* do
          not access indexes, in which case only table capacity is
          reported.
        - ``TOTAL``: only the aggregate consumed capacity.
        - ``NONE``: no consumed capacity details.
    :rtype: tornado.concurrent.Future

    .. _GetItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_GetItem.html
    """
    payload = {
        'TableName': table_name,
        'Key': utils.marshall(key_dict),
        'ConsistentRead': consistent_read,
    }
    if expression_attribute_names:
        payload['ExpressionAttributeNames'] = expression_attribute_names
    if projection_expression:
        payload['ProjectionExpression'] = projection_expression
    if return_consumed_capacity:
        # Raises before the request is sent if the value is invalid.
        _validate_return_consumed_capacity(return_consumed_capacity)
        payload['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('GetItem', payload)
|
Invoke the `GetItem`_ function.
:param str table_name: table to retrieve the item from
:param dict key_dict: key to use for retrieval. This will
be marshalled for you so a native :class:`dict` works.
:param bool consistent_read: Determines the read consistency model: If
    set to :py:data:`True`, then the operation uses strongly consistent
reads; otherwise, the operation uses eventually consistent reads.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression.
:param str projection_expression: A string that identifies one or more
attributes to retrieve from the table. These attributes can include
scalars, sets, or elements of a JSON document. The attributes in
the expression must be separated by commas. If no attribute names
are specified, then all attributes will be returned. If any of the
requested attributes are not found, they will not appear in the
result.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response:
- INDEXES: The response includes the aggregate consumed
capacity for the operation, together with consumed capacity for
each table and secondary index that was accessed. Note that
some operations, such as *GetItem* and *BatchGetItem*, do not
access any indexes at all. In these cases, specifying INDEXES
will only return consumed capacity information for table(s).
- TOTAL: The response includes only the aggregate consumed
capacity for the operation.
- NONE: No consumed capacity details are included in the
response.
:rtype: tornado.concurrent.Future
.. _GetItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_GetItem.html
|
def to_networkx(self, labels=None, edge_labels=False):
    """Return a :class:`networkx.DiGraph` view of the binary search tree.

    Each node key becomes a graph node carrying the node's ``value``
    attribute; edges run parent -> child.

    :param labels: optional iterable of node attribute names; when
        given, each graph node gets a ``label`` attribute of the
        comma-joined string values of those attributes.
    :param bool edge_labels: when True, each edge gets a ``label``
        attribute of ``'L'`` or ``'R'`` for left/right children.
    """
    import networkx as nx
    graph = nx.DiGraph()
    for node in self._traverse_nodes():
        u = node.key
        graph.add_node(u)  # Minor redundancy
        # Set node properties
        graph.nodes[u]['value'] = node.value
        if labels is not None:
            label = ','.join([str(getattr(node, k)) for k in labels])
            graph.nodes[u]['label'] = label
        # Both children are handled identically apart from the edge tag.
        for child, tag in ((node.left, 'L'), (node.right, 'R')):
            if child is None:
                continue
            v = child.key
            graph.add_node(v)
            graph.add_edge(u, v)
            if edge_labels:
                # BUG FIX: networkx 2.x removed Graph.edge; edge
                # attributes are accessed via G.edges[u, v] (this file
                # already uses the 2.x G.nodes[u] accessor above).
                graph.edges[u, v]['label'] = tag
    return graph
|
Get a networkx representation of the binary search tree.
|
def decode(self, bytes, raw=False):
    """decode(bytearray, raw=False) -> value

    Decode *bytes* as the number of seconds elapsed since the GPS
    epoch.  The count is converted to a local :class:`datetime` unless
    ``raw`` is true, in which case the integral second count is
    returned unchanged.
    """
    # NOTE: the parameter shadows the builtin ``bytes``; the name is
    # kept for interface compatibility with callers.
    seconds = super(Time32Type, self).decode(bytes)
    if raw:
        return seconds
    return dmc.toLocalTime(seconds)
|
decode(bytearray, raw=False) -> value
Decodes the given bytearray containing the elapsed time in
seconds since the GPS epoch and returns the corresponding
Python :class:`datetime`.
If the optional parameter ``raw`` is ``True``, the integral
number of seconds will be returned instead.
|
def _unpack_case(self, case):
""" Returns the contents of the case to be used in the OPF.
"""
base_mva = case.base_mva
b = case.connected_buses
l = case.online_branches
g = case.online_generators
nb = len(b)
nl = len(l)
ng = len(g)
return b, l, g, nb, nl, ng, base_mva
|
Returns the contents of the case to be used in the OPF.
|
def _include_file(context, uri, calling_uri, **kwargs):
    """Locate the template at ``uri`` and render it into the current
    output."""
    template = _lookup_template(context, uri, calling_uri)
    callable_, ctx = _populate_self_namespace(
        context._clean_inheritance_tokens(), template)
    include_kwargs = _kwargs_for_include(callable_, context._data, **kwargs)
    callable_(ctx, **include_kwargs)
locate the template from the given uri and include it in
the current output.
|
def write(self, outfile=None, section=None):
    """Write the current config to a file (defaults to user config).

    :param str outfile: The path to the file to write to.
    :param None/str section: The config section to write, or
        :data:`None` to write the entire config.
    """
    target = outfile or self.user_config_file()
    with io.open(target, 'wb') as handle:
        self.data.write(outfile=handle, section=section)
Write the current config to a file (defaults to user config).
:param str outfile: The path to the file to write to.
:param None/str section: The config section to write, or :data:`None`
to write the entire config.
|
def get_suitable_slot_for_duplicate(self, src_slot):
    """Return a suitable slot position for a duplicate analysis,
    honouring any WorksheetTemplate assigned to this worksheet.

    When the template layout reserves an unused slot for a duplicate of
    ``src_slot``, that slot is returned; otherwise a new slot is added
    at the end of the worksheet.

    :param src_slot: position of the analysis to duplicate
    :return: suitable slot position, or -1 when no duplicate can be
        created from ``src_slot``
    """
    source = to_int(src_slot, 0)
    if source < 1:
        return -1
    # Duplicates can only be created from routine analyses, i.e. those
    # whose container is an Analysis Request.
    container = self.get_container_at(source)
    if not container or not IAnalysisRequest.providedBy(container):
        return -1
    occupied = self.get_slot_positions(type='all')
    template = self.getWorksheetTemplate()
    if not template:
        # No template: simply append a new slot at the worksheet's end.
        return max(occupied) + 1
    # Prefer an empty slot the template layout reserves for a duplicate
    # of this source slot.
    for entry in template.getLayout():
        if entry['type'] != 'd':
            continue
        if to_int(entry['dup']) != source:
            continue
        candidate = int(entry['pos'])
        if candidate not in occupied:
            return candidate
    # Fall back to a new slot past both the occupied positions and the
    # slots reserved by the template layout.
    occupied.append(len(template.getLayout()))
    return max(occupied) + 1
Returns the suitable position for a duplicate analysis, taking into
account if there is a WorksheetTemplate assigned to this worksheet.
By default, returns a new slot at the end of the worksheet unless there
is a slot defined for a duplicate of the src_slot in the worksheet
template layout not yet used.
:param src_slot:
:return: suitable slot position for a duplicate of src_slot
|
def calibration_stimulus(self, mode):
    """Return the stimulus model used for calibration.

    :param mode: which calibration stimulus to fetch: ``'tone'`` or
        ``'noise'``
    :type mode: str
    :returns: :class:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel>`,
        or ``None`` for an unrecognized mode
    """
    if mode == 'noise':
        return self.bs_calibrator.stimulus
    if mode == 'tone':
        return self.tone_calibrator.stimulus
Gets the stimulus model for calibration
:param mode: Type of stimulus to get: tone or noise
:type mode: str
:returns: :class:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel>`
|
def _merge_align_bams(data):
    """Merge multiple alignment BAMs, including split and discordant reads.

    For each BAM slot in the sample dictionary -- the main alignment
    (``work_bam``), discordant/split-read extras (``work_bam_plus``:
    ``disc``/``sr``) and UMI reads (``umi_bam``) -- merge any multiple
    input files into one output BAM and store the merged path back at
    the same key; slots with no inputs are reset to ``None``.

    :param data: nested per-sample data dictionary
    :return: the updated data dictionary
    """
    for key in (["work_bam"], ["work_bam_plus", "disc"], ["work_bam_plus", "sr"], ["umi_bam"]):
        # tz.get_in walks the nested key path; missing keys yield [].
        in_files = tz.get_in(key, data, [])
        if not isinstance(in_files, (list, tuple)):
            in_files = [in_files]
        # Drop empty slots; "None" can appear as a literal string.
        in_files = [x for x in in_files if x and x != "None"]
        if in_files:
            # Suffix the output by sub-key (e.g. "-disc", "-sr") so the
            # merged files for each slot stay distinct.
            ext = "-%s" % key[-1] if len(key) > 1 else ""
            out_file = os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data),
                                    "%s-sort%s.bam" % (dd.get_sample_name(data), ext))
            merged_file = merge_bam_files(in_files, utils.safe_makedir(os.path.dirname(out_file)),
                                          data, out_file=out_file)
            data = tz.update_in(data, key, lambda x: merged_file)
        else:
            data = tz.update_in(data, key, lambda x: None)
    if "align_bam" in data and "work_bam" in data:
        # Keep align_bam pointing at the (possibly merged) work_bam.
        data["align_bam"] = data["work_bam"]
    return data
def _build_header(self):
    """Append the header template to the master template string."""
    for line in ("===============", "Building header", "==============="):
        logger.debug(line)
    self.template = self.template + hs.header
def segmentlistdict_fromsearchsummary_in(xmldoc, program = None):
    """
    Convenience wrapper for a common segmentlistdict use case: find the
    process-table rows in xmldoc produced by a program named *program*,
    then build a segmentlistdict from the "in" segments of the matching
    search_summary rows.

    Note: the segmentlists in the segmentlistdict are not necessarily
    coalesced; they contain the segments exactly as they appear in the
    search_summary table.
    """
    search_summary = lsctables.SearchSummaryTable.get_table(xmldoc)
    process = lsctables.ProcessTable.get_table(xmldoc)
    # A falsy program is passed through unchanged (meaning "all rows").
    ids = process.get_ids_by_program(program) if program else program
    return search_summary.get_in_segmentlistdict(ids)
Convenience wrapper for a common case usage of the segmentlistdict
class: searches the process table in xmldoc for occurrences of a
program named program, then scans the search summary table for
matching process IDs and constructs a segmentlistdict object from
the in segments in those rows.
Note: the segmentlists in the segmentlistdict are not necessarily
coalesced, they contain the segments as they appear in the
search_summary table.
|
def draw(self):
    """Blit the image onto the window at its location, if visible."""
    if self.visible:
        self.window.blit(self.image, self.loc)
def start_health_check(self, recipient):
    """ Starts a task for healthchecking `recipient` if there is not
    one yet.
    It also whitelists the address
    """
    # An entry in addresses_events doubles as "a healthcheck task is
    # already running for this address".
    if recipient not in self.addresses_events:
        self.whitelist(recipient)  # noop for now, for compatibility
        # Shared mutable dict: the spawned task updates the nonce in
        # place across pings (see HACK note below).
        ping_nonce = self.nodeaddresses_to_nonces.setdefault(
            recipient,
            {'nonce': 0},  # HACK: Allows the task to mutate the object
        )
        # Events through which the task signals health transitions to
        # interested listeners.
        events = healthcheck.HealthEvents(
            event_healthy=Event(),
            event_unhealthy=Event(),
        )
        self.addresses_events[recipient] = events
        greenlet_healthcheck = gevent.spawn(
            healthcheck.healthcheck,
            self,
            recipient,
            self.event_stop,  # allows the task to shut down with us
            events.event_healthy,
            events.event_unhealthy,
            self.nat_keepalive_retries,
            self.nat_keepalive_timeout,
            self.nat_invitation_timeout,
            ping_nonce,
        )
        greenlet_healthcheck.name = f'Healthcheck for {pex(recipient)}'
        # Propagate task crashes to the transport's error handler and
        # keep a reference so the greenlet can be joined on shutdown.
        greenlet_healthcheck.link_exception(self.on_error)
        self.greenlets.append(greenlet_healthcheck)
Starts a task for healthchecking `recipient` if there is not
one yet.
It also whitelists the address
|
def validate(self):
    """
    Run every configured validator against the Amazon S3 file.

    Validation errors are collected on :attr:`errors`.  When the file
    is valid and an `AWS_UNVALIDATED_PREFIX` config is present, its
    value is removed from the file key.

    :return: a boolean indicating if the file was valid.
    """
    for check in self.validators:
        try:
            check(self.obj)
        except ValidationError as exc:
            self.errors.append(exc.error)
    if not self.errors and self._has_unvalidated_prefix():
        self._move_to_validated()
    return not self.errors
Validates the given Amazon S3 file with :attr:`validators`. If errors
occur they are appended to :attr:`errors`. If the file is valid and a
`AWS_UNVALIDATED_PREFIX` config is present, its value will be removed
from the file key.
:return: a boolean indicating if the file was valid.
|
def dump(self):
    """Return the item as a compact, key-sorted JSON string."""
    serializer_options = dict(
        sort_keys=True,
        ensure_ascii=False,
        separators=(',', ':'),
    )
    return json.dumps(self.primitive, **serializer_options)
def request(self, url, post=None, method="GET"):
    """ Make the request

    Builds an authenticated request against the Swedbank API, sending
    ``post`` as a JSON body when given, and stores the decoded
    response text in ``self.data``.

    :param url: API path appended to the base endpoint
    :param post: optional JSON string to send as the request body
    :param method: HTTP verb to force on the request
    """
    # Per-request device/session id; sent both as a query parameter
    # and as a cookie below.
    dsid = self.get_dsid()
    baseurl = "https://auth.api.swedbank.se/TDE_DAP_Portal_REST_WEB/api/v1/%s?dsid=%s" % (
        url, dsid)
    if self.pch is None:
        # Lazily build one opener that carries the cookie jar across calls.
        self.pch = build_opener(HTTPCookieProcessor(self.cj))
    if post:
        post = bytearray(post, "utf-8")
        request = Request(baseurl, data=post)
        request.add_header("Content-Type", "application/json")
    else:
        request = Request(baseurl)
    request.add_header("User-Agent", self.useragent)
    request.add_header("Authorization", self.get_authkey())
    request.add_header("Accept", "*/*")
    request.add_header("Accept-Language", "sv-se")
    request.add_header("Connection", "keep-alive")
    request.add_header("Proxy-Connection", "keep-alive")
    self.cj.set_cookie(
        Cookie(version=0, name='dsid', value=dsid, port=None,
               port_specified=False, domain='.api.swedbank.se',
               domain_specified=False, domain_initial_dot=False,
               path='/',
               path_specified=True, secure=False, expires=None,
               discard=True, comment=None, comment_url=None,
               rest={'HttpsOnly': None}, rfc2109=False))
    # Force the verb regardless of whether a body is attached
    # (urllib would otherwise infer GET/POST from the data argument).
    request.get_method = lambda: method
    tmp = self.pch.open(request)
    self.data = tmp.read().decode("utf8")
Make the request
|
def sign(self, issuer_cert, issuer_key, digest):
    """
    Sign the CRL.
    Signing a CRL enables clients to associate the CRL itself with an
    issuer. Before a CRL is meaningful to other OpenSSL functions, it must
    be signed by an issuer.
    This method implicitly sets the issuer's name based on the issuer
    certificate and private key used to sign the CRL.
    .. versionadded:: 16.1.0
    :param X509 issuer_cert: The issuer's certificate.
    :param PKey issuer_key: The issuer's private key.
    :param bytes digest: The digest method to sign the CRL with.
    """
    # Resolve the digest by name; NULL means OpenSSL does not know it.
    digest_obj = _lib.EVP_get_digestbyname(digest)
    _openssl_assert(digest_obj != _ffi.NULL)
    # Copy the issuer name from the certificate onto the CRL.
    _lib.X509_CRL_set_issuer_name(
        self._crl, _lib.X509_get_subject_name(issuer_cert._x509))
    # Sort revoked entries before signing so the signature covers the
    # canonical ordering.
    _lib.X509_CRL_sort(self._crl)
    result = _lib.X509_CRL_sign(self._crl, issuer_key._pkey, digest_obj)
    _openssl_assert(result != 0)
Sign the CRL.
Signing a CRL enables clients to associate the CRL itself with an
issuer. Before a CRL is meaningful to other OpenSSL functions, it must
be signed by an issuer.
This method implicitly sets the issuer's name based on the issuer
certificate and private key used to sign the CRL.
.. versionadded:: 16.1.0
:param X509 issuer_cert: The issuer's certificate.
:param PKey issuer_key: The issuer's private key.
:param bytes digest: The digest method to sign the CRL with.
|
def save_controls(self, parameterstep: 'timetools.PeriodConstrArg' = None,
                  simulationstep: 'timetools.PeriodConstrArg' = None,
                  auxfiler: 'Optional[auxfiletools.Auxfiler]' = None):
    """Save the control parameters of the |Model| object handled by
    each |Element| object, and those handled by the given |Auxfiler|
    object, if any."""
    if auxfiler:
        auxfiler.save(parameterstep, simulationstep)
    for element in printtools.progressbar(self):
        parameters = element.model.parameters
        parameters.save_controls(parameterstep=parameterstep,
                                 simulationstep=simulationstep,
                                 auxfiler=auxfiler)
Save the control parameters of the |Model| object handled by
each |Element| object and eventually the ones handled by the
given |Auxfiler| object.
|
def Shah(m, x, D, rhol, mul, kl, Cpl, P, Pc):
    r'''Calculate the heat transfer coefficient for condensation of a
    fluid inside a tube with the correlation of Shah [1]_, restated by
    the same author in [2]_ and also given in [3]_.  No gas-phase
    properties are required.

    The single-phase coefficient :math:`h_L` comes from the
    Dittus-Boelter correlation with a Reynolds number computed as if
    all the flow were liquid, then corrected for quality and reduced
    pressure:

    .. math::
        h_{TP} = h_L\left[(1-x)^{0.8} +\frac{3.8x^{0.76}(1-x)^{0.04}}
        {P_r^{0.38}}\right]

    Parameters
    ----------
    m : float
        Mass flow rate [kg/s]
    x : float
        Quality at the specific interval [-]
    D : float
        Diameter of the channel [m]
    rhol : float
        Density of the liquid [kg/m^3]
    mul : float
        Viscosity of liquid [Pa*s]
    kl : float
        Thermal conductivity of liquid [W/m/K]
    Cpl : float
        Constant-pressure heat capacity of liquid [J/kg/K]
    P : float
        Pressure of the fluid, [Pa]
    Pc : float
        Critical pressure of the fluid, [Pa]

    Returns
    -------
    h : float
        Heat transfer coefficient [W/m^2/K]

    Notes
    -----
    [1]_ is well written and unambiguous as to how to apply this
    equation.

    Examples
    --------
    >>> Shah(m=1, x=0.4, D=.3, rhol=800, mul=1E-5, kl=0.6, Cpl=2300, P=1E6, Pc=2E7)
    2561.2593415479214

    References
    ----------
    .. [1] Shah, M. M. "A General Correlation for Heat Transfer during Film
       Condensation inside Pipes." International Journal of Heat and Mass
       Transfer 22, no. 4 (April 1, 1979): 547-56.
       doi:10.1016/0017-9310(79)90058-9.
    .. [2] Shah, M. M., Heat Transfer During Film Condensation in Tubes and
       Annuli: A Review of the Literature, ASHRAE Transactions, vol. 87, no.
       3, pp. 1086-1100, 1981.
    .. [3] Kakaç, Sadik, ed. Boilers, Evaporators, and Condensers. 1st.
       Wiley-Interscience, 1991.
    '''
    # Superficial velocity assuming the entire mass flow is liquid.
    V_liq = m/(rhol*pi/4*D**2)
    Re_liq = Reynolds(V=V_liq, D=D, rho=rhol, mu=mul)
    Pr_liq = Prandtl(Cp=Cpl, k=kl, mu=mul)
    # All-liquid single-phase coefficient via Dittus-Boelter.
    h_liq = turbulent_Dittus_Boelter(Re_liq, Pr_liq)*kl/D
    Pr_reduced = P/Pc
    correction = (1-x)**0.8 + 3.8*x**0.76*(1-x)**0.04/Pr_reduced**0.38
    return h_liq*correction
r'''Calculates heat transfer coefficient for condensation
of a fluid inside a tube, as presented in [1]_ and again by the same
author in [2]_; also given in [3]_. Requires no properties of the gas.
Uses the Dittus-Boelter correlation for single phase heat transfer
coefficient, with a Reynolds number assuming all the flow is liquid.
.. math::
h_{TP} = h_L\left[(1-x)^{0.8} +\frac{3.8x^{0.76}(1-x)^{0.04}}
{P_r^{0.38}}\right]
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific interval [-]
D : float
Diameter of the channel [m]
rhol : float
Density of the liquid [kg/m^3]
mul : float
Viscosity of liquid [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
Cpl : float
Constant-pressure heat capacity of liquid [J/kg/K]
P : float
Pressure of the fluid, [Pa]
Pc : float
Critical pressure of the fluid, [Pa]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
[1]_ is well written and unambiguous as to how to apply this equation.
Examples
--------
>>> Shah(m=1, x=0.4, D=.3, rhol=800, mul=1E-5, kl=0.6, Cpl=2300, P=1E6, Pc=2E7)
2561.2593415479214
References
----------
.. [1] Shah, M. M. "A General Correlation for Heat Transfer during Film
Condensation inside Pipes." International Journal of Heat and Mass
Transfer 22, no. 4 (April 1, 1979): 547-56.
doi:10.1016/0017-9310(79)90058-9.
.. [2] Shah, M. M., Heat Transfer During Film Condensation in Tubes and
Annuli: A Review of the Literature, ASHRAE Transactions, vol. 87, no.
3, pp. 1086-1100, 1981.
.. [3] Kakaç, Sadik, ed. Boilers, Evaporators, and Condensers. 1st.
Wiley-Interscience, 1991.
|
def push_log(self, info, level, *args, **kwargs):
    """
    Writes logs. To be fully implemented by subclasses.
    :param info: Log message content.
    :type info: unicode | str
    :param level: Logging level.
    :type level: int
    :param args: Positional arguments to pass to logger.
    :param kwargs: Keyword arguments to pass to logger.
    """
    # Default implementation delegates to the module-level logger.
    log.log(level, info, *args, **kwargs)
|
Writes logs. To be fully implemented by subclasses.
:param info: Log message content.
:type info: unicode | str
:param level: Logging level.
:type level: int
:param args: Positional arguments to pass to logger.
:param kwargs: Keyword arguments to pass to logger.
|
def set_trunk_groups(self, intf, value=None, default=False, disable=False):
    """Configures the switchport trunk group value

    Args:
        intf (str): The interface identifier to configure.
        value (str): The set of values to configure the trunk group
        default (bool): Configures the trunk group default value
        disable (bool): Negates all trunk group settings

    Returns:
        True if the config operation succeeds otherwise False
    """
    if default:
        return self.configure_interface(intf, 'default switchport trunk group')
    if disable:
        return self.configure_interface(intf, 'no switchport trunk group')
    configured = self.get(intf)['trunk_groups']
    desired = set(make_iterable(value))
    success = True
    # Add groups that are desired but not yet configured.
    for name in desired.difference(configured):
        if not self.add_trunk_group(intf, name):
            success = False
    # Remove groups that are configured but no longer desired.
    for name in set(configured).difference(desired):
        if not self.remove_trunk_group(intf, name):
            success = False
    return success
Configures the switchport trunk group value
Args:
intf (str): The interface identifier to configure.
value (str): The set of values to configure the trunk group
default (bool): Configures the trunk group default value
disable (bool): Negates all trunk group settings
Returns:
True if the config operation succeeds otherwise False
|
def add_videos_to_playlist(self, access_token, playlist_id, video_ids):
    """Add videos to a playlist and return the playlist id.

    doc: http://open.youku.com/docs/doc?id=75
    """
    url = 'https://openapi.youku.com/v2/playlists/video/add.json'
    payload = {
        'client_id': self.client_id,
        'access_token': access_token,
        'playlist_id': playlist_id,
        'video_ids': video_ids,
    }
    response = requests.post(url, data=payload)
    check_error(response)
    return response.json()['id']
doc: http://open.youku.com/docs/doc?id=75
|
def get(self, url=None, params=None, retry=True):
    '''
    Execute HTTP GET and return the decoded JSON body.

    Retries up to HTTP_ATTEMPTS_MAX times on request failures.  A 400
    response is raised immediately; on a 403 with ``retry`` true, the
    OAuth2 session is refreshed before the next attempt.  Returns
    ``None`` when every attempt fails without a fatal status.

    :param url: URL to request.
    :param params: optional query-string parameters.
    :param retry: re-login and retry on HTTP 403 responses.
    '''
    headers = self._gen_headers(self.access_token, url)
    attempts = 1
    while attempts <= HTTP_ATTEMPTS_MAX:
        # BUG FIX: res must be pre-bound -- if requests.get itself
        # raises (e.g. a connection error), the except block used to
        # hit an UnboundLocalError reading res.status_code, masking
        # the real failure.
        res = None
        try:
            res = requests.get(url,
                               headers=headers,
                               params=params,
                               timeout=15,
                               verify=self.certs)
            res.raise_for_status()
            return res.json()
        except requests.exceptions.RequestException as e:
            attempts += 1
            status = res.status_code if res is not None else None
            if status == 400:
                raise e
            elif retry and status == 403:
                self.relogin_oauth2()
                # Rebuild headers so the retry carries the fresh token
                # instead of the one that just got a 403.
                headers = self._gen_headers(self.access_token, url)
|
Execute HTTP GET
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.