def set_sys(layout):
    '''
    Set current system keyboard setting

    CLI Example:

    .. code-block:: bash

        salt '*' keyboard.set_sys dvorak
    '''
if salt.utils.path.which('localectl'):
__salt__['cmd.run']('localectl set-keymap {0}'.format(layout))
elif 'RedHat' in __grains__['os_family']:
__salt__['file.sed']('/etc/sysconfig/keyboard',
'^LAYOUT=.*',
'LAYOUT={0}'.format(layout))
elif 'Debian' in __grains__['os_family']:
__salt__['file.sed']('/etc/default/keyboard',
'^XKBLAYOUT=.*',
'XKBLAYOUT={0}'.format(layout))
elif 'Gentoo' in __grains__['os_family']:
__salt__['file.sed']('/etc/conf.d/keymaps',
'^keymap=.*',
'keymap={0}'.format(layout))
return layout
def _new_err(self, errclass: str, *args) -> 'Err':
"""
Error constructor
"""
# get the message or exception
ex, msg = self._get_args(*args)
# construct the error
# handle exception
ftb = None # type: str
function = None # type: str
errtype = None # type: str
file = None # type: str
line = None # type: int
code = None # type: str
ex_msg = None # type: str
caller = None # type: str
caller_msg = None # type: str
st = inspect.stack()
if ex is not None:
# get info from exception
errobj, ex_msg, tb = sys.exc_info()
tb = traceback.extract_tb(tb)
file, line, function, code = tb[-1]
# if called from an external lib
if len(tb) > 1:
file, line, caller, code = tb[0]
else:
call_stack = []
for c in st:
call_stack.append(c[3])
caller = self._get_caller(call_stack, function)
internals = [
"err",
"_new_err",
"fatal",
"warning",
"debug",
"info",
"<module>"]
if caller == function or caller in internals:
caller = None
# handle messages
if msg is not None:
caller_msg = msg
msg = str(ex_msg)
else:
msg = str(ex_msg)
ftb = traceback.format_exc()
errtype = errobj.__name__
if function is None:
# for el in st:
# print(el)
function = st[3][3]
if function == "<module>":
function = None
# init error object
date = datetime.now()
error = Err(
function,
date,
msg,
errtype,
errclass,
line,
file,
code,
ftb,
ex,
caller,
caller_msg)
return error
def load_bot_parameters(config_bundle) -> ConfigObject:
"""
Initializes the agent in the bundle's python file and asks it to provide its
custom configuration object where its parameters can be set.
:return: the parameters as a ConfigObject
"""
# Python file relative to the config location.
python_file = config_bundle.python_file
agent_class_wrapper = import_agent(python_file)
bot_parameters = agent_class_wrapper.get_loaded_class().base_create_agent_configurations()
bot_parameters.parse_file(config_bundle.config_obj, config_directory=config_bundle.config_directory)
return bot_parameters
def dropna(self, how='any', thresh=None, subset=None):
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
:param how: 'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
:param thresh: int, default None
If specified, drop rows that have less than `thresh` non-null values.
This overwrites the `how` parameter.
:param subset: optional list of column names to consider.
>>> df4.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
"""
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
if thresh is None:
thresh = len(subset) if how == 'any' else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
def path2edges(path):
"""Given: [2000343, 32722, 1819] Return: set([(2000343, 32722), (32722, 1819)])."""
node_a, node_b = tee(path)
next(node_b, None)
return zip(node_a, node_b)
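A minimal usage sketch (illustrative values only; the function relies on itertools.tee being imported in its module):

path = [2000343, 32722, 1819]
edges = list(path2edges(path))   # [(2000343, 32722), (32722, 1819)]
# Wrap in set(edges) if an unordered collection of edges is needed.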
async def filterindex(source, func):
"""Filter an asynchronous sequence using the index of the elements.
The given function is synchronous, takes the index as an argument,
    and returns ``True`` if the corresponding item should be forwarded,
``False`` otherwise.
"""
source = transform.enumerate.raw(source)
async with streamcontext(source) as streamer:
async for i, item in streamer:
if func(i):
yield item
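A hedged usage sketch, assuming filterindex can be called as a plain async generator and that the aiostream-style helpers it uses (transform.enumerate.raw, streamcontext) accept an ordinary async iterable:

import asyncio

async def numbers():
    for n in range(6):
        yield n

async def main():
    # Keep only the items at even positions: 0, 2, 4.
    async for item in filterindex(numbers(), lambda i: i % 2 == 0):
        print(item)

asyncio.run(main())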
def list_data_links(self, instance):
"""
Lists the data links visible to this client.
Data links are returned in random order.
:param str instance: A Yamcs instance name.
:rtype: ~collections.Iterable[.Link]
"""
# Server does not do pagination on listings of this resource.
# Return an iterator anyway for similarity with other API methods
response = self.get_proto(path='/links/' + instance)
message = rest_pb2.ListLinkInfoResponse()
message.ParseFromString(response.content)
links = getattr(message, 'link')
return iter([Link(link) for link in links])
def conv2bin(data):
"""Convert a matrix of probabilities into binary values.
    If the matrix has values < 0 or > 1, it is first normalized
    so that all values lie in [0, 1].
:type data: numpy array
:param data: input matrix
:return: converted binary matrix
"""
if data.min() < 0 or data.max() > 1:
data = normalize(data)
out_data = data.copy()
for i, sample in enumerate(out_data):
for j, val in enumerate(sample):
if np.random.random() <= val:
out_data[i][j] = 1
else:
out_data[i][j] = 0
return out_data
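A small illustrative sketch; the output is stochastic because each entry is sampled against its probability:

import numpy as np

probs = np.array([[0.1, 0.9],
                  [0.5, 0.5]])
binary = conv2bin(probs)
# e.g. array([[0., 1.], [1., 0.]]) -- each cell is 1 with probability probs[i][j]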
def _build(self):
"""Connects the module to the graph.
Returns:
The learnable state, which has the same type, structure and shape as
the `initial_state` passed to the constructor.
"""
flat_initial_state = nest.flatten(self._initial_state)
if self._mask is not None:
flat_mask = nest.flatten(self._mask)
flat_learnable_state = [
_single_learnable_state(state, state_id=i, learnable=mask)
for i, (state, mask) in enumerate(zip(flat_initial_state, flat_mask))]
else:
flat_learnable_state = [_single_learnable_state(state, state_id=i)
for i, state in enumerate(flat_initial_state)]
return nest.pack_sequence_as(structure=self._initial_state,
flat_sequence=flat_learnable_state)
def plot_bit_for_bit(case, var_name, model_data, bench_data, diff_data):
""" Create a bit for bit plot """
plot_title = ""
plot_name = case + "_" + var_name + ".png"
    plot_path = os.path.join(livvkit.output_dir, "verification", "imgs")
functions.mkdir_p(plot_path)
m_ndim = np.ndim(model_data)
b_ndim = np.ndim(bench_data)
if m_ndim != b_ndim:
return "Dataset dimensions didn't match!"
if m_ndim == 3:
model_data = model_data[-1]
bench_data = bench_data[-1]
diff_data = diff_data[-1]
plot_title = "Showing "+var_name+"[-1,:,:]"
elif m_ndim == 4:
model_data = model_data[-1][0]
bench_data = bench_data[-1][0]
diff_data = diff_data[-1][0]
plot_title = "Showing "+var_name+"[-1,0,:,:]"
plt.figure(figsize=(12, 3), dpi=80)
plt.clf()
# Calculate min and max to scale the colorbars
_max = np.amax([np.amax(model_data), np.amax(bench_data)])
_min = np.amin([np.amin(model_data), np.amin(bench_data)])
# Plot the model output
plt.subplot(1, 3, 1)
plt.xlabel("Model Data")
plt.ylabel(var_name)
plt.xticks([])
plt.yticks([])
plt.imshow(model_data, vmin=_min, vmax=_max, interpolation='nearest', cmap=colormaps.viridis)
plt.colorbar()
# Plot the benchmark data
plt.subplot(1, 3, 2)
plt.xlabel("Benchmark Data")
plt.xticks([])
plt.yticks([])
plt.imshow(bench_data, vmin=_min, vmax=_max, interpolation='nearest', cmap=colormaps.viridis)
plt.colorbar()
# Plot the difference
plt.subplot(1, 3, 3)
plt.xlabel("Difference")
plt.xticks([])
plt.yticks([])
plt.imshow(diff_data, interpolation='nearest', cmap=colormaps.viridis)
plt.colorbar()
plt.tight_layout(rect=(0, 0, 0.95, 0.9))
plt.suptitle(plot_title)
plot_file = os.path.sep.join([plot_path, plot_name])
if livvkit.publish:
plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600)
plt.savefig(plot_file)
plt.close()
return os.path.join(os.path.relpath(plot_path,
os.path.join(livvkit.output_dir, "verification")),
plot_name)
def remove_profile(name, s3=False):
"""
Removes a profile from your config
"""
user = os.path.expanduser("~")
if s3:
f = os.path.join(user, S3_PROFILE_ID + name)
else:
f = os.path.join(user, DBPY_PROFILE_ID + name)
    try:
        if not os.path.exists(f):
            raise Exception("Profile '{0}' does not exist. Could not find file {1}".format(name, f))
        os.remove(f)
    except Exception as e:
        raise Exception("Could not remove profile {0}! Exception: {1}".format(name, e))
def quick_api(api_key, secret_key, port=8000):
"""
    This method helps you get access to the linkedin api quickly when using it
    from the interpreter.
    Notice that this method creates an http server and waits for a request, so it
    shouldn't be used in real production code - it's just a helper for debugging.
    The usage is basically:
    api = quick_api(KEY, SECRET)
    After you do that, it will print a URL to the screen which you must go to
    and allow the access; after you do that, the method will return with the api
    object.
    """
    auth = LinkedInAuthentication(api_key, secret_key,
                                  'http://localhost:{0}/'.format(port),
                                  PERMISSIONS.enums.values())
app = LinkedInApplication(authentication=auth)
print auth.authorization_url
_wait_for_user_to_enter_browser(app, port)
return app
def acquire(self, *, raise_on_failure=True):
"""Attempt to acquire a slot under this rate limiter.
Parameters:
raise_on_failure(bool): Whether or not failures should raise an
exception. If this is false, the context manager will instead
return a boolean value representing whether or not the rate
limit slot was acquired.
Returns:
bool: Whether or not the slot could be acquired.
"""
acquired = False
try:
acquired = self._acquire()
if raise_on_failure and not acquired:
raise RateLimitExceeded("rate limit exceeded for key %(key)r" % vars(self))
yield acquired
finally:
if acquired:
self._release()
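A hedged usage sketch; limiter stands for an instance of the enclosing rate-limiter class, and the method is assumed to be exposed as a context manager (it is written as a generator that yields the acquisition result):

with limiter.acquire(raise_on_failure=False) as acquired:
    if acquired:
        process_message()   # hypothetical work guarded by the rate limit
    else:
        requeue_message()   # hypothetical fallback when the slot is not free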
def get_operators(self, name=None):
"""Get the list of :py:class:`Operator` elements associated with this job.
Args:
name(str): Only return operators matching `name`, where `name` can be a regular expression. If
`name` is not supplied, then all operators for this job are returned.
Returns:
list(Operator): List of Operator elements associated with this job.
    Retrieving a list of operators whose name contains the string "temperatureSensor" could be performed as follows.
Example:
>>> from streamsx import rest
>>> sc = rest.StreamingAnalyticsConnection()
>>> instances = sc.get_instances()
>>> job = instances[0].get_jobs()[0]
>>> operators = job.get_operators(name="*temperatureSensor*")
.. versionchanged:: 1.9 `name` parameter added.
"""
return self._get_elements(self.operators, 'operators', Operator, name=name)
def _get_link_indices(self, current_modified_line):
"""
Get a list of tuples containing start and end indices of inline
anchor links
:param current_modified_line: The line being examined for links
:return: A list containing tuples of the form (start, end),
            the starting and ending indices of inline anchor links.
"""
# List of (start_index, end_index) tuples for each link in the line
links = []
for m in self._link_regex.finditer(current_modified_line):
links.append(m.span())
return links
def resolve(self, resolve_from):
"""
:API: public
"""
session = requests.Session()
session.mount(resolve_from, requests.adapters.HTTPAdapter(max_retries=self._tries))
content = self._safe_get_content(session, resolve_from)
try:
parsed_urls = self._response_parser.parse(content)
if len(parsed_urls) > 0:
return parsed_urls
raise self.ResolverError('Empty result received from {0}'.format(resolve_from))
except ResponseParser.ResponseParserError as e:
raise self.ResolverError('Error parsing response: {0}'.format(str(e)))
def build_all(cls, list_of_kwargs):
"""Similar to `create_all`. But transaction is not committed.
"""
return cls.add_all([
cls.new(**kwargs) for kwargs in list_of_kwargs], commit=False)
async def create_scene_member(self, shade_position, scene_id, shade_id):
"""Adds a shade to an existing scene"""
data = {
ATTR_SCENE_MEMBER: {
ATTR_POSITION_DATA: shade_position,
ATTR_SCENE_ID: scene_id,
ATTR_SHADE_ID: shade_id,
}
}
return await self.request.post(self._base_path, data=data)
def expr_to_json(expr):
"""
Converts a Sympy expression to a json-compatible tree-structure.
"""
if isinstance(expr, symbolics.Mul):
return {"type": "Mul", "args": [expr_to_json(arg) for arg in expr.args]}
elif isinstance(expr, symbolics.Add):
return {"type": "Add", "args": [expr_to_json(arg) for arg in expr.args]}
elif isinstance(expr, symbolics.Symbol):
return {"type": "Symbol", "name": expr.name}
elif isinstance(expr, symbolics.Pow):
return {"type": "Pow", "args": [expr_to_json(arg) for arg in expr.args]}
elif isinstance(expr, (float, int)):
return {"type": "Number", "value": expr}
elif isinstance(expr, symbolics.Real):
return {"type": "Number", "value": float(expr)}
elif isinstance(expr, symbolics.Integer):
return {"type": "Number", "value": int(expr)}
else:
raise NotImplementedError("Type not implemented: " + str(type(expr)))
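An illustrative round-trip, assuming the symbolics module wraps SymPy types (Mul, Add, Pow, Symbol, Integer, Real); argument order inside Add/Mul may differ:

import sympy

x, y = sympy.symbols('x y')
tree = expr_to_json(2 * x + y ** 2)
# {'type': 'Add', 'args': [
#     {'type': 'Mul', 'args': [{'type': 'Number', 'value': 2}, {'type': 'Symbol', 'name': 'x'}]},
#     {'type': 'Pow', 'args': [{'type': 'Symbol', 'name': 'y'}, {'type': 'Number', 'value': 2}]}]}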
async def dataSources(loop=None, executor=None):
"""Returns a dictionary mapping available DSNs to their descriptions.
:param loop: asyncio compatible event loop
:param executor: instance of custom ThreadPoolExecutor, if not supplied
default executor will be used
:return dict: mapping of dsn to driver description
"""
loop = loop or asyncio.get_event_loop()
sources = await loop.run_in_executor(executor, _dataSources)
return sources
def dismiss_prompt(self, text=None, wait=None):
"""
Execute the wrapped code, dismissing a prompt.
Args:
text (str | RegexObject, optional): Text to match against the text in the modal.
wait (int | float, optional): Maximum time to wait for the modal to appear after
executing the wrapped code.
Raises:
ModalNotFound: If a modal dialog hasn't been found.
"""
with self.driver.dismiss_modal("prompt", text=text, wait=wait):
yield
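A hedged usage sketch; session stands for a hypothetical browser session object that exposes this method as a context manager:

with session.dismiss_prompt(text="Enter your name", wait=2):
    session.click_button("Rename")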
def get_field(expr, field):
""" Fetch a field from a struct expr
"""
weld_obj = WeldObject(encoder_, decoder_)
struct_var = weld_obj.update(expr)
if isinstance(expr, WeldObject):
struct_var = expr.obj_id
weld_obj.dependencies[struct_var] = expr
weld_template = """
%(struct)s.$%(field)s
"""
weld_obj.weld_code = weld_template % {"struct":struct_var,
"field":field}
return weld_obj
def main():
"""Controls the flow of the ddg application"""
    # Build the parser and parse the arguments
    parser = argparse.ArgumentParser(
        description='www.duckduckgo.com zero-click api for your command-line'
    )
    parser.add_argument('query', nargs='*', help='the search query')
    parser.add_argument('-b', '--bang', action='store_true',
                        help='open the !bang redirect url in a new browser tab')
    parser.add_argument('-d', '--define', action='store_true',
                        help='return the definition result')
    parser.add_argument('-j', '--json', action='store_true',
                        help='return the zero-click info api json response')
    parser.add_argument('-l', '--lucky', action='store_true',
                        help='open the result url in a new browser tab')
    parser.add_argument('-s', '--search', action='store_true',
                        help='launch a DuckDuckGo search in a new browser tab')
    parser.add_argument('-u', '--url', action='store_true',
                        help='return the result url')
    args = parser.parse_args()
    # Get the queries
    if args.query:
        queries = [' '.join(args.query)]
    elif not sys.stdin.isatty():
        queries = sys.stdin.read().splitlines()
    else:
        parser.print_help()
        return
    # Determine if we need to add any prefixes based on user flags
    prefix = '!ddg ' if args.search else '!' if args.bang else ''
    # Loop through each query
    for query in queries:
        # Prefix the query
        query = prefix + query
        # Get a response from api.duckduckgo.com using the duckduckgo module
        results = duckduckgo.search(query)
        # If results is null, continue to the next query
        if not results:
            continue
        # Print the raw json output and continue to the next query
        if args.json:
            print_result(results.json)
            continue
        # A list of where to look for an answer first
        results_priority = get_results_priority(args)
        # Do we want the text or url output of the answer found?
        var = get_text_or_url(args)
        # Action to perform when an answer is found
        action = get_action(args)
        # Search for an answer and perform an action
        failed_to_find_answer = True
        for r in results_priority:
            result = getattr(getattr(results, r), var)
            if result:
                action(result)
                failed_to_find_answer = False
                break
        # Let the user know if no answer was found
        if failed_to_find_answer:
            if results.type == 'disambiguation':
                print 'Your query was ambiguous, please be more specific'
            else:
                print 'No results found'
def _infer_xy_labels(darray, x, y, imshow=False, rgb=None):
"""
Determine x and y labels. For use in _plot2d
darray must be a 2 dimensional data array, or 3d for imshow only.
"""
assert x is None or x != y
if imshow and darray.ndim == 3:
return _infer_xy_labels_3d(darray, x, y, rgb)
if x is None and y is None:
if darray.ndim != 2:
raise ValueError('DataArray must be 2d')
y, x = darray.dims
elif x is None:
if y not in darray.dims and y not in darray.coords:
raise ValueError('y must be a dimension name if x is not supplied')
x = darray.dims[0] if y == darray.dims[1] else darray.dims[1]
elif y is None:
if x not in darray.dims and x not in darray.coords:
raise ValueError('x must be a dimension name if y is not supplied')
y = darray.dims[0] if x == darray.dims[1] else darray.dims[1]
elif any(k not in darray.coords and k not in darray.dims for k in (x, y)):
raise ValueError('x and y must be coordinate variables')
return x, y
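A quick sketch of the default behaviour, assuming xarray is available; with neither label supplied, the second dimension becomes x and the first becomes y:

import numpy as np
import xarray as xr

da = xr.DataArray(np.zeros((3, 4)), dims=('lat', 'lon'))
_infer_xy_labels(da, x=None, y=None)   # ('lon', 'lat')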
def edge(self, c):
"""rising edge"""
return ca.logic_and(c, ca.logic_not(self.pre_cond(c)))
def make_url(domain, location):
""" This function helps to make full url path."""
url = urlparse(location)
if url.scheme == '' and url.netloc == '':
return domain + url.path
elif url.scheme == '':
return 'http://' + url.netloc + url.path
else:
return url.geturl()
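Illustrative calls (assuming urlparse comes from urllib.parse); relative paths are joined onto the domain, scheme-relative locations get http:// prepended, and absolute URLs pass through unchanged:

make_url('http://example.com', '/about')               # 'http://example.com/about'
make_url('http://example.com', '//cdn.example.net/x')  # 'http://cdn.example.net/x'
make_url('http://example.com', 'https://other.org/p')  # 'https://other.org/p'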
def read_dist_egginfo_json(dist, filename=DEFAULT_JSON):
"""
Safely get a json within an egginfo from a distribution.
"""
# use the given package's distribution to acquire the json file.
if not dist.has_metadata(filename):
logger.debug("no '%s' for '%s'", filename, dist)
return
try:
result = dist.get_metadata(filename)
except IOError:
logger.error("I/O error on reading of '%s' for '%s'.", filename, dist)
return
try:
obj = json.loads(result)
except (TypeError, ValueError):
logger.error(
"the '%s' found in '%s' is not a valid json.", filename, dist)
return
logger.debug("found '%s' for '%s'.", filename, dist)
return obj
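A hedged usage sketch with a pkg_resources distribution; the filename shown is a hypothetical stand-in for whatever DEFAULT_JSON names in this module:

import pkg_resources

dist = pkg_resources.get_distribution('setuptools')
obj = read_dist_egginfo_json(dist, filename='package.json')
# None if the metadata file is missing or not valid JSON; the decoded object otherwise.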
def get_project() -> Optional[str]:
"""
Returns the current project name.
"""
project = SETTINGS.project
if not project:
require_test_mode_enabled()
raise RunError('Missing project name; for test mode, please set PULUMI_NODEJS_PROJECT')
return project
def set_content_type (self):
"""Return URL content type, or an empty string if content
type could not be found."""
if self.url:
self.content_type = mimeutil.guess_mimetype(self.url, read=self.get_content)
else:
self.content_type = u""
def make_file_exist(self):
"""Make sure the parent directory exists, then touch the file"""
self.parent.make_directory_exist()
self.parent.touch_file(self.name)
return self
def list_modules(root_package = 'vlcp'):
'''
Walk through all the sub modules, find subclasses of vlcp.server.module.Module,
list their apis through apidefs
'''
pkg = __import__(root_package, fromlist=['_'])
module_dict = OrderedDict()
_server = Server()
for imp, module, _ in walk_packages(pkg.__path__, root_package + '.'):
m = __import__(module, fromlist = ['_'])
for name, v in vars(m).items():
if v is not None and isinstance(v, type) and issubclass(v, Module) \
and v is not Module \
and not isinstance(v, _ProxyModule) \
and hasattr(v, '__dict__') and 'configkey' in v.__dict__ \
and v.__module__ == module:
module_name = v.__name__.lower()
if module_name not in module_dict:
_inst = v(_server)
module_info = OrderedDict((('class', v.__module__ + '.' + v.__name__),
('dependencies', [d.__name__.lower()
for d in v.depends]),
('classdescription', getdoc(v)),
('apis', [])))
if hasattr(_inst, 'apiHandler'):
apidefs = _inst.apiHandler.apidefs
module_info['apis'] = [(d[0], d[3])
for d in apidefs
if len(d) > 3 and \
not d[0].startswith('public/')]
module_dict[module_name] = module_info
return module_dict
def authenticate(self, request, username=None, password=None, realm=None):
"""
Check credentials against the RADIUS server identified by `realm` and
return a User object or None. If no argument is supplied, Django will
skip this backend and try the next one (as a TypeError will be raised
and caught).
"""
if isinstance(username, basestring):
username = username.encode('utf-8')
if isinstance(password, basestring):
password = password.encode('utf-8')
server = self.get_server(realm)
if not server:
return None
result = self._radius_auth(server, username, password)
if result:
full_username = self.construct_full_username(username, realm)
return self.get_django_user(full_username, password)
return None
def _set_types(self):
"""Make sure that x, y have consistent types and set dtype."""
# If we given something that is not an int or a float we raise
# a RuntimeError as we do not want to have to guess if the given
# input should be interpreted as an int or a float, for example the
# interpretation of the string "1" vs the interpretation of the string
# "1.0".
    for c in (self.x, self.y):
        if not isinstance(c, (int, float)):
            raise RuntimeError('x, y coords should be int or float')
if isinstance(self.x, int) and isinstance(self.y, int):
self._dtype = "int"
else:
# At least one value is a float so promote both to float.
self.x = float(self.x)
self.y = float(self.y)
self._dtype = "float"
def register(cache):
''' Registers a cache. '''
global caches
name = cache().name
    if name not in caches:
caches[name] = cache
def serialize(input, tree="etree", encoding=None, **serializer_opts):
"""Serializes the input token stream using the specified treewalker
:arg input: the token stream to serialize
:arg tree: the treewalker to use
:arg encoding: the encoding to use
:arg serializer_opts: any options to pass to the
:py:class:`html5lib.serializer.HTMLSerializer` that gets created
:returns: the tree serialized as a string
Example:
>>> from html5lib.html5parser import parse
>>> from html5lib.serializer import serialize
>>> token_stream = parse('<html><body><p>Hi!</p></body></html>')
>>> serialize(token_stream, omit_optional_tags=False)
'<html><head></head><body><p>Hi!</p></body></html>'
"""
# XXX: Should we cache this?
walker = treewalkers.getTreeWalker(tree)
s = HTMLSerializer(**serializer_opts)
return s.render(walker(input), encoding)
def t_MINUS(self, t):
r'-'
t.endlexpos = t.lexpos + len(t.value)
return t
def cmd_output_remove(self, args):
'''remove an output'''
device = args[0]
for i in range(len(self.mpstate.mav_outputs)):
conn = self.mpstate.mav_outputs[i]
if str(i) == device or conn.address == device:
print("Removing output %s" % conn.address)
try:
mp_util.child_fd_list_add(conn.port.fileno())
except Exception:
pass
conn.close()
self.mpstate.mav_outputs.pop(i)
return
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.minisat and self.status == False:
return pysolvers.minisatgh_core(self.minisat)
def get_token_and_data(self, data):
    '''
    When we receive this, we have 'token):data'
    '''
    token = ''
    for c in data:
        if c != ')':
            token = token + c
        else:
            break
    # Slice off the leading "token):" prefix; lstrip() treats its argument as a
    # set of characters and could eat into the data portion as well.
    return token, data[len(token) + 2:]
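A small illustrative call; obj stands for a hypothetical instance of the enclosing class:

token, payload = obj.get_token_and_data('abc123):{"cmd": "ping"}')
# token == 'abc123', payload == '{"cmd": "ping"}'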
def _is_valid_amendment_json(self, json_repr):
"""Call the primary validator for a quick test"""
amendment = self._coerce_json_to_amendment(json_repr)
if amendment is None:
# invalid JSON, definitely broken
return False
aa = validate_amendment(amendment)
errors = aa[0]
for e in errors:
_LOG.debug('> invalid JSON: {m}'.format(m=e.encode('utf-8')))
if len(errors) > 0:
return False
return True
def save(self, *args, **kwargs):
"""
Before saving, if slide is for a publication, use publication info
for slide's title, subtitle, description.
"""
if self.publication:
publication = self.publication
if not self.title:
self.title = publication.title
if not self.subtitle:
first_author = publication.first_author
if first_author == publication.last_author:
authors = first_author
else:
authors = '{} et al.'.format(first_author)
self.subtitle = '{}, {} ({})'.format(authors,
publication.journal, publication.year)
if not self.description:
self.description = publication.abstract
if self.publication.year and not self.pk:
delta = timezone.now() - self.publish_datetime
if self.publish_datetime <= timezone.now() and delta.days == 0:
self.publish_datetime = datetime.datetime(
year=int(self.publication.year),
month=int(self.publication.month or 1),
day=int(self.publication.day or 1),
)
super().save(*args, **kwargs)
def load_config(data, *models, **kwargs):
'''
Generate and load the config on the device using the OpenConfig or IETF
models and device profiles.
data
Dictionary structured with respect to the models referenced.
models
A list of models to be used when generating the config.
profiles: ``None``
Use certain profiles to generate the config.
If not specified, will use the platform default profile(s).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard
and return the changes. Default: ``False`` and will commit
the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
replace: ``False``
        Should the config be replaced with the newly generated one?
CLI Example:
.. code-block:: bash
salt '*' napalm_yang.load_config {} models.openconfig_interfaces test=True debug=True
Output Example:
.. code-block:: jinja
device1:
----------
already_configured:
False
comment:
diff:
[edit interfaces ge-0/0/0]
- mtu 1400;
[edit interfaces ge-0/0/0 unit 0 family inet]
- dhcp;
[edit interfaces lo0]
- unit 0 {
- description lo0.0;
- }
+ unit 1 {
+ description "new loopback";
+ }
loaded_config:
<configuration>
<interfaces replace="replace">
<interface>
<name>ge-0/0/0</name>
<unit>
<name>0</name>
<family>
<inet/>
</family>
<description>ge-0/0/0.0</description>
</unit>
<description>management interface</description>
</interface>
<interface>
<name>ge-0/0/1</name>
<disable/>
<description>ge-0/0/1</description>
</interface>
<interface>
<name>ae0</name>
<unit>
<name>0</name>
<vlan-id>100</vlan-id>
<family>
<inet>
<address>
<name>192.168.100.1/24</name>
</address>
<address>
<name>172.20.100.1/24</name>
</address>
</inet>
</family>
<description>a description</description>
</unit>
<vlan-tagging/>
<unit>
<name>1</name>
<vlan-id>1</vlan-id>
<family>
<inet>
<address>
<name>192.168.101.1/24</name>
</address>
</inet>
</family>
<disable/>
<description>ae0.1</description>
</unit>
<vlan-tagging/>
<unit>
<name>2</name>
<vlan-id>2</vlan-id>
<family>
<inet>
<address>
<name>192.168.102.1/24</name>
</address>
</inet>
</family>
<description>ae0.2</description>
</unit>
<vlan-tagging/>
</interface>
<interface>
<name>lo0</name>
<unit>
<name>1</name>
<description>new loopback</description>
</unit>
<description>lo0</description>
</interface>
</interfaces>
</configuration>
result:
True
'''
if isinstance(models, tuple) and isinstance(models[0], list):
models = models[0]
config = get_config(data, *models, **kwargs)
test = kwargs.pop('test', False)
debug = kwargs.pop('debug', False)
commit = kwargs.pop('commit', True)
replace = kwargs.pop('replace', False)
return __salt__['net.load_config'](text=config,
test=test,
debug=debug,
commit=commit,
replace=replace,
inherit_napalm_device=napalm_device)
def filter_leader_files(cluster_config, broker_files):
"""Given a list of broker files, filters out all the files that
are in the replicas.
:param cluster_config: the cluster
:type cluster_config: kafka_utils.utils.config.ClusterConfig
:param broker_files: the broker files
:type broker_files: list of (b_id, host, [file_path, file_path ...]) tuples
:returns: the filtered list
:rtype: list of (broker_id, host, [file_path, file_path ...]) tuples
"""
print("Filtering leaders")
leader_of = get_partition_leaders(cluster_config)
result = []
for broker, host, files in broker_files:
filtered = []
for file_path in files:
tp = get_tp_from_file(file_path)
if tp not in leader_of or leader_of[tp] == broker:
filtered.append(file_path)
result.append((broker, host, filtered))
print(
"Broker: {broker}, leader of {l_count} over {f_count} files".format(
broker=broker,
l_count=len(filtered),
f_count=len(files),
)
)
return result
def identify_and_tag_authors(line, authors_kb):
"""Given a reference, look for a group of author names,
place tags around the author group, return the newly tagged line.
"""
re_auth, re_auth_near_miss = get_author_regexps()
# Replace authors which do not convert well from utf-8
for pattern, repl in authors_kb:
line = line.replace(pattern, repl)
output_line = line
# We matched authors here
line = strip_tags(output_line)
matched_authors = list(re_auth.finditer(line))
# We try to have better results by unidecoding
unidecoded_line = strip_tags(unidecode(output_line))
matched_authors_unidecode = list(re_auth.finditer(unidecoded_line))
if len(matched_authors_unidecode) > len(matched_authors):
output_line = unidecode(output_line)
matched_authors = matched_authors_unidecode
# If there is at least one matched author group
if matched_authors:
matched_positions = []
preceeding_text_string = line
preceeding_text_start = 0
for auth_no, match in enumerate(matched_authors):
# Only if there are no underscores or closing arrows found in the matched author group
# This must be checked for here, as it cannot be applied to the re without clashing with
# other Unicode characters
if line[match.start():match.end()].find("_") == -1:
# Has the group with name 'et' (for 'et al') been found in the pattern?
# Has the group with name 'es' (for ed. before the author) been found in the pattern?
# Has the group with name 'ee' (for ed. after the author) been
# found in the pattern?
matched_positions.append({
'start': match.start(),
'end': match.end(),
'etal': match.group('et') or match.group('et2'),
'ed_start': match.group('es'),
'ed_end': match.group('ee'),
'multi_auth': match.group('multi_auth'),
'multi_surs': match.group('multi_surs'),
'text_before': preceeding_text_string[preceeding_text_start:match.start()],
'auth_no': auth_no,
'author_names': match.group('author_names')
})
# Save the end of the match, from where to snip the misc text
# found before an author match
preceeding_text_start = match.end()
# Work backwards to avoid index problems when adding AUTH tags
matched_positions.reverse()
for m in matched_positions:
dump_in_misc = False
start = m['start']
end = m['end']
# Check the text before the current match to see if it has a bad
# 'et al'
lower_text_before = m['text_before'].strip().lower()
for e in etal_matches:
if lower_text_before.endswith(e):
# If so, this author match is likely to be a bad match on a
# missed title
dump_in_misc = True
break
# An AND found here likely indicates a missed author before this text
# Thus, triggers weaker author searching, within the previous misc text
# (Check the text before the current match to see if it has a bad 'and')
# A bad 'and' will only be denoted as such if there exists only one author after it
# and the author group is legit (not to be dumped in misc)
if not dump_in_misc and not (m['multi_auth'] or m['multi_surs']) \
and (lower_text_before.endswith(' and')):
# Search using a weaker author pattern to try and find the
# missed author(s) (cut away the end 'and')
weaker_match = re_auth_near_miss.match(m['text_before'])
if weaker_match and not (weaker_match.group('es') or weaker_match.group('ee')):
# Change the start of the author group to include this new
# author group
start = start - \
(len(m['text_before']) - weaker_match.start())
# Still no match, do not add tags for this author match.. dump
# it into misc
else:
dump_in_misc = True
add_to_misc = ""
# If a semi-colon was found at the end of this author group, keep it in misc
            # so that it can be looked at for splitting heuristics
if len(output_line) > m['end']:
if output_line[m['end']].strip(" ,.") == ';':
add_to_misc = ';'
# Standardize eds. notation
tmp_output_line = re.sub(re_ed_notation, '(ed.)',
output_line[start:end], re.IGNORECASE)
# Standardize et al. notation
tmp_output_line = re.sub(re_etal, 'et al.',
tmp_output_line, re.IGNORECASE)
# Strip
tmp_output_line = tmp_output_line.lstrip('.').strip(",:;- [](")
if not tmp_output_line.endswith('(ed.)'):
tmp_output_line = tmp_output_line.strip(')')
# ONLY wrap author data with tags IF there is no evidence that it is an
# ed. author. (i.e. The author is not referred to as an editor)
# Does this author group string have 'et al.'?
if m['etal'] and not (m['ed_start'] or m['ed_end'] or dump_in_misc):
output_line = output_line[:start] \
+ "<cds.AUTHetal>" \
+ tmp_output_line \
+ CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_ETAL \
+ add_to_misc \
+ output_line[end:]
elif not (m['ed_start'] or m['ed_end'] or dump_in_misc):
# Insert the std (standard) tag
output_line = output_line[:start] \
+ "<cds.AUTHstnd>" \
+ tmp_output_line \
+ CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_STND \
+ add_to_misc \
+ output_line[end:]
# Apply the 'include in $h' method to author groups marked as
# editors
elif m['ed_start'] or m['ed_end']:
ed_notation = " (eds.)"
# Standardize et al. notation
tmp_output_line = re.sub(re_etal, 'et al.',
m['author_names'], re.IGNORECASE)
# remove any characters which denote this author group
# to be editors, just take the
# author names, and append '(ed.)'
output_line = output_line[:start] \
+ "<cds.AUTHincl>" \
+ tmp_output_line.strip(",:;- [](") \
+ ed_notation \
+ CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_INCL \
+ add_to_misc \
+ output_line[end:]
return output_line
async def SetFilesystemAttachmentInfo(self, filesystem_attachments):
'''
filesystem_attachments : typing.Sequence[~FilesystemAttachment]
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='StorageProvisioner',
request='SetFilesystemAttachmentInfo',
version=3,
params=_params)
_params['filesystem-attachments'] = filesystem_attachments
reply = await self.rpc(msg)
return reply
def setup(argv):
"""Sets up the ArgumentParser.
Args:
argv: an array of arguments
"""
parser = argparse.ArgumentParser(
        description='Compute Jekyll- and prose-aware wordcounts',
epilog='Accepted filetypes: plaintext, markdown, markdown (Jekyll)')
parser.add_argument('-S', '--split-hyphens', action='store_true',
dest='split_hyphens',
help='split hyphenated words rather than counting '
'them as one word ("non-trivial" counts as two words '
'rather than one)')
parser.add_argument('-u', '--update', action='store_true',
help='update the jekyll file in place with the counts.'
' Does nothing if the file is not a Jekyll markdown '
'file. Implies format=yaml, invalid with input '
'from STDIN and non-Jekyll files.')
parser.add_argument('-f', '--format', nargs='?',
choices=['yaml', 'json', 'default'], default='default',
help='output format.')
parser.add_argument('-i', '--indent', type=int, nargs='?', default=4,
help='indentation depth (default: 4).')
parser.add_argument('file', type=argparse.FileType('rb'),
help='file to parse (or - for STDIN)')
return parser.parse_args(argv)
def get_ordering(self, request, queryset, view):
"""Return an ordering for a given request.
DRF expects a comma separated list, while DREST expects an array.
This method overwrites the DRF default so it can parse the array.
"""
params = view.get_request_feature(view.SORT)
if params:
fields = [param.strip() for param in params]
valid_ordering, invalid_ordering = self.remove_invalid_fields(
queryset, fields, view
)
# if any of the sort fields are invalid, throw an error.
# else return the ordering
if invalid_ordering:
raise ValidationError(
"Invalid filter field: %s" % invalid_ordering
)
else:
return valid_ordering
# No sorting was included
return self.get_default_ordering(view)
def type(self, name: str):
"""return the first complete definition of type 'name'"""
for f in self.body:
if (hasattr(f, '_ctype')
and f._ctype._storage == Storages.TYPEDEF
and f._name == name):
return f
def _try_cast(self, result, obj, numeric_only=False):
"""
Try to cast the result to our obj original type,
we may have roundtripped through object in the mean-time.
If numeric_only is True, then only try to cast numerics
and not datetimelikes.
"""
if obj.ndim > 1:
dtype = obj._values.dtype
else:
dtype = obj.dtype
if not is_scalar(result):
if is_datetime64tz_dtype(dtype):
# GH 23683
# Prior results _may_ have been generated in UTC.
# Ensure we localize to UTC first before converting
# to the target timezone
try:
result = obj._values._from_sequence(
result, dtype='datetime64[ns, UTC]'
)
result = result.astype(dtype)
except TypeError:
# _try_cast was called at a point where the result
# was already tz-aware
pass
elif is_extension_array_dtype(dtype):
# The function can return something of any type, so check
# if the type is compatible with the calling EA.
try:
result = obj._values._from_sequence(result, dtype=dtype)
except Exception:
# https://github.com/pandas-dev/pandas/issues/22850
# pandas has no control over what 3rd-party ExtensionArrays
# do in _values_from_sequence. We still want ops to work
# though, so we catch any regular Exception.
pass
elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
return result
def get_page_url_title(self):
'''
Get the title and current url from the remote session.
Return is a 2-tuple: (page_title, page_url).
'''
cr_tab_id = self.transport._get_cr_tab_meta_for_key(self.tab_id)['id']
targets = self.Target_getTargets()
assert 'result' in targets
assert 'targetInfos' in targets['result']
for tgt in targets['result']['targetInfos']:
if tgt['targetId'] == cr_tab_id:
# {
# 'title': 'Page Title 1',
# 'targetId': '9d2c503c-e39e-42cc-b950-96db073918ee',
# 'attached': True,
# 'url': 'http://localhost:47181/with_title_1',
# 'type': 'page'
# }
title = tgt['title']
cur_url = tgt['url']
return title, cur_url
def get_staged_signatures(vcs):
"""Get the list of staged signatures
Args:
vcs (easyci.vcs.base.Vcs)
Returns:
list(basestring) - list of signatures
"""
staged_path = _get_staged_history_path(vcs)
known_signatures = []
if os.path.exists(staged_path):
with open(staged_path, 'r') as f:
known_signatures = f.read().split()
return known_signatures
def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
r"""
When replacing one block of lines with another, search the blocks
for *similar* lines; the best-matching pair (if any) is used as a
synch point, and intraline difference marking is done on the
similar pair. Lots of work, but often worth it.
Example:
>>> d = Differ()
>>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
... ['abcdefGhijkl\n'], 0, 1)
>>> print ''.join(results),
- abcDefghiJkl
? ^ ^ ^
+ abcdefGhijkl
? ^ ^ ^
"""
# don't synch up unless the lines have a similarity score of at
# least cutoff; best_ratio tracks the best score seen so far
best_ratio, cutoff = 0.74, 0.75
cruncher = SequenceMatcher(self.charjunk)
eqi, eqj = None, None # 1st indices of equal lines (if any)
# search for the pair that matches best without being identical
# (identical lines must be junk lines, & we don't want to synch up
# on junk -- unless we have to)
for j in xrange(blo, bhi):
bj = b[j]
cruncher.set_seq2(bj)
for i in xrange(alo, ahi):
ai = a[i]
if ai == bj:
if eqi is None:
eqi, eqj = i, j
continue
cruncher.set_seq1(ai)
# computing similarity is expensive, so use the quick
# upper bounds first -- have seen this speed up messy
# compares by a factor of 3.
# note that ratio() is only expensive to compute the first
# time it's called on a sequence pair; the expensive part
# of the computation is cached by cruncher
if cruncher.real_quick_ratio() > best_ratio and \
cruncher.quick_ratio() > best_ratio and \
cruncher.ratio() > best_ratio:
best_ratio, best_i, best_j = cruncher.ratio(), i, j
if best_ratio < cutoff:
# no non-identical "pretty close" pair
if eqi is None:
# no identical pair either -- treat it as a straight replace
for line in self._plain_replace(a, alo, ahi, b, blo, bhi):
yield line
return
# no close pair, but an identical pair -- synch up on that
best_i, best_j, best_ratio = eqi, eqj, 1.0
else:
# there's a close pair, so forget the identical pair (if any)
eqi = None
# a[best_i] very similar to b[best_j]; eqi is None iff they're not
# identical
# pump out diffs from before the synch point
for line in self._fancy_helper(a, alo, best_i, b, blo, best_j):
yield line
# do intraline marking on the synch pair
aelt, belt = a[best_i], b[best_j]
if eqi is None:
# pump out a '-', '?', '+', '?' quad for the synched lines
atags = btags = ""
cruncher.set_seqs(aelt, belt)
for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
la, lb = ai2 - ai1, bj2 - bj1
if tag == 'replace':
atags += '^' * la
btags += '^' * lb
elif tag == 'delete':
atags += '-' * la
elif tag == 'insert':
btags += '+' * lb
elif tag == 'equal':
atags += ' ' * la
btags += ' ' * lb
else:
raise ValueError, 'unknown tag %r' % (tag,)
for line in self._qformat(aelt, belt, atags, btags):
yield line
else:
# the synch pair is identical
yield ' ' + aelt
# pump out diffs from after the synch point
for line in self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi):
yield line
def lxqstr(string, qchar, first):
"""
Lex (scan) a quoted string.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lxqstr_c.html
:param string: String to be scanned.
:type string: str
:param qchar: Quote delimiter character.
:type qchar: char (string of one char)
:param first: Character position at which to start scanning.
:type first: int
:return: last and nchar
:rtype: tuple
"""
string = stypes.stringToCharP(string)
qchar = ctypes.c_char(qchar.encode(encoding='UTF-8'))
first = ctypes.c_int(first)
last = ctypes.c_int()
nchar = ctypes.c_int()
libspice.lxqstr_c(string, qchar, first, ctypes.byref(last),
ctypes.byref(nchar))
return last.value, nchar.value
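An illustrative call based on the example in the CSPICE documentation linked above; scanning from the opening quote is expected to return the index of the closing quote and the token length:

last, nchar = lxqstr('The "SPICE" system', '"', 4)
# Expected per the CSPICE docs: last == 10, nchar == 7 (the token "SPICE")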
def validate(func):
"""
Check if annotated function arguments validate according to spec
"""
call = PythonCall(func)
@wraps(func)
def decorator(*args, **kwargs):
parameters = call.bind(args, kwargs)
for arg_name, validator in func.__annotations__.items():
if not validator(parameters[arg_name]):
raise TypeError(
"Argument {!r} failed to validate".format(arg_name))
return call.apply(args, kwargs)
return decorator
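A hedged usage sketch: annotations are treated as predicates rather than types, and PythonCall is assumed to bind positional and keyword arguments to parameter names:

@validate
def scale(value: lambda v: isinstance(v, (int, float)),
          factor: lambda v: v > 0):
    return value * factor

scale(3, 2)    # returns 6
scale(3, -1)   # raises TypeError: Argument 'factor' failed to validate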
def bgp_summary_parser(bgp_summary):
"""Parse 'show bgp all summary vrf' output information from NX-OS devices."""
bgp_summary_dict = {}
# Check for BGP summary information lines that have no data
if len(bgp_summary.strip().splitlines()) <= 1:
return {}
allowed_afi = ["ipv4", "ipv6", "l2vpn"]
vrf_regex = r"^BGP summary information for VRF\s+(?P<vrf>\S+),"
afi_regex = (
r"^BGP summary information.*address family (?P<afi>\S+ (?:Unicast|EVPN))"
)
local_router_regex = (
r"^BGP router identifier\s+(?P<router_id>\S+)"
r",\s+local AS number\s+(?P<local_as>\S+)"
)
for pattern in [vrf_regex, afi_regex, local_router_regex]:
match = re.search(pattern, bgp_summary, flags=re.M)
if match:
bgp_summary_dict.update(match.groupdict(1))
# Some post regex cleanup and validation
vrf = bgp_summary_dict["vrf"]
if vrf.lower() == "default":
bgp_summary_dict["vrf"] = "global"
afi = bgp_summary_dict["afi"]
afi = afi.split()[0].lower()
if afi not in allowed_afi:
raise ValueError("AFI ({}) is invalid and not supported.".format(afi))
bgp_summary_dict["afi"] = afi
local_as = bgp_summary_dict["local_as"]
local_as = helpers.as_number(local_as)
match = re.search(IPV4_ADDR_REGEX, bgp_summary_dict["router_id"])
if not match:
raise ValueError(
"BGP router_id ({}) is not valid".format(bgp_summary_dict["router_id"])
)
vrf = bgp_summary_dict["vrf"]
bgp_return_dict = {vrf: {"router_id": bgp_summary_dict["router_id"], "peers": {}}}
# Extract and process the tabular data
tabular_divider = r"^Neighbor\s+.*PfxRcd$"
tabular_data = re.split(tabular_divider, bgp_summary, flags=re.M)
if len(tabular_data) != 2:
msg = "Unexpected data processing BGP summary information:\n\n{}".format(
bgp_summary
)
raise ValueError(msg)
tabular_data = tabular_data[1]
bgp_table = bgp_normalize_table_data(tabular_data)
for bgp_entry in bgp_table_parser(bgp_table):
bgp_return_dict[vrf]["peers"].update(bgp_entry)
bgp_new_dict = {}
for neighbor, bgp_data in bgp_return_dict[vrf]["peers"].items():
received_prefixes = bgp_data.pop("received_prefixes")
bgp_data["address_family"] = {}
prefixes_dict = {
"sent_prefixes": -1,
"accepted_prefixes": -1,
"received_prefixes": received_prefixes,
}
bgp_data["address_family"][afi] = prefixes_dict
bgp_data["local_as"] = local_as
# FIX, hard-coding
bgp_data["remote_id"] = "0.0.0.0"
bgp_new_dict[neighbor] = bgp_data
bgp_return_dict[vrf]["peers"] = bgp_new_dict
return bgp_return_dict
def pivot_query_as_matrix(facet=None, facet_pivot_fields=None, **kwargs):
"""
Pivot query
"""
if facet_pivot_fields is None:
facet_pivot_fields = []
logging.info("Additional args: {}".format(kwargs))
fp = search_associations(rows=0,
facet_fields=[facet],
facet_pivot_fields=facet_pivot_fields,
**kwargs)['facet_pivot']
# we assume only one
results = list(fp.items())[0][1]
tups = []
xtype=None
ytype=None
xlabels=set()
ylabels=set()
for r in results:
logging.info("R={}".format(r))
xtype=r['field']
rv = r['value']
xlabels.add(rv)
for piv in r['pivot']:
ytype=piv['field']
pv = piv['value']
ylabels.add(pv)
tups.append( (rv,pv,piv['count']) )
z = [ [0] * len(xlabels) for i1 in range(len(ylabels)) ]
xlabels=list(xlabels)
ylabels=list(ylabels)
xmap = dict([x[::-1] for x in enumerate(xlabels)])
ymap = dict([x[::-1] for x in enumerate(ylabels)])
for t in tups:
z[ymap[t[1]]][xmap[t[0]]] = t[2]
m = {'xtype':xtype,
'ytype':ytype,
'xaxis':xlabels,
'yaxis':ylabels,
'z':z}
return m
def gpu_a_trous():
"""
Simple convenience function so that the a trous kernels can be easily accessed by any function.
"""
ker1 = SourceModule("""
__global__ void gpu_a_trous_row_kernel(float *in1, float *in2, float *wfil, int *scale)
{
const int len = gridDim.x*blockDim.x;
const int col = (blockDim.x * blockIdx.x + threadIdx.x);
const int i = col;
const int row = (blockDim.y * blockIdx.y + threadIdx.y);
const int j = row*len;
const int tid2 = i + j;
const int lstp = exp2(float(scale[0] + 1));
const int sstp = exp2(float(scale[0]));
in2[tid2] = wfil[2]*in1[tid2];
if (row < lstp)
{ in2[tid2] += wfil[0]*in1[col + len*(lstp - row - 1)]; }
else
{ in2[tid2] += wfil[0]*in1[tid2 - lstp*len]; }
if (row < sstp)
{ in2[tid2] += wfil[1]*in1[col + len*(sstp - row - 1)]; }
else
{ in2[tid2] += wfil[1]*in1[tid2 - sstp*len]; }
if (row >= (len - sstp))
{ in2[tid2] += wfil[3]*in1[col + len*(2*len - row - sstp - 1)]; }
else
{ in2[tid2] += wfil[3]*in1[tid2 + sstp*len]; }
if (row >= (len - lstp))
{ in2[tid2] += wfil[4]*in1[col + len*(2*len - row - lstp - 1)]; }
else
{ in2[tid2] += wfil[4]*in1[tid2 + lstp*len]; }
}
""", keep=True)
ker2 = SourceModule("""
__global__ void gpu_a_trous_col_kernel(float *in1, float *in2, float *wfil, int *scale)
{
const int len = gridDim.x*blockDim.x;
const int col = (blockDim.x * blockIdx.x + threadIdx.x);
const int i = col;
const int row = (blockDim.y * blockIdx.y + threadIdx.y);
const int j = row*len;
const int tid2 = i + j;
const int lstp = exp2(float(scale[0] + 1));
const int sstp = exp2(float(scale[0]));
in2[tid2] = wfil[2]*in1[tid2];
if (col < lstp)
{ in2[tid2] += wfil[0]*in1[j - col + lstp - 1]; }
else
{ in2[tid2] += wfil[0]*in1[tid2 - lstp]; }
if (col < sstp)
{ in2[tid2] += wfil[1]*in1[j - col + sstp - 1]; }
else
{ in2[tid2] += wfil[1]*in1[tid2 - sstp]; }
if (col >= (len - sstp))
{ in2[tid2] += wfil[3]*in1[j + 2*len - sstp - col - 1]; }
else
{ in2[tid2] += wfil[3]*in1[tid2 + sstp]; }
if (col >= (len - lstp))
{ in2[tid2] += wfil[4]*in1[j + 2*len - lstp - col - 1]; }
else
{ in2[tid2] += wfil[4]*in1[tid2 + lstp]; }
}
""", keep=True)
return ker1.get_function("gpu_a_trous_row_kernel"), ker2.get_function("gpu_a_trous_col_kernel")
|
Simple convenience function so that the a trous kernels can be easily accessed by any function.
|
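A hedged launch sketch, assuming PyCUDA is installed (the function above also needs `from pycuda.compiler import SourceModule`) and a square float32 image whose side is a multiple of the block size; the array names, filter taps, and sizes here are illustrative, not from the source:
import numpy as np
import pycuda.autoinit  # noqa: F401 - creates a CUDA context
import pycuda.gpuarray as gpuarray

row_kernel, col_kernel = gpu_a_trous()

n = 256
image = gpuarray.to_gpu(np.random.rand(n, n).astype(np.float32))
scratch = gpuarray.empty_like(image)
# B3-spline taps, a common choice for the a trous transform (illustrative)
wfil = gpuarray.to_gpu(np.array([1, 4, 6, 4, 1], dtype=np.float32) / 16)
scale = gpuarray.to_gpu(np.zeros(1, dtype=np.int32))  # scale[0] = 0

block = (16, 16, 1)
grid = (n // block[0], n // block[1])
row_kernel(image, scratch, wfil, scale, block=block, grid=grid)
col_kernel(scratch, image, wfil, scale, block=block, grid=grid)
|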
def get_artist(self, object_id, relation=None, **kwargs):
"""
Get the artist with the provided id
:returns: an :class:`~deezer.resources.Artist` object
"""
return self.get_object("artist", object_id, relation=relation, **kwargs)
|
Get the artist with the provided id
:returns: an :class:`~deezer.resources.Artist` object
|
def connect(uri):
"""
Connects to an nREPL endpoint identified by the given URL/URI. Valid
examples include:
nrepl://192.168.0.12:7889
telnet://localhost:5000
http://your-app-name.heroku.com/repl
    This fn delegates to another function, looked up in a registry that
    dispatches on the scheme of the URI provided (which can be a string or
    java.net.URI). By default, only
`nrepl` (corresponding to using the default bencode transport) is
supported. Alternative implementations may add support for other schemes,
such as http/https, JMX, various message queues, etc.
"""
#
uri = uri if isinstance(uri, ParseResult) else urlparse(uri)
if not uri.scheme:
        raise ValueError("uri has no scheme: " + uri.geturl())
f = _connect_fns.get(uri.scheme.lower(), None)
if not f:
err = "No connect function registered for scheme `%s`" % uri.scheme
raise Exception(err)
return f(uri)
|
Connects to an nREPL endpoint identified by the given URL/URI. Valid
examples include:
nrepl://192.168.0.12:7889
telnet://localhost:5000
http://your-app-name.heroku.com/repl
This fn delegates to another function, looked up in a registry that
dispatches on the scheme of the URI provided (which can be a string or
java.net.URI). By default, only
`nrepl` (corresponding to using the default bencode transport) is
supported. Alternative implementations may add support for other schemes,
such as http/https, JMX, various message queues, etc.
|
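A hedged sketch of registering an alternative transport, assuming `_connect_fns` is the module-level dict of scheme -> connect function that `connect` consults (as the lookup above implies); the telnet handler is purely illustrative:
import telnetlib

def _telnet_connect(uri):
    # uri arrives as a ParseResult; hostname/port come from the netloc
    return telnetlib.Telnet(uri.hostname, uri.port)

_connect_fns["telnet"] = _telnet_connect
conn = connect("telnet://localhost:5000")
|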
def validate_submit_args_or_fail(job_descriptor, provider_name, input_providers,
output_providers, logging_providers):
"""Validate that arguments passed to submit_job have valid file providers.
This utility function takes resources and task data args from `submit_job`
in the base provider. This function will fail with a value error if any of the
parameters are not valid. See the following example;
>>> job_resources = type('', (object,),
... {"logging": job_model.LoggingParam('gs://logtemp', job_model.P_GCS)})()
>>> job_params={'inputs': set(), 'outputs': set(), 'mounts': set()}
>>> task_descriptors = [
... job_model.TaskDescriptor(None, {
... 'inputs': {
... job_model.FileParam('IN', uri='gs://in/*',
... file_provider=job_model.P_GCS)},
... 'outputs': set()}, None),
... job_model.TaskDescriptor(None, {
... 'inputs': set(),
... 'outputs': {
... job_model.FileParam('OUT', uri='gs://out/*',
... file_provider=job_model.P_GCS)}}, None)]
...
>>> validate_submit_args_or_fail(job_model.JobDescriptor(None, job_params,
... job_resources, task_descriptors),
... provider_name='MYPROVIDER',
... input_providers=[job_model.P_GCS],
... output_providers=[job_model.P_GCS],
... logging_providers=[job_model.P_GCS])
...
>>> validate_submit_args_or_fail(job_model.JobDescriptor(None, job_params,
... job_resources, task_descriptors),
... provider_name='MYPROVIDER',
... input_providers=[job_model.P_GCS],
... output_providers=[job_model.P_LOCAL],
... logging_providers=[job_model.P_GCS])
Traceback (most recent call last):
...
ValueError: Unsupported output path (gs://out/*) for provider 'MYPROVIDER'.
Args:
job_descriptor: instance of job_model.JobDescriptor.
provider_name: (str) the name of the execution provider.
input_providers: (string collection) whitelist of file providers for input.
output_providers: (string collection) whitelist of providers for output.
logging_providers: (string collection) whitelist of providers for logging.
Raises:
ValueError: if any file providers do not match the whitelists.
"""
job_resources = job_descriptor.job_resources
job_params = job_descriptor.job_params
task_descriptors = job_descriptor.task_descriptors
# Validate logging file provider.
_validate_providers([job_resources.logging], 'logging', logging_providers,
provider_name)
# Validate job input and output file providers
_validate_providers(job_params['inputs'], 'input', input_providers,
provider_name)
_validate_providers(job_params['outputs'], 'output', output_providers,
provider_name)
# Validate input and output file providers.
for task_descriptor in task_descriptors:
_validate_providers(task_descriptor.task_params['inputs'], 'input',
input_providers, provider_name)
_validate_providers(task_descriptor.task_params['outputs'], 'output',
output_providers, provider_name)
|
Validate that arguments passed to submit_job have valid file providers.
This utility function takes resources and task data args from `submit_job`
in the base provider. This function will fail with a value error if any of the
parameters are not valid. See the following example;
>>> job_resources = type('', (object,),
... {"logging": job_model.LoggingParam('gs://logtemp', job_model.P_GCS)})()
>>> job_params={'inputs': set(), 'outputs': set(), 'mounts': set()}
>>> task_descriptors = [
... job_model.TaskDescriptor(None, {
... 'inputs': {
... job_model.FileParam('IN', uri='gs://in/*',
... file_provider=job_model.P_GCS)},
... 'outputs': set()}, None),
... job_model.TaskDescriptor(None, {
... 'inputs': set(),
... 'outputs': {
... job_model.FileParam('OUT', uri='gs://out/*',
... file_provider=job_model.P_GCS)}}, None)]
...
>>> validate_submit_args_or_fail(job_model.JobDescriptor(None, job_params,
... job_resources, task_descriptors),
... provider_name='MYPROVIDER',
... input_providers=[job_model.P_GCS],
... output_providers=[job_model.P_GCS],
... logging_providers=[job_model.P_GCS])
...
>>> validate_submit_args_or_fail(job_model.JobDescriptor(None, job_params,
... job_resources, task_descriptors),
... provider_name='MYPROVIDER',
... input_providers=[job_model.P_GCS],
... output_providers=[job_model.P_LOCAL],
... logging_providers=[job_model.P_GCS])
Traceback (most recent call last):
...
ValueError: Unsupported output path (gs://out/*) for provider 'MYPROVIDER'.
Args:
job_descriptor: instance of job_model.JobDescriptor.
provider_name: (str) the name of the execution provider.
input_providers: (string collection) whitelist of file providers for input.
output_providers: (string collection) whitelist of providers for output.
logging_providers: (string collection) whitelist of providers for logging.
Raises:
ValueError: if any file providers do not match the whitelists.
|
def deserialize(cls, data, content_type=None):
"""Parse a str using the RestAPI syntax and return a model.
:param str data: A str using RestAPI structure. JSON by default.
:param str content_type: JSON by default, set application/xml if XML.
:returns: An instance of this model
:raises: DeserializationError if something went wrong
"""
deserializer = Deserializer(cls._infer_class_models())
return deserializer(cls.__name__, data, content_type=content_type)
|
Parse a str using the RestAPI syntax and return a model.
:param str data: A str using RestAPI structure. JSON by default.
:param str content_type: JSON by default, set application/xml if XML.
:returns: An instance of this model
:raises: DeserializationError if something went wrong
|
def lovasz_hinge(logits, labels, per_image=True, ignore=None):
"""
Binary Lovasz hinge loss
logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
per_image: compute the loss per image instead of per batch
ignore: void class id
"""
if per_image:
loss = mean(lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore))
for log, lab in zip(logits, labels))
else:
loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
return loss
|
Binary Lovasz hinge loss
logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
per_image: compute the loss per image instead of per batch
ignore: void class id
|
def get_recent_matches(self, card_type="micro_card"):
"""
Calling the Recent Matches API.
Arg:
card_type: optional, default to micro_card. Accepted values are
micro_card & summary_card.
Return:
json data
"""
recent_matches_url = self.api_path + "recent_matches/"
params = {}
params["card_type"] = card_type
response = self.get_response(recent_matches_url, params)
return response
|
Calling the Recent Matches API.
Arg:
card_type: optional, default to micro_card. Accepted values are
micro_card & summary_card.
Return:
json data
|
def _kl_divergence(self, other_locs, other_weights, kernel=None, delta=1e-2):
"""
Finds the KL divergence between this and another particle
distribution by using a kernel density estimator to smooth over the
other distribution's particles.
"""
if kernel is None:
kernel = st.norm(loc=0, scale=1).pdf
dist = rescaled_distance_mtx(self, other_locs) / delta
K = kernel(dist)
return -self.est_entropy() - (1 / delta) * np.sum(
self.particle_weights *
np.log(
np.sum(
other_weights * K,
axis=1 # Sum over the particles of ``other``.
)
),
axis=0 # Sum over the particles of ``self``.
)
|
Finds the KL divergence between this and another particle
distribution by using a kernel density estimator to smooth over the
other distribution's particles.
|
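A self-contained sketch (invented 1-D particles) of the kernel-smoothing idea behind the estimator above: the other distribution's particles are smoothed with a Gaussian kernel of bandwidth `delta` before the cross-entropy term is taken:
import numpy as np
import scipy.stats as st

delta = 0.1
self_locs = np.array([0.0, 0.5, 1.0])
self_weights = np.full(3, 1.0 / 3)
other_locs = np.array([0.1, 0.9])
other_weights = np.full(2, 0.5)

# Pairwise distances between the two particle clouds, scaled by bandwidth.
dist = np.abs(self_locs[:, None] - other_locs[None, :]) / delta
K = st.norm.pdf(dist)
# KDE of the other distribution, evaluated at this distribution's particles.
density = (1 / delta) * np.sum(other_weights * K, axis=1)
cross_entropy = -np.sum(self_weights * np.log(density))
|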
def extract_tag_metadata(self, el):
"""Extract meta data."""
if self.type == 'odp':
if el.namespace and el.namespace == self.namespaces['draw'] and el.name == 'page-thumbnail':
name = el.attrs.get('draw:page-number', '')
self.additional_context = 'slide{}:'.format(name)
super().extract_tag_metadata(el)
|
Extract metadata.
|
def forwards(self, orm):
"Write your forwards methods here."
for doc in orm['document_library.Document'].objects.all():
for title in doc.documenttitle_set.all():
title.is_published = doc.is_published
title.save()
|
Write your forwards methods here.
|
def filter_sequences(self, seq_type):
"""Return a DictList of only specified types in the sequences attribute.
Args:
seq_type (SeqProp): Object type
Returns:
DictList: A filtered DictList of specified object type only
"""
return DictList(x for x in self.sequences if isinstance(x, seq_type))
|
Return a DictList of only specified types in the sequences attribute.
Args:
seq_type (SeqProp): Object type
Returns:
DictList: A filtered DictList of specified object type only
|
def is_literal_or_name(value):
"""Return True if value is a literal or a name."""
try:
ast.literal_eval(value)
return True
except (SyntaxError, ValueError):
pass
if value.strip() in ['dict()', 'list()', 'set()']:
return True
# Support removal of variables on the right side. But make sure
# there are no dots, which could mean an access of a property.
    return bool(re.match(r'^\w+\s*$', value))
|
Return True if value is a literal or a name.
|
def file_data_to_str(data):
"""
Convert file data to a string for display.
This function takes the file data produced by gather_file_data().
"""
if not data:
return _('<i>File name not recorded</i>')
res = data['name']
try:
mtime_as_str = time.strftime('%Y-%m-%d %H:%M:%S',
time.localtime(data['mtime']))
res += '<br><i>{}</i>: {}'.format(_('Last modified'), mtime_as_str)
res += '<br><i>{}</i>: {} {}'.format(
_('Size'), data['size'], _('bytes'))
except KeyError:
res += '<br>' + _('<i>File no longer exists</i>')
return res
|
Convert file data to a string for display.
This function takes the file data produced by gather_file_data().
|
def makeEndOfPrdvFuncCond(self):
'''
Construct the end-of-period value function conditional on next period's
state. NOTE: It might be possible to eliminate this method and replace
it with ConsIndShockSolver.makeEndOfPrdvFunc, but the self.X_cond
variables must be renamed.
Parameters
----------
none
Returns
-------
EndofPrdvFunc_cond : ValueFunc
The end-of-period value function conditional on a particular state
            occurring in the next period.
'''
VLvlNext = (self.PermShkVals_temp**(1.0-self.CRRA)*
self.PermGroFac**(1.0-self.CRRA))*self.vFuncNext(self.mNrmNext)
EndOfPrdv_cond = self.DiscFacEff*np.sum(VLvlNext*self.ShkPrbs_temp,axis=0)
EndOfPrdvNvrs_cond = self.uinv(EndOfPrdv_cond)
EndOfPrdvNvrsP_cond = self.EndOfPrdvP_cond*self.uinvP(EndOfPrdv_cond)
EndOfPrdvNvrs_cond = np.insert(EndOfPrdvNvrs_cond,0,0.0)
EndOfPrdvNvrsP_cond = np.insert(EndOfPrdvNvrsP_cond,0,EndOfPrdvNvrsP_cond[0])
aNrm_temp = np.insert(self.aNrm_cond,0,self.BoroCnstNat)
EndOfPrdvNvrsFunc_cond = CubicInterp(aNrm_temp,EndOfPrdvNvrs_cond,EndOfPrdvNvrsP_cond)
EndofPrdvFunc_cond = ValueFunc(EndOfPrdvNvrsFunc_cond,self.CRRA)
return EndofPrdvFunc_cond
|
Construct the end-of-period value function conditional on next period's
state. NOTE: It might be possible to eliminate this method and replace
it with ConsIndShockSolver.makeEndOfPrdvFunc, but the self.X_cond
variables must be renamed.
Parameters
----------
none
Returns
-------
EndofPrdvFunc_cond : ValueFunc
The end-of-period value function conditional on a particular state
occurring in the next period.
|
def API520_W(Pset, Pback):
r'''Calculates capacity correction due to backpressure on balanced
spring-loaded PRVs in liquid service. For pilot operated valves,
this is always 1. Applicable up to 50% of the percent gauge backpressure,
For use in API 520 relief valve sizing. 1D interpolation among a table with
53 backpressures is performed.
Parameters
----------
Pset : float
Set pressure for relief [Pa]
Pback : float
Backpressure, [Pa]
Returns
-------
KW : float
Correction due to liquid backpressure [-]
Notes
-----
If the calculated gauge backpressure is less than 15%, a value of 1 is
returned.
Examples
--------
Custom example from figure 31:
>>> API520_W(1E6, 3E5) # 22% overpressure
0.9511471848008564
References
----------
.. [1] API Standard 520, Part 1 - Sizing and Selection.
'''
gauge_backpressure = (Pback-atm)/(Pset-atm)*100.0 # in percent
if gauge_backpressure < 15.0:
return 1.0
return interp(gauge_backpressure, Kw_x, Kw_y)
|
r'''Calculates capacity correction due to backpressure on balanced
spring-loaded PRVs in liquid service. For pilot operated valves,
this is always 1. Applicable up to 50% gauge backpressure.
For use in API 520 relief valve sizing. 1D interpolation among a table with
53 backpressures is performed.
Parameters
----------
Pset : float
Set pressure for relief [Pa]
Pback : float
Backpressure, [Pa]
Returns
-------
KW : float
Correction due to liquid backpressure [-]
Notes
-----
If the calculated gauge backpressure is less than 15%, a value of 1 is
returned.
Examples
--------
Custom example from figure 31:
>>> API520_W(1E6, 3E5) # 22% overpressure
0.9511471848008564
References
----------
.. [1] API Standard 520, Part 1 - Sizing and Selection.
|
def dn(self,x,M_change = 12):
"""
Downsample and filter the signal
"""
y = signal.sosfilt(self.sos,x)
y = ssd.downsample(y,M_change)
return y
|
Downsample and filter the signal
|
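A standalone sketch of the same filter-then-decimate pattern, assuming scipy; the anti-alias filter design here is illustrative (the class above presumably builds `self.sos` elsewhere, and `ssd.downsample` from scikit-dsp-comm keeps every M-th sample):
import numpy as np
from scipy import signal

M = 12
fs = 48000
# Chebyshev low-pass cut at the post-decimation Nyquist rate
sos = signal.cheby1(10, 1, (fs / M) / 2, fs=fs, output="sos")
x = np.random.randn(fs)
y = signal.sosfilt(sos, x)
y = y[::M]  # keep every M-th sample
|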
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This copies the underlying bestModel,
creates a deep copy of the embedded paramMap, and
copies the embedded and extra parameters over.
It does not copy the extra Params into the subModels.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
bestModel = self.bestModel.copy(extra)
avgMetrics = self.avgMetrics
subModels = self.subModels
return CrossValidatorModel(bestModel, avgMetrics, subModels)
|
Creates a copy of this instance with a randomly generated uid
and some extra params. This copies the underlying bestModel,
creates a deep copy of the embedded paramMap, and
copies the embedded and extra parameters over.
It does not copy the extra Params into the subModels.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
|
def get_node_at_path(query_path, context):
"""Return the SqlNode associated with the query path."""
if query_path not in context.query_path_to_node:
raise AssertionError(
u'Unable to find SqlNode for query path {} with context {}.'.format(
query_path, context))
node = context.query_path_to_node[query_path]
return node
|
Return the SqlNode associated with the query path.
|
def draw(canvas, mol):
"""Draw molecule structure image.
Args:
canvas: draw.drawable.Drawable
mol: model.graphmol.Compound
"""
mol.require("ScaleAndCenter")
mlb = mol.size2d[2]
if not mol.atom_count():
return
bond_type_fn = {
1: {
0: single_bond,
1: wedged_single,
2: dashed_wedged_single,
3: wave_single,
}, 2: {
0: cw_double,
1: counter_cw_double,
2: double_bond,
3: cross_double
}, 3: {
0: triple_bond
}
}
# Draw bonds
for u, v, bond in mol.bonds_iter():
if not bond.visible:
continue
if (u < v) == bond.is_lower_first:
f, s = (u, v)
else:
s, f = (u, v)
p1 = mol.atom(f).coords
p2 = mol.atom(s).coords
if p1 == p2:
continue # avoid zero division
if mol.atom(f).visible:
p1 = gm.t_seg(p1, p2, F_AOVL, 2)[0]
if mol.atom(s).visible:
p2 = gm.t_seg(p1, p2, F_AOVL, 1)[1]
color1 = mol.atom(f).color
color2 = mol.atom(s).color
bond_type_fn[bond.order][bond.type](
canvas, p1, p2, color1, color2, mlb)
# Draw atoms
for n, atom in mol.atoms_iter():
if not atom.visible:
continue
p = atom.coords
color = atom.color
# Determine text direction
if atom.H_count:
cosnbrs = []
hrzn = (p[0] + 1, p[1])
for nbr in mol.graph.neighbors(n):
pnbr = mol.atom(nbr).coords
try:
cosnbrs.append(gm.dot_product(hrzn, pnbr, p) /
gm.distance(p, pnbr))
except ZeroDivisionError:
pass
if not cosnbrs or min(cosnbrs) > 0:
# [atom]< or isolated node(ex. H2O, HCl)
text = atom.formula_html(True)
canvas.draw_text(p, text, color, "right")
continue
elif max(cosnbrs) < 0:
# >[atom]
text = atom.formula_html()
canvas.draw_text(p, text, color, "left")
continue
# -[atom]- or no hydrogens
text = atom.formula_html()
canvas.draw_text(p, text, color, "center")
|
Draw molecule structure image.
Args:
canvas: draw.drawable.Drawable
mol: model.graphmol.Compound
|
def append(self, filename_in_zip, file_contents):
'''
Appends a file with name filename_in_zip and contents of
file_contents to the in-memory zip.
'''
# Set the file pointer to the end of the file
        self.in_memory_zip.seek(0, io.SEEK_END)
# Get a handle to the in-memory zip in append mode
zf = zipfile.ZipFile(self.in_memory_zip, "a", zipfile.ZIP_DEFLATED, False)
# Write the file to the in-memory zip
zf.writestr(filename_in_zip, file_contents)
# Mark the files as having been created on Windows so that
# Unix permissions are not inferred as 0000
for zfile in zf.filelist:
zfile.create_system = 0
# Close the ZipFile
zf.close()
# Rewind the file
self.in_memory_zip.seek(0)
return self
|
Appends a file with name filename_in_zip and contents of
file_contents to the in-memory zip.
|
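A usage sketch, assuming the enclosing class (called InMemoryZip here, a hypothetical name) keeps an io.BytesIO in self.in_memory_zip, as the method above implies:
import zipfile

imz = InMemoryZip()
imz.append("hello.txt", "hello")
imz.append("world.txt", "world")

with zipfile.ZipFile(imz.in_memory_zip) as zf:
    print(zf.namelist())  # ['hello.txt', 'world.txt']
|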
def _replace_global_vars(xs, global_vars):
"""Replace globally shared names from input header with value.
The value of the `algorithm` item may be a pointer to a real
file specified in the `global` section. If found, replace with
the full value.
"""
if isinstance(xs, (list, tuple)):
        return [_replace_global_vars(x, global_vars) for x in xs]
elif isinstance(xs, dict):
final = {}
for k, v in xs.items():
if isinstance(v, six.string_types) and v in global_vars:
v = global_vars[v]
final[k] = v
return final
else:
return xs
|
Replace globally shared names from input header with value.
The value of the `algorithm` item may be a pointer to a real
file specified in the `global` section. If found, replace with
the full value.
|
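A small worked example of the substitution behavior (values invented): string values that name a key in `global_vars` are replaced, everything else passes through:
global_vars = {"hg19_fasta": "/refs/hg19/seq/hg19.fa"}
config = {"ref": "hg19_fasta", "aligner": "bwa", "cores": 4}
print(_replace_global_vars(config, global_vars))
# {'ref': '/refs/hg19/seq/hg19.fa', 'aligner': 'bwa', 'cores': 4}
|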
def sample_name(in_bam):
"""Get sample name from BAM file.
"""
with pysam.AlignmentFile(in_bam, "rb", check_sq=False) as in_pysam:
try:
if "RG" in in_pysam.header:
return in_pysam.header["RG"][0]["SM"]
except ValueError:
return None
|
Get sample name from BAM file.
|
def get_assessment_taken_bank_assignment_session(self, proxy):
"""Gets the session for assigning taken assessments to bank mappings.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.AssessmentTakenBankAssignmentSession) -
an ``AssessmentTakenBankAssignmentSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_assessment_taken_bank_assignment()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_taken_bank_assignment()`` is ``true``.*
"""
if not self.supports_assessment_taken_bank_assignment():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.AssessmentTakenBankAssignmentSession(proxy=proxy, runtime=self._runtime)
|
Gets the session for assigning taken assessments to bank mappings.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.AssessmentTakenBankAssignmentSession) -
an ``AssessmentTakenBankAssignmentSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_assessment_taken_bank_assignment()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_taken_bank_assignment()`` is ``true``.*
|
def set_archive_layout(self, archive_id, layout_type, stylesheet=None):
"""
Use this method to change the layout of videos in an OpenTok archive
:param String archive_id: The ID of the archive that will be updated
:param String layout_type: The layout type for the archive. Valid values are:
'bestFit', 'custom', 'horizontalPresentation', 'pip' and 'verticalPresentation'
:param String stylesheet optional: CSS used to style the custom layout.
Specify this only if you set the type property to 'custom'
"""
payload = {
'type': layout_type,
}
if layout_type == 'custom':
if stylesheet is not None:
payload['stylesheet'] = stylesheet
endpoint = self.endpoints.set_archive_layout_url(archive_id)
response = requests.put(
endpoint,
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
pass
elif response.status_code == 400:
raise ArchiveError('Invalid request. This response may indicate that data in your request data is invalid JSON. It may also indicate that you passed in invalid layout options.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
else:
raise RequestError('OpenTok server error.', response.status_code)
|
Use this method to change the layout of videos in an OpenTok archive
:param String archive_id: The ID of the archive that will be updated
:param String layout_type: The layout type for the archive. Valid values are:
'bestFit', 'custom', 'horizontalPresentation', 'pip' and 'verticalPresentation'
:param String stylesheet optional: CSS used to style the custom layout.
Specify this only if you set the type property to 'custom'
|
def _handle_next_export_subtask(self, export_state=None):
"""
Process the next export sub-task, if there is one.
:param ExportState export_state:
If provided, this is used instead of the database queue, in effect directing the exporter to process the
previous export again. This is used to avoid having to query the database when we know already what needs
to be done. It also maintains a cache of the entity so we don't have to re-acquire it on multiple exports.
:return:
A :class:`meteorpi_db.exporter.MeteorExporter.ExportStateCache` representing the state of the export, or
None if there was nothing to do.
"""
# Use a cached state, or generate a new one if required
if export_state is None or export_state.export_task is None:
export = self.db.get_next_entity_to_export()
if export is not None:
export_state = self.ExportState(export_task=export)
else:
return None
try:
auth = (export_state.export_task.target_user,
export_state.export_task.target_password)
target_url = export_state.export_task.target_url
response = post(url=target_url, verify=False,
json=export_state.entity_dict,
auth=auth)
response.raise_for_status()
json = response.json()
state = json['state']
if state == 'complete':
return export_state.fully_processed()
elif state == 'need_file_data':
file_id = json['file_id']
file_record = self.db.get_file(repository_fname=file_id)
if file_record is None:
return export_state.failed()
with open(self.db.file_path_for_id(file_id), 'rb') as file_content:
multi = MultipartEncoder(fields={'file': ('file', file_content, file_record.mime_type)})
post(url="{0}/data/{1}/{2}".format(target_url, file_id, file_record.file_md5),
data=multi, verify=False,
headers={'Content-Type': multi.content_type},
auth=auth)
return export_state.partially_processed()
elif state == 'continue':
return export_state.partially_processed()
else:
return export_state.confused()
except HTTPError:
traceback.print_exc()
return export_state.failed()
except ConnectionError:
traceback.print_exc()
return export_state.failed()
|
Process the next export sub-task, if there is one.
:param ExportState export_state:
If provided, this is used instead of the database queue, in effect directing the exporter to process the
previous export again. This is used to avoid having to query the database when we know already what needs
to be done. It also maintains a cache of the entity so we don't have to re-acquire it on multiple exports.
:return:
A :class:`meteorpi_db.exporter.MeteorExporter.ExportStateCache` representing the state of the export, or
None if there was nothing to do.
|
def exec_rabbitmqctl(self, command, args=[], rabbitmqctl_opts=['-q']):
"""
Execute a ``rabbitmqctl`` command inside a running container.
:param command: the command to run
:param args: a list of args for the command
:param rabbitmqctl_opts:
a list of extra options to pass to ``rabbitmqctl``
:returns: a tuple of the command exit code and output
"""
cmd = ['rabbitmqctl'] + rabbitmqctl_opts + [command] + args
return self.inner().exec_run(cmd)
|
Execute a ``rabbitmqctl`` command inside a running container.
:param command: the command to run
:param args: a list of args for the command
:param rabbitmqctl_opts:
a list of extra options to pass to ``rabbitmqctl``
:returns: a tuple of the command exit code and output
|
def GetChildClassId(self, classId):
"""
        Extracts and returns the list of child objects whose classId matches the given classId.
"""
childList = []
for ch in self.child:
if ch.classId.lower() == classId.lower():
childList.append(ch)
return childList
|
Extracts and returns the list of child objects whose classId matches the given classId.
|
def is_possible_number(numobj):
"""Convenience wrapper around is_possible_number_with_reason.
Instead of returning the reason for failure, this method returns true if
the number is either a possible fully-qualified number (containing the area
code and country code), or if the number could be a possible local number
(with a country code, but missing an area code). Local numbers are
considered possible if they could be possibly dialled in this format: if
the area code is needed for a call to connect, the number is not considered
possible without it.
Arguments:
numobj -- the number object that needs to be checked
Returns True if the number is possible
"""
result = is_possible_number_with_reason(numobj)
return (result == ValidationResult.IS_POSSIBLE or
result == ValidationResult.IS_POSSIBLE_LOCAL_ONLY)
|
Convenience wrapper around is_possible_number_with_reason.
Instead of returning the reason for failure, this method returns true if
the number is either a possible fully-qualified number (containing the area
code and country code), or if the number could be a possible local number
(with a country code, but missing an area code). Local numbers are
considered possible if they could be possibly dialled in this format: if
the area code is needed for a call to connect, the number is not considered
possible without it.
Arguments:
numobj -- the number object that needs to be checked
Returns True if the number is possible
|
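A brief usage sketch, assuming this is the python-phonenumbers package, whose public API it matches:
import phonenumbers

numobj = phonenumbers.parse("+442083661177")
print(phonenumbers.is_possible_number(numobj))  # True
|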
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.lingeling:
pysolvers.lingeling_setphases(self.lingeling, literals)
|
Sets polarities of a given list of variables.
|
def edit_securitygroup(self, group_id, name=None, description=None):
"""Edit security group details.
:param int group_id: The ID of the security group
:param string name: The name of the security group
:param string description: The description of the security group
"""
successful = False
obj = {}
if name:
obj['name'] = name
if description:
obj['description'] = description
if obj:
successful = self.security_group.editObject(obj, id=group_id)
return successful
|
Edit security group details.
:param int group_id: The ID of the security group
:param string name: The name of the security group
:param string description: The description of the security group
|
def get_block_hash(self, height, id=None, endpoint=None):
"""
Get hash of a block by its height
Args:
height: (int) height of the block to lookup
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call
"""
return self._call_endpoint(GET_BLOCK_HASH, params=[height], id=id, endpoint=endpoint)
|
Get hash of a block by its height
Args:
height: (int) height of the block to lookup
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call
|
def create(self, Name, Subject, HtmlBody=None, TextBody=None, Alias=None):
"""
Creates a template.
:param Name: Name of template
:param Subject: The content to use for the Subject when this template is used to send email.
:param HtmlBody: The content to use for the HtmlBody when this template is used to send email.
        :param TextBody: The content to use for the TextBody when this template is used to send email.
:return:
"""
assert TextBody or HtmlBody, "Provide either email TextBody or HtmlBody or both"
data = {"Name": Name, "Subject": Subject, "HtmlBody": HtmlBody, "TextBody": TextBody, "Alias": Alias}
return self._init_instance(self.call("POST", "/templates", data=data))
|
Creates a template.
:param Name: Name of template
:param Subject: The content to use for the Subject when this template is used to send email.
:param HtmlBody: The content to use for the HtmlBody when this template is used to send email.
:param TextBody: The content to use for the TextBody when this template is used to send email.
:return:
|
def start_continuous(self, aichans, update_hz=10):
"""Begins a continuous analog generation, calling a provided function
at a rate of 10Hz
:param aichans: name of channel(s) to record (analog input) from
:type aichans: list<str>
:param update_hz: Rate (Hz) at which to read data from the device input buffer
:type update_hz: int
"""
self.daq_lock.acquire()
self.ngenerated = 0 # number of stimuli presented during chart run
        npts = int(self.aifs/update_hz)  # read chunks at the update_hz rate
nchans = len(aichans)
self.aitask = AITask(aichans, self.aifs, npts*5*nchans)
self.aitask.register_callback(self._read_continuous, npts)
self.aitask.start()
|
Begins a continuous analog acquisition, reading data from the
device input buffer at a rate of ``update_hz``
:param aichans: name of channel(s) to record (analog input) from
:type aichans: list<str>
:param update_hz: Rate (Hz) at which to read data from the device input buffer
:type update_hz: int
|
def update():
'''
When we are asked to update (regular interval) lets reap the cache
'''
try:
salt.fileserver.reap_fileserver_cache_dir(
os.path.join(__opts__['cachedir'], 'roots', 'hash'),
find_file
)
except (IOError, OSError):
# Hash file won't exist if no files have yet been served up
pass
mtime_map_path = os.path.join(__opts__['cachedir'], 'roots', 'mtime_map')
# data to send on event
data = {'changed': False,
'files': {'changed': []},
'backend': 'roots'}
# generate the new map
new_mtime_map = salt.fileserver.generate_mtime_map(__opts__, __opts__['file_roots'])
old_mtime_map = {}
# if you have an old map, load that
if os.path.exists(mtime_map_path):
with salt.utils.files.fopen(mtime_map_path, 'rb') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
try:
file_path, mtime = line.replace('\n', '').split(':', 1)
old_mtime_map[file_path] = mtime
if mtime != new_mtime_map.get(file_path, mtime):
data['files']['changed'].append(file_path)
except ValueError:
# Document the invalid entry in the log
log.warning(
'Skipped invalid cache mtime entry in %s: %s',
mtime_map_path, line
)
# compare the maps, set changed to the return value
data['changed'] = salt.fileserver.diff_mtime_map(old_mtime_map, new_mtime_map)
# compute files that were removed and added
old_files = set(old_mtime_map.keys())
new_files = set(new_mtime_map.keys())
data['files']['removed'] = list(old_files - new_files)
data['files']['added'] = list(new_files - old_files)
# write out the new map
mtime_map_path_dir = os.path.dirname(mtime_map_path)
if not os.path.exists(mtime_map_path_dir):
os.makedirs(mtime_map_path_dir)
with salt.utils.files.fopen(mtime_map_path, 'wb') as fp_:
for file_path, mtime in six.iteritems(new_mtime_map):
fp_.write(
salt.utils.stringutils.to_bytes(
'{0}:{1}\n'.format(file_path, mtime)
)
)
if __opts__.get('fileserver_events', False):
# if there is a change, fire an event
event = salt.utils.event.get_event(
'master',
__opts__['sock_dir'],
__opts__['transport'],
opts=__opts__,
listen=False)
event.fire_event(data,
salt.utils.event.tagify(['roots', 'update'], prefix='fileserver'))
|
When we are asked to update (regular interval) lets reap the cache
|
def extract_name_from_job_arn(arn):
"""Returns the name used in the API given a full ARN for a training job
or hyperparameter tuning job.
"""
slash_pos = arn.find('/')
if slash_pos == -1:
raise ValueError("Cannot parse invalid ARN: %s" % arn)
return arn[(slash_pos + 1):]
|
Returns the name used in the API given a full ARN for a training job
or hyperparameter tuning job.
|
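A quick illustration with a made-up ARN:
arn = "arn:aws:sagemaker:us-west-2:123456789012:training-job/my-job"
print(extract_name_from_job_arn(arn))  # my-job
|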
def extract_spans(html_string):
"""
Creates a list of the spanned cell groups of [row, column] pairs.
Parameters
----------
html_string : str
Returns
-------
list of lists of lists of int
"""
try:
from bs4 import BeautifulSoup
except ImportError:
print("ERROR: You must have BeautifulSoup to use html2data")
return
soup = BeautifulSoup(html_string, 'html.parser')
table = soup.find('table')
if not table:
return []
trs = table.findAll('tr')
if len(trs) == 0:
return []
spans = []
for tr in range(len(trs)):
if tr == 0:
ths = trs[tr].findAll('th')
if len(ths) == 0:
ths = trs[tr].findAll('td')
tds = ths
else:
tds = trs[tr].findAll('td')
column = 0
for td in tds:
r_span_count = 1
c_span_count = 1
current_column = column
if td.has_attr('rowspan'):
r_span_count = int(td['rowspan'])
if td.has_attr('colspan'):
c_span_count = int(td['colspan'])
column += c_span_count
else:
column += 1
new_span = []
for r_index in range(tr, tr + r_span_count):
for c_index in range(current_column, column):
if not get_span(spans, r_index, c_index):
new_span.append([r_index, c_index])
if len(new_span) > 0:
spans.append(new_span)
return spans
|
Creates a list of the spanned cell groups of [row, column] pairs.
Parameters
----------
html_string : str
Returns
-------
list of lists of lists of int
|
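A usage sketch on a tiny table. `get_span` is not shown above; a minimal stand-in consistent with how it is called (return the recorded span covering a cell, if any) is included here:
def get_span(spans, row, column):
    for span in spans:
        if [row, column] in span:
            return span
    return None

html = """<table>
<tr><th colspan="2">h</th></tr>
<tr><td>a</td><td>b</td></tr>
</table>"""
print(extract_spans(html))
# [[[0, 0], [0, 1]], [[1, 0]], [[1, 1]]]
|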
def ensure_property_set(host=None, admin_username=None, admin_password=None, property=None, value=None):
'''
.. versionadded:: Fluorine
Ensure that property is set to specific value
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
property:
The property which should be set.
value:
The value which should be set to property.
CLI Example:
.. code-block:: bash
salt dell dracr.ensure_property_set property=System.ServerOS.HostName value=Pretty-server
'''
ret = get_property(host, admin_username, admin_password, property)
if ret['stdout'] == value:
return True
ret = set_property(host, admin_username, admin_password, property, value)
return ret
|
.. versionadded:: Fluorine
Ensure that property is set to specific value
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
property:
The property which should be set.
value:
The value which should be set to property.
CLI Example:
.. code-block:: bash
salt dell dracr.ensure_property_set property=System.ServerOS.HostName value=Pretty-server
|
def to_bigquery_fields(self, name_case=DdlParseBase.NAME_CASE.original):
"""
        Generate a BigQuery JSON fields definition
:param name_case: name case type
* DdlParse.NAME_CASE.original : Return to no convert
* DdlParse.NAME_CASE.lower : Return to lower
* DdlParse.NAME_CASE.upper : Return to upper
        :return: BigQuery JSON fields definition
"""
bq_fields = []
for col in self.values():
bq_fields.append(col.to_bigquery_field(name_case))
return "[{}]".format(",".join(bq_fields))
|
Generate a BigQuery JSON fields definition
:param name_case: name case type
* DdlParse.NAME_CASE.original : Return to no convert
* DdlParse.NAME_CASE.lower : Return to lower
* DdlParse.NAME_CASE.upper : Return to upper
:return: BigQuery JSON fields definition
|
def p_try_statement_3(self, p):
"""try_statement : TRY block catch finally"""
p[0] = ast.Try(statements=p[2], catch=p[3], fin=p[4])
|
try_statement : TRY block catch finally
|
def unicode_iter(val):
"""Provides an iterator over the *code points* of the given Unicode sequence.
Notes:
Before PEP-393, Python has the potential to support Unicode as UTF-16 or UTF-32.
This is reified in the property as ``sys.maxunicode``. As a result, naive iteration
of Unicode sequences will render non-character code points such as UTF-16 surrogates.
Args:
val (unicode): The unicode sequence to iterate over as integer code points in the range
``0x0`` to ``0x10FFFF``.
"""
val_iter = iter(val)
while True:
try:
code_point = next(_next_code_point(val, val_iter, to_int=ord))
except StopIteration:
return
if code_point is None:
raise ValueError('Unpaired high surrogate at end of Unicode sequence: %r' % val)
yield code_point
|
Provides an iterator over the *code points* of the given Unicode sequence.
Notes:
Before PEP-393, Python has the potential to support Unicode as UTF-16 or UTF-32.
This is reified in the property as ``sys.maxunicode``. As a result, naive iteration
of Unicode sequences will render non-character code points such as UTF-16 surrogates.
Args:
val (unicode): The unicode sequence to iterate over as integer code points in the range
``0x0`` to ``0x10FFFF``.
|
def as_mpl_artists(shape_list,
properties_func=None,
text_offset=5.0, origin=1):
"""
Converts a region list to a list of patches and a list of artists.
Optional Keywords:
[ text_offset ] - If there is text associated with the regions, add
some vertical offset (in pixels) to the text so that it doesn't overlap
with the regions.
    Often, the region files implicitly assume the lower-left corner
    of the image as a coordinate (1,1). However, the python convention
is that the array index starts from 0. By default (origin = 1),
coordinates of the returned mpl artists have coordinate shifted by
(1, 1). If you do not want this shift, set origin=0.
"""
patch_list = []
artist_list = []
if properties_func is None:
properties_func = properties_func_default
# properties for continued(? multiline?) regions
saved_attrs = None
for shape in shape_list:
patches = []
if saved_attrs is None:
_attrs = [], {}
else:
_attrs = copy.copy(saved_attrs[0]), copy.copy(saved_attrs[1])
kwargs = properties_func(shape, _attrs)
if shape.name == "composite":
saved_attrs = shape.attr
continue
if saved_attrs is None and shape.continued:
saved_attrs = shape.attr
# elif (shape.name in shape.attr[1]):
# if (shape.attr[1][shape.name] != "ignore"):
# saved_attrs = shape.attr
if not shape.continued:
saved_attrs = None
# text associated with the shape
txt = shape.attr[1].get("text")
if shape.name == "polygon":
xy = np.array(shape.coord_list)
xy.shape = -1, 2
# -1 for change origin to 0,0
patches = [mpatches.Polygon(xy - origin, closed=True, **kwargs)]
elif shape.name == "rotbox" or shape.name == "box":
xc, yc, w, h, rot = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
_box = np.array([[-w / 2., -h / 2.],
[-w / 2., h / 2.],
[w / 2., h / 2.],
[w / 2., -h / 2.]])
box = _box + [xc, yc]
rotbox = rotated_polygon(box, xc, yc, rot)
patches = [mpatches.Polygon(rotbox, closed=True, **kwargs)]
elif shape.name == "ellipse":
xc, yc = shape.coord_list[:2]
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
angle = shape.coord_list[-1]
maj_list, min_list = shape.coord_list[2:-1:2], shape.coord_list[3:-1:2]
patches = [mpatches.Ellipse((xc, yc), 2 * maj, 2 * min,
angle=angle, **kwargs)
for maj, min in zip(maj_list, min_list)]
elif shape.name == "annulus":
xc, yc = shape.coord_list[:2]
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
r_list = shape.coord_list[2:]
patches = [mpatches.Ellipse((xc, yc), 2 * r, 2 * r, **kwargs) for r in r_list]
elif shape.name == "circle":
xc, yc, major = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
patches = [mpatches.Ellipse((xc, yc), 2 * major, 2 * major, angle=0, **kwargs)]
elif shape.name == "panda":
xc, yc, a1, a2, an, r1, r2, rn = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
patches = [mpatches.Arc((xc, yc), rr * 2, rr * 2, angle=0,
theta1=a1, theta2=a2, **kwargs)
for rr in np.linspace(r1, r2, rn + 1)]
for aa in np.linspace(a1, a2, an + 1):
xx = np.array([r1, r2]) * np.cos(aa / 180. * np.pi) + xc
yy = np.array([r1, r2]) * np.sin(aa / 180. * np.pi) + yc
p = Path(np.transpose([xx, yy]))
patches.append(mpatches.PathPatch(p, **kwargs))
elif shape.name == "pie":
xc, yc, r1, r2, a1, a2 = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
patches = [mpatches.Arc((xc, yc), rr * 2, rr * 2, angle=0,
theta1=a1, theta2=a2, **kwargs)
for rr in [r1, r2]]
for aa in [a1, a2]:
xx = np.array([r1, r2]) * np.cos(aa / 180. * np.pi) + xc
yy = np.array([r1, r2]) * np.sin(aa / 180. * np.pi) + yc
p = Path(np.transpose([xx, yy]))
patches.append(mpatches.PathPatch(p, **kwargs))
elif shape.name == "epanda":
xc, yc, a1, a2, an, r11, r12, r21, r22, rn, angle = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
# mpl takes angle a1, a2 as angle as in circle before
# transformation to ellipse.
x1, y1 = cos(a1 / 180. * pi), sin(a1 / 180. * pi) * r11 / r12
x2, y2 = cos(a2 / 180. * pi), sin(a2 / 180. * pi) * r11 / r12
a1, a2 = atan2(y1, x1) / pi * 180., atan2(y2, x2) / pi * 180.
patches = [mpatches.Arc((xc, yc), rr1 * 2, rr2 * 2,
angle=angle, theta1=a1, theta2=a2,
**kwargs)
for rr1, rr2 in zip(np.linspace(r11, r21, rn + 1),
np.linspace(r12, r22, rn + 1))]
for aa in np.linspace(a1, a2, an + 1):
xx = np.array([r11, r21]) * np.cos(aa / 180. * np.pi)
yy = np.array([r11, r21]) * np.sin(aa / 180. * np.pi)
p = Path(np.transpose([xx, yy]))
tr = Affine2D().scale(1, r12 / r11).rotate_deg(angle).translate(xc, yc)
p2 = tr.transform_path(p)
patches.append(mpatches.PathPatch(p2, **kwargs))
elif shape.name == "text":
xc, yc = shape.coord_list[:2]
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
if txt:
_t = _get_text(txt, xc, yc, 0, 0, **kwargs)
artist_list.append(_t)
elif shape.name == "point":
xc, yc = shape.coord_list[:2]
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
artist_list.append(Line2D([xc], [yc],
**kwargs))
if txt:
textshape = copy.copy(shape)
textshape.name = "text"
textkwargs = properties_func(textshape, _attrs)
_t = _get_text(txt, xc, yc, 0, text_offset,
va="bottom",
**textkwargs)
artist_list.append(_t)
elif shape.name in ["line", "vector"]:
if shape.name == "line":
x1, y1, x2, y2 = shape.coord_list[:4]
# -1 for change origin to 0,0
x1, y1, x2, y2 = x1 - origin, y1 - origin, x2 - origin, y2 - origin
a1, a2 = shape.attr[1].get("line", "0 0").strip().split()[:2]
arrowstyle = "-"
if int(a1):
arrowstyle = "<" + arrowstyle
if int(a2):
arrowstyle = arrowstyle + ">"
else: # shape.name == "vector"
x1, y1, l, a = shape.coord_list[:4]
# -1 for change origin to 0,0
x1, y1 = x1 - origin, y1 - origin
x2, y2 = x1 + l * np.cos(a / 180. * np.pi), y1 + l * np.sin(a / 180. * np.pi)
v1 = int(shape.attr[1].get("vector", "0").strip())
if v1:
arrowstyle = "->"
else:
arrowstyle = "-"
patches = [mpatches.FancyArrowPatch(posA=(x1, y1),
posB=(x2, y2),
arrowstyle=arrowstyle,
arrow_transmuter=None,
connectionstyle="arc3",
patchA=None, patchB=None,
shrinkA=0, shrinkB=0,
connector=None,
**kwargs)]
else:
warnings.warn("'as_mpl_artists' does not know how to convert {0} "
"to mpl artist".format(shape.name))
patch_list.extend(patches)
if txt and patches:
# the text associated with a shape uses different
# matplotlib keywords than the shape itself for, e.g.,
# color
textshape = copy.copy(shape)
textshape.name = "text"
textkwargs = properties_func(textshape, _attrs)
# calculate the text position
_bb = [p.get_window_extent() for p in patches]
# this is to work around backward-incompatible change made
# in matplotlib 1.2. This change is later reverted so only
# some versions are affected. With affected version of
# matplotlib, get_window_extent method calls get_transform
# method which sets the _transformSet to True, which is
# not desired.
for p in patches:
p._transformSet = False
_bbox = Bbox.union(_bb)
x0, y0, x1, y1 = _bbox.extents
xc = .5 * (x0 + x1)
_t = _get_text(txt, xc, y1, 0, text_offset,
va="bottom",
**textkwargs)
artist_list.append(_t)
return patch_list, artist_list
|
Converts a region list to a list of patches and a list of artists.
Optional Keywords:
[ text_offset ] - If there is text associated with the regions, add
some vertical offset (in pixels) to the text so that it doesn't overlap
with the regions.
Often, the region files implicitly assume the lower-left corner
of the image as a coordinate (1,1). However, the python convention
is that the array index starts from 0. By default (origin = 1),
coordinates of the returned mpl artists have coordinate shifted by
(1, 1). If you do not want this shift, set origin=0.
|
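A hedged usage sketch, assuming the pyregion package (where this helper appears to live): parse a DS9 region string into a shape list, convert it, and add the results to a matplotlib axes:
import matplotlib.pyplot as plt
import pyregion

region = 'image\ncircle(50, 50, 20) # color=red text={core}'
shape_list = pyregion.parse(region)

fig, ax = plt.subplots()
patch_list, artist_list = as_mpl_artists(shape_list)
for p in patch_list:
    ax.add_patch(p)
for t in artist_list:
    ax.add_artist(t)
ax.set_xlim(0, 100)
ax.set_ylim(0, 100)
plt.show()
|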
def query_all():
'''
Query all the records from TabPost2Tag.
'''
recs = TabPost2Tag.select(
TabPost2Tag,
TabTag.kind.alias('tag_kind'),
).join(
TabTag,
on=(TabPost2Tag.tag_id == TabTag.uid)
)
return recs
|
Query all the records from TabPost2Tag.
|
def add_header(self, name, value):
"""Add an HTTP header to response object.
Arguments:
name (str): HTTP header field name
value (str): HTTP header field value
"""
if value is not None:
self._headers.append((name, value))
|
Add an HTTP header to response object.
Arguments:
name (str): HTTP header field name
value (str): HTTP header field value
|
def load_file(self, fname, table=None, sep="\t", bins=False, indexes=None):
"""
use some of the machinery in pandas to load a file into a table
Parameters
----------
fname : str
filename or filehandle to load
table : str
table to load the file to
sep : str
CSV separator
bins : bool
add a "bin" column for efficient spatial queries.
indexes : list[str]
list of columns to index
"""
convs = {"#chr": "chrom", "start": "txStart", "end": "txEnd", "chr":
"chrom", "pos": "start", "POS": "start", "chromStart": "txStart",
"chromEnd": "txEnd"}
if table is None:
import os.path as op
table = op.basename(op.splitext(fname)[0]).replace(".", "_")
print("writing to:", table, file=sys.stderr)
from pandas.io import sql
import pandas as pa
from toolshed import nopen
needs_name = False
for i, chunk in enumerate(pa.read_csv(nopen(fname), iterator=True,
chunksize=100000, sep=sep, encoding="latin-1")):
chunk.columns = [convs.get(k, k) for k in chunk.columns]
if not "name" in chunk.columns:
needs_name = True
chunk['name'] = chunk.get('chrom', chunk[chunk.columns[0]])
if bins:
chunk['bin'] = 1
if i == 0 and not table in self.tables:
flavor = self.url.split(":")[0]
schema = sql.get_schema(chunk, table, flavor)
print(schema)
self.engine.execute(schema)
elif i == 0:
                print("adding to existing table, you may want to drop first",
                      file=sys.stderr)
tbl = getattr(self, table)._table
cols = chunk.columns
data = list(dict(zip(cols, x)) for x in chunk.values)
if needs_name:
for d in data:
d['name'] = "%s:%s" % (d.get("chrom"), d.get("txStart", d.get("chromStart")))
if bins:
for d in data:
d['bin'] = max(Genome.bins(int(d["txStart"]), int(d["txEnd"])))
self.engine.execute(tbl.insert(), data)
self.session.commit()
if i > 0:
                print("writing row:", i * 100000, file=sys.stderr)
if "txStart" in chunk.columns:
if "chrom" in chunk.columns:
ssql = """CREATE INDEX "%s.chrom_txStart" ON "%s" (chrom, txStart)""" % (table, table)
else:
ssql = """CREATE INDEX "%s.txStart" ON "%s" (txStart)""" % (table, table)
self.engine.execute(ssql)
for index in (indexes or []):
ssql = """CREATE INDEX "%s.%s" ON "%s" (%s)""" % (table,
index, table, index)
self.engine.execute(ssql)
if bins:
ssql = """CREATE INDEX "%s.chrom_bin" ON "%s" (chrom, bin)""" % (table, table)
self.engine.execute(ssql)
self.session.commit()
|
use some of the machinery in pandas to load a file into a table
Parameters
----------
fname : str
filename or filehandle to load
table : str
table to load the file to
sep : str
CSV separator
bins : bool
add a "bin" column for efficient spatial queries.
indexes : list[str]
list of columns to index
|