| code | docstring |
|---|---|
def register_target(repo_cmd, repo_service):
    """Decorator to register a class with a repo_service.

    Binds the command and service name onto the class and records it in
    the RepositoryService lookup maps.
    """
    def decorate(klass):
        log.debug('Loading service module class: {}'.format(klass.__name__))
        klass.command, klass.name = repo_cmd, repo_service
        RepositoryService.service_map[repo_service] = klass
        RepositoryService.command_map[repo_cmd] = repo_service
        return klass
    return decorate
def main(pyc_file, asm_path):
    """Create Python bytecode from a Python assembly file.

    ASM_PATH gives the input Python assembly file. We suggest ending the
    file in ``.pyasm``.

    If ``pyc_file`` is given, that indicates the path to write the Python
    bytecode; it should end in ``.pyc``.

    See https://github.com/rocky/python-xasm/blob/master/HOW-TO-USE.rst
    for how to write a Python assembler file.
    """
    if os.stat(asm_path).st_size == 0:
        print("Size of assembly file %s is zero" % asm_path)
        sys.exit(1)
    asm = asm_file(asm_path)
    # Derive the output name from the input when not given explicitly.
    suffix = '.pyasm'
    if not pyc_file and asm_path.endswith(suffix):
        pyc_file = asm_path[:-len(suffix)] + '.pyc'
    write_pycfile(pyc_file, asm)
def accuracy(self, test_set, format=None):
    """Compute the accuracy on a test set.

    :param test_set: A list of tuples of the form ``(text, label)``, or a
        filename.
    :param format: If ``test_set`` is a filename, the file format, e.g.
        ``"csv"`` or ``"json"``. If ``None``, will attempt to detect the
        file format.
    """
    # NOTE(review): ``basestring`` is Python 2 only — confirm this module
    # provides a shim if it also targets Python 3.
    if isinstance(test_set, basestring):
        test_data = self._read_data(test_set)
    else:
        test_data = test_set
    # Featurize each (document, label) pair before scoring.
    test_features = [(self.extract_features(d), c) for d, c in test_data]
    return nltk.classify.accuracy(self.classifier, test_features)
def _find_corresponding_multicol_key(key, keys_multicol):
for mk in keys_multicol:
if key.startswith(mk) and 'of' in key:
return mk
return None | Find the corresponding multicolumn key. |
def detect_model_num(string):
    """Takes a string related to a model name and extracts its model number.

    For example:
        '000000-bootstrap.index' => 0
    """
    match = re.match(MODEL_NUM_REGEX, string)
    return int(match.group()) if match else None
def map_sid2sub(self, sid, sub):
    """Store the connection between a Session ID and a subject ID.

    Records the mapping in both directions.

    :param sid: Session ID
    :param sub: subject ID
    """
    for table, key, value in (('sid2sub', sid, sub), ('sub2sid', sub, sid)):
        self.set(table, key, value)
def _gl_initialize(self):
    """Deal with compatibility; desktop does not have sprites
    enabled by default. ES has.
    """
    if '.es' in gl.current_backend.__name__:
        # ES backends already have point sprites enabled; nothing to do.
        pass
    else:
        # GL enum values hard-coded because not all desktop wrappers
        # expose these names.
        GL_VERTEX_PROGRAM_POINT_SIZE = 34370
        GL_POINT_SPRITE = 34913
        gl.glEnable(GL_VERTEX_PROGRAM_POINT_SIZE)
        gl.glEnable(GL_POINT_SPRITE)
    # Query static capabilities only once per context.
    if self.capabilities['max_texture_size'] is None:
        self.capabilities['gl_version'] = gl.glGetParameter(gl.GL_VERSION)
        self.capabilities['max_texture_size'] = \
            gl.glGetParameter(gl.GL_MAX_TEXTURE_SIZE)
    # First token of e.g. "4.6.0 NVIDIA ..." is the numeric version.
    this_version = self.capabilities['gl_version'].split(' ')[0]
    # NOTE(review): this_version is computed but never used afterwards —
    # confirm whether a minimum-version check was intended here.
    this_version = LooseVersion(this_version)
def _page_gen(self):
track = ""
for page in self.__pages__:
track += "/{page}".format(page=page)
return track | Generates The String for pages |
def key_deploy(self, host, ret):
    """Deploy the SSH key if the minions don't auth."""
    # Non-dict return or forced deploy: optionally re-run with existing
    # password, but never prompt.
    if not isinstance(ret[host], dict) or self.opts.get('ssh_key_deploy'):
        target = self.targets[host]
        if target.get('passwd', False) or self.opts['ssh_passwd']:
            self._key_deploy_run(host, target, False)
        return ret
    # Auth failure detected: interactively offer to deploy the key.
    if ret[host].get('stderr', '').count('Permission denied'):
        target = self.targets[host]
        print(('Permission denied for host {0}, do you want to deploy '
               'the salt-ssh key? (password required):').format(host))
        deploy = input('[Y/n] ')
        if deploy.startswith(('n', 'N')):
            return ret
        target['passwd'] = getpass.getpass(
            'Password for {0}@{1}: '.format(target['user'], host)
        )
        return self._key_deploy_run(host, target, True)
    return ret
def conflicts_with(self, other):
    """Returns True if this requirement conflicts with another `Requirement`
    or `VersionedObject`.
    """
    if isinstance(other, Requirement):
        # Different packages, or either side unversioned: no conflict.
        # NOTE(review): the guards read ``self.range`` / ``other.range``
        # while the set operations use ``range_`` — confirm both names
        # refer to the same underlying attribute.
        if (self.name_ != other.name_) or (self.range is None) \
                or (other.range is None):
            return False
        elif self.conflict:
            # Two conflict ("!") requirements never clash with each other;
            # a conflict vs a normal requirement clashes when it covers it.
            return False if other.conflict \
                else self.range_.issuperset(other.range_)
        elif other.conflict:
            return other.range_.issuperset(self.range_)
        else:
            # Two normal requirements conflict when ranges are disjoint.
            return not self.range_.intersects(other.range_)
    else:
        # Comparing against a concrete VersionedObject.
        if (self.name_ != other.name_) or (self.range is None):
            return False
        if self.conflict:
            return (other.version_ in self.range_)
        else:
            return (other.version_ not in self.range_)
def get_package_manager(self, target=None):
    """Returns package manager for target argument or global config.

    When the target declares a ``package_manager`` field, that value wins;
    otherwise the node distribution's configured default is used.
    """
    name = None
    if target:
        field = target.payload.get_field('package_manager')
        if field:
            name = field.value
    return self.node_distribution.get_package_manager(package_manager=name)
def start_map(name,
              handler_spec,
              reader_spec,
              mapper_parameters,
              shard_count=None,
              output_writer_spec=None,
              mapreduce_parameters=None,
              base_path=None,
              queue_name=None,
              eta=None,
              countdown=None,
              hooks_class_name=None,
              _app=None,
              in_xg_transaction=False):
    """Start a new, mapper-only mapreduce.

    Deprecated! Use map_job.start instead.

    If a value can be specified both from an explicit argument and from
    a dictionary, the value from the explicit argument wins.

    Args:
      name: mapreduce name. Used only for display purposes.
      handler_spec: fully qualified name of mapper handler function/class
        to call.
      reader_spec: fully qualified name of mapper reader to use.
      mapper_parameters: dictionary of parameters to pass to mapper. These
        are mapper-specific and also used for reader/writer initialization.
        Should have format {"input_reader": {}, "output_writer":{}}. Old
        deprecated style does not have sub dictionaries.
      shard_count: number of shards to create.
      output_writer_spec: fully qualified name of the output writer to use.
      mapreduce_parameters: dictionary of mapreduce parameters relevant to
        the whole job.
      base_path: base path of mapreduce library handler specified in
        app.yaml. "/mapreduce" by default.
      queue_name: taskqueue queue name to be used for mapreduce tasks.
        See util.get_queue_name.
      eta: absolute time when the MR should execute. May not be specified
        if 'countdown' is also supplied. This may be timezone-aware or
        timezone-naive.
      countdown: time in seconds into the future that this MR should
        execute. Defaults to zero.
      hooks_class_name: fully qualified name of a hooks.Hooks subclass.
      _app: app id to run under (internal use).
      in_xg_transaction: controls what transaction scope to use to start
        this MR job. If True, there has to be an already opened cross-group
        transaction scope. MR will use one entity group from it. If False,
        MR will create an independent transaction to start the job
        regardless of any existing transaction scopes.

    Returns:
      mapreduce id as string.
    """
    if shard_count is None:
        shard_count = parameters.config.SHARD_COUNT
    if mapper_parameters:
        # Copy so the caller's dict is not mutated downstream.
        mapper_parameters = dict(mapper_parameters)
    # Start from library defaults, then layer caller-supplied params on top.
    mr_params = map_job.JobConfig._get_default_mr_params()
    if mapreduce_parameters:
        mr_params.update(mapreduce_parameters)
    if base_path:
        mr_params["base_path"] = base_path
    mr_params["queue_name"] = util.get_queue_name(queue_name)
    mapper_spec = model.MapperSpec(handler_spec,
                                   reader_spec,
                                   mapper_parameters,
                                   shard_count,
                                   output_writer_spec=output_writer_spec)
    # Caller claimed an open XG transaction but there is none: warn only,
    # keep going for backwards compatibility.
    if in_xg_transaction and not db.is_in_transaction():
        logging.warning("Expects an opened xg transaction to start mapreduce "
                        "when transactional is True.")
    return handlers.StartJobHandler._start_map(
        name,
        mapper_spec,
        mr_params,
        queue_name=mr_params["queue_name"],
        eta=eta,
        countdown=countdown,
        hooks_class_name=hooks_class_name,
        _app=_app,
        in_xg_transaction=in_xg_transaction)
def trunc_list(s: List) -> List:
    """Truncate lists to maximum length.

    Lists longer than ``max_list_size`` are shortened by keeping the head
    and tail halves with an ``ELLIPSIS`` marker between them.
    """
    if len(s) <= max_list_size:
        return s
    head = max_list_size // 2
    tail = head - 1
    return s[:head] + [ELLIPSIS] + s[-tail:]
def output_component(graph, edge_stack, u, v):
    """Helper function to pop edges off the stack and produce a list of them.

    Consumes edge ids from ``edge_stack`` until the edge joining ``u`` and
    ``v`` (in either orientation) is reached, inclusive.
    """
    edge_list = []
    while len(edge_stack) > 0:
        # NOTE(review): popleft() takes from the FIFO end; for a
        # biconnected-component edge *stack* a LIFO pop() would be the
        # usual choice — confirm against how callers push edges.
        edge_id = edge_stack.popleft()
        edge_list.append(edge_id)
        edge = graph.get_edge(edge_id)
        # Stop once we reach the (u, v) edge, in either direction.
        tpl_a = (u, v)
        tpl_b = (v, u)
        if tpl_a == edge['vertices'] or tpl_b == edge['vertices']:
            break
    return edge_list
async def query_firmware(self):
    """Query the firmware versions.

    Fetches the firmware descriptor and records the main-processor and
    radio versions when present.
    """
    response = await self.request.get(
        join_path(self._base_path, "/fwversion"))
    firmware = response.get("firmware")
    if not firmware:
        return
    main_info = firmware.get("mainProcessor")
    if main_info:
        self._main_processor_version = self._make_version(main_info)
    radio_info = firmware.get("radio")
    if radio_info:
        self._radio_version = self._make_version(radio_info)
def iter_previewers(self, previewers=None):
    """Get previewers ordered by PREVIEWER_PREVIEWERS_ORDER.

    Lazily loads the entry-point group on first use, then yields the
    registered previewer for each requested name, in order.
    """
    if self.entry_point_group is not None:
        self.load_entry_point_group(self.entry_point_group)
        # Mark the group as consumed so it is only loaded once.
        self.entry_point_group = None
    if not previewers:
        previewers = self.app.config.get('PREVIEWER_PREFERENCE', [])
    for name in previewers:
        if name in self.previewers:
            yield self.previewers[name]
def _strftime(pattern, time_struct=time.localtime()):
try:
return time.strftime(pattern, time_struct)
except OSError:
dt = datetime.datetime.fromtimestamp(_mktime(time_struct))
original = dt.year
current = datetime.datetime.now().year
dt = dt.replace(year=current)
ts = dt.timestamp()
if _isdst(dt):
ts -= 3600
string = time.strftime(pattern, time.localtime(ts))
string = string.replace(str(current), str(original))
return string | Custom strftime because Windows is shit again. |
def _sequence_range_check(self, result, last):
removed = False
first = result[-2]
v1 = ord(first[1:2] if len(first) > 1 else first)
v2 = ord(last[1:2] if len(last) > 1 else last)
if v2 < v1:
result.pop()
result.pop()
removed = True
else:
result.append(last)
return removed | If range backwards, remove it.
A bad range will cause the regular expression to fail,
so we need to remove it, but return that we removed it
so the caller can know the sequence wasn't empty.
Caller will have to craft a sequence that makes sense
if empty at the end with either an impossible sequence
for inclusive sequences or a sequence that matches
everything for an exclusive sequence. |
def check_valid_time_and_sort(df, timescol, days=5, warning=True):
    """Check if the data contains reads created within the same `days` timeframe.

    If not, print a warning and only return the part of the data which is
    within `days` days. Resetting the index twice to get also an "index"
    column for plotting the cum_yield_reads plot.

    :param df: pandas DataFrame with a time column.
    :param timescol: name of the time column to check and sort by.
    :param days: maximum allowed timespan in days.
    :param warning: whether to print a warning when truncating.
    """
    timediff = (df[timescol].max() - df[timescol].min()).days
    if timediff < days:
        return df.sort_values(timescol).reset_index(drop=True).reset_index()
    else:
        if warning:
            sys.stderr.write(
                "\nWarning: data generated is from more than {} days.\n".format(str(days)))
            sys.stderr.write("Likely this indicates you are combining multiple runs.\n")
            sys.stderr.write(
                "Plots based on time are invalid and therefore truncated to first {} days.\n\n"
                .format(str(days)))
            logging.warning("Time plots truncated to first {} days: invalid timespan: {} days"
                            .format(str(days), str(timediff)))
        # NOTE(review): comparing the column directly to a timedelta
        # assumes ``timescol`` holds elapsed-time (timedelta) values, not
        # absolute timestamps — confirm against callers.
        return df[df[timescol] < timedelta(days=days)] \
            .sort_values(timescol) \
            .reset_index(drop=True) \
            .reset_index()
def del_actor(self, actor):
    """Remove an actor when the socket is closed.

    Drops the actor from the client table, notifies the service element,
    and schedules a reconnect if one is configured for the peer.
    """
    if _debug:
        TCPClientDirector._debug("del_actor %r", actor)
    del self.clients[actor.peer]
    if self.serviceElement:
        self.sap_request(del_actor=actor)
    # Schedule a delayed reconnect attempt for peers configured to do so.
    delay = self.reconnect.get(actor.peer)
    if delay is not None:
        task = FunctionTask(self.connect, actor.peer)
        task.install_task(_time() + delay)
def list_locks(root=None):
    """List current package locks.

    root
        operate on a different root directory.

    Return a dict containing the locked package with attributes::

        {'<package>': {'case_sensitive': '<case_sensitive>',
                       'match_type': '<match_type>'
                       'type': '<type>'}}

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_locks
    """
    locks = {}
    # Re-root the locks file path when an alternative root is given.
    _locks = os.path.join(root, os.path.relpath(LOCKS, os.path.sep)) if root else LOCKS
    try:
        with salt.utils.files.fopen(_locks) as fhr:
            # Lock entries are blank-line separated blocks of "key: value"
            # lines.
            items = salt.utils.stringutils.to_unicode(fhr.read()).split('\n\n')
            for meta in [item.split('\n') for item in items]:
                lock = {}
                for element in [el for el in meta if el]:
                    if ':' in element:
                        lock.update(dict([tuple([i.strip() for i in element.split(':', 1)]), ]))
                if lock.get('solvable_name'):
                    locks[lock.pop('solvable_name')] = lock
    except IOError:
        # A missing locks file simply means no locks are defined.
        pass
    except Exception:
        log.warning('Detected a problem when accessing %s', _locks)
    return locks
def update(self, instance, validated_data):
    """Update and return an existing `Snippet` instance, given the validated data.

    Fields absent from ``validated_data`` keep their current values.
    """
    for field in ('title', 'code', 'linenos', 'language', 'style'):
        setattr(instance, field,
                validated_data.get(field, getattr(instance, field)))
    instance.save()
    return instance
def on_message(self, message):
    """Called when we receive a message from our client.

    We proxy it to the backend.
    """
    self._record_activity()
    # Only forward when the backend websocket has been established.
    if hasattr(self, 'ws'):
        is_binary = isinstance(message, bytes)
        self.ws.write_message(message, binary=is_binary)
def exclude(self, **filters):
    """Applies query filters for excluding matching records from result set.

    Args:
        **filters: Query filters as keyword arguments.

    Returns:
        Self. Queryset object.

    Examples:
        >>> Person.objects.exclude(age=None)
        >>> Person.objects.filter(name__startswith='jo').exclude(age__lte=16)
    """
    # Negate each filter by prefixing its key with '-'.
    negated = {'-{0}'.format(key): value for key, value in filters.items()}
    return self.filter(**negated)
def mu_so(species, motif, spin_state):
    """Calculates the spin-only magnetic moment for a given species.

    Only supports transition metals.

    :param species: str or Species
    :param motif: "oct" or "tet"
    :param spin_state: "high" or "low"
    :return: spin-only magnetic moment in Bohr magnetons, or None for
        species without a crystal-field spin (non transition metals).
    """
    try:
        sp = get_el_sp(species)
        unpaired = sp.get_crystal_field_spin(coordination=motif,
                                             spin_config=spin_state)
        # mu_SO = sqrt(n * (n + 2)) with n unpaired electrons.
        return np.sqrt(unpaired * (unpaired + 2))
    except AttributeError:
        return None
def get_svg_layers(svg_sources):
    """Collect layers from input svg sources.

    Args:
        svg_sources (list) : A list of file-like objects, each containing
            one or more XML layers.

    Returns
    -------
    (width, height), layers : (int, int), list
        The first item in the tuple is the shape of the largest layer, and
        the second item is a list of ``Element`` objects (from
        :mod:`lxml.etree` module), one per SVG layer.
    """
    layers = []
    width, height = None, None

    def extract_length(attr):
        'Extract length in pixels.'
        match = CRE_MM_LENGTH.match(attr)
        if match:
            # Convert a millimetre length to pixels.
            return INKSCAPE_PPmm.magnitude * float(match.group('length'))
        else:
            return float(attr)

    for svg_source_i in svg_sources:
        xml_root = etree.parse(svg_source_i)
        svg_root = xml_root.xpath('/svg:svg', namespaces=INKSCAPE_NSMAP)[0]
        # Bug fix: ``max(value, None)`` raises TypeError on Python 3, so
        # seed width/height from the first document explicitly.
        w = extract_length(svg_root.attrib['width'])
        h = extract_length(svg_root.attrib['height'])
        width = w if width is None else max(w, width)
        height = h if height is None else max(h, height)
        layers += svg_root.xpath('//svg:g[@inkscape:groupmode="layer"]',
                                 namespaces=INKSCAPE_NSMAP)

    # Re-number layer ids sequentially across all sources.
    for i, layer_i in enumerate(layers):
        layer_i.attrib['id'] = 'layer%d' % (i + 1)
    return (width, height), layers
def _get_queue_name(cls, queue_name=None):
if queue_name is None and cls.queue_name is None:
raise LimpydJobsException("Queue's name not defined")
if queue_name is None:
return cls.queue_name
return queue_name | Return the given queue_name if defined, else the class's one.
If both are None, raise an Exception |
async def runRuntLift(self, full, valu=None, cmpr=None):
    """Execute a runt lift function.

    Args:
        full (str): Property to lift by.
        valu: Value to lift by.
        cmpr: Comparator to use.

    Returns:
        bytes, list: Yields bytes, list tuples where the list contains a
        series of key/value pairs which are used to construct a Node
        object.
    """
    func = self._runtLiftFuncs.get(full)
    if func is None:
        mesg = 'No runt lift implemented for requested property.'
        raise s_exc.NoSuchLift(mesg=mesg, full=full, valu=valu, cmpr=cmpr)
    async for buid, rows in func(full, valu, cmpr):
        yield buid, rows
def expanduser(path):
    """Expand '~' to home directory in the given string.

    Note that this function deliberately differs from the builtin
    os.path.expanduser() on Linux systems, which expands strings such as
    '~sclaus' to that user's homedir. This is problematic in rez because
    the string '~packagename' may inadvertently convert to a homedir, if a
    package happens to match a username.
    """
    if '~' not in path:
        return path
    # Resolve the home directory per platform.
    if os.name != "nt":
        userhome = os.path.expanduser('~')
    elif 'HOME' in os.environ:
        userhome = os.environ['HOME']
    elif 'USERPROFILE' in os.environ:
        userhome = os.environ['USERPROFILE']
    elif 'HOMEPATH' in os.environ:
        drive = os.environ.get('HOMEDRIVE', '')
        userhome = os.path.join(drive, os.environ['HOMEPATH'])
    else:
        # No way to determine a home directory; leave path untouched.
        return path
    expanded = EXPANDUSER_RE.sub(
        lambda m: m.groups()[0] + userhome + m.groups()[1], path)
    return os.path.normpath(expanded)
def parse_delta(__string: str) -> datetime.timedelta:
    """Parse ISO-8601 duration string.

    Accepts durations of the form ``P[nD][T[nH][nM][nS]]``, e.g.
    ``'PT1H30M'`` or ``'P2D'``.

    Args:
        __string: Duration string to parse

    Returns:
        Parsed delta object

    Raises:
        ValueError: If the string is not a valid ISO-8601 duration.
    """
    if not __string:
        return datetime.timedelta(0)
    # Reconstructed pattern: the original regex literal was missing from
    # this source. Named groups map directly onto timedelta kwargs.
    match = re.fullmatch(r"""
        P
        ((?P<days>\d+)D)?
        T?
        ((?P<hours>\d{1,2})H)?
        ((?P<minutes>\d{1,2})M)?
        ((?P<seconds>\d{1,2})S)?
    """, __string, re.VERBOSE)
    if not match:
        raise ValueError('Unable to parse delta {!r}'.format(__string))
    match_dict = {k: int(v) if v else 0 for k, v in match.groupdict().items()}
    return datetime.timedelta(**match_dict)
def _explain(self, tree):
    """Set up the engine to do a dry run of a query"""
    self._explaining = True
    self._call_list = []
    old_call = self.connection.call
    def fake_call(command, **kwargs):
        # describe_table still executes for real; every other command is
        # recorded, then aborted via ExplainSignal.
        if command == "describe_table":
            return old_call(command, **kwargs)
        self._call_list.append((command, kwargs))
        raise ExplainSignal
    self.connection.call = fake_call
    try:
        ret = self._run(tree[1])
        try:
            # Force evaluation of a lazy result so its calls are recorded.
            list(ret)
        except TypeError:
            pass
    finally:
        # Always restore the real call, even when ExplainSignal propagates.
        self.connection.call = old_call
        self._explaining = False
def update(self):
    """Update the odometer and the unit of measurement based on GUI."""
    self._controller.update(self._id, wake_if_asleep=False)
    state = self._controller.get_state_params(self._id)
    if state:
        self.__odometer = state['odometer']
    gui = self._controller.get_gui_params(self._id)
    if not gui:
        return
    if gui['gui_distance_units'] == "mi/hr":
        self.measurement = 'LENGTH_MILES'
    else:
        self.measurement = 'LENGTH_KILOMETERS'
    self.__rated = (gui['gui_range_display'] == "Rated")
def visit_BinOp(self, node):
    """Combine operands ranges for given operator.

    >>> import gast as ast
    >>> from pythran import passmanager, backend
    >>> node = ast.parse('''
    ... def foo():
    ...     a = 2
    ...     c = 3
    ...     d = a - c''')
    >>> pm = passmanager.PassManager("test")
    >>> res = pm.gather(RangeValues, node)
    >>> res['d']
    Interval(low=-1, high=-1)
    """
    left_range = self.visit(node.left)
    right_range = self.visit(node.right)
    return self.add(node, combine(node.op, left_range, right_range))
def searchType(libtype):
    """Returns the integer value of the library string type.

    Parameters:
        libtype (str): LibType to lookup (movie, show, season, episode,
            artist, album, track, collection)

    Raises:
        :class:`plexapi.exceptions.NotFound`: Unknown libtype
    """
    libtype = compat.ustr(libtype)
    # Already a known integer value (passed in as its string form).
    known_values = [compat.ustr(v) for v in SEARCHTYPES.values()]
    if libtype in known_values:
        return libtype
    value = SEARCHTYPES.get(libtype)
    if value is not None:
        return value
    raise NotFound('Unknown libtype: %s' % libtype)
def getChildren(self, returned_properties=None):
    """Get all the children workitems of this workitem.

    If no children, None will be returned.

    :param returned_properties: the returned properties that you want.
        Refer to :class:`rtcclient.client.RTCClient` for more explanations
    :return: a :class:`rtcclient.workitem.Workitem` object
    :rtype: rtcclient.workitem.Workitem
    """
    children_tag = ("rtc_cm:com.ibm.team.workitem.linktype."
                    "parentworkitem.children")
    return self.rtc_obj._get_paged_resources(
        "Children",
        workitem_id=self.identifier,
        customized_attr=children_tag,
        page_size="10",
        returned_properties=returned_properties)
def to_file(self, path):
    """Write metadata to an image, video or XMP sidecar file.

    :param str path: The image/video file path name.
    """
    xmp_path = path + '.xmp'
    # Remove any stale sidecar so we never merge with old metadata.
    if os.path.exists(xmp_path):
        os.unlink(xmp_path)
    md_path = path
    md = GExiv2.Metadata()
    try:
        md.open_path(md_path)
    except GLib.GError:
        # File type does not support embedded metadata: fall back to an
        # XMP sidecar file.
        md_path = xmp_path
        with open(md_path, 'w') as of:
            # NOTE(review): the XMP template string literal appears to
            # have been stripped from this call — confirm against the
            # upstream source before relying on this path.
            of.write(
                )
        md = GExiv2.Metadata()
        md.open_path(md_path)
    md.register_xmp_namespace(
        'https://github.com/jim-easterbrook/pyctools', 'pyctools')
    for tag, value in self.data.items():
        # Multi-valued XMP tags need set_tag_multiple.
        if md.get_tag_type(tag) in ('XmpBag', 'XmpSeq'):
            md.set_tag_multiple(tag, value)
        else:
            md.set_tag_string(tag, value)
    if self.comment is not None:
        md.set_comment(self.comment)
    md.save_file(md_path)
def remove(mod, persist=False, comment=True):
    """Remove the specified kernel module

    mod
        Name of module to remove

    persist
        Also remove module from /boot/loader.conf

    comment
        If persist is set don't remove line from /boot/loader.conf but only
        comment it

    CLI Example:

    .. code-block:: bash

        salt '*' kmod.remove vmm
    """
    pre_mods = lsmod()
    res = __salt__['cmd.run_all']('kldunload {0}'.format(mod),
                                  python_shell=False)
    if res['retcode'] != 0:
        return 'Error removing module {0}: {1}'.format(mod, res['stderr'])
    # Diff the module list before/after to report what actually unloaded.
    post_mods = lsmod()
    removed = _rm_mods(pre_mods, post_mods)
    persisted = _remove_persistent_module(mod, comment) if persist else set()
    return sorted(removed | persisted)
def report_alerts(alerts, output_format='table'):
    """Print our alerts in the given format."""
    if output_format == 'json':
        click.echo(json.dumps(alerts, indent=4))
        return
    total = len(alerts)
    console.info('Issues found: {0}'.format(total))
    if total > 0:
        rows = [[a['alert'], a['risk'], a['cweid'], a['url']] for a in alerts]
        click.echo(tabulate(rows,
                            headers=['Alert', 'Risk', 'CWE ID', 'URL'],
                            tablefmt='grid'))
def draw_text(self, video_name, out, start, end, x, y, text,
              color='0xFFFFFF', show_background=0,
              background_color='0x000000', size=16):
    """Draws text over a video

    @param video_name : name of video input file
    @param out : name of video output file
    @param start : start timecode to draw text hh:mm:ss
    @param end : end timecode to draw text hh:mm:ss
    @param x : x position of text (px)
    @param y : y position of text (px)
    @param text : text content to draw
    @param color : text color
    @param show_background : boolean to show a background box behind the
        text
    @param background_color : color of background box
    @param size : font size in points
    """
    # Build an ffmpeg filter graph: drawtext on the video stream, gated by
    # the start/end window; audio is padded through unchanged.
    cfilter = (r"[0:0]drawtext=fontfile=/Library/Fonts/AppleGothic.ttf:"
               r"x={x}:y={y}:fontcolor='{font_color}':"
               r"box={show_background}:"
               r"boxcolor='{background_color}':"
               r"text='{text}':fontsize={size}:"
               r"enable='between(t,{start},{end})'[vout];"
               r"[0:1]apad=pad_len=0[aout]")\
        .format(x=x, y=y, font_color=color,
                show_background=show_background,
                background_color=background_color, text=text, start=start,
                end=end, size=size)
    # huffyuv keeps the intermediate lossless.
    command = ['ffmpeg', '-i', video_name, '-c:v', 'huffyuv', '-y',
               '-filter_complex', cfilter, '-an', '-y',
               '-map', '[vout]',
               '-map', '[aout]',
               out]
    if self.verbose:
        # NOTE: Python 2 print statements — this module targets Python 2.
        print 'Drawing text "{0}" onto {1} output as {2}'.format(
            text,
            video_name,
            out,
        )
        print ' '.join(command)
    call(command)
def find(self, value):
    """returns a dictionary of items based on the a lowercase search

    args:
        value: the value to search by
    """
    needle = str(value).lower()
    found = RegistryDictionary()
    for key, item in self.items():
        # Case-insensitive substring match on the key.
        if needle in key.lower():
            found[key] = item
    return found
def make_dbm():
    """Creates a DBM queue store, pulling config values from the CoilMQ configuration."""
    try:
        data_dir = config.get('coilmq', 'qstore.dbm.data_dir')
        cp_ops = config.getint('coilmq', 'qstore.dbm.checkpoint_operations')
        cp_timeout = config.getint('coilmq', 'qstore.dbm.checkpoint_timeout')
    except ConfigParser.NoOptionError as e:
        raise ConfigError('Missing configuration parameter: %s' % e)
    # Fail fast on an unusable data directory.
    if not os.path.exists(data_dir):
        raise ConfigError('DBM directory does not exist: %s' % data_dir)
    if not os.access(data_dir, os.W_OK | os.R_OK):
        raise ConfigError('Cannot read and write DBM directory: %s' % data_dir)
    return DbmQueue(data_dir, checkpoint_operations=cp_ops,
                    checkpoint_timeout=cp_timeout)
def user_deleted(sender, **kwargs):
    """collect metrics about new users signing up"""
    if not kwargs.get('created'):
        return
    write('user_variations', {'variation': 1}, tags={'action': 'created'})
    write('user_count', {'total': User.objects.count()})
def saveSettings(self):
    """Saves the persistent settings. Only saves the profile."""
    try:
        self.saveProfile()
    except Exception as ex:
        # A failing save must not crash the caller; log and continue
        # unless explicitly debugging.
        logger.warn(ex)
        if DEBUGGING:
            raise
    finally:
        # Always record that a save was attempted, even on failure.
        self._settingsSaved = True
def _copy_stream(src, dest, length=0):
    """Similar to shutil.copyfileobj, but supports limiting data size.

    As for why this is required, refer to
    https://www.python.org/dev/peps/pep-0333/#input-and-error-streams
    Yes, there are WSGI implementations which do not support EOFs, and
    believe me, you don't want to debug this.

    Args:
        src: source file-like object
        dest: destination file-like object
        length: optional file size hint
            If not 0, at most length bytes will be written (fewer only if
            the source hits EOF first).
            If 0, write will continue until EOF is encountered.
    """
    if length == 0:
        shutil.copyfileobj(src, dest)
        return
    bytes_left = length
    while bytes_left > 0:
        buf_size = min(_BUFFER_SIZE, bytes_left)
        buf = src.read(buf_size)
        if not buf:
            # Premature EOF: stop instead of looping (or over-counting).
            break
        dest.write(buf)
        # Bug fix: count the bytes actually read — file-like objects may
        # return fewer than requested, which previously lost data by
        # decrementing the requested size.
        bytes_left -= len(buf)
def get_host_ip(logHost):
    """If it is not match your local ip, you should fill the PutLogsRequest
    parameter source by yourself.

    Determines the local IP used to reach ``logHost`` by opening a UDP
    socket (no packets are actually sent); falls back to 127.0.0.1.
    """
    sock = None
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.connect((logHost, 80))
        return sock.getsockname()[0]
    except Exception:
        return '127.0.0.1'
    finally:
        if sock:
            sock.close()
def resizeEvent(self, event):
    """Handles a resize event for this overlay, centering the central widget if
    one is found.

    :param event | <QtCore.QEvent>
    """
    super(XOverlayWidget, self).resizeEvent(event)
    # Recompute geometry so the central widget stays centered.
    self.adjustSize()
def get_statements(self):
    """Convert network edges into Statements.

    Returns
    -------
    list of Statements
        Converted INDRA Statements.
    """
    edges = _get_dict_from_list('edges', self.cx)
    for edge in edges:
        # 'i' holds the edge's interaction type; untyped edges are skipped.
        edge_type = edge.get('i')
        if not edge_type:
            continue
        stmt_type = _stmt_map.get(edge_type)
        if stmt_type:
            id = edge['@id']
            source_agent = self._node_agents.get(edge['s'])
            target_agent = self._node_agents.get(edge['t'])
            # Both endpoints must have been resolved to agents.
            if not source_agent or not target_agent:
                logger.info("Skipping edge %s->%s: %s" %
                            (self._node_names[edge['s']],
                             self._node_names[edge['t']], edge))
                continue
            ev = self._create_evidence(id)
            # Complex takes a member list; other statement types take
            # (subject, object).
            if stmt_type == Complex:
                stmt = stmt_type([source_agent, target_agent], evidence=ev)
            else:
                stmt = stmt_type(source_agent, target_agent, evidence=ev)
            self.statements.append(stmt)
    return self.statements
def update_reg(self, addr, mask, new_val):
    """Update register at 'addr', replace the bits masked out by 'mask'
    with new_val. new_val is shifted left to match the LSB of 'mask'.

    Returns just-written value of register.
    """
    # Read-modify-write: clear the masked bits, then OR in the new field.
    value = self.read_reg(addr) & ~mask
    value |= (new_val << _mask_to_shift(mask)) & mask
    self.write_reg(addr, value)
    return value
def make_primitive_smoothed(cas_coords, smoothing_level=2):
    """Generates smoothed primitive from a list of coordinates.

    Parameters
    ----------
    cas_coords : list(numpy.array or float or tuple)
        Each element of the list must have length 3.
    smoothing_level : int, optional
        Number of times to run the averaging.

    Returns
    -------
    s_primitive : list(numpy.array)
        Each array has length 3.

    Raises
    ------
    ValueError
        If the smoothing level is too great compared to the length
        of cas_coords.
    """
    try:
        smoothed = make_primitive(cas_coords)
        # Each extra pass averages the primitive again.
        for _ in range(smoothing_level):
            smoothed = make_primitive(smoothed)
    except ValueError:
        raise ValueError(
            'Smoothing level {0} too high, try reducing the number of rounds'
            ' or give a longer Chain (curent length = {1}).'.format(
                smoothing_level, len(cas_coords)))
    return smoothed
def sphcyl(radius, colat, slon):
    """This routine converts from spherical coordinates to cylindrical
    coordinates.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sphcyl_c.html

    :param radius: Distance of point from origin.
    :type radius: float
    :param colat: Polar angle (co-latitude in radians) of point.
    :type colat: float
    :param slon: Azimuthal angle (longitude) of point (radians).
    :type slon: float
    :return:
        Distance of point from z axis,
        angle (radians) of point from XZ plane,
        Height of point above XY plane.
    :rtype: tuple
    """
    # Marshal the inputs to C doubles and allocate C out-parameters.
    radius = ctypes.c_double(radius)
    colat = ctypes.c_double(colat)
    slon = ctypes.c_double(slon)
    r = ctypes.c_double()
    lon = ctypes.c_double()
    z = ctypes.c_double()
    libspice.sphcyl_c(radius, colat, slon, ctypes.byref(r), ctypes.byref(lon),
                      ctypes.byref(z))
    return r.value, lon.value, z.value
def _do_highlight(content, query, tag='em'):
    """Highlight `query` terms in `content` with html `tag`.

    This method assumes that the input text (`content`) does not contain
    any special formatting. That is, it does not contain any html tags
    or similar markup that could be screwed up by the highlighting.

    Required arguments:
        `content` -- Content to search for instances of `text`
        `text` -- The text to be highlighted
    """
    for term in query:
        # NOTE(review): decode implies query terms are bytes (Python 2
        # era); on Python 3 str has no decode — confirm input types.
        term = term.decode('utf-8')
        # NOTE(review): each lowercase-ish fragment is used as an
        # *unescaped* regex pattern — regex metacharacters in terms would
        # misbehave; confirm terms are pre-sanitized.
        for match in re.findall('[^A-Z]+', term):
            match_re = re.compile(match, re.I)
            content = match_re.sub('<%s>%s</%s>' % (tag, term, tag), content)
    return content
def register_error_handler(self, error: Union[Type[Exception], int], func: Callable) -> None:
    """Add an error handler function to the blueprint.

    This is designed to be used on the blueprint directly, and has the
    same arguments as :meth:`~quart.Quart.register_error_handler`. An
    example usage,

    .. code-block:: python

        def not_found():
            ...

        blueprint = Blueprint(__name__)
        blueprint.register_error_handler(404, not_found)
    """
    def deferred(state) -> None:
        # Registration is delayed until the blueprint is bound to an app.
        state.app.register_error_handler(error, func, self.name)

    self.record_once(deferred)
def VerifyStructure(self, parser_mediator, lines):
    """Verifies that this is a syslog-formatted file.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        lines (str): one or more lines from the text file.

    Returns:
        bool: True if this is the correct parser, False otherwise.
    """
    # Accept either the standard or the ChromeOS syslog line format.
    if re.match(self._VERIFICATION_REGEX, lines):
        return True
    return re.match(self._CHROMEOS_VERIFICATION_REGEX, lines) is not None
def option_changed(self, option, value):
    """Option has changed"""
    # Store the option as an attribute on this widget, then push the new
    # view settings to the shell and redraw the table.
    setattr(self, to_text_string(option), value)
    self.shellwidget.set_namespace_view_settings()
    self.refresh_table()
def needs_path(f):
    """Wraps a function that accepts path-like to give it a pathlib.Path."""
    @wraps(f)
    def wrapper(pathlike, *args, **kwargs):
        return f(pathlib.Path(pathlike), *args, **kwargs)
    return wrapper
def from_rgb(cls, r: int, g: int, b: int) -> 'ColorCode':
    """Return a ColorCode from a RGB tuple."""
    instance = cls()
    instance._init_rgb(r, g, b)
    return instance
def set_default_color_scheme(name, replace=True):
    """Reset color scheme to default values.

    :param name: name of a known color scheme; must be one of
        ``sh.COLOR_SCHEME_NAMES``.
    :param replace: forwarded to :func:`set_color_scheme`.
    :raises ValueError: if ``name`` is not a known color scheme.
    """
    # Validate explicitly rather than with ``assert`` so the check still
    # runs under ``python -O``.
    if name not in sh.COLOR_SCHEME_NAMES:
        raise ValueError('Unknown color scheme: {0!r}'.format(name))
    set_color_scheme(name, sh.get_color_scheme(name), replace=replace)
def to_ipa(s, delimiter=' ', all_readings=False, container='[]'):
    """Convert a string's Chinese characters to IPA.

    *s* is a string containing Chinese characters.

    *delimiter* is the character used to indicate word boundaries in *s*.
    This is used to differentiate between words and characters so that a
    more accurate reading can be returned.

    *all_readings* is a boolean value indicating whether or not to return
    all possible readings in the case of words/characters that have
    multiple readings. *container* is a two character string that is used
    to enclose words/characters if *all_readings* is ``True``. The default
    ``'[]'`` is used like this: ``'[READING1/READING2]'``.

    Characters not recognized as Chinese are left untouched.
    """
    # Convert to numbered Pinyin first, then map Pinyin to IPA.
    numbered = to_pinyin(s, delimiter, all_readings, container, False)
    return pinyin_to_ipa(numbered)
def is_supergroup(self, subgroup):
    """True if this group is a supergroup of the supplied group.

    Args:
        subgroup (SymmetryGroup): Subgroup to test.

    Returns:
        True if this group is a supergroup of the supplied group.
    """
    warnings.warn("This is not fully functional. Only trivial subsets are "
                  "tested right now. ")
    sub_ops = set(subgroup.symmetry_ops)
    return sub_ops.issubset(self.symmetry_ops)
def remove(self):
    """Remove the PID file."""
    if isfile(self.pid_file):
        try:
            # ``remove`` here resolves to the module-level (os) import,
            # not this method — method names are not in scope in the body.
            remove(self.pid_file)
        except Exception as e:
            self.die('Failed to remove PID file: {}'.format(str(e)))
    else:
        # NOTE(review): True is returned only when the file was already
        # absent; a successful unlink falls through and returns None —
        # confirm whether callers rely on this asymmetry.
        return True
def fetch_platform_informations(self, callback):
    """Fetch platform info from the firmware.

    Should be called at the earliest in the connection sequence.
    """
    # Reset to "unknown" and remember who to notify, then kick off the
    # protocol-version request.
    self._protocolVersion = -1
    self._callback = callback
    self._request_protocol_version()
def in_interactions_iter(self, nbunch=None, t=None):
    """Return an iterator over the in interactions present in a given snapshot.

    Edges are returned as tuples in the order (node, neighbor).

    Parameters
    ----------
    nbunch : iterable container, optional (default= all nodes)
        A container of nodes. The container will be iterated through once.
    t : snapshot id (default=None)
        If None the method returns an iterator over the edges of the
        flattened graph.

    Returns
    -------
    edge_iter : iterator
        An iterator of (u, v, data) tuples of interactions.

    Notes
    -----
    Nodes in nbunch that are not in the graph will be (quietly) ignored.
    For directed graphs this returns the in-interactions.

    Examples
    --------
    >>> G = dn.DynDiGraph()
    >>> G.add_interaction(0,1, 0)
    >>> G.add_interaction(1,2, 0)
    >>> G.add_interaction(2,3,1)
    >>> [e for e in G.in_interactions_iter(t=0)]
    [(0, 1), (1, 2)]
    """
    if nbunch is None:
        # Walk every node's predecessor adjacency.
        nodes_nbrs_pred = self._pred.items()
    else:
        # nbunch_iter silently skips nodes not present in the graph.
        nodes_nbrs_pred = [(n, self._pred[n]) for n in self.nbunch_iter(nbunch)]
    for n, nbrs in nodes_nbrs_pred:
        for nbr in nbrs:
            if t is not None:
                # Only yield edges that exist at snapshot t.
                if self.__presence_test(nbr, n, t):
                    yield (nbr, n, {"t": [t]})
            else:
                # Flattened view: yield the stored attribute dict.
                if nbr in self._pred[n]:
                    yield (nbr, n, self._pred[n][nbr])
def get_notifications(self, new=True):
    """Return all the notifications for this user.

    :param new: presumably restricts the result to unseen notifications;
        it reaches the endpoint via the ``locals()`` call below — TODO confirm.
    :return: dict with 'messages' (list of Message) and 'replies'
        (list of Comment).
    """
    url = (self._imgur._base_url + "/3/account/{0}/"
           "notifications".format(self.name))
    # NOTE(review): params=locals() also captures `self` and `url`;
    # presumably _send_request filters to real query params — verify.
    resp = self._imgur._send_request(url, params=locals(), needs_auth=True)
    msgs = [Message(msg_dict, self._imgur, has_fetched=True) for msg_dict
            in resp['messages']]
    replies = [Comment(com_dict, self._imgur, has_fetched=True) for
               com_dict in resp['replies']]
    return {'messages': msgs, 'replies': replies}
def arrayuniqify(X, retainorder=False):
    """Very fast uniqify routine for numpy arrays.

    **Parameters**

        **X** : numpy array
            Determine the unique elements of this numpy array.

        **retainorder** : Boolean, optional
            Whether or not to return indices corresponding to unique
            values of `X` that also sort the values.  Default value is
            `False`, in which case `[D, s]` is returned.  A uniqified
            version of `X` is then simply ``X[s][D]``.

    **Returns**

        **D** : numpy array
            Boolean "first differences" in the sorted version of `X`.
            Returned when `retainorder` is `False` (default).

        **s** : numpy array
            Permutation that will sort `X`.  Returned when
            `retainorder` is `False` (default).

        **ind** : list
            Sorted list of indices of the first occurrence of each unique
            value of `X`.  Returned when `retainorder` is `True`.

    **See Also:**

        :func:`tabular.fast.recarrayuniqify`
    """
    order = X.argsort()
    sorted_vals = X[order]
    # True wherever a new value starts in the sorted array.
    first_diff = np.append([True], sorted_vals[1:] != sorted_vals[:-1])
    if not retainorder:
        return [first_diff, order]
    # Boundaries of each run of equal values in the sorted array.
    boundaries = np.append(first_diff.nonzero()[0], len(X))
    # For every run, the smallest original index is the first occurrence.
    first_occurrences = [min(order[start:boundaries[i + 1]])
                         for (i, start) in enumerate(boundaries[:-1])]
    first_occurrences.sort()
    return first_occurrences
def extract_operations(self, migrations):
    """Extract SQL operations from the given migrations.

    :param migrations: iterable of migration objects whose ``operations``
        are scanned for RunSQL entries.
    :return: list of parsed SqlObjectOperation objects.
    """
    operations = []
    for migration in migrations:
        for operation in migration.operations:
            if isinstance(operation, RunSQL):
                # A single RunSQL may carry several SQL statements.
                statements = sqlparse.parse(dedent(operation.sql))
                for statement in statements:
                    # NOTE: rebinds the loop variable `operation` to the
                    # parsed result for the rest of this inner iteration.
                    operation = SqlObjectOperation.parse(statement)
                    if operation:
                        operations.append(operation)

                        if self.verbosity >= 2:
                            self.stdout.write("  > % -100s (%s)" % (operation, migration))
    return operations
async def send_notification(self, method, args=()):
    """Send an RPC notification over the network.

    :param method: RPC method name.
    :param args: positional arguments for the notification (default empty).
    """
    # Encode the notification into a wire message via the connection.
    message = self.connection.send_notification(Notification(method, args))
    await self._send_message(message)
def _grains():
    """Get the grains from the proxied device.

    Reads connection details from the proxy pillar, queries vSphere system
    info and merges it into the module-level GRAINS_CACHE.  Missing pillar
    keys leave the cache untouched.
    """
    try:
        host = __pillar__['proxy']['host']
        if host:
            username, password = _find_credentials(host)
            protocol = __pillar__['proxy'].get('protocol')
            port = __pillar__['proxy'].get('port')
            ret = salt.modules.vsphere.system_info(host=host,
                                                   username=username,
                                                   password=password,
                                                   protocol=protocol,
                                                   port=port)
            GRAINS_CACHE.update(ret)
    except KeyError:
        # No proxy config in the pillar; fall through to the cached value.
        pass
    return GRAINS_CACHE
def create(self, doc_details):
    """A method to create a new document in the collection.

    :param doc_details: dictionary with document details and user id value
    :return: dictionary with document details and _id and _rev values
    """
    title = '%s.create' % self.__class__.__name__
    # Validate against the schema model when one is configured.
    if self.model:
        doc_details = self.model.validate(doc_details, path_to_root='', object_title='%s(doc_details={...}' % title)
    # Deep-copy so the caller's dict is never mutated.
    from copy import deepcopy
    new_record = deepcopy(doc_details)
    url = self.bucket_url + '/'
    response = requests.post(url, json=new_record)
    if response.status_code not in (200, 201):
        response = response.json()
        raise Exception('%s() error: %s' % (title, response))
    # Couch-style response carries the generated id and revision.
    response = response.json()
    new_record['_id'] = response['id']
    new_record['_rev'] = response['rev']
    return new_record
def _process_hints(self, analyzed_addrs):
    """Process function hints in the binary.

    Pops pending function-hint addresses and, for the first address that
    has not been analyzed yet, schedules a new CFG job starting there.

    :param analyzed_addrs: collection of addresses already analyzed.
    :return: None
    """
    while self._pending_function_hints:
        f = self._pending_function_hints.pop()
        if f not in analyzed_addrs:
            # Fresh fastpath state whose IP points at the hinted address.
            new_state = self.project.factory.entry_state(mode='fastpath')
            new_state.ip = new_state.solver.BVV(f, self.project.arch.bits)
            if new_state.arch.name in ('MIPS32', 'MIPS64'):
                # MIPS PIC convention: t9 holds the callee address.
                new_state.registers.store('t9', f)
            new_path_wrapper = CFGJob(f,
                                      new_state,
                                      self._context_sensitivity_level
                                      )
            self._insert_job(new_path_wrapper)
            self._register_analysis_job(f, new_path_wrapper)
            l.debug('Picking a function 0x%x from pending function hints.', f)
            self.kb.functions.function(new_path_wrapper.func_addr, create=True)
            # Only schedule one hinted function per call.
            break
def since(self, timestamp=None, version=None, deleted=False):
    """Queries the database for objects updated since timestamp or version.

    Arguments:
        timestamp <DateTime=None|int=None>: if specified return all objects
            modified since that time. A numeric value is treated as a unix
            timestamp.
        version <int=None>: if specified return all objects with a version
            greater than the one specified.
        deleted <bool=False>: if True include soft-deleted objects.

    Either timestamp or version needs to be provided.
    """
    qset = self
    if timestamp is not None:
        # Accept unix timestamps as well as datetime objects.
        if isinstance(timestamp, numbers.Real):
            timestamp = datetime.datetime.fromtimestamp(timestamp)
        qset = qset.filter(
            models.Q(created__gt=timestamp) |
            models.Q(updated__gt=timestamp)
        )
    if version is not None:
        qset = qset.filter(version__gt=version)
    if not deleted:
        # Exclude soft-deleted rows unless explicitly requested.
        qset = qset.undeleted()
    return qset
def validate(in_, options=None):
    """Validate objects from JSON data in a textual stream.

    :param in_: A textual stream of JSON data.
    :param options: Validation options.
    :return: An ObjectValidationResults instance, or a list of such.
    """
    parsed = json.load(in_)
    return validate_parsed_json(parsed, options)
def add_it(workbench, file_list, labels):
    """Add the given file_list to workbench as samples, also add them as nodes.

    Args:
        workbench: Instance of Workbench Client.
        file_list: list of file paths.
        labels: labels for the nodes.

    Returns:
        A list of md5s.
    """
    md5s = []
    for filename in file_list:
        base_name = os.path.basename(filename)
        # Compare the basename, not the full path: the previous check
        # (filename != '.DS_Store') failed to skip e.g. 'dir/.DS_Store'.
        if base_name == '.DS_Store':
            continue
        with open(filename, 'rb') as pe_file:
            md5 = workbench.store_sample(pe_file.read(), base_name, 'exe')
        workbench.add_node(md5, md5[:6], labels)
        md5s.append(md5)
    return md5s
def _dedup(items, insensitive):
deduped = []
if insensitive:
i_deduped = []
for item in items:
lowered = item.lower()
if lowered not in i_deduped:
deduped.append(item)
i_deduped.append(lowered)
else:
for item in items:
if item not in deduped:
deduped.append(item)
return deduped | Deduplicate an item list, and preserve order.
For case-insensitive lists, drop items if they case-insensitively match
a prior item. |
def save_data(self, trigger_id, **data):
    """Let's save the data.

    Don't want to handle empty title nor content, otherwise this will
    produce an Exception by the Evernote's API.

    :param trigger_id: trigger ID from which to save data
    :param data: the data to check to be used and save
    :type trigger_id: int
    :type data: dict
    :return: the status of the save statement
    :rtype: boolean
    """
    title, content = super(ServiceEvernote, self).save_data(trigger_id, **data)
    trigger = Evernote.objects.get(trigger_id=trigger_id)
    note_store = self._notestore(trigger_id, data)
    if isinstance(note_store, evernote.api.client.Store):
        note = self._notebook(trigger, note_store)
        note = self._attributes(note, data)
        content = self._footer(trigger, data, content)
        # Cap the title at 255 characters before handing it to the API.
        note.title = limit_content(title, 255)
        note = self._content(note, content)
        return EvernoteMgr.create_note(note_store, note, trigger_id, data)
    else:
        # _notestore returned a status/error object instead of a Store.
        return note_store
def name(self):
    """MessageHandler name.

    Fetched via FFI from the environment (presumably a CLIPS binding,
    given EnvGetDefmessageHandlerName) and decoded to a Python string.
    """
    return ffi.string(lib.EnvGetDefmessageHandlerName(
        self._env, self._cls, self._idx)).decode()
def _jit_predict_fun(model_predict, num_devices):
    """Use jit on model_predict if required.

    Returns a predict function that JIT-compiles ``model_predict`` for a
    single device, or pmaps it across ``num_devices`` and reshapes the
    inputs and outputs accordingly.
    """
    def predict(x, params=(), rng=None):
        """Predict function JITed and parallelized as requested."""
        if num_devices == 1:
            return backend.jit(model_predict)(x, params, rng=rng)
        # Multi-device path: shard the batch across devices with pmap.
        @functools.partial(backend.pmap, axis_name="batch")
        def mapped_predict(x, params, rng):
            return model_predict(x, params, rng=rng)
        pred = mapped_predict(
            reshape_by_device(x, num_devices),
            params,
            jax_random.split(rng, num_devices))
        # Fold the leading device dimension back into the batch dimension.
        if not isinstance(x, (list, tuple)):
            batch_size = x.shape[0]
            return np.reshape(pred, [batch_size] + list(pred.shape[2:]))
        batch_size = x[0].shape[0]
        return [np.reshape(p, [batch_size] + list(p.shape[2:])) for p in pred]
    return predict
def log_weights(self):
    """Log weights as described in the FS framework.

    Computed as the difference between match-class and nonmatch-class
    feature log probabilities of the underlying classifier, mapped back
    through the inverse probability transform.
    """
    m = self.kernel.feature_log_prob_[self._match_class_pos()]
    u = self.kernel.feature_log_prob_[self._nonmatch_class_pos()]
    return self._prob_inverse_transform(m - u)
def create_assembly_instance(self, assembly_uri, part_uri, configuration):
    """Insert a configurable part into an assembly.

    Args:
        - assembly_uri (dict): did, wvm_type, wvm and eid of the target assembly
        - part_uri (dict): did, wvm and eid of the configurable part
        - configuration (dict): the configuration

    Returns:
        - requests.Response: Onshape response data
    """
    payload = {
        "documentId": part_uri["did"],
        "elementId": part_uri["eid"],
        "versionId": part_uri["wvm"],
        "isAssembly": False,
        "isWholePartStudio": True,
        "configuration": self.encode_configuration(part_uri["did"], part_uri["eid"], configuration)
    }
    endpoint = '/api/assemblies/d/{did}/{wvm_type}/{wvm}/e/{eid}/instances'.format(
        did=assembly_uri["did"], wvm_type=assembly_uri["wvm_type"],
        wvm=assembly_uri["wvm"], eid=assembly_uri["eid"])
    return self._api.request('post', endpoint, body=payload)
def flaskify(response, headers=None, encoder=None):
    """Format the response to be consumeable by flask.

    The api returns mostly JSON responses. The format method converts the
    dicts into a json object (as a string), and the right response is
    returned (with the valid mimetype, charset and status.)

    Args:
        response (Response): The dictionary object to convert into a json
            object. If the value is a string, a dictionary is created with
            the key "message".
        headers (dict): optional headers for the flask response.
        encoder (Class): The class of the encoder (if any).

    Returns:
        flask.Response: The flask response with formatted data, headers,
        and mimetype.
    """
    status_code = response.status
    # Errors take precedence over the regular message payload.
    data = response.errors or response.message
    mimetype = 'text/plain'
    # Single idiomatic isinstance call instead of two chained checks.
    if isinstance(data, (list, dict)):
        mimetype = 'application/json'
        data = json.dumps(data, cls=encoder)
    return flask.Response(
        response=data, status=status_code, headers=headers, mimetype=mimetype)
def add_letter_to_axis(ax, let, col, x, y, height):
    """Add 'let' with position x,y and height height to matplotlib axis 'ax'.

    :param ax: matplotlib axis to draw on.
    :param let: sequence of one or two polygons making up the letter.
    :param col: color for the first polygon (a second one is drawn white).
    :param x: horizontal offset of the letter.
    :param y: vertical offset of the letter.
    :param height: vertical scale factor applied to the polygons.
    :raises ValueError: if `let` holds three or more polygons.
    """
    if len(let) == 2:
        colors = [col, "white"]
    elif len(let) == 1:
        colors = [col]
    else:
        raise ValueError("3 or more Polygons are not supported")
    for polygon, color in zip(let, colors):
        scaled = affinity.scale(polygon, yfact=height, origin=(0, 0, 0))
        shifted = affinity.translate(scaled, xoff=x, yoff=y)
        ax.add_patch(PolygonPatch(shifted, edgecolor=color, facecolor=color))
def init_sources(path):
    """Initializes array of groups and their associated js files.

    Walks ``path`` and registers every file whose extension matches
    ``config.source_ext`` as a Script, then parses its dependencies.
    """
    for f in dir_list(path):
        if os.path.splitext(f)[1][1:] == config.source_ext:
            # Python 2 print statements are syntax errors on Python 3;
            # use the function form.
            print("Source file discovered: %s" % (f))
            script = Script(f)
            if script.filename not in config.sources.keys():
                config.sources[script.path] = script
            # NOTE(review): source indentation was ambiguous — dependencies
            # are parsed for every discovered script; confirm against VCS.
            parse.parse_dependencies(script, script)
def create_sync_ops(self, host_device):
    """Create an assignment operation for each weight on all devices.

    The weight is assigned the value of the copy on the `host_device`.

    :param host_device: device whose parameter values are the source of truth.
    :return: list of tf.assign ops copying host params to every other device.
    """
    sync_ops = []
    host_params = self.params_device[host_device]
    # dict.iteritems() was Python 2 only; items() is the Python 3 equivalent.
    for device, params in self.params_device.items():
        if device == host_device:
            # Nothing to sync onto the host itself.
            continue
        for k in self.params_names:
            if isinstance(params[k], tf.Variable):
                sync_ops.append(tf.assign(params[k], host_params[k]))
    return sync_ops
def _handle_ssh_callback(self, submission_id, host, port, password):
    """Handles the creation of a remote ssh server.

    Persists the ssh connection details on the submission document when a
    host was provided; does nothing otherwise.

    :param submission_id: id of the submission document to update
    :param host: ssh host, or None when no server was started
    :param port: ssh port
    :param password: ssh password
    """
    if host is not None:
        obj = {
            "ssh_host": host,
            "ssh_port": port,
            "ssh_password": password
        }
        self._database.submissions.update_one({"_id": submission_id}, {"$set": obj})
def get_thread_id(self):
    """Return thread id.

    Lazily resolves the id by scanning ``threading._active`` for this
    object and caches the result in ``self._id``.
    """
    if self._id is not None:
        return self._id
    for thread_id, obj in list(threading._active.items()):
        if obj is self:
            self._id = thread_id
            break
    return self._id
def GetTopicsTree(self):
    """Reads and returns the topics tree.

    This auxiliary function reads and returns the topics tree file
    contents for the CHM archive.

    :return: the raw topics file contents, or None when unavailable.
    """
    if self.topics is None:
        return None
    # NOTE(review): when self.topics is falsy-but-not-None, `ui` below is
    # unbound — preserved from the original; confirm self.topics semantics.
    if self.topics:
        res, ui = chmlib.chm_resolve_object(self.file, self.topics)
        if (res != chmlib.CHM_RESOLVE_SUCCESS):
            return None
    # `0l` (Python 2 long literal) is a syntax error on Python 3; plain
    # ints are unbounded, so 0 is equivalent.
    size, text = chmlib.chm_retrieve_object(self.file, ui, 0, ui.length)
    if (size == 0):
        sys.stderr.write('GetTopicsTree: file size = 0\n')
        return None
    return text
def print_images(self, *printable_images):
    """This method allows printing several images in one shot.

    This is useful if the client code does not want the printer to make
    pauses during printing.

    NOTE(review): relies on ``reduce`` being in scope (builtin on Python 2,
    ``functools.reduce`` on Python 3) and assumes ``append`` returns the
    combined image — verify both against the PrintableImage class.
    """
    printable_image = reduce(lambda x, y: x.append(y), list(printable_images))
    self.print_image(printable_image)
def delete_rows(self, condition, info_str=None):
    """delete all rows with condition==True
    inplace

    Parameters
    ----------
    condition : pandas DataFrame indexer
        all self.df rows that meet this condition will be deleted
    info_str : str
        description of the kind of rows to be deleted,
        e.g "specimen rows with blank method codes"

    Returns
    --------
    df_data : pandas DataFrame
        updated self.df
    """
    # Record each row's positional number so deletions can be applied by
    # position even as the frame shrinks.
    self.df['num'] = list(range(len(self.df)))
    df_data = self.df
    if len(df_data[condition]) > 0:
        inds = df_data[condition]['num']
        # Delete from the bottom up so earlier positions stay valid.
        for ind in inds[::-1]:
            df_data = self.delete_row(ind)
            if info_str:
                print("-I- Deleting {}. ".format(info_str), end=' ')
                print('deleting row {}'.format(str(ind)))
    # Restore index order and refresh the positional numbers.
    df_data.sort_index(inplace=True)
    df_data['num'] = list(range(len(df_data)))
    self.df = df_data
    return df_data
def _parseStats(self, lines, parse_slabs = False):
    """Parse stats output from memcached and return dictionary of stats-

    @param lines: Array of lines of input text.
    @param parse_slabs: Parse slab stats if True.
    @return: Stats dictionary.
    """
    info_dict = {}
    info_dict['slabs'] = {}
    for line in lines:
        # Plain "STAT <name> <value>" lines (raw strings for the regexes).
        mobj = re.match(r'^STAT\s(\w+)\s(\S+)$', line)
        if mobj:
            info_dict[mobj.group(1)] = util.parse_value(mobj.group(2), True)
            continue
        elif parse_slabs:
            # Per-slab lines like "STAT items:3:number 42" or "STAT 3:chunk_size 96".
            mobj = re.match(r'STAT\s(\w+:)?(\d+):(\w+)\s(\S+)$', line)
            if mobj:
                (slab, key, val) = mobj.groups()[-3:]
                # dict.has_key() was removed in Python 3; use `in` instead.
                if slab not in info_dict['slabs']:
                    info_dict['slabs'][slab] = {}
                info_dict['slabs'][slab][key] = util.parse_value(val, True)
    return info_dict
def is_default_port(self):
    """A check for default port.

    Return True if port is default for specified scheme,
    e.g. 'http://python.org' or 'http://python.org:80', False
    otherwise.
    """
    if self.port is None:
        return False
    # DEFAULT_PORTS.get returns None for unknown schemes, and
    # self.port is known to be non-None here, so the comparison is
    # False for unknown schemes — same as the explicit guard.
    return self.port == DEFAULT_PORTS.get(self.scheme)
def y_axis_transform(compound, new_origin=None,
                     point_on_y_axis=None,
                     point_on_xy_plane=None):
    """Move a compound such that the y-axis lies on specified points.

    Parameters
    ----------
    compound : mb.Compound
        The compound to move.
    new_origin : mb.Compound or like-like of size 3, optional, default=[0.0, 0.0, 0.0]
        Where to place the new origin of the coordinate system.
    point_on_y_axis : mb.Compound or list-like of size 3, optional, default=[0.0, 1.0, 0.0]
        A point on the new y-axis.
    point_on_xy_plane : mb.Compound or list-like of size 3, optional, default=[0.0, 1.0, 0.0]
        A point on the new xy-plane.
    """
    # Align the target points as if they defined the x-axis, then rotate
    # 90 degrees about z so the aligned axis becomes the y-axis.
    x_axis_transform(compound, new_origin=new_origin,
                     point_on_x_axis=point_on_y_axis,
                     point_on_xy_plane=point_on_xy_plane)
    rotate_around_z(compound, np.pi / 2)
def create_account(
    self,
    balance=0,
    address=None,
    concrete_storage=False,
    dynamic_loader=None,
    creator=None,
) -> Account:
    """Create non-contract account.

    :param address: The account's address; a new one is generated from
        ``creator`` when not supplied.
    :param balance: Initial balance for the account.
    :param concrete_storage: Interpret account storage as concrete.
    :param dynamic_loader: used for dynamically loading storage from the block chain.
    :param creator: creator address used when deriving a new address.
    :return: The new account.
    """
    address = address if address else self._generate_new_address(creator)
    new_account = Account(
        address,
        balance=balance,
        dynamic_loader=dynamic_loader,
        concrete_storage=concrete_storage,
    )
    # Register the account in the world state.
    self._put_account(new_account)
    return new_account
def list_editors(self, node=None):
    """Returns the Model editors.

    :param node: Node to start walking from.
    :type node: AbstractNode or AbstractCompositeNode or Object
    :return: Editors.
    :rtype: list
    """
    editors = []
    for editor_node in self.list_editor_nodes(node):
        if editor_node.editor:
            editors.append(editor_node.editor)
    return editors
def parse_extension_item_param(
    header: str, pos: int, header_name: str
) -> Tuple[ExtensionParameter, int]:
    """Parse a single extension parameter from ``header`` at the given position.

    Return a ``(name, value)`` pair and the new position.

    Raise :exc:`~websockets.exceptions.InvalidHeaderFormat` on invalid inputs.
    """
    # Extract the parameter name.
    name, pos = parse_token(header, pos, header_name)
    pos = parse_OWS(header, pos)
    # Extract the parameter value, if there is one.
    value: Optional[str] = None
    if peek_ahead(header, pos) == "=":
        pos = parse_OWS(header, pos + 1)
        if peek_ahead(header, pos) == '"':
            pos_before = pos  # for error reporting below
            value, pos = parse_quoted_string(header, pos, header_name)
            # The unquoted value must itself be a valid token.
            if _token_re.fullmatch(value) is None:
                raise InvalidHeaderFormat(
                    header_name, "invalid quoted header content", header, pos_before
                )
        else:
            value, pos = parse_token(header, pos, header_name)
        pos = parse_OWS(header, pos)
    return (name, value), pos
def find_similar(self, *args, **kwargs):
    """Find similar articles.

    With autosession off, use the index state *before* current session
    started, so that changes made in the session will not be visible here.
    With autosession on, close the current session first (so that session
    changes *are* committed and visible).
    """
    if self.session is not None and self.autosession:
        # Commit first so in-session changes become searchable.
        self.commit()
    return self.stable.find_similar(*args, **kwargs)
def set_thread_params(
        self, enable=None, count=None, count_offload=None, stack_size=None, no_wait=None):
    """Sets threads related params.

    :param bool enable: Enable threads in the embedded languages.
        This will allow to spawn threads in your app.

        .. warning:: Threads will simply *not work* if this option is not
            enabled. There will likely be no error, just no execution of
            your thread code.

    :param int count: Run each worker in prethreaded mode with the specified
        number of threads per worker.

        .. warning:: Do not use with ``gevent``.

        .. note:: Enables threads automatically.

    :param int count_offload: Set the number of threads (per-worker) to
        spawn for offloading. Default: 0.

        .. note:: Try to set it to the number of CPU cores to take
            advantage of SMP.

    :param int stack_size: Set threads stacksize.

    :param bool no_wait: Do not wait for threads cancellation on quit/reload.
    """
    self._set('enable-threads', enable, cast=bool)
    self._set('no-threads-wait', no_wait, cast=bool)
    self._set('threads', count)
    self._set('offload-threads', count_offload)
    if count:
        # Surface the effective per-worker thread count in the config output.
        self._section.print_out('Threads per worker: %s' % count)
    self._set('threads-stacksize', stack_size)
    return self._section
def scientific(number, operation, number2=None, logbase=10):
    """Solve scientific operations manually.

    :param number: primary operand.
    :param operation: operation name ('log', 'acos', 'asin', 'atan',
        'cos', 'hypot', 'sin', 'tan').
    :param number2: second operand, used only by 'hypot'.
    :param logbase: logarithm base, used only by 'log' (default 10).
    :return: the computed value, or None for an unknown operation.
    """
    if operation == 'log':
        return math.log(number, logbase)
    if operation == 'hypot':
        return math.hypot(number, number2)
    # Remaining operations all take a single argument; dispatch via table.
    unary = {
        'acos': math.acos,
        'asin': math.asin,
        'atan': math.atan,
        'cos': math.cos,
        'sin': math.sin,
        'tan': math.tan,
    }
    func = unary.get(operation)
    if func is not None:
        return func(number)
def get_rotation_program(pauli_term: PauliTerm) -> Program:
    """Generate a rotation program so that the pauli term is diagonal.

    :param pauli_term: The Pauli term used to generate diagonalizing
        one-qubit rotations.
    :return: The rotation program.
    :raises ValueError: if the term contains an operator other than X, Y, Z.
    """
    meas_basis_change = Program()
    for index, gate in pauli_term:
        if gate == 'X':
            # Rotate the X eigenbasis into the computational (Z) basis.
            meas_basis_change.inst(RY(-np.pi / 2, index))
        elif gate == 'Y':
            # Rotate the Y eigenbasis into the computational (Z) basis.
            meas_basis_change.inst(RX(np.pi / 2, index))
        elif gate == 'Z':
            # Already diagonal; nothing to do.
            pass
        else:
            # Previously raised a bare ValueError(); name the offender.
            raise ValueError("Unsupported Pauli operator: {!r}".format(gate))

    return meas_basis_change
def clean_perms(self):
    """FAB leaves faulty permissions that need to be cleaned up."""
    logging.info('Cleaning faulty perms')
    sesh = self.get_session
    # Find permission-view rows that lost either side of the relation.
    # `== None` is intentional here: SQLAlchemy translates it to IS NULL.
    pvms = (
        sesh.query(ab_models.PermissionView)
        .filter(or_(
            ab_models.PermissionView.permission == None,
            ab_models.PermissionView.view_menu == None,
        ))
    )
    deleted_count = pvms.delete()
    sesh.commit()
    if deleted_count:
        logging.info('Deleted {} faulty permissions'.format(deleted_count))
def attempt_dev_link_via_import(self, egg):
    """Create egg-link to FS location if an egg is found through importing.

    Sometimes an egg *is* installed, but without a proper egg-info file.
    So we attempt to import the egg in order to return a link anyway.

    TODO: currently it only works with simple package names like
    "psycopg2" and "mapnik".

    :param egg: package name to try importing.
    :return: True when an egg-link file was written, otherwise None.
    """
    try:
        imported = __import__(egg)
    except ImportError:
        self.logger.warn("Tried importing '%s', but that also didn't work.", egg)
        self.logger.debug("For reference, sys.path is %s", sys.path)
        return
    self.logger.info("Importing %s works, however", egg)
    try:
        probable_location = os.path.dirname(imported.__file__)
    except Exception:
        # Was a bare `except:`, which would also swallow KeyboardInterrupt
        # and SystemExit; Exception is broad enough for the logged fallback.
        self.logger.exception("Determining the location failed, however")
        return
    filesystem_egg_link = os.path.join(
        self.dev_egg_dir,
        '%s.egg-link' % egg)
    # Context manager guarantees the handle is closed even on write errors.
    with open(filesystem_egg_link, 'w') as f:
        f.write(probable_location)
    self.logger.info('Using sysegg %s for %s', probable_location, egg)
    self.added.append(filesystem_egg_link)
    return True
def authorize_url(self, state=''):
    """return user authorize url"""
    base = 'https://openapi.youku.com/v2/oauth2/authorize?'
    query = urlencode({
        'client_id': self.client_id,
        'response_type': 'code',
        'state': state,
        'redirect_uri': self.redirect_uri
    })
    return base + query
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.