code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def delete(community):
    """Delete a community."""
    deleteform = DeleteCommunityForm(formdata=request.values)
    ctx = mycommunities_ctx()
    ctx.update({
        'deleteform': deleteform,
        'is_new': False,
        'community': community,
    })
    # Guard clause: failed validation bounces back to the edit page.
    if not deleteform.validate_on_submit():
        flash("Community could not be deleted.", category='warning')
        return redirect(url_for('.edit', community_id=community.id))
    community.delete()
    db.session.commit()
    flash("Community was deleted.", category='success')
    return redirect(url_for('.index'))
def write_log(self, message):
    """Proxy method for GeneralLogger."""
    # Write only while a stream is attached and logging has not ended.
    if self.ended or not self.stream_log:
        return True
    self.stream_log.write(message)
    return True
def convert(cls, tree):
    """Convert a tree between different subtypes of Tree. ``cls`` determines
    which class will be used to encode the new tree.

    :type tree: Tree
    :param tree: The tree that should be converted.
    :return: The new Tree.
    """
    # Leaves (non-Tree values) pass through unchanged.
    if not isinstance(tree, Tree):
        return tree
    # Convert children first, then rebuild the node, carrying over
    # whichever extra attributes the concrete subtype stores.
    converted_children = [cls.convert(child) for child in tree]
    if isinstance(tree, MetricalTree):
        return cls(tree._cat, converted_children, tree._dep, tree._lstress)
    if isinstance(tree, DependencyTree):
        return cls(tree._cat, converted_children, tree._dep)
    return cls(tree._label, converted_children)
def create_report(self):
    """Generate json dumped report for coveralls api."""
    data = self.create_data()
    try:
        json_string = json.dumps(data)
    except UnicodeDecodeError as e:
        log.error('ERROR: While preparing JSON:', exc_info=e)
        self.debug_bad_encoding(data)
        raise
    # Never log the real repo token.
    redacted = re.sub(r'"repo_token": "(.+?)"',
                      '"repo_token": "[secure]"', json_string)
    log.debug(redacted)
    source_files = data['source_files']
    log.debug('==\nReporting %s files\n==\n', len(source_files))
    for sf in source_files:
        covered = sum(filter(None, sf['coverage']))
        log.debug('%s - %s/%s', sf['name'], covered, len(sf['coverage']))
    return json_string
def count_init(self):
    """Compute the counts at the first time step

    Returns
    -------
    n : ndarray(nstates)
        n[i] is the number of trajectories starting in state i
    """
    if self.hidden_state_trajectories is None:
        raise RuntimeError('HMM model does not have a hidden state trajectory.')
    first_states = [traj[0] for traj in self.hidden_state_trajectories]
    return np.bincount(first_states, minlength=self.nstates)
def f_inv(self, z, max_iterations=250, y=None):
    """Calculate the numerical inverse of f via Newton-Raphson iteration.

    This should be overwritten for specific warping functions where the
    inverse can be found in closed form.

    :param z: values whose preimage under f is wanted
    :param max_iterations: maximum number of N.R. iterations
    :param y: optional initial guess; defaults to an array of ones.
        (Bug fix: the original accepted ``y`` but unconditionally
        overwrote it with ones.)
    """
    z = z.copy()
    if y is None:
        y = np.ones_like(z)
    it = 0
    update = np.inf
    # Iterate until the total correction is negligible or we give up.
    while np.abs(update).sum() > 1e-10 and it < max_iterations:
        fy = self.f(y)
        fgrady = self.fgrad_y(y)
        update = (fy - z) / fgrady
        y -= self.rate * update
        it += 1
    return y
def get_error(self):
    """Retrieve error data."""
    # Column offset of the offending node, when available.
    col_offset = -1
    if self.node is not None:
        col_offset = getattr(self.node, 'col_offset', -1)
    try:
        exc_name = self.exc.__name__
    except AttributeError:
        exc_name = str(self.exc)
    if exc_name in (None, 'None'):
        exc_name = 'UnknownError'
    lines = [" %s" % self.expr]
    if col_offset > 0:
        # Point a caret marker at the failing column.
        lines.append(" %s^^^" % ((col_offset)*' '))
    lines.append(str(self.msg))
    return (exc_name, '\n'.join(lines))
def rAsciiLine(ifile):
    """Returns the next non-blank line in an ASCII file.

    Returns an empty string at end of file. (Bug fix: the original
    looped forever at EOF, because readline() then returns '' on every
    call and the stripped length stayed 0.)
    """
    _line = ifile.readline()
    # Skip blank lines, but stop when readline() signals EOF with ''.
    while _line and not _line.strip():
        _line = ifile.readline()
    return _line.strip()
def mime_type(self, type_: Optional[MimeType] = None) -> str:
    """Get a random mime type from list.

    :param type_: Enum object MimeType.
    :return: Mime type.
    """
    # Validate the enum value (or pick a random category when None).
    key = self._validate_enum(item=type_, enum=MimeType)
    return self.random.choice(MIME_TYPES[key])
def inverse(self):
    """Returns the inverse bijection."""
    flipped = self.__class__()
    # Swap the direction maps; shallow copies, as in the original.
    flipped.forward, flipped.reverse = copy.copy(self.reverse), copy.copy(self.forward)
    return flipped
def imports(symbol_file, input_names, param_file=None, ctx=None):
    # Load the serialized symbol graph from disk.
    sym = symbol.load(symbol_file)
    # Accept a single input name as a convenience.
    if isinstance(input_names, str):
        input_names = [input_names]
    inputs = [symbol.var(i) for i in input_names]
    ret = SymbolBlock(sym, inputs)
    # Parameters are optional; when given, load them onto ctx.
    if param_file is not None:
        ret.collect_params().load(param_file, ctx=ctx)
    return ret | Import model previously saved by `HybridBlock.export` or
`Module.save_checkpoint` as a SymbolBlock for use in Gluon.
Parameters
----------
symbol_file : str
Path to symbol file.
input_names : list of str
List of input variable names
param_file : str, optional
Path to parameter file.
ctx : Context, default None
The context to initialize SymbolBlock on.
Returns
-------
SymbolBlock
SymbolBlock loaded from symbol and parameter files.
Examples
--------
>>> net1 = gluon.model_zoo.vision.resnet18_v1(
... prefix='resnet', pretrained=True)
>>> net1.hybridize()
>>> x = mx.nd.random.normal(shape=(1, 3, 32, 32))
>>> out1 = net1(x)
>>> net1.export('net1', epoch=1)
>>>
>>> net2 = gluon.SymbolBlock.imports(
... 'net1-symbol.json', ['data'], 'net1-0001.params')
>>> out2 = net2(x) |
def list_roles(self, mount_point=DEFAULT_MOUNT_POINT):
    """List all the roles that are registered with the plugin.

    Supported methods:
        LIST: /auth/{mount_point}/roles. Produces: 200 application/json

    :param mount_point: The "path" the azure auth method was mounted on.
    :type mount_point: str | unicode
    :return: The "data" key from the JSON response of the request.
    :rtype: dict
    """
    api_path = '/v1/auth/{mount_point}/roles'.format(mount_point=mount_point)
    response = self._adapter.list(url=api_path)
    return response.json().get('data')
def takeoff(self):
    """Sends the takeoff command."""
    # A REF command with the 'start' input bit set triggers takeoff.
    self.send(at.REF(at.REF.input.start))
def getStringTrackedDeviceProperty(self, unDeviceIndex, prop):
    fn = self.function_table.getStringTrackedDeviceProperty
    pError = ETrackedPropertyError()
    # First call with a NULL buffer only queries the required length.
    unRequiredBufferLen = fn( unDeviceIndex, prop, None, 0, byref(pError) )
    if unRequiredBufferLen == 0:
        return b""
    pchBuffer = ctypes.create_string_buffer(unRequiredBufferLen)
    # Second call fills the allocated buffer for real.
    fn( unDeviceIndex, prop, pchBuffer, unRequiredBufferLen, byref(pError) )
    if pError.value != TrackedProp_Success:
        raise OpenVRError(str(pError))
    sResult = bytes(pchBuffer.value)
    return sResult | Returns a string property. If the device index is not valid or the property is not a string type this function will
return 0. Otherwise it returns the length of the number of bytes necessary to hold this string including the trailing
null. Strings will always fit in buffers of k_unMaxPropertyStringSize characters. |
def show(self, username):
    """Return a specific user's info in LDIF format."""
    # Search for POSIX accounts matching the given uid.
    criteria = ['(objectclass=posixAccount)', "(uid={})".format(username)]
    return self.client.search(criteria)
def split_arg_string(string):
    """Given an argument string this attempts to split it into small parts."""
    parts = []
    # Either a single-quoted run, a double-quoted run, or bare non-space.
    token_re = (r"('([^'\\]*(?:\\.[^'\\]*)*)'"
                r'|"([^"\\]*(?:\\.[^"\\]*)*)"'
                r'|\S+)\s*')
    for match in re.finditer(token_re, string, re.S):
        arg = match.group().strip()
        if arg[:1] == arg[-1:] and arg[:1] in '"\'':
            # Strip matching quotes and undo backslash escapes.
            arg = arg[1:-1].encode('ascii', 'backslashreplace') \
                .decode('unicode-escape')
        try:
            arg = type(string)(arg)
        except UnicodeError:
            pass
        parts.append(arg)
    return parts
def batch_persist(dfs, tables, *args, **kwargs):
    """Persist multiple DataFrames into ODPS.

    :param dfs: DataFrames to persist.
    :param tables: Table names to persist to. Use (table, partition) tuple to store to a table partition.
    :param args: args for Expr.persist
    :param kwargs: kwargs for Expr.persist

    :Examples:
    >>> DataFrame.batch_persist([df1, df2], ['table_name1', ('table_name2', 'partition_name2')], lifecycle=1)
    """
    from .delay import Delay
    # Accept the legacy 'async' spelling, but pop it so the alias is not
    # also forwarded to Expr.persist (the original left 'async' in kwargs,
    # where it leaked into persist_kw).
    if 'async' in kwargs:
        kwargs['async_'] = kwargs.pop('async')
    execute_keys = ('ui', 'async_', 'n_parallel', 'timeout', 'close_and_notify')
    execute_kw = dict((k, v) for k, v in six.iteritems(kwargs) if k in execute_keys)
    persist_kw = dict((k, v) for k, v in six.iteritems(kwargs) if k not in execute_keys)
    # A shared Delay batches all persists into one deferred execution.
    delay = Delay()
    persist_kw['delay'] = delay
    for df, table in izip(dfs, tables):
        if isinstance(table, tuple):
            table, partition = table
        else:
            partition = None
        df.persist(table, partition=partition, *args, **persist_kw)
    return delay.execute(**execute_kw)
def delete(self, del_id):
    """Delete the id."""
    # Report success/failure of the deletion as a JSON flag.
    ok = MReply2User.delete(del_id)
    output = {'del_zan': 1 if ok else 0}
    return json.dump(output, self)
def latlon_round(latlon, spacing=1000):
    """round to nearest grid corner"""
    grid = latlon_to_grid(latlon)
    # Snap both coordinates down to a multiple of `spacing`.
    grid.easting = (grid.easting // spacing) * spacing
    grid.northing = (grid.northing // spacing) * spacing
    return grid.latlon()
def load_transactions(input_file, **kwargs):
    """Load transactions and returns a generator for transactions.

    Arguments:
        input_file -- An input file.

    Keyword arguments:
        delimiter -- The delimiter of the transaction.
    """
    delimiter = kwargs.get('delimiter', '\t')
    for transaction in csv.reader(input_file, delimiter=delimiter):
        # csv.reader yields [] for blank lines; normalize to [''].
        yield transaction or ['']
def update(self):
    """Called before matching and building to keep the compiled rules
    in the correct order after things changed.
    """
    if not self._remap:
        return
    with self._remap_lock:
        # Re-check under the lock: another thread may have remapped already.
        if not self._remap:
            return
        self._rules.sort(key=lambda rule: rule.match_compare_key())
        for endpoint_rules in itervalues(self._rules_by_endpoint):
            endpoint_rules.sort(key=lambda rule: rule.build_compare_key())
        self._remap = False
def DeleteCampaignFeed(client, campaign_feed):
    """Deletes a campaign feed.

    Args:
        client: an AdWordsClient instance.
        campaign_feed: the campaign feed to delete.
    """
    service = client.GetService('CampaignFeedService', 'v201809')
    # A single REMOVE mutate operation deletes the feed.
    service.mutate([{'operand': campaign_feed, 'operator': 'REMOVE'}])
def ruamel_structure(data, validator=None):
    # NOTE(review): `validator` is accepted but never used here, and the
    # recursive calls below do not forward it -- confirm intent.
    if isinstance(data, dict):
        if len(data) == 0:
            raise exceptions.CannotBuildDocumentsFromEmptyDictOrList(
                "Document must be built with non-empty dicts and lists"
            )
        return CommentedMap(
            [
                (ruamel_structure(key), ruamel_structure(value))
                for key, value in data.items()
            ]
        )
    elif isinstance(data, list):
        if len(data) == 0:
            raise exceptions.CannotBuildDocumentsFromEmptyDictOrList(
                "Document must be built with non-empty dicts and lists"
            )
        return CommentedSeq([ruamel_structure(item) for item in data])
    # bool must be tested before int/float: bool is an int subclass.
    elif isinstance(data, bool):
        return u"yes" if data else u"no"
    elif isinstance(data, (int, float)):
        return str(data)
    else:
        if not is_string(data):
            raise exceptions.CannotBuildDocumentFromInvalidData(
                (
                    "Document must be built from a combination of:\n"
                    "string, int, float, bool or nonempty list/dict\n\n"
                    "Instead, found variable with type '{}': '{}'"
                ).format(type(data).__name__, data)
            )
        return data | Take dicts and lists and return a ruamel.yaml style
structure of CommentedMaps, CommentedSeqs and
data.
If a validator is presented and the type is unknown,
it is checked against the validator to see if it will
turn it back in to YAML. |
def save(self, path="speech"):
    """Save data in file.

    Args:
        path (optional): A path to save file. Defaults to "speech".
            File extension is optional. Absolute path is allowed.

    Returns:
        The path to the saved file.
    """
    if self._data is None:
        raise Exception("There's nothing to save")
    extension = "." + self.__params["format"]
    # Append the format extension unless the path already carries it.
    if os.path.splitext(path)[1] != extension:
        path += extension
    with open(path, "wb") as out:
        out.writelines(self._data)
    return path
def date_time_between_dates(
        self,
        datetime_start=None,
        datetime_end=None,
        tzinfo=None):
    # Default both endpoints to "now", so a missing bound collapses the
    # interval to the current instant.
    if datetime_start is None:
        datetime_start = datetime.now(tzinfo)
    if datetime_end is None:
        datetime_end = datetime.now(tzinfo)
    # Pick a random POSIX timestamp inside the closed interval.
    timestamp = self.generator.random.randint(
        datetime_to_timestamp(datetime_start),
        datetime_to_timestamp(datetime_end),
    )
    try:
        if tzinfo is None:
            # Build an aware local datetime first, then strip to naive UTC.
            pick = datetime.fromtimestamp(timestamp, tzlocal())
            pick = pick.astimezone(tzutc()).replace(tzinfo=None)
        else:
            pick = datetime.fromtimestamp(timestamp, tzinfo)
    except OverflowError:
        raise OverflowError(
            "You specified an end date with a timestamp bigger than the maximum allowed on this"
            " system. Please specify an earlier date.",
        )
    return pick | Takes two DateTime objects and returns a random datetime between the two
given datetimes.
Accepts DateTime objects.
:param datetime_start: DateTime
:param datetime_end: DateTime
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('1999-02-02 11:42:52')
:return DateTime |
def get_queryset(self):
    """Implements date range filtering on ``created_at``"""
    filters = {}
    # Only constrain the lower bound when a start date is configured.
    if self.start_at:
        filters['%s__gte' % self.date_field] = self.start_at
    return super(DateRangeMixin, self).get_queryset().filter(**filters)
def pair_tree_creator(meta_id):
    """Splits string into a pairtree path."""
    # Two characters per path component; a trailing odd character
    # becomes its own component.
    chunks = [meta_id[i:i + 2] for i in range(0, len(meta_id), 2)]
    return os.sep + os.sep.join(chunks) + os.sep
def _init_predictor(self, input_shapes, type_dict=None):
    # Combine known parameter shapes with the caller-supplied input shapes.
    shapes = {name: self.arg_params[name].shape for name in self.arg_params}
    shapes.update(dict(input_shapes))
    if self._pred_exec is not None:
        arg_shapes, _, _ = self.symbol.infer_shape(**shapes)
        assert arg_shapes is not None, "Incomplete input shapes"
        pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
        # Reuse the existing executor when shapes are unchanged.
        if arg_shapes == pred_shapes:
            return
    # (Re)bind a forward-only executor and copy parameters into it.
    pred_exec = self.symbol.simple_bind(
        self.ctx[0], grad_req='null', type_dict=type_dict, **shapes)
    pred_exec.copy_params_from(self.arg_params, self.aux_params)
    _check_arguments(self.symbol)
    self._pred_exec = pred_exec | Initialize the predictor module for running prediction. |
def get_image(path, search_path):
    # '@'-prefixed paths are static assets served as-is.
    if path.startswith('@'):
        return StaticImage(path[1:], search_path)
    # Protocol-relative or fully-qualified URLs are remote images.
    if path.startswith('//') or '://' in path:
        return RemoteImage(path, search_path)
    if os.path.isabs(path):
        # Absolute paths are resolved relative to the content folder.
        file_path = utils.find_file(os.path.relpath(
            path, '/'), config.content_folder)
    else:
        file_path = utils.find_file(path, search_path)
    if not file_path:
        return ImageNotFound(path, search_path)
    record = _get_asset(file_path)
    if record.is_asset:
        return FileAsset(record, search_path)
    return LocalImage(record, search_path) | Get an Image object. If the path is given as absolute, it will be
relative to the content directory; otherwise it will be relative to the
search path.
path -- the image's filename
search_path -- a search path for the image (string or list of strings) |
async def get(self, key, default=None, loads_fn=None, namespace=None, _conn=None):
    start = time.monotonic()
    # Allow callers to override the deserializer per call.
    loads = loads_fn or self._serializer.loads
    ns_key = self.build_key(key, namespace=namespace)
    value = loads(await self._get(ns_key, encoding=self.serializer.encoding, _conn=_conn))
    logger.debug("GET %s %s (%.4f)s", ns_key, value is not None, time.monotonic() - start)
    return value if value is not None else default | Get a value from the cache. Returns default if not found.
:param key: str
:param default: obj to return when key is not found
:param loads_fn: callable alternative to use as loads function
:param namespace: str alternative namespace to use
:param timeout: int or float in seconds specifying maximum timeout
for the operations to last
:returns: obj loaded
:raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout |
def unique(enumeration):
    """Class decorator that ensures only unique members exist in an enumeration."""
    # An alias is any name bound to a member with a different canonical name.
    aliases = [(name, member.name)
               for name, member in enumeration.__members__.items()
               if name != member.name]
    if aliases:
        listing = ', '.join("%s -> %s" % (alias, name) for alias, name in aliases)
        raise ValueError('duplicate names found in %r: %s' %
                         (enumeration, listing))
    return enumeration
def _gen_delta_per_sec(self, path, value_delta, time_delta, multiplier,
prettyname, device):
if time_delta < 0:
return
value = (value_delta / time_delta) * multiplier
if value > 0.0:
self._replace_and_publish(path, prettyname, value, device) | Calulates the difference between to point, and scales is to per second. |
def files_info(self, area_uuid, file_list):
    """Get information about files.

    :param str area_uuid: A RFC4122-compliant ID for the upload area
    :param list file_list: The names the files in the Upload Area about which we want information
    :return: an array of file information dicts
    :rtype: list of dicts
    :raises UploadApiException: if information could not be obtained
    """
    path = "/area/{uuid}/files_info".format(uuid=area_uuid)
    # URL-quote each filename before shipping it in the JSON body.
    quoted_names = [urlparse.quote(filename) for filename in file_list]
    response = self._make_request('put', path=path, json=quoted_names)
    return response.json()
def serialize(self, include_class=True, save_dynamic=False, **kwargs):
    # The registry maps uid -> serialized dict; it is threaded through
    # recursive serialize() calls so shared instances are written once.
    registry = kwargs.pop('registry', None)
    if registry is None:
        registry = dict()
    if not registry:
        # First call in the recursion: remember which uid is the root.
        root = True
        registry.update({'__root__': self.uid})
    else:
        root = False
    key = self.uid
    if key not in registry:
        # Reserve the slot before recursing to break reference cycles.
        registry.update({key: None})
        registry.update({key: super(HasUID, self).serialize(
            registry=registry,
            include_class=include_class,
            save_dynamic=save_dynamic,
            **kwargs
        )})
    # The root call returns the whole registry; nested calls return the key.
    if root:
        return registry
    return key | Serialize nested HasUID instances to a flat dictionary
**Parameters**:
* **include_class** - If True (the default), the name of the class
will also be saved to the serialized dictionary under key
:code:`'__class__'`
* **save_dynamic** - If True, dynamic properties are written to
the serialized dict (default: False).
* You may also specify a **registry** - This is the flat dictionary
where UID/HasUID pairs are stored. By default, no registry need
be provided; a new dictionary will be created.
* Any other keyword arguments will be passed through to the Property
serializers. |
def set_maxrad(self,newrad):
    # Coerce bare numbers to a Quantity in arcseconds.
    if not isinstance(newrad, Quantity):
        newrad = newrad * u.arcsec
    # Only non-specific populations get the new radius.
    for pop in self.poplist:
        if not pop.is_specific:
            try:
                pop.maxrad = newrad
            except AttributeError:
                # Population without a settable maxrad; skip it.
                pass | Sets max allowed radius in populations.
Doesn't operate via the :class:`stars.Constraint`
protocol; rather just rescales the sky positions
for the background objects and recalculates
sky area, etc. |
def get_family_nodes(self, family_id, ancestor_levels, descendant_levels, include_siblings):
    # Delegates to get_family_node_ids and wraps the resulting node's
    # map in a FamilyNode bound to this session's runtime/proxy.
    return objects.FamilyNode(self.get_family_node_ids(
        family_id=family_id,
        ancestor_levels=ancestor_levels,
        descendant_levels=descendant_levels,
        include_siblings=include_siblings)._my_map, runtime=self._runtime, proxy=self._proxy) | Gets a portion of the hierarchy for the given family.
arg: family_id (osid.id.Id): the ``Id`` to query
arg: ancestor_levels (cardinal): the maximum number of
ancestor levels to include. A value of 0 returns no
parents in the node.
arg: descendant_levels (cardinal): the maximum number of
descendant levels to include. A value of 0 returns no
children in the node.
arg: include_siblings (boolean): ``true`` to include the
siblings of the given node, ``false`` to omit the
siblings
return: (osid.relationship.FamilyNode) - a family node
raise: NotFound - ``family_id`` is not found
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
def FreqDist_chart (self):
    pdata = {}
    for idx, s_name in enumerate(self.tagdir_data['FreqDistribution']):
        pdata[s_name] = {}
        for x, y in self.tagdir_data['FreqDistribution'][s_name].items():
            try:
                # NOTE(review): math.log is the natural log, while the x-axis
                # label below says Log10 -- confirm which is intended.
                pdata[s_name][math.log(float(x))] = y
            except ValueError:
                # Non-numeric x or log of a non-positive value; drop the point.
                pass
    pconfig = {
        'id': 'FreqDistribution',
        'title': 'Frequency Distribution',
        'ylab': 'Fraction of Reads',
        'xlab': 'Log10(Distance between regions)',
        'data_labels': ['Reads', 'Percent'],
        'smooth_points': 500,
        'smooth_points_sumcounts': False,
        'yLog' : True
    }
    return linegraph.plot(pdata, pconfig) | Make the petag.FreqDistribution_1000 plot
def OnAdjustVolume(self, event):
    """Changes video volume"""
    current = self.player.audio_get_volume()
    rotation = event.GetWheelRotation()
    # Step by 10, clamped to the 0..200 range.
    if rotation < 0:
        current = max(0, current - 10)
    elif rotation > 0:
        current = min(200, current + 10)
    self.volume = current
    self.player.audio_set_volume(self.volume)
def peek(rlp, index, sedes=None):
    ll = decode_lazy(rlp)
    # A scalar index is treated as a one-step path.
    if not isinstance(index, Iterable):
        index = [index]
    # Walk down the nested structure one index at a time.
    for i in index:
        if isinstance(ll, Atomic):
            raise IndexError('Too many indices given')
        ll = ll[i]
    # Optionally deserialize the located element.
    if sedes:
        return sedes.deserialize(ll)
    else:
        return ll | Get a specific element from an rlp encoded nested list.
This function uses :func:`rlp.decode_lazy` and, thus, decodes only the
necessary parts of the string.
Usage example::
>>> import rlp
>>> rlpdata = rlp.encode([1, 2, [3, [4, 5]]])
>>> rlp.peek(rlpdata, 0, rlp.sedes.big_endian_int)
1
>>> rlp.peek(rlpdata, [2, 0], rlp.sedes.big_endian_int)
3
:param rlp: the rlp string
:param index: the index of the element to peek at (can be a list for
nested data)
:param sedes: a sedes used to deserialize the peeked at object, or `None`
if no deserialization should be performed
:raises: :exc:`IndexError` if `index` is invalid (out of range or too many
levels) |
def fake_KATCP_client_resource_container_factory(
        KATCPClientResourceContainerClass, fake_options, resources_spec,
        *args, **kwargs):
    # TODO noted upstream: allow_any_request currently behaves as if True.
    allow_any_request = fake_options.get('allow_any_request', False)
    class FakeKATCPClientResourceContainer(KATCPClientResourceContainerClass):
        def __init__(self, *args, **kwargs):
            # Maps escaped resource name -> fake resource manager.
            self.fake_client_resource_managers = {}
            super(FakeKATCPClientResourceContainer, self).__init__(*args, **kwargs)
        def client_resource_factory(self, res_spec, parent, logger):
            # Build the real resource first, then fake its class so the
            # container holds fakes instead of live clients.
            real_instance = (super(FakeKATCPClientResourceContainer, self)
                             .client_resource_factory(res_spec, parent, logger) )
            fkcr, fkcr_manager = fake_KATCP_client_resource_factory(
                real_instance.__class__, fake_options,
                res_spec, parent=self, logger=logger)
            self.fake_client_resource_managers[
                resource.escape_name(fkcr.name)] = fkcr_manager
            return fkcr
    fkcrc = FakeKATCPClientResourceContainer(resources_spec, *args, **kwargs)
    fkcrc_manager = FakeKATCPClientResourceContainerManager(fkcrc)
    return (fkcrc, fkcrc_manager) | Create a fake KATCPClientResourceContainer-like class and a fake-manager
Parameters
----------
KATCPClientResourceContainerClass : class
Subclass of :class:`katcp.resource_client.KATCPClientResourceContainer`
fake_options : dict
Options for the faking process. Keys:
allow_any_request : bool, default False
(TODO not implemented behaves as if it were True)
resources_spec, *args, **kwargs : passed to KATCPClientResourceContainerClass
A subclass of the passed-in KATCPClientResourceClassContainer is created that replaces the
KATCPClientResource child instances with fakes using fake_KATCP_client_resource_factory()
based on the KATCPClientResource class used by `KATCPClientResourceContainerClass`.
Returns
-------
(fake_katcp_client_resource_container, fake_katcp_client_resource_container_manager):
fake_katcp_client_resource_container : instance of faked subclass of
KATCPClientResourceContainerClass
fake_katcp_client_resource_manager : :class:`FakeKATCPClientResourceContainerManager`
instance
Bound to the `fake_katcp_client_resource_container` instance. |
def get_subspace(self, dims):
    """Extracts subspace from the reference of a list of variables in the inputs
    of the model.
    """
    subspace = []
    # Track each variable's starting dimension index in the model input.
    offset = 0
    for variable in self.space_expanded:
        if offset in dims:
            subspace.append(variable)
        offset += variable.dimensionality_in_model
    return subspace
async def open_websocket_client(sock: anyio.abc.SocketStream,
                                addr,
                                path: str,
                                headers: Optional[list] = None,
                                subprotocols: Optional[list] = None):
    """Create a websocket on top of a socket."""
    websocket = await create_websocket_client(
        sock, addr=addr, path=path, headers=headers, subprotocols=subprotocols)
    try:
        yield websocket
    finally:
        # Always close the websocket, even if the caller's body raised.
        await websocket.close()
def _linux_nqn():
    """Return NVMe NQN from a Linux host."""
    hostnqn_path = '/etc/nvme/hostnqn'
    nqns = []
    try:
        with salt.utils.files.fopen(hostnqn_path, 'r') as handle:
            # Keep only lines that look like NQN identifiers.
            nqns.extend(line.strip() for line in handle
                        if line.strip().startswith('nqn.'))
    except IOError as ex:
        # A missing file is normal; anything else is worth logging.
        if ex.errno != errno.ENOENT:
            log.debug("Error while accessing '%s': %s", hostnqn_path, ex)
    return nqns
def get_all_instances(include_fastboot=False):
    """Create AndroidDevice instances for all attached android devices.

    Args:
        include_fastboot: Whether to include devices in bootloader mode or not.

    Returns:
        A list of AndroidDevice objects each representing an android device
        attached to the computer.
    """
    serials = list_adb_devices()
    if include_fastboot:
        # Bootloader-mode devices are appended after the adb ones.
        serials = serials + list_fastboot_devices()
    return get_instances(serials)
def _convert_markup_basic(self, soup):
    # Declare UTF-8 so the rendered HTML survives non-ASCII content.
    meta = soup.new_tag('meta', charset='UTF-8')
    soup.insert(0, meta)
    # Inject the MathJax loader plus surrounding style/script scaffolding.
    css = "".join([
        INSTRUCTIONS_HTML_INJECTION_PRE,
        self._mathjax_cdn_url,
        INSTRUCTIONS_HTML_INJECTION_AFTER])
    css_soup = BeautifulSoup(css)
    soup.append(css_soup)
    # Rewrite custom markup tags into their closest HTML equivalents.
    while soup.find('text'):
        soup.find('text').name = 'p'
    while soup.find('heading'):
        heading = soup.find('heading')
        heading.name = 'h%s' % heading.attrs.get('level', '1')
    while soup.find('code'):
        soup.find('code').name = 'pre'
    while soup.find('list'):
        list_ = soup.find('list')
        type_ = list_.attrs.get('bullettype', 'numbers')
        list_.name = 'ol' if type_ == 'numbers' else 'ul' | Perform basic conversion of instructions markup. This includes
replacement of several textual markup tags with their HTML equivalents.
@param soup: BeautifulSoup instance.
@type soup: BeautifulSoup |
def create_feature_map(features, feature_indices, output_dir):
    feature_map = []
    for name, info in feature_indices:
        transform_name = features[name]['transform']
        source_column = features[name]['source_column']
        if transform_name in [IDENTITY_TRANSFORM, SCALE_TRANSFORM]:
            # Numeric passthrough/scaled features occupy a single index.
            feature_map.append((info['index_start'], name))
        elif transform_name in [ONE_HOT_TRANSFORM, MULTI_HOT_TRANSFORM]:
            # Categorical features expand to one index per vocabulary entry.
            vocab, _ = read_vocab_file(
                os.path.join(output_dir, VOCAB_ANALYSIS_FILE % source_column))
            for i, word in enumerate(vocab):
                if transform_name == ONE_HOT_TRANSFORM:
                    feature_map.append((info['index_start'] + i, '%s=%s' % (source_column, word)))
                elif transform_name == MULTI_HOT_TRANSFORM:
                    feature_map.append((info['index_start'] + i, '%s has "%s"' % (source_column, word)))
        elif transform_name == IMAGE_TRANSFORM:
            # Image embeddings occupy `size` consecutive indices.
            for i in range(info['size']):
                feature_map.append((info['index_start'] + i, '%s image feature %d' % (source_column, i)))
    return feature_map | Returns feature_map about the transformed features.
feature_map includes information such as:
1, cat1=0
2, cat1=1
3, numeric1
...
Returns:
List in the from
[(index, feature_description)] |
def hasFeature(featureList, feature):
    """return the controlCode for a feature or None

    @param feature: feature to look for
    @param featureList: feature list as returned by L{getFeatureRequest()}
    @return: feature value or None
    """
    # Match either the raw control code or its symbolic Features name.
    for code, value in featureList:
        if code == feature or Features[code] == feature:
            return value
def update(self, other, **kwargs):
    """Version of ``update`` that doesn't clobber the database so much"""
    from itertools import chain
    if hasattr(other, 'items'):
        other = other.items()
    # Only write keys whose value is new or actually changed.
    for k, v in chain(other, kwargs.items()):
        if k not in self or self[k] != v:
            self[k] = v
def get_thermostat_state_by_name(self, name):
    """Retrieves a thermostat state object by its assigned name.

    :param name: The name of the thermostat state
    :return: The thermostat state object, or None when no state matches
    """
    self._validate_thermostat_state_name(name)
    # Case-insensitive lookup over the known states.
    wanted = name.lower()
    for state in self.thermostat_states:
        if state.name.lower() == wanted:
            return state
    return None
def handle_captcha(self, query_params: dict,
                   html: str,
                   login_data: dict) -> requests.Response:
    """Handling CAPTCHA request"""
    check_url = get_base_url(html)
    captcha_sid = query_params['sid']
    captcha_url = '{}?s={}&sid={}'.format(self.CAPTCHA_URI,
                                          query_params['s'],
                                          captcha_sid)
    login_data['captcha_sid'] = captcha_sid
    # Ask the user to solve the captcha interactively.
    login_data['captcha_key'] = input(self.CAPTCHA_INPUT_PROMPT
                                      .format(captcha_url))
    return self.post(check_url, login_data)
def channels_history(self, room_id, **kwargs):
    # Thin wrapper over the generic GET helper for channels.history.
    return self.__call_api_get('channels.history', roomId=room_id, kwargs=kwargs) | Retrieves the messages from a channel. |
def create_cluster_meta(cluster_groups):
    """Return a ClusterMeta instance with cluster group support."""
    meta = ClusterMeta()
    meta.add_field('group')
    groups = cluster_groups or {}
    # Seed the metadata with each cluster's group assignment.
    meta.from_dict({cluster: {'group': group}
                    for cluster, group in groups.items()})
    return meta
def convert_stress_to_mass(q, width, length, gravity):
    """Converts a foundation stress to an equivalent mass.

    :param q: applied stress [Pa]
    :param width: foundation width [m]
    :param length: foundation length [m]
    :param gravity: applied gravitational acceleration [m/s2]
    :return: equivalent mass
    """
    # force = stress * area; mass = force / g
    footprint_area = width * length
    return q * footprint_area / gravity
def is_instance_avg_req_latency_too_high(self, inst_id):
    avg_lat, avg_lat_others = self.getLatencies()
    # Without data for both sides there is nothing to compare.
    if not avg_lat or not avg_lat_others:
        return False
    d = avg_lat - avg_lat_others
    # Omega is the acceptable latency-difference threshold.
    if d < self.Omega:
        return False
    # Extra diagnostics when the flagged instance is the master.
    if inst_id == self.instances.masterId:
        logger.info("{}{} found difference between master's and "
                    "backups's avg latency {} to be higher than the "
                    "threshold".format(MONITORING_PREFIX, self, d))
        logger.trace(
            "{}'s master's avg request latency is {} and backup's "
            "avg request latency is {}".format(self, avg_lat, avg_lat_others))
    return True | Return whether the average request latency of an instance is
greater than the acceptable threshold |
def _infer_precision(base_precision, bins):
    """Infer an appropriate precision for _round_frac"""
    # Increase precision until rounding keeps every bin edge distinct.
    for precision in range(base_precision, 20):
        rounded = [_round_frac(edge, precision) for edge in bins]
        if algos.unique(rounded).size == bins.size:
            return precision
    # Fall back when even 19 digits cannot separate the edges.
    return base_precision
def gene_id_check(genes, errors, columns, row_number):
    """Validate gene identifiers against a known set.

    Parameters
    ----------
    genes : set
        The known set of gene identifiers.
    errors :
        Passed by goodtables.
    columns :
        Passed by goodtables.
    row_number :
        Passed by goodtables.
    """
    template = ("Gene '{value}' in column {col} and row {row} does not "
                "appear in the metabolic model.")
    for column in columns:
        if "gene" in column['header'] and column['value'] not in genes:
            # Bug fix: format into a fresh variable. The original assigned
            # the result back to the template, so every error after the
            # first repeated the first error's message.
            message = template.format(
                value=column['value'],
                row=row_number,
                col=column['number'])
            errors.append({
                'code': 'bad-value',
                'message': message,
                'row-number': row_number,
                'column-number': column['number'],
            })
def match_keyword(self, keyword, string_match_type=DEFAULT_STRING_MATCH_TYPE, match=True):
    # NOTE(review): the `match` flag is accepted but not used below --
    # negative matches appear unsupported here; confirm against the spec.
    match_value = self._get_string_match_value(keyword, string_match_type)
    # Keywords accumulate under $in for each searchable field (boolean OR).
    for field_name in self._keyword_fields:
        if field_name not in self._keyword_terms:
            self._keyword_terms[field_name] = {'$in': list()}
        self._keyword_terms[field_name]['$in'].append(match_value) | Adds a keyword to match.
Multiple keywords can be added to perform a boolean ``OR`` among
them. A keyword may be applied to any of the elements defined in
this object such as the display name, description or any method
defined in an interface implemented by this object.
arg: keyword (string): keyword to match
arg: string_match_type (osid.type.Type): the string match
type
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: InvalidArgument - ``keyword`` is not of
``string_match_type``
raise: NullArgument - ``keyword`` or ``string_match_type`` is
``null``
raise: Unsupported -
``supports_string_match_type(string_match_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.* |
def write_iocs(self, directory=None, source=None):
    # Default to the version-1.0 IOC mapping when no source is given.
    if not source:
        source = self.iocs_10
    if len(source) < 1:
        log.error('no iocs available to write out')
        return False
    if not directory:
        directory = os.getcwd()
    if os.path.isfile(directory):
        log.error('cannot writes iocs to a directory')
        return False
    # Drop IOCs that were pruned (fully or to null) during processing.
    source_iocs = set(source.keys())
    source_iocs = source_iocs.difference(self.pruned_11_iocs)
    source_iocs = source_iocs.difference(self.null_pruned_iocs)
    if not source_iocs:
        log.error('no iocs available to write out after removing pruned/null iocs')
        return False
    utils.safe_makedirs(directory)
    output_dir = os.path.abspath(directory)
    log.info('Writing IOCs to %s' % (str(output_dir)))
    for iocid in source_iocs:
        ioc_obj = source[iocid]
        ioc_obj.write_ioc_to_file(output_dir=output_dir, force=True)
    return True | Serializes IOCs to a directory.
:param directory: Directory to write IOCs to. If not provided, the current working directory is used.
:param source: Dictionary containing iocid -> IOC mapping. Defaults to self.iocs_10. This is not normally modified by a user for this class.
:return: |
def _group(tlist, cls, match,
           valid_prev=lambda t: True,
           valid_next=lambda t: True,
           post=None,
           extend=True,
           recurse=True
           ):
    """Group tokens that are joined by a middle token, i.e. ``x < y``.

    Scans ``tlist`` for tokens accepted by ``match`` and, when the
    neighbouring tokens pass ``valid_prev``/``valid_next``, collapses
    the span selected by ``post`` into a single ``cls`` group token.

    :param tlist: token list; mutated in place
    :param cls: group class to create for each match
    :param match: predicate selecting the "middle" token
    :param valid_prev: predicate applied to the token before the match
    :param valid_next: predicate applied to the token after the match
    :param post: callable ``(tlist, pidx, tidx, nidx) -> (from_idx, to_idx)``
        returning the index span to group
    :param extend: forwarded to ``group_tokens``
    :param recurse: descend into sub-groups (other than ``cls``) first
    """
    # Grouping shrinks tlist while we iterate a snapshot of it, so raw
    # enumerate() indices drift; this offset maps them back.
    tidx_offset = 0
    pidx, prev_ = None, None
    # Iterate a copy: tlist is mutated by group_tokens() below.
    for idx, token in enumerate(list(tlist)):
        tidx = idx - tidx_offset
        if token.is_whitespace:
            continue
        if recurse and token.is_group and not isinstance(token, cls):
            _group(token, cls, match, valid_prev, valid_next, post, extend)
        if match(token):
            nidx, next_ = tlist.token_next(tidx)
            if prev_ and valid_prev(prev_) and valid_next(next_):
                from_idx, to_idx = post(tlist, pidx, tidx, nidx)
                grp = tlist.group_tokens(cls, from_idx, to_idx, extend=extend)
                # Account for the tokens the new group consumed.
                tidx_offset += to_idx - from_idx
                pidx, prev_ = from_idx, grp
                continue
        pidx, prev_ = tidx, token
def dumps(obj, preserve=False):
    """Serialize a dict to a TOML string.

    :param obj: the object to be dumped into toml
    :param preserve: optional flag to preserve inline tables in the result
    :return: the TOML document as a string
    """
    buffer = StringIO()
    dump(obj, buffer, preserve)
    return buffer.getvalue()
:param obj: the object to be dumped into toml
:param preserve: optional flag to preserve the inline table in result |
def read_id_list(filepath):
    """Read project member IDs (one eight-digit ID per line) from a file.

    :param filepath: path of the whitelist/blacklist file to read; a
        falsy value yields None.
    :return: list of ID strings, or None when no filepath was given.
    :raises ValueError: if any line is not exactly an eight digit ID.
    """
    if not filepath:
        return None
    id_list = []
    with open(filepath) as f:
        for line in f:
            line = line.rstrip()
            if not re.match('^[0-9]{8}$', line):
                # The original `raise('...')` raised a plain str, which
                # is itself a TypeError in Python 3; raise a real
                # exception with the intended message instead.
                raise ValueError(
                    'Each line in whitelist or blacklist is expected '
                    'to contain an eight digit ID, and nothing else.')
            id_list.append(line)
    return id_list
:param filepath: This field is the path of file to read. |
def blacked_out(self):
    """Return True while the reconnect backoff window is still open.

    True means we are disconnected from the given node and can't
    re-establish a connection yet.
    """
    if self.state is not ConnectionStates.DISCONNECTED:
        return False
    return time.time() < self.last_attempt + self._reconnect_backoff
re-establish a connection yet |
def content_get(self, cid, nid=None):
    """Fetch the data of post ``cid`` from network ``nid``.

    :type cid: str|int
    :param cid: ID of the post to fetch.
    :type nid: str
    :param nid: optional network (class) ID overriding the instance's
        default network.
    :returns: Python object containing the returned data.
    """
    response = self.request(
        method="content.get",
        data={"cid": cid},
        nid=nid
    )
    failure_message = "Could not get post {}.".format(cid)
    return self._handle_error(response, failure_message)
:type nid: str
:param nid: This is the ID of the network (or class) from which
to query posts. This is optional and only to override the existing
`network_id` entered when created the class
:type cid: str|int
:param cid: This is the post ID which we grab
:returns: Python object containing returned data |
def request(self, message, timeout=False, *args, **kwargs):
    """Send ``message`` over a pooled socket and return the reply.

    Tops up the connection pool, checks out a socket, optionally applies
    a timeout, exchanges data, and (for stream protocols) shuts the
    socket down before returning.

    :param message: payload handed to ``send_and_receive``.
    :param timeout: socket timeout in seconds; ``False`` (default)
        leaves the socket's timeout untouched, ``None`` selects
        blocking mode.
    :return: a ``Response`` wrapping the received data.
    """
    # Keep the pool topped up with a freshly registered socket.
    if not self.connection_pool.full():
        self.connection_pool.put(self._register_socket())
    _socket = self.connection_pool.get()
    # True for any non-zero timeout and for None (blocking mode);
    # a timeout of 0/False leaves the socket's setting as-is.
    if timeout or timeout is None:
        _socket.settimeout(timeout)
    data = self.send_and_receive(_socket, message, *args, **kwargs)
    # Stream-oriented protocols get an orderly shutdown; datagram
    # sockets do not need one.
    if self.connection.proto in Socket.streams:
        _socket.shutdown(socket.SHUT_RDWR)
    return Response(data, None, None)
def stylize(ax, name, feature):
    """Apply shared styling tweaks to a plot axis.

    :param ax: axes object to decorate.
    :param name: title shown above the plot (small font).
    :param feature: label for the y axis.
    """
    ax.set_title(name, fontsize='small')
    ax.set_ylabel(feature)
def current_arg_text(self) -> str:
    """Plain text part of the current argument, without any CQ codes.

    The extracted text is cached on the instance after the first call.
    """
    if self._current_arg_text is None:
        message = Message(self.current_arg)
        self._current_arg_text = message.extract_plain_text()
    return self._current_arg_text
def VariantDir(self, variant_dir, src_dir, duplicate=1):
    """Link ``variant_dir`` to ``src_dir`` for purposes of building files.

    Both arguments may be path strings or Node objects; strings are
    resolved via ``self.Dir``.  Raises a ``UserError`` when the source
    lies under the variant dir or the variant dir is already linked to
    a different source.
    """
    if not isinstance(src_dir, SCons.Node.Node):
        src_dir = self.Dir(src_dir)
    if not isinstance(variant_dir, SCons.Node.Node):
        variant_dir = self.Dir(variant_dir)
    if src_dir.is_under(variant_dir):
        raise SCons.Errors.UserError("Source directory cannot be under variant directory.")
    existing = variant_dir.srcdir
    if existing:
        if existing == src_dir:
            # Already linked to the requested source; nothing to do.
            return
        raise SCons.Errors.UserError("'%s' already has a source directory: '%s'."%(variant_dir, existing))
    variant_dir.link(src_dir, duplicate)
for purposes of building files. |
def CallFunction(self):
    """Call the configured function via the XML-RPC proxy.

    :return: the RPC call result, or None when no proxy or function is
        available, or when the call fails (the failure is logged).
    """
    proxy = self._xmlrpc_proxy
    if proxy is None:
        return None
    rpc_call = getattr(proxy, self._RPC_FUNCTION_NAME, None)
    if rpc_call is None:
        return None
    try:
        return rpc_call()
    except (
        expat.ExpatError, SocketServer.socket.error,
        xmlrpclib.Fault) as exception:
        logger.warning('Unable to make RPC call with error: {0!s}'.format(
            exception))
    return None
def name_tree(tree):
    """Give every unnamed / non-uniquely-named node a unique name.

    Leaves are named ``t<i>``, internal nodes ``n<i>``; the root is
    preferentially called ``root``.  Modifies the tree in place.

    :param tree: ete3.Tree to be named
    :return: void
    """
    named = Counter(node.name for node in tree.traverse() if node.name)
    total_nodes = sum(1 for _ in tree.traverse())
    # Fast path: every node already carries a distinct name.
    if total_nodes == len(named):
        return
    counter = 0
    seen = Counter()
    for node in tree.traverse('preorder'):
        if node.is_leaf():
            candidate = node.name
        elif node.is_root():
            candidate = 'root'
        else:
            candidate = None
        while candidate is None or candidate in seen:
            prefix = 't' if node.is_leaf() else 'n'
            candidate = '{}{}'.format(prefix, counter)
            counter += 1
        node.name = candidate
        seen[candidate] += 1
:param tree: tree to be named
:type tree: ete3.Tree
:return: void, modifies the original tree |
def attr(self, name, lineno=None):
    """Return an attribute node for the current extension.

    Useful for passing constants on extensions into generated template
    code, e.g. ``self.attr('_my_attribute', lineno=lineno)``.
    """
    node = nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
    return node
to pass constants on extensions to generated template code.
::
self.attr('_my_attribute', lineno=lineno) |
def get_user(self) -> FacebookUser:
    """Generate a Facebook user instance for the sender of this event."""
    sender_id = self._event['sender']['id']
    return FacebookUser(
        sender_id,
        self.get_page_id(),
        self._facebook,
        self,
    )
def find_table_links(self):
    """Find all available table names for this EPA dataset.

    Fetches the model page at ``self.model_url`` and follows the links
    in its image map.
    """
    page = urlopen(self.model_url).read()
    document = lh.fromstring(page)
    hrefs = [area.attrib['href'] for area in document.cssselect('map area')]
    return self._inception_table_links(hrefs)
for that EPA dataset. |
def get_input_stream(environ, safe_fallback=True):
    """Return the WSGI input stream, wrapped as sensibly as possible.

    The returned stream is usually not the raw ``wsgi.input``: unless
    the server declares the stream terminated, it is limited to the
    declared content length so it is safe to read from.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the stream from.
    :param safe_fallback: whether to return an empty stream as a safe
        fallback when the content length is unknown; if False, the raw
        (unsafe) WSGI input stream is returned instead.
    """
    stream = environ['wsgi.input']
    content_length = get_content_length(environ)

    # A server that promises termination lets us hand back the raw stream.
    if environ.get('wsgi.input_terminated'):
        return stream

    # Without a content length we cannot wrap safely: either refuse to
    # read (empty stream) or let the caller take the risk.  Replaces the
    # fragile `cond and a or b` idiom, which misbehaves if the middle
    # operand is ever falsy.
    if content_length is None:
        return _empty_stream if safe_fallback else stream
    return LimitedStream(stream, content_length)
in the most sensible way possible. The stream returned is not the
raw WSGI stream in most cases but one that is safe to read from
without taking into account the content length.
.. versionadded:: 0.9
:param environ: the WSGI environ to fetch the stream from.
:param safe_fallback: indicates whether the function should use an empty
stream as safe fallback or just return the original
WSGI input stream if it can't wrap it safely. The
default is to return an empty string in those cases. |
def stop_animation(self, sprites):
    """Stop tween animations on the given sprite(s) without firing
    ``on_complete``.

    :param sprites: a single sprite or a list of sprites.
    """
    # Accept a bare sprite as well as a list (idiomatic `not isinstance`
    # instead of `isinstance(...) is False`).
    if not isinstance(sprites, list):
        sprites = [sprites]
    for sprite in sprites:
        self.tweener.kill_tweens(sprite)
def scp(cls, project):
    """Set the current project (class-level version of the global ``scp``).

    :param project: Project or None.  ``None`` clears the current
        subproject; a sub project becomes the current subproject (its
        main project is promoted first if necessary); a main project
        replaces the current main project and a fresh slice of it
        becomes the current subproject.

    Emits ``oncpchange`` for every project object that changes.
    """
    if project is None:
        # Clear the current subproject only.
        _scp(None)
        cls.oncpchange.emit(gcp())
    elif not project.is_main:
        # Promote the sub project's main project first when it is not
        # already the current one.
        if project.main is not _current_project:
            _scp(project.main, True)
            cls.oncpchange.emit(project.main)
        _scp(project)
        cls.oncpchange.emit(project)
    else:
        # A main project replaces the current main project ...
        _scp(project, True)
        cls.oncpchange.emit(project)
        # ... and a (full) slice of it becomes the current subproject.
        sp = project[:]
        _scp(sp)
        cls.oncpchange.emit(sp)
Parameters
----------
project: Project or None
The project to set. If it is None, the current subproject is set
to empty. If it is a sub project (see :attr:`Project.is_main`),
the current subproject is set to this project. Otherwise it
replaces the current main project
See Also
--------
scp: The global version for setting the current project
gcp: Returns the current project
project: Creates a new project |
def marshal(self, values):
    """Turn a list of entities into a list of dictionaries.

    :param values: The entities to serialize (or None).
    :return: List of dictionaries of attributes, or None when ``values``
        is None.
    """
    if values is None:
        return None
    parent = super(EntityCollection, self)
    return [parent.marshal(v) for v in values]
:param values: The entities to serialize.
:type values: List[stravalib.model.BaseEntity]
:return: List of dictionaries of attributes
:rtype: List[Dict[str, Any]] |
def generate(self, signature_data):
    """Run ``signature_data`` through the rule pipeline to build a signature.

    Each rule whose predicate matches has its action applied.  A rule
    that raises is reported (via the error handler, when configured) and
    recorded in the result, but does not abort the pipeline.

    :arg dict signature_data: data to use to generate a signature
    :returns: ``Result`` instance
    """
    result = Result()
    for rule in self.pipeline:
        rule_name = rule.__class__.__name__
        try:
            if not rule.predicate(signature_data, result):
                continue
            rule.action(signature_data, result)
        except Exception as exc:
            # A failing rule must not kill the pipeline; surface the
            # failure and keep going.
            if self.error_handler:
                self.error_handler(
                    signature_data,
                    exc_info=sys.exc_info(),
                    extra={'rule': rule_name}
                )
            result.info(rule_name, 'Rule failed: %s', exc)
    return result
:arg dict signature_data: data to use to generate a signature
:returns: ``Result`` instance |
def build(self):
    """Create the result objects from the JSON response.

    Dispatches to the array builder for 'Array' responses; otherwise
    builds a single item from the whole payload.
    """
    is_array = self.json['sys']['type'] == 'Array'
    return self._build_array() if is_array else self._build_item(self.json)
def validate(self, value, model=None, context=None):
    """Validate the string length of ``value`` against min/max settings.

    :param value: str, value to check (stringified before measuring)
    :param model: parent model being validated
    :param context: object or None, validation context
    :return: an ``Error``; empty on success
    """
    length = len(str(value))
    params = dict(min=self.min, max=self.max)
    # Only a lower bound configured.
    if self.min and self.max is None and length < self.min:
        return Error(self.too_short, params)
    # Only an upper bound configured.
    if self.max and self.min is None and length > self.max:
        return Error(self.too_long, params)
    # Both bounds configured: value must fall inside the range.
    if self.min and self.max and not (self.min <= length <= self.max):
        return Error(self.not_in_range, params)
    return Error()
Perform value validation against validation settings and return
simple result object
:param value: str, value to check
:param model: parent model being validated
:param context: object or None, validation context
:return: shiftschema.results.SimpleResult |
def _search(self):
results = []
for _id in self.doc_dict:
entry = self.doc_dict[_id]
if entry.doc is not None:
results.append(entry.merged_dict)
return results | Returns all documents in the doc dict.
This function is not a part of the DocManager API, and is only used
to simulate searching all documents from a backend. |
def get_new_names_by_old():
    """Return a dict of new label names indexed by old label names.

    The first entry of each ``LABEL_NAMES`` list is the new name; every
    later entry is an old alias mapping to it.
    """
    return {
        old_name: names[0]
        for names in Labels.LABEL_NAMES.values()
        for old_name in names[1:]
    }
def check_sim_out(self):
    """Send updated servo values to FlightGear, rate-limited to 50 Hz."""
    now = time.time()
    # Throttle to at most one packet every 20 ms, and only once scaled
    # RC channel data has been received.
    if now - self.last_sim_send_time < 0.02 or self.rc_channels_scaled is None:
        return
    self.last_sim_send_time = now
    servos = []
    # Channels 1..8 carry scaled servo values; the remaining six slots
    # of the 14-value packet are zero-padded.
    for ch in range(1,9):
        servos.append(self.scale_channel(ch, getattr(self.rc_channels_scaled, 'chan%u_scaled' % ch)))
    servos.extend([0,0,0, 0,0,0])
    # Pack as 14 little-endian unsigned shorts.
    buf = struct.pack('<14H', *servos)
    try:
        self.sim_out.send(buf)
    except socket.error as e:
        # The simulator may not be listening yet; ignore refused
        # connections but re-raise anything else.
        if not e.errno in [ errno.ECONNREFUSED ]:
            raise
    return
def osPaste(self):
    """Trigger the OS "paste" keyboard shortcut (Ctrl+V)."""
    from .InputEmulation import Keyboard
    keyboard = Keyboard()
    keyboard.keyDown("{CTRL}")
    keyboard.type("v")
    keyboard.keyUp("{CTRL}")
def randmatrix(m, n, random_seed=None):
    """Create an m x n matrix of random values via the Xavier Glorot method.

    Values are drawn uniformly from ``[-sqrt(6/(m+n)), sqrt(6/(m+n))]``.

    :param m: number of rows
    :param n: number of columns
    :param random_seed: optional seed for reproducibility
    :return: numpy array of shape (m, n)
    """
    bound = np.sqrt(6.0 / (m + n))
    np.random.seed(random_seed)
    return np.random.uniform(-bound, bound, size=(m, n))
the Xavier Glorot method. |
def empowerment(iface, priority=0):
    """Class decorator for indicating a powerup's powerup interfaces.

    Appends ``(iface, priority)`` to the class's ``powerupInterfaces``
    and declares the class as implementing the interface.

    @type iface: L{zope.interface.Interface}
    @param iface: The powerup interface.
    @type priority: int
    @param priority: The priority the powerup will be installed at.
    """
    def decorate(cls):
        existing = tuple(getattr(cls, 'powerupInterfaces', ()))
        cls.powerupInterfaces = existing + ((iface, priority),)
        implementer(iface)(cls)
        return cls
    return decorate
The class will also be declared as implementing the interface.
@type iface: L{zope.interface.Interface}
@param iface: The powerup interface.
@type priority: int
@param priority: The priority the powerup will be installed at. |
def add_reporter(self, reporter):
    """Register a MetricReporter, initializing it with current metrics."""
    with self._lock:
        current_metrics = list(self.metrics.values())
        reporter.init(current_metrics)
        self._reporters.append(reporter)
def create_env(self):
    """Create a virtual environment at ``self.env``.

    Runs the ``virtualenv`` command (stderr passed through) and then
    creates the bin directory.  Note ``os.mkdir`` raises if
    ``self.env_bin`` already exists.
    """
    virtualenv(self.env, _err=sys.stderr)
    os.mkdir(self.env_bin)
def _SyncAttributes(self):
    """Merge newly-set attributes into the synced attribute cache.

    Non-versioned attributes (or a NEWEST_TIME age policy) keep only
    the most recent pending value; versioned attributes accumulate all
    pending values, kept sorted newest-first.  Clears all pending-change
    state afterwards, which maintains object validity.
    """
    for attribute, value_array in iteritems(self.new_attributes):
        if not attribute.versioned or self.age_policy == NEWEST_TIME:
            # Only the latest write matters; older pending values drop.
            value = value_array[-1]
            self.synced_attributes[attribute] = [
                LazyDecoder(decoded=value, age=value.age)
            ]
        else:
            # Versioned: append every pending value, newest first.
            synced_value_array = self.synced_attributes.setdefault(attribute, [])
            for value in value_array:
                synced_value_array.append(LazyDecoder(decoded=value, age=value.age))
            synced_value_array.sort(key=lambda x: x.age, reverse=True)
    # Reset pending state: the object now reflects the synced cache.
    self.new_attributes = {}
    self._to_delete.clear()
    self._dirty = False
    self._new_version = False
This maintains object validity. |
def notify(self, instance=None, **kwargs):
    """Call ``notification.notify`` for each registered notification class.

    :return: dict of ``{notification.name: model_label, ...}`` including
        only the notifications that were actually sent.
    """
    notified = {}
    for notification_cls in self.registry.values():
        sent = notification_cls().notify(instance=instance, **kwargs)
        if sent:
            notified[notification_cls.name] = instance._meta.label_lower
    return notified
class associated with the given model instance.
Returns a dictionary of {notification.name: model, ...}
including only notifications sent. |
def login(self):
    """Log into this Koji hub; the deferred fires True on success.

    Supports GSSAPI (kerberos) and SSL client-certificate auth (not the
    old krb-over-xmlrpc krbLogin).  The auth type comes from the
    profile config; when unset but a cert file exists, SSL is assumed.
    On success the session id/key are stored for later calls.

    :returns: deferred that when fired returns True
    :raises NotImplementedError: for unsupported auth types
    """
    authtype = self.lookup(self.profile, 'authtype')
    if authtype is None:
        # No explicit authtype: fall back to SSL when a client
        # certificate file is present.
        cert = self.lookup(self.profile, 'cert')
        if cert and os.path.isfile(os.path.expanduser(cert)):
            authtype = 'ssl'
    if authtype == 'kerberos':
        result = yield self._gssapi_login()
    elif authtype == 'ssl':
        result = yield self._ssl_login()
    else:
        raise NotImplementedError('unsupported auth: %s' % authtype)
    # Remember the authenticated session for subsequent hub calls.
    self.session_id = result['session-id']
    self.session_key = result['session-key']
    self.callnum = 0
    defer.returnValue(True)
We support GSSAPI and SSL Client authentication (not the old-style
krb-over-xmlrpc krbLogin method).
:returns: deferred that when fired returns True |
def setUpClassDef(self, service):
    """Emit the class definition header for a WSDL service.

    Writes ``class <Name>(<base>):`` plus empty ``soapAction`` (used for
    WS-Action input), ``wsAction`` (for grabbing WS-Action output
    values) and ``root`` dicts into the service's classdef buffer.

    NOTE: uses Python 2 ``print >>file`` syntax.
    """
    assert isinstance(service, WSDLTools.Service), \
        'expecting WSDLTools.Service instance'
    s = self._services[service.name].classdef
    print >>s, 'class %s(%s):' %(self.getClassName(service.name), self.base_class_name)
    print >>s, '%ssoapAction = {}' % self.getIndent(level=1)
    print >>s, '%swsAction = {}' % self.getIndent(level=1)
    print >>s, '%sroot = {}' % self.getIndent(level=1)
dict for grabbing WS-Action output values. |
def add_codes(err_cls):
    """Add error codes to string messages via class attribute names.

    Accessing ``wrapper.E123`` returns ``'[E123] <message>'`` where
    ``<message>`` is ``err_cls.E123``.
    """
    class ErrorsWithCodes(object):
        def __getattribute__(self, code):
            message = getattr(err_cls, code)
            return '[{code}] {msg}'.format(code=code, msg=message)
    return ErrorsWithCodes()
def restore(self, state):
    """Restore this InMemoryStorageEngine's state from a dict.

    :raises ArgumentError: if the saved readings exceed the configured
        storage or streaming capacity.
    """
    storage_data = state.get(u'storage_data', [])
    streaming_data = state.get(u'streaming_data', [])
    storage_overflow = len(storage_data) > self.storage_length
    streaming_overflow = len(streaming_data) > self.streaming_length
    if storage_overflow or streaming_overflow:
        raise ArgumentError("Cannot restore InMemoryStorageEngine, too many readings",
                            storage_size=len(storage_data), storage_max=self.storage_length,
                            streaming_size=len(streaming_data), streaming_max=self.streaming_length)
    self.storage_data = [IOTileReading.FromDict(entry) for entry in storage_data]
    self.streaming_data = [IOTileReading.FromDict(entry) for entry in streaming_data]
def _call(self, x, out=None):
    """Evaluate all operators in ``x`` and broadcast."""
    wrapped = self.prod_op.domain.element([x], cast=False)
    result = self.prod_op(wrapped, out=out)
    return result
def restart(self, *args, **kwargs):
    """Restart the daemon: stop it, then start it with the given args.

    Any exception raised by ``start`` (e.g. IOError) propagates to the
    caller, exactly as before.
    """
    # The original wrapped start() in `try/except IOError: raise`,
    # which is a no-op — exceptions propagate either way.
    self.stop()
    self.start(*args, **kwargs)
def find_free_port():
    """Return a TCP port number that is currently free to bind."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with closing(probe) as sock:
        # Binding to port 0 asks the OS to pick any free port.
        sock.bind(('', 0))
        _, port = sock.getsockname()
    return port
def file_or_token(value):
    """Resolve a value that is either a token or a path to a token file.

    If ``value`` is the path of an existing file, its stripped contents
    are returned; otherwise ``value`` itself is returned.

    :raises ValueError: if ``value`` looks like a path (contains a path
        separator or a dot) but is not a readable file.
    """
    if isfile(value):
        with open(value) as fd:
            return fd.read().strip()
    if any(char in value for char in '/\\.'):
        # A path-like string that is not an existing file is almost
        # certainly a mistyped path, not a literal token.  The original
        # raised a bare ValueError() with no message.
        raise ValueError(
            '{!r} looks like a file path but is not a readable file'.format(value))
    return value
otherwise value is returned. |
def unwatch_zone(self, zone_id):
    """Remove a zone from the watchlist.

    Drops ``zone_id`` from the local watched set, then tells the device
    to stop reporting it.  Generator-based coroutine: yields (and
    returns) the device's reply to the ``WATCH ... OFF`` command.
    """
    # NOTE(review): remove() raises if the zone was never watched —
    # confirm callers guard against that.
    self._watched_zones.remove(zone_id)
    return (yield from
            self._send_cmd("WATCH %s OFF" % (zone_id.device_str(), )))
def assistant_from_yaml(cls, source, y, superassistant, fully_loaded=True,
                        role=settings.DEFAULT_ASSISTANT_ROLE):
    """Construct a YamlAssistant from an already-loaded yaml structure.

    Args:
        source: path to the assistant source file (its basename, minus
            extension, becomes the assistant name)
        y: loaded yaml structure
        superassistant: superassistant of this assistant
    Returns:
        YamlAssistant instance constructed from y with source file source
    Raises:
        YamlError: if the assistant is malformed
    """
    basename = os.path.basename(source)
    name = os.path.splitext(basename)[0]
    # Validate the structure before building anything from it.
    yaml_checker.check(source, y)
    return yaml_assistant.YamlAssistant(name, y, source, superassistant,
                                        fully_loaded=fully_loaded, role=role)
from source file source.
Args:
source: path to assistant source file
y: loaded yaml structure
superassistant: superassistant of this assistant
Returns:
YamlAssistant instance constructed from y with source file source
Raises:
YamlError: if the assistant is malformed |
def get_file_row_generator(file_path, separator, encoding=None):
    """Read a separated-value file row by row.

    Inputs: - file_path: The path of the separated value format file.
            - separator: The delimiter among values (e.g. ",", "\\t", " ")
            - encoding: The encoding used in the stored text.
    Yields: - words: A list of strings corresponding to each row.
    """
    with open(file_path, encoding=encoding) as file_object:
        for line in file_object:
            yield line.strip().split(separator)
Inputs: - file_path: The path of the separated value format file.
- separator: The delimiter among values (e.g. ",", "\t", " ")
- encoding: The encoding used in the stored text.
Yields: - words: A list of strings corresponding to each of the file's rows. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.