| code | docstring |
|---|---|
def get_onchain_locksroots(
        chain: 'BlockChainService',
        canonical_identifier: CanonicalIdentifier,
        participant1: Address,
        participant2: Address,
        block_identifier: BlockSpecification,
) -> Tuple[Locksroot, Locksroot]:
    """Return the locksroot for `participant1` and `participant2` at `block_identifier`."""
    channel_proxy = chain.payment_channel(canonical_identifier=canonical_identifier)
    details = channel_proxy.token_network.detail_participants(
        participant1=participant1,
        participant2=participant2,
        channel_identifier=canonical_identifier.channel_identifier,
        block_identifier=block_identifier,
    )
    return details.our_details.locksroot, details.partner_details.locksroot
def assert_instance_created(self, model_class, **kwargs):
    """Context manager asserting a ``model_class`` instance matching
    ``kwargs`` was created in the database inside the ``with`` block.

    For example::

        >>> with self.assert_instance_created(Article, slug='lorem-ipsum'):
        ...     Article.objects.create(slug='lorem-ipsum')
    """
    before_check = self.assert_instance_does_not_exist
    after_check = self.assert_instance_exists
    return _InstanceContext(before_check, after_check, model_class, **kwargs)
def delete(self):
    r"""Delete this node from the parse tree.

    Where applicable, this will remove all descendants of this node from
    the parse tree.

    >>> from TexSoup import TexSoup
    >>> soup = TexSoup(r'''\textit{\color{blue}{Silly}}\textit{keep me!}''')
    >>> soup.textit.color.delete()
    >>> soup
    \textit{}\textit{keep me!}
    >>> soup.textit.delete()
    >>> soup
    \textit{keep me!}
    """
    parent = self.parent
    # Containers that manage their own contents can drop the child directly.
    if parent.expr._supports_contents():
        parent.remove(self)
        return
    # Otherwise this node lives inside one of the parent's arguments; find
    # the argument holding it and remove it from that argument's contents.
    for arg in parent.args:
        if self.expr in arg.contents:
            arg.contents.remove(self.expr)
def format_payload(enc, **kwargs):
    """Build a payload for the given enc type: the remaining keyword
    arguments form the body of the ``load`` dict, and the result is
    packaged for transport."""
    payload = {'enc': enc, 'load': dict(kwargs)}
    return package(payload)
def get_metadata(self, loadbalancer, node=None, raw=False):
    """Return the current metadata for the load balancer, or for ``node``
    when one is given.

    When ``raw`` is true the API's list-of-items form is returned;
    otherwise a plain ``{key: value}`` dict is built from it.
    """
    lb_id = utils.get_id(loadbalancer)
    if node:
        uri = "/loadbalancers/%s/nodes/%s/metadata" % (lb_id, utils.get_id(node))
    else:
        uri = "/loadbalancers/%s/metadata" % lb_id
    resp, body = self.api.method_get(uri)
    meta = body.get("metadata", [])
    if raw:
        return meta
    return {itm["key"]: itm["value"] for itm in meta}
def set_default_keychain(keychain, domain="user", user=None):
    """Set the default keychain.

    keychain
        The location of the keychain to set as default

    domain
        One of user|system|common|dynamic; defaults to user

    user
        The user to set the default keychain as

    CLI Example:

    .. code-block:: bash

        salt '*' keychain.set_keychain /Users/fred/Library/Keychains/login.keychain
    """
    return __salt__['cmd.run'](
        "security default-keychain -d {0} -s {1}".format(domain, keychain),
        runas=user,
    )
def get_modelnames() -> List[str]:
    """Return a sorted list containing all application model names."""
    filenames = os.listdir(models.__path__[0])
    names = [str(fn.split('.')[0]) for fn in filenames
             if fn.endswith('.py') and fn != '__init__.py']
    return sorted(names)
def when_file_changed(*filenames, **kwargs):
    """Register the decorated function to run when one or more files changed.

    :param list filenames: names of files to check for changes (a callable
        returning the name is also accepted).
    :param str hash_type: hash used for change detection, defaults to
        'md5'. Must be given as a kwarg.
    """
    def decorator(action):
        Handler.get(action).add_predicate(
            partial(any_file_changed, filenames, **kwargs))
        return action
    return decorator
def use_options(allowed):
    """Decorator that logs warnings when unpermitted options are passed into
    its wrapped function.

    Requires that the wrapped function has a keyword-only argument named
    `options`. If the wrapped function has {options} in its docstring, it is
    filled in with the docs for the allowed options.

    Args:
        allowed (list str): list of option keys allowed.

    Returns:
        Wrapped function with options validation.
    """
    def update_docstring(f):
        _update_option_docstring(f, allowed)

        @functools.wraps(f)
        def check_options(*args, **kwargs):
            opts = kwargs.get('options', {})
            # Leading-underscore options are internal and always permitted.
            unsupported = [
                opt for opt in opts
                if opt not in allowed and not opt.startswith('_')
            ]
            if unsupported:
                logging.warning(
                    'The following options are not supported by '
                    'this function and will likely result in '
                    'undefined behavior: {}.'.format(unsupported)
                )
            return f(*args, **kwargs)
        return check_options
    return update_docstring
def validate(self, data):
    """Check that the video data is valid.

    Ensures profiles within ``encoded_videos`` are unique and drops falsy
    course_video entries from ``courses``. Raises a ValidationError on
    malformed input.
    """
    # Reject anything that is neither None nor a mapping.
    if data is not None and not isinstance(data, dict):
        raise serializers.ValidationError("Invalid data")
    # NOTE(review): when data is None, the .get() calls below raise
    # AttributeError rather than ValidationError -- presumably the
    # framework never passes None past this point; confirm.
    try:
        profiles = [ev["profile"] for ev in data.get("encoded_videos", [])]
        if len(profiles) != len(set(profiles)):
            raise serializers.ValidationError("Invalid data: duplicate profiles")
    except KeyError:
        raise serializers.ValidationError("profile required for deserializing")
    except TypeError:
        raise serializers.ValidationError("profile field needs to be a profile_name (str)")
    # Keep only (course_video, image) pairs with a truthy course_video.
    course_videos = [(course_video, image) for course_video, image in data.get('courses', []) if course_video]
    data['courses'] = course_videos
    return data
def show(self, ax:plt.Axes=None, figsize:tuple=(3,3), title:Optional[str]=None, hide_axis:bool=True,
    cmap:str='tab20', alpha:float=0.5, **kwargs):
    "Show the `ImageSegment` on `ax`."
    # Nearest-neighbour interpolation keeps segment-mask labels crisp.
    drawn_ax = show_image(self, ax=ax, figsize=figsize, hide_axis=hide_axis,
                          cmap=cmap, alpha=alpha, interpolation='nearest', vmin=0)
    if title:
        drawn_ax.set_title(title)
def _remove_existing_jobs(data):
    """Remove jobs from data that the db already has in the same (or a
    later) state.

    Fetches the stored state for every incoming job_guid, then keeps a
    datum when its guid is unknown, or when the incoming state represents
    progress. A datum is dropped when the stored job is already
    'completed', or when an incoming 'pending' arrives for a job that is
    already 'running'. The result may be empty.
    """
    guids = [datum['job']['job_guid'] for datum in data]
    state_map = dict(
        Job.objects.filter(guid__in=guids).values_list('guid', 'state'))
    kept = []
    for datum in data:
        incoming = datum['job']
        stored_state = state_map.get(incoming['job_guid'])
        if not stored_state:
            kept.append(datum)
            continue
        superseded = stored_state == 'completed' or (
            incoming['state'] == 'pending' and stored_state == 'running')
        if not superseded:
            kept.append(datum)
    return kept
def assign_agent_to_resource(self, agent_id, resource_id):
    """Adds an existing ``Agent`` to a ``Resource``.

    arg:    agent_id (osid.id.Id): the ``Id`` of the ``Agent``
    arg:    resource_id (osid.id.Id): the ``Id`` of the ``Resource``
    raise:  AlreadyExists - ``agent_id`` is already assigned to a resource
    raise:  NotFound - ``agent_id`` or ``resource_id`` not found
    raise:  NullArgument - ``agent_id`` or ``resource_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    collection = JSONClientValidated('resource',
                                     collection='Resource',
                                     runtime=self._runtime)
    resource = collection.find_one({'_id': ObjectId(resource_id.get_identifier())})
    # If a lookup by this agent succeeds, the agent is already assigned
    # somewhere -- reject the assignment; a NotFound means it is free.
    try:
        ResourceAgentSession(
            self._catalog_id, self._proxy, self._runtime).get_resource_by_agent(agent_id)
    except errors.NotFound:
        pass
    else:
        raise errors.AlreadyExists()
    # Append (or create) the resource's agent list, then persist.
    if 'agentIds' not in resource:
        resource['agentIds'] = [str(agent_id)]
    else:
        resource['agentIds'].append(str(agent_id))
    collection.save(resource)
def overlaps(self, junc, tolerance=0):
    """Return True when both edges of this junction overlap the
    corresponding edges of ``junc`` within ``tolerance``."""
    if self.left.overlaps(junc.left, padding=tolerance):
        if self.right.overlaps(junc.right, padding=tolerance):
            return True
    return False
def fig_to_src(figure, image_format='png', dpi=80):
    """Convert a matplotlib figure to an inline HTML image.

    :param matplotlib.figure.Figure figure: Figure to display.
    :param str image_format: png (default) or svg
    :param int dpi: dots-per-inch for raster graphics.
    :raises ValueError: if ``image_format`` is not supported.
    :rtype: str
    """
    if image_format == 'png':
        buf = io.BytesIO()  # png is binary
        figure.savefig(buf, format=image_format, dpi=dpi)
        buf.seek(0)
        return png_to_src(buf.read())
    elif image_format == 'svg':
        buf = io.StringIO()  # svg is text
        figure.savefig(buf, format=image_format, dpi=dpi)
        buf.seek(0)
        return svg_to_src(buf.read())
    # Previously an unknown format silently returned None; fail loudly.
    raise ValueError("Unsupported image_format: %r" % (image_format,))
def get_dir_walker(recursive, topdown=True, followlinks=False):
    """Returns a recursive or a non-recursive directory walker.

    :param recursive:
        ``True`` produces a recursive walker (plain ``os.walk``); ``False``
        produces a walker that yields at most the top directory entry.
    :returns:
        A walker function.
    """
    if recursive:
        return partial(os.walk, topdown=topdown, followlinks=followlinks)

    def walk(path, topdown=topdown, followlinks=followlinks):
        # Yield only the first (top-level) entry. Iterating, instead of
        # calling next(), avoids StopIteration escaping into this generator
        # -- a RuntimeError under PEP 479 -- when `path` does not exist.
        # (The old Python 2 `.next()` fallback was dead code and removed.)
        for entry in os.walk(path, topdown=topdown, followlinks=followlinks):
            yield entry
            return
    return walk
def syscal_save_to_config_txt(filename, configs, spacing=1):
    """Write configurations to a Syscal ascii file that can be read by the
    Electre Pro program.

    Parameters
    ----------
    filename: string
        output filename
    configs: numpy.ndarray
        Nx4 array with measurement configurations A-B-M-N
    spacing: int, optional
        electrode spacing passed to the coordinate writer
    """
    print('Number of measurements: ', configs.shape[0])
    int_configs = configs.astype(int)
    nr_electrodes = configs.max().astype(int)
    with open(filename, 'w') as fid:
        _syscal_write_electrode_coords(fid, spacing, nr_electrodes)
        _syscal_write_quadpoles(fid, int_configs)
def _bind(self):
    """Binds to the LDAP server with AUTH_LDAP_BIND_DN and
    AUTH_LDAP_BIND_PASSWORD.

    The bind is 'sticky': it is kept as the connection's default identity.
    """
    self._bind_as(self.settings.BIND_DN, self.settings.BIND_PASSWORD, sticky=True)
def parameters(self, sequence, value_means, value_ranges, arrangement):
    """Relates the individual to be evolved to the full parameter string.

    Parameters
    ----------
    sequence: str
        Full amino acid sequence for the specification object to be
        optimized. Must be equal to the number of residues in the model.
    value_means: list
        Mean values for the parameters to be optimized.
    value_ranges: list
        Ranges for the parameters to be optimized. Values must be positive.
    arrangement: list
        Full list of fixed and variable parameters for model building.
        Values to be varied are listed as 'var0', 'var1' etc, in ascending
        numerical order, and may be repeated.

    Raises
    ------
    ValueError
        If any range is non-positive, or if the number of means, ranges
        and variable names in ``arrangement`` do not match.
    """
    # Validate BEFORE mutating self._params so a bad call leaves no
    # partially-updated state behind (the original assigned first).
    if any(x <= 0 for x in value_ranges):
        raise ValueError("range values must be greater than zero")
    if len(value_ranges) != len(value_means):
        raise ValueError("argument mismatch!")
    variable_parameters = ['var{}'.format(i) for i in range(len(value_means))]
    if len(set(arrangement).intersection(variable_parameters)) != len(value_means):
        raise ValueError("argument mismatch!")
    self._params['sequence'] = sequence
    self._params['value_means'] = value_means
    self._params['value_ranges'] = value_ranges
    self._params['arrangement'] = arrangement
    self._params['variable_parameters'] = variable_parameters
def get_calculated_display_values(self, immediate: bool=False) -> DisplayValues:
    """Return the display values.

    Return the current (possibly uncalculated) display values unless
    'immediate' is specified. If 'immediate', return the existing
    (calculated) values if they exist. Using the 'immediate' values avoids
    calculation except when display values haven't been calculated yet.
    """
    # Serve the cached (finalized) values only when immediate results are
    # requested, this is the master item, and a finalized set exists.
    if not immediate or not self.__is_master or not self.__last_display_values:
        # Lazily construct current display values from the data item.
        if not self.__current_display_values and self.__data_item:
            self.__current_display_values = DisplayValues(self.__data_item.xdata, self.sequence_index, self.collection_index, self.slice_center, self.slice_width, self.display_limits, self.complex_display_type, self.__color_map_data)

            # When calculation completes, promote the values to "last known
            # good" and notify listeners.
            def finalize(display_values):
                self.__last_display_values = display_values
                self.display_values_changed_event.fire()

            self.__current_display_values.on_finalize = finalize
        return self.__current_display_values
    return self.__last_display_values
def log(array, cutoff):
    """Compute the logarithm of an array, clamping values below ``cutoff``
    up to ``cutoff`` first."""
    return numpy.log(numpy.maximum(array, cutoff))
def save(self, value, redis, *, commit=True):
    """Sets this field's value in the database, maintaining the secondary
    index when ``self.index`` is set."""
    prepared = self.prepare(value)
    if prepared is None:
        redis.hdel(self.obj.key(), self.name)
    else:
        redis.hset(self.obj.key(), self.name, prepared)
    if not self.index:
        return
    index_key = self.key()
    # Drop the stale index entry for the previous value, if any.
    if self.name in self.obj._old:
        redis.hdel(index_key, self.obj._old[self.name])
    if prepared is not None:
        redis.hset(index_key, prepared, self.obj.id)
def currentuser(self):
    """Returns the current user parameters; the current user is the one
    linked to the secret token.

    :return: the current user's properties
    """
    url = '{0}/api/v3/user'.format(self.host)
    response = requests.get(url, headers=self.headers, verify=self.verify_ssl,
                            auth=self.auth, timeout=self.timeout)
    return response.json()
def system(self):
    """The system of units used to measure an instance: 'NIST' for base 2,
    'SI' for base 10."""
    if self._base == 10:
        return "SI"
    if self._base == 2:
        return "NIST"
    raise ValueError("Instances mathematical base is an unsupported value: %s" % (
        str(self._base)))
def _set_overlay_verify(name, overlay_path, config_path):
global DEBUG
if os.path.exists(config_path):
print("Config path already exists! Not moving forward")
print("config_path: {0}".format(config_path))
return -1
os.makedirs(config_path)
with open(config_path + "/dtbo", 'wb') as outfile:
with open(overlay_path, 'rb') as infile:
shutil.copyfileobj(infile, outfile)
time.sleep(0.2)
if name == "CUST":
return 0
elif name == "PWM0":
if os.path.exists(PWMSYSFSPATH):
if DEBUG:
print("PWM IS LOADED!")
return 0
else:
if DEBUG:
print("ERROR LOAIDNG PWM0")
return 1
elif name == "SPI2":
if os.listdir(SPI2SYSFSPATH) != "":
if DEBUG:
print("SPI2 IS LOADED!")
return 0
else:
if DEBUG:
print("ERROR LOADING SPI2")
return 0 | _set_overlay_verify - Function to load the overlay and verify it was setup properly |
def disengage(self):
    """Home the magnet and mark the module as disengaged."""
    driver = self._driver
    if driver and driver.is_connected():
        driver.home()
        self._engaged = False
def add(self, properties):
    """Add a faked NIC resource.

    Parameters:
        properties (dict): Resource properties. 'element-id',
            'element-uri', 'class' and 'device-number' are auto-generated
            with unique values if not specified. Either
            'network-adapter-port-uri' (ROCE) or 'virtual-switch-uri'
            (OSA/Hipersockets) is required.

    Also updates 'nic-uris' in the parent faked Partition and, for
    vswitch-backed NICs, 'connected-vnic-uris' in the referenced virtual
    switch.

    Returns:
        :class:`zhmcclient_mock.FakedNic`: The faked NIC resource.

    Raises:
        :exc:`zhmcclient_mock.InputError`: Some issue with the input
            properties.
    """
    new_nic = super(FakedNicManager, self).add(properties)
    partition = self.parent
    if 'virtual-switch-uri' in new_nic.properties:
        vswitch_uri = new_nic.properties['virtual-switch-uri']
        # The referenced virtual switch must already exist in the mock HMC.
        try:
            vswitch = self.hmc.lookup_by_uri(vswitch_uri)
        except KeyError:
            raise InputError("The virtual switch specified in the "
                             "'virtual-switch-uri' property does not "
                             "exist: {!r}".format(vswitch_uri))
        # Back-link the NIC from the virtual switch (idempotently).
        connected_uris = vswitch.properties['connected-vnic-uris']
        if new_nic.uri not in connected_uris:
            connected_uris.append(new_nic.uri)
    if 'device-number' not in new_nic.properties:
        # Allocate a unique device number within the partition.
        devno = partition.devno_alloc()
        new_nic.properties['device-number'] = devno
    assert 'nic-uris' in partition.properties
    partition.properties['nic-uris'].append(new_nic.uri)
    return new_nic
def process(self, metric):
    """Run every configured rule against a single metric.

    @type metric: diamond.metric.Metric
    @param metric: metric to process
    @rtype None
    """
    for handler_rule in self.rules:
        handler_rule.process(metric, self)
def memory(self):
    """Memory information in bytes.

    Example:
        >>> print(ctx.device(0).memory())
        {'total': 4238016512L, 'used': 434831360L, 'free': 3803185152L}

    Returns:
        dict with total/used/free memory in bytes
    """
    # ctypes mirror of the nvmlMemory_t struct filled in by NVML.
    class GpuMemoryInfo(Structure):
        _fields_ = [
            ('total', c_ulonglong),
            ('free', c_ulonglong),
            ('used', c_ulonglong),
        ]
    c_memory = GpuMemoryInfo()
    # Query the driver; _check_return raises on a non-success NVML status.
    _check_return(_NVML.get_function(
        "nvmlDeviceGetMemoryInfo")(self.hnd, byref(c_memory)))
    return {'total': c_memory.total, 'free': c_memory.free, 'used': c_memory.used}
def floating_ip_disassociate(self, server_name, floating_ip):
    """Disassociate a floating IP from server.

    .. versionadded:: 2016.3.0
    """
    conn = self.compute_conn
    found = self.server_by_name(server_name)
    server = conn.servers.get(found.__dict__['id'])
    server.remove_floating_ip(floating_ip)
    return self.floating_ip_list()[floating_ip]
def cli(env, identifier, name, all, note):
    """Capture one or all disks from a virtual server to a SoftLayer image."""
    manager = SoftLayer.VSManager(env.client)
    vs_id = helpers.resolve_id(manager.resolve_ids, identifier, 'VS')
    capture = manager.capture(vs_id, name, all, note)
    created = capture['createDate']
    table = formatting.KeyValueTable(['name', 'value'])
    table.align['name'] = 'r'
    table.align['value'] = 'l'
    for row in [['vs_id', capture['guestId']],
                ['date', created[:10]],
                ['time', created[11:19]],
                ['transaction', formatting.transaction_status(capture)],
                ['transaction_id', capture['id']],
                ['all_disks', all]]:
        table.add_row(row)
    env.fout(table)
def get_FORCE_SETS_lines(dataset, forces=None):
    """Generate FORCE_SETS lines for a displacement dataset.

    Type-1 (traditional) datasets contain 'first_atoms'; type-2 datasets
    contain 'forces'. For type-1, ``forces`` may be given so sets of
    forces need not be stored in the dataset. Returns None for datasets
    with neither key.
    """
    if 'first_atoms' in dataset:
        return _get_FORCE_SETS_lines_type1(dataset, forces=forces)
    if 'forces' in dataset:
        return _get_FORCE_SETS_lines_type2(dataset)
def datasets(data = 'all', type = None, uuid = None, query = None, id = None,
    limit = 100, offset = None, **kwargs):
    """Search for datasets and dataset metadata.

    :param data: [str or list] The type of data to get. Default: ``all``
    :param type: [str] Type of dataset, options include ``OCCURRENCE``, etc.
    :param uuid: [str] UUID of the data node provider. Must be specified if
        data is anything other than ``all``.
    :param query: [str] Query term(s). Only used when ``data = 'all'``
    :param id: [int] A metadata document id.
        NOTE(review): `id` is accepted but never placed into ``args`` --
        confirm whether datasets_fetch is expected to receive it.

    References http://www.gbif.org/developer/registry#datasets

    Usage::

        from pygbif import registry
        registry.datasets(limit=5)
        registry.datasets(type="OCCURRENCE")
        registry.datasets(data='contact', uuid="a6998220-7e3a-485d-9cd6-73076bd85657")
        registry.datasets(data=['deleted','duplicate'], limit=1)
    """
    args = {'q': query, 'type': type, 'limit': limit, 'offset': offset}
    # Valid values for the `data` argument.
    data_choices = ['all', 'organization', 'contact', 'endpoint',
                    'identifier', 'tag', 'machinetag', 'comment',
                    'constituents', 'document', 'metadata', 'deleted',
                    'duplicate', 'subDataset', 'withNoEndpoint']
    check_data(data, data_choices)
    # A single data value fetches once; a list fetches per element.
    if len2(data) ==1:
        return datasets_fetch(data, uuid, args, **kwargs)
    else:
        return [datasets_fetch(x, uuid, args, **kwargs) for x in data]
def popen_multiple(commands, command_args, *args, **kwargs):
    """Like `subprocess.Popen`, but can try multiple commands in case some
    are not available.

    `commands` is an iterable of command names and `command_args` are the
    rest of the arguments that, when appended to the command name, make up
    the full first argument to `subprocess.Popen`. The other positional and
    keyword arguments are passed through; the last OSError is re-raised
    when no command launches.
    """
    last_index = len(commands) - 1
    for index, command in enumerate(commands):
        try:
            return subprocess.Popen([command] + command_args, *args, **kwargs)
        except OSError:
            if index == last_index:
                raise
def setup(app):
    """Register the Everett domain and the autocomponent directive in Sphinx."""
    app.add_domain(EverettDomain)
    app.add_directive('autocomponent', AutoComponentDirective)
    extension_metadata = {
        'version': __version__,
        'parallel_read_safe': True,
        'parallel_write_safe': True
    }
    return extension_metadata
def received_new(self, msg):
    """As new messages arrive, deliver them to the lamson relay."""
    logger.info("Receiving msg, delivering to Lamson...")
    # Lazy %-style args: only formatted when debug logging is enabled.
    logger.debug("Relaying msg to lamson: From: %s, To: %s",
        msg['From'], msg['To'])
    self._relay.deliver(msg)
def salt_call():
    """Directly call a salt command in the modules; does not require a
    running salt minion."""
    import salt.cli.call
    # Drop the implicit CWD entry so module resolution matches a real install.
    if '' in sys.path:
        sys.path.remove('')
    caller = salt.cli.call.SaltCall()
    _install_signal_handlers(caller)
    caller.run()
def read(self, filename):
    """Read the specified file and tokenize its data for parsing.

    Returns True on success; on IOError the stored filename is cleared and
    False is returned.
    """
    try:
        with open(filename, 'r') as handle:
            self._filename = filename
            self.readstream(handle)
            return True
    except IOError:
        self._filename = None
        return False
def integrate_fluxes(self):
    """Perform a dot multiplication between the fluxes and the A
    coefficients associated with the different stages of the actual
    method.

    >>> from hydpy.models.test_v1 import *
    >>> parameterstep()
    >>> model.numvars.idx_method = 2
    >>> model.numvars.idx_stage = 1
    >>> model.numvars.dt = 0.5
    >>> points = numpy.asarray(fluxes.fastaccess._q_points)
    >>> points[:4] = 15., 2., -999., 0.
    >>> model.integrate_fluxes()
    >>> fluxes.q
    q(2.9375)
    """
    fluxes = self.sequences.fluxes
    for flux in fluxes.numerics:
        # Per-stage point values collected for this flux sequence.
        points = getattr(fluxes.fastaccess, '_%s_points' % flux.name)
        # A-coefficients for the current method order and stage; only the
        # first idx_method entries are relevant.
        coefs = self.numconsts.a_coefs[self.numvars.idx_method-1,
                                       self.numvars.idx_stage,
                                       :self.numvars.idx_method]
        # Weighted sum of the stage points, scaled by the step size dt.
        flux(self.numvars.dt *
             numpy.dot(coefs, points[:self.numvars.idx_method]))
def put(self, key, value, format=None, append=False, **kwargs):
    """Store object in HDFStore.

    Parameters
    ----------
    key : object
    value : {Series, DataFrame}
    format : 'fixed(f)|table(t)', default is 'fixed'
        fixed(f) : fast writing/reading, not appendable nor searchable.
        table(t) : PyTables Table structure; may perform worse but allows
        searching / selecting subsets of the data.
    append : boolean, default False
        Forces Table format and appends the input data to the existing.
    data_columns : list of columns to create as data columns, or True to
        use all columns.
    encoding : default None, provide an encoding for strings.
    dropna : boolean, default False, do not write an ALL nan row to the
        store (settable by the option 'io.hdf.dropna_table').
    """
    # Fall back to the configured default format ('fixed' if unset).
    if format is None:
        format = get_option("io.hdf.default_format") or 'fixed'
    kwargs = self._validate_format(format, kwargs)
    self._write_to_group(key, value, append=append, **kwargs)
def _get_logging_id(self):
return "{}.{}/{}".format(
self._request.viewset_class.__module__,
self._request.viewset_class.__name__,
self._request.viewset_method,
) | Get logging identifier. |
def select_token(request, scopes='', new=False):
    """Presents the user with a selection of applicable tokens for the
    requested view."""
    @tokens_required(scopes=scopes, new=new)
    def render_token_list(r, tokens):
        return render(r, 'esi/select_token.html', context={
            'tokens': tokens,
            'base_template': app_settings.ESI_BASE_TEMPLATE,
        })
    return render_token_list(request)
def is_format_selected(image_format, formats, progs):
    """Determine if the image format is selected by command line arguments:
    it must appear in both format sets and its program must be selected."""
    selected_formats = formats & Settings.formats
    program_selected = _is_program_selected(progs)
    return (image_format in selected_formats) and program_selected
def validate(config):
    """Validate the napalm beacon configuration.

    Returns ``(True, message)`` when valid, otherwise ``(False, reason)``.
    """
    if not isinstance(config, list):
        return False, 'Configuration for napalm beacon must be a list.'
    for mod in config:
        # BUG FIX: dict views are not indexable on Python 3, so the
        # original `mod.keys()[0]` raised TypeError. Take the first key
        # via iteration instead.
        fun = next(iter(mod))
        fun_cfg = mod[fun]
        if not isinstance(fun_cfg, dict):
            return False, 'The match structure for the {} execution function output must be a dictionary'.format(fun)
        if fun not in __salt__:
            # typo fix: "availabe" -> "available"
            return False, 'Execution function {} is not available!'.format(fun)
    # typo fix: "napal" -> "napalm"
    return True, 'Valid configuration for the napalm beacon!'
def truncate_rows(A, nz_per_row):
    """Truncate the rows of A by keeping only the largest-in-magnitude
    entries in each row.

    Parameters
    ----------
    A : sparse_matrix
    nz_per_row : int
        Determines how many entries in each row to keep.

    Returns
    -------
    A : sparse_matrix
        Each row has been truncated to at most nz_per_row entries; the
        result is in the same storage format as the input.

    Examples
    --------
    >>> from pyamg.util.utils import truncate_rows
    >>> from scipy.sparse import csr_matrix
    >>> A = csr_matrix( array([[-0.24, -0.5 , 0. , 0. ],
    ...                        [ 1. , -1.1 , 0.49, 0.1 ],
    ...                        [ 0. , 0.4 , 1. , 0.5 ]]) )
    >>> truncate_rows(A, 2).todense()
    matrix([[-0.24, -0.5 , 0. , 0. ],
            [ 1. , -1.1 , 0. , 0. ],
            [ 0. , 0. , 1. , 0.5 ]])
    """
    if not isspmatrix(A):
        raise ValueError("Sparse matrix input needed")
    # Remember BSR blocksize so the format can be restored afterwards.
    if isspmatrix_bsr(A):
        blocksize = A.blocksize
    # Copy CSR input so the in-place truncation below cannot mutate the
    # caller's matrix (tocsr() is a no-op for matrices already in CSR).
    if isspmatrix_csr(A):
        A = A.copy()
    Aformat = A.format
    A = A.tocsr()
    nz_per_row = int(nz_per_row)
    # C-accelerated in-place truncation of the CSR index/data arrays.
    pyamg.amg_core.truncate_rows_csr(A.shape[0], nz_per_row, A.indptr,
                                     A.indices, A.data)
    A.eliminate_zeros()
    # Convert back to the original storage format.
    if Aformat == 'bsr':
        A = A.tobsr(blocksize)
    else:
        A = A.asformat(Aformat)
    return A
def difference(self, other):
    """Compute the difference between this and a given range.

    >>> intrange(1, 10).difference(intrange(10, 15))
    intrange([1,10))
    >>> intrange(1, 10).difference(intrange(5, 10))
    intrange([1,5))
    >>> intrange(1, 5).difference(intrange(1, 10))
    intrange(empty)

    The difference can not be computed if the resulting range would be
    split in two separate ranges (the given range is completely within
    this range without sharing an endpoint). This does not modify the
    range in place. Same as the ``-`` operator for two ranges in
    PostgreSQL.

    :param other: Range to difference against.
    :return: A new range that is the difference between this and `other`.
    :raises ValueError: If difference between this and `other` can not be
        computed.
    """
    if not self.is_valid_range(other):
        msg = "Unsupported type to test for difference '{.__class__.__name__}'"
        raise TypeError(msg.format(other))
    # Disjoint or empty operands: nothing to subtract.
    if not self or not other or not self.overlap(other):
        return self
    elif self in other:
        # Fully covered by other: the result is empty.
        return self.empty()
    elif other in self and not (self.startswith(other) or self.endswith(other)):
        # Subtraction would split this range into two pieces.
        raise ValueError("Other range must not be within this range")
    elif self.endsbefore(other):
        # Overlap at the upper end: trim it back to other's lower bound.
        return self.replace(upper=other.lower, upper_inc=not other.lower_inc)
    elif self.startsafter(other):
        # Overlap at the lower end: trim it up to other's upper bound.
        return self.replace(lower=other.upper, lower_inc=not other.upper_inc)
    else:
        return self.empty()
def get_backend_path(service):
    """Return the dotted path of the first backend that allows `service`,
    or None when no backend matches."""
    for backend in _get_backends():
        cls = backend.__class__
        try:
            allowed = backend.service_allowed(service)
        except AttributeError:
            raise NotImplementedError("%s.%s.service_allowed() not implemented" % (
                cls.__module__, cls.__name__)
            )
        if allowed:
            return "%s.%s" % (cls.__module__, cls.__name__)
    return None
def setup_webserver():
    """Run setup tasks to set up a nicely configured webserver.

    Features:
      * owncloud service
      * fdroid repository
      * certificates via letsencrypt
      * and more

    The task is defined in fabsetup_custom/fabfile_addtitions/__init__.py
    and could be customized by your own needs. More info: README.md
    """
    run('sudo apt-get update')
    install_packages(packages_webserver)
    execute(custom.latex)
    execute(setup.solarized)
    execute(setup.vim)
    execute(setup.tmux)
    # Clone (or refresh) the letsencrypt client repository.
    checkup_git_repo_legacy(url='git@github.com:letsencrypt/letsencrypt.git')
    execute(setup.service.fdroid)
    execute(setup.service.owncloud)
    # NOTE(review): local import -- presumably avoids a circular import at
    # module load time; confirm before moving to the top of the file.
    from fabfile import dfh, check_reboot
    dfh()
    check_reboot()
def _from_string(cls, serialized):
    """Deserialize the course part via CourseLocator, then add the local
    deserialization of the block part."""
    course_key = CourseLocator._from_string(serialized)
    parts = cls.parse_url(serialized)
    block_id = parts.get('block_id', None)
    if block_id is None:
        raise InvalidKeyError(cls, serialized)
    return cls(course_key, parts.get('block_type'), block_id)
def create(cls, name, frame_type='eth2', value1=None, comment=None):
    """Create an ethernet service.

    :param str name: name of service
    :param str frame_type: ethernet frame type, eth2
    :param str value1: hex code representing the ethertype field
    :param str comment: optional comment
    :raises CreateElementFailed: failure creating element with reason
    :return: instance with meta
    :rtype: EthernetService
    """
    json = {
        'frame_type': frame_type,
        'name': name,
        # Guard the hex conversion: the original raised TypeError when
        # value1 was left at its documented default of None.
        'value1': int(value1, 16) if value1 is not None else None,
        'comment': comment,
    }
    return ElementCreator(cls, json)
def register_event(self, *names):
    """Registers new events after instance creation.

    Args:
        *names (str): Name or names of the events to register;
            already-registered names are left untouched.
    """
    for event_name in names:
        if event_name not in self.__events:
            self.__events[event_name] = Event(event_name)
def is_absolute(self):
    """Return True when the uri contains all parts except version:
    namespace, ext, scheme and path."""
    # bool() so the predicate returns True/False instead of leaking the
    # last truthy component (the original returned e.g. the path string).
    return bool(self.namespace and self.ext and self.scheme and self.path)
def pop(self, option, default=None):
    """Remove `option` and return its value, just like `dict.pop`.

    Returns `default` when the stored value is None. (The original
    `(val is None and default) or val` returned None whenever `default`
    itself was falsy -- e.g. 0 -- instead of the default.)
    """
    val = self[option]
    del self[option]
    return default if val is None else val
def get_downsample_pct(in_bam, target_counts, data):
    """Retrieve percentage of file to downsample to get to target counts.

    Returns None (meaning: do not downsample) when the file is already at
    or below the per-read-group target, or when the reduction would be
    minimal (90% or more of reads kept), since that is not especially
    useful for improving QC times.
    """
    # Total aligned reads across all contigs.
    total = sum(x.aligned for x in idxstats(in_bam, data))
    with pysam.Samfile(in_bam, "rb") as work_bam:
        # Scale the target by the number of read groups (at least one).
        n_rgs = max(1, len(work_bam.header.get("RG", [])))
        rg_target = n_rgs * target_counts
        if total > rg_target:
            pct = float(rg_target) / float(total)
            if pct < 0.9:
                return pct
def kinematic_flux(vel, b, perturbation=False, axis=-1):
    r"""Compute the kinematic flux from two time series.

    Computes :math:`\overline{u^{\prime} s^{\prime}}` where at least one
    variable is a component of velocity (e.g. the vertical kinematic
    heat flux :math:`\overline{w^{\prime} T^{\prime}}`).

    Parameters
    ----------
    vel : array_like
        A component of velocity.
    b : array_like
        A component of velocity or a scalar variable (e.g. Temperature).
    perturbation : bool, optional
        True if `vel` and `b` are already perturbation quantities. If
        False, means are removed via the computationally cheaper
        equivalent form
        :math:`\overline{us} - \overline{u}\,\overline{s}`.
    axis : int, optional
        Index of the time axis along which the calculation is
        performed. Defaults to -1.

    Returns
    -------
    array_like
        The corresponding kinematic flux (always at least 1-D).

    Notes
    -----
    For more information on the subject, please see [Garratt1994]_.
    """
    # Fix: the original body began with a stray bare name `r` (remnant
    # of a stripped raw docstring), which raised NameError at call time.
    kf = np.mean(vel * b, axis=axis)
    if not perturbation:
        # Remove the product of means instead of de-meaning each input.
        kf -= np.mean(vel, axis=axis) * np.mean(b, axis=axis)
    return np.atleast_1d(kf)
def remove_data_flows_with_data_port_id(self, data_port_id):
    """Remove all data flows whose from_key or to_key equals the passed
    data_port_id.

    Both the parent state's data flows (connections to siblings/parent)
    and this state's own internal data flows are scanned.

    :param int data_port_id: id of a data port whose data flows should
        all be removed; the id can be an input or output data port id
    """
    # The root state (or the root of a library) has no parent whose
    # flows could reference this state.
    if not self.is_root_state and not self.is_root_state_of_library:
        # Collect first, then remove: removing while iterating would
        # mutate the dict being traversed.
        data_flow_ids_to_remove = []
        for data_flow_id, data_flow in self.parent.data_flows.items():
            if data_flow.from_state == self.state_id and data_flow.from_key == data_port_id or \
                    data_flow.to_state == self.state_id and data_flow.to_key == data_port_id:
                data_flow_ids_to_remove.append(data_flow_id)
        for data_flow_id in data_flow_ids_to_remove:
            self.parent.remove_data_flow(data_flow_id)
        # Same collect-then-remove pass for this state's own flows.
        data_flow_ids_to_remove = []
        for data_flow_id, data_flow in self.data_flows.items():
            if data_flow.from_state == self.state_id and data_flow.from_key == data_port_id or \
                    data_flow.to_state == self.state_id and data_flow.to_key == data_port_id:
                data_flow_ids_to_remove.append(data_flow_id)
        for data_flow_id in data_flow_ids_to_remove:
            self.remove_data_flow(data_flow_id)
def dummy_image(filetype='gif'):
    """Generate an empty image in a temporary file for testing.

    :param filetype: extension used for the temp file's suffix. The
        payload is always a 1x1 GIF regardless of `filetype`.
    :return: a binary file handle opened on the generated image.
    """
    GIF = 'R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7'
    tmp_file = tempfile.NamedTemporaryFile(suffix='.%s' % filetype)
    tmp_file.write(base64.b64decode(GIF))
    # Flush so the bytes are visible when the file is reopened by name;
    # without this the payload may still sit in the write buffer.
    tmp_file.flush()
    # NOTE(review): tmp_file is garbage-collected after return, which
    # unlinks the file (delete=True). The already-open handle keeps
    # working on POSIX but would break on Windows — confirm intended use.
    return open(tmp_file.name, 'rb')
def phi(n):
    """Return the Euler totient function of n.

    Uses the prime factorization:
    phi(n) = prod(p**(e-1) * (p - 1)) over prime factors p**e.
    """
    assert isinstance(n, integer_types)
    if n < 3:
        return 1
    total = 1
    for prime, exponent in factorization(n):
        # p**(e-1) * (p-1) also covers e == 1, where it reduces to p-1.
        total *= prime ** (exponent - 1) * (prime - 1)
    return total
def SetDecryptedStreamSize(self, decrypted_stream_size):
    """Sets the decrypted stream size.

    This function is used to set the decrypted stream size if it can be
    determined separately.

    Args:
      decrypted_stream_size (int): size of the decrypted stream in bytes.

    Raises:
      IOError: if the file-like object is already open.
      OSError: if the file-like object is already open.
      ValueError: if the decrypted stream size is invalid.
    """
    if self._is_open:
        raise IOError('Already open.')
    if decrypted_stream_size < 0:
        error_message = (
            'Invalid decrypted stream size: {0:d} value out of '
            'bounds.').format(decrypted_stream_size)
        raise ValueError(error_message)
    self._decrypted_stream_size = decrypted_stream_size
def register_magnitude_model(self, pid):
    """Set a given parameter model to the forward magnitude model.

    Lazily initialises the two-slot forward_model list and stores
    `pid` in the magnitude (first) slot.
    """
    slots = self.assignments['forward_model']
    if slots is None:
        slots = [None, None]
        self.assignments['forward_model'] = slots
    slots[0] = pid
def get(self):
    """Get a JSON-ready representation of this Email.

    :returns: This Email, ready for use in a request body.
    :rtype: dict
    """
    payload = {}
    # Only include fields that are actually set.
    for key, value in (("name", self.name), ("email", self.email)):
        if value is not None:
            payload[key] = value
    return payload
def iscomplex(polynomial):
    """Returns whether the polynomial has complex coefficients.

    :param polynomial: Polynomial of noncommutative variables, or a
        plain Python number.
    :type polynomial: :class:`sympy.core.expr.Expr`
    :returns: bool -- whether there is a complex coefficient.
    """
    # Plain Python numbers short-circuit the SymPy walk below.
    if isinstance(polynomial, (int, float)):
        return False
    if isinstance(polynomial, complex):
        return True
    # Expand so coefficients are exposed per monomial.
    polynomial = polynomial.expand()
    for monomial in polynomial.as_coefficients_dict():
        # Inspect the multiplicative factors of each monomial for the
        # imaginary unit I or a raw complex constant.
        for variable in monomial.as_coeff_mul()[1]:
            if isinstance(variable, complex) or variable == I:
                return True
    return False
def query_string_parser(search_pattern):
    """Elasticsearch query string parser.

    Lazily resolves the configured OAISERVER_QUERY_PARSER (importing it
    when given as a dotted path) and caches it on the current OAI
    server object before parsing `search_pattern`.
    """
    if not hasattr(current_oaiserver, 'query_parser'):
        parser = current_app.config['OAISERVER_QUERY_PARSER']
        if isinstance(parser, six.string_types):
            # Configured as a dotted path; import the callable.
            parser = import_string(parser)
        current_oaiserver.query_parser = parser
    return current_oaiserver.query_parser('query_string', query=search_pattern)
def prepare_request_uri(self, uri, redirect_uri=None, scope=None,
                        state=None, **kwargs):
    """Prepare the implicit grant request URI.

    The client constructs the request URI by adding parameters to the
    query component of the authorization endpoint URI using the
    "application/x-www-form-urlencoded" format (`Appendix B`_). OAuthLib
    appends the ``client_id`` provided in the constructor and the
    mandatory ``response_type`` argument, set to ``token``.

    :param redirect_uri: OPTIONAL. Absolute URI, which should have been
        registered with the OAuth provider prior to use
        (`Section 3.1.2`_).
    :param scope: OPTIONAL. The scope of the access request
        (`Section 3.3`_); commonly URIs or category strings.
    :param state: RECOMMENDED. Opaque value used by the client to
        maintain state between request and callback; SHOULD be used to
        prevent CSRF (`Section 10.12`_).
    :param kwargs: Extra arguments to include in the request URI.

    >>> from oauthlib.oauth2 import MobileApplicationClient
    >>> client = MobileApplicationClient('your_id')
    >>> client.prepare_request_uri('https://example.com')
    'https://example.com?client_id=your_id&response_type=token'

    .. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
    .. _`Section 3.1.2`: https://tools.ietf.org/html/rfc6749#section-3.1.2
    .. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
    .. _`Section 10.12`: https://tools.ietf.org/html/rfc6749#section-10.12
    """
    return prepare_grant_uri(
        uri,
        self.client_id,
        self.response_type,
        redirect_uri=redirect_uri,
        scope=scope,
        state=state,
        **kwargs
    )
def save_as_nifti_file(data: np.ndarray, affine: np.ndarray,
                       path: Union[str, Path]) -> None:
    """Create a Nifti file and save it.

    Parameters
    ----------
    data
        Brain data.
    affine
        Affine of the image, usually inherited from an existing image.
    path
        Output filename.
    """
    # nibabel's save expects a plain string path.
    filename = path if isinstance(path, str) else str(path)
    nib.nifti1.save(Nifti1Pair(data, affine), filename)
def create_or_update_group_by_name(self, name, group_type="internal", metadata=None, policies=None, member_group_ids=None,
                                   member_entity_ids=None, mount_point=DEFAULT_MOUNT_POINT):
    """Create or update a group by its name.

    Supported methods:
        POST: /{mount_point}/group/name/{name}. Produces: 200 application/json

    :param name: Name of the group.
    :param group_type: Type of the group, internal or external (default: internal).
    :param metadata: dict of metadata to associate with the group.
    :param policies: Policies to be tied to the group.
    :param member_group_ids: Group IDs to be assigned as group members.
    :param member_entity_ids: Entity IDs to be assigned as group members.
    :param mount_point: The "path" the method/backend was mounted on.
    :return: The response of the request.
    :rtype: requests.Response
    :raises exceptions.ParamValidationError: for non-dict metadata or an
        unsupported group_type.
    """
    if metadata is None:
        metadata = {}
    # Validate caller-supplied arguments before issuing the request.
    if not isinstance(metadata, dict):
        error_msg = 'unsupported metadata argument provided "{arg}" ({arg_type}), required type: dict"'
        raise exceptions.ParamValidationError(error_msg.format(
            arg=metadata,
            arg_type=type(metadata),
        ))
    if group_type not in ALLOWED_GROUP_TYPES:
        error_msg = 'unsupported group_type argument provided "{arg}", allowed values: ({allowed_values})'
        raise exceptions.ParamValidationError(error_msg.format(
            arg=group_type,
            allowed_values=ALLOWED_GROUP_TYPES,
        ))
    params = {
        'type': group_type,
        'metadata': metadata,
        'policies': policies,
        'member_group_ids': member_group_ids,
        'member_entity_ids': member_entity_ids,
    }
    api_path = '/v1/{mount_point}/group/name/{name}'.format(
        mount_point=mount_point,
        name=name,
    )
    response = self._adapter.post(
        url=api_path,
        json=params,
    )
    return response
def human_size_to_bytes(human_size):
    """Convert human-readable units to bytes.

    Accepts digits with an optional single unit suffix (K, M, G, T, P),
    e.g. '10K' -> 10240; a bare number means plain bytes.

    :raises ValueError: if the input does not match that format.
    """
    exponents = {'K': 1, 'M': 2, 'G': 3, 'T': 4, 'P': 5}
    match = re.match(r'^(\d+)([KMGTP])?$', six.text_type(human_size))
    if match is None:
        raise ValueError(
            'Size must be all digits, with an optional unit type '
            '(K, M, G, T, or P)'
        )
    number, unit = match.groups()
    # A missing unit maps to multiplier 1024**0 == 1.
    return int(number) * 1024 ** exponents.get(unit, 0)
def qset(self, name, index, value):
    """Set the list element at ``index`` to ``value``.

    :param string name: the queue name
    :param int index: the specified index, may be negative
    :param string value: the element value
    :return: the raw result of the 'qset' command
    """
    # Validate/coerce the index before sending the command.
    checked_index = get_integer('index', index)
    return self.execute_command('qset', name, checked_index, value)
def _save_states(self, state, serialized_readers_entity):
    """Run transaction to save state.

    Args:
      state: a model.MapreduceState entity.
      serialized_readers_entity: a model._HugeTaskPayload entity containing
        json serialized input readers.

    Returns:
      False if a fatal error is encountered and this task should be dropped
      immediately. True if transaction is successful. None if a previous
      attempt of this same transaction has already succeeded.
    """
    mr_id = state.key().id_or_name()
    # Re-fetch so we validate against the freshest stored state.
    fresh_state = model.MapreduceState.get_by_job_id(mr_id)
    if not self._check_mr_state(fresh_state, mr_id):
        return False
    # Non-zero active_shards means another task already initialized the
    # shards; treat as a duplicate (spurious) task execution.
    if fresh_state.active_shards != 0:
        logging.warning(
            "Mapreduce %s already has active shards. Looks like spurious task "
            "execution.", mr_id)
        return None
    config = util.create_datastore_write_config(state.mapreduce_spec)
    db.put([state, serialized_readers_entity], config=config)
    return True
def get_default_version(env):
    """Returns the default version string to use for MSVS.

    If no version was requested by the user through the MSVS environment
    variable, query all the available visual studios through
    get_installed_visual_studios, and take the highest one.

    Return
    ------
    version: str
        the default version.
    """
    if 'MSVS' not in env or not SCons.Util.is_Dict(env['MSVS']):
        # No proper MSVS dict yet: build one from the installed
        # Visual Studio versions.
        versions = [vs.version for vs in get_installed_visual_studios()]
        env['MSVS'] = {'VERSIONS' : versions}
    else:
        versions = env['MSVS'].get('VERSIONS', [])
    if 'MSVS_VERSION' not in env:
        # No explicit request: prefer the first detected version,
        # falling back to the first entry of SupportedVSList.
        if versions:
            env['MSVS_VERSION'] = versions[0]
        else:
            debug('get_default_version: WARNING: no installed versions found, '
                  'using first in SupportedVSList (%s)'%SupportedVSList[0].version)
            env['MSVS_VERSION'] = SupportedVSList[0].version
    # Mirror the chosen version into the MSVS dict for consumers.
    env['MSVS']['VERSION'] = env['MSVS_VERSION']
    return env['MSVS_VERSION']
def _setBatchSystemEnvVars(self):
    """Sets the environment variables required by the job store and
    those passed on the command line."""
    # Command-line environment (self.config) is applied second, so it
    # can override job-store-provided values.
    for env_source in (self._jobStore.getEnv(), self.config.environment):
        for var_name, var_value in iteritems(env_source):
            self._batchSystem.setEnv(var_name, var_value)
def contains(self, order, cell, include_smaller=False):
    """Test whether the MOC contains the given cell.

    If the include_smaller argument is true then the MOC is considered
    to include a cell if it includes part of that cell (at a higher
    order).

    >>> m = MOC(1, (5,))
    >>> m.contains(1, 5)
    True
    >>> m.contains(0, 1, True)
    True
    >>> m.contains(0, 1, False)
    False
    """
    checked_order = self._validate_order(order)
    checked_cell = self._validate_cell(checked_order, cell)
    return self._compare_operation(
        checked_order, checked_cell, include_smaller, 'check')
def iter_entry_points(self, group, name=None):
    """Yield entry point objects from `group` matching `name`.

    If `name` is None, yields all entry points in `group` from all
    distributions in the working set, otherwise only ones matching
    both `group` and `name` are yielded (in distribution order).
    """
    for dist in self:
        for entry in dist.get_entry_map(group).values():
            if name is None or name == entry.name:
                yield entry
def flush(self, multithread=True, **kwargs):
    """Flushes the internal write buffer.

    Uploads any buffered data as the next file part (asynchronously on
    the HTTP thread pool, or synchronously), then waits for all
    outstanding async uploads and re-raises the first failure.

    :param multithread: if True, upload the part via the thread pool.
    """
    if self._write_buf.tell() > 0:
        data = self._write_buf.getvalue()
        # Swap in a fresh buffer before uploading so new writes proceed.
        self._write_buf = BytesIO()
        if multithread:
            self._async_upload_part_request(data, index=self._cur_part, **kwargs)
        else:
            self.upload_part(data, self._cur_part, **kwargs)
        self._cur_part += 1
    if len(self._http_threadpool_futures) > 0:
        dxpy.utils.wait_for_all_futures(self._http_threadpool_futures)
        try:
            # Surface the first failure from any async part upload.
            for future in self._http_threadpool_futures:
                if future.exception() != None:
                    raise future.exception()
        finally:
            # Futures are consumed either way; start a fresh set.
            self._http_threadpool_futures = set()
def write(self, path=None, *args, **kwargs):
    """Perform formatting and write the formatted string to a file or stdout.

    Args:
        path (str): Full file path (default None, prints to stdout)
        *args: Positional arguments to format the editor with
        **kwargs: Keyword arguments to format the editor with
    """
    text = self.format(*args, **kwargs)
    if path is None:
        print(text)
    else:
        with io.open(path, 'w', newline="") as handle:
            handle.write(text)
def kill(self, exc_info=None):
    """Kill the container in a semi-graceful way.

    Entrypoints are killed, followed by any active worker threads.
    Next, dependencies are killed. Finally, any remaining managed
    threads are killed.

    If ``exc_info`` is provided, the exception will be raised by
    :meth:`~wait`.
    """
    if self._being_killed:
        # Another thread is already performing the kill; just wait for
        # the container to finish dying.
        _log.debug('already killing %s ... waiting for death', self)
        try:
            self._died.wait()
        except:
            pass
        return
    self._being_killed = True
    if self._died.ready():
        _log.debug('already stopped %s', self)
        return
    if exc_info is not None:
        _log.info('killing %s due to %s', self, exc_info[1])
    else:
        _log.info('killing %s', self)
    def safely_kill_extensions(ext_set):
        # Best-effort: a misbehaving extension must not abort teardown.
        try:
            ext_set.kill()
        except Exception as exc:
            _log.warning('Extension raised `%s` during kill', exc)
    # Teardown order: entrypoints -> workers -> dependencies -> threads.
    safely_kill_extensions(self.entrypoints.all)
    self._kill_worker_threads()
    safely_kill_extensions(self.extensions.all)
    self._kill_managed_threads()
    self.started = False
    if not self._died.ready():
        self._died.send(None, exc_info)
async def get_agents(self, addr=True, agent_cls=None):
    """Get addresses of all agents in all the slave environments.

    This is a managing function for
    :meth:`creamas.mp.MultiEnvironment.get_agents`.

    .. note::
        The ``addr`` and ``agent_cls`` parameters are deliberately
        ignored and hard-coded in the delegated call, because
        :class:`aiomas.rpc.Proxy` objects do not seem to handle
        (re)serialization. If proxies for all agents are needed, call
        each slave environment manager's :meth:`get_agents` directly.
    """
    return await self.menv.get_agents(addr=True, agent_cls=None,
                                      as_coro=True)
def get_current_url():
    """Return the current URL including the query string as a relative
    path. If the app uses subdomains, return an absolute path."""
    server_name = current_app.config.get('SERVER_NAME')
    if server_name and (
            request.environ['HTTP_HOST'].split(':', 1)[0] !=
            current_app.config['SERVER_NAME'].split(':', 1)[0]):
        # Host differs from the configured server name (subdomain in
        # use): fall back to the absolute URL.
        return request.url
    relative = url_for(request.endpoint, **request.view_args)
    query = request.query_string
    return relative + '?' + query.decode() if query else relative
def get_dois(query_str, count=100):
    """Search ScienceDirect through the API for articles; return DOIs.

    See http://api.elsevier.com/content/search/fields/scidir for
    constructing a query string to pass here. Example:
    'abstract(BRAF) AND all("colorectal cancer")'

    :param query_str: Elsevier search query string.
    :param count: maximum number of results to request.
    :return: list of DOI strings, or None on a non-200 response.
    """
    # NOTE(review): query_str is both appended to the URL path and sent
    # as the 'query' parameter — confirm the path component is intended.
    url = '%s/%s' % (elsevier_search_url, query_str)
    params = {'query': query_str,
              'count': count,
              'httpAccept': 'application/xml',
              'sort': '-coverdate',
              'field': 'doi'}
    res = requests.get(url, params)
    if not res.status_code == 200:
        return None
    tree = ET.XML(res.content, parser=UTB())
    doi_tags = tree.findall('atom:entry/prism:doi', elsevier_ns)
    dois = [dt.text for dt in doi_tags]
    return dois
def get_sentence(start=None, depth=7):
    """Follow the grammatical patterns to generate a random sentence.

    Expands `start` (defaults to the grammar's start symbol) by
    recursively choosing random productions, decrementing `depth` at
    each level. When the depth budget is exhausted, productions whose
    right-hand side is purely terminal are preferred so the recursion
    can actually terminate when the grammar allows it.
    """
    if not GRAMMAR:
        return 'Please set a GRAMMAR file'
    start = start if start else GRAMMAR.start()
    if not isinstance(start, Nonterminal):
        # Terminal symbol: nothing left to expand.
        return [start]
    productions = GRAMMAR.productions(start)
    if not depth:
        # Fix: the original filtered on `start` (always a Nonterminal
        # here), so the terminal preference never applied, and the
        # filtered list was immediately discarded anyway.
        terminal_productions = [
            p for p in productions
            if not any(isinstance(sym, Nonterminal) for sym in p.rhs())
        ]
        if terminal_productions:
            productions = terminal_productions
    production = random.choice(productions)
    sentence = []
    for piece in production.rhs():
        sentence += get_sentence(start=piece, depth=depth - 1)
    return sentence
def get_fields(model):
    """Returns a Model's knockout_fields, or the default set of field
    names.

    Falls back progressively: model.knockout_fields() if defined, else
    the keys of model_to_dict(model), else model._meta.get_fields().
    Returns [] (and logs the exception) if everything fails.
    """
    try:
        if hasattr(model, "knockout_fields"):
            fields = model.knockout_fields()
        else:
            try:
                fields = model_to_dict(model).keys()
            except Exception as e:
                # NOTE(review): _meta.get_fields() yields field objects,
                # not names — confirm callers accept both shapes.
                fields = model._meta.get_fields()
        return fields
    except Exception as e:
        logger.exception(e)
        return []
def get(self, obj, key):
    """Retrieve 'key' from an instance of a class which previously exposed it.

    @param key: a hashable object, previously passed to L{Exposer.expose}.
    @return: the bound method which was exposed with the given name on
        obj's key.
    @raise MethodNotExposed: when the key in question was not exposed
        with this exposer, or when the exposed function is shadowed by
        an attribute of the same name earlier in the MRO.
    """
    if key not in self._exposed:
        raise MethodNotExposed()
    rightFuncs = self._exposed[key]
    T = obj.__class__
    seen = {}
    # Walk the MRO from most- to least-derived class.
    for subT in inspect.getmro(T):
        for name, value in subT.__dict__.items():
            for rightFunc in rightFuncs:
                if value is rightFunc:
                    # If a more-derived class already defined this name
                    # (with some other value), the exposed function is
                    # shadowed and must not be returned.
                    if name in seen:
                        raise MethodNotExposed()
                    return value.__get__(obj, T)
            seen[name] = True
    raise MethodNotExposed()
def clean_regex(regex):
    """Escape any regex special characters other than alternation.

    :param regex: regex from datatables interface
    :type regex: str
    :rtype: str with regex to use with database
    """
    # Strip pre-existing backslashes, then escape every special
    # character except '|'.
    cleaned = regex.replace('\\', '')
    for special in '[^$.?*+(){}':
        cleaned = cleaned.replace(special, '\\' + special)
    # Collapse runs of '|' into a single alternation.
    while '||' in cleaned:
        cleaned = cleaned.replace('||', '|')
    # Drop trailing alternation operators.
    return cleaned.rstrip('|')
def authenticate(self, request, **credentials):
    """Only authenticates, does not actually login. See `login`.

    On success, clears the login-attempt throttling counter for these
    credentials; on failure, records the failed attempt.
    """
    from allauth.account.auth_backends import AuthenticationBackend
    self.pre_authenticate(request, **credentials)
    # Clear any user stashed by a previous authentication pass.
    AuthenticationBackend.unstash_authenticated_user()
    user = authenticate(request, **credentials)
    # Some backends stash the user instead of returning it; pick that
    # up as a fallback.
    alt_user = AuthenticationBackend.unstash_authenticated_user()
    user = user or alt_user
    if user and app_settings.LOGIN_ATTEMPTS_LIMIT:
        # Successful login resets the rate-limit counter.
        cache_key = self._get_login_attempts_cache_key(
            request, **credentials)
        cache.delete(cache_key)
    else:
        self.authentication_failed(request, **credentials)
    return user
def _percent_match(result, out, yP=None, *argl):
    """Returns the percent match for the specified prediction call;
    requires that the data was split before using an analyzed method.

    Args:
        result: predicted values from the call being analyzed.
        out (dict): output dictionary to save the result to (key "%").
        yP: expected values; looked up from the module-level _splits
            cache (keyed by id of the test set) when not given.
    """
    if len(argl) > 1:
        if yP is None:
            # Recover the held-out y values cached at split time.
            Xt = argl[1]
            key = id(Xt)
            if key in _splits:
                yP = _splits[key][3]
    if yP is not None:
        # NOTE(review): this math import appears unused — confirm.
        import math
        # Mean absolute deviation converted to a match fraction.
        out["%"] = round(1.-sum(abs(yP - result))/float(len(result)), 3)
def upload(ui, repo, name, **opts):
    """upload diffs to the code review server

    Uploads the current modifications for a given change to the server.
    """
    # NOTE: this is Python 2 code (print statement below).
    if codereview_disabled:
        raise hg_util.Abort(codereview_disabled)
    repo.ui.quiet = True
    cl, err = LoadCL(ui, repo, name, web=True)
    if err != "":
        raise hg_util.Abort(err)
    if not cl.local:
        raise hg_util.Abort("cannot upload non-local change")
    cl.Upload(ui, repo)
    # Print the issue URL for the uploaded change.
    print "%s%s\n" % (server_url_base, cl.name)
    return 0
def det_dataset(eb, passband, dataid, comp, time):
    """Determine which RV dataset to add parameters to, and create it.

    Since RV datasets can have values related to each component in
    phoebe2, but are component specific in phoebe1, it is important to
    determine which dataset to add parameters to.

    :param eb: phoebe2 bundle
    :param dataid: requested dataset label, or 'Undefined' for none
    :return: the newly added RV dataset
    """
    # NOTE(review): rvs, passband, comp and time are currently unused
    # here — confirm whether they are needed or leftovers.
    rvs = eb.get_dataset(kind='rv').datasets
    if dataid == 'Undefined':
        dataid = None
    try:
        # _check_label raises ValueError for forbidden labels.
        eb._check_label(dataid)
        rv_dataset = eb.add_dataset('rv', dataset=dataid, times=[])
    except ValueError:
        logger.warning("The name picked for the radial velocity curve is forbidden. Applying default name instead")
        rv_dataset = eb.add_dataset('rv', times=[])
    return rv_dataset
def _on_move(self, event):
    """Make the selection cursor follow the mouse pointer."""
    width = self.winfo_width()
    # Clamp the pointer x coordinate to the widget bounds.
    x = max(0, min(event.x, width))
    self.coords('cursor', x, 0, x, self.winfo_height())
    # Map pixel position to degrees (0-360).
    self._variable.set(round2((360. * x) / width))
def match_any(patterns, name):
    """Test if a name matches any of a list of patterns.

    Will return `True` if ``patterns`` is an empty list.

    Arguments:
        patterns (list): A list of wildcard patterns, e.g ``["*.py",
            "*.pyc"]``.
        name (str): A filename.

    Returns:
        bool: `True` if the name matches at least one of the patterns.
    """
    return not patterns or any(match(pattern, name) for pattern in patterns)
def update_room_name(self):
    """Updates self.name and returns True if the room name has changed."""
    try:
        response = self.client.api.get_room_name(self.room_id)
    except MatrixRequestError:
        # Could not fetch the room state; leave the name untouched.
        return False
    changed = "name" in response and response["name"] != self.name
    if changed:
        self.name = response["name"]
    return changed
def get_table_idbb_field(endianess, data):
    """Return data from a packed TABLE_IDB_BFLD bit-field.

    :param str endianess: The endianess to use when unpacking ('>' or '<')
    :param str data: The packed and machine-formatted data to parse;
        only the first 2 bytes are used.
    :rtype: tuple
    :return: Tuple of (proc_nbr, std_vs_mfg, selector)
    """
    (bitfield,) = struct.unpack(endianess + 'H', data[:2])
    proc_nbr = bitfield & 0x7ff           # bits 0-10
    std_vs_mfg = bool(bitfield & 0x800)   # bit 11
    selector = (bitfield & 0xf000) >> 12  # bits 12-15
    return (proc_nbr, std_vs_mfg, selector)
def ReadPathInfoHistory(self, client_id, path_type, components):
    """Reads a collection of hash and stat entries for a given path.

    Thin wrapper around ReadPathInfosHistories for a single components
    tuple.

    Args:
      client_id: An identifier string for a client.
      path_type: A type of a path to retrieve path history for.
      components: A tuple of path components for the path of interest.

    Returns:
      A list of `rdf_objects.PathInfo` ordered by timestamp ascending.
    """
    return self.ReadPathInfosHistories(
        client_id, path_type, [components])[components]
def find_by_content_type(content_type):
    """Find and return a format by content type.

    :param content_type: A string describing the internet media type.
    :raises UnknownFormat: if no registered format matches.
    """
    matches = (fmt for fmt in FORMATS if content_type in fmt.content_types)
    for fmt in matches:
        return fmt
    raise UnknownFormat('No format found with content type "%s"' % content_type)
def complete(self, query, current_url):
    """Called to interpret the server's response to an OpenID request.

    @param query: A dictionary of the query parameters for this HTTP
        request.
    @param current_url: The URL used to invoke the application; checked
        against the openid.return_to value in the response. If the
        check fails, the completion status will be FAILURE.
    @returns: a subclass of Response; its status attribute is one of
        SUCCESS, CANCEL, FAILURE, or SETUP_NEEDED.
    """
    endpoint = self.session.get(self._token_key)
    message = Message.fromPostArgs(query)
    response = self.consumer.complete(message, endpoint, current_url)
    # The stored discovery token is single-use; drop it regardless of
    # the outcome.
    try:
        del self.session[self._token_key]
    except KeyError:
        pass
    if (response.status in ['success', 'cancel'] and
            response.identity_url is not None):
        # Clean up any discovery state for the identity URL.
        disco = Discovery(self.session,
                          response.identity_url,
                          self.session_key_prefix)
        disco.cleanup(force=True)
    return response
def service_restarted(self, sentry_unit, service, filename,
                      pgrep_full=None, sleep_time=20):
    """Check if service was restarted.

    Compare a service's start time vs a file's last modification time
    (such as a config file for that service) to determine if the
    service has been restarted, after waiting `sleep_time` seconds.
    """
    self.log.warn('DEPRECATION WARNING: use '
                  'validate_service_config_changed instead of '
                  'service_restarted due to known races.')
    time.sleep(sleep_time)
    proc_start = self._get_proc_start_time(sentry_unit, service, pgrep_full)
    file_mtime = self._get_file_mtime(sentry_unit, filename)
    return proc_start >= file_mtime
def get_oauth_token_secret_name(self, provider):
    """Returns the token_secret name for the oauth provider.

    If none is configured, defaults to "oauth_token_secret" (configured
    via OAUTH_PROVIDERS and token_secret). Returns None implicitly when
    the provider is not configured at all.
    """
    for provider_config in self.oauth_providers:
        if provider_config["name"] != provider:
            continue
        return provider_config.get("token_secret", "oauth_token_secret")
def _readfloatle(self, length, start):
    """Read bits and interpret as a little-endian float.

    :param length: number of bits to read; must be 32 or 64.
    :param start: bit position to start reading from.
    :raises InterpretError: if length is not 32 or 64.
    """
    startbyte, offset = divmod(start + self._offset, 8)
    if not offset:
        # Byte-aligned: unpack directly from the underlying byte store.
        if length == 32:
            f, = struct.unpack('<f', bytes(self._datastore.getbyteslice(startbyte, startbyte + 4)))
        elif length == 64:
            f, = struct.unpack('<d', bytes(self._datastore.getbyteslice(startbyte, startbyte + 8)))
    else:
        # Not byte-aligned: fall back to the generic bit reader.
        if length == 32:
            f, = struct.unpack('<f', self._readbytes(32, start))
        elif length == 64:
            f, = struct.unpack('<d', self._readbytes(64, start))
    try:
        return f
    except NameError:
        # `f` was never bound above, i.e. length was not 32 or 64; the
        # NameError trick avoids re-checking length on the common path.
        raise InterpretError("floats can only be 32 or 64 bits long, "
                             "not {0} bits", length)
def build_tree_file_pathname(filename, directory_depth=8, pathname_separator_character=os.sep):
    """Return a file pathname built of the specified number of
    sub-directories, where each directory is named after the nth letter
    of the filename corresponding to the directory depth.

    Examples::

        >>> build_tree_file_pathname('foo.txt', 2, '/')
        'f/o/foo.txt'

    @param filename: name of a file, with or without extension.
    @param directory_depth: number of sub-directories to be generated.
    @param pathname_separator_character: pathname component separator,
        e.g. '/' for POSIX; defaults to ``os.sep``.
    @return: a file pathname.
    """
    tree_prefix = build_tree_pathname(
        filename, directory_depth, pathname_separator_character)
    return tree_prefix + filename
def _special_value_size(em):
    """Handle the "size" property, which has different behaviour for
    input elements vs everything else."""
    if em.tagName != 'input':
        return em.getAttribute('size', '')
    # Inputs default to size 20 and must be a positive integer.
    return convertToPositiveInt(em.getAttribute('size', 20), invalidDefault=20)
def get_user_trades(self, limit=0, offset=0, sort='desc'):
    """Return user's trade history.

    :param limit: Maximum number of trades to return. If set to 0 or
        lower, all trades are returned (default: 0).
    :type limit: int
    :param offset: Number of trades to skip.
    :type offset: int
    :param sort: Sort order by date and time: "desc" or "asc"
        (default: "desc").
    :type sort: str | unicode
    :return: User's trade history.
    :rtype: [dict]
    """
    self._log('get user trades')
    payload = {'book': self.name,
               'limit': limit,
               'offset': offset,
               'sort': sort}
    trades = self._rest_client.post(endpoint='/user_transactions',
                                    payload=payload)
    # Defensively truncate in case the server returned extra rows.
    if 0 < limit < len(trades):
        return trades[:limit]
    return trades
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.