code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def view(self, channel_names='auto',
gates=None,
diag_kw={}, offdiag_kw={},
gate_colors=None, **kwargs):
if channel_names == 'auto':
channel_names = list(self.channel_names)
def plot_region(channels, **kwargs):
if channels[0] == channels[1]:... | Generates a matrix of subplots allowing for a quick way
to examine how the sample looks in different channels.
Parameters
----------
channel_names : [list | 'auto']
List of channel names to plot.
offdiag_plot : ['histogram' | 'scatter']
Specifies the type... |
def serialize_basic(self, data, data_type, **kwargs):
    """Serialize a basic builtin data type.

    Serializes objects to str, int, float or bool.

    Possible kwargs:
    - is_xml bool: if set, adapt basic serializers without the need for
      basic_types_serializers.
    - basic_types_serializers dict[str, callable]: if set, use the
      callable as serializer.

    :param data: object to serialize.
    :param str data_type: name of the target type ('str', 'int',
        'float' or 'bool').
    :return: the serialized object.
    :raises ValueError: if ``data_type`` is not a supported basic type.
    """
    custom_serializer = self._get_custom_serializers(data_type, **kwargs)
    if custom_serializer:
        return custom_serializer(data)
    if data_type == 'str':
        return self.serialize_unicode(data)
    # SECURITY: the original used eval(data_type), which executes an
    # arbitrary expression; restrict to the documented basic types.
    basic_types = {'int': int, 'float': float, 'bool': bool}
    try:
        return basic_types[data_type](data)
    except KeyError:
        raise ValueError("Unknown basic data type: {}".format(data_type))
def read_host_file(path):
    """Read the host file and return a list of hostnames.

    Each non-empty line (after stripping surrounding whitespace) is
    taken as one hostname.

    :param path: path to the host file.
    :return: list of hostname strings.
    """
    hostnames = []
    # Python 2's `file(path).xreadlines()` no longer exists; iterate the
    # file object directly and close it deterministically.
    with open(path) as handle:
        for line in handle:
            hostname = line.strip()
            if hostname:
                hostnames.append(hostname)
    return hostnames
def download(self, replace=False):
    """Download the dataset archive from the hosted data store.

    The archive is saved under ``self.data_home``; the helper verifies
    the download by comparing the expected signature with the SHA-256
    of the downloaded archive, then extracts it.

    :param replace: if True, overwrite an existing local copy.
    """
    options = {
        "data_home": self.data_home,
        "replace": replace,
        "extract": True,
    }
    download_data(self.url, self.signature, **options)
def delete_from_environment(self, environment, synchronous=True):
if isinstance(environment, Environment):
environment_id = environment.id
else:
environment_id = environment
response = client.delete(
'{0}/environments/{1}'.format(self.path(), environment_id),
... | Delete this content view version from an environment.
This method acts much like
:meth:`nailgun.entity_mixins.EntityDeleteMixin.delete`. The
documentation on that method describes how the deletion procedure works
in general. This method differs only in accepting an ``environment``
... |
def add_dependency(self, from_task_name, to_task_name):
logger.debug('Adding dependency from {0} to {1}'.format(from_task_name, to_task_name))
if not self.state.allow_change_graph:
raise DagobahError("job's graph is immutable in its current state: %s"
% self.st... | Add a dependency between two tasks. |
def register_for_app(
self, app_label=None, exclude_models=None, exclude_model_classes=None
):
models = []
exclude_models = exclude_models or []
app_config = django_apps.get_app_config(app_label)
for model in app_config.get_models():
if model._meta.label_lower in ... | Registers all models for this app_label. |
def make_urls_hyperlinks(text: str) -> str:
find_url = r
replace_url = r'<a href="\1">\1</a>'
find_email = re.compile(r'([.\w\-]+@(\w[\w\-]+\.)+[\w\-]+)')
replace_email = r'<a href="mailto:\1">\1</a>'
text = re.sub(find_url, replace_url, text)
text = re.sub(find_email, replace_email, text)
r... | Adds hyperlinks to text that appears to contain URLs.
See
- http://stackoverflow.com/questions/1071191
- ... except that double-replaces everything; e.g. try with
``text = "me@somewhere.com me@somewhere.com"``
- http://stackp.online.fr/?p=19 |
def generate_exports():
    """Return shell export commands for installed genomes.

    For every installed genome emits ``export NAME=/path/to/genome``
    where NAME is the genome name upper-cased with runs of non-word
    characters replaced by underscores.  Genomes that fail to load are
    silently skipped (best-effort).

    :return: list of ``export`` command strings.
    """
    env = []
    for name in list_installed_genomes():
        try:
            g = Genome(name)
            env_name = re.sub(r'[^\w]+', "_", name).upper()
            env.append("export {}={}".format(env_name, g.filename))
        except Exception:
            # A bare `except:` would also swallow KeyboardInterrupt and
            # SystemExit; keep best-effort behavior for real errors only.
            pass
    return env
def unstaged():
with conf.within_proj_dir():
status = shell.run(
'git status --porcelain',
capture=True,
never_pretend=True
).stdout
results = []
for file_status in status.split(os.linesep):
if file_status.strip() and file_status[0] == ... | Return a list of unstaged files in the project repository.
Returns:
list[str]: The list of files not tracked by project git repo. |
def _saveState(self, path):
    """Save the current state and register a new session.

    Calls ``addSession`` first, then persists the state keyed by the
    (stringified) session count.

    :param path: destination path handed through to ``_save``.
    """
    self.addSession()
    self._save(str(self.n_sessions), path)
def del_kwnkb(mapper, connection, target):
    """Remove the taxonomy file backing a knowledge base, if present.

    Event-listener style hook: only acts when the target KB is of the
    'taxonomy' type and its file exists on disk.
    """
    if target.kbtype != KnwKB.KNWKB_TYPES['taxonomy']:
        return
    if os.path.isfile(target.get_filename()):
        os.remove(target.get_filename())
def worker_stop(obj, worker_ids):
if len(worker_ids) == 0:
msg = 'Would you like to stop all workers?'
else:
msg = '\n{}\n\n{}'.format('\n'.join(worker_ids),
'Would you like to stop these workers?')
if click.confirm(msg, default=True, abort=True):
st... | Stop running workers.
\b
WORKER_IDS: The IDs of the worker that should be stopped or none to stop them all. |
def _iter_grouped(self):
    """Iterate over each element (sub-object) in this group.

    Selects each group's indices along the grouped dimension of the
    wrapped object and yields the resulting sub-objects one by one.
    """
    dim = self._group_dim
    for group_indices in self._group_indices:
        yield self._obj.isel(**{dim: group_indices})
def compute_groups_matrix(groups):
if not groups:
return None
num_vars = len(groups)
unique_group_names = list(OrderedDict.fromkeys(groups))
number_of_groups = len(unique_group_names)
indices = dict([(x, i) for (i, x) in enumerate(unique_group_names)])
output = np.zeros((num_vars, number... | Generate matrix which notes factor membership of groups
Computes a k-by-g matrix which notes factor membership of groups
where:
k is the number of variables (factors)
g is the number of groups
Also returns a g-length list of unique group_names whose positions
correspond to the order of ... |
def _calculate_expires(self):
    """Calculate the session expiry from the configured timeout.

    NOTE(review): ``expires`` is reset to ``None`` and then immediately
    reassigned -- presumably the backend-client property setter has
    side effects on assignment; confirm before simplifying.
    Uses a naive UTC datetime (``datetime.utcnow``).
    """
    self._backend_client.expires = None
    now = datetime.utcnow()
    self._backend_client.expires = now + timedelta(seconds=self._config.timeout)
def getShocks(self):
    """Determine which employed agents become unemployed this period.

    All currently unemployed agents remain unemployed until death; for
    each employed agent, unemployment is drawn as a Bernoulli event
    with probability ``self.UnempPrb``.

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    employed = self.eStateNow == 1.0
    n_employed = int(np.sum(employed))
    draw_seed = self.RNG.randint(0, 2**31 - 1)
    newly_unemployed = drawBernoulli(n_employed, p=self.UnempPrb, seed=draw_seed)
    self.eStateNow[employed] = 1.0 - newly_unemployed
def remove_prefix(self, prefix):
    """Remove ``prefix`` from this set.

    This is a no-op when the prefix is not present.  When the removed
    prefix was the preferred one, an arbitrary remaining prefix (or
    None) becomes preferred.
    """
    if prefix in self.__prefix_map:
        ni = self.__lookup_prefix(prefix)
        ni.prefixes.discard(prefix)
        del self.__prefix_map[prefix]
        if ni.preferred_prefix == prefix:
            # Pick any surviving prefix, or None when the set is empty.
            ni.preferred_prefix = next(iter(ni.prefixes), None)
def create_alert_policy(self, policy_name):
policy_data = { 'policy': { 'incident_preference': 'PER_POLICY', 'name': policy_name } }
create_policy = requests.post(
'https://api.newrelic.com/v2/alerts_policies.json',
headers=self.auth_header,
data=json.dumps(policy_data))
create_policy.rais... | Creates an alert policy in NewRelic |
def units_convertible(units1, units2, reftimeistime=True):
    """Return True if units1 can be converted to units2, else False.

    :param str units1: a string representing the source units.
    :param str units2: a string representing the target units.
    :param reftimeistime: unused; kept for backward compatibility.
    """
    try:
        source = Unit(units1)
        target = Unit(units2)
    except ValueError:
        # Either string failed to parse as a unit.
        return False
    return source.is_convertible(target)
def rows(self, *args) -> List[List[Well]]:
row_dict = self._create_indexed_dictionary(group=1)
keys = sorted(row_dict)
if not args:
res = [row_dict[key] for key in keys]
elif isinstance(args[0], int):
res = [row_dict[keys[idx]] for idx in args]
elif isinst... | Accessor function used to navigate through a labware by row.
With indexing one can treat it as a typical python nested list.
To access row A for example, simply write: labware.rows()[0]. This
will output ['A1', 'A2', 'A3', 'A4'...]
Note that this method takes args for backward-compatib... |
def ssh(ctx, cluster_id, key_file):
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
client = session.client('emr')
result = client.describe_cluster(ClusterId=cluster_id)
target_dns = result['Cluster']['MasterPublicDnsName']
ssh_options = '-o StrictHostKeyChecking=no -o ServerAliveInterval=10'
... | SSH login to EMR master node |
def trimsquants(self, col: str, sup: float):
    """Remove superior quantiles from the dataframe.

    :param col: column name
    :type col: str
    :param sup: superior quantile
    :type sup: float
    :example: ``ds.trimsquants("Col 1", 0.99)``
    """
    try:
        trimmed = self._trimquants(col, None, sup)
    except Exception as e:
        self.err(e, self.trimsquants, "Can not trim superior quantiles")
        return
    self.df = trimmed
def plot_estimates(positions, estimates):
x = list(positions)
fig = plt.figure(figsize=(SUBPLOT_WIDTH * len(estimates), FIGURE_HEIGHT))
for i, (title, y) in enumerate(zip(ESTIMATE_TITLES, estimates)):
ax = fig.add_subplot(1, len(estimates), i + 1)
ax.plot(x, y, linewidth=LINE_WIDTH, c=LINE_C... | Plots density, and probability estimates.
Parameters
----------
positions : iterable of float
Paragraph positions for which densities, and probabilities were estimated.
estimates : six-tuple of (sequence of float)
Estimates of P(relevant), p(position), p(position | relevant), P(position... |
def natural_neighbor_to_grid(xp, yp, variable, grid_x, grid_y):
    r"""Generate a natural neighbor interpolation of the given points to a regular grid.

    This assigns values to the given grid using the Liang and Hale
    [Liang2010]_ approach.

    Parameters
    ----------
    xp: (N, ) ndarray
        x-coordinates of observations
    yp: (N, ) ndarray
        y-coordinates of observations
    variable: (N, ) ndarray
        observation values
    grid_x, grid_y: ndarray
        regular grid coordinates

    Returns
    -------
    ndarray
        interpolated values reshaped to the grid shape
    """
    # The original body contained a stray bare `r` (the raw-string prefix
    # of a mangled docstring) which raised NameError at call time.
    points_obs = list(zip(xp, yp))
    points_grid = generate_grid_coords(grid_x, grid_y)
    img = natural_neighbor_to_points(points_obs, variable, points_grid)
    return img.reshape(grid_x.shape)
def get_analysis_data_by_title(self, ar_data, title):
    """Template helper: pick the Analysis entry matching a service title.

    :param ar_data: dictionary structure as returned by ``_ws_data``.
    :param title: name of the Analysis Service to look for.
    :return: the matching analysis dict, or None when absent.
    """
    candidates = ar_data.get("analyses", [])
    return next(
        (entry for entry in candidates if entry.get("title") == title),
        None,
    )
def first_location_of_minimum(x):
    """Return the first location of the minimal value of x.

    The position is calculated relative to the length of x.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: relative position of the first minimum, or NaN for empty input
    :rtype: float
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    # np.NaN was removed in NumPy 2.0; use the lowercase alias.
    return np.argmin(x) / len(x) if len(x) > 0 else np.nan
def _apply_replacement(error, found_file, file_lines):
    """Apply a single replacement and rewrite the open file in place.

    Replaces the offending line (1-based ``error[1].line``) with
    ``error[1].replacement``, then writes the joined lines back to
    ``found_file`` and truncates any leftover tail.
    """
    line_index = error[1].line - 1
    file_lines[line_index] = error[1].replacement
    rewritten = "".join(file_lines)
    found_file.seek(0)
    found_file.write(rewritten)
    found_file.truncate()
def get_es(urls=None, timeout=DEFAULT_TIMEOUT, force_new=False, **settings):
urls = urls or DEFAULT_URLS
if 'hosts' in settings:
raise DeprecationWarning('"hosts" is deprecated in favor of "urls".')
if not force_new:
key = _build_key(urls, timeout, **settings)
if key in _cached_elast... | Create an elasticsearch `Elasticsearch` object and return it.
This will aggressively re-use `Elasticsearch` objects with the
following rules:
1. if you pass the same argument values to `get_es()`, then it
will return the same `Elasticsearch` object
2. if you pass different argument values to `g... |
def update_video_image(edx_video_id, course_id, image_data, file_name):
try:
course_video = CourseVideo.objects.select_related('video').get(
course_id=course_id, video__edx_video_id=edx_video_id
)
except ObjectDoesNotExist:
error_message = u'VAL: CourseVideo not found for edx... | Update video image for an existing video.
NOTE: If `image_data` is None then `file_name` value will be used as it is, otherwise
a new file name is constructed based on uuid and extension from `file_name` value.
`image_data` will be None in case of course re-run and export.
Arguments:
image_dat... |
def deactivate_(self):
    """Deactivate this object and reset shmem-related members to None.

    Runs the ``preDeactivate_`` hook first, then clears the active flag
    and drops references to the image dimensions and client.
    """
    self.preDeactivate_()
    self.active = False
    self.image_dimensions = None
    self.client = None
def item_frequency(sa, xlabel=LABEL_DEFAULT, ylabel=LABEL_DEFAULT, title=LABEL_DEFAULT):
if (not isinstance(sa, tc.data_structures.sarray.SArray) or
sa.dtype != str):
raise ValueError("turicreate.visualization.item_frequency supports " +
"SArrays of dtype str")
title = _get_title(ti... | Plots an item frequency of the sarray provided as input, and returns the
resulting Plot object.
The function supports SArrays with dtype str.
Parameters
----------
sa : SArray
The data to get an item frequency for. Must have dtype str
xlabel : str (optional)
The text label for... |
def __callback(self, data):
    """Safely invoke the registered message callback.

    Any exception raised by the callback is logged and swallowed so a
    faulty handler cannot break the caller.

    :param data: Associated stanza passed through to the callback.
    """
    handler = self.__cb_message
    if handler is None:
        return
    try:
        handler(data)
    except Exception as ex:
        _logger.exception("Error calling method: %s", ex)
def set_value(self, dry_wet: LeakSensorState):
    """Set the value of the state to dry or wet.

    Publishes 1 to subscribers when ``dry_wet`` matches this state's
    configured type, otherwise 0.
    """
    matches = dry_wet == self._dry_wet_type
    self._update_subscribers(1 if matches else 0)
def tt_avg(self, print_output=True, output_file="tt.csv"):
    """Compute the average term-topic matrix.

    Averages ``self.tt`` over its third axis and, when requested,
    saves the result to ``output_file`` as CSV.

    :param print_output: when True, write the matrix to ``output_file``.
    :param output_file: CSV destination path.
    :return: the averaged 2-D matrix.
    """
    averaged = self.tt.mean(axis=2)
    if print_output:
        np.savetxt(output_file, averaged, delimiter=",")
    return averaged
def dilate(self, size):
if size > 0:
from scipy.ndimage.morphology import binary_dilation
size = (size * 2) + 1
coords = self.coordinates
tmp = zeros(self.extent + size * 2)
coords = (coords - self.bbox[0:len(self.center)] + size)
tmp[coord... | Dilate a region using morphological operators.
Parameters
----------
size : int
Size of dilation in pixels |
def find_module(self, fullname, path=None):
basepaths = [""] + list(sys.path)
if fullname.startswith("."):
if path is None:
return None
fullname = fullname[1:]
basepaths.insert(0, path)
fullpath = os.path.join(*fullname.split("."))
for ... | Searches for a Coconut file of the given name and compiles it. |
def do_static_merge(cls, c_source, c_target):
    """Fold one cluster into another during static merging.

    By this stage there is no need to maintain per-instance cluster
    bookkeeping: the source's mentions are moved into the target, the
    source is reparented and dropped from the global cluster list, and
    the mention lookup is repointed at the target.
    """
    c_target.extend(c_source)
    c_source.parent = c_target.parent
    cls.CLUSTERS.remove(c_source)
    mention_map = cls.MENTION_TO_CLUSTER
    for mention in c_source.mentions:
        mention_map[mention] = c_target
def prepare(self, context):
if __debug__:
log.debug("Preparing request context.", extra=dict(request=id(context)))
context.request = Request(context.environ)
context.response = Response(request=context.request)
context.environ['web.base'] = context.request.script_name
context.request.remainder = context.re... | Add the usual suspects to the context.
This adds `request`, `response`, and `path` to the `RequestContext` instance. |
def css(app, env):
srcdir = os.path.abspath(os.path.dirname(__file__))
cssfile = 'bolditalic.css'
csspath = os.path.join(srcdir, cssfile)
buildpath = os.path.join(app.outdir, '_static')
try:
os.makedirs(buildpath)
except OSError:
if not os.path.isdir(buildpath):
raise... | Add bolditalic CSS.
:param app: Sphinx application context.
:param env: Sphinx environment context. |
def submit_sms_conversion(self, message_id, delivered=True, timestamp=None):
params = {
"message-id": message_id,
"delivered": delivered,
"timestamp": timestamp or datetime.now(pytz.utc),
}
_format_date_param(params, "timestamp")
return self.post(self.... | Notify Nexmo that an SMS was successfully received.
:param message_id: The `message-id` str returned by the send_message call.
:param delivered: A `bool` indicating that the message was or was not successfully delivered.
:param timestamp: A `datetime` object containing the time the SMS arrived.... |
def quantiles(x, qlist=(2.5, 25, 50, 75, 97.5)):
x = x.copy()
if x.ndim > 1:
sx = sort(x.T).T
else:
sx = sort(x)
try:
quants = [sx[int(len(sx) * q / 100.0)] for q in qlist]
return dict(zip(qlist, quants))
except IndexError:
print_("Too few elements for quantil... | Returns a dictionary of requested quantiles from array
:Arguments:
x : Numpy array
An array containing MCMC samples
qlist : tuple or list
A list of desired quantiles (defaults to (2.5, 25, 50, 75, 97.5)) |
def build_job(name=None, parameters=None):
if not name:
raise SaltInvocationError('Required parameter \'name\' is missing')
server = _connect()
if not job_exists(name):
raise CommandExecutionError('Job \'{0}\' does not exist.'.format(name))
try:
server.build_job(name, parameters)... | Initiate a build for the provided job.
:param name: The name of the job is check if it exists.
:param parameters: Parameters to send to the job.
:return: True is successful, otherwise raise an exception.
CLI Example:
.. code-block:: bash
salt '*' jenkins.build_job jobname |
def get_generation_code(self, **gencode):
channels, verts = self.coordinates
channels = ', '.join(["'{}'".format(ch) for ch in channels])
verts = list(verts)
if len(verts) == 1:
verts = verts[0]
if len(verts) == 1:
verts = verts[0]
verts = ... | Generates python code that can create the gate. |
def is_ip(string):
    """Return True if the given string is an IPv4 address, False otherwise.

    :type string: str
    :param string: Any string.
    :rtype: bool
    :return: True if the string is a dotted-quad IPv4 address.
    """
    # fullmatch: re.match() anchors only at the start, so strings with
    # trailing garbage such as '1.2.3.4x' or '1.2.3.4.5' were accepted.
    mo = re.fullmatch(r'(\d+)\.(\d+)\.(\d+)\.(\d+)', string)
    if mo is None:
        return False
    # Each octet must fit in 0..255 (range test instead of O(n) list scan).
    return all(0 <= int(group) <= 255 for group in mo.groups())
def _desy_bookkeeping2marc(self, key, value):
    """Populate the ``595_D`` MARC field.

    Without an ``identifier`` key, returns the ``595_D`` payload; with
    one, it instead appends a DESY entry to the ``035`` MARC field
    (side effect on ``self``) and returns None.
    """
    if 'identifier' not in value:
        return {
            'a': value.get('expert'),
            'd': value.get('date'),
            's': value.get('status'),
        }
    entry = {'9': 'DESY', 'z': value['identifier']}
    self.setdefault('035', []).append(entry)
def canonicalize_half_turns(
        half_turns: Union[sympy.Basic, float]
) -> Union[sympy.Basic, float]:
    """Wrap the input into the half-open range (-1, +1].

    Symbolic (sympy) expressions are returned untouched.
    """
    if isinstance(half_turns, sympy.Basic):
        return half_turns
    wrapped = half_turns % 2
    return wrapped - 2 if wrapped > 1 else wrapped
def sar(patch, cols, splits, divs, ear=False):
def sar_y_func(spatial_table, all_spp):
return np.mean(spatial_table['n_spp'])
def ear_y_func(spatial_table, all_spp):
endemic_counter = 0
for spp in all_spp:
spp_in_cell = [spp in x for x in spatial_table['spp_set']]
... | Calculates an empirical species area or endemics area relationship
Parameters
----------
{0}
divs : str
Description of how to divide x_col and y_col. See notes.
ear : bool
If True, calculates an endemics area relationship
Returns
-------
{1} Result has 5 columns; div, x... |
def bundle_view(parser, token):
bits = token.split_contents()
if len(bits) < 3:
raise TemplateSyntaxError("'%s' takes at least two arguments"
" bundle and view_name" % bits[0])
bundle = parser.compile_filter(bits[1])
viewname = parser.compile_filter(bits[2])
... | Returns an string version of a bundle view. This is done by
calling the `get_string_from_view` method of the provided bundle.
This tag expects that the request object as well as the
the original url_params are available in the context.
Requires two arguments bundle and the name of the view
you wan... |
def __unpack_tgz(self, filename):
if isinstance(filename, string_types) and self.__isValidTGZ(filename) and tarfile.is_tarfile(filename):
with tarfile.open(filename, mode='r:gz') as t:
for name in t.getnames():
t.extract(name, self.plugin_abspath)
els... | Unpack the `tar.gz`, `tgz` compressed file format |
def _create_cipher(self, password, salt, IV):
    """Create the AES-CFB cipher used to encrypt or decrypt a payload.

    Derives the key from the password with PBKDF2 using the given salt,
    truncated to ``self.block_size`` bytes.
    """
    from Crypto.Cipher import AES
    from Crypto.Protocol.KDF import PBKDF2
    derived = PBKDF2(password, salt, dkLen=self.block_size)
    key = derived[:self.block_size]
    return AES.new(key, AES.MODE_CFB, IV)
def reMutualReceptions(self):
    """Return all mutual receptions between this object and other planets.

    Only ruler and exaltation receptions are included.

    :return: dict mapping other planet IDs to their mutual-reception info.
    """
    others = copy(const.LIST_SEVEN_PLANETS)
    others.remove(self.obj.id)
    receptions = {}
    for planet_id in others:
        mr = self.dyn.reMutualReceptions(self.obj.id, planet_id)
        if mr:
            receptions[planet_id] = mr
    return receptions
def is_twss(self, phrase):
    """Classify whether *phrase* reads as an entendre.

    :param phrase: text to classify.
    :return: the classifier's label for the phrase's feature set.
    """
    return self.classifier.classify(self.extract_features(phrase))
def _set_bounds(self, bounds):
    """Set this parameter's boundaries to (min_value, max_value).

    NOTE(review): both bounds are reset to None before being assigned;
    presumably property setters validate min against max on assignment,
    so clearing first avoids a transient conflict with the old values --
    confirm before simplifying.

    :param bounds: two-tuple of (min_value, max_value).
    """
    min_value, max_value = bounds
    self.min_value = None
    self.max_value = None
    self.min_value = min_value
    self.max_value = max_value
def find(self, location):
    """Find the specified location in the store.

    @param location: The I{location} part of a URL.
    @type location: str
    @return: An input stream to the document.
    @rtype: StringIO
    """
    try:
        content = self.store[location]
    except KeyError:
        # Modernized from Py2 `raise Exception, reason` (a SyntaxError
        # on Python 3); also narrow the bare `except:` to the lookup
        # failure it actually guards against.
        raise Exception('location "%s" not in document store' % location)
    return StringIO(content)
def lock(self):
    """Mark the current job as "already running" via a cache flag.

    :return: True when the lock was acquired, False when another lock
        already exists.  Note: the get-then-set sequence is not atomic.
    """
    if self.cache.get(self.lock_name):
        return False
    self.cache.set(self.lock_name, timezone.now(), self.timeout)
    return True
def abort_submission(namespace, workspace, submission_id):
    """Abort a running job in a workspace.

    Args:
        namespace (str): project to which workspace belongs
        workspace (str): workspace name
        submission_id (str): submission's unique identifier

    Swagger:
        https://api.firecloud.org/#!/Submissions/deleteSubmission
    """
    uri = "workspaces/{0}/{1}/submissions/{2}".format(
        namespace, workspace, submission_id)
    return __delete(uri)
def deep_merge(base, extra):
if extra is None:
return
for key, value in extra.items():
if value is None:
if key in base:
del base[key]
elif isinstance(base.get(key), dict) and isinstance(value, dict):
deep_merge(base[key], value)
else:
... | Deeply merge two dictionaries, overriding existing keys in the base.
:param base: The base dictionary which will be merged into.
:param extra: The dictionary to merge into the base. Keys from this
dictionary will take precedence. |
def create(self):
steps = [
(self.create_role, (), {}),
(self.create_vpc, (), {}),
(self.create_cluster, (), {}),
(self.create_node_group, (), {}),
(self.create_spot_nodes, (), {}),
(self.create_utilities, (), {}),
]
for ste... | Deploy a cluster on Amazon's EKS Service configured
for Jupyterhub Deployments. |
def get_tree(self, process_name):
    """Return the tree managing time-periods for the given process.

    :param process_name: name of the process to look for.
    :return: the first tree containing the process, or None.
    """
    return next(
        (tree for tree in self.trees.values() if process_name in tree),
        None,
    )
def get_fw_policy(self, policy_id):
policy = None
try:
policy = self.neutronclient.show_firewall_policy(policy_id)
except Exception as exc:
LOG.error("Failed to get firewall plcy for id %(id)s "
"Exc %(exc)s",
{'id': policy_id, ... | Return the firewall policy, given its ID. |
def in_file(self, fn: str) -> Iterator[Statement]:
    """Return an iterator over all statements belonging to file *fn*.

    Unknown filenames yield nothing.
    """
    for statement in self.__file_to_statements.get(fn, []):
        yield statement
def send_response(self, code, message=None):
    """Add the response header to the headers buffer and log the code.

    Also sends two standard headers: the server software version and
    the current date.

    :param code: HTTP status code.
    :param message: optional reason phrase (defaults per status code).
    """
    self.log_request(code)
    self.send_response_only(code, message)
    self.send_header('Server', self.version_string())
    self.send_header('Date', self.date_time_string())
def create_driver(self):
driver_type = self.config.get('Driver', 'type')
try:
if self.config.getboolean_optional('Server', 'enabled'):
self.logger.info("Creating remote driver (type = %s)", driver_type)
driver = self._create_remote_driver()
else:
... | Create a selenium driver using specified config properties
:returns: a new selenium driver
:rtype: selenium.webdriver.remote.webdriver.WebDriver |
def _tp__get_typed_properties(self):
    """Return a tuple of typed attribute values usable for comparisons.

    Raises:
        NotImplementedError: if this class was mixed into a class that
            was not created by _AnnotatedObjectMeta (any missing
            attribute surfaces as AttributeError and is converted).
    """
    try:
        return tuple(getattr(self, name) for name in self._tp__typed_properties)
    except AttributeError:
        raise NotImplementedError
def _traverse_list(self, input_list, resolution_data, resolver_method):
    """Traverse a list, resolving intrinsic functions on every element.

    Mutates *input_list* in place and returns it.

    :param input_list: list of input values.
    :param resolution_data: data that ``resolver_method`` needs to operate.
    :param resolver_method: method that resolves an intrinsic function
        when it detects one.
    :return: the same list with every element resolved.
    """
    for position, element in enumerate(input_list):
        resolved = self._traverse(element, resolution_data, resolver_method)
        input_list[position] = resolved
    return input_list
def lex(filename):
    """Generate (token, line, quoted) tuples from an nginx config file."""
    with io.open(filename, mode='r', encoding='utf-8') as handle:
        balanced = _balance_braces(_lex_file_object(handle), filename)
        for token, line, quoted in balanced:
            yield (token, line, quoted)
def set_double_stack(socket_obj, double_stack=True):
try:
opt_ipv6_only = socket.IPV6_V6ONLY
except AttributeError:
if os.name == "nt":
opt_ipv6_only = 27
elif platform.system() == "Linux":
opt_ipv6_only = 26
else:
raise
socket_obj.setsocko... | Sets up the IPv6 double stack according to the operating system
:param socket_obj: A socket object
:param double_stack: If True, use the double stack, else only support IPv6
:raise AttributeError: Python or system doesn't support V6
:raise socket.error: Error setting up the double stack value |
def _parse_scram_response(response):
    """Split a SCRAM response into a dict of key/value byte pairs."""
    pairs = (item.split(b"=", 1) for item in response.split(b","))
    return {key: value for key, value in pairs}
def deprecation_warning(func, name):
@wraps(func)
def caller(*args, **kwargs):
logger = logging.getLogger(__name__)
instance = func(*args, **kwargs)
logger.warning(
"Distribution `chaospy.{}` has been renamed to ".format(name) +
"`chaospy.{}` and will be deprecate... | Add a deprecation warning do each distribution. |
def _put_overlay(self, overlay_name, overlay):
if not isinstance(overlay, dict):
raise TypeError("Overlay must be dict")
if set(self._identifiers()) != set(overlay.keys()):
raise ValueError("Overlay keys must be dataset identifiers")
self._storage_broker.put_overlay(overl... | Store overlay so that it is accessible by the given name.
:param overlay_name: name of the overlay
:param overlay: overlay must be a dictionary where the keys are
identifiers in the dataset
:raises: TypeError if the overlay is not a dictionary,
ValueErro... |
def enqueue(self, item_type, item):
    """Queue a new data item under ``item_type``.

    Appending is guarded by ``self.enlock`` so concurrent producers do
    not interleave.
    """
    with self.enlock:
        self.queue[item_type].append(item)
def db(self, connection_string=None):
connection_string = connection_string or self.settings["db"]
if not hasattr(self, "_db_conns"):
self._db_conns = {}
if not connection_string in self._db_conns:
self._db_conns[connection_string] = oz.sqlalchemy.session(connection_strin... | Gets the SQLALchemy session for this request |
def print_statistics(self):
    """Print the Q1, Q2, and cR statistics for the variogram fit.

    NOTE that ideally Q1 is close to zero, Q2 is close to 1, and cR is
    as small as possible.
    """
    for label, statistic in (("Q1", self.Q1), ("Q2", self.Q2), ("cR", self.cR)):
        print(label, "=", statistic)
def determine_struct_tree_subtype(self, data_type, obj):
if '.tag' not in obj:
raise bv.ValidationError("missing '.tag' key")
if not isinstance(obj['.tag'], six.string_types):
raise bv.ValidationError('expected string, got %s' %
bv.generic_typ... | Searches through the JSON-object-compatible dict using the data type
definition to determine which of the enumerated subtypes `obj` is. |
def l2_regression_loss(y, target, name=None):
    """Calculate the square root of the SSE between y and target.

    Args:
        y: the calculated values.
        target: the desired values.
        name: the name for this op, defaults to l2_regression.

    Returns:
        A tensorflow op.
    """
    with tf.name_scope(name, 'l2_regression', [y, target]) as scope:
        y_tensor = tf.convert_to_tensor(y, name='y')
        target_tensor = tf.convert_to_tensor(target, name='target')
        sq_loss = l2_regression_sq_loss(y_tensor, target_tensor, name=scope)
        return tf.sqrt(sq_loss)
def _readuint(self, length, start):
if not length:
raise InterpretError("Cannot interpret a zero length bitstring "
"as an integer.")
offset = self._offset
startbyte = (start + offset) // 8
endbyte = (start + offset + length - 1) // ... | Read bits and interpret as an unsigned int. |
def stream(self):
    """Return the stream to write the log content to.

    Lazily creates a named temporary file on first access and
    best-effort copies the remote log data into it.

    @return: the backing temporary file object.
    """
    if self._stream is None:
        self._stream = tempfile.NamedTemporaryFile(delete=False)
        try:
            self._stream.write(self.client.open(self.filename, view='data').read())
        except Exception:
            # Best-effort prefill; a bare `except:` would also swallow
            # KeyboardInterrupt/SystemExit.
            pass
    return self._stream
def delete_host_from_segment(ipaddress, networkaddress, auth, url):
    """Remove a host's IP reservation from a network segment.

    Looks up the host id for *ipaddress* within *networkaddress*, then
    removes the scope IP via the helper.

    :param ipaddress: IP address of the host to remove.
    :param networkaddress: network segment the host belongs to.
    :param auth: authentication object carrying ``creds`` and ``url``.
    :param url: base service URL used for the host-id lookup.
    """
    host_id = get_host_id(ipaddress, networkaddress, auth, url)
    remove_scope_ip(host_id, auth.creds, auth.url)
def intersect_regions(flist):
    """Construct the intersection of all regions in the given files.

    Parameters
    ----------
    flist : list
        A list of region filenames (at least two).

    Returns
    -------
    region : Region
        The intersection of all regions, possibly empty.

    Raises
    ------
    Exception
        When fewer than two filenames are supplied.
    """
    if len(flist) < 2:
        raise Exception("Require at least two regions to perform intersection")
    result = Region.load(flist[0])
    for other_file in flist[1:]:
        result.intersect(Region.load(other_file))
    return result
def set_property(self, name, value):
    """Set a property value by name, translating to the correct dbus type.

    The new value is coerced to the dbus type of the property's current
    value.  See also :py:meth:`get_property`.

    :param str name: the property name in the object's dictionary whose
        value shall be set.
    :param value: the property's new value.
    """
    current_type = type(self.get_property(name))
    dbus_value = translate_to_dbus_type(current_type, value)
    self._interface.SetProperty(name, dbus_value)
def process_like(proc):
    """Return an exact clone of *proc* with a refreshed creation date.

    The clone includes state and all subprocesses.

    :param proc: process to clone.
    :return: new process identical to the given one except for its
        ``creation_date``, which is set to the current local time.
    """
    clone = copy.deepcopy(proc)
    timestamp = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.localtime())
    clone.creation_date = timestamp
    return clone
def add_string_pairs_from_button_element(xib_file, results, button, special_ui_components_prefix):
button_entry_comment = extract_element_internationalized_comment(button)
if button_entry_comment is None:
return
for state in button.getElementsByTagName('state'):
state_name = state.attributes... | Adds strings pairs from a button xib element.
Args:
xib_file (str): Path to the xib file.
results (list): The list to add the results to.
button(element): The button element from the xib, to extract the string pairs from.
special_ui_components_prefix(str): A custom prefix for intern... |
def set_role_config_groups(self, role_config_group_refs):
    """Update the role config groups in a host template.

    @param role_config_group_refs: List of role config group refs.
    @return: An ApiHostTemplate object.
    """
    updated = copy.copy(self)
    updated.roleConfigGroupRefs = role_config_group_refs
    return self._do_update(updated)
def load_global_conf(cls, global_configuration):
logger.debug("Propagate global parameter for %s:", cls)
for prop, entry in global_configuration.properties.items():
if not entry.managed or not getattr(entry, 'class_inherit'):
continue
for (cls_dest, change_name) i... | Apply global Alignak configuration.
Some objects inherit some properties from the global configuration if they do not
define their own value. E.g. the global 'accept_passive_service_checks' is inherited
by the services as 'accept_passive_checks'
:param cls: parent object
:type ... |
def inter(a, b):
assert isinstance(a, stypes.SpiceCell)
assert isinstance(b, stypes.SpiceCell)
assert a.dtype == b.dtype
if a.dtype is 0:
c = stypes.SPICECHAR_CELL(max(a.size, b.size), max(a.length, b.length))
elif a.dtype is 1:
c = stypes.SPICEDOUBLE_CELL(max(a.size, b.size))
el... | Intersect two sets of any data type to form a third set.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/inter_c.html
:param a: First input set.
:type a: spiceypy.utils.support_types.SpiceCell
:param b: Second input set.
:type b: spiceypy.utils.support_types.SpiceCell
:return: Intersec... |
def dummy_func(arg1, arg2, arg3=None, arg4=[1, 2, 3], arg5={}, **kwargs):
    """Test function for kwargs parsing.

    Concatenates ``str(foo) + str(bar) + str(foo2)`` pulled from
    **kwargs: ``foo`` defaults to None, ``bar`` is popped with default
    4, and ``foo2`` is required (KeyError when missing).
    """
    part_foo = kwargs.get('foo', None)
    part_bar = kwargs.pop('bar', 4)
    part_foo2 = kwargs['foo2']
    return str(part_foo) + str(part_bar) + str(part_foo2)
def is_contextfree(self):
    """Return True iff the grammar is context-free.

    Every rule's left-hand side must consist of exactly one symbol,
    and that symbol must be a nonterminal.
    """
    return all(
        len(lhs) == 1 and lhs[0] in self.nonterminals
        for lhs, _rhs in self.rules
    )
def resend_presence(self):
    """Re-send the currently configured presence.

    :return: stanza token of the presence stanza, or None if the
        stream is not established.
    """
    if not self.client.established:
        return None
    return self.client.enqueue(self.make_stanza())
def get_productivity_stats(self):
    """Return the user's productivity stats.

    Fetches the stats from the API with this user's token, raises when
    the response contains errors, and returns the decoded JSON body.

    :return: A JSON-decoded representation of the user's productivity
        stats.
    """
    response = API.get_productivity_stats(self.api_token)
    _fail_if_contains_errors(response)
    return response.json()
def advance(self):
    """Advance the base iterator, publishing the element to every
    constituent deque."""
    item = next(self._iterable)
    for sink in self._deques:
        sink.append(item)
def _get_licences():
    """Print all known licenses to stdout as 'name [code]' lines."""
    for license_code, license_name in _LICENSES.items():
        print("{license_name} [{license_code}]".format(
            license_name=license_name, license_code=license_code))
def create_static_profile_path(client_id):
    """Create a profile path folder if it does not exist.

    @param client_id: ID of client user
    @return: string profile path
    """
    profile_path = os.path.join(STATIC_FILES_PATH, str(client_id))
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists() guard (two concurrent callers could both pass
    # the check and one makedirs would then raise).
    os.makedirs(profile_path, exist_ok=True)
    return profile_path
def is_connected(self):
if self._is_open:
err = hidapi.hid_read_timeout(self._device, ffi.NULL, 0, 0)
if err == -1:
return False
else:
return True
else:
en = Enumeration(vid=self.vendor_id, pid=self.product_id).find(path=sel... | Checks if the USB device is still connected |
def pixels(self, value: int) -> 'Gap':
    """Set the gap in pixels and return self for chaining.

    :param value: number of pixels; validated by ``raise_not_number``.
    :return: this ``Gap`` instance (fluent API).
    """
    raise_not_number(value)
    self.gap = '{}px'.format(value)
    return self
def _read_as_int(self, addr, numBytes):
    """Read ``numBytes`` registers starting at ``addr`` as one integer.

    Convenience method: buffers of four or more bytes are decoded as a
    signed little-endian 32-bit value (extra bytes ignored); shorter
    buffers are assembled as an unsigned little-endian integer.
    """
    buf = self.read_register(addr, numBytes)
    if len(buf) >= 4:
        return struct.unpack_from("<i", buf)[0]
    value = 0
    for shift, byte in enumerate(buf):
        value |= byte << 8 * shift
    return value
def _get_key_value_config(self):
    """Retrieve and store the first found remote configuration.

    Stores the configuration from the first remote provider into
    ``self._kvstore`` and returns None; raises when no providers are
    configured.
    """
    for provider in self._remote_providers:
        self._kvstore = self._get_remote_config(provider)
        return None
    raise errors.RemoteConfigError("No Files Found")
def _load_resource(self):
url = self._url
if self._params:
url += '?{0}'.format(six.moves.urllib_parse.urlencode(self._params))
r = getattr(self._session, self._meta.get_method.lower())(url)
if r.status_code == 404:
raise NotFoundException('Server returned 404 Not... | Load resource data from server |
def graphs(self):
    """Build a namespace whose attributes are all available graphs.

    Each class listed in ``graphs.__all__`` is instantiated with this
    instance as its only argument and attached to the result under the
    class's ``short_name``.
    """
    holder = Dummy()
    for graph_name in graphs.__all__:
        graph_cls = getattr(graphs, graph_name)
        setattr(holder, graph_cls.short_name, graph_cls(self))
    return holder
async def remove_key(request: web.Request) -> web.Response:
keys_dir = CONFIG['wifi_keys_dir']
available_keys = os.listdir(keys_dir)
requested_hash = request.match_info['key_uuid']
if requested_hash not in available_keys:
return web.json_response(
{'message': 'No such key file {}'
... | Remove a key.
```
DELETE /wifi/keys/:id
-> 200 OK
{message: 'Removed key keyfile.pem'}
``` |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.