code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def delete_instance(self, uid):
    """Delete an ObjectModel via a DELETE request.

    :param int uid: Unique id for the Model resource
    :returns: True when the server answered 204 No Content.
    """
    target = "{0}/{1}".format(self.uri, uid)
    response, _instance = self.request("DELETE", target)
    return response.status == 204
def system_image_type(self, system_image_type):
    """Sets the system_image_type of this BuildEnvironmentRest.

    :param system_image_type: The system_image_type of this BuildEnvironmentRest.
    :type: str
    :raises ValueError: if the value is not one of the allowed image types
    """
    valid = ["DOCKER_IMAGE", "VIRTUAL_MACHINE_RAW", "VIRTUAL_MACHINE_QCOW2", "LOCAL_WORKSPACE"]
    if system_image_type not in valid:
        msg = ("Invalid value for `system_image_type` ({0}), must be one of {1}"
               .format(system_image_type, valid))
        raise ValueError(msg)
    self._system_image_type = system_image_type
def press_by_tooltip(self, tooltip):
    """Click on an HTML element with a given tooltip.

    Very useful when clicking icon buttons. Tries each matching element
    until one accepts the click.

    :param tooltip: tooltip text to search for
    :raises AssertionError: if no matching element could be clicked
    """
    for button in find_by_tooltip(world.browser, tooltip):
        try:
            button.click()
            break
        # fix: bare `except:` also swallowed SystemExit/KeyboardInterrupt
        except Exception:
            pass
    else:
        raise AssertionError("No button with tooltip '{0}' found"
                             .format(tooltip))
def retrieve(self, id):
    """Retrieve a single lead.

    Returns a single lead available to the user for the unique lead ID
    provided; the request errors if the lead does not exist.

    :calls: ``get /leads/{id}``
    :param int id: Unique identifier of a Lead.
    :return: dict supporting attribute-style access representing the Lead.
    :rtype: dict
    """
    response = self.http_client.get("/leads/{id}".format(id=id))
    return response[2]
def update_config(self, config):
    """Send TraceConfig to the agent and get the agent's config in reply.

    :type config: `~opencensus.proto.trace.v1.TraceConfig`
    :param config: Trace config with sampling and other settings
    :rtype: `~opencensus.proto.trace.v1.TraceConfig`
    :returns: Trace config from agent.
    """
    # fix: the previous version wrapped this in `with Lock():` on a freshly
    # created local Lock, which can never be contended and provided no
    # synchronization — removed.
    config_responses = self.client.Config(
        self.generate_config_request(config))
    return next(config_responses)
def describe_role(name, region=None, key=None, keyid=None, profile=None):
    """Get information for an IAM role.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iam.describe_role myirole

    :param name: name of the IAM role
    :param region: AWS region (optional)
    :param key: AWS secret key (optional)
    :param keyid: AWS access key id (optional)
    :param profile: salt credential profile (optional)
    :return: role data mapping, or False on failure
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        info = conn.get_role(name)
        if not info:
            return False
        role = info.get_role_response.get_role_result.role
        # The policy document arrives URL-quoted; decode then parse as JSON.
        role['assume_role_policy_document'] = salt.utils.json.loads(_unquote(
            role.assume_role_policy_document
        ))
        # Strip empty "Sid" entries so the document round-trips cleanly.
        for policy_key, policy in role['assume_role_policy_document'].items():
            if policy_key == 'Statement':
                for val in policy:
                    if 'Sid' in val and not val['Sid']:
                        del val['Sid']
        return role
    except boto.exception.BotoServerError as e:
        log.debug(e)
        log.error('Failed to get %s information.', name)
        return False
def jx_type(column):
    """Return the jx_type for the given column.

    Columns whose es_column ends with EXISTS_TYPE map to EXISTS; otherwise
    the column's es_type is translated through es_type_to_json_type.
    """
    if column.es_column.endswith(EXISTS_TYPE):
        return EXISTS
    return es_type_to_json_type[column.es_type]
def _write_json(obj, path):
    """Write a serializable object to *path* as JSON."""
    with open(path, 'w') as handle:
        handle.write(json.dumps(obj))
def update(self):
    """Update the display: build the requests, tag each with the current
    block, send them, and hand the results to the callback."""
    requests = self.build_requests()
    for request in requests:
        request.block = self.block
    self.callback(self.send_requests(*requests))
def logloss(y, p):
    """Bounded log loss error.

    Predictions are clipped into [EPS, 1 - EPS] so log_loss never sees an
    exact 0 or 1.

    Args:
        y (numpy.array): target
        p (numpy.array): prediction

    Returns:
        bounded log loss error
    """
    p = p.copy()  # fix: do not clip the caller's prediction array in place
    p[p < EPS] = EPS
    p[p > 1 - EPS] = 1 - EPS
    return log_loss(y, p)
def load_dic28():
    """DIC28 Dataset from Pajek.

    This network represents connections among English words in a dictionary.
    It was generated from Knuth's dictionary. Two words are connected by an
    edge if we can reach one from the other by

    - changing a single character (e. g., work - word)
    - adding / removing a single character (e. g., ever - fever).

    There exist 52,652 words (vertices in a network) having 2 up to 8
    characters in the dictionary. The obtained network has 89038 edges.
    """
    # NOTE: this docstring doubles as the dataset description below
    # (``Dataset(load_dic28.__doc__, ...)``).
    dataset_path = _load('dic28')
    X = _load_csv(dataset_path, 'data')
    y = X.pop('label').values
    graph1 = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph1.gml')))
    graph2 = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph2.gml')))
    # Union graph: nodes/edges of both graphs plus the cross-graph links in X.
    graph = graph1.copy()
    graph.add_nodes_from(graph2.nodes(data=True))
    graph.add_edges_from(graph2.edges)
    graph.add_edges_from(X[['graph1', 'graph2']].values)
    graphs = {
        'graph1': graph1,
        'graph2': graph2,
    }
    return Dataset(load_dic28.__doc__, X, y, accuracy_score,
                   stratify=True, graph=graph, graphs=graphs)
def has_device_info(self, key):
    """Return True iff the cache has information about the device *key*."""
    if _debug: DeviceInfoCache._debug("has_device_info %r", key)
    return key in self.cache
async def list(self, *, filters: Mapping = None) -> List[Mapping]:
    """Return a list of services.

    Args:
        filters: a dict with a list of filters

    Available filters:
        id=<service id>, label=<service label>,
        mode=["replicated"|"global"], name=<service name>
    """
    params = {"filters": clean_filters(filters)}
    response = await self.docker._query_json(
        "services", method="GET", params=params
    )
    return response
def invenio_query_factory(parser=None, walkers=None):
    """Create a parser returning an Elasticsearch DSL query instance.

    :param parser: pypeg2 grammar to use (defaults to ``Main``)
    :param walkers: optional list of AST walkers; a copy is taken so the
        caller's list is never modified.
    """
    parser = parser or Main
    # fix: copy the caller's walker list before appending — the previous
    # code mutated the argument in place, so ElasticSearchDSL was appended
    # to the caller's list on every call.
    walkers = list(walkers) if walkers else [PypegConverter()]
    walkers.append(ElasticSearchDSL())

    def invenio_query(pattern):
        query = pypeg2.parse(pattern, parser, whitespace="")
        for walker in walkers:
            query = query.accept(walker)
        return query

    return invenio_query
def update(self, typ, id, **kwargs):
    """Update just the fields sent as keyword args via a PUT request."""
    response = self._request(typ, id=id, method='PUT', data=kwargs)
    return self._load(response)
def create_actions(MAIN):
    """Create all the possible actions and register them on ``MAIN.action``.

    :param MAIN: main window; must expose an ``action`` dict and the
        ``show_settings``, ``close`` and ``about`` slots.
    """
    actions = MAIN.action
    actions['open_settings'] = QAction(QIcon(ICON['settings']), 'Settings',
                                       MAIN)
    actions['open_settings'].triggered.connect(MAIN.show_settings)
    actions['close_wndw'] = QAction(QIcon(ICON['quit']), 'Quit', MAIN)
    actions['close_wndw'].triggered.connect(MAIN.close)
    actions['about'] = QAction('About WONAMBI', MAIN)
    actions['about'].triggered.connect(MAIN.about)
    actions['aboutqt'] = QAction('About Qt', MAIN)
    # lambda: Qt's aboutQt needs the parent widget, not the signal args.
    actions['aboutqt'].triggered.connect(lambda: QMessageBox.aboutQt(MAIN))
def fixcode(**kwargs):
    """Auto pep8 format all python files in the ``source code`` and ``tests`` dirs.

    :param kwargs: forwarded to ``autopep8``.

    NOTE(review): ``Path`` here appears to be pathlib_mate's Path — it has an
    ``.autopep8()`` method that stdlib pathlib lacks; confirm the import.
    """
    repo_dir = Path(__file__).parent.absolute()
    source_dir = Path(repo_dir, package.__name__)
    if source_dir.exists():
        print("Source code locate at: '%s'." % source_dir)
        print("Auto pep8 all python file ...")
        source_dir.autopep8(**kwargs)
    else:
        print("Source code directory not found!")
    unittest_dir = Path(repo_dir, "tests")
    if unittest_dir.exists():
        print("Unittest code locate at: '%s'." % unittest_dir)
        print("Auto pep8 all python file ...")
        unittest_dir.autopep8(**kwargs)
    else:
        print("Unittest code directory not found!")
    print("Complete!")
def item_length(self):
    """Length of each element in the current SArray.

    Only works on SArrays of dict, array, or list type. If a given element
    is a missing value, the output element is also a missing value.
    Equivalent to (but more performant than):

        sa_item_len = sa.apply(lambda x: len(x) if x is not None else None)

    Returns
    -------
    out_sf : SArray
        A new SArray where each element is the len of the corresponding
        item in the original SArray.

    Raises
    ------
    TypeError
        If the SArray dtype is not list, dict or array.array.

    Examples
    --------
    >>> sa = SArray([{"a": 1}, {"a": 1, "b": 2}, None])
    >>> sa.item_length()
    dtype: int
    Rows: 3
    [1, 2, None]
    """
    if (self.dtype not in [list, dict, array.array]):
        raise TypeError("item_length() is only applicable for SArray of type list, dict and array.")
    with cython_context():
        return SArray(_proxy = self.__proxy__.item_length())
def close(self):
    """Close the current session, if still open.

    Clears the cookie jar, closes the session and drops the reference;
    calling this twice is a no-op.
    """
    session = self.session
    if session is None:
        return
    session.cookies.clear()
    session.close()
    self.session = None
def delete_by_ids(self, ids):
    """Delete objects by ids.

    :param ids: list of object ids to delete.
    :return: True if the delete ran; False if the model raised DoesNotExist.
    """
    try:
        queryset = self.filter(id__in=ids)
        queryset.delete()
    except self.model.DoesNotExist:
        return False
    return True
def get_overall_services_health(self) -> str:
    """Get the overall health of all the services.

    Returns:
        str: "Healthy" when every service reports "Healthy",
        otherwise "Unhealthy".
    """
    statuses = self.get_services_health().values()
    return "Healthy" if all(s == "Healthy" for s in statuses) else "Unhealthy"
def main(guess_a=1., guess_b=0., power=3, savetxt='None', verbose=False):
    """Example solving a system of non-linear equations defined as SymPy
    expressions, giving the problem a simple command-line interface.

    :param guess_a: initial guess for the first unknown
    :param guess_b: initial guess for the second unknown
    :param power: exponent parameter forwarded to ``solve``
    :param savetxt: output filename; the sentinel string 'None' disables saving
    :param verbose: print the full solver result instead of just x
    """
    x, sol = solve(guess_a, guess_b, power)
    assert sol.success
    if savetxt != 'None':
        # fix: np.savetxt signature is (fname, X) — the arguments were swapped,
        # which passed the solution array as the filename.
        np.savetxt(savetxt, x)
    else:
        if verbose:
            print(sol)
        else:
            print(x)
def IsPipe(self):
    """Determine if the file entry is a pipe.

    Returns:
        bool: True if the file entry is a pipe.
    """
    # Lazily stat the entry and cache the resulting type on first use.
    if self._stat_object is None:
        self._stat_object = self._GetStat()
    if self._stat_object is not None:
        self.entry_type = self._stat_object.type
    return self.entry_type == definitions.FILE_ENTRY_TYPE_PIPE
def ekfind(query, lenout=_default_len_out):
    """Find E-kernel data that satisfy a set of constraints.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekfind_c.html

    :param query: Query specifying data to be found.
    :type query: str
    :param lenout: Declared length of output error message string.
    :type lenout: int
    :return: (number of matching rows,
              flag indicating whether the query parsed correctly,
              parse error description)
    :rtype: tuple
    """
    query = stypes.stringToCharP(query)
    lenout = ctypes.c_int(lenout)
    nmrows = ctypes.c_int()
    error = ctypes.c_int()
    # Output buffer sized to the declared lenout.
    errmsg = stypes.stringToCharP(lenout)
    libspice.ekfind_c(query, lenout, ctypes.byref(nmrows), ctypes.byref(error),
                      errmsg)
    return nmrows.value, error.value, stypes.toPythonString(errmsg)
def css(self, mapping=None):
    """Update the css dictionary if ``mapping`` is a dictionary, otherwise
    return the css value at ``mapping``.

    If ``mapping`` is not given, return the whole ``css`` dictionary
    if available.
    """
    css = self._css
    if mapping is None:
        return css
    elif isinstance(mapping, Mapping):
        if css is None:
            # NOTE(review): the new dict is stored under self._extra['css']
            # while reads come from self._css — presumably _css is a property
            # backed by _extra; confirm.
            self._extra['css'] = css = {}
        css.update(mapping)
        return self
    else:
        return css.get(mapping) if css else None
def trading_dates(start, end, calendar='US'):
    """Trading dates for a given exchange.

    Args:
        start: start date
        end: end date
        calendar: exchange as string; picks ``<calendar>TradingCalendar``
            from this module

    Returns:
        pd.DatetimeIndex: business days with exchange holidays removed
    """
    kw = dict(start=pd.Timestamp(start, tz='UTC').date(), end=pd.Timestamp(end, tz='UTC').date())
    # Resolve e.g. USTradingCalendar dynamically from this module.
    us_cal = getattr(sys.modules[__name__], f'{calendar}TradingCalendar')()
    return pd.bdate_range(**kw).drop(us_cal.holidays(**kw))
def delete(self, path):
    """Wrap the hvac delete call, using the right token for cubbyhole
    interactions (the initial token), then restore the operational token.
    """
    path = sanitize_mount(path)
    if not path.startswith('cubbyhole'):
        super(Client, self).delete(path)
        return None
    self.token = self.initial_token
    result = super(Client, self).delete(path)
    self.token = self.operational_token
    return result
def expect_handshake(self, headers):
    """Expect a handshake (INIT_REQ) from the remote host.

    :param headers: headers to respond with
    :returns: a future that resolves with the init request once the
        handshake is complete
    :raises errors.UnexpectedError: if the first message is not INIT_REQ
    """
    init_req = yield self.reader.get()
    if init_req.message_type != Types.INIT_REQ:
        raise errors.UnexpectedError(
            "You need to shake my hand first. Got %s" % repr(init_req)
        )
    self._extract_handshake_headers(init_req)
    self._handshake_performed = True
    # Acknowledge with an INIT_RES carrying our headers.
    self.writer.put(
        messages.InitResponseMessage(
            PROTOCOL_VERSION, headers, init_req.id),
    )
    # Kick off the receive loop now that the handshake is done.
    self._loop()
    raise tornado.gen.Return(init_req)
def _print_memory(self, memory):
    """Print memory as ``addr : value`` lines (hex plus decimal)."""
    for address, value in memory.items():
        line = " 0x%08x : 0x%08x (%d)" % (address, value, value)
        print(line)
def evaluate(self, x, y, flux, x_0, y_0):
    """Discrete PRF model evaluation.

    Given a position and flux, the corresponding PRF image is chosen and
    scaled by the flux. Coordinates outside the PRF image bounds yield 0.

    Parameters
    ----------
    x, y : float
        Coordinate arrays in pixel coordinates.
    flux : float
        Model flux.
    x_0, y_0 : float
        Position of the center of the PRF.
    """
    # Shift into PRF-array index space (integer pixel indices).
    x = (x - x_0 + 0.5 + self.prf_shape[1] // 2).astype('int')
    y = (y - y_0 + 0.5 + self.prf_shape[0] // 2).astype('int')
    # Subpixel offset selects which oversampled PRF realization to use.
    y_sub, x_sub = subpixel_indices((y_0, x_0), self.subsampling)
    x_bound = np.logical_or(x < 0, x >= self.prf_shape[1])
    y_bound = np.logical_or(y < 0, y >= self.prf_shape[0])
    out_of_bounds = np.logical_or(x_bound, y_bound)
    # Clamp out-of-bounds indices to 0 so the lookup is safe, then zero
    # those results afterwards.
    x[x_bound] = 0
    y[y_bound] = 0
    result = flux * self._prf_array[int(y_sub), int(x_sub)][y, x]
    result[out_of_bounds] = 0
    return result
def get(cls, issue_type):
    """Return the IssueType object for `issue_type`.

    If no existing object is found, a new type is created in the database
    and returned.

    Args:
        issue_type (str, int, IssueType): Issue type name, id or instance

    Returns:
        :obj:`IssueType`
    """
    if isinstance(issue_type, str):
        obj = getattr(db, cls.__name__).find_one(cls.issue_type == issue_type)
    elif isinstance(issue_type, int):
        obj = getattr(db, cls.__name__).find_one(cls.issue_type_id == issue_type)
    elif isinstance(issue_type, cls):
        # Already an IssueType instance: return it unchanged.
        return issue_type
    else:
        obj = None
    if not obj:
        # Not found: create, persist and refresh so defaults/ids are loaded.
        obj = cls()
        obj.issue_type = issue_type
        db.session.add(obj)
        db.session.commit()
        db.session.refresh(obj)
    return obj
def reverse_transform_table(self, table, table_meta, missing=None):
    """Transform a `table` back to its original format. (Deprecated.)

    Args:
        table (pandas.DataFrame): Contents of the table to be transformed.
        table_meta (dict): Metadata for the given table.
        missing (bool): Whether to use NullTransformer to handle missing
            values; when given, also updates ``self.missing``.

    Returns:
        pandas.DataFrame: Table in original format.
    """
    if missing is None:
        missing = self.missing
    else:
        self.missing = missing
    warnings.warn(
        DEPRECATION_MESSAGE.format('reverse_transform_table'), DeprecationWarning)
    result = pd.DataFrame(index=table.index)
    table_name = table_meta['name']
    # Reverse-transform each field; columns that come back None are skipped.
    for field in table_meta['fields']:
        new_column = self._reverse_transform_column(table, field, table_name)
        if new_column is not None:
            result[field['name']] = new_column
    return result
def triple(subject, relation, obj):
    """Build a simple PyParsing triple in ``subject relation object`` format."""
    return And([Group(subject)(SUBJECT), relation(RELATION), Group(obj)(OBJECT)])
def _classify_load_constant(self, regs_init, regs_fini, mem_fini, written_regs, read_regs):
    """Classify load-constant gadgets.

    A register qualifies when it was written by the gadget and its final
    value differs from its initial value; each match records the constant
    (src) and the destination register (dst) as REIL operands.
    """
    matches = []
    for dst_reg, dst_val in regs_fini.items():
        # Only consider registers the gadget actually wrote...
        if dst_reg not in written_regs:
            continue
        # ...and whose value actually changed.
        if dst_val == regs_init[dst_reg]:
            continue
        dst_val_ir = ReilImmediateOperand(dst_val, self._arch_regs_size[dst_reg])
        dst_reg_ir = ReilRegisterOperand(dst_reg, self._arch_regs_size[dst_reg])
        matches.append({
            "src": [dst_val_ir],
            "dst": [dst_reg_ir]
        })
    return matches
def parse_reaction_equation_string(equation, default_compartment):
    """Parse a string representation of a reaction equation.

    Compounds with an undefined compartment are placed in
    ``default_compartment``.
    """
    def _translate_compartments(reaction, compartment):
        # Assign the default compartment to any compound lacking one,
        # on both sides of the reaction.
        left = (((c.in_compartment(compartment), v)
                 if c.compartment is None else (c, v))
                for c, v in reaction.left)
        right = (((c.in_compartment(compartment), v)
                  if c.compartment is None else (c, v))
                 for c, v in reaction.right)
        return Reaction(reaction.direction, left, right)
    eq = _REACTION_PARSER.parse(equation).normalized()
    return _translate_compartments(eq, default_compartment)
def _lookup(self, timestamp):
    """Return the index of the value associated with *timestamp*, or None.

    Timestamps are floats, so two are considered equal when their absolute
    difference is below ``self.EPSILON``.
    """
    idx = search_greater(self._values, timestamp)
    if idx >= len(self._values):
        return None
    if math.fabs(self._values[idx][0] - timestamp) >= self.EPSILON:
        return None
    return idx
def lcm(*a):
    """Least common multiple.

    Usage: ``lcm([3, 4, 5])`` or ``lcm(3, 4, 5)``.
    """
    if len(a) > 1:
        return reduce(lcm2, a)
    first = a[0]
    if hasattr(first, "__iter__"):
        return reduce(lcm2, first)
    return first
def do_min(environment, value, case_sensitive=False, attribute=None):
    """Return the smallest item from the sequence.

    .. sourcecode:: jinja

        {{ [1, 2, 3]|min }}
            -> 1

    :param case_sensitive: Treat upper and lower case strings as distinct.
    :param attribute: Get the object with the min value of this attribute.
    """
    return _min_or_max(environment, value, min, case_sensitive, attribute)
def remove_non_magic_cols_from_table(self, ignore_cols=()):
    """Remove all non-MagIC columns from self.df, in place.

    Parameters
    ----------
    ignore_cols : list-like
        columns not to remove, whether they are proper MagIC columns or not

    Returns
    -------
    unrecognized_cols : list
        any columns that were removed
    """
    remaining = list(self.get_non_magic_cols())
    for skip in ignore_cols:
        if skip in remaining:
            remaining.remove(skip)
    if remaining:
        print('-I- Removing non-MagIC column names from {}:'.format(self.dtype), end=' ')
        for column in remaining:
            self.df.drop(column, axis='columns', inplace=True)
            print(column, end=' ')
        print("\n")
    return remaining
def write_jobfile(self, task, **kwargs):
    """Write the submission script for *task*; return the script path.

    ================ ============================================
    kwargs           Meaning
    ================ ============================================
    exec_args        List of arguments passed to task.executable.
                     Default: no arguments.
    ================ ============================================
    """
    script = self.qadapter.get_script_str(
        job_name=task.name,
        launch_dir=task.workdir,
        executable=task.executable,
        qout_path=task.qout_file.path,
        qerr_path=task.qerr_file.path,
        stdin=task.files_file.path,
        stdout=task.log_file.path,
        stderr=task.stderr_file.path,
        exec_args=kwargs.pop("exec_args", []),
    )
    with open(task.job_file.path, "w") as fh:
        fh.write(script)
    # rwxr----- : executable by owner, readable by group.
    task.job_file.chmod(0o740)
    return task.job_file.path
def _set_archive_name(package_name,
                      package_version,
                      python_versions,
                      platform,
                      build_tag=''):
    """Build the output archive file name.

    Aims to stay as close as possible to the wheel naming convention
    (https://www.python.org/dev/peps/pep-0491/#file-name-convention),
    since we are essentially producing a "wheel" of our package.
    """
    tags = [
        package_name.replace('-', '_'),
        package_version,
    ]
    if build_tag:
        tags.append(build_tag)
    tags += ['.'.join(python_versions), 'none', platform]
    return '{0}.wgn'.format('-'.join(tags))
def _start_lock_renewer(self):
    """Start the background lock-refresher thread.

    :raises AlreadyStarted: if the renewal thread is already running
    """
    if self._lock_renewal_thread is not None:
        raise AlreadyStarted("Lock refresh thread already started")
    logger.debug(
        "Starting thread to refresh lock every %s seconds",
        self._lock_renewal_interval
    )
    self._lock_renewal_stop = threading.Event()
    # A weakref to self lets the renewer exit if the lock is garbage collected.
    self._lock_renewal_thread = threading.Thread(
        group=None,
        target=self._lock_renewer,
        daemon=True,  # fix: Thread.setDaemon() is deprecated; pass daemon=
        kwargs={'lockref': weakref.ref(self),
                'interval': self._lock_renewal_interval,
                'stop': self._lock_renewal_stop}
    )
    self._lock_renewal_thread.start()
def get_codeblock(language, text):
    """Generate an rst ``code-block`` directive for *text* in *language*."""
    body = "".join("\t{0}\n".format(line) for line in text.splitlines())
    return "\n\n.. code-block:: " + language + "\n\n" + body + "\n"
def interpolate_to_isosurface(level_var, interp_var, level, **kwargs):
    r"""Linear interpolation of a variable to a given vertical level.

    Assumes the highest vertical level (lowest pressure) is the zeroth index.
    A classic use is computing potential temperature on the dynamic
    tropopause (2 PVU surface).

    Parameters
    ----------
    level_var : array_like (P, M, N)
        Level values in a 3D grid on a common vertical coordinate (e.g., PV
        values on isobaric levels), ordered highest to lowest in atmosphere.
    interp_var : array_like (P, M, N)
        Variable on the same vertical coordinate to interpolate to the
        given level (e.g., potential temperature on isobaric levels).
    level : int or float
        Desired interpolated level (e.g., 2 PVU surface).

    Other Parameters
    ----------------
    bottom_up_search : bool, optional
        Search for levels bottom-up (True, default) or top-down.

    Returns
    -------
    interp_level : (M, N) ndarray
        The interpolated variable on the desired level.
    """
    bottom_up_search = kwargs.pop('bottom_up_search', True)
    above, below, good = metpy.calc.find_bounding_indices(level_var, [level], axis=0,
                                                          from_below=bottom_up_search)
    # Linear interpolation between the bounding levels.
    interp_level = (((level - level_var[above]) / (level_var[below] - level_var[above]))
                    * (interp_var[below] - interp_var[above])) + interp_var[above]
    interp_level[~good] = np.nan
    # Where the target level lies outside the column, clamp to the bottom/top
    # values instead of extrapolating.
    minvar = (np.min(level_var, axis=0) >= level)
    maxvar = (np.max(level_var, axis=0) <= level)
    interp_level[0][minvar] = interp_var[-1][minvar]
    interp_level[0][maxvar] = interp_var[0][maxvar]
    return interp_level.squeeze()
def nofollow_callback(attrs, new=False):
    """Turn relative links into absolute ones and avoid `nofollow` for our
    own host; otherwise add `nofollow`.

    The callback is not split in two so that the URL is parsed only once.
    """
    parsed_url = urlparse(attrs[(None, 'href')])
    # Internal link (no host, or our own host): rebuild an absolute URL.
    if parsed_url.netloc in ('', current_app.config['SERVER_NAME']):
        attrs[(None, 'href')] = '{scheme}://{netloc}{path}'.format(
            scheme='https' if request.is_secure else 'http',
            netloc=current_app.config['SERVER_NAME'],
            path=parsed_url.path)
        return attrs
    else:
        # External link: ensure rel contains nofollow exactly once.
        rel = [x for x in attrs.get((None, 'rel'), '').split(' ') if x]
        if 'nofollow' not in [x.lower() for x in rel]:
            rel.append('nofollow')
        attrs[(None, 'rel')] = ' '.join(rel)
        return attrs
def _all_spec(self):
    """Return a dict of all specifiers and their lengths.

    fix: the previous version inserted ``basic_spec`` entries directly into
    ``self._mod_spec``, permanently mutating shared state on every call;
    the merge now happens on a copy.
    """
    base = dict(self._mod_spec)
    base.update(self.basic_spec)
    return base
def stream(self, code):
    """Stream in RiveScript source code dynamically.

    :param code: Either a string containing RiveScript code or an array of
        lines of RiveScript code.
    """
    self._say("Streaming code.")
    # isinstance (instead of `type(code) in [str, text_type]`) also accepts
    # subclasses of str/text_type.
    if isinstance(code, (str, text_type)):
        code = code.split("\n")
    self._parse("stream()", code)
def get_repository(name):
    """Get the details of a local PSGet repository.

    :param name: Name of the repository
    :type name: ``str``

    CLI Example:

    .. code-block:: bash

        salt 'win01' psget.get_repository MyRepo
    """
    # fix: the previous version discarded the Get-PSRepository output and
    # returned `name not in list_modules()` — a membership test against the
    # *module* list that has nothing to do with repositories.
    cmd = 'Get-PSRepository "{0}"'.format(name)
    return _pshell(cmd)
def get_enum_labels(enum_cls):
    """Return the list of enumeration labels from an Enum class.

    Useful for the `enum_labels` parameter when creating an attribute.
    The enumeration values are checked to be unique, start at zero and
    increment by one.

    :param enum_cls: the Enum class to be inspected
    :type enum_cls: :py:obj:`enum.Enum`
    :return: list of label strings
    :rtype: :py:obj:`list`
    :raises EnumTypeError: if the given class is invalid
    """
    if not issubclass(enum_cls, enum.Enum):
        raise EnumTypeError("Input class '%s' must be derived from enum.Enum"
                            % enum_cls)
    try:
        enum.unique(enum_cls)
    except ValueError as exc:
        raise EnumTypeError("Input class '%s' must be unique - %s"
                            % (enum_cls, exc))
    members = list(enum_cls)
    values = [member.value for member in members]
    if not values:
        raise EnumTypeError("Input class '%s' has no members!" % enum_cls)
    # Values must be exactly 0, 1, 2, ...
    if values != list(range(len(values))):
        raise EnumTypeError("Enum values for '%s' must start at 0 and "
                            "increment by 1. Values: %s"
                            % (enum_cls, values))
    return [member.name for member in members]
def snake_to_camel(s: str) -> str:
    """Convert a string from snake case to camel case."""
    parts = s.split('_')
    first, rest = parts[0], parts[1:]
    return first + ''.join(word.title() for word in rest)
def __is_gsi_maintenance_window(table_name, gsi_name, maintenance_windows):
    """Check whether the current UTC time is within a maintenance window.

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type maintenance_windows: str
    :param maintenance_windows: e.g. '00:00-01:00,10:00-11:00'
    :returns: bool -- True if within a maintenance window
    """
    windows = []
    for window in maintenance_windows.split(','):
        try:
            begin, finish = window.split('-', 1)
        except ValueError:
            logger.error(
                '{0} - GSI: {1} - '
                'Malformatted maintenance window'.format(table_name, gsi_name))
            return False
        windows.append((begin, finish))
    # Compare HHMM strings lexicographically (works for zero-padded times).
    now = datetime.datetime.utcnow().strftime('%H%M')
    for begin, finish in windows:
        start = ''.join(begin.split(':'))
        end = ''.join(finish.split(':'))
        if start <= now <= end:
            return True
    return False
def write_temporary_file(content, prefix='', suffix=''):
    """Generate a temporary file with the given content.

    Args:
        content (str): file content (script, Dockerfile, playbook, config, ...)
        prefix (str): the filename starts with this prefix (default: none)
        suffix (str): the filename ends with this suffix (default: none)

    Returns:
        str: name of the temporary file

    Note:
        You are responsible for deleting the file.
    """
    handle = tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix,
                                         mode='w+t', delete=False)
    try:
        handle.writelines(content)
    finally:
        handle.close()
    return handle.name
def get_vmconfig(vmid, node=None, node_type='openvz'):
    """Get VM configuration.

    :param vmid: numeric VM id
    :param node: host name; when None, all available locations are scanned
        for the vmid
    :param node_type: API node type path segment (default 'openvz')

    NOTE(review): if the vmid is not found on any host, ``node`` remains
    None and the query URL embeds the literal string 'None' — confirm
    whether that is intended.
    """
    if node is None:
        # Search every host for the VM to discover which node it lives on.
        for host_name, host_details in six.iteritems(avail_locations()):
            for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)):
                if item['vmid'] == vmid:
                    node = host_name
    data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid))
    return data
def open(self, name, *mode):
    """Return an open file object for a file in the reference package."""
    full_path = self.file_path(name)
    return self.file_factory(full_path, *mode)
def instruction_LD8(self, opcode, m, register):
    """Load the contents of memory location M into the designated register.

    source code forms: LDA P; LDB P
    CC bits "HNZVC": -aa0-
    """
    register.set(m)
    # Per -aa0-: V is always cleared, N and Z reflect the loaded value.
    self.clear_NZV()
    self.update_NZ_8(m)
def lookup_symbols(self,
                   symbols,
                   as_of_date,
                   fuzzy=False,
                   country_code=None):
    """Lookup a list of equities by symbol.

    Equivalent to::

        [finder.lookup_symbol(s, as_of, fuzzy) for s in symbols]

    but potentially faster because repeated lookups are memoized.

    Parameters
    ----------
    symbols : sequence[str]
        Sequence of ticker symbols to resolve.
    as_of_date : pd.Timestamp
        Forwarded to ``lookup_symbol``.
    fuzzy : bool, optional
        Forwarded to ``lookup_symbol``.
    country_code : str or None, optional
        The country to limit searches to. If not provided, the search spans
        all countries, which increases the chance of an ambiguous lookup.

    Returns
    -------
    equities : list[Equity]
    """
    if not symbols:
        return []
    multi_country = country_code is None
    if fuzzy:
        f = self._lookup_symbol_fuzzy
        mapping = self._choose_fuzzy_symbol_ownership_map(country_code)
    else:
        f = self._lookup_symbol_strict
        mapping = self._choose_symbol_ownership_map(country_code)
    if mapping is None:
        raise SymbolNotFound(symbol=symbols[0])
    # Per-call memo so duplicate symbols are resolved only once.
    memo = {}
    out = []
    append_output = out.append
    for sym in symbols:
        if sym in memo:
            append_output(memo[sym])
        else:
            equity = memo[sym] = f(
                mapping,
                multi_country,
                sym,
                as_of_date,
            )
            append_output(equity)
    return out
def find_actual_effect(self, mechanism, purviews=False):
    """Return the actual effect of a mechanism.

    Delegates to ``find_causal_link`` in the EFFECT direction.
    """
    return self.find_causal_link(Direction.EFFECT, mechanism, purviews)
def convert_type(self, type):
    """Convert a Table Schema type name to its BigQuery equivalent.

    Types mapped to None have no BigQuery counterpart but are still
    accepted; unknown type names raise StorageError.

    :raises tableschema.exceptions.StorageError: for unknown type names
    """
    supported = {
        'any': 'STRING',
        'array': None,
        'boolean': 'BOOLEAN',
        'date': 'DATE',
        'datetime': 'DATETIME',
        'duration': None,
        'geojson': None,
        'geopoint': None,
        'integer': 'INTEGER',
        'number': 'FLOAT',
        'object': None,
        'string': 'STRING',
        'time': 'TIME',
        'year': 'INTEGER',
        'yearmonth': None,
    }
    if type in supported:
        return supported[type]
    raise tableschema.exceptions.StorageError('Type %s is not supported' % type)
def format_atomic(value):
    """Format an atomic value, escaping reserved characters.

    Strings containing any of ``record.RESERVED_CHARS`` are escaped via
    ``record.ESCAPE_MAPPING``; None becomes ".", everything else is
    stringified.
    """
    # Escape reserved characters in strings first (a str can never hit the
    # None branch below anyway).
    if isinstance(value, str):
        if any(r in value for r in record.RESERVED_CHARS):
            for k, v in record.ESCAPE_MAPPING:
                value = value.replace(k, v)
    if value is None:
        return "."
    else:
        return str(value)
def _format_dict(self, info_dict):
    """Replace falsy values in *info_dict* with "NA" (in place) and return it."""
    for key in info_dict:
        if not info_dict[key]:
            info_dict[key] = "NA"
    return info_dict
def lastLogged(self):
    """Get a dictionary of last logged messages.

    Keys are log types and values are the last messages. A deep copy is
    returned so callers cannot mutate internal state.
    """
    d = copy.deepcopy(self.__lastLogged)
    # Drop the -1 key — presumably an "any type" aggregate bucket; confirm.
    d.pop(-1, None)
    return d
def add_server(self, name, ip, port):
    """Add a new DYNAMIC server entry to the server list."""
    entry = {
        'key': name,
        'name': name.split(':')[0],
        'ip': ip,
        'port': port,
        'username': 'glances',
        'password': '',
        'status': 'UNKNOWN',
        'type': 'DYNAMIC'}
    self._server_list.append(entry)
    logger.debug("Updated servers list (%s servers): %s" %
                 (len(self._server_list), self._server_list))
def median_images(images):
    """Create a median Image from a list of Images.

    Parameters
    ----------
    images : :obj:`list` of :obj:`Image`
        A list of Image objects.

    Returns
    -------
    :obj:`Image`
        A new Image of the same type whose data is the per-pixel median of
        all of the images' data, cast back to the first image's dtype and
        carrying its frame.
    """
    template = images[0]
    stacked = np.array([img.data for img in images])
    median_data = np.median(stacked, axis=0).astype(template.data.dtype)
    return type(template)(median_data, template.frame)
def iterqueue(queue, expected):
    """Iterate all values from *queue* until the *expected* number of EXIT
    sentinel elements has been received.

    Each inner loop consumes items until one EXIT sentinel arrives, then
    the remaining-EXIT counter is decremented.
    """
    while expected > 0:
        for item in iter(queue.get, EXIT):
            yield item
        expected -= 1
def attrfindrows(self, groupname, attrname, value):
    """Get the row numbers of all rows where the attribute matches *value*.

    :param groupname: group containing the attribute column
    :param attrname: attribute (column) name
    :param value: value to match
    :return: list of matching row indices
    """
    values = self.attrgetcol(groupname, attrname)
    # enumerate instead of range(len(...)) indexing.
    return [row for row, cell in enumerate(values) if cell == value]
def is_valid(self):
    """Check that the component is correctly configured.

    :return: always True if no assertion fails
    :raises AssertionError: on invalid properties

    NOTE(review): ``assert`` statements are stripped under ``python -O``,
    so these checks silently vanish in optimized runs.
    """
    assert self._bundle_context
    assert self._container_props is not None
    assert self._get_distribution_provider()
    assert self.get_config_name()
    assert self.get_namespace()
    return True
def update(self, *args, **kwargs):
    """Equivalent to the python dict update method.

    Update with the key/value pairs from each positional mapping, then from
    the keyword arguments, overwriting existing keys.

    Args:
        other (dict): source of key/value pairs to add
    Keyword Args:
        stored directly as entries
    Returns:
        None
    """
    for source in args + (kwargs,):
        for key, value in source.items():
            self[key] = value
def log_to_json(log):
    """Convert a log record into a list of strings:
    [truncated ISO timestamp, level, process, message]."""
    stamp = log.timestamp.isoformat()[:22]
    return [stamp, log.level, log.process, log.message]
def validate_one_of(values):
    """Validate that a field's value is one of the given values.

    :param values: iterable of valid values, or a callable returning one.
    :raises: ``ValidationError('one_of')`` when the value is not allowed.
    """
    def one_of_validator(field, data):
        if field.value is None:
            return
        choices = values() if callable(values) else values
        if field.value not in choices:
            raise ValidationError('one_of', choices=', '.join(map(str, choices)))
    return one_of_validator
def _resample_grid(stations, nodes, lags, mindepth, maxdepth, corners):
resamp_nodes = []
resamp_lags = []
for i, node in enumerate(nodes):
if mindepth < float(node[2]) < maxdepth and\
corners.contains_point(node[0:2]):
resamp_nodes.append(node)
resamp_lags.append([lags[:, i]])
print(np.shape(resamp_lags))
resamp_lags = np.reshape(resamp_lags, (len(resamp_lags), len(stations))).T
print(' '.join(['Grid now has ', str(len(resamp_nodes)), 'nodes']))
return stations, resamp_nodes, resamp_lags | Resample the lagtime grid to a given volume.
For use if the grid from Grid2Time is too large or you want to run a
faster, downsampled scan.
:type stations: list
:param stations:
List of station names from in the form where stations[i] refers to
nodes[i][:] and lags[i][:]
:type nodes: list
:param nodes:
List of node points where nodes[i] referes to stations[i] and
nodes[:][:][0] is latitude in degrees, nodes[:][:][1] is longitude in
degrees, nodes[:][:][2] is depth in km.
:type lags: numpy.ndarray
:param lags:
Array of arrays where lags[i][:] refers to stations[i]. lags[i][j]
should be the delay to the nodes[i][j] for stations[i] in seconds.
:type mindepth: float
:param mindepth: Upper limit of volume
:type maxdepth: float
:param maxdepth: Lower limit of volume
:type corners: matplotlib.path.Path
:param corners:
matplotlib Path of the corners for the 2D polygon to cut to in lat and
lon.
:returns: Stations
:rtype: list
:returns: List of lists of tuples of node locations
:rtype: list
:returns: Array of lags.
:rtype: :class:`numpy.ndarray`
.. note::
**Output:**
station[1] refers to nodes[1] and lags[1] nodes[1][1] refers
to station[1] and lags[1][1] nodes[n][n] is a tuple of latitude,
longitude and depth. |
def decrypt(self, encrypted):
    """Decrypt the encrypted message using Fernet.

    :param encrypted: the encrypted message
    :returns: the decrypted, serialized identifier collection
    """
    return Fernet(self.decryption_cipher_key).decrypt(encrypted)
def view_vector(self, vector, viewup=None):
    """Point the camera at the focal point from the direction of *vector*.

    When ``viewup`` is not given, the configured default camera view-up
    vector is used.
    """
    focal_pt = self.center
    up = viewup if viewup is not None else rcParams['camera']['viewup']
    self.camera_position = [vector + np.array(focal_pt), focal_pt, up]
    return self.reset_camera()
def _handle_packet(self, packet):
    """Event-specific packet handling logic.

    Breaks the packet into events and fires the configured event callback,
    or falls back to ``handle_event`` for console-style handling. Events
    whose id is ignored are skipped.
    """
    for event in packet_events(packet):
        if self.ignore_event(event['id']):
            log.debug('ignoring event with id: %s', event)
            continue
        log.debug('got event: %s', event)
        callback = self.event_callback
        if callback:
            callback(event)
        else:
            self.handle_event(event)
def _data_keys(self):
    """Every child key referencing a dataframe (i.e. not a GroupNode)."""
    keys = []
    for name, child in iteritems(self._children):
        if not isinstance(child, GroupNode):
            keys.append(name)
    return keys
def update(old, new, collection, sneaky_update_filter=None):
    """Update an existing document with a new one, only saving it and
    stamping ``updated_at`` if something has changed.

    :param old: existing document (mutated in place).
    :param new: document carrying the new values.
    :param collection: collection the changed document is saved to.
    :param sneaky_update_filter: filter for updates that should be ignored;
        a dict mapping field names to a comparison function returning True
        iff there is a change worth applying.
    :returns: True iff the document changed and was saved.
    """
    need_save = False
    # Fields listed in '_locked_fields' are never overwritten.
    locked_fields = old.get('_locked_fields', [])
    for key, value in new.items():
        if key in locked_fields:
            continue
        if old.get(key) != value:
            if sneaky_update_filter and key in sneaky_update_filter:
                # Filtered fields change only when the filter approves.
                # NOTE(review): this reads old[key] directly — presumably the
                # key always exists when it is filtered; verify against callers.
                if sneaky_update_filter[key](old[key], value):
                    old[key] = value
                    need_save = True
            else:
                old[key] = value
                need_save = True
        # Any incoming key clears its '+<key>' override entry.
        plus_key = '+%s' % key
        if plus_key in old:
            del old[plus_key]
            need_save = True
    if need_save:
        old['updated_at'] = datetime.datetime.utcnow()
        collection.save(old, safe=True)
    return need_save
def _GetTimeValue(self, name):
    """Retrieves a date and time value.

    Args:
      name (str): name of the date and time value, for example "atime" or
          "mtime".

    Returns:
      dfdatetime.DateTimeValues: date and time value or None if not available.
    """
    meta = self._tsk_file.info.meta
    timestamp = getattr(meta, name, None)
    fraction_of_second = None
    if self._file_system_type in self._TSK_HAS_NANO_FS_TYPES:
        # Nano-precision file systems expose e.g. "atime_nano".
        fraction_of_second = getattr(meta, '{0:s}_nano'.format(name), None)
    return TSKTime(timestamp=timestamp, fraction_of_second=fraction_of_second)
def publish_json(self, channel, obj):
    """Post a JSON-encoded message to channel."""
    payload = json.dumps(obj)
    return self.publish(channel, payload)
def _get_array_serializer(self):
    """Get the encoder and decoder for an 'array' type.

    Uses the 'items' key to build the per-element field serializer.

    :raises ValueError: if 'items' was not specified.
    """
    if not self._items:
        raise ValueError('Must specify \'items\' for \'array\' type')
    field = SchemaField(self._items)

    def encode(value, field=field):
        # Scalars are promoted to single-element lists before encoding.
        items = value if isinstance(value, list) else [value]
        return [field.encode(i) for i in items]

    def decode(value, field=field):
        return [field.decode(i) for i in value]

    return (encode, decode)
def is_one_of(obj, types):
    """Return True iff *obj* is an instance of one of *types*.

    :param obj: object to test.
    :param types: iterable of types.
    :returns: bool
    """
    # isinstance accepts a tuple of types natively; this replaces the
    # hand-rolled loop with the idiomatic single call.
    return isinstance(obj, tuple(types))
def write_krona_plot(self, sample_names, read_taxonomies, output_krona_filename):
    """Create a krona plot at the given location.

    Assumes the krona executable ktImportText is available on the shell
    PATH.
    """
    tempfiles = []
    for n in sample_names:
        # mode='w': the default 'w+b' is binary and would raise TypeError
        # when the unicode rows below are written under Python 3.
        tempfiles.append(tempfile.NamedTemporaryFile(
            mode='w', prefix='GraftMkronaInput', suffix=n))
    try:
        delim = u'\t'
        for _, tax, counts in self._iterate_otu_table_rows(read_taxonomies):
            for i, c in enumerate(counts):
                if c != 0:
                    # Krona text input: count<TAB>taxon1<TAB>taxon2...
                    tempfiles[i].write(delim.join((str(c),
                                                   delim.join(tax)
                                                   )) + "\n")
        for t in tempfiles:
            t.flush()
        cmd = ["ktImportText", '-o', output_krona_filename]
        for i, tmp in enumerate(tempfiles):
            cmd.append(','.join([tmp.name, sample_names[i]]))
        extern.run(' '.join(cmd))
    finally:
        # Closing deletes the NamedTemporaryFiles even if extern.run fails.
        for t in tempfiles:
            t.close()
def reinitialize_all_clients(self):
    """Send a new initialize message to each running LSP server.

    Called when the project path has changed so each server can update its
    respective root path.
    """
    for language, client in self.clients.items():
        if client['status'] != self.RUNNING:
            continue
        instance = client['instance']
        instance.folder = self.get_root_path(language)
        instance.initialize()
def asRFC2822(self, tzinfo=None, includeDayOfWeek=True):
    """Return this Time formatted as specified in RFC 2822 (email format).

    If ``tzinfo`` is a datetime.tzinfo instance the formatted string
    reflects that timezone; otherwise the offset is '-0000', which RFC 2822
    defines as UTC with an unknown local timezone. ``includeDayOfWeek``
    controls the optional weekday prefix.
    """
    dtime = self.asDatetime(tzinfo)
    if tzinfo is None:
        offset = '-0000'
    else:
        offset = '%s%02i%02i' % _timedeltaToSignHrMin(dtime.utcoffset())
    prefix = ''
    if includeDayOfWeek:
        prefix = self.rfc2822Weekdays[dtime.weekday()] + ', '
    body = '%i %s %4i %02i:%02i:%02i %s' % (
        dtime.day,
        self.rfc2822Months[dtime.month - 1],
        dtime.year,
        dtime.hour,
        dtime.minute,
        dtime.second,
        offset)
    return prefix + body
def __get_query_agg_cardinality(cls, field, agg_id=None):
    """Create an es_dsl cardinality aggregation for the approximate count
    of distinct values of a field.

    :param field: field from which to get the count of distinct values.
    :param agg_id: aggregation id; defaults to ``cls.AGGREGATION_ID``.
    :return: (aggregation id, es_dsl aggregation object) tuple.
    """
    agg = A("cardinality", field=field, precision_threshold=cls.ES_PRECISION)
    return (agg_id or cls.AGGREGATION_ID, agg)
def toggle_state(self, state, active=TOGGLE):
    """Toggle the given state for this conversation.

    The state is set when ``active`` is True, otherwise removed. If
    ``active`` is not given it defaults to the inverse of the current
    state, i.e. the state is toggled.
    """
    if active is TOGGLE:
        active = not self.is_state(state)
    action = self.set_state if active else self.remove_state
    action(state)
def attrs(self, dynamizer):
    """Get the attributes for the update.

    :param dynamizer: encoder used to serialize the value.
    """
    payload = {self.key: {'Action': self.action}}
    if not is_null(self.value):
        payload[self.key]['Value'] = dynamizer.encode(self.value)
    return payload
def decorator_of_context_manager(ctxt):
    """Converts a context manager into a decorator.

    The returned factory accepts the manager's arguments and produces a
    decorator that runs the wrapped function inside the manager.

    :param ctxt: Context to run the function in.
    :return: Wrapper around the original function.
    """
    def decorator_fn(*ctx_args, **ctx_kwargs):
        def decorator(fn):
            @functools.wraps(fn)
            def wrapper(*args, **kwargs):
                with ctxt(*ctx_args, **ctx_kwargs):
                    return fn(*args, **kwargs)
            return wrapper
        return decorator
    doc = getattr(ctxt, "__doc__", None)
    if doc is None:
        doc = "Decorator that runs the inner function in the context of %s" % ctxt
    decorator_fn.__doc__ = doc
    return decorator_fn
def _parse_date(date):
result = ''.join(re.findall('\d', date))
l = len(result)
if l in (2, 3, 4):
year = str(datetime.today().year)
return year + result
if l in (6, 7, 8):
return result
return '' | Parse from the user input `date`.
e.g. current year 2016:
input 6-26, 626, ... return 2016626
input 2016-6-26, 2016/6/26, ... retrun 2016626
This fn wouldn't check the date, it only gather the number as a string. |
def count_many(self, views, include=None):
    """Return many ViewCounts.

    :param views: iterable of View or view ids.
    :param include: list of objects to sideload (see the Zendesk
        side-loading API docs).
    """
    url = self._build_url(self.endpoint(count_many=views, include=include))
    return self._get(url)
def get_app_template(name):
    """Getter function of templates for each application.

    ``name`` is colon separated: the left part is the application name,
    the right part the template name, e.g. get_app_template('blog:dashboard.mako')
    returns the dashboard template of the `blog` application.
    """
    app_name, template_name = name.split(':')
    lookup = get_lookups()[app_name]
    return lookup.get_template(template_name)
def get_contributors(self, subreddit, *args, **kwargs):
    """Return a get_content generator of contributors for `subreddit`.

    If it's a public subreddit, then authentication as a moderator of the
    subreddit is required. For protected/private subreddits only access is
    required. See issue #246.
    """
    # The helper closes over *args/**kwargs so the restrict_access
    # decorator can be applied conditionally below.
    def get_contributors_helper(self, subreddit):
        url = self.config['contributors'].format(
            subreddit=six.text_type(subreddit))
        return self._get_userlist(url, user_only=True, *args, **kwargs)

    if self.is_logged_in():
        if not isinstance(subreddit, objects.Subreddit):
            subreddit = self.get_subreddit(subreddit)
        if subreddit.subreddit_type == "public":
            # Public subs require moderator scope; wrap only in that case.
            decorator = decorators.restrict_access(scope='read', mod=True)
            return decorator(get_contributors_helper)(self, subreddit)
    return get_contributors_helper(self, subreddit)
def get_rml_processors(es_defs):
    """Return ``es_defs`` with its RML processor definitions instantiated.

    Args:
    -----
        es_defs: the rdf_class elasticsearch definitions (mutated in place
            when processor definitions are present).
    """
    processor_defs = es_defs.get("kds_esRmlProcessor", [])
    if not processor_defs:
        return es_defs
    updated = []
    for proc_def in processor_defs:
        params = proc_def['kds_rmlProcessorParams'][0]
        kwargs = {}
        if params.get("kds_rtn_format"):
            kwargs["rtn_format"] = params.get("kds_rtn_format")[0]
        label = proc_def['rdfs_label'][0]
        updated.append(dict(
            name=label,
            subj=params["kds_subjectKwarg"][0],
            proc_kwargs=kwargs,
            force=proc_def.get('kds_forceNested', [False])[0],
            processor=CFG.rml.get_processor(label,
                                            proc_def['kds_esRmlMapping'],
                                            proc_def['rdf_type'][0])))
    es_defs['kds_esRmlProcessor'] = updated
    return es_defs
def compiled_hash_func(self):
    """Return a compiled hash function based on the hash of the
    stringified primary keys.

    NOTE: concatenating stringified keys isn't the most efficient scheme.
    """
    parts = ["str(self.{})".format(key) for key in self.primary_keys]
    concatenated = "+ ".join(parts)
    return ALCHEMY_TEMPLATES.hash_function.safe_substitute(
        concated_primary_key_strs=concatenated)
def is_period_arraylike(arr):
    """Check whether an array-like is a periodical array-like or PeriodIndex.

    Parameters
    ----------
    arr : array-like
        The array-like to check.

    Returns
    -------
    boolean
        Whether or not the array-like is a periodical array-like or
        PeriodIndex instance.

    Examples
    --------
    >>> is_period_arraylike([1, 2, 3])
    False
    >>> is_period_arraylike(pd.PeriodIndex(["2017-01-01"], freq="D"))
    True
    """
    period_types = (ABCPeriodIndex, ABCPeriodArray)
    if isinstance(arr, period_types):
        return True
    if isinstance(arr, (np.ndarray, ABCSeries)):
        return is_period_dtype(arr.dtype)
    return getattr(arr, 'inferred_type', None) == 'period'
def _all_tables_present(self, txn):
    """Check whether any asset-db tables are present in the database.

    Parameters
    ----------
    txn : Transaction
        The open transaction to check in.

    Returns
    -------
    has_tables : bool
        True if any tables are present, otherwise False.
    """
    conn = txn.connect()
    return any(
        txn.dialect.has_table(conn, table_name)
        for table_name in asset_db_table_names
    )
def load_script(filename):
    """Loads a python script as a module.

    Provided so applications can load a Python module by its file name.

    :param string filename: name of the python file to load as a module.
    :return: the Python module loaded from the specified file.
    """
    path, module_name, _ext = _extract_script_components(filename)
    add_search_path(path)
    return _load_module(module_name)
def _save_token_on_disk(self):
    """Helper that persists the current token (plus client secret) to the
    configured token file as pretty-printed UTF-8 JSON."""
    payload = dict(self._token, client_secret=self._client_secret)
    with codecs.open(config.TOKEN_FILE_PATH, 'w', 'utf8') as token_file:
        json.dump(
            payload, token_file,
            ensure_ascii=False,
            sort_keys=True,
            indent=4,
        )
def clean(self):
    """Clean stale connections in all pools, then drop empty pools.

    Pools clean themselves whenever a connection is fetched; this pass
    takes care of pools that are no longer used at all. Throttled to run
    at most once per CLEAN_INTERVAL.
    """
    with self.mutex:
        now = time.time()
        if self.last_clean_time + self.CLEAN_INTERVAL >= now:
            return
        stale_hosts = []
        for host, pool in self.host_to_pool.items():
            pool.clean()
            if pool.size() == 0:
                stale_hosts.append(host)
        for host in stale_hosts:
            self.host_to_pool.pop(host)
        self.last_clean_time = now
def reboot(name, conn=None):
    """Reboot a single VM.

    :param name: name of the VM to reboot.
    :param conn: optional existing cloud connection; created when omitted.
    :returns: True if the reboot succeeded, False otherwise.
    """
    if not conn:
        conn = get_conn()

    node = get_node(conn, name)
    if node is None:
        log.error('Unable to find the VM %s', name)
        # BUG FIX: previously fell through and called reboot_node(None).
        return False
    log.info('Rebooting VM: %s', name)
    ret = conn.reboot_node(node)
    if ret:
        log.info('Rebooted VM: %s', name)
        # BUG FIX: a missing comma after the description fused the stray
        # 'salt-cloud' literal with the event tag via implicit string
        # concatenation ('salt-cloudsalt/cloud/...'); the tag is now passed
        # correctly.
        __utils__['cloud.fire_event'](
            'event',
            '{0} has been rebooted'.format(name),
            'salt/cloud/{0}/rebooting'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        return True

    log.error('Failed to reboot VM: %s', name)
    return False
def add_child(self, valid_policy, qualifier_set, expected_policy_set):
    """Creates a new PolicyTreeNode as a child of this node.

    :param valid_policy: unicode string of a policy name or OID.
    :param qualifier_set: instance of asn1crypto.x509.PolicyQualifierInfos.
    :param expected_policy_set: set of unicode policy names or OIDs.
    """
    node = PolicyTreeNode(valid_policy, qualifier_set, expected_policy_set)
    node.parent = self
    self.children.append(node)
def parent(self):
    """Parent directory that holds this directory (loaded lazily on first
    access when a parent id is known)."""
    if self._parent is None and self.pid is not None:
        self._parent = self.api._load_directory(self.pid)
    return self._parent
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.