code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def from_span(cls, inputs, window_length, span, **kwargs):
if span <= 1:
raise ValueError(
"`span` must be a positive number. %s was passed." % span
)
decay_rate = (1.0 - (2.0 / (1.0 + span)))
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
) | Convenience constructor for passing `decay_rate` in terms of `span`.
Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the
behavior equivalent to passing `span` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=(1 - (2.0 / (1 + 15.0))),
# )
my_ewma = EWMA.from_span(
inputs=[EquityPricing.close],
window_length=30,
span=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`. |
def load(self, name=None, *args, **kwargs):
"Load the instance of the object from the stash."
inst = self.stash.load(name)
if inst is None:
inst = self.instance(name, *args, **kwargs)
logger.debug(f'loaded (conf mng) instance: {inst}')
return inst | Load the instance of the object from the stash. |
def search(query, query_type=DEFAULT_QUERY_TYPE):
statement, arguments = _build_search(query)
if statement is None and arguments is None:
return QueryResults([], [], 'AND')
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
cursor.execute(statement, arguments)
search_results = cursor.fetchall()
return QueryResults(search_results, query, query_type) | Search database using parsed query.
Executes a database search query from the given ``query``
(a ``Query`` object) and optionally accepts a list of search weights.
By default, the search results are ordered by weight.
:param query: containing terms, filters, and sorts.
:type query: Query
:returns: a sequence of records that match the query conditions
:rtype: QueryResults (which is a sequence of QueryRecord objects) |
def get_png_data_url(blob: Optional[bytes]) -> str:
return BASE64_PNG_URL_PREFIX + base64.b64encode(blob).decode('ascii') | Converts a PNG blob into a local URL encapsulating the PNG. |
def cluster_get_keys_in_slots(self, slot, count, *, encoding):
return self.execute(b'CLUSTER', b'GETKEYSINSLOT', slot, count,
encoding=encoding) | Return local key names in the specified hash slot. |
def get_recirc_content(self, published=True, count=3):
query = self.get_query()
if not query.get('included_ids'):
qs = Content.search_objects.search()
qs = qs.query(
TagBoost(slugs=self.tags.values_list("slug", flat=True))
).filter(
~Ids(values=[self.id])
).sort(
"_score"
)
return qs[:count]
query['included_ids'] = query['included_ids'][:count]
search = custom_search_model(Content, query, published=published, field_map={
"feature_type": "feature_type.slug",
"tag": "tags.slug",
"content-type": "_type"
})
return search | gets the first 3 content objects in the `included_ids` |
def rescale(self, fun):
if self.bands != 1:
raise ValueError('only single band images are currently supported')
mat = self.matrix()
scaled = fun(mat)
self.assign(scaled, band=0) | perform raster computations with custom functions and assign them to the existing raster object in memory
Parameters
----------
fun: function
the custom function to compute on the data
Examples
--------
>>> with Raster('filename') as ras:
>>> ras.rescale(lambda x: 10 * x) |
def data_duration(self, data):
lengths = []
for key in self._time:
for idx in self._time.get(key, []):
lengths.append(data[key].shape[idx])
return min(lengths) | Compute the valid data duration of a dict
Parameters
----------
data : dict
As produced by pumpp.transform
Returns
-------
length : int
The minimum temporal extent of a dynamic observation in data |
def get_3_3_tuple_list(self,obj,default=None):
if is_sequence3(obj):
return [self.get_3_3_tuple(o,default) for o in obj]
return [self.get_3_3_tuple(obj,default)] | Return list of 3x3-tuples. |
def get_block_type(self, def_id):
try:
return self._definitions[def_id]
except KeyError:
try:
return def_id.aside_type
except AttributeError:
raise NoSuchDefinition(repr(def_id)) | Get a block_type by its definition id. |
def download(self, url, output_path):
request_failed_message = self.tr(
"Can't access PetaBencana API: {source}").format(
source=url)
downloader = FileDownloader(url, output_path)
result, message = downloader.download()
if not result:
display_warning_message_box(
self,
self.tr('Download error'),
self.tr(request_failed_message + '\n' + message))
if result == QNetworkReply.OperationCanceledError:
display_warning_message_box(
self,
self.tr('Download error'),
self.tr(message)) | Download file from API url and write to output path.
:param url: URL of the API.
:type url: str
:param output_path: Path of output file,
:type output_path: str |
def parse_opera (url_data):
from ..bookmarks.opera import parse_bookmark_data
for url, name, lineno in parse_bookmark_data(url_data.get_content()):
url_data.add_url(url, line=lineno, name=name) | Parse an opera bookmark file. |
def assign_to_all_link_group(self, group=0x01):
msg = StandardSend(self._address,
COMMAND_ASSIGN_TO_ALL_LINK_GROUP_0X01_NONE,
cmd2=group)
self._send_msg(msg) | Assign a device to an All-Link Group.
The default is group 0x01. |
def insert(self, iterable, data=None, weight=1.0):
self.root.insert(iterable, index=0, data=data, weight=1.0) | Used to insert into he root node
Args
iterable(hashable): index or key used to identify
data(object): data to be paired with the key |
def cli(ctx, config, debug):
ctx.obj['config'] = config
ctx.obj['engine'] = stex.SnakeTeX(config_file=config, debug=debug) | SnakTeX command line interface - write LaTeX faster through templating. |
def parse(cls, msg):
lines = msg.splitlines()
method, uri, version = lines[0].split()
headers = cls.parse_headers('\r\n'.join(lines[1:]))
return cls(version=version, uri=uri, method=method, headers=headers) | Parse message string to request object. |
def job_data(job_id):
job_dict = db.get_job(job_id)
if not job_dict:
return json.dumps({'error': 'job_id not found'}), 404, headers
if not is_authorized(job_dict):
return json.dumps({'error': 'not authorized'}), 403, headers
if job_dict['error']:
return json.dumps({'error': job_dict['error']}), 409, headers
content_type = job_dict['metadata'].get('mimetype')
return flask.Response(job_dict['data'], mimetype=content_type) | Get the raw data that the job returned. The mimetype
will be the value provided in the metdata for the key ``mimetype``.
**Results:**
:rtype: string
:statuscode 200: no error
:statuscode 403: not authorized to view the job's data
:statuscode 404: job id not found
:statuscode 409: an error occurred |
def add_scanner_param(self, name, scanner_param):
assert name
assert scanner_param
self.scanner_params[name] = scanner_param
command = self.commands.get('start_scan')
command['elements'] = {
'scanner_params':
{k: v['name'] for k, v in self.scanner_params.items()}} | Add a scanner parameter. |
def build_sensors_list(self, type):
ret = []
if type == SENSOR_TEMP_UNIT and self.init_temp:
input_list = self.stemps
self.stemps = psutil.sensors_temperatures()
elif type == SENSOR_FAN_UNIT and self.init_fan:
input_list = self.sfans
self.sfans = psutil.sensors_fans()
else:
return ret
for chipname, chip in iteritems(input_list):
i = 1
for feature in chip:
sensors_current = {}
if feature.label == '':
sensors_current['label'] = chipname + ' ' + str(i)
else:
sensors_current['label'] = feature.label
sensors_current['value'] = int(feature.current)
sensors_current['unit'] = type
ret.append(sensors_current)
i += 1
return ret | Build the sensors list depending of the type.
type: SENSOR_TEMP_UNIT or SENSOR_FAN_UNIT
output: a list |
def _find_playlist(self):
data = None
if self.id:
data = self.connection.get_item(
'find_playlist_by_id', playlist_id=self.id)
elif self.reference_id:
data = self.connection.get_item(
'find_playlist_by_reference_id',
reference_id=self.reference_id)
if data:
self._load(data) | Internal method to populate the object given the ``id`` or
``reference_id`` that has been set in the constructor. |
def process_delta(delta):
kwargs = {
"type": delta["type"],
"date": datetime.datetime.utcfromtimestamp(delta["date"]),
"object_id": delta["object_data"]["id"],
}
print(" * {type} at {date} with ID {object_id}".format(**kwargs)) | This is the part of the code where you would process the information
from the webhook notification. Each delta is one change that happened,
and might require fetching message IDs, updating your database,
and so on.
However, because this is just an example project, we'll just print
out information about the notification, so you can see what
information is being sent. |
def _buildDict(self):
lexDict = {}
with io.open(self.islePath, "r", encoding='utf-8') as fd:
wordList = [line.rstrip('\n') for line in fd]
for row in wordList:
word, pronunciation = row.split(" ", 1)
word, extraInfo = word.split("(", 1)
extraInfo = extraInfo.replace(")", "")
extraInfoList = [segment for segment in extraInfo.split(",")
if ("_" not in segment and "+" not in segment and
':' not in segment and segment != '')]
lexDict.setdefault(word, [])
lexDict[word].append((pronunciation, extraInfoList))
return lexDict | Builds the isle textfile into a dictionary for fast searching |
def __parse_affiliations_yml(self, affiliations):
enrollments = []
for aff in affiliations:
name = self.__encode(aff['organization'])
if not name:
error = "Empty organization name"
msg = self.GRIMOIRELAB_INVALID_FORMAT % {'error': error}
raise InvalidFormatError(cause=msg)
elif name.lower() == 'unknown':
continue
org = Organization(name=name)
if org is None:
continue
if 'start' in aff:
start_date = self.__force_datetime(aff['start'])
else:
start_date = MIN_PERIOD_DATE
if 'end' in aff:
end_date = self.__force_datetime(aff['end'])
else:
end_date = MAX_PERIOD_DATE
enrollment = Enrollment(start=start_date, end=end_date,
organization=org)
enrollments.append(enrollment)
self.__validate_enrollment_periods(enrollments)
return enrollments | Parse identity's affiliations from a yaml dict. |
def convex_hull(self):
if self._faces is None:
if self._vertices is None:
return None
self.triangulate()
return self._convex_hull | Return an array of vertex indexes representing the convex hull.
If faces have not been computed for this mesh, the function
computes them.
If no vertices or faces are specified, the function returns None. |
def _init_message(self):
try:
self.message = compat.text_type(self.error)
except UnicodeError:
try:
self.message = str(self.error)
except UnicodeEncodeError:
self.message = self.error.args[0]
if not isinstance(self.message, compat.text_type):
self.message = compat.text_type(self.message, 'ascii', 'replace') | Find a unicode representation of self.error |
def skip(self, length):
if length >= self.__size:
skip_amount = self.__size
rem = length - skip_amount
self.__segments.clear()
self.__offset = 0
self.__size = 0
self.position += skip_amount
else:
rem = 0
self.read(length, skip=True)
return rem | Removes ``length`` bytes and returns the number length still required to skip |
def _mosaik_args_from_config(config):
multi_mappers = config["algorithm"].get("multiple_mappers", True)
multi_flags = ["-m", "all"] if multi_mappers else ["-m", "unique"]
error_flags = ["-mm", "2"]
num_cores = config["algorithm"].get("num_cores", 1)
core_flags = ["-p", str(num_cores)] if num_cores > 1 else []
return core_flags + multi_flags + error_flags | Configurable high level options for mosaik. |
def get_pub_str(self, name='master'):
path = os.path.join(self.opts['pki_dir'],
name + '.pub')
if not os.path.isfile(path):
key = self.__get_keys()
if HAS_M2:
key.save_pub_key(path)
else:
with salt.utils.files.fopen(path, 'wb+') as wfh:
wfh.write(key.publickey().exportKey('PEM'))
with salt.utils.files.fopen(path) as rfh:
return rfh.read() | Return the string representation of a public key
in the pki-directory |
def setup_config(epab_version: str):
logger = logging.getLogger('EPAB')
logger.debug('setting up config')
elib_config.ELIBConfig.setup(
app_name='EPAB',
app_version=epab_version,
config_file_path='pyproject.toml',
config_sep_str='__',
root_path=['tool', 'epab']
)
elib_config.write_example_config('pyproject.toml.example')
if not pathlib.Path('pyproject.toml').exists():
raise FileNotFoundError('pyproject.toml')
elib_config.validate_config() | Set up elib_config package
:param epab_version: installed version of EPAB as as string |
def start(self, container, instances=None, map_name=None, **kwargs):
return self.run_actions('start', container, instances=instances, map_name=map_name, **kwargs) | Starts instances for a container configuration.
:param container: Container name.
:type container: unicode | str
:param instances: Instance names to start. If not specified, will start all instances as specified in the
configuration (or just one default instance).
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:type instances: collections.Iterable[unicode | str | NoneType]
:param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to
the main container start.
:return: Return values of started containers.
:rtype: list[dockermap.map.runner.ActionOutput] |
def connect_redis(redis_client, name=None, transaction=False):
return ConnectionManager.connect_redis(
redis_client=redis_client, name=name, transaction=transaction) | Connect your redis-py instance to redpipe.
Example:
.. code:: python
redpipe.connect_redis(redis.StrictRedis(), name='users')
Do this during your application bootstrapping.
You can also pass a redis-py-cluster instance to this method.
.. code:: python
redpipe.connect_redis(rediscluster.StrictRedisCluster(), name='users')
You are allowed to pass in either the strict or regular instance.
.. code:: python
redpipe.connect_redis(redis.StrictRedis(), name='a')
redpipe.connect_redis(redis.Redis(), name='b')
redpipe.connect_redis(rediscluster.StrictRedisCluster(...), name='c')
redpipe.connect_redis(rediscluster.RedisCluster(...), name='d')
:param redis_client:
:param name: nickname you want to give to your connection.
:param transaction:
:return: |
def list_diff(list_a, list_b):
result = []
for item in list_b:
if not item in list_a:
result.append(item)
return result | Return the items from list_b that differ from list_a |
def _mean_square_error(y, y_pred, w):
return np.average(((y_pred - y) ** 2), weights=w) | Calculate the mean square error. |
def valgrind(command, env={}):
xml_file = tempfile.NamedTemporaryFile()
internal.register.after_check(lambda: _check_valgrind(xml_file))
return run(f"valgrind --show-leak-kinds=all --xml=yes --xml-file={xml_file.name} -- {command}", env=env) | Run a command with valgrind.
:param command: command to be run
:type command: str
:param env: environment in which to run command
:type env: str
:raises check50.Failure: if, at the end of the check, valgrind reports any errors
This function works exactly like :func:`check50.run`, with the additional effect that ``command`` is run through
``valgrind`` and ``valgrind``'s output is automatically reviewed at the end of the check for memory leaks and other
bugs. If ``valgrind`` reports any issues, the check is failed and student-friendly messages are printed to the log.
Example usage::
check50.c.valgrind("./leaky").stdin("foo").stdout("bar").exit(0)
.. note::
It is recommended that the student's code is compiled with the `-ggdb`
flag so that additional information, such as the file and line number at which
the issue was detected can be included in the log as well. |
def has_prev(self):
if self._result_cache:
return self._result_cache.has_prev
return self.all().has_prev | Return True if there are previous values present |
def add_edge(self, from_node, to_node):
if to_node not in self.nodes:
self.add_node(to_node)
try:
self.nodes[from_node]["sons"].append(to_node)
except KeyError:
self.nodes[from_node] = {"dfs_loop_status": "", "sons": [to_node]} | Add edge between two node
The edge is oriented
:param from_node: node where edge starts
:type from_node: object
:param to_node: node where edge ends
:type to_node: object
:return: None |
def get_service(station: str) -> Service:
for prefix in PREFERRED:
if station.startswith(prefix):
return PREFERRED[prefix]
return NOAA | Returns the preferred service for a given station |
def load_nddata(self, ndd, naxispath=None):
self.clear_metadata()
ahdr = self.get_header()
ahdr.update(ndd.meta)
self.setup_data(ndd.data, naxispath=naxispath)
if ndd.wcs is None:
self.wcs = wcsmod.WCS(logger=self.logger)
self.wcs.load_header(ahdr)
else:
wcsinfo = wcsmod.get_wcs_class('astropy')
self.wcs = wcsinfo.wrapper_class(logger=self.logger)
self.wcs.load_nddata(ndd) | Load from an astropy.nddata.NDData object. |
def add_external_reference_to_term(self,term_id, external_ref):
if self.term_layer is not None:
self.term_layer.add_external_reference(term_id, external_ref) | Adds an external reference to the given term identifier
@type term_id: string
@param term_id: the term identifier
@param external_ref: an external reference object
@type external_ref: L{CexternalReference} |
def Group(items, key):
result = {}
for item in items:
result.setdefault(key(item), []).append(item)
return result | Groups items by given key function.
Args:
items: An iterable or an iterator of items.
key: A function which given each item will return the key.
Returns:
A dict with keys being each unique key and values being a list of items of
that key. |
def _resolve(self, spec):
if not spec.remote:
return spec
try:
resolved_urls = self._resolver.resolve(spec.remote)
if resolved_urls:
return CacheSpec(local=spec.local, remote='|'.join(resolved_urls))
return spec
except Resolver.ResolverError as e:
self._log.warn('Error while resolving from {0}: {1}'.format(spec.remote, str(e)))
if spec.local:
return CacheSpec(local=spec.local, remote=None)
return None | Attempt resolving cache URIs when a remote spec is provided. |
def images(language, word, n = 20, *args, **kwargs):
from lltk.images import google
return google(language, word, n, *args, **kwargs) | Returns a list of URLs to suitable images for a given word. |
def is_transition(self):
return self.is_snv and is_purine(self.ref) == is_purine(self.alt) | Is this variant and pyrimidine to pyrimidine change or purine to purine change |
def set_goid2color_pval(self, goid2color):
alpha2col = self.alpha2col
if self.pval_name is not None:
pval_name = self.pval_name
for goid, res in self.go2res.items():
pval = getattr(res, pval_name, None)
if pval is not None:
for alpha, color in alpha2col.items():
if pval <= alpha and res.study_count != 0:
if goid not in goid2color:
goid2color[goid] = color | Fill missing colors based on p-value of an enriched GO term. |
def load_configuration(linter):
name_checker = get_checker(linter, NameChecker)
name_checker.config.good_names += ('qs', 'urlpatterns', 'register', 'app_name', 'handler500')
linter.config.black_list += ('migrations', 'south_migrations') | Amend existing checker config. |
def get_pmg_structure(phonopy_structure):
lattice = phonopy_structure.get_cell()
frac_coords = phonopy_structure.get_scaled_positions()
symbols = phonopy_structure.get_chemical_symbols()
masses = phonopy_structure.get_masses()
mms = phonopy_structure.get_magnetic_moments()
mms = mms or [0] * len(symbols)
return Structure(lattice, symbols, frac_coords,
site_properties={"phonopy_masses": masses,
"magnetic_moments": mms}) | Convert a PhonopyAtoms object to pymatgen Structure object.
Args:
phonopy_structure (PhonopyAtoms): A phonopy structure object. |
def orchestration(self):
if self._orchestration is not None:
return self._orchestration
API_VERSIONS = {
'1': 'heatclient.v1.client.Client',
}
heat_client = utils.get_client_class(
API_NAME,
self._instance._api_version[API_NAME],
API_VERSIONS)
LOG.debug('Instantiating orchestration client: %s', heat_client)
endpoint = self._instance.get_endpoint_for_service_type(
'orchestration')
token = self._instance.auth.get_token(self._instance.session)
client = heat_client(
endpoint=endpoint,
auth_url=self._instance._auth_url,
token=token,
username=self._instance._username,
password=self._instance._password,
region_name=self._instance._region_name,
insecure=self._instance._insecure,
ca_file=self._instance._cli_options.os_cacert,
)
self._orchestration = client
return self._orchestration | Returns an orchestration service client |
def _render(roster_file, **kwargs):
renderers = salt.loader.render(__opts__, {})
domain = __opts__.get('roster_domain', '')
try:
result = salt.template.compile_template(roster_file,
renderers,
__opts__['renderer'],
__opts__['renderer_blacklist'],
__opts__['renderer_whitelist'],
mask_value='passw*',
**kwargs)
result.setdefault('host', '{}.{}'.format(os.path.basename(roster_file), domain))
return result
except:
log.warning('Unable to render roster file "%s".', roster_file, exc_info=True)
return {} | Render the roster file |
def to_ccw(geom):
if isinstance(geom, sgeom.Polygon) and not geom.exterior.is_ccw:
geom = sgeom.polygon.orient(geom)
return geom | Reorients polygon to be wound counter-clockwise. |
def getPhotosets(self):
method = 'flickr.photosets.getList'
data = _doget(method, user_id=self.id)
sets = []
if isinstance(data.rsp.photosets.photoset, list):
for photoset in data.rsp.photosets.photoset:
sets.append(Photoset(photoset.id, photoset.title.text,\
Photo(photoset.primary),\
secret=photoset.secret, \
server=photoset.server, \
description=photoset.description.text,
photos=photoset.photos))
else:
photoset = data.rsp.photosets.photoset
sets.append(Photoset(photoset.id, photoset.title.text,\
Photo(photoset.primary),\
secret=photoset.secret, \
server=photoset.server, \
description=photoset.description.text,
photos=photoset.photos))
return sets | Returns a list of Photosets. |
def create_scree_plot(in_filename, out_filename, plot_title):
scree_plot_args = ("--evec", in_filename, "--out", out_filename,
"--scree-plot-title", plot_title)
try:
PlotEigenvalues.main(argString=scree_plot_args)
except PlotEigenvalues.ProgramError as e:
msg = "PlotEigenvalues: {}".format(e)
raise ProgramError(msg) | Creates a scree plot using smartpca results.
:param in_filename: the name of the input file.
:param out_filename: the name of the output file.
:param plot_title: the title of the scree plot.
:type in_filename: str
:type out_filename: str
:type plot_title: str |
def unlock_swarm(self, key):
if isinstance(key, dict):
if 'UnlockKey' not in key:
raise errors.InvalidArgument('Invalid unlock key format')
else:
key = {'UnlockKey': key}
url = self._url('/swarm/unlock')
res = self._post_json(url, data=key)
self._raise_for_status(res)
return True | Unlock a locked swarm.
Args:
key (string): The unlock key as provided by
:py:meth:`get_unlock_key`
Raises:
:py:class:`docker.errors.InvalidArgument`
If the key argument is in an incompatible format
:py:class:`docker.errors.APIError`
If the server returns an error.
Returns:
`True` if the request was successful.
Example:
>>> key = client.get_unlock_key()
>>> client.unlock_node(key) |
def add_result(self, values):
idx = [values['host']]
for gid in self.key_gids[1:]:
idx.append(values[gid])
idx = tuple(idx)
try:
self.results[idx] += 1
except KeyError:
self.results[idx] = 1
self._last_idx = idx | Add a tuple or increment the value of an existing one
in the rule results dictionary. |
def parse_eprocess(self, eprocess_data):
Name = eprocess_data['_EPROCESS']['Cybox']['Name']
PID = eprocess_data['_EPROCESS']['Cybox']['PID']
PPID = eprocess_data['_EPROCESS']['Cybox']['Parent_PID']
return {'Name': Name, 'PID': PID, 'PPID': PPID} | Parse the EProcess object we get from some rekall output |
def chat(self, message):
if message:
action_chat = sc_pb.ActionChat(
channel=sc_pb.ActionChat.Broadcast, message=message)
action = sc_pb.Action(action_chat=action_chat)
return self.act(action) | Send chat message as a broadcast. |
def put(src_path, dest_hdfs_path, **kwargs):
cp(path.abspath(src_path, local=True), dest_hdfs_path, **kwargs) | \
Copy the contents of ``src_path`` to ``dest_hdfs_path``.
``src_path`` is forced to be interpreted as an ordinary local path
(see :func:`~path.abspath`). The source file is opened for reading
and the copy is opened for writing. Additional keyword arguments,
if any, are handled like in :func:`open`. |
def select_where(self, where_col_list, where_value_list, col_name=''):
res = []
col_ids = []
for col_id, col in enumerate(self.header):
if col in where_col_list:
col_ids.append([col_id, col])
for row_num, row in enumerate(self.arr):
keep_this_row = True
for ndx, where_col in enumerate(col_ids):
if row[where_col[0]] != where_value_list[ndx]:
keep_this_row = False
if keep_this_row is True:
if col_name == '':
res.append([row_num, row])
else:
l_dat = self.get_col_by_name(col_name)
if l_dat is not None:
res.append(row[l_dat])
return res | selects rows from the array where col_list == val_list |
def save(self, path_info, checksum):
assert path_info["scheme"] == "local"
assert checksum is not None
path = path_info["path"]
assert os.path.exists(path)
actual_mtime, actual_size = get_mtime_and_size(path)
actual_inode = get_inode(path)
existing_record = self.get_state_record_for_inode(actual_inode)
if not existing_record:
self._insert_new_state_record(
path, actual_inode, actual_mtime, actual_size, checksum
)
return
self._update_state_for_path_changed(
path, actual_inode, actual_mtime, actual_size, checksum
) | Save checksum for the specified path info.
Args:
path_info (dict): path_info to save checksum for.
checksum (str): checksum to save. |
def _remove_ordered_from_queue(self, last_caught_up_3PC=None):
to_remove = []
for i, msg in enumerate(self.outBox):
if isinstance(msg, Ordered) and \
(not last_caught_up_3PC or
compare_3PC_keys((msg.viewNo, msg.ppSeqNo), last_caught_up_3PC) >= 0):
to_remove.append(i)
self.logger.trace('{} going to remove {} Ordered messages from outbox'.format(self, len(to_remove)))
removed = []
for i in reversed(to_remove):
removed.insert(0, self.outBox[i])
del self.outBox[i]
return removed | Remove any Ordered that the replica might be sending to node which is
less than or equal to `last_caught_up_3PC` if `last_caught_up_3PC` is
passed else remove all ordered, needed in catchup |
def callback(newstate):
print('callback: ', newstate)
if newstate == modem.STATE_RING:
if state == modem.STATE_IDLE:
att = {"cid_time": modem.get_cidtime,
"cid_number": modem.get_cidnumber,
"cid_name": modem.get_cidname}
print('Ringing', att)
elif newstate == modem.STATE_CALLERID:
att = {"cid_time": modem.get_cidtime,
"cid_number": modem.get_cidnumber,
"cid_name": modem.get_cidname}
print('CallerID', att)
elif newstate == modem.STATE_IDLE:
print('idle')
return | Callback from modem, process based on new state |
def parse_venue(data):
return MeetupVenue(
id=data.get('id', None),
name=data.get('name', None),
address_1=data.get('address_1', None),
address_2=data.get('address_2', None),
address_3=data.get('address_3', None),
city=data.get('city', None),
state=data.get('state', None),
zip=data.get('zip', None),
country=data.get('country', None),
lat=data.get('lat', None),
lon=data.get('lon', None)
) | Parse a ``MeetupVenue`` from the given response data.
Returns
-------
A `pythonkc_meetups.types.`MeetupVenue``. |
def acquire_multi(self, n=1):
browsers = []
with self._lock:
if len(self._in_use) >= self.size:
raise NoBrowsersAvailable
while len(self._in_use) < self.size and len(browsers) < n:
browser = self._fresh_browser()
browsers.append(browser)
self._in_use.add(browser)
return browsers | Returns a list of up to `n` browsers.
Raises:
NoBrowsersAvailable if none available |
def set_layout(wlayout, callback):
global display_size
global window_list
global loaded_layout
global pending_load
global vehiclename
if not wlayout.name in window_list and loaded_layout is not None and wlayout.name in loaded_layout:
callback(loaded_layout[wlayout.name])
window_list[wlayout.name] = ManagedWindow(wlayout, callback)
display_size = wlayout.dsize
if pending_load:
pending_load = False
load_layout(vehiclename) | set window layout |
def create_destination(flowable, container, at_top_of_container=False):
vertical_position = 0 if at_top_of_container else container.cursor
ids = flowable.get_ids(container.document)
destination = NamedDestination(*(str(id) for id in ids))
container.canvas.annotate(destination, 0, vertical_position,
container.width, None)
container.document.register_page_reference(container.page, flowable) | Create a destination anchor in the `container` to direct links to
`flowable` to. |
def _parse_status(self, output):
parsed = self._parse_machine_readable_output(output)
statuses = []
for target, tuples in itertools.groupby(parsed, lambda tup: tup[1]):
info = {kind: data for timestamp, _, kind, data in tuples}
status = Status(name=target, state=info.get('state'),
provider=info.get('provider-name'))
statuses.append(status)
return statuses | Unit testing is so much easier when Vagrant is removed from the
equation. |
def parse(self, argv):
rv = {}
for pattern in self.patterns:
pattern.apply(rv, argv)
return rv | Parses the given `argv` and returns a dictionary mapping argument names
to the values found in `argv`. |
def update(self, key, value):
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
if key in self.data:
v = self.get(key)
v.update(value)
else:
v = value
self.set(key, v) | Update a `key` in the keystore.
If the key is non-existent, it's being created |
def decode_osgi_props(input_props):
result_props = {}
intfs = decode_list(input_props, OBJECTCLASS)
result_props[OBJECTCLASS] = intfs
for intf in intfs:
package_key = ENDPOINT_PACKAGE_VERSION_ + package_name(intf)
intfversionstr = input_props.get(package_key, None)
if intfversionstr:
result_props[package_key] = intfversionstr
result_props[ENDPOINT_ID] = input_props[ENDPOINT_ID]
result_props[ENDPOINT_SERVICE_ID] = input_props[ENDPOINT_SERVICE_ID]
result_props[ENDPOINT_FRAMEWORK_UUID] = input_props[ENDPOINT_FRAMEWORK_UUID]
imp_configs = decode_list(input_props, SERVICE_IMPORTED_CONFIGS)
if imp_configs:
result_props[SERVICE_IMPORTED_CONFIGS] = imp_configs
intents = decode_list(input_props, SERVICE_INTENTS)
if intents:
result_props[SERVICE_INTENTS] = intents
remote_configs = decode_list(input_props, REMOTE_CONFIGS_SUPPORTED)
if remote_configs:
result_props[REMOTE_CONFIGS_SUPPORTED] = remote_configs
remote_intents = decode_list(input_props, REMOTE_INTENTS_SUPPORTED)
if remote_intents:
result_props[REMOTE_INTENTS_SUPPORTED] = remote_intents
return result_props | Decodes the OSGi properties of the given endpoint properties |
def _on_process_error(self, error):
if self is None:
return
err = PROCESS_ERROR_STRING[error]
self._formatter.append_message(err + '\r\n', output_format=OutputFormat.ErrorMessageFormat) | Display child process error in the text edit. |
def make_pizzly_gtf(gtf_file, out_file, data):
if file_exists(out_file):
return out_file
db = gtf.get_gtf_db(gtf_file)
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for gene in db.features_of_type("gene"):
children = [x for x in db.children(id=gene)]
for child in children:
if child.attributes.get("gene_biotype", None):
gene_biotype = child.attributes.get("gene_biotype")
gene.attributes['gene_biotype'] = gene_biotype
gene.source = gene_biotype[0]
print(gene, file=out_handle)
for child in children:
child.source = gene_biotype[0]
child.attributes.pop("transcript_version", None)
print(child, file=out_handle)
return out_file | pizzly needs the GTF to be in gene -> transcript -> exon order for each
gene. it also wants the gene biotype set as the source |
def get_adj_records(results, min_ratio=None, pval=0.05):
    """Return GOEA results with some additional statistics calculated.

    Every record gets its remaining fields updated; only records that pass
    the p-value cutoff (when given) and have a different ratio are returned.

    :param results: iterable of GOEA result records.
    :param min_ratio: forwarded to each record's update method.
    :param pval: uncorrected p-value cutoff; None disables the filter.
    :returns: list of records passing the filters.
    """
    kept = []
    for result in results:
        # Always update statistics, even for records that are filtered out.
        result.update_remaining_fldsdefprt(min_ratio=min_ratio)
        significant = pval is None or result.p_uncorrected < pval
        if significant and result.is_ratio_different:
            kept.append(result)
    return kept
def find_library_linux(cls):
    """Yield paths to the SEGGER J-Link library under ``/opt/SEGGER``.

    On Linux, the SEGGER tools are installed under the ``/opt/SEGGER``
    directory with versioned directories having the suffix ``_VERSION``.

    Args:
        cls (Library): the ``Library`` class

    Returns:
        The paths to the J-Link library files in the order that they are
        found.
    """
    dll = Library.JLINK_SDK_NAME
    root = os.path.join('/', 'opt', 'SEGGER')
    for (directory_name, subdirs, files) in os.walk(root):
        matches = []
        has_x86 = False
        for filename in files:
            full_path = os.path.join(directory_name, filename)
            if os.path.isfile(full_path) and filename.startswith(dll):
                matches.append(filename)
                # Note: mirrors the original by testing the full path, so a
                # directory containing '_x86' also flags this directory.
                if '_x86' in full_path:
                    has_x86 = True
        for match in matches:
            match_path = os.path.join(directory_name, match)
            if util.is_os_64bit():
                # 64-bit host: skip 32-bit builds.
                if '_x86' not in match:
                    yield match_path
            elif has_x86:
                # 32-bit host and explicit x86 builds exist: prefer those.
                if '_x86' in match:
                    yield match_path
            else:
                yield match_path
On Linux, the SEGGER tools are installed under the ``/opt/SEGGER``
directory with versioned directories having the suffix ``_VERSION``.
Args:
cls (Library): the ``Library`` class
Returns:
The paths to the J-Link library files in the order that they are
found. |
def luns(self):
    """Aggregator for ioclass_luns and ioclass_snapshots.

    Returns a single list of VNXLun objects built from both sources.
    """
    result = []
    if self.ioclass_luns:
        result.extend(
            VNXLun(lun_id=lun.lun_id, name=lun.name, cli=self._cli)
            for lun in self.ioclass_luns)
    if self.ioclass_snapshots:
        result.extend(
            VNXLun(name=snap.name, cli=self._cli)
            for snap in self.ioclass_snapshots)
    return result
def decimate(self, fraction=0.5, N=None, boundaries=False, verbose=True):
    """Downsample the number of vertices in a mesh.

    :param float fraction: the desired target of reduction.
    :param int N: the desired number of final points
        (**fraction** is recalculated based on it).
    :param bool boundaries: (True), decide whether to leave boundaries
        untouched or not.

    .. note:: Setting ``fraction=0.1`` leaves 10% of the original nr
        of vertices.
    """
    poly = self.polydata(True)
    if N:
        # Recompute the reduction fraction from the requested point count.
        fraction = float(N) / poly.GetNumberOfPoints()
        if fraction >= 1:
            # Already at or below the requested size; nothing to do.
            return self
    pro = vtk.vtkDecimatePro()
    pro.SetInputData(poly)
    pro.SetTargetReduction(1 - fraction)
    pro.PreserveTopologyOff()
    if boundaries:
        pro.BoundaryVertexDeletionOff()
    else:
        pro.BoundaryVertexDeletionOn()
    pro.Update()
    if verbose:
        print("Nr. of pts, input:", poly.GetNumberOfPoints(), end="")
        print(" output:", pro.GetOutput().GetNumberOfPoints())
    return self.updateMesh(pro.GetOutput())
:param float fraction: the desired target of reduction.
:param int N: the desired number of final points (**fraction** is recalculated based on it).
:param bool boundaries: (True), decide whether to leave boundaries untouched or not.
.. note:: Setting ``fraction=0.1`` leaves 10% of the original nr of vertices.
.. hint:: |skeletonize| |skeletonize.py|_ |
def minimum_image_dr(self, r1, r2, cutoff=None):
    """Calculate the shortest distance between two points in the cell,
    accounting for periodic boundary conditions.

    Args:
        r1 (np.array): fractional coordinates of point r1.
        r2 (np.array): fractional coordinates of point r2.
        cutoff (:obj:`float`, optional): if set, forwarded to ``dr``.
            Defaults to None.

    Returns:
        (float): The distance between r1 and r2.
    """
    # Distance from the origin to the minimum-image displacement vector.
    delta = self.minimum_image(r1, r2)
    return self.dr(np.zeros(3), delta, cutoff)
accounting for periodic boundary conditions.
Args:
r1 (np.array): fractional coordinates of point r1.
r2 (np.array): fractional coordinates of point r2.
cutoff (:obj: `float`, optional): if set, return zero if the minimum distance is greater than `cutoff`. Defaults to None.
Returns:
(float): The distance between r1 and r2. |
def use_comparative_asseessment_part_item_view(self):
    """Pass through to provider
    AssessmentPartItemSession.use_comparative_asseessment_part_item_view.

    NOTE(review): the misspelling 'asseessment' is part of the public
    interface (and the view key), so it is preserved here.
    """
    self._object_views['asseessment_part_item'] = COMPARATIVE
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_comparative_asseessment_part_item_view()
        except AttributeError:
            # Not every provider session supports this view; skip those.
            pass
def new_keys(self):
    """Access the new_keys

    :returns: twilio.rest.api.v2010.account.new_key.NewKeyList
    :rtype: twilio.rest.api.v2010.account.new_key.NewKeyList
    """
    # Lazily build the list on first access and cache it afterwards.
    if self._new_keys is not None:
        return self._new_keys
    self._new_keys = NewKeyList(self._version, account_sid=self._solution['sid'])
    return self._new_keys
:returns: twilio.rest.api.v2010.account.new_key.NewKeyList
:rtype: twilio.rest.api.v2010.account.new_key.NewKeyList |
def set_min_level_to_mail(self, level):
    """Allow changing the mail log level after creation.

    :param level: new minimum log level for the email handler.
    """
    self.min_log_level_to_mail = level
    self._set_min_level(AlkiviEmailHandler, level)
def email_type(arg):
    """An argparse type representing an email address.

    :param arg: raw command-line value.
    :returns: the value unchanged when it is a valid email address.
    :raises argparse.ArgumentTypeError: when validation fails.
    """
    if is_valid_email_address(arg):
        return arg
    raise argparse.ArgumentTypeError("{0} is not a valid email address".format(repr(arg)))
def prepare_schemes(self, req):
    """Make sure this client supports schemes required by current request.

    :param pyswagger.io.Request req: current request object
    :returns: intersection of client and request schemes, sorted descending.
    :raises ValueError: when no common scheme exists.
    """
    supported = sorted(self.__schemes__ & set(req.schemes), reverse=True)
    if not supported:
        raise ValueError('No schemes available: {0}'.format(req.schemes))
    return supported
:param pyswagger.io.Request req: current request object |
def _clean(self):
    """Run the cleanup actions selected by this object's cleanup policy.

    :return: None
    """
    if CleanupPolicy.EVERYTHING in self.cleanup:
        # EVERYTHING short-circuits the per-policy checks.
        self.cleanup_containers()
        self.cleanup_volumes()
        self.cleanup_images()
        self._clean_tmp_dirs()
        return
    # Dispatch each enabled policy to its action, in the original order.
    policy_actions = (
        (CleanupPolicy.CONTAINERS, self.cleanup_containers),
        (CleanupPolicy.VOLUMES, self.cleanup_volumes),
        (CleanupPolicy.IMAGES, self.cleanup_images),
        (CleanupPolicy.TMP_DIRS, self._clean_tmp_dirs),
    )
    for policy, action in policy_actions:
        if policy in self.cleanup:
            action()
:return: None |
def asdict(self):
    """Encode the data in this reading into a dictionary.

    Returns:
        dict: A dictionary containing the information from this reading.
    """
    reading_time = self.reading_time
    return {
        'stream': self.stream,
        'device_timestamp': self.raw_time,
        'streamer_local_id': self.reading_id,
        # ISO-format the timestamp only when one is present.
        'timestamp': reading_time.isoformat() if reading_time is not None else None,
        'value': self.value
    }
def dropdb():
    """Drop database tables (after interactive confirmation)."""
    manager.db.engine.echo = True
    # Guard clause: abort unless the user explicitly confirms.
    if not prompt_bool("Are you sure you want to lose all your data"):
        return
    manager.db.drop_all()
    metadata, alembic_version = alembic_table_metadata()
    alembic_version.drop()
    manager.db.session.commit()
def ms_pan(self, viewer, event, data_x, data_y):
    """A 'drag' or proportional pan, where the image is panned by
    'dragging the canvas' up or down.  The amount of the pan is
    proportionate to the length of the drag.
    """
    if not self.canpan:
        return True
    win_x, win_y = viewer.get_last_win_xy()
    state = event.state
    if state == 'move':
        new_x, new_y = self.get_new_pan(viewer, win_x, win_y,
                                        ptype=self._pantype)
        viewer.panset_xy(new_x, new_y)
    elif state == 'down':
        self.pan_set_origin(viewer, win_x, win_y, data_x, data_y)
        self.pan_start(viewer, ptype=2)
    else:
        # Any other state (e.g. button release) ends the pan.
        self.pan_stop(viewer)
    return True
return True | A 'drag' or proportional pan, where the image is panned by
'dragging the canvas' up or down. The amount of the pan is
proportionate to the length of the drag. |
def _ParseCachedEntry2003(self, value_data, cached_entry_offset):
    """Parses a Windows 2003 cached entry.

    Args:
        value_data (bytes): value data.
        cached_entry_offset (int): offset of the first cached entry data
            relative to the start of the value data.

    Returns:
        AppCompatCacheCachedEntry: cached entry.

    Raises:
        ParseError: if the value data could not be parsed.
    """
    try:
        cached_entry = self._ReadStructureFromByteStream(
            value_data[cached_entry_offset:], cached_entry_offset,
            self._cached_entry_data_type_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError(
            'Unable to parse cached entry value with error: {0!s}'.format(
                exception))
    path_size = cached_entry.path_size
    maximum_path_size = cached_entry.maximum_path_size
    path_offset = cached_entry.path_offset
    # The path offset/size in the structure are relative to the start of the
    # value data; rebase the sizes into absolute slice bounds.
    if path_offset > 0 and path_size > 0:
        path_size += path_offset
        # NOTE(review): maximum_path_size is computed but never used below.
        maximum_path_size += path_offset
    try:
        # Windows stores the path as UTF-16 little-endian.
        path = value_data[path_offset:path_size].decode('utf-16-le')
    except UnicodeDecodeError:
        raise errors.ParseError('Unable to decode cached entry path to string')
    cached_entry_object = AppCompatCacheCachedEntry()
    cached_entry_object.cached_entry_size = (
        self._cached_entry_data_type_map.GetByteSize())
    # file_size is optional in some structure variants.
    cached_entry_object.file_size = getattr(cached_entry, 'file_size', None)
    cached_entry_object.last_modification_time = (
        cached_entry.last_modification_time)
    cached_entry_object.path = path
    return cached_entry_object
Args:
value_data (bytes): value data.
cached_entry_offset (int): offset of the first cached entry data
relative to the start of the value data.
Returns:
AppCompatCacheCachedEntry: cached entry.
Raises:
ParseError: if the value data could not be parsed. |
def read_secret(path, key=None):
    """Return the value of key at path in vault, or entire secret

    Jinja Example:

    .. code-block:: jinja

        my-secret: {{ salt['vault'].read_secret('secret/my/secret', 'some-key') }}
    """
    log.debug('Reading Vault secret for %s at %s', __grains__['id'], path)
    try:
        response = __utils__['vault.make_request']('GET', 'v1/{0}'.format(path))
        if response.status_code != 200:
            response.raise_for_status()
        data = response.json()['data']
        # A missing key raises KeyError, which is caught below like any
        # other failure (best-effort: log and return None).
        return data if key is None else data[key]
    except Exception as err:
        log.error('Failed to read secret! %s: %s', type(err).__name__, err)
        return None
Jinja Example:
.. code-block:: jinja
my-secret: {{ salt['vault'].read_secret('secret/my/secret', 'some-key') }}
.. code-block:: jinja
{% set supersecret = salt['vault'].read_secret('secret/my/secret') %}
secrets:
first: {{ supersecret.first }}
second: {{ supersecret.second }} |
def parseerror(self, msg, line=None):
    """Emit a parse error and abort assembly.

    :param msg: error description.
    :param line: line number to report; defaults to the current source line.
    """
    where = self.sline if line is None else line
    error('parse error: ' + msg + ' on line {}'.format(where))
    sys.exit(-2)
def emitCurrentRecordEdited(self):
    """Emits the current record edited signal for this combobox, provided
    the signals aren't blocked and the record has changed since the last
    time.
    """
    # No pending change, or signals blocked: do nothing (and, as in the
    # original, keep the pending record when signals are merely blocked).
    if self._changedRecord == -1 or self.signalsBlocked():
        return
    record, self._changedRecord = self._changedRecord, -1
    self.currentRecordEdited.emit(record)
signals aren't blocked and the record has changed since the last time. |
def transition_to_add(self):
    """Transition to add."""
    # Only legal from init or add (add -> add is a no-op re-entry).
    allowed = (AQStateMachineStates.init, AQStateMachineStates.add)
    assert self.state in allowed
    self.state = AQStateMachineStates.add
def sizeof_fmt(num):
    """Turn number of bytes into human-readable str.

    Parameters
    ----------
    num : int
        The number of bytes.

    Returns
    -------
    size : str
        The size in human-readable format.

    Raises
    ------
    ValueError
        If ``num`` is negative (the original silently returned None).
    """
    units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB']
    decimals = [0, 0, 1, 2, 2, 2]
    # Handle the special cases up front so every input takes exactly one
    # branch; the original fell through and implicitly returned None for
    # negative input.
    if num == 0:
        return '0 bytes'
    if num == 1:
        return '1 byte'
    if num < 0:
        raise ValueError('num must be a non-negative number of bytes')
    exponent = min(int(log(num, 1024)), len(units) - 1)
    quotient = float(num) / 1024 ** exponent
    # Nested format field supplies the per-unit precision directly,
    # avoiding the original's %-built format string.
    return '{0:.{1}f} {2}'.format(quotient, decimals[exponent], units[exponent])
Parameters
----------
num : int
The number of bytes.
Returns
-------
size : str
The size in human-readable format. |
def save(self, filename, image_format="eps", width=8, height=6):
    """Save the plot to an image file.

    Args:
        filename: Filename to save to.
        image_format: Format to save to. Defaults to eps.
        width: figure width passed to get_plot.
        height: figure height passed to get_plot.
    """
    plot = self.get_plot(width, height)
    plot.savefig(filename, format=image_format)
def random_hex(length):
    """Return a random hex string.

    :param int length: The length of string to return
    :returns: A random string
    :rtype: str
    """
    # string.hexdigits is '0123456789abcdefABCDEF'; the original deduplicated
    # the lowercased form through set(), whose iteration order depends on
    # hash randomization.  Spell out the 16 unique lowercase digits instead:
    # deterministic, and still unbiased (no duplicate characters).
    charset = string.digits + 'abcdef'
    return random_string(length, charset)
def add_host_with_group(self, ip, mac, groupname):
    """Adds a host with given ip and mac in a group named groupname

    @type ip: str
    @type mac: str
    @type groupname: str
    @raises OmapiError: when the server does not acknowledge the update.
    """
    msg = OmapiMessage.open(b"host")
    msg.message.extend([
        ("create", struct.pack("!I", 1)),
        ("exclusive", struct.pack("!I", 1)),
    ])
    msg.obj.extend([
        ("hardware-address", pack_mac(mac)),
        ("hardware-type", struct.pack("!I", 1)),
        ("ip-address", pack_ip(ip)),
        ("group", groupname),
    ])
    response = self.query_server(msg)
    if response.opcode != OMAPI_OP_UPDATE:
        raise OmapiError("add failed")
def pop_scope(self, aliases, frame):
    """Restore all aliases and delete unused variables."""
    # Restore each aliased local to its saved value.
    for name, alias in aliases.iteritems():
        self.writeline('l_%s = %s' % (name, alias))
    # Locals declared in this frame but never aliased are stale now.
    stale = set('l_' + name
                for name in frame.identifiers.declared_locally
                if name not in aliases)
    if stale:
        self.writeline(' = '.join(stale) + ' = missing')
def to_dict(self, index=True, ordered=False):
    """Returns a dict where the keys are the column names and the values are
    lists of the values for that column.

    :param index: If True then include the index in the dict with the
        index_name as the key
    :param ordered: If True then return an OrderedDict() to preserve the
        order of the columns in the DataFrame
    :return: dict or OrderedDict()
    """
    result = OrderedDict() if ordered else {}
    if index:
        result[self._index_name] = self._index
    # Insert columns in DataFrame order (preserved by OrderedDict, and by
    # plain dict on modern Python).
    for position, column in enumerate(self._columns):
        result[column] = self._data[position]
    return result
def trace_region(self, region_index):
    """Retrieves the properties of a trace region.

    Args:
        self (JLink): the ``JLink`` instance.
        region_index (int): the trace region index.

    Returns:
        An instance of ``JLinkTraceRegion`` describing the specified region.

    Raises:
        JLinkException: when the DLL reports failure.
    """
    region = structs.JLinkTraceRegion()
    region.RegionIndex = int(region_index)
    status = self._dll.JLINKARM_TRACE_Control(
        enums.JLinkTraceCommand.GET_REGION_PROPS_EX, ctypes.byref(region))
    if status == 1:
        raise errors.JLinkException('Failed to get trace region.')
    return region
def is_closest_date_parameter(task, param_name):
    """Return whether the parameter named param_name on task carries the
    use_closest_date marker; False when the parameter does not exist."""
    for candidate_name, param_obj in task.get_params():
        if candidate_name == param_name:
            return hasattr(param_obj, 'use_closest_date')
    return False
def model_sizes(m:nn.Module, size:tuple=(64,64))->Tuple[Sizes,Tensor,Hooks]:
    "Pass a dummy input through the model `m` to get the various sizes of activations."
    with hook_outputs(m) as hooks:
        # Run the dummy forward pass for its side effect of populating hooks.
        dummy_eval(m, size)
        return [hook.stored.shape for hook in hooks]
def get_resource_value(self, device_id, resource_path, fix_path=True, timeout=None):
    """Get a resource value for a given device and resource path by blocking
    thread.

    :param str device_id: The name/id of the device (Required)
    :param str resource_path: The resource path to get (Required)
    :param fix_path: if True then the leading /, if found, will be stripped
        before doing request to backend.
    :param timeout: Seconds to wait for the value before timing out. If not
        provided, the program might hang indefinitely.
    :raises: CloudAsyncError, CloudTimeoutError
    :returns: The resource value for the requested resource path
    :rtype: str
    """
    deferred = self.get_resource_value_async(device_id, resource_path, fix_path)
    return deferred.wait(timeout)
Example usage:
.. code-block:: python
try:
v = api.get_resource_value(device_id, path)
print("Current value", v)
except CloudAsyncError, e:
print("Error", e)
:param str device_id: The name/id of the device (Required)
:param str resource_path: The resource path to get (Required)
:param fix_path: if True then the leading /, if found, will be stripped before
doing request to backend. This is a requirement for the API to work properly
:param timeout: Seconds to request value for before timeout. If not provided, the
program might hang indefinitely.
:raises: CloudAsyncError, CloudTimeoutError
:returns: The resource value for the requested resource path
:rtype: str |
def automatic_density_by_vol(structure, kppvol, force_gamma=False):
    """Returns an automatic Kpoint object based on a structure and a kpoint
    density per inverse Angstrom^3 of reciprocal cell.

    Algorithm:
        Same as automatic_density()

    Args:
        structure (Structure): Input structure
        kppvol (int): Grid density per Angstrom^(-3) of reciprocal cell
        force_gamma (bool): Force a gamma centered mesh

    Returns:
        Kpoints
    """
    # Convert per-volume density into per-atom density for automatic_density.
    reciprocal_volume = structure.lattice.reciprocal_lattice.volume
    kppa = kppvol * reciprocal_volume * structure.num_sites
    return Kpoints.automatic_density(structure, kppa, force_gamma=force_gamma)
force_gamma=force_gamma) | Returns an automatic Kpoint object based on a structure and a kpoint
density per inverse Angstrom^3 of reciprocal cell.
Algorithm:
Same as automatic_density()
Args:
structure (Structure): Input structure
kppvol (int): Grid density per Angstrom^(-3) of reciprocal cell
force_gamma (bool): Force a gamma centered mesh
Returns:
Kpoints |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.