| code (string, length 51–2.38k) | docstring (string, length 4–15.2k) |
|---|---|
def write_release_version(version):
    """Write the release version to ``_version.py``.

    :param version: version string to record next to this module.
    """
    dirname = os.path.abspath(os.path.dirname(__file__))
    # ``with`` guarantees the handle is closed even if the write fails.
    with open(os.path.join(dirname, "_version.py"), "wt") as f:
        f.write("__version__ = '%s'\n" % version)
def _serialize_object(self, response_data, request):
if not self.factory:
return response_data
if isinstance(response_data, (list, tuple)):
return map(
lambda item: self.factory.serialize(item, request),
response_data)
else:
return self.factory.serialize(response_data, request) | Create a python datatype from the given python object.
This will use ``self.factory`` object's ``serialize()`` function
to convert the object into dictionary.
If no factory is defined, this will simply return the same data
that was given.
:param response_data: data returned by the resource |
def _download_file(url, local_filename):
    """Download a chunked response from ``url`` to a local path.

    Suitable for larger downloads: the body is streamed to disk in 1 KiB
    chunks instead of being held in memory.

    :param url: URL to download.
    :param local_filename: destination file path.
    """
    response = requests.get(url, stream=True)
    with open(local_filename, 'wb') as outfile:
        for chunk in response.iter_content(chunk_size=1024):
            # Skip keep-alive chunks, which arrive empty.
            if chunk:
                outfile.write(chunk)
def validate():
    """Display error messages and exit if no lore environment can be found."""
    if not os.path.exists(os.path.join(ROOT, APP, '__init__.py')):
        message = ansi.error() + ' Python module not found.'
        if os.environ.get('LORE_APP') is None:
            message += ' $LORE_APP is not set. Should it be different than "%s"?' % APP
        else:
            message += ' $LORE_APP is set to "%s". Should it be different?' % APP
        sys.exit(message)
    if exists():
        return
    # Report which subcommand was attempted, defaulting to the bare binary.
    command = sys.argv[1] if len(sys.argv) > 1 else 'lore'
    sys.exit(
        ansi.error() + ' %s is only available in lore '
        'app directories (missing %s)' % (
            ansi.bold(command),
            ansi.underline(VERSION_PATH)
        )
    )
def set_dicts(self, word_dict, char_dict):
    """Set with custom dictionaries.

    :param word_dict: The word dictionary.
    :param char_dict: The character dictionary.
    """
    self.word_dict = word_dict
    self.char_dict = char_dict
def check_file_version(notebook, source_path, outputs_path):
    """Raise if file version in source file would override outputs.

    :param notebook: notebook whose metadata carries the jupytext
        ``text_representation`` format version.
    :param source_path: path of the text representation.
    :param outputs_path: path of the file holding the outputs.
    :raises JupytextFormatError: when the source uses a format version that
        is neither current nor within the readable range.
    """
    if not insert_or_test_version_number():
        return
    _, ext = os.path.splitext(source_path)
    # .ipynb files carry their own outputs; nothing to protect.
    if ext.endswith('.ipynb'):
        return
    version = notebook.metadata.get('jupytext', {}).get('text_representation', {}).get('format_version')
    format_name = format_name_for_ext(notebook.metadata, ext)
    fmt = get_format_implementation(ext, format_name)
    current = fmt.current_version_number
    # Metadata present but no explicit version: assume the current format.
    if notebook.metadata and not version:
        version = current
    if version == fmt.current_version_number:
        return
    if (fmt.min_readable_version_number or current) <= version <= current:
        return
    raise JupytextFormatError("File {} is in format/version={}/{} (current version is {}). "
                              "It would not be safe to override the source of {} with that file. "
                              "Please remove one or the other file."
                              .format(os.path.basename(source_path),
                                      format_name, version, current,
                                      os.path.basename(outputs_path)))
def to_df(self, recommended_only=False, include_io=True):
    """Return a pandas DataFrame for each model and dataset.

    Parameters
    ----------
    recommended_only : bool, optional
        If True, only recommended models for each session are included.  If
        no model is recommended, a row with its ID is included but all
        fields are null.
    include_io : bool, optional
        If True, the BMDS input/output files ((d) input file and out file)
        are also included.

    Returns
    -------
    out : pandas.DataFrame
        Data frame containing models and outputs.
    """
    od = BMDS._df_ordered_dict(include_io)
    # A plain loop is clearer than a comprehension used only for its
    # side effects on ``od``.
    for i, session in enumerate(self):
        session._add_to_to_ordered_dict(od, i, recommended_only)
    return pd.DataFrame(od)
def load_fits(self, filepath):
    """Load a FITS file into the viewer.

    :param filepath: path of the FITS file to display.
    """
    image = AstroImage.AstroImage(logger=self.logger)
    image.load_file(filepath)
    self.set_image(image)
def cli(self, *args, **kwargs):
    """Defines a CLI function that should be routed by this API.

    Forwards all arguments to the module-level ``cli`` with this
    instance's ``api`` injected as a keyword argument.
    """
    kwargs['api'] = self.api
    return cli(*args, **kwargs)
def _get_public_ip(name, resource_group):
    """Get the public ip address details by name.

    :param name: name of the public IP address resource.
    :param resource_group: resource group the address belongs to.
    :return: dict of address details, or ``{'error': message}`` on failure.
    """
    netconn = get_conn(client_type='network')
    try:
        pubip_query = netconn.public_ip_addresses.get(
            resource_group_name=resource_group,
            public_ip_address_name=name
        )
        pubip = pubip_query.as_dict()
    except CloudError as exc:
        # Log and return the error rather than raising, so callers can
        # surface the message in their result payload.
        __utils__['azurearm.log_cloud_error']('network', exc.message)
        pubip = {'error': exc.message}
    return pubip
def normalise(v):
    """Normalise columns of matrix.

    Parameters
    ----------
    v : array_like
        Array with columns to be normalised.

    Returns
    -------
    vnrm : ndarray
        Normalised array.
    """
    vn = np.sqrt(np.sum(v**2, 0))
    # Avoid division by zero for all-zero columns: leave them as zeros.
    vn[vn == 0] = 1.0
    return np.asarray(v / vn, dtype=v.dtype)
def setDebugActions( self, startAction, successAction, exceptionAction ):
    """Enable display of debugging messages while doing pattern matching.

    Each action may be ``None`` to fall back to the module default.
    Returns ``self`` to allow fluent chaining.
    """
    self.debugActions = (startAction or _defaultStartDebugAction,
                         successAction or _defaultSuccessDebugAction,
                         exceptionAction or _defaultExceptionDebugAction)
    self.debug = True
    return self
def generate(env):
    """Add Builders and construction variables for yacc to an Environment."""
    c_file, cxx_file = SCons.Tool.createCFileBuilders(env)
    # C sources from .y/.yacc, Objective-C from .ym, C++ from .yy.
    c_file.add_action('.y', YaccAction)
    c_file.add_emitter('.y', yEmitter)
    c_file.add_action('.yacc', YaccAction)
    c_file.add_emitter('.yacc', yEmitter)
    c_file.add_action('.ym', YaccAction)
    c_file.add_emitter('.ym', ymEmitter)
    cxx_file.add_action('.yy', YaccAction)
    cxx_file.add_emitter('.yy', yyEmitter)
    # Prefer bison when available, falling back to yacc.
    env['YACC'] = env.Detect('bison') or 'yacc'
    env['YACCFLAGS'] = SCons.Util.CLVar('')
    env['YACCCOM'] = '$YACC $YACCFLAGS -o $TARGET $SOURCES'
    env['YACCHFILESUFFIX'] = '.h'
    env['YACCHXXFILESUFFIX'] = '.hpp'
    env['YACCVCGFILESUFFIX'] = '.vcg'
def screenshot(self, png_filename=None, format='raw'):
    """Screenshot with PNG format.

    Args:
        png_filename (str): optional, save file name.
        format (str): return format, 'pillow' or 'raw' (default).

    Returns:
        raw data or PIL.Image.

    Raises:
        WDAError: when a file name is given but the data is not valid PNG.
        ValueError: on an unknown ``format``.
    """
    value = self.http.get('screenshot').value
    raw_value = base64.b64decode(value)
    png_header = b"\x89PNG\r\n\x1a\n"
    # The magic-byte check is only enforced when writing a file.
    if not raw_value.startswith(png_header) and png_filename:
        raise WDAError(-1, "screenshot png format error")
    if png_filename:
        with open(png_filename, 'wb') as f:
            f.write(raw_value)
    if format == 'raw':
        return raw_value
    elif format == 'pillow':
        from PIL import Image
        buff = io.BytesIO(raw_value)
        return Image.open(buff)
    else:
        raise ValueError("unknown format")
def _calculate_scores(self):
    """Calculate the 'value' of each node based on blocking descendants.

    The score orders the internal priority queue; it is stored negated
    because the PriorityQueue picks the lowest value first.  This does
    len(self.graph) passes over the graph, which may hurt performance on
    large graphs, and it reads the graph, so it would require a lock if
    called from outside __init__.

    :return Dict[str, int]: mapping of unique IDs to integer scores;
        lower scores are higher priority.
    """
    scores = {}
    for node in self.graph.nodes():
        score = -1 * len([
            d for d in nx.descendants(self.graph, node)
            if self._include_in_cost(d)
        ])
        scores[node] = score
    return scores
def query_dict_to_string(query):
    """Convert an OrderedDict to a query string.

    Args:
        query (obj): The key value object with query params.

    Returns:
        str: The query string.

    Note:
        Does the same as urllib.parse.urlencode except that it doesn't
        actually encode the values.
    """
    return "&".join(key + "=" + value for key, value in query.items())
def removeCurrentItem(self):
    """Removes the current item from the repository tree."""
    logger.debug("removeCurrentFile")
    currentIndex = self.getRowCurrentIndex()
    # Nothing selected: silently do nothing.
    if not currentIndex.isValid():
        return
    self.model().deleteItemAtIndex(currentIndex)
def ebalance(sdat, tstart=None, tend=None):
    """Energy balance.

    Compute Nu_t - Nu_b + V*dT/dt as a function of time using an explicit
    Euler scheme.  This should be zero if energy is conserved.

    Args:
        sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
        tstart (float): start time; the beginning of the series if None.
        tend (float): end time; the end of the series if None.

    Returns:
        tuple of :class:`numpy.array`: energy balance and time arrays.
    """
    tseries = sdat.tseries_between(tstart, tend)
    rbot, rtop = misc.get_rbounds(sdat.steps.last)
    if rbot != 0:
        # Spherical geometry: account for surface-area and volume ratios.
        coefsurf = (rtop / rbot)**2
        volume = rbot * ((rtop / rbot)**3 - 1) / 3
    else:
        # Cartesian geometry: unit coefficients.
        coefsurf = 1.
        volume = 1.
    dtdt, time = dt_dt(sdat, tstart, tend)
    ftop = tseries['ftop'].values * coefsurf
    fbot = tseries['fbot'].values
    radio = tseries['H_int'].values
    # dtdt is one sample shorter, hence the [1:] alignment.
    ebal = ftop[1:] - fbot[1:] + volume * (dtdt - radio[1:])
    return ebal, time
def choose(self):
    """Marks the item as the one the user is in.

    Idempotent: a second call leaves the position unchanged.
    """
    if not self.choosed:
        self.choosed = True
        # Nudge the item to the right to highlight the selection.
        self.pos = self.pos + Sep(5, 0)
def RegisterAnyElement(cls):
    """Add Wrapper classes to registered TypeCode serialmaps.

    If a registered TypeCode instance is found, add the Wrapper class to
    the TypeCode class seriallist and re-RegisterType.  Provides Any
    serialization of any instances of the Wrapper.
    """
    for k, v in cls.types_dict.items():
        what = Any.serialmap.get(k)
        if what is None:
            continue
        # Skip wrappers already registered for this TypeCode class.
        if v in what.__class__.seriallist:
            continue
        what.__class__.seriallist.append(v)
        RegisterType(what.__class__, clobber=1, **what.__dict__)
def policy(self, args):
    """Setter for the policy descriptor.

    ``args[0]`` is the policy word ('accept' or 'reject'); ``args[1]`` is
    a comma-separated list of ports or ``a-b`` port ranges.

    :raises RuntimeError: on an unknown policy word.
    """
    word = args[0]
    if word == 'reject':
        self.accepted_ports = None
        self.rejected_ports = []
        target = self.rejected_ports
    elif word == 'accept':
        self.accepted_ports = []
        self.rejected_ports = None
        target = self.accepted_ports
    else:
        raise RuntimeError("Don't understand policy word \"%s\"" % word)
    for port in args[1].split(','):
        if '-' in port:
            (a, b) = port.split('-')
            target.append(PortRange(int(a), int(b)))
        else:
            target.append(int(port))
def evaluateQuotes(self, argument):
    """Evaluate quoted strings.

    First does an 'and' on the individual search terms, then asks
    GetQuotes to only return the subset of IDs that contain the literal
    string.

    NOTE(review): if an intersection ever becomes empty, ``len(r) == 0``
    makes the next item re-seed ``r`` instead of keeping it empty —
    confirm this is intended.
    """
    r = set()
    search_terms = []
    for item in argument:
        search_terms.append(item[0])
        if len(r) == 0:
            r = self.evaluate(item)
        else:
            r = r.intersection(self.evaluate(item))
    return self.GetQuotes(' '.join(search_terms), r)
def before_event(self, event):
    """Ensure a screen is at the bottom of the history buffer.

    :param str event: event name, for example ``"linefeed"``.
    """
    # Paging events manage their own position; anything else snaps the
    # view back to the live screen.
    if event not in ["prev_page", "next_page"]:
        while self.history.position < self.history.size:
            self.next_page()
def _batched_write_command(
        namespace, operation, command, docs, check_keys, opts, ctx):
    """Create the next batched insert, update, or delete command.

    NOTE(review): the buffer receives ``bytes`` — presumably ``StringIO``
    here is an alias for a bytes-capable buffer (e.g. ``io.BytesIO``);
    confirm against the module's imports.
    """
    buf = StringIO()
    # Reserve space for the message length (filled in at the end).
    buf.write(_ZERO_64)
    # responseTo, opCode
    buf.write(b"\x00\x00\x00\x00\xd4\x07\x00\x00")
    to_send, length = _batched_write_command_impl(
        namespace, operation, command, docs, check_keys, opts, ctx, buf)
    # Backpatch the request id at offset 4, then the total length at 0.
    buf.seek(4)
    request_id = _randint()
    buf.write(_pack_int(request_id))
    buf.seek(0)
    buf.write(_pack_int(length))
    return request_id, buf.getvalue(), to_send
def get_industry_index(self, index_id, items=None):
    """Retrieve all symbols that belong to an industry.

    :param index_id: identifier of the industry index.
    :param items: optional fields to select.
    """
    return self.select('yahoo.finance.industry', items).where(['id', '=', index_id])
def sd(d, **kw):
    """Return a copy of dict ``d`` overridden with the keyword arguments.

    Does "classless OOP" as in js but with dicts.  For example::

        sd(d, perfect=42, gf='qt3.14')

    returns a dict like ``d`` but with ``d['perfect'] == 42`` and
    ``d['gf'] == 'qt3.14'``.  The original ``d`` is not modified.
    """
    r = {}
    r.update(d)
    r.update(kw)
    return r
def addthisbunch(bunchdt, data, commdct, thisbunch, theidf):
    """Add a bunch to model.

    ``thisbunch`` usually comes from another idf file, or it can be used
    to copy within the idf file.

    :return: the newly added bunch object.
    """
    key = thisbunch.key.upper()
    # Shallow-copy so the source bunch object stays untouched.
    obj = copy.copy(thisbunch.obj)
    abunch = obj2bunch(data, commdct, obj)
    bunchdt[key].append(abunch)
    return abunch
def DEFINE_enum(
    name, default, enum_values, help, flag_values=FLAGS, module_name=None,
    **args):
  """Registers a flag whose value can be any string from enum_values.

  Args:
    name: A string, the flag name.
    default: The default value of the flag.
    enum_values: A list of strings with the possible values for the flag.
    help: A help string.
    flag_values: FlagValues object with which the flag will be registered.
    module_name: A string, the name of the Python module declaring this flag.
      If not provided, it will be computed using the stack trace of this call.
    **args: Dictionary with extra keyword args that are passed to the
      Flag __init__.
  """
  DEFINE_flag(EnumFlag(name, default, help, enum_values, **args),
              flag_values, module_name)
def _GetRowValue(self, query_hash, row, value_name):
keys_name_to_index_map = self._keys_per_query.get(query_hash, None)
if not keys_name_to_index_map:
keys_name_to_index_map = {
name: index for index, name in enumerate(row.keys())}
self._keys_per_query[query_hash] = keys_name_to_index_map
value_index = keys_name_to_index_map.get(value_name)
return row[value_index] | Retrieves a value from the row.
Args:
query_hash (int): hash of the query, that uniquely identifies the query
that produced the row.
row (sqlite3.Row): row.
value_name (str): name of the value.
Returns:
object: value. |
def option(value, is_default=False, label=None):
    """Annotates a possible value for IValueOptions.

    Will be validated at instance creation time.

    @param value: a possible value for the IValueOptions being defined.
    @type value: Any
    @param is_default: if the option should be the default value.
    @type is_default: bool
    @param label: option label or None; if None the string representation
        of the value will be used as label.
    @type label: str or unicode or None
    """
    _annotate("option", value, is_default=is_default, label=label)
def except_(self, arguments):
    """Return the arguments passed to the route except those listed.

    Sample usage::

        from bast import Controller

        class MyController(Controller):
            def index(self):
                data = self.except_(['arg_name'])

    :param arguments: iterable of argument names to exclude.
    :return: dict of all request arguments except the excluded ones.
    """
    if not isinstance(arguments, list):
        arguments = list(arguments)
    args = self.request.arguments
    data = {}
    for key, value in args.items():
        if key not in arguments:
            data[key] = self.get_argument(key)
    return data
def clear_bg(self, which_data=("amplitude", "phase"), keys="fit"):
    """Clear background correction.

    Parameters
    ----------
    which_data: str or list of str
        From which type of data to remove the background information.
        Contains "amplitude", "phase", or both.
    keys: str or list of str
        Which type of background data to remove.  One of:

        - "fit": the background data computed with
          :func:`qpimage.QPImage.compute_bg`
        - "data": the experimentally obtained background image

    Raises
    ------
    ValueError
        If ``which_data`` names neither "amplitude" nor "phase".
    """
    which_data = QPImage._conv_which_data(which_data)
    if isinstance(keys, str):
        keys = [keys]
    imdats = []
    if "amplitude" in which_data:
        imdats.append(self._amp)
    if "phase" in which_data:
        imdats.append(self._pha)
    if not imdats:
        msg = "`which_data` must contain 'phase' or 'amplitude'!"
        raise ValueError(msg)
    for imdat in imdats:
        for key in keys:
            imdat.del_bg(key)
def userCreate(self, request, tag):
    """Render a form for creating new users.

    Builds a LiveForm with localpart, domain, and password fields, wired
    to :meth:`createUser`.
    """
    userCreator = liveform.LiveForm(
        self.createUser,
        [liveform.Parameter(
            "localpart",
            liveform.TEXT_INPUT,
            unicode,
            "localpart"),
         liveform.Parameter(
            "domain",
            liveform.TEXT_INPUT,
            unicode,
            "domain"),
         liveform.Parameter(
            "password",
            liveform.PASSWORD_INPUT,
            unicode,
            "password")])
    userCreator.setFragmentParent(self)
    return userCreator
def clear(self, timestamp):
    """Clear all data from the RSL.

    Pushes a single reading once everything is cleared so that we keep
    track of the highest ID allocated to date; the current timestamp is
    needed to properly timestamp that reading.

    Args:
        timestamp (int): The current timestamp to store with the reading.
    """
    self.storage.clear()
    self.push(streams.DATA_CLEARED, timestamp, 1)
def build_one_definition_example(self, def_name):
    """Build the example for the given definition.

    Args:
        def_name: Name of the definition.

    Returns:
        True if the example has been created, False if an error occurred.
    """
    # Already built, or unknown definition.
    if def_name in self.definitions_example:
        return True
    elif def_name not in self.specification['definitions']:
        return False
    self.definitions_example[def_name] = {}
    def_spec = self.specification['definitions'][def_name]
    if def_spec.get('type') == 'array' and 'items' in def_spec:
        item = self.get_example_from_prop_spec(def_spec['items'])
        self.definitions_example[def_name] = [item]
        return True
    if 'properties' not in def_spec:
        self.definitions_example[def_name] = self.get_example_from_prop_spec(def_spec)
        return True
    for prop_name, prop_spec in def_spec['properties'].items():
        example = self.get_example_from_prop_spec(prop_spec)
        if example is None:
            return False
        self.definitions_example[def_name][prop_name] = example
    return True
def _save(self):
    """Save config into the associated .ini file.

    NOTE(review): the Python 2 branch calls ``self._write`` while the
    Python 3 branch calls ``self.write`` — confirm both exist and that
    the asymmetry is intentional.
    """
    fname = self.filename()

    def _write_file(fname):
        if PY2:
            with codecs.open(fname, 'w', encoding='utf-8') as configfile:
                self._write(configfile)
        else:
            with open(fname, 'w', encoding='utf-8') as configfile:
                self.write(configfile)

    try:
        _write_file(fname)
    except EnvironmentError:
        # The file may be locked or stale; remove it, wait briefly, and
        # retry once before giving up.
        try:
            if osp.isfile(fname):
                os.remove(fname)
                time.sleep(0.05)
            _write_file(fname)
        except Exception as e:
            print("Failed to write user configuration file to disk, with "
                  "the exception shown below")
            print(e)
def _draw_using_figure(self, figure, axs):
    """Draw onto already created figure and axes.

    Can be used to draw animation frames or inset plots; intended to be
    used after the key plot has been drawn.

    Parameters
    ----------
    figure : ~matplotlib.figure.Figure
        Matplotlib figure.
    axs : array_like
        Array of Axes onto which to draw the plots.
    """
    # Work on a copy so the caller's plot object stays reusable.
    self = deepcopy(self)
    self._build()
    self.theme = self.theme or theme_get()
    self.figure = figure
    self.axs = axs
    try:
        with mpl.rc_context():
            self.theme.apply_rcparams()
            self._setup_parameters()
            self._draw_layers()
            self._draw_facet_labels()
            self._draw_legend()
            self._apply_theme()
    except Exception as err:
        # Release the figure on failure so it does not leak.
        if self.figure is not None:
            plt.close(self.figure)
        raise err
    return self
def add_in(self, delay: float, fn_process: Callable, *args: Any, **kwargs: Any) -> 'Process':
    """Add a process to the simulation, started after the given delay in simulated time.

    See method add() for more details.
    """
    process = Process(self, fn_process, self._gr)
    if _logger is not None:
        self._log(INFO, "add", __now=self.now(), fn=fn_process, args=args, kwargs=kwargs)
    self._schedule(delay, process.switch, *args, **kwargs)
    return process
def cancelPnL(self, account, modelCode: str = ''):
    """Cancel PnL subscription.

    Args:
        account: Cancel for this account.
        modelCode: If specified, cancel for this account model.
    """
    key = (account, modelCode)
    reqId = self.wrapper.pnlKey2ReqId.pop(key, None)
    if reqId:
        self.client.cancelPnL(reqId)
        self.wrapper.pnls.pop(reqId, None)
    else:
        self._logger.error(
            'cancelPnL: No subscription for '
            f'account {account}, modelCode {modelCode}')
def find(self, *args, **kwargs):
    """New query builder on current db.

    NOTE(review): ``**kwargs`` is accepted but never forwarded to
    ``Query`` — confirm whether it should be passed through.
    """
    return Query(*args, db=self, schema=self.schema)
def lu_decomposition(matrix_a):
    """LU-Factorization using Doolittle's Method for linear systems.

    Decomposes the matrix :math:`A` such that :math:`A = LU`.  The input
    matrix is a 2-dimensional list or tuple (list of lists of ints and/or
    floats).

    :param matrix_a: Input matrix (must be a square matrix)
    :type matrix_a: list, tuple
    :return: a tuple containing matrices L and U
    :rtype: tuple
    :raises ValueError: if the input is not square.
    """
    q = len(matrix_a)
    for idx, m_a in enumerate(matrix_a):
        if len(m_a) != q:
            raise ValueError("The input must be a square matrix. " +
                             "Row " + str(idx + 1) + " has a size of " + str(len(m_a)) + ".")
    return _linalg.doolittle(matrix_a)
def load_from_file(self, yamlfile, _override=True, _allow_undeclared=False):
    """Loads the configuration from a file.

    Parsed contents must be a single dict mapping config key to value.

    Args:
        yamlfile: The opened file object to load configuration from.
        See load_from_dict() for other args' descriptions.

    Raises:
        ConfigurationInvalidError: If configuration file can't be read, or
            can't be parsed as either YAML (or JSON, a subset of YAML).
    """
    self._logger.info('Loading configuration from file: %s', yamlfile)
    try:
        parsed_yaml = self._modules['yaml'].safe_load(yamlfile.read())
    except self._modules['yaml'].YAMLError:
        self._logger.exception('Problem parsing YAML')
        raise self.ConfigurationInvalidError(
            'Failed to load from %s as YAML' % yamlfile)
    if not isinstance(parsed_yaml, dict):
        raise self.ConfigurationInvalidError(
            'YAML parsed, but wrong type, should be dict', parsed_yaml)
    self._logger.debug('Configuration loaded from file: %s', parsed_yaml)
    self.load_from_dict(
        parsed_yaml, _override=_override, _allow_undeclared=_allow_undeclared)
def dealias_image(alias):
    """Remove an image alias.

    :param alias: the alias to remove; exits with status 1 on API failure.
    """
    with Session() as session:
        try:
            result = session.Image.dealiasImage(alias)
        except Exception as e:
            print_error(e)
            sys.exit(1)
    if result['ok']:
        print("alias {0} removed.".format(alias))
    else:
        print(result['msg'])
def get_ser_val_alt(lat: float, lon: float,
                    da_alt_x: xr.DataArray,
                    da_alt: xr.DataArray, da_val: xr.DataArray) -> pd.Series:
    """Interpolate an atmospheric variable to a specified altitude.

    Parameters
    ----------
    lat : float
        latitude of specified site
    lon : float
        longitude of specified site
    da_alt_x : xr.DataArray
        desired altitude to interpolate variable at
    da_alt : xr.DataArray
        altitude associated with `da_val`
    da_val : xr.DataArray
        atmospheric variable to interpolate

    Returns
    -------
    pd.Series
        interpolated values at the specified altitude of the site
        positioned by [`lat`, `lon`]
    """
    # Nearest-neighbour selection of the site's vertical profiles.
    alt_t_1d = da_alt.sel(
        latitude=lat, longitude=lon, method='nearest')
    val_t_1d = da_val.sel(
        latitude=lat, longitude=lon, method='nearest')
    alt_x = da_alt_x.sel(
        latitude=lat, longitude=lon, method='nearest')[0]
    # Interpolate each time step's profile to the target altitude.
    val_alt = np.array(
        [interp1d(alt_1d, val_1d)(alt_x)
         for alt_1d, val_1d
         in zip(alt_t_1d, val_t_1d)])
    ser_alt = pd.Series(
        val_alt,
        index=da_val.time.values,
        name=da_val.name,
    )
    return ser_alt
def eval_first_non_none(eval_list: Iterable[Callable[..., Any]], **kwargs: Any) -> Any:
    """Execute functions in order and return the first non-None result.

    All kwargs are passed to each individual function.  If all functions
    return None, the overall result is None.

    Examples:
        >>> eval_first_non_none((lambda: None, lambda: None, lambda: 3))
        3
        >>> print(eval_first_non_none([lambda: None, lambda: None, lambda: None]))
        None
    """
    Validator.is_real_iterable(raise_ex=True, eval_list=eval_list)
    for eval_fun in eval_list:
        res = eval_fun(**kwargs)
        if res is not None:
            return res
    return None
def sheetDeleteEmpty(bookName=None):
    """Delete all sheets which contain no data.

    :param bookName: workbook to clean; defaults to the active book.
    """
    if bookName is None:
        bookName = activeBook()
    if not bookName.lower() in [x.lower() for x in bookNames()]:
        print("can't clean up a book that doesn't exist:", bookName)
        return
    poBook = PyOrigin.WorksheetPages(bookName)
    # Collect names first: deleting while iterating the layers is unsafe.
    namesToKill = []
    for i, poSheet in enumerate([poSheet for poSheet in poBook.Layers()]):
        poFirstCol = poSheet.Columns(0)
        # A sheet is "empty" when its first column has no name and no data.
        if poFirstCol.GetLongName() == "" and poFirstCol.GetData() == []:
            namesToKill.append(poSheet.GetName())
    for sheetName in namesToKill:
        print("deleting empty sheet", sheetName)
        sheetDelete(bookName, sheetName)
def less_than(self, less_than):
    """Adds new `<` condition.

    :param less_than: str or datetime compatible object (naive UTC
        datetime or tz-aware datetime)
    :raise:
        - QueryTypeError: if `less_than` is of an unexpected type
    """
    # Datetimes are normalised to UTC and serialised for the query.
    if hasattr(less_than, 'strftime'):
        less_than = datetime_as_utc(less_than).strftime('%Y-%m-%d %H:%M:%S')
    elif isinstance(less_than, six.string_types):
        raise QueryTypeError('Expected value of type `int` or instance of `datetime`, not %s' % type(less_than))
    return self._add_condition('<', less_than, types=[int, str])
def set_python(self, value):
    """Validate using the cursor for consistency between direct set of
    values vs modification of cursor values.

    :param value: a list or None; None is normalised to an empty list.
    :raises ValidationError: if ``value`` is neither a list nor None.
    """
    if not isinstance(value, (list, type(None))):
        raise ValidationError(
            self.record,
            "Field '{}' must be set to a list, not '{}'".format(
                self.name,
                value.__class__
            )
        )
    value = value or []
    self.cursor._validate_list(value)
    return super(ListField, self).set_python(value)
def get_services_by_explosion(self, servicegroups):
    """Get all services of this servicegroup and add them to the members
    container.

    :param servicegroups: servicegroups object
    :type servicegroups: alignak.objects.servicegroup.Servicegroups
    :return: return empty string or list of members
    :rtype: str or list
    """
    # rec_tag detects cycles in servicegroup definitions.
    self.already_exploded = True
    if self.rec_tag:
        logger.error("[servicegroup::%s] got a loop in servicegroup definition",
                     self.get_name())
        if hasattr(self, 'members'):
            return self.members
        return ''
    self.rec_tag = True
    sg_mbrs = self.get_servicegroup_members()
    for sg_mbr in sg_mbrs:
        servicegroup = servicegroups.find_by_name(sg_mbr.strip())
        if servicegroup is not None:
            value = servicegroup.get_services_by_explosion(servicegroups)
            if value is not None:
                self.add_members(value)
    if hasattr(self, 'members'):
        return self.members
    return ''
def get_expiration_seconds_v2(expiration):
    """Convert 'expiration' to a number of seconds in the future.

    :type expiration: Union[Integer, datetime.datetime, datetime.timedelta]
    :param expiration: Point in time when the signed URL should expire.

    :raises: :exc:`TypeError` when expiration is not a valid type.

    :rtype: int
    :returns: a timestamp as an absolute number of seconds since epoch.
    """
    # A timedelta is relative to "now" (UTC).
    if isinstance(expiration, datetime.timedelta):
        now = NOW().replace(tzinfo=_helpers.UTC)
        expiration = now + expiration
    # A datetime is converted to integer seconds since epoch.
    if isinstance(expiration, datetime.datetime):
        micros = _helpers._microseconds_from_datetime(expiration)
        expiration = micros // 10 ** 6
    if not isinstance(expiration, six.integer_types):
        raise TypeError(
            "Expected an integer timestamp, datetime, or "
            "timedelta. Got %s" % type(expiration)
        )
    return expiration
def draw_rect(
    self,
    x: int,
    y: int,
    width: int,
    height: int,
    ch: int,
    fg: Optional[Tuple[int, int, int]] = None,
    bg: Optional[Tuple[int, int, int]] = None,
    bg_blend: int = tcod.constants.BKGND_SET,
) -> None:
    """Draw characters and colors over a rectangular region.

    `x` and `y` are the starting tile, with ``0,0`` as the upper-left
    corner of the console.  Negative numbers print relative to the
    bottom-right corner, but this behavior may change in future versions.
    `width` and `height` determine the size of the rectangle.
    `ch` is a Unicode integer; 0 leaves the current characters unchanged.
    `fg` and `bg` are (r, g, b) tuples with values from 0 to 255, or
    `None` to leave the colors unchanged.
    `bg_blend` is the blend type used by libtcod.

    .. versionadded:: 8.5
    .. versionchanged:: 9.0
        `fg` and `bg` now default to `None` instead of white-on-black.
    """
    x, y = self._pythonic_index(x, y)
    lib.draw_rect(
        self.console_c,
        x,
        y,
        width,
        height,
        ch,
        (fg,) if fg is not None else ffi.NULL,
        (bg,) if bg is not None else ffi.NULL,
        bg_blend,
    )
def create_tileset(ctx, dataset, tileset, name):
    """Create a vector tileset from a dataset.

        $ mapbox datasets create-tileset dataset-id username.data

    The tileset must start with your username and the dataset must be one
    you own.  All endpoints require authentication; an access token with
    `uploads:write` scope is required.
    """
    access_token = (ctx.obj and ctx.obj.get('access_token')) or None
    service = mapbox.Uploader(access_token=access_token)
    uri = "mapbox://datasets/{username}/{dataset}".format(
        username=tileset.split('.')[0], dataset=dataset)
    res = service.create(uri, tileset, name)
    if res.status_code == 201:
        click.echo(res.text)
    else:
        raise MapboxCLIException(res.text.strip())
def get_onchain_exchange_rates(deposit_crypto=None, withdraw_crypto=None, **modes):
    """Get exchange rates for all defined on-chain exchange services.

    :param deposit_crypto: optional code to filter deposit currencies.
    :param withdraw_crypto: optional code to filter withdraw currencies.
    :param modes: ``verbose`` enables service logging; ``best`` returns
        only the single highest-rate entry.
    """
    from moneywagon.onchain_exchange import ALL_SERVICES
    rates = []
    for Service in ALL_SERVICES:
        srv = Service(verbose=modes.get('verbose', False))
        rates.extend(srv.onchain_exchange_rates())
    if deposit_crypto:
        rates = [x for x in rates if x['deposit_currency']['code'] == deposit_crypto.upper()]
    if withdraw_crypto:
        rates = [x for x in rates if x['withdraw_currency']['code'] == withdraw_crypto.upper()]
    if modes.get('best', False):
        return max(rates, key=lambda x: float(x['rate']))
    return rates
def dbg(*objects, file=sys.stderr, flush=True, **kwargs):
    """Helper function to print to stderr and flush."""
    print(*objects, file=file, flush=flush, **kwargs)
def memory_zones(self):
    """Gets all memory zones supported by the current target.

    Some targets support multiple memory zones.  This provides a list of
    all of them to facilitate the memory zone routing functions.

    Args:
        self (JLink): the ``JLink`` instance

    Returns:
        A list of all the memory zones as ``JLinkMemoryZone`` structures.

    Raises:
        JLinkException: on hardware errors.
    """
    count = self.num_memory_zones()
    if count == 0:
        return list()
    buf = (structs.JLinkMemoryZone * count)()
    res = self._dll.JLINK_GetMemZones(buf, count)
    if res < 0:
        raise errors.JLinkException(res)
    return list(buf)
def _get_stddevs(self, dists, mag, dctx, imt, stddev_types):
    """Return the total standard deviation of the intensity measure level
    from the tables.

    :param dists:
        The distance vector for the given magnitude and IMT.
    :param mag:
        The rupture magnitude.
    :param dctx:
        Distance context holding the distance attribute to interpolate at.
    :param imt:
        Intensity measure type.
    :param stddev_types:
        Requested standard deviation types.
    """
    stddevs = []
    for stddev_type in stddev_types:
        if stddev_type not in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES:
            raise ValueError("Standard Deviation type %s not supported"
                             % stddev_type)
        sigma = self._return_tables(mag, imt, stddev_type)
        interpolator_std = interp1d(dists, sigma,
                                    bounds_error=False)
        stddev = interpolator_std(getattr(dctx, self.distance_type))
        # Clamp to the table edges outside the tabulated distance range.
        stddev[getattr(dctx, self.distance_type) < dists[0]] = sigma[0]
        stddev[getattr(dctx, self.distance_type) > dists[-1]] = sigma[-1]
        stddevs.append(stddev)
    return stddevs
def noninjectable(*args):
    """Mark some parameters as not injectable.

    Serves as documentation and prevents Injector from ever attempting to
    provide the parameters.  For example::

        >>> class SomeClass:
        ...     @inject
        ...     @noninjectable('user_id')
        ...     def __init__(self, service: Service, user_id: int):
        ...         pass

    Decorations can be stacked and the order relative to ``@inject`` does
    not matter.

    :raises UnknownArgument: if a named argument does not exist on the
        decorated function.
    """
    def decorator(function):
        argspec = inspect.getfullargspec(inspect.unwrap(function))
        for arg in args:
            if arg not in argspec.args and arg not in argspec.kwonlyargs:
                raise UnknownArgument('Unable to mark unknown argument %s ' 'as non-injectable.' % arg)
        # Merge with any marks from earlier stacked decorations.
        existing = getattr(function, '__noninjectables__', set())
        merged = existing | set(args)
        function.__noninjectables__ = merged
        return function

    return decorator
def set_run_on_node_mask(nodemask):
    """Run the current thread and its children only on the given NUMA nodes.

    They will not migrate to CPUs of other nodes until the node affinity is
    reset with a new call to L{set_run_on_node_mask}.

    @param nodemask: set of node numbers
    @type nodemask: C{set}
    @raise RuntimeError: if libnuma rejects the mask
    """
    numa_mask = set_to_numa_nodemask(nodemask)
    bitmask = bitmask_t()
    bitmask.maskp = cast(byref(numa_mask), POINTER(c_ulong))
    # Size is expressed in bits, not bytes.
    bitmask.size = sizeof(nodemask_t) * 8
    rc = libnuma.numa_run_on_node_mask(byref(bitmask))
    if rc < 0:
        raise RuntimeError()
They will not migrate to CPUs of other nodes until the node affinity is
reset with a new call to L{set_run_on_node_mask}.
@param nodemask: node mask
@type nodemask: C{set} |
def sync_groups_from_ad(self):
    """Synchronise this user's Django groups with the mapped AD groups.

    Groups mapped from AD groups the user no longer belongs to are removed;
    groups mapped from newly-acquired AD groups are added.  Unmapped Django
    groups are left untouched.
    """
    ad_to_group = dict(ADGroupMapping.objects.values_list('ad_group', 'group'))
    member_ad_groups = set(
        self.ad_groups.filter(groups__isnull=False).values_list(flat=True))
    mapped_group_ids = set(ad_to_group.values())
    # Only consider groups that come from a mapping; others are manual.
    current = set(
        self.groups.filter(id__in=mapped_group_ids).values_list(flat=True))
    desired = {ad_to_group[ad_group] for ad_group in member_ad_groups}
    stale = current - desired
    if stale:
        self.groups.remove(*stale)
    missing = desired - current
    if missing:
        self.groups.add(*missing)
def filter_macro(func, *args, **kwargs):
    """Promote a function that returns a filter into its own filter type.

    The returned class can be used anywhere a regular Filter is accepted;
    it can also be called with extra arguments to pre-configure the
    underlying filter (partial application).
    """
    filter_partial = partial(func, *args, **kwargs)
    class FilterMacroMeta(FilterMeta):
        @staticmethod
        def __new__(mcs, name, bases, attrs):
            # Copy __name__/__doc__/etc. from the wrapped function so the
            # macro type is introspectable like the original function.
            for attr in WRAPPER_ASSIGNMENTS:
                if hasattr(func, attr):
                    attrs[attr] = getattr(func, attr)
            return super(FilterMacroMeta, mcs)\
                .__new__(mcs, func.__name__, bases, attrs)
        def __call__(cls, *runtime_args, **runtime_kwargs):
            # NOTE: "instantiating" the macro class actually builds and
            # returns the underlying filter chain, not a FilterMacro object.
            return filter_partial(*runtime_args, **runtime_kwargs)
    class FilterMacro(with_metaclass(FilterMacroMeta, FilterMacroType)):
        def _apply(self, value):
            # `self.__class__()` routes through the metaclass __call__ above,
            # so this delegates to a fresh instance of the composed filter.
            return self.__class__()._apply(value)
    return FilterMacro
Example::
@filter_macro
def String():
return Unicode | Strip | NotEmpty
# You can now use `String` anywhere you would use a regular Filter:
(String | Split(':')).apply('...')
You can also use ``filter_macro`` to create partials, allowing you to
preset one or more initialization arguments::
Minor = filter_macro(Max, max_value=18, inclusive=False)
Minor(inclusive=True).apply(18) |
def create(graph, label_field,
           threshold=1e-3,
           weight_field='',
           self_weight=1.0,
           undirected=False,
           max_iterations=None,
           _single_precision=False,
           _distributed='auto',
           verbose=True):
    """Infer label probabilities for unobserved vertices by label propagation.

    Given a weighted SGraph with observed integer class labels on a subset
    of vertices (None elsewhere), iteratively propagates label probabilities
    until convergence and returns a LabelPropagationModel.

    Raises TypeError if ``graph`` is not an SGraph or ``label_field`` is not
    integer typed.
    """
    from turicreate._cython.cy_server import QuietProgress

    # Validate inputs before handing off to the C++ toolkit.
    _raise_error_if_not_of_type(label_field, str)
    _raise_error_if_not_of_type(weight_field, str)
    if not isinstance(graph, _SGraph):
        raise TypeError('graph input must be a SGraph object.')
    if graph.vertices[label_field].dtype != int:
        raise TypeError('label_field %s must be integer typed.' % label_field)

    opts = {'graph': graph.__proxy__,
            'label_field': label_field,
            'threshold': threshold,
            'weight_field': weight_field,
            'self_weight': self_weight,
            'undirected': undirected,
            'max_iterations': max_iterations,
            'single_precision': _single_precision}
    with QuietProgress(verbose):
        params = _tc.extensions._toolkits.graph.label_propagation.create(opts)
    return LabelPropagationModel(params['model'])
infer the label probability for the unobserved vertices using the
"label propagation" algorithm.
The algorithm iteratively updates the label probability of current vertex
as a weighted sum of label probability of self and the neighboring vertices
until converge. See
:class:`turicreate.label_propagation.LabelPropagationModel` for the details
of the algorithm.
Notes: label propagation works well with small number of labels, i.e. binary
labels, or less than 1000 classes. The toolkit will throw error
if the number of classes exceeds the maximum value (1000).
Parameters
----------
graph : SGraph
The graph on which to compute the label propagation.
label_field: str
Vertex field storing the initial vertex labels. The values in
must be [0, num_classes). None values indicate unobserved vertex labels.
threshold : float, optional
Threshold for convergence, measured in the average L2 norm
(the sum of squared values) of the delta of each vertex's
label probability vector.
max_iterations: int, optional
The max number of iterations to run. Default is unlimited.
If set, the algorithm terminates when either max_iterations
or convergence threshold is reached.
weight_field: str, optional
Vertex field for edge weight. If empty, all edges are assumed
to have unit weight.
self_weight: float, optional
The weight for self edge.
undirected: bool, optional
If true, treat each edge as undirected, and propagates label in
both directions.
_single_precision : bool, optional
If true, running label propagation in single precision. The resulting
probability values may less accurate, but should run faster
and use less memory.
_distributed : distributed environment, internal
verbose : bool, optional
If True, print progress updates.
Returns
-------
out : LabelPropagationModel
References
----------
- Zhu, X., & Ghahramani, Z. (2002). `Learning from labeled and unlabeled data
with label propagation <http://www.cs.cmu.edu/~zhuxj/pub/CMU-CALD-02-107.pdf>`_.
Examples
--------
If given an :class:`~turicreate.SGraph` ``g``, we can create
a :class:`~turicreate.label_propagation.LabelPropagationModel` as follows:
>>> g = turicreate.load_sgraph('http://snap.stanford.edu/data/email-Enron.txt.gz',
... format='snap')
# Initialize random classes for a subset of vertices
# Leave the unobserved vertices with None label.
>>> import random
>>> def init_label(vid):
... x = random.random()
... if x < 0.2:
... return 0
... elif x > 0.9:
... return 1
... else:
... return None
>>> g.vertices['label'] = g.vertices['__id'].apply(init_label, int)
>>> m = turicreate.label_propagation.create(g, label_field='label')
We can obtain for each vertex the predicted label and the probability of
each label in the graph ``g`` using:
>>> labels = m['labels'] # SFrame
>>> labels
+------+-------+-----------------+-------------------+----------------+
| __id | label | predicted_label | P0 | P1 |
+------+-------+-----------------+-------------------+----------------+
| 5 | 1 | 1 | 0.0 | 1.0 |
| 7 | None | 0 | 0.8213214997 | 0.1786785003 |
| 8 | None | 1 | 5.96046447754e-08 | 0.999999940395 |
| 10 | None | 0 | 0.534984718273 | 0.465015281727 |
| 27 | None | 0 | 0.752801638549 | 0.247198361451 |
| 29 | None | 1 | 5.96046447754e-08 | 0.999999940395 |
| 33 | None | 1 | 5.96046447754e-08 | 0.999999940395 |
| 47 | 0 | 0 | 1.0 | 0.0 |
| 50 | None | 0 | 0.788279032657 | 0.211720967343 |
| 52 | None | 0 | 0.666666666667 | 0.333333333333 |
+------+-------+-----------------+-------------------+----------------+
[36692 rows x 5 columns]
See Also
--------
LabelPropagationModel |
def _self_event(self, event_name, cmd, *pargs, **kwargs):
if hasattr(self, event_name):
getattr(self, event_name)(cmd, *pargs, **kwargs) | Call self event |
def recurse(node, *args, **kwargs):
    """Entry point for AST recursion.

    Collects the per-node-type handler callbacks from ``kwargs`` (missing
    handlers default to None), seeds the depth counter, and starts the
    internal recursion.
    """
    forwarded = {info.handler: kwargs.get(info.handler)
                 for info in _NODE_INFO_TABLE.values()}
    forwarded["depth"] = 0
    _recurse(node, *args, **forwarded)
def linkify_contactgroups_contacts(self, contacts):
    """Replace contact names in each contactgroup with contact uuids.

    Names that cannot be resolved are recorded as unknown members.

    :param contacts: contacts object to link with
    :type contacts: alignak.objects.contact.Contacts
    :return: None
    """
    for contactgroup in self:
        resolved = set()
        for raw_name in contactgroup.get_contacts():
            contact_name = raw_name.strip()
            if not contact_name:
                continue
            contact = contacts.find_by_name(contact_name)
            if contact is None:
                contactgroup.add_unknown_members(contact_name)
            else:
                resolved.add(contact.uuid)
        # A set was used for de-duplication; members are stored as a list.
        contactgroup.replace_members(list(resolved))
:param contacts: realms object to link with
:type contacts: alignak.objects.contact.Contacts
:return: None |
def groups_moderators(self, room_id=None, group=None, **kwargs):
    """List all moderators of a group, selected by room id or by name.

    Raises RocketMissingParamException if neither is given.
    """
    # NOTE(review): extra kwargs are forwarded under the literal key
    # 'kwargs' — this matches the internal __call_api_get convention.
    if room_id:
        return self.__call_api_get('groups.moderators', roomId=room_id, kwargs=kwargs)
    if group:
        return self.__call_api_get('groups.moderators', roomName=group, kwargs=kwargs)
    raise RocketMissingParamException('roomId or group required')
def remove_user(name, profile='github'):
    """Remove a Github user by name from the configured organization.

    name
        The user to remove.
    profile
        The name of the profile configuration to use. Defaults to ``github``.

    Returns True if the user is no longer a member afterwards, False if the
    user could not be found.
    """
    client = _get_client(profile)
    org_name = _get_config_value(profile, 'org_name')
    organization = client.get_organization(org_name)

    try:
        git_user = client.get_user(name)
    except UnknownObjectException:
        log.exception("Resource not found")
        return False

    if organization.has_in_members(git_user):
        organization.remove_from_members(git_user)
    # Re-check membership so the result reflects the actual outcome.
    return not organization.has_in_members(git_user)
name
The user for which to obtain information.
profile
The name of the profile configuration to use. Defaults to ``github``.
CLI Example:
.. code-block:: bash
salt myminion github.remove_user github-handle |
def finalize_options(self):
    """Validate the command options.

    Requires ``fa_version``; if ``zip_path`` is given it must point to an
    existing local file.

    NOTE(review): validation uses ``assert``, which is stripped under
    ``python -O`` — consider raising an explicit error instead.
    """
    assert bool(self.fa_version), 'FA version is mandatory for this command.'
    if self.zip_path:
        assert os.path.exists(self.zip_path), (
            'Local zipfile does not exist: %s' % self.zip_path)
def write(self, data):
    """Buffer a string for writing, flushing once enough has accumulated.

    Args:
        data: data to write. str.

    Raises:
        TypeError: if data is not of type str.
    """
    self._check_open()
    if not isinstance(data, str):
        raise TypeError('Expected str but got %s.' % type(data))
    if data:
        self._buffer.append(data)
        size = len(data)
        self._buffered += size
        self._offset += size
        # Flush once the buffered amount reaches the configured threshold.
        if self._buffered >= self._flushsize:
            self._flush()
Args:
data: data to write. str.
Raises:
TypeError: if data is not of type str. |
def normalize_release_properties(ensembl_release, species):
    """Validate and normalize an Ensembl release and species.

    Returns a tuple ``(release_number, latin_name, reference_name)``.
    """
    release = check_release_number(ensembl_release)
    # Accept either a Species object or a species name string.
    species_obj = species if isinstance(species, Species) else find_species_by_name(species)
    reference = species_obj.which_reference(release)
    return release, species_obj.latin_name, reference
normalize the species name, and get its associated reference. |
def precompute(self, cache_dir=None, swath_usage=0, **kwargs):
    """Generate row and column arrays via ll2cr and cache them for later use."""
    if kwargs.get('mask') is not None:
        LOG.warning("'mask' parameter has no affect during EWA "
                    "resampling")
    del kwargs
    if cache_dir:
        LOG.warning("'cache_dir' is not used by EWA resampling")
    lons, lats = self.source_geo_def.get_lonlats()
    # Unwrap xarray containers; map_blocks works on raw dask arrays.
    if isinstance(lons, xr.DataArray):
        lons = lons.data
        lats = lats.data
    # Prepend a length-2 axis: block 0 holds columns, block 1 holds rows.
    out_chunks = (2,) + lons.chunks
    mapped = da.map_blocks(self._call_ll2cr, lons, lats,
                           self.target_geo_def, swath_usage,
                           dtype=lons.dtype, chunks=out_chunks, new_axis=[0])
    self.cache = {
        "rows": mapped[1],
        "cols": mapped[0],
    }
    return None
def gen_sext(src, dst):
    """Return a REIL SEXT instruction sign-extending *src* into *dst*."""
    # Sign extension only makes sense into an equal or wider destination.
    assert src.size <= dst.size
    return ReilBuilder.build(ReilMnemonic.SEXT, src, ReilEmptyOperand(), dst)
def track_purchase(self, user, items, total, purchase_id=None, campaign_id=None,
                   template_id=None, created_at=None,
                   data_fields=None):
    """Track a purchase via Iterable's ``/api/commerce/trackPurchase``.

    The 'purchase_id' argument maps to 'id' for this API endpoint. This
    name is used to distinguish it from other instances where 'id' is a
    part of the API request with other Iterable endpoints.

    Args:
        user (dict): Iterable user object (e.g. ``{"email": ...}``).
        items (list): list of commerce item dicts.
        total (float): order total.
        purchase_id: optional purchase identifier, sent as ``id``.
        campaign_id: optional campaign identifier.
        template_id: optional template identifier.
        created_at: optional creation timestamp.
        data_fields (dict): optional extra fields, sent as ``dataFields``.

    Raises:
        TypeError: if user, items or total have the wrong type.
    """
    call = "/api/commerce/trackPurchase"
    payload = {}
    if isinstance(user, dict):
        payload["user"] = user
    else:
        raise TypeError('user key is not in Dictionary format')
    if isinstance(items, list):
        payload["items"] = items
    else:
        raise TypeError('items are not in Array format')
    if isinstance(total, float):
        payload["total"] = total
    else:
        raise TypeError('total is not in correct format')
    if purchase_id is not None:
        payload["id"] = str(purchase_id)
    if campaign_id is not None:
        payload["campaignId"] = campaign_id
    if template_id is not None:
        payload["templateId"] = template_id
    if created_at is not None:
        payload["createdAt"] = created_at
    if data_fields is not None:
        # BUG FIX: the Iterable API expects camelCase "dataFields" like the
        # other keys; "data_fields" would be ignored by the server.
        payload["dataFields"] = data_fields
    return self.api_call(call=call, method="POST", json=payload)
This name is used to distinguish it from other instances where
'id' is a part of the API request with other Iterable endpoints. |
def _namify_arguments(mapping):
result = []
for name, parameter in mapping.iteritems():
parameter.name = name
result.append(parameter)
return result | Ensure that a mapping of names to parameters has the parameters set to the
correct name. |
def tgread_bool(self):
    """Read a Telegram-serialized boolean value.

    Raises RuntimeError for any constructor id other than boolTrue/boolFalse.
    """
    code = self.read_int(signed=False)
    if code == 0x997275b5:  # boolTrue constructor id
        return True
    if code == 0xbc799737:  # boolFalse constructor id
        return False
    raise RuntimeError('Invalid boolean code {}'.format(hex(code)))
def _get_contents(self):
    """Return the bundle contents with lazy strings forced to plain str."""
    contents = super(LazyNpmBundle, self)._get_contents()
    return [str(item) if is_lazy_string(item) else item for item in contents]
def get_host(self, use_x_forwarded=True):
    """Return the HTTP host from forwarded headers, Host header, or server name.

    A port is appended unless it is the default for the scheme (80/443).
    """
    default_port = '443' if self.is_secure else '80'
    env = self.environ
    if use_x_forwarded and 'HTTP_X_FORWARDED_HOST' in env:
        host = env['HTTP_X_FORWARDED_HOST']
        port = env.get('HTTP_X_FORWARDED_PORT')
        if port and port != default_port:
            host = '%s:%s' % (host, port)
        return host
    if 'HTTP_HOST' in env:
        # The Host header already carries any non-default port.
        return env['HTTP_HOST']
    host = env['SERVER_NAME']
    server_port = str(env['SERVER_PORT'])
    if server_port != default_port:
        host = '%s:%s' % (host, server_port)
    return host
def tenant_absent(name, profile=None, **connection_args):
    """Ensure that the keystone tenant is absent.

    name
        The name of the tenant that should not exist
    """
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'Tenant / project "{0}" is already absent'.format(name)}
    tenant = __salt__['keystone.tenant_get'](name=name,
                                             profile=profile,
                                             **connection_args)
    # An 'Error' key means the tenant does not exist: nothing to do.
    if 'Error' in tenant:
        return ret
    if __opts__.get('test'):
        ret['result'] = None
        ret['comment'] = 'Tenant / project "{0}" will be deleted'.format(name)
        return ret
    __salt__['keystone.tenant_delete'](name=name, profile=profile,
                                       **connection_args)
    ret['comment'] = 'Tenant / project "{0}" has been deleted'.format(name)
    ret['changes']['Tenant/Project'] = 'Deleted'
    return ret
name
The name of the tenant that should not exist |
def inject_default_call(self, high):
    """Set a ``__call__`` function on each state that lacks one.

    :param high: high data structure (mutated in place)
    :return: None
    """
    # collections.Mapping was removed in Python 3.10; use collections.abc.
    from collections import abc
    for chunk in high:
        state = high[chunk]
        if not isinstance(state, abc.Mapping):
            continue
        for state_ref in state:
            if not isinstance(state[state_ref], list):
                continue
            # A bare string argset already names the function to call;
            # only states without one need the default '__call__'.
            if not any(isinstance(argset, str) for argset in state[state_ref]):
                state[state_ref].insert(-1, '__call__')
:param high:
:return: |
def run(self):
    """Execute the reloader forever, blocking the current thread.

    This will invoke ``sys.exit(1)`` if interrupted.
    """
    self._capture_signals()
    self._start_monitor()
    try:
        while True:
            # A falsy _run_worker() means the worker exited without asking
            # for a restart; block until a file change before retrying.
            if not self._run_worker():
                self._wait_for_changes()
            time.sleep(self.reload_interval)
    except KeyboardInterrupt:
        pass
    finally:
        # Always tear down the monitor and restore handlers before exiting.
        self._stop_monitor()
        self._restore_signals()
    sys.exit(1)
This will invoke ``sys.exit(1)`` if interrupted. |
def subtract_afromb(*inputs, **kwargs):
    """Subtract stream a (inputs[0]) from stream b (inputs[1]).

    Returns:
        list(IOTileReading): a single reading valued ``b - a``, or an empty
        list if either input stream is empty.
    """
    try:
        reading_a = inputs[0].pop()
        reading_b = inputs[1].pop()
    except StreamEmptyError:
        return []
    return [IOTileReading(0, 0, reading_b.value - reading_a.value)]
Returns:
list(IOTileReading) |
def ctcp_reply(self, command, dst, message=None):
    """Send a reply to a CTCP request.

    :param command: CTCP command to use.
    :type command: str
    :param dst: sender of the initial request.
    :type dst: str
    :param message: data to attach to the reply.
    :type message: str
    """
    body = command if message is None else u'{0} {1}'.format(command, message)
    # CTCP payloads are delimited by \x01 bytes.
    self.notice(dst, u'\x01{0}\x01'.format(body))
:param command: CTCP command to use.
:type command: str
:param dst: sender of the initial request.
:type dst: str
:param message: data to attach to the reply.
:type message: str |
def caesar(shift, data, shift_ranges=('az', 'AZ')):
    """Apply a caesar cipher to a string.

    Each character falling inside one of ``shift_ranges`` is rotated by
    ``shift`` positions within its range; everything else passes through.

    Args:
        shift(int): The shift to apply.
        data(str): The string to apply the cipher to.
        shift_ranges(list of str): Which alphabets to shift.

    Returns:
        str: The string with the caesar cipher applied.
    """
    table = {}
    for rng in shift_ranges:
        start, end = ord(rng[0]), ord(rng[-1])
        span = end - start + 1
        for code in range(start, end + 1):
            table[chr(code)] = chr((code - start + shift) % span + start)
    return ''.join(table.get(ch, ch) for ch in data)
The caesar cipher is a substition cipher where each letter in the given
alphabet is replaced by a letter some fixed number down the alphabet.
If ``shift`` is ``1``, *A* will become *B*, *B* will become *C*, etc...
You can define the alphabets that will be shift by specifying one or more
shift ranges. The characters will than be shifted within the given ranges.
Args:
shift(int): The shift to apply.
data(str): The string to apply the cipher to.
shift_ranges(list of str): Which alphabets to shift.
Returns:
str: The string with the caesar cipher applied.
Examples:
>>> caesar(16, 'Pwnypack')
'Fmdofqsa'
>>> caesar(-16, 'Fmdofqsa')
'Pwnypack'
>>> caesar(16, 'PWNYpack', shift_ranges=('AZ',))
'FMDOpack'
>>> caesar(16, 'PWNYpack', shift_ranges=('Az',))
'`g^iFqsA' |
def data_only_container(name, volumes):
    """Create a "data-only container" if it doesn't already exist.

    We'd like to avoid these, but postgres + boot2docker make it
    difficult, see issue #5.

    Returns the created container, or None if it already exists.
    """
    if inspect_container(name):
        return
    return _get_docker().create_container(
        name=name,
        image='datacats/postgres',
        command='true',
        volumes=volumes,
        detach=True)
We'd like to avoid these, but postgres + boot2docker make
it difficult, see issue #5 |
def color_scale(begin_hsl, end_hsl, nb):
    """Return ``nb + 1`` HSL tuples linearly spanning begin_hsl..end_hsl.

    Raises ValueError when ``nb`` is negative.
    """
    if nb < 0:
        raise ValueError(
            "Unsupported negative number of colors (nb=%r)." % nb)
    if nb > 0:
        step = tuple((end_hsl[i] - begin_hsl[i]) / float(nb) for i in range(3))
    else:
        step = (0, 0, 0)
    return [tuple(begin_hsl[i] + step[i] * k for i in range(3))
            for k in range(nb + 1)]
>>> from colour import color_scale
>>> [rgb2hex(hsl2rgb(hsl)) for hsl in color_scale((0, 1, 0.5),
... (1, 1, 0.5), 3)]
['#f00', '#0f0', '#00f', '#f00']
>>> [rgb2hex(hsl2rgb(hsl))
... for hsl in color_scale((0, 0, 0),
... (0, 0, 1),
... 15)] # doctest: +ELLIPSIS
['#000', '#111', '#222', ..., '#ccc', '#ddd', '#eee', '#fff']
Of course, asking for negative values is not supported:
>>> color_scale((0, 1, 0.5), (1, 1, 0.5), -2)
Traceback (most recent call last):
...
ValueError: Unsupported negative number of colors (nb=-2). |
def pauli_from_char(ch, n=0):
    """Make a Pauli operator from a single character.

    Args:
        ch (str): "X", "Y", "Z" or "I" (case-insensitive).
        n (int, optional): qubit index for X/Y/Z.

    Raises:
        ValueError: for any other character.
    """
    normalized = ch.upper()
    if normalized == "I":
        return I
    makers = {"X": X, "Y": Y, "Z": Z}
    if normalized in makers:
        return makers[normalized](n)
    raise ValueError("ch shall be X, Y, Z or I")
Args:
ch (str): "X" or "Y" or "Z" or "I".
n (int, optional): Make Pauli matrix as n-th qubits.
Returns:
If ch is "X" => X, "Y" => Y, "Z" => Z, "I" => I
Raises:
ValueError: When ch is not "X", "Y", "Z" nor "I". |
def is_writer(self, check_pending=True):
    """Return True if the caller is the active writer or (optionally) a pending one."""
    caller = self._current_thread()
    if self._writer == caller:
        return True
    if not check_pending:
        return False
    return caller in self._pending_writers
def to_xml(self, tag_name="buyer"):
    """Return an XMLi representation of the object.

    @param tag_name:str Tag name
    @return: Element
    @raise ValueError: if name/address (or identifier, when required) is empty
    """
    required = {"name": self.name, "address": self.address}
    for attr_name, attr_value in required.items():
        if is_empty_or_none(attr_value):
            raise ValueError("'%s' attribute cannot be empty or None." % attr_name)
    if self.__require_id and is_empty_or_none(self.identifier):
        raise ValueError("identifier attribute cannot be empty or None.")

    root = Document().createElement(tag_name)
    self._create_text_node(root, "id", self.identifier)
    self._create_text_node(root, "name", self.name, True)
    if self.phone:
        self._create_text_node(root, "phone", self.phone, True)
    root.appendChild(self.address.to_xml())
    return root
@param tag_name:str Tag name
@return: Element |
def add_arguments(self, parser, bootstrap=False):
    """Add all items to the parser passed in.

    Args:
        parser (argparse.ArgumentParser): The parser to add all items to.
        bootstrap (bool): Flag to indicate whether you only want to mark
            bootstrapped items as required on the command-line.
    """
    # A plain loop, not a list comprehension: this code runs purely for
    # its side effect on `parser`, and the built list was discarded.
    for item in self._get_items(bootstrap=False):
        item.add_argument(parser, bootstrap)
Args:
parser (argparse.ArgumentParser): The parser to add all items to.
bootstrap (bool): Flag to indicate whether you only want to mark
bootstrapped items as required on the command-line. |
def send(self, sender, **named):
    """Send signal from sender to all connected receivers.

    If any receiver raises, the error propagates and terminates the
    dispatch loop, so later receivers may not be called.

    Returns a list of tuple pairs [(receiver, response), ...].
    """
    # Short-circuit: the cache is only consulted when receivers exist.
    if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
        return []
    return [
        (receiver, receiver(signal=self, sender=sender, **named))
        for receiver in self._live_receivers(sender)
    ]
return responses | Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop. So it's possible that all receivers
won't be called if an error is raised.
Arguments:
sender
The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ]. |
def fft_freqs(n_fft, fs):
    """Return the positive DFT bin frequencies.

    Parameters
    ----------
    n_fft : int
        Number of points in the FFT.
    fs : float
        The sampling rate.
    """
    n_bins = n_fft // 2 + 1  # DC through Nyquist
    return np.arange(n_bins) / float(n_fft) * float(fs)
Parameters
----------
n_fft : int
Number of points in the FFT.
fs : float
The sampling rate. |
def destroyCluster(self):
    """Terminate all cluster instances (up to 3 attempts), then delete the group."""
    logger.debug("Destroying cluster %s" % self.clusterName)
    remaining = self._getNodesInCluster()
    for _ in range(3):
        if not remaining:
            break
        self._terminateInstances(instances=remaining)
        # Re-list: termination may race with instance recreation.
        remaining = self._getNodesInCluster()
    instanceGroup = self._gceDriver.ex_get_instancegroup(self.clusterName, zone=self._zone)
    instanceGroup.destroy()
def find_by_reference_ids(reference_ids, _connection=None, page_size=100,
                          page_number=0, sort_by=enums.DEFAULT_SORT_BY,
                          sort_order=enums.DEFAULT_SORT_ORDER):
    """List all videos identified by a list of reference ids.

    Raises PyBrightcoveError unless ``reference_ids`` is a list or tuple.
    """
    if not isinstance(reference_ids, (list, tuple)):
        raise exceptions.PyBrightcoveError(
            "Video.find_by_reference_ids expects an iterable argument")
    # The API takes a single comma-separated string of ids.
    return connection.ItemResultSet(
        'find_videos_by_reference_ids', Video, _connection, page_size,
        page_number, sort_by, sort_order, reference_ids=','.join(reference_ids))
def _get_serv(ret):
    """Return a memcache client built from the returner options.

    Returns None (after logging an error) when host or port is missing.
    """
    _options = _get_options(ret)
    host = _options.get('host')
    port = _options.get('port')
    log.debug('memcache server: %s:%s', host, port)
    if not (host and port):
        log.error('Host or port not defined in salt config')
        return
    return memcache.Client(['{0}:{1}'.format(host, port)], debug=0)
def generation_time(self):
    """The generation time as set by Yamcs, or None when unset.

    :type: :class:`~datetime.datetime`
    """
    entry = self._proto.commandQueueEntry
    if not entry.HasField('generationTimeUTC'):
        return None
    return parse_isostring(entry.generationTimeUTC)
:type: :class:`~datetime.datetime` |
def resolve(self, dependency):
    """Resolve a dependency as a lazily-evaluated instance attribute.

    Accepts either the dependency's registered name (str) or the decorated
    dependency function itself; returns a DeferredProperty that looks the
    dependency up on first attribute access.
    """
    name = dependency if isinstance(dependency, str) \
        else dependency._giveme_registered_name
    return DeferredProperty(partial(self.get, name))
of given class.
>>> class Users:
... db = injector.resolve(user_db)
...
... def get_by_id(self, user_id):
... return self.db.get(user_id)
When the attribute is first accessed, it
will be resolved from the corresponding
dependency function |
def forward_word(event):
    """Move the cursor forward to the end of the next word.

    Words are composed of letters and digits.
    """
    buff = event.current_buffer
    offset = buff.document.find_next_word_ending(count=event.arg)
    # offset is falsy when no further word ending exists.
    if offset:
        buff.cursor_position += offset
digits. |
def read(self, sensors):
    """Read a set of sensor keys from the device (``yield from`` coroutine).

    Ensures a session exists, posts the de-duplicated key list, and lets
    each sensor extract its value from the JSON body.  Returns False when
    no session could be established or the session expired (401), True
    otherwise.
    """
    # De-duplicate keys in case several sensors share one.
    payload = {'destDev': [], 'keys': list(set([s.key for s in sensors]))}
    if self.sma_sid is None:
        yield from self.new_session()
        if self.sma_sid is None:
            return False
    body = yield from self._fetch_json(URL_VALUES, payload=payload)
    # An expired session answers 401; drop it so the next read logs in again.
    if body.get('err') == 401:
        _LOGGER.warning("401 error detected, closing session to force "
                        "another login attempt")
        self.close_session()
        return False
    _LOGGER.debug(json.dumps(body))
    for sen in sensors:
        if sen.extract_value(body):
            _LOGGER.debug("%s\t= %s %s",
                          sen.name, sen.value, sen.unit)
    return True
def serialize_on_parent(
    self,
    parent,
    value,
    state
):
    """Serialize the value and add it to the parent element.

    Raises (via ``state``) when a required value is None; silently omits
    falsy values when ``omit_empty`` is set.
    """
    if value is None and self.required:
        state.raise_error(MissingValue, self._missing_value_message(parent))
    # Order matters: the required check above must run before omit_empty,
    # so a required-but-None value errors rather than being skipped.
    if not value and self.omit_empty:
        return
    element = _element_get_or_add_from_parent(parent, self.element_path)
    self._serialize(element, value, state)
def on_mouse_release(self, x: int, y: int, button, mods):
    """Forward mouse release events to the example window.

    Only buttons 1 (left) and 4 (right) are forwarded; the y coordinate is
    flipped to the example's bottom-left origin.
    """
    if button not in (1, 4):
        return
    mapped_button = 1 if button == 1 else 2
    self.example.mouse_release_event(x, self.buffer_height - y, mapped_button)
def _resolve_subkeys(key, separator='.'):
subkey = None
if separator in key:
index = key.index(separator)
subkey = key[index + 1:]
key = key[:index]
return key, subkey | Given a key which may actually be a nested key, return the top level
key and any nested subkeys as separate values.
Args:
key (str): A string that may or may not contain the separator.
separator (str): The namespace separator. Defaults to `.`.
Returns:
Tuple[str, str]: The key and subkey(s). |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.