code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def clean_bytes(line):
text = line.decode('utf-8').replace('\r', '').strip('\n')
return re.sub(r'\x1b[^m]*m', '', text).replace("``", "`\u200b`").strip('\n') | Cleans a byte sequence of shell directives and decodes it. |
def get_index(self, filename):
index = self.fsmodel.index(filename)
if index.isValid() and index.model() is self.fsmodel:
return self.proxymodel.mapFromSource(index) | Return index associated with filename |
def generate_and_merge_schemas(samples):
merged = generate_schema_for_sample(next(iter(samples)))
for sample in samples:
merged = merge_schema(merged, generate_schema_for_sample(sample))
return merged | Iterates through the given samples, generating schemas
and merging them, returning the resulting merged schema. |
def all_elements_equal(value):
if is_scalar(value):
return True
return np.array(value == value.flatten()[0]).all() | Checks if all elements in the given value are equal to each other.
If the input is a single value the result is trivial. If not, we compare all the values to see
if they are exactly the same.
Args:
value (ndarray or number): a numpy array or a single number.
Returns:
bool: true if all... |
def _parse_scale(scale_exp):
m = re.search("(\w+?)\{(.*?)\}", scale_exp)
if m is None:
raise InvalidFormat('Unable to parse the given time period.')
scale = m.group(1)
range = m.group(2)
if scale not in SCALES:
raise InvalidFormat('%s is not a valid scale.' % scale)
ranges = re.s... | Parses a scale expression and returns the scale, and a list of ranges. |
def pipe_value(self, message):
'Send a new value into the ws pipe'
jmsg = json.dumps(message)
self.send(jmsg) | Send a new value into the ws pipe |
async def get(self, cmd, daap_data=True, timeout=None, **args):
def _get_request():
return self.http.get_data(
self._mkurl(cmd, *args),
headers=_DMAP_HEADERS,
timeout=timeout)
await self._assure_logged_in()
return await self._do(_get_re... | Perform a DAAP GET command. |
def clear_published_date(self):
if (self.get_published_date_metadata().is_read_only() or
self.get_published_date_metadata().is_required()):
raise errors.NoAccess()
self._my_map['publishedDate'] = self._published_date_default | Removes the puiblished date.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.* |
def find_next(lines, find_str, start_index):
mode = None
if isinstance(find_str, basestring):
mode = 'normal'
message = find_str
elif isinstance(find_str, Invert):
mode = 'invert'
message = str(find_str)
else:
raise TypeError("Unsupported message type")
for i ... | Find the next instance of find_str from lines starting from start_index.
:param lines: Lines to look through
:param find_str: String or Invert to look for
:param start_index: Index to start from
:return: (boolean, index, line) |
def stop_daemon(self, payload=None):
kill_signal = signals['9']
self.process_handler.kill_all(kill_signal, True)
self.running = False
return {'message': 'Pueue daemon shutting down',
'status': 'success'} | Kill current processes and initiate daemon shutdown.
The daemon will shut down after a last check on all killed processes. |
def BuildDefaultValue(self, value_cls):
try:
return value_cls()
except Exception as e:
logging.exception(e)
raise DefaultValueError(
"Can't create default for value %s: %s" % (value_cls.__name__, e)) | Renders default value of a given class.
Args:
value_cls: Default value of this class will be rendered. This class has to
be (or to be a subclass of) a self.value_class (i.e. a class that this
renderer is capable of rendering).
Returns:
An initialized default value.
Raises:
... |
def close(self):
from matplotlib.pyplot import close
for ax in self.axes[::-1]:
ax.set_xscale('linear')
ax.set_yscale('linear')
ax.cla()
close(self) | Close the plot and release its memory. |
def from_sds(var, *args, **kwargs):
var.__dict__['dtype'] = HTYPE_TO_DTYPE[var.info()[3]]
shape = var.info()[2]
var.__dict__['shape'] = shape if isinstance(shape, (tuple, list)) else tuple(shape)
return da.from_array(var, *args, **kwargs) | Create a dask array from a SD dataset. |
def close(self) -> None:
self._channel.close()
self._channel = self._stub_v1 = self._stub_v2 = None | Close the gRPC channel and free the acquired resources. Using a closed client is
not supported. |
def retract(self, idx_or_declared_fact):
self.facts.retract(idx_or_declared_fact)
if not self.running:
added, removed = self.get_activations()
self.strategy.update_agenda(self.agenda, added, removed) | Retracts a specific fact, using its index
.. note::
This updates the agenda |
def run_in_terminal(self, func, render_cli_done=False, cooked_mode=True):
if render_cli_done:
self._return_value = True
self._redraw()
self.renderer.reset()
else:
self.renderer.erase()
self._return_value = None
if cooked_mode:
w... | Run function on the terminal above the prompt.
What this does is first hiding the prompt, then running this callable
(which can safely output to the terminal), and then again rendering the
prompt which causes the output of this function to scroll above the
prompt.
:param func: ... |
def create_session(self, ticket, payload=None, expires=None):
assert isinstance(self.session_storage_adapter, CASSessionAdapter)
logging.debug('[CAS] Creating session for ticket {}'.format(ticket))
self.session_storage_adapter.create(
ticket,
payload=payload,
... | Create a session record from a service ticket. |
def save_model(self, filename, num_iteration=None, start_iteration=0):
if num_iteration is None:
num_iteration = self.best_iteration
_safe_call(_LIB.LGBM_BoosterSaveModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
... | Save Booster to file.
Parameters
----------
filename : string
Filename to save Booster.
num_iteration : int or None, optional (default=None)
Index of the iteration that should be saved.
If None, if the best iteration exists, it is saved; otherwise, al... |
def close(self):
super(LockingDatabase, self).close()
if not self.readonly:
self.release_lock() | Closes the database, releasing lock. |
def prev(self, n=1):
i = abs(self.tell - n)
return self.get(i, n) | Get the previous n data from file.
Keyword argument:
n -- number of structs to be retrieved (default 1)
Must be greater than 0.
Return:
A data in the format of obj_fmt, if n = 1. A list of
structs, otherwise. |
def plot_signal(signal, sig_len, n_sig, fs, time_units, sig_style, axes):
"Plot signal channels"
if len(sig_style) == 1:
sig_style = n_sig * sig_style
if time_units == 'samples':
t = np.linspace(0, sig_len-1, sig_len)
else:
downsample_factor = {'seconds':fs, 'minutes':fs * 60,
... | Plot signal channels |
def _parse_description(self, description_text):
text = description_text
text = text.strip()
lines = text.split('\n')
data = {}
for line in lines:
if ":" in line:
idx = line.index(":")
key = line[:idx]
value = line[idx+1:... | Turn description to dictionary. |
def addToStore(store, identifier, name):
persistedFactory = store.findOrCreate(_PersistedFactory, identifier=identifier)
persistedFactory.name = name
return persistedFactory | Adds a persisted factory with given identifier and object name to
the given store.
This is intended to have the identifier and name partially
applied, so that a particular module with an exercise in it can
just have an ``addToStore`` function that remembers it in the
store.
If a persisted fact... |
def start_tcp_server(self, ip, port, name=None, timeout=None, protocol=None, family='ipv4'):
self._start_server(TCPServer, ip, port, name, timeout, protocol, family) | Starts a new TCP server to given `ip` and `port`.
Server can be given a `name`, default `timeout` and a `protocol`.
`family` can be either ipv4 (default) or ipv6. Notice that you have to
use `Accept Connection` keyword for server to receive connections.
Examples:
| Start TCP se... |
def get_arrays(self, type_img):
if type_img.lower() == 'lola':
return LolaMap(self.ppdlola, *self.window, path_pdsfile=self.path_pdsfiles).image()
elif type_img.lower() == 'wac':
return WacMap(self.ppdwac, *self.window, path_pdsfile=self.path_pdsfiles).image()
else:
... | Return arrays the region of interest
Args:
type_img (str): Either lola or wac.
Returns:
A tupple of three arrays ``(X,Y,Z)`` with ``X`` contains the
longitudes, ``Y`` contains the latitude and ``Z`` the values
extracted for the region of interest.
... |
def get_class_from_settings_from_apps(settings_key):
cls_path = getattr(settings, settings_key, None)
if not cls_path:
raise NotImplementedError()
try:
app_label = cls_path.split('.')[-2]
model_name = cls_path.split('.')[-1]
except ValueError:
raise ImproperlyConfigured("... | Try and get a class from a settings path by lookin in installed apps. |
def configure_logging(verbosity):
root = logging.getLogger()
formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname).3s %(name)s %(message)s',
'%y-%m-%d %H:%M:%S')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
loglevels = [logging.CRI... | Set up the global logging level.
Args:
verbosity (int): The logging verbosity |
def parse_content(self, content, allow_no_value=False):
super(IniConfigFile, self).parse_content(content)
config = RawConfigParser(allow_no_value=allow_no_value)
fp = io.StringIO(u"\n".join(content))
config.readfp(fp, filename=self.file_name)
self.data = config | Parses content of the config file.
In child class overload and call super to set flag
``allow_no_values`` and allow keys with no value in
config file::
def parse_content(self, content):
super(YourClass, self).parse_content(content,
... |
def commuting_sets_by_indices(pauli_sums, commutation_check):
assert isinstance(pauli_sums, list)
group_inds = []
group_terms = []
for i, pauli_sum in enumerate(pauli_sums):
for j, term in enumerate(pauli_sum):
if len(group_inds) == 0:
group_inds.append([(i, j)])
... | For a list of pauli sums, find commuting sets and keep track of which pauli sum they came from.
:param pauli_sums: A list of PauliSum
:param commutation_check: a function that checks if all elements of a list
and a single pauli term commute.
:return: A list of commuting sets. ... |
def register(self, es, append=None, modulo=None):
if not isinstance(es, CMAEvolutionStrategy):
raise TypeError("only class CMAEvolutionStrategy can be " +
"registered for logging")
self.es = es
if append is not None:
self.append = append
... | register a `CMAEvolutionStrategy` instance for logging,
``append=True`` appends to previous data logged under the same name,
by default previous data are overwritten. |
def _get_arg_tokens(cli):
arg = cli.input_processor.arg
return [
(Token.Prompt.Arg, '(arg: '),
(Token.Prompt.Arg.Text, str(arg)),
(Token.Prompt.Arg, ') '),
] | Tokens for the arg-prompt. |
def remove_sites_from_neighbours( self, remove_labels ):
if type( remove_labels ) is str:
remove_labels = [ remove_labels ]
self.neighbours = set( n for n in self.neighbours if n.label not in remove_labels ) | Removes sites from the set of neighbouring sites if these have labels in remove_labels.
Args:
Remove_labels (List) or (Str): List of Site labels to be removed from the cluster neighbour set.
Returns:
None |
def get_environments():
envs = []
for root, subfolders, files in os.walk('environments'):
for filename in files:
if filename.endswith(".json"):
path = os.path.join(
root[len('environments'):], filename[:-len('.json')])
envs.append(get_envir... | Gets all environments found in the 'environments' directory |
def update_contact_of_client(self, contact_id, contact_dict):
return self._create_put_request(resource=CONTACTS, billomat_id=contact_id, send_data=contact_dict) | Updates a contact
:param contact_id: the id of the contact
:param contact_dict: dict
:return: dict |
def check_who_read(self, messages):
for m in messages:
readers = []
for p in m.thread.participation_set.all():
if p.date_last_check is None:
pass
elif p.date_last_check > m.sent_at:
readers.append(p.participant.id)
... | Check who read each message. |
def _wrap_rpc_behavior(handler, fn):
if handler is None:
return None
if handler.request_streaming and handler.response_streaming:
behavior_fn = handler.stream_stream
handler_factory = grpc.stream_stream_rpc_method_handler
elif handler.request_streaming and not handler.response_stream... | Returns a new rpc handler that wraps the given function |
def close(self):
if self._writer is not None:
self.flush()
self._writer.close()
self._writer = None | Closes the record writer. |
def add_volume(self,colorchange=True,column=None,name='',str='{name}',**kwargs):
if not column:
column=self._d['volume']
up_color=kwargs.pop('up_color',self.theme['up_color'])
down_color=kwargs.pop('down_color',self.theme['down_color'])
study={'kind':'volume',
'name':name,
'params':{'colorchange'... | Add 'volume' study to QuantFigure.studies
Parameters:
colorchange : bool
If True then each volume bar will have a fill color
depending on if 'base' had a positive or negative
change compared to the previous value
If False then each volume bar will have a fill color
depending on if the volume... |
def create_topics(self, new_topics, timeout_ms=None, validate_only=False):
version = self._matching_api_version(CreateTopicsRequest)
timeout_ms = self._validate_timeout(timeout_ms)
if version == 0:
if validate_only:
raise IncompatibleBrokerVersion(
... | Create new topics in the cluster.
:param new_topics: A list of NewTopic objects.
:param timeout_ms: Milliseconds to wait for new topics to be created
before the broker returns.
:param validate_only: If True, don't actually create new topics.
Not supported by all versions... |
def get_column_index(self, header):
try:
index = self._column_headers.index(header)
return index
except ValueError:
raise_suppressed(KeyError(("'{}' is not a header for any "
"column").format(header))) | Get index of a column from it's header.
Parameters
----------
header: str
header of the column.
Raises
------
ValueError:
If no column could be found corresponding to `header`. |
def __create_entry(self, entrytype, data, index, ttl=None):
if entrytype == 'HS_ADMIN':
op = 'creating HS_ADMIN entry'
msg = 'This method can not create HS_ADMIN entries.'
raise IllegalOperationException(operation=op, msg=msg)
entry = {'index':index, 'type':entrytype,... | Create an entry of any type except HS_ADMIN.
:param entrytype: THe type of entry to create, e.g. 'URL' or
'checksum' or ... Note: For entries of type 'HS_ADMIN', please
use __create_admin_entry(). For type '10320/LOC', please use
'add_additional_URL()'
:param data: T... |
def is_schema_of_common_names(schema: GraphQLSchema) -> bool:
query_type = schema.query_type
if query_type and query_type.name != "Query":
return False
mutation_type = schema.mutation_type
if mutation_type and mutation_type.name != "Mutation":
return False
subscription_type = schema.... | Check whether this schema uses the common naming convention.
GraphQL schema define root types for each type of operation. These types are the
same as any other type and can be named in any manner, however there is a common
naming convention:
schema {
query: Query
mutation: Mutation
}
... |
def random_filename(path=None):
filename = uuid4().hex
if path is not None:
filename = os.path.join(path, filename)
return filename | Make a UUID-based file name which is extremely unlikely
to exist already. |
def contribute_to_class(self, cls, name):
self.update_rel_to(cls)
self.set_attributes_from_name(name)
self.model = cls
if not self.remote_field.through and not cls._meta.abstract:
self.remote_field.through = create_many_to_many_intermediary_model(
self, cls)
... | Because django doesn't give us a nice way to provide
a through table without losing functionality. We have to
provide our own through table creation that uses the
FKToVersion field to be used for the from field. |
def register_trading_control(self, control):
if self.initialized:
raise RegisterTradingControlPostInit()
self.trading_controls.append(control) | Register a new TradingControl to be checked prior to order calls. |
def shape(cls, dataset):
if not dataset.data:
return (0, len(dataset.dimensions()))
rows, cols = 0, 0
ds = cls._inner_dataset_template(dataset)
for d in dataset.data:
ds.data = d
r, cols = ds.interface.shape(ds)
rows += r
return row... | Returns the shape of all subpaths, making it appear like a
single array of concatenated subpaths separated by NaN values. |
def _set_init_params(self, qrs_amp_recent, noise_amp_recent, rr_recent,
last_qrs_ind):
self.qrs_amp_recent = qrs_amp_recent
self.noise_amp_recent = noise_amp_recent
self.qrs_thr = max(0.25*self.qrs_amp_recent
+ 0.75*self.noise_amp_recent,
... | Set initial online parameters |
def importFile(self, srcUrl, sharedFileName=None):
self._assertContextManagerUsed()
return self._jobStore.importFile(srcUrl, sharedFileName=sharedFileName) | Imports the file at the given URL into job store.
See :func:`toil.jobStores.abstractJobStore.AbstractJobStore.importFile` for a
full description |
def loadSessions(self, callback, bare_jid, device_ids):
if self.is_async:
self.__loadSessionsAsync(callback, bare_jid, device_ids, {})
else:
return self.__loadSessionsSync(bare_jid, device_ids) | Return a dict containing the session for each device id. By default, this method
calls loadSession for each device id. |
def bitstring_probs_to_z_moments(p):
zmat = np.array([[1, 1],
[1, -1]])
return _apply_local_transforms(p, (zmat for _ in range(p.ndim))) | Convert between bitstring probabilities and joint Z moment expectations.
:param np.array p: An array that enumerates bitstring probabilities. When
flattened out ``p = [p_00...0, p_00...1, ...,p_11...1]``. The total number of elements must
therefore be a power of 2. The canonical shape has a separat... |
def _reduce_age(self, now):
if self.max_age:
keys = [
key for key, value in iteritems(self.data)
if now - value['date'] > self.max_age
]
for key in keys:
del self.data[key] | Reduce size of cache by date.
:param datetime.datetime now: Current time |
def files(self):
self._printer('\tFiles Walk')
for directory in self.directory:
for path in os.listdir(directory):
full_path = os.path.join(directory, path)
if os.path.isfile(full_path):
if not path.startswith('.'):
... | Return list of files in root directory |
def grammatical_join(l, initial_joins=", ", final_join=" and "):
return initial_joins.join(l[:-2] + [final_join.join(l[-2:])]) | Display a list of items nicely, with a different string before the final
item. Useful for using lists in sentences.
>>> grammatical_join(['apples', 'pears', 'bananas'])
'apples, pears and bananas'
>>> grammatical_join(['apples', 'pears', 'bananas'], initial_joins=";", final_join="; or ")
'apples; ... |
def get_output_content(job_id, max_size=1024, conn=None):
content = None
if RBO.index_list().contains(IDX_OUTPUT_JOB_ID).run(conn):
check_status = RBO.get_all(job_id, index=IDX_OUTPUT_JOB_ID).run(conn)
else:
check_status = RBO.filter({OUTPUTJOB_FIELD: {ID_FIELD: job_id}}).run(conn)
for s... | returns the content buffer for a job_id if that job output exists
:param job_id: <str> id for the job
:param max_size: <int> truncate after [max_size] bytes
:param conn: (optional)<connection> to run on
:return: <str> or <bytes> |
def ReadAllClientActionRequests(self, client_id, cursor=None):
query = ("SELECT request, UNIX_TIMESTAMP(leased_until), leased_by, "
"leased_count "
"FROM client_action_requests "
"WHERE client_id = %s")
cursor.execute(query, [db_utils.ClientIDToInt(client_id)])
ret = [... | Reads all client messages available for a given client_id. |
def confirm(prompt_str, default=False):
if default:
default_str = 'y'
prompt = '%s [Y/n]' % prompt_str
else:
default_str = 'n'
prompt = '%s [y/N]' % prompt_str
ans = click.prompt(prompt, default=default_str, show_default=False)
if ans.lower() in ('y', 'yes', 'yeah', 'yup'... | Show a confirmation prompt to a command-line user.
:param string prompt_str: prompt to give to the user
:param bool default: Default value to True or False |
def update_generators():
for generator in _GENERATOR_DB.keys():
install_templates_translations(generator)
add_variables_to_context(generator)
interlink_static_files(generator)
interlink_removed_content(generator)
interlink_translated_content(generator) | Update the context of all generators
Ads useful variables and translations into the template context
and interlink translations |
def cluster(
self, cluster_id, location_id=None, serve_nodes=None, default_storage_type=None
):
return Cluster(
cluster_id,
self,
location_id=location_id,
serve_nodes=serve_nodes,
default_storage_type=default_storage_type,
) | Factory to create a cluster associated with this instance.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_create_cluster]
:end-before: [END bigtable_create_cluster]
:type cluster_id: str
:param cluster_id: The ID of the cluster.
... |
def remove_negative_entries(A):
r
A = A.tocoo()
data = A.data
row = A.row
col = A.col
pos = data > 0.0
datap = data[pos]
rowp = row[pos]
colp = col[pos]
Aplus = coo_matrix((datap, (rowp, colp)), shape=A.shape)
return Aplus | r"""Remove all negative entries from sparse matrix.
Aplus=max(0, A)
Parameters
----------
A : (M, M) scipy.sparse matrix
Input matrix
Returns
-------
Aplus : (M, M) scipy.sparse matrix
Input matrix with negative entries set to zero. |
def get(cls, *args, **kwargs):
if len(args) == 1:
pk = args[0]
elif kwargs:
if len(kwargs) == 1 and cls._field_is_pk(list(kwargs.keys())[0]):
pk = list(kwargs.values())[0]
else:
result = cls.collection(**kwargs).sort(by='nosort')
... | Retrieve one instance from db according to given kwargs.
Optionnaly, one arg could be used to retrieve it from pk. |
def _create_netmap_config(self):
netmap_path = os.path.join(self.working_dir, "NETMAP")
try:
with open(netmap_path, "w", encoding="utf-8") as f:
for bay in range(0, 16):
for unit in range(0, 4):
f.write("{ubridge_id}:{bay}/{unit}{io... | Creates the NETMAP file. |
def add_modifier(self, modifier, keywords, relative_pos,
action, parameter=None):
if relative_pos == 0:
raise ValueError("relative_pos cannot be 0")
modifier_dict = self._modifiers.get(modifier, {})
value = (action, parameter, relative_pos)
for keyword in... | Modify existing tasks based on presence of a keyword.
Parameters
----------
modifier : str
A string value which would trigger the given Modifier.
keywords : iterable of str
sequence of strings which are keywords for some task,
which has to be modified... |
def refit(self, data, label, decay_rate=0.9, **kwargs):
if self.__set_objective_to_none:
raise LightGBMError('Cannot refit due to null objective function.')
predictor = self._to_predictor(copy.deepcopy(kwargs))
leaf_preds = predictor.predict(data, -1, pred_leaf=True)
nrow, nc... | Refit the existing Booster by new data.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for refit.
If string, it represents the path to txt file.
label : list, numpy 1-D array or pandas Series ... |
def warn(self, collection):
super(CodeElement, self).warn(collection)
if not "implicit none" in self.modifiers:
collection.append("WARNING: implicit none not set in {}".format(self.name)) | Checks the module for documentation and best-practice warnings. |
def regions(self):
regions = []
elem = self.dimensions["region"].elem
for option_elem in elem.find_all("option"):
region = option_elem.text.strip()
regions.append(region)
return regions | Get a list of all regions |
def sets(self, keyword, value):
if isinstance(value, str):
value = KQMLString(value)
self.set(keyword, value) | Set the element of the list after the given keyword as string.
Parameters
----------
keyword : str
The keyword parameter to find in the list.
Putting a colon before the keyword is optional, if no colon is
given, it is added automatically (e.g. "keyword" will ... |
def bet_place(
self,
betting_market_id,
amount_to_bet,
backer_multiplier,
back_or_lay,
account=None,
**kwargs
):
from . import GRAPHENE_BETTING_ODDS_PRECISION
assert isinstance(amount_to_bet, Amount)
assert back_or_lay in ["back", "lay"... | Place a bet
:param str betting_market_id: The identifier for the market to bet
in
:param peerplays.amount.Amount amount_to_bet: Amount to bet with
:param int backer_multiplier: Multipler for backer
:param str back_or_lay: "back" or "lay" the bet
... |
def init_properties(env='dev', app='unnecessary', **_):
aws_env = boto3.session.Session(profile_name=env)
s3client = aws_env.resource('s3')
generated = get_details(app=app, env=env)
archaius = generated.archaius()
archaius_file = ('{path}/application.properties').format(path=archaius['path'])
tr... | Make sure _application.properties_ file exists in S3.
For Applications with Archaius support, there needs to be a file where the
cloud environment variable points to.
Args:
env (str): Deployment environment/account, i.e. dev, stage, prod.
app (str): GitLab Project name.
Returns:
... |
def record_content_length(self):
untldict = py2dict(self)
untldict.pop('meta', None)
return len(str(untldict)) | Calculate length of record, excluding metadata. |
def _sd_of_eigenvector(data, vec, measurement='poles', bidirectional=True):
lon, lat = _convert_measurements(data, measurement)
vals, vecs = cov_eig(lon, lat, bidirectional)
x, y, z = vecs[:, vec]
s, d = stereonet_math.geographic2pole(*stereonet_math.cart2sph(x, y, z))
return s[0], d[0] | Unifies ``fit_pole`` and ``fit_girdle``. |
def list_datacenters(conn=None, call=None):
if call != 'function':
raise SaltCloudSystemExit(
'The list_datacenters function must be called with '
'-f or --function.'
)
datacenters = []
if not conn:
conn = get_conn()
for item in conn.list_datacenters()['it... | List all the data centers
CLI Example:
.. code-block:: bash
salt-cloud -f list_datacenters my-profitbricks-config |
def _contains_policies(self, resource_properties):
return resource_properties is not None \
and isinstance(resource_properties, dict) \
and self.POLICIES_PROPERTY_NAME in resource_properties | Is there policies data in this resource?
:param dict resource_properties: Properties of the resource
:return: True if we can process this resource. False, otherwise |
def configure(self, options, conf):
super(ProgressivePlugin, self).configure(options, conf)
if (getattr(options, 'verbosity', 0) > 1 and
getattr(options, 'enable_plugin_id', False)):
print ('Using --with-id and --verbosity=2 or higher with '
'nose-progressive c... | Turn style-forcing on if bar-forcing is on.
It'd be messy to position the bar but still have the rest of the
terminal capabilities emit ''. |
def insertPrimaryDataset(self):
try :
body = request.body.read()
indata = cjson.decode(body)
indata = validateJSONInputNoCopy("primds", indata)
indata.update({"creation_date": dbsUtils().getTime(), "create_by": dbsUtils().getCreateBy() })
self.dbsPrima... | API to insert A primary dataset in DBS
:param primaryDSObj: primary dataset object
:type primaryDSObj: dict
:key primary_ds_type: TYPE (out of valid types in DBS, MC, DATA) (Required)
:key primary_ds_name: Name of the primary dataset (Required) |
def kill(self, container, signal=None):
url = self._url("/containers/{0}/kill", container)
params = {}
if signal is not None:
if not isinstance(signal, six.string_types):
signal = int(signal)
params['signal'] = signal
res = self._post(url, params=p... | Kill a container or send a signal to a container.
Args:
container (str): The container to kill
signal (str or int): The signal to send. Defaults to ``SIGKILL``
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. |
def add_item(self, assessment_id, item_id):
if assessment_id.get_identifier_namespace() != 'assessment.Assessment':
raise errors.InvalidArgument
self._part_item_design_session.add_item(item_id, self._get_first_part_id(assessment_id)) | Adds an existing ``Item`` to an assessment.
arg: assessment_id (osid.id.Id): the ``Id`` of the
``Assessment``
arg: item_id (osid.id.Id): the ``Id`` of the ``Item``
raise: NotFound - ``assessment_id`` or ``item_id`` not found
raise: NullArgument - ``assessment_id`... |
def _surpress_formatting_errors(fn):
@wraps(fn)
def inner(*args, **kwargs):
try:
return fn(*args, **kwargs)
except ValueError:
return ""
return inner | I know this is dangerous and the wrong way to solve the problem, but when
using both row and columns summaries it's easier to just swallow errors
so users can format their tables how they need. |
def NumRegressors(npix, pld_order, cross_terms=True):
res = 0
for k in range(1, pld_order + 1):
if cross_terms:
res += comb(npix + k - 1, k)
else:
res += npix
return int(res) | Return the number of regressors for `npix` pixels
and PLD order `pld_order`.
:param bool cross_terms: Include pixel cross-terms? Default :py:obj:`True` |
async def set_action(self, on=None, bri=None, hue=None, sat=None, xy=None,
ct=None, alert=None, effect=None, transitiontime=None,
bri_inc=None, sat_inc=None, hue_inc=None, ct_inc=None,
xy_inc=None, scene=None):
data = {
key: ... | Change action of a group. |
def _fixedpoint(D, tol=1e-7, maxiter=None):
N, K = D.shape
logp = log(D).mean(axis=0)
a0 = _init_a(D)
if maxiter is None:
maxiter = MAXINT
for i in xrange(maxiter):
a1 = _ipsi(psi(a0.sum()) + logp)
if abs(loglikelihood(D, a1)-loglikelihood(D, a0)) < tol:
return a1... | Simple fixed point iteration method for MLE of Dirichlet distribution |
def download_file(self, regex, dest_dir):
log = logging.getLogger(self.cls_logger + '.download_file')
if not isinstance(regex, basestring):
log.error('regex argument is not a string')
return None
if not isinstance(dest_dir, basestring):
log.error('dest_dir arg... | Downloads a file by regex from the specified S3 bucket
This method takes a regular expression as the arg, and attempts
to download the file to the specified dest_dir as the
destination directory. This method sets the downloaded filename
to be the same as it is on S3.
:param reg... |
def tally(self, name, value):
value = value or 0
if 'used' not in self.usages[name]:
self.usages[name]['used'] = 0
self.usages[name]['used'] += int(value)
self.update_available(name) | Adds to the "used" metric for the given quota. |
def dysmetria_score(self, data_frame):
tap_data = data_frame[data_frame.action_type == 0]
ds = np.mean(np.sqrt((tap_data.x - tap_data.x_target) ** 2 + (tap_data.y - tap_data.y_target) ** 2))
duration = math.ceil(data_frame.td[-1])
return ds, duration | This method calculates accuracy of target taps in pixels
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return ds: dysmetria score in pixels
:rtype ds: float |
def queue_purge(self, queue, **kwargs):
return self.channel.queue_purge(queue=queue).message_count | Discard all messages in the queue. This will delete the messages
and results in an empty queue. |
def get_prefix_source(cls):
try:
return cls.override_prefix()
except AttributeError:
if hasattr(cls, '_prefix_source'):
return cls.site + cls._prefix_source
else:
return cls.site | Return the prefix source, by default derived from site. |
def create(self, auth, type, desc, defer=False):
return self._call('create', auth, [type, desc], defer) | Create something in Exosite.
Args:
auth: <cik>
type: What thing to create.
desc: Information about thing. |
def classify_elements(self,
file,
file_content_type=None,
model=None,
**kwargs):
if file is None:
raise ValueError('file must be provided')
headers = {}
if 'headers' in kwargs:
... | Classify the elements of a document.
Analyzes the structural and semantic elements of a document.
:param file file: The document to classify.
:param str file_content_type: The content type of file.
:param str model: The analysis model to be used by the service. For the **Element
... |
def start(self):
fname = self.conf['file']
logging.info("Configfile watcher plugin: Starting to watch route spec "
"file '%s' for changes..." % fname)
route_spec = {}
try:
route_spec = read_route_spec_config(fname)
if route_spec:
... | Start the configfile change monitoring thread. |
def is_product_owner(self, team_id):
    """Return whether the user is a PRODUCT_OWNER of the given team.

    Super admins are always considered product owners; otherwise the
    (normalized) team UUID must appear in the user's child teams.
    """
    if self.is_super_admin():
        return True
    # Normalize: accepts a UUID object or any string-convertible form.
    return uuid.UUID(str(team_id)) in self.child_teams_ids
def gen_file_jinja(self, template_file, data, output, dest_path):
if not os.path.exists(dest_path):
os.makedirs(dest_path)
output = join(dest_path, output)
logger.debug("Generating: %s" % output)
env = Environment()
env.loader = FileSystemLoader(self.TEMPLATE_DIR)
... | Fills data to the project template, using jinja2. |
def list_targets_by_instance(self, instance_id, target_list=None):
if target_list is not None:
return [target for target in target_list
if target['instance_id'] == instance_id]
else:
ports = self._target_ports_by_instance(instance_id)
reachable_sub... | Returns a list of FloatingIpTarget objects of FIP association.
:param instance_id: ID of target VM instance
:param target_list: (optional) a list returned by list_targets().
If specified, looking up is done against the specified list
to save extra API calls to a back-end. Otherw... |
def db(self, entity, query_filters="size=10"):
if self.entity_api_key == "":
return {'status': 'failure', 'response': 'No API key found in request'}
historic_url = self.base_url + "api/0.1.0/historicData?" + query_filters
historic_headers = {
"apikey": self.entity_api_key... | This function allows an entity to access the historic data.
Args:
entity (string): Name of the device to listen to
query_filters (string): Elastic search response format string
example, "pretty=true&size=10" |
def imagej_shape(shape, rgb=None):
shape = tuple(int(i) for i in shape)
ndim = len(shape)
if 1 > ndim > 6:
raise ValueError('invalid ImageJ hyperstack: not 2 to 6 dimensional')
if rgb is None:
rgb = shape[-1] in (3, 4) and ndim > 2
if rgb and shape[-1] not in (3, 4):
raise Va... | Return shape normalized to 6D ImageJ hyperstack TZCYXS.
Raise ValueError if not a valid ImageJ hyperstack shape.
>>> imagej_shape((2, 3, 4, 5, 3), False)
(2, 3, 4, 5, 3, 1) |
def write_array_empty(self, key, value):
    """Write a 0-len array as a one-element placeholder node.

    Stores a dummy array of the same rank under ``key`` and records the
    original dtype and shape as node attributes so the empty array can
    be reconstructed on read.
    """
    placeholder = np.empty((1,) * value.ndim)
    self._handle.create_array(self.group, key, placeholder)
    node = getattr(self.group, key)
    node._v_attrs.value_type = str(value.dtype)
    node._v_attrs.shape = value.shape
def pad(obj, pad_length):
    """Return a copy of the object with its piano-roll padded with zeros
    at the end along the time axis.

    Parameters
    ----------
    pad_length : int
        The length to pad along the time axis with zeros.
    """
    _check_supported(obj)
    padded = deepcopy(obj)
    padded.pad(pad_length)
    return padded
def map_block_storage(service, pool, image):
    """Map a RADOS block device for local use.

    Invokes ``rbd map`` on ``pool/image``, authenticating as *service*
    with the service's keyfile as the secret.
    """
    check_call([
        'rbd', 'map',
        '{}/{}'.format(pool, image),
        '--user', service,
        '--secret', _keyfile_path(service),
    ])
def walk(self):
intervals = sorted(self._intervals)
def nextFull():
start, stop = intervals.pop(0)
while intervals:
if intervals[0][0] <= stop:
_, thisStop = intervals.pop(0)
if thisStop > stop:
stop ... | Get the non-overlapping read intervals that match the subject.
@return: A generator that produces (TYPE, (START, END)) tuples, where
where TYPE is either self.EMPTY or self.FULL and (START, STOP) is
the interval. The endpoint (STOP) of the interval is not considered
to be in... |
def str_from_file(path):
    """Return the file contents at *path* as a stripped string."""
    with open(path) as handle:
        return handle.read().strip()
def xpathNsLookup(self, prefix):
    """Search the context's namespace declaration array for the
    namespace name associated with the given prefix."""
    return libxml2mod.xmlXPathNsLookup(self._o, prefix)
def _read_para_hip_mac_2(self, code, cbit, clen, *, desc, length, version):
_hmac = self._read_fileng(clen)
hip_mac_2 = dict(
type=desc,
critical=cbit,
length=clen,
hmac=_hmac,
)
_plen = length - clen
if _plen:
self._rea... | Read HIP HIP_MAC_2 parameter.
Structure of HIP HIP_MAC_2 parameter [RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.