docstring stringlengths 52 499 | function stringlengths 67 35.2k | __index_level_0__ int64 52.6k 1.16M |
|---|---|---|
def _error_and_gradient(self, x):
    """Compute the error and the gradient.

    This is the function optimized by :obj:`scipy.optimize.minimize`.

    Args:
        x (`array-like`): [`m` * `n`, ] matrix.

    Returns:
        `tuple`: containing:
            - Error (`float`)
            - Gradient (`np.array`), raveled from [`m`, `n`]
    """
    # Reshape the flat optimizer vector back into coordinate rows.
    positions = x.reshape((self.m, self.n))
    distances = squareform(pdist(positions))
    residual = self.D - distances
    err = self._error(residual)
    grad = self._gradient(residual, distances, positions)
    # minimize() expects a flat gradient vector matching x.
    return err, grad.ravel()
def from_optimize_result(cls, result, n, m, index=None):
    """Construct a Projection from the output of an optimization.

    Args:
        result (:py:class:`scipy.optimize.OptimizeResult`): Object
            returned by :py:func:`scipy.optimize.minimize`.
        n (`int`): Number of dimensions.
        m (`int`): Number of samples.
        index: Optional index for the coordinate DataFrame.

    Returns:
        A new instance built from the optimized coordinates, with its
        ``stress`` attribute set to the final objective value.
    """
    frame = pd.DataFrame(result.x.reshape((m, n)), index=index)
    instance = cls(frame)
    instance.stress = result.fun
    return instance
def wtime_to_minutes(time_string):
    """Convert a standard wallclock time string to minutes.

    Seconds are discarded and one extra minute of padding is added.

    Args:
        time_string (str): Time in ``HH:MM:SS`` format.

    Returns:
        int: Number of minutes (rounded up by one).
    """
    hh, mm, _ss = time_string.split(':')
    return 60 * int(hh) + int(mm) + 1
Initialize the Kubernetes execution provider class
Args:
- Config (dict): Dictionary with all the config options.
KWargs :
- channel (channel object) : default=None A channel object | def __init__(self, config, channel=None):
self.channel = channel
if not _kubernetes_enabled:
raise OptionalModuleMissing(['kubernetes'],
"Kubernetes provider requires kubernetes module and config.")
self.kube_client = client.Extensi... | 1,032,037 |
Submit a job
Args:
- cmd_string :(String) - Name of the container to initiate
- blocksize :(float) - Number of replicas
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
... | def submit(self, cmd_string, blocksize, job_name="parsl.auto"):
if not self.resources:
job_name = "{0}-{1}".format(job_name, time.time()).split(".")[0]
self.deployment_name = '{}-{}-deployment'.format(job_name,
str(ti... | 1,032,038 |
Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False. | def cancel(self, job_ids):
for job in job_ids:
logger.debug("Terminating job/proc_id : {0}".format(job))
# Here we are assuming that for local, the job_ids are the process id's
self._delete_deployment(job)
self.resources[job]['status'] = 'CANCELLED'
... | 1,032,039 |
Create a kubernetes deployment for the job.
Args:
- job_name (string) : Name of the job and deployment
- job_image (string) : Docker image to launch
KWargs:
- port (integer) : Container port
- replicas : Number of replica containers to maintain
... | def _create_deployment_object(self, job_name, job_image,
deployment_name, port=80,
replicas=1,
cmd_string=None,
engine_json_file='~/.ipython/profile_default/security/ipcontroller-engin... | 1,032,040 |
Get the status of a list of jobs identified by their ids.
Args:
- job_ids (List of ids) : List of identifiers for the jobs
Returns:
- List of status codes. | def status(self, job_ids):
logging.debug("Checking status of : {0}".format(job_ids))
for job_id in self.resources:
poll_code = self.resources[job_id]['proc'].poll()
if self.resources[job_id]['status'] in ['COMPLETED', 'FAILED']:
continue
if ... | 1,032,140 |
Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False. | def cancel(self, job_ids):
for job in job_ids:
logger.debug("Terminating job/proc_id : {0}".format(job))
# Here we are assuming that for local, the job_ids are the process id's
proc = self.resources[job]['proc']
os.killpg(os.getpgid(proc.pid), signal.SIG... | 1,032,143 |
Synchronously execute a commandline string on the shell.
Args:
- cmd (string) : Commandline string to execute
- walltime (int) : walltime in seconds, this is not really used now.
Returns:
- retcode : Return code from the execution, -1 on fail
- stdout : ... | def execute_no_wait(self, cmd, walltime, envs={}):
current_env = copy.deepcopy(self._envs)
current_env.update(envs)
try:
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.us... | 1,032,415 |
def clean(self, settings):
    """Filter given settings to keep only key names available in
    ``DEFAULT_SETTINGS``.

    Args:
        settings (dict): Loaded settings.

    Returns:
        dict: Filtered settings object.
    """
    filtered = {}
    for key, value in settings.items():
        if key in DEFAULT_SETTINGS:
            filtered[key] = value
    return filtered
def set_settings(self, settings):
    """Set every given setting as an object attribute.

    Args:
        settings (dict): Dictionary of settings.
    """
    for name in settings:
        setattr(self, name, settings[name])
def update(self, settings):
    """Update object attributes from given settings.

    Args:
        settings (dict): Dictionary of elements to update settings with.

    Returns:
        dict: Dictionary of all currently saved settings.
    """
    cleaned = self.clean(settings)
    # Merge into the internal dict, then mirror each item as an attribute.
    self._settings.update(cleaned)
    self.set_settings(cleaned)
    return self._settings
def init(deb1, deb2=False):
    """Initialize DEBUG and DEBUGALL.

    Allows other modules to set DEBUG and DEBUGALL, so their
    calls to dprint or dprintx generate output.

    Args:
        deb1 (bool): value of DEBUG to set.
        deb2 (bool): optional - value of DEBUGALL to set, defaults to False.
    """
    global DEBUG, DEBUGALL  # pylint: disable=global-statement
    DEBUG = deb1
    DEBUGALL = deb2
def dprintx(passeditem, special=False):
    """Print the item if DEBUGALL is set, optionally with PrettyPrint.

    Args:
        passeditem (str): item to print.
        special (bool): if True, print with pprint instead of plain print.
    """
    if not DEBUGALL:
        return
    if special:
        from pprint import pprint
        pprint(passeditem)
    else:
        print("%s%s%s" % (C_TI, passeditem, C_NORM))
def get_inst_info(qry_string):
    """Get details for instances that match the qry_string.

    Execute a query against the AWS EC2 client object, based on the
    contents of qry_string.

    Args:
        qry_string (str): keyword-argument text inserted into the
            ``EC2C.describe_instances(...)`` call.

    Returns:
        dict: raw information returned from AWS.
    """
    # SECURITY NOTE: qry_string is interpolated into source text and passed
    # to eval(); it must NEVER contain untrusted input. A safer design would
    # call EC2C.describe_instances(**kwargs) directly, but that changes the
    # caller contract, so the eval is kept and flagged here.
    qry_real = "EC2C.describe_instances(" + qry_string + ")"
    qry_results = eval(qry_real)  # pylint: disable=eval-used
    return qry_results
def get_all_aminames(i_info):
    """Get Image_Name for each instance in i_info.

    Args:
        i_info (dict): information on instances and details.

    Returns:
        dict: i_info with the ``aminame`` key added for each instance.
    """
    for key, details in i_info.items():
        try:
            # pylint: disable=maybe-no-member
            details['aminame'] = EC2R.Image(details['ami']).name
        except AttributeError:
            # Image lookup failed (e.g. AMI no longer exists).
            details['aminame'] = "Unknown"
    return i_info
def get_one_aminame(inst_img_id):
    """Get Image_Name for the image_id specified.

    Args:
        inst_img_id (str): image_id to get the name value from.

    Returns:
        str: name of the image, or "Unknown" if lookup fails.
    """
    try:
        return EC2R.Image(inst_img_id).name
    except AttributeError:
        return "Unknown"
def startstop(inst_id, cmdtodo):
    """Start or stop the specified instance.

    Args:
        inst_id (str): instance-id to perform command against.
        cmdtodo (str): command to perform ("start" or "stop").

    Returns:
        dict: response returned from AWS after performing the action.
    """
    target = EC2R.Instance(inst_id)
    # Dispatch dynamically so "start" and "stop" share one code path.
    action = getattr(target, cmdtodo)
    return action()
Export ranking to a file.
Args:
template_file_name (str): where is the template
(moustache template)
output_file_name (str): where create the file with the ranking
sort (str): field to sort the users | def export(self, template_file_name, output_file_name,
sort="public", data=None, limit=0):
exportedData = {}
exportedUsers = self.getSortedUsers()
template = self.__getTemplate(template_file_name)
position = 1
if not limit:
exportedData["users... | 1,033,216 |
def get_backend_engine(self, name, **kwargs):
    """Get backend engine from given name.

    Args:
        name (string): Engine name to look up.
        **kwargs: Keyword arguments passed to the engine constructor.

    Raises:
        boussole.exceptions.SettingsBackendError: If given backend name
            does not match any available engine.

    Returns:
        object: Instance of selected backend engine.
    """
    if name not in self._engines:
        # Fixed typo in user-facing error message ("unknowed" -> "unknown").
        msg = "Given settings backend is unknown: {}"
        raise SettingsBackendError(msg.format(name))
    return self._engines[name](**kwargs)
Validate that given paths are not the same.
Args:
(string): Path to validate.
Raises:
boussole.exceptions.SettingsInvalidError: If there is more than one
occurence of the same path.
Returns:
bool: ``True`` if paths are validated. | def valid_paths(self, *args):
for i, path in enumerate(args, start=0):
cp = list(args)
current = cp.pop(i)
if current in cp:
raise SettingsInvalidError("Multiple occurences finded for "
"path: {}".format(curr... | 1,033,419 |
Commit project structure and configuration file
Args:
sourcedir (string): Source directory path.
targetdir (string): Compiled files target directory path.
abs_config (string): Configuration file absolute path.
abs_sourcedir (string): ``sourcedir`` expanded as abs... | def commit(self, sourcedir, targetdir, abs_config, abs_sourcedir,
abs_targetdir):
config_path, config_filename = os.path.split(abs_config)
if not os.path.exists(config_path):
os.makedirs(config_path)
if not os.path.exists(abs_sourcedir):
os.makedi... | 1,033,421 |
Initialize app logger to configure its level/handler/formatter/etc..
Todo:
* A mean to raise click.Abort or sys.exit when CRITICAL is used;
Args:
level (str): Level name (``debug``, ``info``, etc..).
Keyword Arguments:
printout (bool): If False, logs will never be outputed.
R... | def init_logger(level, printout=True):
root_logger = logging.getLogger("boussole")
root_logger.setLevel(level)
# Redirect outputs to the void space, mostly for usage within unittests
if not printout:
from io import StringIO
dummystream = StringIO()
handler = logging.StreamH... | 1,033,441 |
def compile_dependencies(self, sourcepath, include_self=False):
    """Apply compile on all dependencies.

    Args:
        sourcepath (string): Sass source path to compile to its
            destination using project settings.

    Keyword Arguments:
        include_self (bool): If ``True`` the given sourcepath is added to
            the items to compile, else only its parents are.

    Returns:
        Iterator of truthy compile results (falsy results filtered out).
    """
    targets = self.inspector.parents(sourcepath)
    if include_self:
        # Also compile the file related to the triggering event itself.
        targets.add(sourcepath)
    results = [self.compile_source(target) for target in targets]
    return filter(None, results)
Called when a file or a directory is moved or renamed.
Many editors don't directly change a file, instead they make a
transitional file like ``*.part`` then move it to the final filename.
Args:
event: Watchdog event, either ``watchdog.events.DirMovedEvent`` or
``wat... | def on_moved(self, event):
if not self._event_error:
# We are only interested for final file, not transitional file
# from editors (like *.part)
pathtools_options = {
'included_patterns': self.patterns,
'excluded_patterns': self.ignore... | 1,033,536 |
def on_modified(self, event):
    """Called when a file or directory is modified.

    Args:
        event: Watchdog event, ``watchdog.events.DirModifiedEvent`` or
            ``watchdog.events.FileModifiedEvent``.
    """
    # Skip everything while a previous event left us in an error state.
    if self._event_error:
        return
    self.logger.info(u"Change detected from an edit on: %s",
                     event.src_path)
    self.compile_dependencies(event.src_path)
def on_deleted(self, event):
    """Called when a file or directory is deleted.

    Todo:
        May be bugged with inspector and sass compiler since the file does
        not exist anymore.

    Args:
        event: Watchdog event, ``watchdog.events.DirDeletedEvent`` or
            ``watchdog.events.FileDeletedEvent``.
    """
    if self._event_error:
        return
    self.logger.info(u"Change detected from deletion of: %s",
                     event.src_path)
    # Never try to compile the deleted source itself.
    self.compile_dependencies(event.src_path, include_self=False)
Convert a traceback (i.e. as returned by `tracebacks()`) into an alignment
(i.e. as returned by `align`).
Arguments:
tb: A traceback.
a: the sequence defining the rows in the traceback matrix.
b: the sequence defining the columns in the traceback matrix.
Returns: An iterable of (index, i... | def _traceback_to_alignment(tb, a, b):
# We subtract 1 from the indices here because we're translating from the
# alignment matrix space (which has one extra row and column) to the space
# of the input sequences.
for idx, direction in tb:
if direction == Direction.DIAG:
yield (i... | 1,033,629 |
def dump(self, content, filepath, indent=4):
    """Dump settings content to filepath as JSON.

    Args:
        content (str): Settings content.
        filepath (str): Settings file location.
        indent (int): JSON indentation width, defaults to 4.
    """
    serialized = json.dumps(content, indent=indent)
    with open(filepath, 'w') as destination:
        destination.write(serialized)
def parse(self, filepath, content):
    """Parse opened settings content using JSON parser.

    Args:
        filepath (str): Settings file location, used in error message.
        content (str): Settings content from opened file.

    Raises:
        boussole.exceptions.SettingsBackendError: If parser cannot decode
            the content.

    Returns:
        Parsed JSON object.
    """
    try:
        return json.loads(content)
    except ValueError:
        msg = "No JSON object could be decoded from file: {}"
        raise SettingsBackendError(msg.format(filepath))
Open a SCSS file (sourcepath) and find all involved file through
imports.
This will fill internal buffers ``_CHILDREN_MAP`` and ``_PARENTS_MAP``.
Args:
sourcepath (str): Source file path to start searching for imports.
Keyword Arguments:
library_paths (list): L... | def look_source(self, sourcepath, library_paths=None):
# Don't inspect again source that has allready be inspected as a
# children of a previous source
if sourcepath not in self._CHILDREN_MAP:
with io.open(sourcepath, 'r', encoding='utf-8') as fp:
finded_path... | 1,033,686 |
def children(self, sourcepath, recursive=True):
    """Recursively find all children that are imported from the given source
    path.

    Args:
        sourcepath (str): Source file path to search for.

    Keyword Arguments:
        recursive (bool): Switch to enable recursive finding (if True).
            Default to True.

    Returns:
        set: Children dependency paths.
    """
    # Bugfix: forward the caller's ``recursive`` flag; it was previously
    # hard-coded to True, making non-recursive lookups impossible.
    return self._get_recursive_dependancies(
        self._CHILDREN_MAP,
        sourcepath,
        recursive=recursive
    )
def parents(self, sourcepath, recursive=True):
    """Recursively find all parents that import the given source path.

    Args:
        sourcepath (str): Source file path to search for.

    Keyword Arguments:
        recursive (bool): Switch to enable recursive finding (if True).
            Default to True.

    Returns:
        set: Parent dependency paths.
    """
    # Bugfix: forward the caller's ``recursive`` flag; it was previously
    # hard-coded to True, making non-recursive lookups impossible.
    return self._get_recursive_dependancies(
        self._PARENTS_MAP,
        sourcepath,
        recursive=recursive
    )
def initialize_repository(path, spor_dir='.spor'):
    """Initialize a spor repository in `path` if one doesn't already exist.

    Args:
        path: Path to any file or directory within the repository.
        spor_dir: The name of the directory containing spor data.

    Returns: A `Repository` instance.

    Raises:
        ValueError: A repository already exists at `path`.
    """
    repo_root = pathlib.Path(path)
    data_dir = repo_root / spor_dir
    if data_dir.exists():
        raise ValueError('spor directory already exists: {}'.format(data_dir))
    data_dir.mkdir()
    return Repository(repo_root, spor_dir)
def open_repository(path, spor_dir='.spor'):
    """Open an existing repository.

    Args:
        path: Path to any file or directory within the repository.
        spor_dir: The name of the directory containing spor data.

    Returns: A `Repository` instance.

    Raises:
        ValueError: No repository is found.
    """
    repo_root = _find_root_dir(path, spor_dir)
    return Repository(repo_root, spor_dir)
def __getitem__(self, anchor_id):
    """Get an Anchor by ID.

    Args:
        anchor_id: The ID of the anchor to retrieve.

    Returns: An anchor instance.

    Raises:
        KeyError: The anchor can not be found.
    """
    anchor_file = self._anchor_path(anchor_id)
    try:
        with anchor_file.open(mode='rt') as handle:
            return load_anchor(handle, self.root)
    except OSError:
        # Map the filesystem failure to the mapping-protocol error.
        raise KeyError('No anchor with id {}'.format(anchor_id))
def __setitem__(self, anchor_id, anchor):
    """Update an anchor.

    This will update an existing anchor if it exists, or it will create
    new storage if not.

    Args:
        anchor_id: The ID of the anchor to update.
        anchor: The anchor to store.
    """
    anchor_file = self._anchor_path(anchor_id)
    with anchor_file.open(mode='wt') as handle:
        save_anchor(handle, anchor, self.root)
def __delitem__(self, anchor_id):
    """Remove an anchor from storage.

    Args:
        anchor_id: The ID of the anchor to remove.

    Raises:
        KeyError: There is no anchor with that ID.
    """
    anchor_file = self._anchor_path(anchor_id)
    try:
        anchor_file.unlink()
    except OSError:
        # Missing file means missing anchor in mapping terms.
        raise KeyError('No anchor with id {}'.format(anchor_id))
Patch a path to expand home directory and make absolute path.
Args:
settings (dict): Current settings.
name (str): Setting name.
value (str): Path to patch.
Returns:
str: Patched path to an absolute path. | def _patch_expand_path(self, settings, name, value):
if os.path.isabs(value):
return os.path.normpath(value)
# Expand home directory if any
value = os.path.expanduser(value)
# If the path is not yet an absolute directory, make it so from base
# directory if... | 1,034,146 |
def _patch_expand_paths(self, settings, name, value):
    """Apply ``SettingsPostProcessor._patch_expand_path`` to each element
    in a list.

    Args:
        settings (dict): Current settings.
        name (str): Setting name.
        value (list): List of paths to patch.

    Returns:
        list: Patched paths, each made absolute.
    """
    patched = []
    for entry in value:
        patched.append(self._patch_expand_path(settings, name, entry))
    return patched
Validate path exists
Args:
settings (dict): Current settings.
name (str): Setting name.
value (str): Path to validate.
Raises:
boussole.exceptions.SettingsInvalidError: If path does not exists.
Returns:
str: Validated path. | def _validate_path(self, settings, name, value):
if not os.path.exists(value):
raise SettingsInvalidError("Path from setting '{name}' does not "
"exists: {value}".format(
name=name,
... | 1,034,148 |
def _validate_paths(self, settings, name, value):
    """Apply ``SettingsPostProcessor._validate_path`` to each element in
    a list.

    Args:
        settings (dict): Current settings.
        name (str): Setting name.
        value (list): List of paths to validate.

    Raises:
        boussole.exceptions.SettingsInvalidError: Once a path does not
            exist.

    Returns:
        list: Validated paths.
    """
    validated = []
    for entry in value:
        validated.append(self._validate_path(settings, name, entry))
    return validated
def _validate_required(self, settings, name, value):
    """Validate a required setting (value can not be empty).

    Args:
        settings (dict): Current settings.
        name (str): Setting name.
        value (str): Required value to validate.

    Raises:
        boussole.exceptions.SettingsInvalidError: If value is empty.

    Returns:
        str: Validated value.
    """
    if value:
        return value
    raise SettingsInvalidError(("Required value from setting '{name}' "
                                "must not be "
                                "empty.").format(name=name))
Looks for a configuration file in 3 locations:
- the current directory
- the user config directory (~/.config/scriptabit)
- the version installed with the package (using setuptools resource API)
Args:
basename (str): The base filename.
Returns:
str: The full path to th... | def get_config_file(basename):
locations = [
os.path.join(os.curdir, basename),
os.path.join(
os.path.expanduser("~"),
".config",
"scriptabit",
basename),
resource_filename(
Requirement.parse("scriptabit"),
os.path.... | 1,034,325 |
Copies the default configuration file into the user config directory.
Args:
basename (str): The base filename.
clobber (bool): If True, the default will be written even if a user
config already exists.
dst_dir (str): The destination directory. | def copy_default_config_to_user_directory(
basename,
clobber=False,
dst_dir='~/.config/scriptabit'):
dst_dir = os.path.expanduser(dst_dir)
dst = os.path.join(dst_dir, basename)
src = resource_filename(
Requirement.parse("scriptabit"),
os.path.join('scriptabit', b... | 1,034,326 |
Export ranking to a file.
Args:
template_file_name (str): where is the template
(moustache template)
output_file_name (str): where create the file with the ranking
sort (str): field to sort the users | def export(self, template_file_name, output_file_name,
sort="public", data=None, limit=0):
exportedData = {}
exportedUsers = self.__exportUsers(sort, limit)
exportedData["users"] = exportedUsers
exportedData["extraData"] = data
with open(template_file_na... | 1,034,498 |
def enbase64(byte_str):
    """Encode bytes/strings to base64.

    Args:
        byte_str: The string or bytes to base64 encode.

    Returns:
        byte_str encoded as base64.
    """
    # Python 3's base64.b64encode() requires bytes input.
    if isinstance(byte_str, str) and not PYTHON2:
        byte_str = byte_str.encode('utf-8')
    return base64.b64encode(byte_str)
def debase64(byte_str):
    """Decode base64 encoded bytes/strings.

    Args:
        byte_str: The string or bytes to base64 decode.

    Returns:
        Decoded value; type str on python2 and type bytes on python3.
    """
    # Python 3's base64.b64decode() requires bytes input.
    if isinstance(byte_str, str) and not PYTHON2:
        byte_str = byte_str.encode('utf-8')
    return base64.b64decode(byte_str)
def check_password_hash(password, password_hash, salt, N=1 << 14, r=8, p=1, buflen=64):
    """Verify that the password hashes to password_hash with the given salt.

    Args:
        password: The password to perform the check on.
        password_hash: The stored hash to compare against.
        salt: Salt used when the stored hash was generated.
        N, r, p, buflen: scrypt parameters, must match hash generation.

    Returns:
        bool: True when the password matches.
    """
    candidate = generate_password_hash(password, salt, N, r, p, buflen)
    # Constant-time comparison to avoid timing side channels.
    return safe_str_cmp(password_hash, candidate)
Unquote given rule.
Args:
content (str): An import rule.
Raises:
InvalidImportRule: Raise exception if the rule is badly quoted
(not started or not ended quotes).
Returns:
string: The given rule unquoted. | def strip_quotes(self, content):
error_msg = "Following rule is badly quoted: {}"
if (content.startswith('"') and content.endswith('"')) or \
(content.startswith("'") and content.endswith("'")):
return content[1:-1]
# Quote starting but not ended
elif (con... | 1,034,757 |
Flatten returned import rules from regex.
Because import rules can contains multiple items in the same rule
(called multiline import rule), the regex ``REGEX_IMPORT_RULE``
return a list of unquoted items for each rule.
Args:
declarations (list): A SCSS source.
Retu... | def flatten_rules(self, declarations):
rules = []
for protocole, paths in declarations:
# If there is a protocole (like 'url), drop it
if protocole:
continue
# Unquote and possibly split multiple rule in the same declaration
rules... | 1,034,758 |
def parse(self, content):
    """Parse a stylesheet document with a regex (``REGEX_IMPORT_RULE``)
    to extract all import rules and return them.

    Args:
        content (str): A SCSS source.

    Returns:
        list: Found paths in import rules.
    """
    # Strip comments first so commented broken import rules are not caught.
    cleaned = self.remove_comments(content)
    declarations = self.REGEX_IMPORT_RULE.findall(cleaned)
    return self.flatten_rules(declarations)
def is_partial(self, filepath):
    """Check if file is a Sass partial source (see
    `Sass partials Reference`_).

    Args:
        filepath (str): A file path. Can be absolute, relative or just a
            filename.

    Returns:
        bool: True if file is a partial source, else False.
    """
    basename = os.path.basename(filepath)
    return basename.startswith('_')
def change_extension(self, filepath, new_extension):
    """Change final filename extension.

    Args:
        filepath (str): A file path (relative or absolute).
        new_extension (str): New extension name (without leading dot) to
            apply.

    Returns:
        str: Filepath with new extension.
    """
    base, _old_ext = os.path.splitext(filepath)
    return "{}.{}".format(base, new_extension)
def get_destination(self, filepath, targetdir=None):
    """Return destination path from given source file path.

    Destination is always a file with extension ``.css``.

    Args:
        filepath (str): A file path, usually relative to the sources
            directory.
        targetdir (str): Optional directory to join the destination onto.

    Returns:
        str: Destination path.
    """
    destination = self.change_extension(filepath, 'css')
    if targetdir:
        return os.path.join(targetdir, destination)
    return destination
def cmd_list(options):
    """Gather data for instances matching args and call the display func.

    Args:
        options (object): contains args and data from parser.
    """
    (i_info, param_str) = gather_data(options)
    if not i_info:
        print("No instances found with parameters: {}".format(param_str))
        return
    awsc.get_all_aminames(i_info)
    param_str = "Instance List - " + param_str + "\n"
    list_instances(i_info, param_str)
Start or Stop the specified instance.
Finds instances that match args and instance-state expected by the
command. Then, the target instance is determined, the action is
performed on the instance, and the return information is displayed.
Args:
options (object): contains args and data from parse... | def cmd_startstop(options):
statelu = {"start": "stopped", "stop": "running"}
options.inst_state = statelu[options.command]
debg.dprint("toggle set state: ", options.inst_state)
(i_info, param_str) = gather_data(options)
(tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command)
... | 1,034,870 |
Connect to the specified instance via ssh.
Finds instances that match the user specified args that are also
in the 'running' state. The target instance is determined, the
required connection information is retreived (IP, key and ssh
user-name), then an 'ssh' connection is made to the instance.
Ar... | def cmd_ssh(options):
import os
import subprocess
from os.path import expanduser
options.inst_state = "running"
(i_info, param_str) = gather_data(options)
(tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command)
home_dir = expanduser("~")
if options.user is None:
... | 1,034,871 |
Calculate instance login-username based on image-name.
Args:
tar_aminame (str): name of the image instance created with.
inst_name (str): name of the instance.
Returns:
username (str): name for ssh based on AMI-name. | def cmd_ssh_user(tar_aminame, inst_name):
if tar_aminame == "Unknown":
tar_aminame = inst_name
# first 5 chars of AMI-name can be anywhere in AMI-Name
userlu = {"ubunt": "ubuntu", "debia": "admin", "fedor": "root",
"cento": "centos", "openb": "root"}
usertemp = ['name'] + [val... | 1,034,872 |
Generate dictionary of results from query.
Decodes the large dict returned from the AWS query.
Args:
qry_results (dict): results from awsc.get_inst_info
Returns:
i_info (dict): information on instances and details. | def process_results(qry_results):
i_info = {}
for i, j in enumerate(qry_results['Reservations']):
i_info[i] = {'id': j['Instances'][0]['InstanceId']}
i_info[i]['state'] = j['Instances'][0]['State']['Name']
i_info[i]['ami'] = j['Instances'][0]['ImageId']
i_info[i]['ssh_key'] ... | 1,034,874 |
Create query from the args specified and command chosen.
Creates a query string that incorporates the args in the options
object, and creates the title for the 'list' function.
Args:
options (object): contains args and data from parser
Returns:
qry_string (str): the query to be used ag... | def qry_create(options):
qry_string = filt_end = param_str = ""
filt_st = "Filters=["
param_str_default = "All"
if options.id:
qry_string += "InstanceIds=['%s']" % (options.id)
param_str += "id: '%s'" % (options.id)
param_str_default = ""
if options.instname:
(... | 1,034,876 |
Display a list of all instances and their details.
Iterates through all the instances in the dict, and displays
information for each instance.
Args:
i_info (dict): information on instances and details.
param_str (str): the title to display before the list.
numbered (bool): optional... | def list_instances(i_info, param_str, numbered=False):
print(param_str)
for i in i_info:
if numbered:
print("Instance {}#{}{}".format(C_WARN, i + 1, C_NORM))
print(" {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}".
format(C_TI, C_NORM, C_STAT[i_inf... | 1,034,878 |
Display list of instances matching args and ask user to select target.
Instance list displayed and user asked to enter the number corresponding
to the desired target instance, or '0' to abort.
Args:
i_info (dict): information on instances and details.
command (str): command specified on th... | def user_picklist(i_info, command):
valid_entry = False
awsc.get_all_aminames(i_info)
list_instances(i_info, "", True)
msg_txt = ("Enter {0}#{1} of instance to {3} ({0}1{1}-{0}{4}{1})"
" [{2}0 aborts{1}]: ".format(C_WARN, C_NORM, C_TI,
comm... | 1,034,881 |
prepare compliant, normalized metadata from inputs
Args:
kwargs: key-value pairs for metadata fields.
Raises:
InvalidDatalakeMetadata if required fields are missing and cannot
be inferred. | def __init__(self, *args, **kwargs):
# we want to own all of our bits so we can normalize them without
# altering the caller's data unexpectedly. So deepcopy.
args = deepcopy(args)
kwargs = deepcopy(kwargs)
super(Metadata, self).__init__(*args, **kwargs)
self._en... | 1,034,932 |
def write_content(self, content, destination):
    """Write given content to destination path.

    Creates the needed directory structure first if it contains
    directories that do not already exist.

    Args:
        content (str): Content to write to target file.
        destination (str): Destination path for target file.

    Returns:
        str: The destination path.
    """
    parent = os.path.dirname(destination)
    if parent and not os.path.exists(parent):
        os.makedirs(parent)
    with io.open(destination, 'w', encoding='utf-8') as handle:
        handle.write(content)
    return destination
Check and return the final filepath to settings
Args:
path (str): Directory path where to search for settings file.
filename (str): Filename to use to search for settings file.
Raises:
boussole.exceptions.SettingsBackendError: If determined filepath
... | def check_filepath(self, path, filename):
settings_path = os.path.join(path, filename)
if not os.path.exists(settings_path) or \
not os.path.isfile(settings_path):
msg = "Unable to find settings file: {}"
raise SettingsBackendError(msg.format(settings_path))
... | 1,035,230 |
def open(self, filepath):
    """Open settings backend to return its content.

    Args:
        filepath (str): Settings file location.

    Returns:
        string: File content.
    """
    with io.open(filepath, 'r', encoding='utf-8') as handle:
        return handle.read()
def __init__(self, filename, asarfile, files, baseoffset):
    """Initialize a new instance of the :see AsarArchive class.

    Args:
        filename (str): The path to the *.asar file to read/write from/to.
        asarfile (File): An open *.asar file object.
        files (dict): Dictionary of files contained in the archive
            (parsed from the archive header).
        baseoffset (int): Offset at which file data begins.
    """
    # Plain attribute assignments; no I/O happens here.
    self.filename = filename
    self.asarfile = asarfile
    self.files = files
    self.baseoffset = baseoffset
def extract(self, destination):
    """Extract the contents of the archive to the specified directory.

    Args:
        destination (str): Path to a non-existing directory to extract
            the files to.

    Raises:
        OSError: If the destination already exists.
    """
    if os.path.exists(destination):
        # errno 20 mirrors the original error contract.
        raise OSError(20, 'Destination exists', destination)
    self.__extract_directory('.', self.files['files'], destination)
Extracts a single directory to the specified directory on disk.
Args:
path (str):
Relative (to the root of the archive) path of the directory
to extract.
files (dict):
A dictionary of files from a *.asar file header.
destinat... | def __extract_directory(self, path, files, destination):
# assures the destination directory exists
destination_path = os.path.join(destination, path)
if not os.path.exists(destination_path):
os.makedirs(destination_path)
for name, contents in files.items():
... | 1,035,472 |
Extracts the specified file to the specified destination.
Args:
path (str):
Relative (to the root of the archive) path of the
file to extract.
fileinfo (dict):
Dictionary containing the offset and size of the file
(Extract... | def __extract_file(self, path, fileinfo, destination):
if 'offset' not in fileinfo:
self.__copy_extracted(path, destination)
return
self.asarfile.seek(
self.__absolute_offset(fileinfo['offset'])
)
# TODO: read in chunks, ain't going to read... | 1,035,473 |
Copies a file that was already extracted to the destination directory.
Args:
path (str):
Relative (to the root of the archive) of the file to copy.
destination (str):
Directory to extract the archive to. | def __copy_extracted(self, path, destination):
unpacked_dir = self.filename + '.unpacked'
if not os.path.isdir(unpacked_dir):
LOGGER.warn(
'Failed to copy extracted file %s, no extracted dir',
path
)
return
source_pa... | 1,035,474 |
Opens a *.asar file and constructs a new :see AsarArchive instance.
Args:
filename (str):
Path to the *.asar file to open for reading.
Returns (AsarArchive):
An insance of of the :AsarArchive class or None if reading failed. | def open(cls, filename):
asarfile = open(filename, 'rb')
# uses google's pickle format, which prefixes each field
# with its total length, the first field is a 32-bit unsigned
# integer, thus 4 bytes, we know that, so we skip it
asarfile.seek(4)
header_size = ... | 1,035,476 |
def check_candidate_exists(self, basepath, candidates):
    """Check which candidates exist in a directory.

    Args:
        basepath (str): Directory path where to search for candidates.
        candidates (list): List of candidate file paths.

    Returns:
        list: Absolute paths of the existing candidates.
    """
    resolved = (os.path.join(basepath, item) for item in candidates)
    return [abspath for abspath in resolved if os.path.exists(abspath)]
def dump(self, content, filepath, indent=4):
    """Dump settings content to filepath as YAML.

    Args:
        content (str): Settings content.
        filepath (str): Settings file location.
        indent (int): YAML indentation width, defaults to 4.
    """
    with open(filepath, 'w') as destination:
        pyaml.dump(content, dst=destination, indent=indent)
def parse(self, filepath, content):
    """Parse opened settings content using YAML parser.

    Args:
        filepath (str): Settings file location, used in error message.
        content (str): Settings content from opened file.

    Raises:
        boussole.exceptions.SettingsBackendError: If parser can not decode
            the content.

    Returns:
        Parsed YAML document.
    """
    try:
        # Use SafeLoader: the bare yaml.load() default can construct
        # arbitrary Python objects from tags and is deprecated since
        # PyYAML 5.1; settings files never need that power.
        parsed = yaml.load(content, Loader=yaml.SafeLoader)
    except yaml.YAMLError as exc:
        msg = "No YAML object could be decoded from file: {}\n{}"
        raise SettingsBackendError(msg.format(filepath, exc))
    return parsed
Get the header lines of a vcf file
Args:
source(iterable): A vcf file
Returns:
head (HeaderParser): A headerparser object | def get_vcf_header(source):
head = HeaderParser()
#Parse the header lines
for line in source:
line = line.rstrip()
if line.startswith('#'):
if line.startswith('##'):
logger.debug("Found metadata line {0}".format(line))
head.parse_meta_data(lin... | 1,035,730 |
Constructor.
Args:
xml (str/file, default None): XML to be parsed. May be file-like
object.
resort (bool, default True): Sort the output alphabetically? | def __init__(self, xml=None, resort=True):
self.leader = None
self.oai_marc = False
self.controlfields = OrderedDict()
self.datafields = OrderedDict()
self.valid_i_chars = set(list(" 0123456789*"))
# resort output XML alphabetically
self.resorted = tools... | 1,035,738 |
Parse MARC XML document to dicts, which are contained in
self.controlfields and self.datafields.
Args:
xml (str or HTMLElement): input data
Also detect if this is oai marc format or not (see self.oai_marc).
if not isinstance(xml, HTMLElement):
xml = dhtmlparser.parseString(str(xml))
# check if there are any records
record = xml.find("record")
if not record:
raise ValueError("There is no <record> in your MARC XML document!")
... | 1,035,739 |
Parse control fields.
Args:
fields (list): list of HTMLElements
tag_id (str): parameter name, which holds the information, about
field name this is normally "tag", but in case of
oai_marc "id". | def _parse_control_fields(self, fields, tag_id="tag"):
def _parse_control_fields(self, fields, tag_id="tag"):
    """Collect control fields into :attr:`controlfields`.

    Args:
        fields (list): List of HTMLElements.
        tag_id (str): Name of the parameter which holds the field name;
            normally ``"tag"``, but ``"id"`` in case of OAI MARC.
    """
    for element in fields:
        attributes = element.params
        # elements without the tag parameter carry no field name
        if tag_id in attributes:
            field_name = attributes[tag_id]
            self.controlfields[field_name] = element.getContent().strip()
Parse data fields.
Args:
fields (list): of HTMLElements
tag_id (str): parameter name, which holds the information, about
field name this is normally "tag", but in case of
oai_marc "id"
sub_id (str): id of parameter, which h... | def _parse_data_fields(self, fields, tag_id="tag", sub_id="code"):
for field in fields:
params = field.params
if tag_id not in params:
continue
# take care of iX/indX (indicator) parameters
field_repr = OrderedDict([
[sel... | 1,035,741 |
This method is used mainly internally, but it can be handy if you work
with with raw MARC XML object and not using getters.
Args:
num (int): Which indicator you need (1/2).
is_oai (bool/None): If None, :attr:`.oai_marc` is
used.
Returns:
s... | def get_i_name(self, num, is_oai=None):
def get_i_name(self, num, is_oai=None):
    """Return the name of the indicator parameter.

    Mostly used internally, but handy when working with the raw MARC XML
    object instead of the getters.

    Args:
        num (int): Which indicator you need (1/2).
        is_oai (bool/None): If None, :attr:`.oai_marc` is used.

    Returns:
        str: ``"ind1"``/``"ind2"``, or ``"i1"``/``"i2"`` for OAI MARC.
    """
    if num not in (1, 2):
        raise ValueError("`num` parameter have to be 1 or 2!")
    if is_oai is None:
        is_oai = self.oai_marc
    prefix = "i" if is_oai else "ind"
    return prefix + str(num)
Method wrapper over :attr:`.controlfields` dictionary.
Args:
controlfield (str): Name of the controlfield.
alt (object, default None): Alternative value of the `controlfield`
when `controlfield` couldn't be found.
Returns:
str: record from given `con... | def get_ctl_field(self, controlfield, alt=None):
def get_ctl_field(self, controlfield, alt=None):
    """Method wrapper over the :attr:`.controlfields` dictionary.

    Args:
        controlfield (str): Name of the controlfield.
        alt (object, default None): Alternative value returned when
            `controlfield` couldn't be found.

    Returns:
        str: Record from given `controlfield`, or `alt` when the field
        is missing and an alternative was supplied.

    Raises:
        KeyError: If `controlfield` is missing and `alt` is None.
    """
    # Compare against None explicitly: the previous truthiness test
    # (`if not alt`) wrongly ignored falsy alternatives such as "" or 0
    # and raised KeyError instead of returning them.
    if alt is None:
        return self.controlfields[controlfield]
    return self.controlfields.get(controlfield, alt)
report scores and give a winner
|methcoro|
Args:
winner: :class:Participant instance
scores_csv: Comma separated set/game scores with player 1 score first (e.g. "1-3,3-0,3-2")
Raises:
ValueError: scores_csv has a wrong format
APIException | async def report_winner(self, winner: Participant, scores_csv: str):
async def report_winner(self, winner: Participant, scores_csv: str):
    """Report scores and declare a winner.

    |methcoro|

    Args:
        winner: :class:Participant instance
        scores_csv: Comma separated set/game scores with player 1 score
            first (e.g. "1-3,3-0,3-2")

    Raises:
        ValueError: scores_csv has a wrong format
        APIException
    """
    winner_id = winner._id
    await self._report(scores_csv, winner_id)
add a file as an attachment
|methcoro|
Warning:
|unstable|
Args:
file_path: path to the file you want to add
description: *optional* description for your attachment
Returns:
Attachment:
Raises:
ValueError: file_path... | async def attach_file(self, file_path: str, description: str = None) -> Attachment:
async def attach_file(self, file_path: str, description: str = None) -> Attachment:
    """Add a local file as an attachment.

    |methcoro|

    Warning:
        |unstable|

    Args:
        file_path: path to the file you want to add
        description: *optional* description for your attachment

    Returns:
        Attachment:

    Raises:
        ValueError: file_path must not be None
        APIException
    """
    with open(file_path, 'rb') as attachment_source:
        contents = attachment_source.read()
        return await self._attach(contents, description)
add an url as an attachment
|methcoro|
Args:
url: url you want to add
description: *optional* description for your attachment
Returns:
Attachment:
Raises:
ValueError: url must not be None
APIException | async def attach_url(self, url: str, description: str = None) -> Attachment:
async def attach_url(self, url: str, description: str = None) -> Attachment:
    """Add an url as an attachment.

    |methcoro|

    Args:
        url: url you want to add
        description: *optional* description for your attachment

    Returns:
        Attachment:

    Raises:
        ValueError: url must not be None
        APIException
    """
    attachment = await self._attach(url=url, description=description)
    return attachment
destroy a match attachment
|methcoro|
Args:
a: the attachment you want to destroy
Raises:
APIException | async def destroy_attachment(self, a: Attachment):
async def destroy_attachment(self, a: Attachment):
    """Destroy a match attachment.

    |methcoro|

    Args:
        a: the attachment you want to destroy

    Raises:
        APIException
    """
    endpoint = 'tournaments/{}/matches/{}/attachments/{}'.format(self._tournament_id, self._id, a._id)
    await self.connection('DELETE', endpoint)
    # drop the local reference too, tolerating attachments we never tracked
    try:
        self.attachments.remove(a)
    except ValueError:
        pass
Find the aggregated subobjects of an object.
These are the public attributes.
Args:
class_: The class whose subobjects to return.
Yields:
Tuples (name, type, required) describing subobjects. | def class_subobjects(
class_: Type) -> Generator[Tuple[str, Type, bool], None, None]:
argspec = inspect.getfullargspec(class_.__init__)
defaults = argspec.defaults if argspec.defaults else []
num_optional = len(defaults)
first_optional = len(argspec.args) - num_optional
for i, attr_nam... | 1,035,905 |
__init__
Defines attributes for Personator Object.
Args:
custID (str): ID for Melissa Data account | def __init__(self, custID):
def __init__(self, custID):
    """Create a Personator record holder for a Melissa Data account.

    Args:
        custID (str): ID for Melissa Data account.
    """
    self.custID = custID
    # address/contact fields are populated later from lookup results
    for field_name in ('addr1', 'addr2', 'city', 'postal', 'province',
                       'country', 'name', 'phone', 'recordID'):
        setattr(self, field_name, None)
parse_results
Parses the MelissaData response.
Args:
data (dict): Contains MelissaData response
Returns:
results, either contains a dict with corrected address info or -1 for an invalid address. | def parse_results(self, data):
results = []
if len(data["Records"]) < 1:
return -1
codes = data["Records"][0]["Results"]
for code in codes.split(","):
results.append(str(code))
self.addr1 = data["Records"][0]["AddressLine1"]
self.addr2 =... | 1,036,044 |
Executes given functions with given models.
Args:
models: models to execute
func: function name to execute
Returns: | def creating_schema_and_index(self, models, func):
waiting_models = []
self.base_thread.do_with_submit(func, models, waiting_models, threads=self.threads)
if waiting_models:
print("WAITING MODELS ARE CHECKING...")
self.creating_schema_and_index(waiting_models, fu... | 1,036,281 |
Creates search schemas.
Args:
model: model to execute
waiting_models: if riak can't return response immediately, model is taken to queue.
After first execution session, method is executed with waiting models and controlled.
And be ensured that all given models ar... | def create_schema(self, model, waiting_models):
bucket_name = model._get_bucket_name()
index_name = "%s_%s" % (settings.DEFAULT_BUCKET_TYPE, bucket_name)
ins = model(fake_context)
fields = self.get_schema_fields(ins._collect_index_fields())
new_schema = self.compile_sche... | 1,036,282 |
Creates search indexes.
Args:
model: model to execute
waiting_models: if riak can't return response immediately, model is taken to queue.
After first execution session, method is executed with waiting models and controlled.
And be ensured that all given models ar... | def create_index(self, model, waiting_models):
bucket_name = model._get_bucket_name()
bucket_type = client.bucket_type(settings.DEFAULT_BUCKET_TYPE)
index_name = "%s_%s" % (settings.DEFAULT_BUCKET_TYPE, bucket_name)
bucket = bucket_type.bucket(bucket_name)
try:
... | 1,036,283 |
Iterate over all ``<record>`` tags in `xml`.
Args:
xml (str/file): Input string with XML. UTF-8 is prefered encoding,
unicode should be ok.
Yields:
MARCXMLRecord: For each corresponding ``<record>``. | def record_iterator(xml):
# handle file-like objects
if hasattr(xml, "read"):
xml = xml.read()
dom = None
try:
dom = dhtmlparser.parseString(xml)
except UnicodeError:
dom = dhtmlparser.parseString(xml.encode("utf-8"))
for record_xml in dom.findB("record"):
... | 1,036,287 |
saves the model instance to riak
Args:
meta (dict): JSON serializable meta data for logging of save operation.
{'lorem': 'ipsum', 'dolar': 5}
index_fields (list): Tuple list for secondary indexing keys in riak (with 'bin' or 'int').
[('lorem','bin'),('dol... | def save_model(self, model, meta_data=None, index_fields=None):
def save_model(self, model, meta_data=None, index_fields=None):
    """Save the model instance to Riak through the adapter.

    Args:
        model: Model instance to persist.
        meta_data (dict): JSON serializable meta data for logging of the
            save operation, e.g. ``{'lorem': 'ipsum', 'dolar': 5}``.
        index_fields (list): Tuple list for secondary indexing keys in
            riak (with 'bin' or 'int'), e.g. ``[('lorem','bin')]``.

    Returns:
        Whatever the adapter returns for the save operation.
    """
    saved = self.adapter.save_model(model, meta_data, index_fields)
    return saved
Creates a model instance with the given data.
Args:
data: Model data returned from DB.
key: Object key
Returns:
pyoko.Model object. | def _make_model(self, data, key=None):
if data['deleted'] and not self.adapter.want_deleted:
raise ObjectDoesNotExist('Deleted object returned')
model = self._model_class(self._current_context,
_pass_perm_checks=self._pass_perm_checks)
mode... | 1,036,299 |
Applies query filters for excluding matching records from result set.
Args:
**filters: Query filters as keyword arguments.
Returns:
Self. Queryset object.
Examples:
>>> Person.objects.exclude(age=None)
>>> Person.objects.filter(name__startswith=... | def exclude(self, **filters):
def exclude(self, **filters):
    """Apply query filters that exclude matching records from the result set.

    Args:
        **filters: Query filters as keyword arguments.

    Returns:
        Queryset object.

    Examples:
        >>> Person.objects.exclude(age=None)
        >>> Person.objects.filter(name__startswith='jo').exclude(age__lte=16)
    """
    negated = {}
    for field_name, value in filters.items():
        # prefixing a key with "-" marks it as an exclusion filter
        negated['-%s' % field_name] = value
    return self.filter(**negated)
Deletes an object if it exists in database according to given query
parameters and returns True otherwise does nothing and returns False.
Args:
**kwargs: query parameters
Returns(bool): True or False | def delete_if_exists(self, **kwargs):
def delete_if_exists(self, **kwargs):
    """Delete the object matching the given query parameters, if any.

    Args:
        **kwargs: Query parameters.

    Returns:
        bool: True when a matching object was deleted, False otherwise.
    """
    # keep blocking_delete() inside the try so an ObjectDoesNotExist
    # raised by it is reported as False, matching the lookup failure
    try:
        match = self.get(**kwargs)
        match.blocking_delete()
    except ObjectDoesNotExist:
        return False
    return True
Returns list of dicts (field names as keys) for given fields.
Args:
\*args: List of fields to be returned as dict.
Returns:
list of dicts for given fields.
Example:
>>> Person.objects.filter(age__gte=16, name__startswith='jo').values('name', 'lastname') | def values(self, *args):
def values(self, *args):
    """Return a list of dicts (field names as keys) for given fields.

    Args:
        *args: Names of the fields to be returned in each dict.

    Returns:
        list of dicts for given fields.

    Example:
        >>> Person.objects.filter(age__gte=16, name__startswith='jo').values('name', 'lastname')
    """
    rows = self.values_list(flatten=False, *args)
    dict_rows = []
    for row in rows:
        dict_rows.append(dict(zip(args, row)))
    return dict_rows
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.