code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def is_taps_aff(self):
    """Return True when it is "taps aff" for this location.

    Queries the taps-aff.co.uk API. Raises IOError on a non-200 HTTP
    response and RuntimeError on an unparseable or unexpected body.
    """
    response = requests.get('https://www.taps-aff.co.uk/api/%s' % self.location)
    if response.status_code != 200:
        raise IOError("Failure downloading from Api")
    try:
        status = response.json()['taps']['status']
    except ValueError:
        raise RuntimeError("Unexpected response from service")
    if status == 'aff':
        return True
    if status == 'oan':
        return False
    raise RuntimeError("Unexpected taps value: %s" % status)
def checkAndCreate(self, key, payload,
                   hostgroupConf,
                   hostgroupParent,
                   puppetClassesId):
    """Check for hostgroup ``key`` and create it when missing, then attach
    puppet classes and params from ``hostgroupConf``.

    @param key: the hostgroup name or ID
    @param payload: description of the hostgroup used for creation
    @param hostgroupConf: hostgroup configuration (may contain 'classes'
        and 'params' sections)
    @param hostgroupParent: id of the parent hostgroup
        (NOTE(review): not referenced in this body -- confirm intent)
    @param puppetClassesId: dict of puppet class name -> foreman id
    @return: the hostgroup id on success, False on any failure
    """
    # Assignment triggers creation through the mapping's __setitem__.
    if key not in self:
        self[key] = payload
    oid = self[key]['id']
    if not oid:
        return False
    # Attach puppet classes, translating configured names to foreman ids.
    if 'classes' in hostgroupConf.keys():
        classList = list()
        for c in hostgroupConf['classes']:
            classList.append(puppetClassesId[c])
        if not self[key].checkAndCreateClasses(classList):
            print("Failed in classes")
            return False
    # Attach hostgroup parameters, when configured.
    if 'params' in hostgroupConf.keys():
        if not self[key].checkAndCreateParams(hostgroupConf['params']):
            print("Failed in params")
            return False
    return oid
check And Create procedure for an hostgroup
- check the hostgroup is not existing
- create the hostgroup
- Add puppet classes from puppetClassesId
- Add params from hostgroupConf
@param key: The hostgroup name or ID
@param payload: The description of the hostgroup
@param hostgroupConf: The configuration of the host group from the
foreman.conf
@param hostgroupParent: The id of the parent hostgroup
@param puppetClassesId: The dict of puppet classes ids in foreman
@return RETURN: The ItemHostsGroup object of an host |
def instance(*args, **kwargs):
    """Return the single shared Config instance, creating it on first use."""
    existing = getattr(Config, "_instance", None)
    if existing is None:
        existing = Config(*args, **kwargs)
        Config._instance = existing
    return existing
:returns: instance of Config |
def deploy_ext(self):
    """Deploy the external-modules tarball (salt-ext_mods.tgz), if any."""
    ext_mods = self.mods.get('file')
    if ext_mods:
        destination = os.path.join(self.thin_dir, 'salt-ext_mods.tgz')
        self.shell.send(ext_mods, destination)
    return True
def in_to_out(self, in_path, out_path=None):
    """Load file into an object, format it, and write the object out.

    If ``out_path`` is None or refers to the same file as ``in_path``,
    the file is edited in place via a temp file in the same directory.

    Args:
        in_path: str or path-like; a single existing source file.
        out_path: str or path-like destination, or None to in-place edit.
            Directory structure is created when it does not exist.

    Returns:
        None.
    """
    if is_same_file(in_path, out_path):
        # Same file: fall through to the temp-file in-place path below.
        logger.debug(
            "in path and out path are the same file. writing to temp "
            "file and then replacing in path with the temp file.")
        out_path = None
    logger.debug(f"opening source file: {in_path}")
    with open(in_path) as infile:
        obj = self.object_representer.load(infile)
    if out_path:
        logger.debug(
            f"opening destination file for writing: {out_path}")
        ensure_dir(out_path)
        with open(out_path, 'w') as outfile:
            self.object_representer.dump(outfile, self.formatter(obj))
        return
    else:
        logger.debug("opening temp file for writing...")
        # Temp file lives next to in_path so the final move stays on the
        # same filesystem.
        with NamedTemporaryFile(mode='w+t',
                                dir=os.path.dirname(in_path),
                                delete=False) as outfile:
            self.object_representer.dump(outfile, self.formatter(obj))
        logger.debug(f"moving temp file to: {in_path}")
        # infile.name survives close and equals in_path here.
        move_temp_file(outfile.name, infile.name)
If in_path and out_path point to the same thing it will in-place edit
and overwrite the in path. Even easier, if you do want to edit a file
in place, don't specify out_path, or set it to None.
Args:
in_path: str or path-like. Must refer to a single existing file.
out_path: str or path-like. Must refer to a single destination file
location. will create directory structure if it doesn't
exist.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
Returns:
None. |
def get_meta_references(self, datas):
    """Return the list of enabled reference names from the manifest.

    The rule must define either '--names' (explicit space-separated list)
    or '--auto' (enable everything available). Raises SerializerError
    when the rule is missing/empty or defines neither variable. Every
    returned name is validated first.
    """
    rule = datas.get(RULE_META_REFERENCES, {})
    if not rule:
        msg = "Manifest lacks of '.{}' or is empty"
        raise SerializerError(msg.format(RULE_META_REFERENCES))
    if rule.get('names', None):
        names = rule.get('names').split(" ")
    elif rule.get('auto', None):
        names = self.get_available_references(datas)
    else:
        msg = ("'.{}' either require '--names' or '--auto' variable "
               "to be defined")
        raise SerializerError(msg.format(RULE_META_REFERENCES))
    for name in names:
        self.validate_rule_name(name)
    return names
This required declaration is readed from
``styleguide-metas-references`` rule that require either a ``--names``
or ``--auto`` variable, each one define the mode to enable reference:
Manually
Using ``--names`` which define a list of names to enable, every
other non enabled rule will be ignored.
Section name (and so Reference name also) must not contains special
character nor ``-`` so they still be valid variable name for almost
any languages. For word separator inside name, use ``_``.
Automatic
Using ``--auto`` variable every reference rules will be enabled.
The value of this variable is not important since it is not empty.
If both of these variables are defined, the manual enable mode is used.
Arguments:
datas (dict): Data where to search for meta references declaration.
This is commonly the fully parsed manifest.
Returns:
list: A list of reference names. |
def _pretrain_layer_and_gen_feed(self, layer_obj, set_params_func,
                                 train_set, validation_set, graph):
    """Pretrain one autoencoder layer and encode the data for the next.

    :param layer_obj: layer model exposing fit/transform
    :param set_params_func: callback used to copy the learned parameters
        into the outer model after pretraining
    :param train_set: training set
    :param validation_set: validation set, or None
    :param graph: tf graph used for this layer
    :return: (encoded train data, encoded validation data or None)
    """
    # Unsupervised pretraining: the layer reconstructs its own input.
    layer_obj.fit(train_set, train_set,
                  validation_set, validation_set, graph=graph)
    with graph.as_default():
        # Hand the trained parameters back to the caller's model.
        set_params_func(layer_obj, graph)
        # Encodings become the next layer's inputs.
        next_train = layer_obj.transform(train_set, graph=graph)
        if validation_set is not None:
            next_valid = layer_obj.transform(validation_set, graph=graph)
        else:
            next_valid = None
    return next_train, next_valid
:param layer_obj: layer model
:param set_params_func: function used to set the parameters after
pretraining
:param train_set: training set
:param validation_set: validation set
:param graph: tf object for the rbm
:return: encoded train data, encoded validation data |
async def release_key(self, key, chat=None, user=None):
    """Release a blocked key from the current throttling bucket.

    :param key: bucket key to release
    :param chat: target chat (defaults to the current context)
    :param user: target user (defaults to the current context)
    :return: True if the key was present and removed, otherwise False
    :raises RuntimeError: when the storage has no Leaky Bucket support
    """
    if not self.storage.has_bucket():
        raise RuntimeError('This storage does not provide Leaky Bucket')
    if user is None and chat is None:
        user = types.User.get_current()
        chat = types.Chat.get_current()
    bucket = await self.storage.get_bucket(chat=chat, user=user)
    if bucket and key in bucket:
        # Bug fix: delete the requested key, not the literal string 'key'.
        del bucket[key]
        await self.storage.set_bucket(chat=chat, user=user, bucket=bucket)
        return True
    return False
:param key:
:param chat:
:param user:
:return: |
def _get_cursor(self):
    """Yield a MySQL cursor for the configured ext_pillar connection.

    Presumably wrapped by ``contextlib.contextmanager`` (decorator not
    visible here -- confirm); the connection is always closed on exit.
    Database errors are logged and swallowed.
    """
    _options = self._get_options()
    # Fresh connection per call; options come from the pillar config.
    conn = MySQLdb.connect(host=_options['host'],
                           user=_options['user'],
                           passwd=_options['pass'],
                           db=_options['db'], port=_options['port'],
                           ssl=_options['ssl'])
    cursor = conn.cursor()
    try:
        yield cursor
    except MySQLdb.DatabaseError as err:
        # Log and swallow: callers simply see no pillar data.
        log.exception('Error in ext_pillar MySQL: %s', err.args)
    finally:
        conn.close()
def titles2marc(self, key, values):
    """Populate the ``246`` MARC field from title values.

    The first value is appended to the ``245`` field as a side effect;
    the remaining values are returned for the ``246`` field.
    """
    def to_field(value):
        # Shared shape for both the 245 and 246 field entries.
        return {
            'a': value.get('title'),
            'b': value.get('subtitle'),
            '9': value.get('source'),
        }

    first, rest = values[0], values[1:]
    self.setdefault('245', []).append(to_field(first))
    return [to_field(value) for value in rest]
Also populates the ``245`` MARC field through side effects. |
def complete_pool_members(arg):
    """Complete member prefixes of the current pool."""
    prefixes = [member.prefix for member in Prefix.list({'pool_id': pool.id})]
    return _complete_string(arg, prefixes)
def main():
    """Primary Tarbell command dispatch: route CLI args to a subcommand."""
    command = Command.lookup(args.get(0))
    # Help requested, or no arguments at all.
    if len(args) == 0 or args.contains(('-h', '--help', 'help')):
        display_info(args)
        sys.exit(1)
    # Version banner.
    if args.contains(('-v', '--version')):
        display_version()
        sys.exit(1)
    # Known subcommand: strip its name and dispatch.
    if command:
        arg = args.get(0)
        args.remove(arg)
        command.__call__(command, args)
        sys.exit()
    # Unknown command: report and show usage.
    show_error(colored.red('Error! Unknown command \'{0}\'.\n'
               .format(args.get(0))))
    display_info(args)
    sys.exit(1)
def _ioctl(self, func, arg):
    """Run the given ioctl on the underlying fd (no-op on Windows).

    Raises WatchdogError when the device is closed; the underlying
    fcntl.ioctl may raise OSError/IOError on failure.
    """
    if self._fd is None:
        raise WatchdogError("Watchdog device is closed")
    if os.name == 'nt':
        return
    import fcntl
    fcntl.ioctl(self._fd, func, arg, True)
Raises WatchdogError if the device is closed.
Raises OSError or IOError (Python 2) when the ioctl fails. |
def get_state(self, sls, saltenv, cachedir=None):
    """Fetch a state file from the master into the local minion cache.

    Returns a dict with 'source' and 'dest' keys, or {} when the state
    cannot be found under either <sls>.sls or <sls>/init.sls.
    """
    # Dotted state names map onto directory paths.
    normalized = sls.replace('.', '/')
    candidates = (salt.utils.url.create(normalized + '.sls'),
                  salt.utils.url.create(normalized + '/init.sls'))
    for path in candidates:
        dest = self.cache_file(path, saltenv, cachedir=cachedir)
        if dest:
            return {'source': path, 'dest': dest}
    return {}
cache; return the location of the file |
def add_child(self, id_, child_id):
    """Add ``child_id`` as a child of node ``id_`` via a relationship.

    arg: id_ (osid.id.Id): the Id of the node
    arg: child_id (osid.id.Id): the Id of the new child
    raise: AlreadyExists - child_id is already a child of id_
    """
    # Refuse duplicates: a relationship of this genus between the two
    # peers must not already exist.
    if bool(self._rls.get_relationships_by_genus_type_for_peers(id_, child_id, self._relationship_type).available()):
        raise errors.AlreadyExists()
    # Build and persist the parent-child relationship form.
    rfc = self._ras.get_relationship_form_for_create(id_, child_id, [])
    rfc.set_display_name(str(id_) + ' to ' + str(child_id) + ' Parent-Child Relationship')
    rfc.set_description(self._relationship_type.get_display_name().get_text() + ' relationship for parent: ' + str(id_) + ' and child: ' + str(child_id))
    rfc.set_genus_type(self._relationship_type)
    self._ras.create_relationship(rfc)
arg: id (osid.id.Id): the ``Id`` of the node
arg: child_id (osid.id.Id): the ``Id`` of the new child
raise: AlreadyExists - ``child_id`` is already a child of
``id``
raise: NotFound - ``id`` or ``child_id`` not found
raise: NullArgument - ``id`` or ``child_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
def steal_page(self, page):
    """Steal a page from another document and append it to this one."""
    if page.doc == self:
        # Already ours; nothing to do.
        return
    self.fs.mkdir_p(self.path)
    target = ImgPage(self, self.nb_pages)
    logger.info("%s --> %s" % (str(page), str(target)))
    target._steal_content(page)
def get_dataset_meta(label):
    """Return metadata for the dataset selected by ``label``.

    :param label: key into the module-level ``data_urls`` dict
    :return: tuple ``(data_url, url, expected_hash, hash_path,
        relative_download_dir)``; missing entries default to None and
        ``hash_path`` falls back to ``label``.
    """
    data_url = data_urls[label]
    # isinstance instead of type() ==: also accepts subclasses.
    if isinstance(data_url, str):
        data_url = [data_url]
    if isinstance(data_url, list):
        # Pad with Nones so the unpacking below always sees four values.
        data_url.extend([None, None, None, None])
        data_url = data_url[:4]
    url, expected_hash, hash_path, relative_download_dir = data_url
    if hash_path is None:
        hash_path = label
    return data_url, url, expected_hash, hash_path, relative_download_dir
:param label: label = key in data_url dict (that big dict containing all possible datasets)
:return: tuple (data_url, url, expected_hash, hash_path, relative_download_dir)
relative_download_dir says where will be downloaded the file from url and eventually unzipped |
async def async_delete_device(self, device_id: int) -> None:
    """Delete an enrolled device.

    :param device_id: unique identifier of the device to be deleted
    :raises ValueError: when the unit reports the device as not found
    """
    device = self._devices[device_id]
    # Look the device up on the unit first to learn its current index.
    response = await self._protocol.async_execute(
        GetDeviceCommand(device.category, device.group_number, device.unit_number))
    if isinstance(response, DeviceInfoResponse):
        response = await self._protocol.async_execute(
            DeleteDeviceCommand(device.category, response.index))
        if isinstance(response, DeviceDeletedResponse):
            self._devices._delete(device)
            if self._on_device_deleted:
                try:
                    self._on_device_deleted(self, device)
                except Exception:
                    # A user callback must not break the delete flow.
                    _LOGGER.error(
                        "Unhandled exception in on_device_deleted callback",
                        exc_info=True)
    if isinstance(response, DeviceNotFoundResponse):
        raise ValueError("Device to be deleted was not found")
:param device_id: unique identifier for the device to be deleted |
def hbar_stack(self, stackers, **kw):
    """Generate multiple ``HBar`` renderers stacked left to right.

    ``stackers`` is a sequence of data source field names stacked
    successively for the left/right bar coordinates; remaining keyword
    arguments are forwarded to each ``hbar`` call. Returns the list of
    created renderers.
    """
    return [self.hbar(**stacked_kw)
            for stacked_kw in _double_stack(stackers, "left", "right", **kw)]
Args:
stackers (seq[str]) : a list of data source field names to stack
successively for ``left`` and ``right`` bar coordinates.
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``hbar``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2106* and *2017*, then the following call to ``hbar_stack`` will
will create two ``HBar`` renderers that stack:
.. code-block:: python
p.hbar_stack(['2016', '2017'], x=10, width=0.9, color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.hbar(bottom=stack(), top=stack('2016'), x=10, width=0.9, color='blue', source=source, name='2016')
p.hbar(bottom=stack('2016'), top=stack('2016', '2017'), x=10, width=0.9, color='red', source=source, name='2017') |
def initialize(self, size=0):
    """Create (or reopen) the file on storage and truncate it to ``size``.

    On truncation failure the file is removed and the error re-raised.
    Returns a ``(fileurl, size, checksum)`` tuple; checksum is None.
    """
    fs, path = self._get_fs()
    # Reopen existing files for update; otherwise create fresh.
    mode = 'r+b' if fs.exists(path) else 'wb'
    fp = fs.open(path, mode=mode)
    try:
        fp.truncate(size)
    except Exception:
        fp.close()
        self.delete()
        raise
    finally:
        fp.close()
    self._size = size
    return self.fileurl, size, None
def geopy_geolocator():
    """Lazily create and cache the geopy ``Nominatim`` geocoder.

    Returns the cached instance, or None when geopy is not installed.
    """
    global geolocator
    if geolocator is None:
        try:
            from geopy.geocoders import Nominatim
        except ImportError:
            # geopy is an optional dependency; signal absence with None.
            return None
        geolocator = Nominatim(user_agent=geolocator_user_agent)
    # Single exit point (the original had a redundant duplicate return).
    return geolocator
`Nominatim` geocode and returns an instance of it, taking ~2 us. |
def set_buf_size(fd):
    """Grow the OS pipe buffer for ``fd`` when the platform supports it."""
    if not OS_PIPE_SZ:
        return
    if hasattr(fcntl, 'F_SETPIPE_SZ'):
        fcntl.fcntl(fd, fcntl.F_SETPIPE_SZ, OS_PIPE_SZ)
def eat_string(self, string):
    """Advance the cursor by ``string``'s length, tracking rows/columns.

    Returns None without moving when the string would run past the end
    of the input. A newline resets the column and bumps the row counter;
    ``eos`` is set once no input remains.
    """
    if self.eos or self.pos + len(string) > self.length:
        return None
    new_col = self.col
    new_row = self.row
    for char in string:
        new_col += 1
        if char == '\n':
            new_col = 0
            new_row += 1
    self.pos = self.pos + len(string)
    self.col = new_col
    self.row = new_row
    if not self.has_space():
        self.eos = 1
def list():
    """Print a listing of known running TensorBoard instances."""
    infos = manager.get_all()
    if not infos:
        print("No known TensorBoard instances running.")
        return
    print("Known TensorBoard instances:")
    # Template is loop-invariant; build it once.
    template = " - port {port}: {data_source} (started {delta} ago; pid {pid})"
    for info in infos:
        print(template.format(port=info.port,
                              data_source=manager.data_source_from_info(info),
                              delta=_time_delta_from_info(info),
                              pid=info.pid))
TensorBoard instances that were killed uncleanly (e.g., with SIGKILL
or SIGQUIT) may appear in this list even if they are no longer
running. Conversely, this list may be missing some entries if your
operating system's temporary directory has been cleared since a
still-running TensorBoard instance started. |
def clear(self):
    """Delete every cached file from disk and re-initialize the cache."""
    self.log(u"Clearing cache...")
    for handler, info in self.cache.values():
        self.log([u" Removing file '%s'", info])
        gf.delete_file(handler, info)
    self._initialize_cache()
    self.log(u"Clearing cache... done")
def associate(self, floating_ip_id, port_id):
    """Associate the floating IP with a VNIC target.

    ``port_id`` here is a target_id of the form '<port>_<fixed-ip>'
    (as returned by list_targets et al.), not a raw neutron port ID.
    """
    neutron_port_id, fixed_ip = port_id.split('_', 1)
    body = {'floatingip': {'port_id': neutron_port_id,
                           'fixed_ip_address': fixed_ip}}
    self.client.update_floatingip(floating_ip_id, body)
``port_id`` represents a VNIC of an instance.
``port_id`` argument is different from a normal neutron port ID.
A value passed as ``port_id`` must be one of target_id returned by
``list_targets``, ``get_target_by_instance`` or
``list_targets_by_instance`` method. |
def items(self, offset=None, limit=20, since=None, before=None, *args, **kwargs):
    """Return this feed's items as an ItemList.

    :param offset: number of items to skip before returning data
    :param limit: maximum number of items to return
    :param since: only items added after this id (ordered old -> new)
    :param before: only items added before this id (ordered new -> old)
    """
    feed_items = ItemList(self, offset=offset, limit=limit, since=since,
                          before=before, cached=self.is_cached)
    return feed_items
:param offset: Amount of items to skip before returning data
:param since: Return items added after this id (ordered old -> new)
:param before: Return items added before this id (ordered new -> old)
:param limit: Amount of items to return |
def add_note(self, content):
    """Attach a note to this project (requires Todoist premium).

    :param content: the note text
    :type content: str
    """
    _perform_command(self.owner, 'note_add',
                     {'project_id': self.id, 'content': content})
.. warning:: Requires Todoist premium.
:param content: The note content.
:type content: str
>>> from pytodoist import todoist
>>> user = todoist.login('john.doe@gmail.com', 'password')
>>> project = user.get_project('PyTodoist')
>>> project.add_note('Remember to update to the latest version.') |
def Sign(self, data, signing_key, verify_key=None):
    """Sign ``data`` and store signature, digest and data on this blob.

    Args:
        data: bytes containing the blob data.
        signing_key: the key used to produce the RSA PKCS1v15 signature.
        verify_key: key used to verify; defaults to the signing key's own
            public key.

    Returns:
        self, for call chaining.
    """
    if signing_key.KeyLen() < 2048:
        # Weak key: only warns -- signing still proceeds below.
        logging.warning("signing key is too short.")
    self.signature = signing_key.Sign(data)
    self.signature_type = self.SignatureType.RSA_PKCS1v15
    self.digest = hashlib.sha256(data).digest()
    self.digest_type = self.HashType.SHA256
    self.data = data
    if verify_key is None:
        verify_key = signing_key.GetPublicKey()
    # Sanity check: verification failure raises before we return.
    self.Verify(verify_key)
    return self
Args:
data: String containing the blob data.
signing_key: The key to sign with.
verify_key: Key to verify with. If None we assume the signing key also
contains the public key.
Returns:
self for call chaining. |
def log_normal(self, x):
    """Log-density of this Gaussian at ``x``.

    :param x: array of shape (d,), or (n, d) for n vectors at once
    :return: scalar log-probability, or an array of n log-probabilities
    """
    dim = self.mu.shape[0]
    centered = x - self.mu
    if len(x.shape) == 1:
        quad = numpy.sum(numpy.multiply(centered, numpy.dot(self.inv, centered)))
    else:
        quad = numpy.sum(numpy.multiply(centered, numpy.dot(centered, self.inv)), axis=1)
    return -.5 * (dim * numpy.log(2 * numpy.pi) + numpy.log(self.det) + quad)
array of all log probabilities if many vectors are given.
@param x : may be of (n,) shape |
def _validate_alias_command_level(alias, command):
    """Ensure a reserved-command alias does not collide with its target.

    e.g. 'dns' -> 'network dns' is valid (different levels), while
    'list' -> 'show' is not (both reserved at the same level).

    Args:
        alias: the name of the alias.
        command: the command the alias points to.

    Raises:
        CLIError: when alias and command collide at the same level.
    """
    alias_collision_table = AliasManager.build_collision_table([alias])
    # A non-reserved alias cannot collide with anything.
    if not alias_collision_table:
        return
    command_collision_table = AliasManager.build_collision_table([command])
    alias_collision_levels = alias_collision_table.get(alias.split()[0], [])
    command_collision_levels = command_collision_table.get(command.split()[0], [])
    # The same first word appearing at the same tree level is a clash.
    if set(alias_collision_levels) & set(command_collision_levels):
        raise CLIError(COMMAND_LVL_ERROR.format(alias, command))
in the command tree does not conflict in levels.
e.g. 'dns' -> 'network dns' is valid because dns is a level 2 command and network dns starts at level 1.
However, 'list' -> 'show' is not valid because list and show are both reserved commands at level 2.
Args:
alias: The name of the alias.
command: The command that the alias points to. |
def asarray2d(a):
    """Cast ``a`` to a 2-d numpy array.

    1-d input becomes a single column of shape (n, 1); 0-d (scalar)
    input becomes a (1, 1) array; input of rank >= 2 is returned as-is.
    """
    arr = np.asarray(a)
    if arr.ndim == 1:
        # Column vector, matching the established behavior for 1-d input.
        arr = arr.reshape(-1, 1)
    elif arr.ndim == 0:
        # Edge case the original missed: scalars stayed 0-d.
        arr = arr.reshape(1, 1)
    return arr
def metadata_to_double_percent_options(metadata):
    """Serialize cell metadata into a double-percent options string.

    Consumes (pops) 'cell_depth', 'title' and 'cell_type' from
    ``metadata``; any remaining keys are appended as JSON.
    """
    parts = []
    if 'cell_depth' in metadata:
        parts.append('%' * metadata.pop('cell_depth'))
    if 'title' in metadata:
        parts.append(metadata.pop('title'))
    if 'cell_type' in metadata:
        parts.append('[{}]'.format(metadata.pop('cell_type')))
    remainder = metadata_to_json_options(metadata)
    if remainder != '{}':
        parts.append(remainder)
    return ' '.join(parts)
def external_dependencies(self):
    """Yield the external images this Dockerfile depends on.

    External images are the entries of ``self.dependent_images`` given
    as plain strings (images not defined in this configuration). Each
    image is yielded at most once, in first-seen order.
    """
    seen = set()
    for dep in self.dependent_images:
        # set membership is O(1); the original used a list (O(n) scans).
        if isinstance(dep, six.string_types) and dep not in seen:
            seen.add(dep)
            yield dep
These are images from self.dependent_images that aren't defined in this configuration. |
def lock(self):
    """Lock the card by issuing SCardBeginTransaction on the connection.

    Walks the decorator/component chain until the concrete
    PCSCCardConnection is found, then begins a PC/SC transaction.
    Raises CardConnectionException when the transaction cannot start.
    """
    component = self.component
    while True:
        if isinstance(
            component,
            smartcard.pcsc.PCSCCardConnection.PCSCCardConnection):
            hresult = SCardBeginTransaction(component.hcard)
            if 0 != hresult:
                raise CardConnectionException(
                    'Failed to lock with SCardBeginTransaction: ' +
                    SCardGetErrorMessage(hresult))
            else:
                pass
            break
        if hasattr(component, 'component'):
            # Unwrap one decorator layer and keep searching.
            component = component.component
        else:
            # Innermost component reached without finding a PCSC
            # connection; nothing to lock.
            break
def load_train_file(config_file_path):
    """Load and parse a YAML file describing a Train object.

    Also publishes the PYLEARN2_TRAIN_* environment variables derived
    from the config path, so the YAML may reference them.

    :param config_file_path: path to the .yaml file
    :return: the parsed Train object
    """
    from pylearn2.config import yaml_parse
    suffix_to_strip = '.yaml'
    # Stem = path without the trailing .yaml extension, when present.
    if config_file_path.endswith(suffix_to_strip):
        config_file_full_stem = config_file_path[0:-len(suffix_to_strip)]
    else:
        config_file_full_stem = config_file_path
    for varname in ["PYLEARN2_TRAIN_FILE_NAME",
                    "PYLEARN2_TRAIN_FILE_FULL_STEM"]:
        # NOTE(review): putenv does not update the os.environ mapping
        # itself -- confirm only child processes read these variables.
        environ.putenv(varname, config_file_full_stem)
    # Directory part via '/'-splitting (POSIX-style paths assumed).
    directory = config_file_path.split('/')[:-1]
    directory = '/'.join(directory)
    if directory != '':
        directory += '/'
    environ.putenv("PYLEARN2_TRAIN_DIR", directory)
    environ.putenv("PYLEARN2_TRAIN_BASE_NAME", config_file_path.split('/')[-1] )
    environ.putenv("PYLEARN2_TRAIN_FILE_STEM", config_file_full_stem.split('/')[-1] )
    return yaml_parse.load_path(config_file_path)
Publishes the relevant training environment variables |
def chainproperty(func):
    """Register ``func`` as a custom chain property on AssertionBuilder."""
    prop = assertionproperty(func)
    setattr(AssertionBuilder, prop.fget.__name__, prop)
    return prop
def config_insync(self):
    """Return whether the running configuration matches the one on disk.

    Returns:
        bool (an explicit None from the endpoint is reported as False)
    """
    in_sync = self.get('config/insync').get('configInSync', False)
    return False if in_sync is None else in_sync
configuration is the same as that on disk.
Returns:
bool |
def create_hammersley_samples(order, dim=1, burnin=-1, primes=()):
    """Create a Hammersley point set with shape (dim, order).

    The first dim-1 axes come from the Halton sequence; the final axis
    is a regular open grid on (0, 1). For ``dim == 1`` this falls back
    to the Van Der Corput / Halton sequence.
    """
    if dim == 1:
        return create_halton_samples(
            order=order, dim=1, burnin=burnin, primes=primes)
    samples = numpy.empty((dim, order), dtype=float)
    samples[:-1] = create_halton_samples(
        order=order, dim=dim-1, burnin=burnin, primes=primes)
    # Endpoints of linspace are dropped to stay inside the open interval.
    samples[-1] = numpy.linspace(0, 1, order+2)[1:-1]
    return samples
For ``dim == 1`` the sequence falls back to Van Der Corput sequence.
Args:
order (int):
The order of the Hammersley sequence. Defines the number of samples.
dim (int):
The number of dimensions in the Hammersley sequence.
burnin (int):
Skip the first ``burnin`` samples. If negative, the maximum of
``primes`` is used.
primes (tuple):
The (non-)prime base to calculate values along each axis. If
empty, growing prime values starting from 2 will be used.
Returns:
(numpy.ndarray):
Hammersley set with ``shape == (dim, order)``. |
def _parse_roles(self):
    """Build {flask_role: set(keystone_roles)} from the flat oslo config.

    The ini format cannot express nested mappings, so roles arrive as a
    flat keystone->flask mapping and are inverted/grouped here.
    """
    mapping = {}
    for keystone_role, flask_role in self.config.roles.items():
        if flask_role not in mapping:
            mapping[flask_role] = set()
        mapping[flask_role].add(keystone_role)
    return mapping
Due to limitations in ini format, it's necessary to specify
roles in a flatter format than a standard dictionary. This
function serves to transform these roles into a standard
python dictionary. |
def memoized_property(fget):
    """Decorator turning ``fget`` into a lazily-computed, cached property."""
    cache_attr = '_{}'.format(fget.__name__)

    @functools.wraps(fget)
    def wrapper(self):
        # Compute once, then serve the cached value from the instance.
        if not hasattr(self, cache_attr):
            setattr(self, cache_attr, fget(self))
        return getattr(self, cache_attr)

    return property(wrapper)
def amplitude(self, caldb, calv, atten=0):
    """Voltage amplitude for this stimulus given a calibration point.

    :param caldb: calibration intensity in dB SPL
    :param calv: voltage recorded at the calibration intensity
    :param atten: additional attenuation in dB
    """
    db_above_cal = float(self._intensity + atten - caldb)
    return (10 ** (db_above_cal / 20)) * calv
internal intensity value and the given reference intensity & voltage
:param caldb: calibration intensity in dbSPL
:type caldb: float
:param calv: calibration voltage that was used to record the intensity provided
:type calv: float |
def get_override_votes(self, obj):
    """Return manually-entered (override) votes for ``obj``.

    Only used when ``obj.meta.override_ap_votes`` is truthy; returns
    False when overrides are disabled, and implicitly None when ``obj``
    has no ``meta`` attribute.
    """
    if hasattr(obj, "meta"):
        if obj.meta.override_ap_votes:
            all_votes = None
            # Union the vote querysets across all candidate elections.
            for ce in obj.candidate_elections.all():
                if all_votes:
                    all_votes = all_votes | ce.votes.all()
                else:
                    all_votes = ce.votes.all()
            return VotesSerializer(all_votes, many=True).data
        return False
Only used if ``override_ap_votes = True``. |
def get_ssl(database):
    """Return the SSL option dict for the configured database engine.

    Only options actually present (non-None) in ``database`` appear in
    the result; postgresql and mysql use different option names.
    """
    if database['engine'] == 'postgresql':
        keys = ['sslmode', 'sslcert', 'sslkey',
                'sslrootcert', 'sslcrl', 'sslcompression']
    else:
        keys = ['ssl_ca', 'ssl_capath', 'ssl_cert', 'ssl_key',
                'ssl_cipher', 'ssl_check_hostname']
    return {key: database[key] for key in keys
            if database.get(key) is not None}
def agent_heartbeat(self, agent_id, metrics, run_states):
    """Notify the W&B server of agent state and receive commands.

    Args:
        agent_id (str): the agent's id
        metrics (dict): system metrics
        run_states (dict): run_id -> state mapping

    Returns:
        List of commands to execute ([] on communication failure).
    """
    # NOTE(review): gql() is called with no document -- the mutation
    # string appears to have been stripped; confirm against upstream.
    mutation = gql(
    )
    try:
        response = self.gql(mutation, variable_values={
            'id': agent_id,
            'metrics': json.dumps(metrics),
            'runState': json.dumps(run_states)})
    except Exception as e:
        # The server error payload is a repr'd dict; pull out 'message'.
        message = ast.literal_eval(e.args[0])["message"]
        logger.error('Error communicating with W&B: %s', message)
        return []
    else:
        return json.loads(response['agentHeartbeat']['commands'])
Args:
agent_id (str): agent_id
metrics (dict): system metrics
run_states (dict): run_id: state mapping
Returns:
List of commands to execute. |
def _finalise_figure(fig, **kwargs):
    """Finish a figure: optional title, display, save, and return.

    Recognized keyword arguments: title (str), show (bool), save (bool),
    savefile (str) and return_figure (bool). Returns the figure when
    return_figure is truthy, else None.
    """
    title = kwargs.get("title") or None
    savefile = kwargs.get("savefile") or "EQcorrscan_figure.png"
    if title:
        fig.suptitle(title)
    if kwargs.get("show") or False:
        fig.show()
    if kwargs.get("save") or False:
        fig.savefig(savefile)
        print("Saved figure to {0}".format(savefile))
    if kwargs.get("return_figure") or False:
        return fig
    return None
Possible arguments:
:type title: str
:type show: bool
:type save: bool
:type savefile: str
:type return_figure: bool |
def _pad_block(self, handle):
    """Zero-pad ``handle`` up to the next 512-byte block boundary."""
    remainder = handle.tell() % 512
    if remainder:
        handle.write(b'\x00' * (512 - remainder))
def add(self, doc):
    """Add a doc's annotations to the binder for serialization."""
    array = doc.to_array(self.attrs)
    if len(array.shape) == 1:
        # Single attribute: promote to an (n_tokens, 1) column.
        array = array.reshape((array.shape[0], 1))
    self.tokens.append(array)
    # SPACY attribute: whether each token is followed by whitespace.
    spaces = doc.to_array(SPACY)
    assert array.shape[0] == spaces.shape[0]
    spaces = spaces.reshape((spaces.shape[0], 1))
    self.spaces.append(numpy.asarray(spaces, dtype=bool))
    # Collect the raw token texts for the shared string store.
    self.strings.update(w.text for w in doc)
def flush(cls, *args):
    """Remove all keys of this namespace, optionally narrowed by ``args``.

    Without args, clears every key starting with cls.PREFIX; with args,
    clears keys starting with the derived cls._make_key(args) prefix.
    Returns the list of removed keys.
    """
    pattern = (cls._make_key(args) if args else cls.PREFIX) + '*'
    return _remove_keys([], [pattern])
Without args, clears all keys starting with cls.PREFIX
if called with args, clears keys starting with given cls.PREFIX + args
Args:
*args: Arbitrary number of arguments.
Returns:
List of removed keys. |
def get_default_project_directory():
    """Return (and create when missing) the default project directory.

    The location comes from the Server section's ``projects_path``
    setting, defaulting to ~/GNS3/projects.
    """
    server_config = Config.instance().get_section_config("Server")
    projects_path = server_config.get("projects_path", "~/GNS3/projects")
    path = os.path.normpath(os.path.expanduser(projects_path))
    try:
        os.makedirs(path, exist_ok=True)
    except OSError as e:
        raise aiohttp.web.HTTPInternalServerError(text="Could not create project directory: {}".format(e))
    return path
depending of the operating system |
def _mask(self, tensor, length, padding_value=0):
    """Set padding elements of a batch of sequences to a constant.

    Useful for zeroing padding before summing along the time dimension,
    or for preventing infinities from leaking out of padding elements.

    Args:
        tensor: batch-major tensor of sequences.
        length: batch of valid sequence lengths.
        padding_value: value written into padding elements.

    Returns:
        Masked sequences (NaN/Inf-checked).
    """
    with tf.name_scope('mask'):
        # mask[b, t] is True for valid (non-padding) timesteps.
        range_ = tf.range(tensor.shape[1].value)
        mask = range_[None, :] < length[:, None]
        if tensor.shape.ndims > 2:
            # Broadcast the (batch, time) mask across trailing dims.
            for _ in range(tensor.shape.ndims - 2):
                mask = mask[..., None]
            mask = tf.tile(mask, [1, 1] + tensor.shape[2:].as_list())
        masked = tf.where(mask, tensor, padding_value * tf.ones_like(tensor))
        return tf.check_numerics(masked, 'masked')
Useful for setting padding elements to zero before summing along the time
dimension, or for preventing infinite results in padding elements.
Args:
tensor: Tensor of sequences.
length: Batch of sequence lengths.
padding_value: Value to write into padding elements.
Returns:
Masked sequences. |
def zeros(shape, dtype=None, **kwargs):
    """Return a new symbol of the given shape and type, filled with zeros.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the new array.
    dtype : str or numpy.dtype, optional
        Inner value type; defaults to ``np.float32``.

    Returns
    -------
    out : Symbol
    """
    resolved_dtype = _numpy.float32 if dtype is None else dtype
    return _internal._zeros(shape=shape, dtype=resolved_dtype, **kwargs)
Parameters
----------
shape : int or sequence of ints
Shape of the new array.
dtype : str or numpy.dtype, optional
The value type of the inner value, default to ``np.float32``.
Returns
-------
out : Symbol
The created Symbol. |
def register_pb_devices(num_pbs: int = 100):
    """Register ``num_pbs`` ProcessingBlockDevice entries in the Tango db.

    All devices are attached to the 'processing_block_ds/1' server.
    """
    tango_db = Database()
    LOG.info("Registering PB devices:")
    device = DbDevInfo()
    device._class = 'ProcessingBlockDevice'
    device.server = 'processing_block_ds/1'
    for index in range(num_pbs):
        # One DbDevInfo is reused; only the name changes per device.
        device.name = 'sip_sdp/pb/{:05d}'.format(index)
        LOG.info("\t%s", device.name)
        tango_db.add_device(device)
Note(BMo): Ideally we do not want to register any devices here. There
does not seem to be a way to create a device server with no registered
devices in Tango. This is (probably) because Tango devices must have been
registered before the server starts ... |
def prepare_intervals(data, region_file, work_dir):
    """Prepare interval regions for targeted and gene based regions.

    Runs GATK PreprocessIntervals: whole-genome inputs are binned at
    1000bp with no padding; targeted inputs use ``region_file`` with
    250bp padding. Returns the path to the generated interval_list,
    reusing it when already up to date.
    """
    target_file = os.path.join(work_dir, "%s-target.interval_list" % dd.get_sample_name(data))
    if not utils.file_uptodate(target_file, region_file):
        with file_transaction(data, target_file) as tx_out_file:
            params = ["-T", "PreprocessIntervals", "-R", dd.get_ref_file(data),
                      "--interval-merging-rule", "OVERLAPPING_ONLY",
                      "-O", tx_out_file]
            if dd.get_coverage_interval(data) == "genome":
                params += ["--bin-length", "1000", "--padding", "0"]
            else:
                params += ["-L", region_file, "--bin-length", "0", "--padding", "250"]
            _run_with_memory_scaling(params, tx_out_file, data)
    return target_file
def dir_list(self, tgt_env):
    """Get the set of directories for the target environment (GitPython).

    Paths are made relative to the configured root (when set) and
    prefixed with the mountpoint (when set).
    """
    ret = set()
    tree = self.get_tree(tgt_env)
    if not tree:
        return ret
    if self.root(tgt_env):
        try:
            # Restrict the walk to the configured subdirectory.
            tree = tree / self.root(tgt_env)
        except KeyError:
            # Configured root does not exist in this tree.
            return ret
        relpath = lambda path: os.path.relpath(path, self.root(tgt_env))
    else:
        relpath = lambda path: path
    add_mountpoint = lambda path: salt.utils.path.join(
        self.mountpoint(tgt_env), path, use_posixpath=True)
    for blob in tree.traverse():
        if isinstance(blob, git.Tree):
            ret.add(add_mountpoint(relpath(blob.path)))
    if self.mountpoint(tgt_env):
        ret.add(self.mountpoint(tgt_env))
    return ret
def add_frequency(self, name, value):
    """Append a frequency entry displayed at the variant level.

    :param name: the frequency label
    :param value: the frequency value
    """
    entry = {'label': name, 'value': value}
    logger.debug("Adding frequency {0} with value {1} to variant {2}".format(
        name, value, self['variant_id']))
    self['frequencies'].append(entry)
Args:
name (str): The name of the frequency field |
def set_partition(self, partition):
    """Store ``partition`` and rotate the old one into prev_partition.

    ``partition`` must contain exactly ``self.numgrp`` entries.
    """
    assert len(partition) == self.numgrp
    self.prev_partition = self.partition
    self.partition = partition
move the old self.partition into self.prev_partition |
def get_restored(self):
    """Return ``(is_restored, restore_time)`` for this game."""
    restore_time = self._header.initial.restore_time
    return restore_time > 0, restore_time
def stream_subsegments(self):
    """Stream closed subsegments of the current segment to the daemon.

    No-op when the segment is not eligible (e.g. not sampled).
    """
    segment = self.current_segment()
    if not self.streaming.is_eligible(segment):
        return
    self.streaming.stream(segment, self._stream_subsegment_out)
and remove reference to the parent segment.
No-op for a not sampled segment. |
def prefix_iter(self, ns_uri):
    """Return an iterator over the prefixes bound to ``ns_uri``."""
    namespace_info = self.__lookup_uri(ns_uri)
    return iter(namespace_info.prefixes)
def pad(self, sid, date):
    """Zero-fill the container for ``sid`` through ``date``.

    If the last recorded trade is not at the close, that day is padded
    with zeros through its close; every later session up to and
    including ``date`` is padded with a full day's worth of zeros.

    Parameters
    ----------
    sid : int
        Asset identifier for the data being written.
    date : datetime-like
        Date through which to pad; afterwards
        ``last_date_in_output_for_sid`` equals ``date``.
    """
    table = self._ensure_ctable(sid)
    last_date = self.last_date_in_output_for_sid(sid)
    tds = self._session_labels
    if date <= last_date or date < tds[0]:
        # Nothing to do for already-written dates or pre-calendar dates.
        return
    if last_date is pd.NaT:
        # Nothing written yet: pad every session through `date`.
        # (Bug fix: `last_date == pd.NaT` always evaluates False because
        # NaT comparisons return False; an identity check is required.)
        days_to_zerofill = tds[tds.slice_indexer(end=date)]
    else:
        days_to_zerofill = tds[tds.slice_indexer(
            start=last_date + tds.freq,
            end=date)]
    self._zerofill(table, len(days_to_zerofill))
    new_last_date = self.last_date_in_output_for_sid(sid)
    assert new_last_date == date, "new_last_date={0} != date={1}".format(
        new_last_date, date)
If the last recorded trade is not at the close, then that day will be
padded with zeros until its close. Any day after that (up to and
including the specified date) will be padded with `minute_per_day`
worth of zeros
Parameters
----------
sid : int
The asset identifier for the data being written.
date : datetime-like
The date used to calculate how many slots to be pad.
The padding is done through the date, i.e. after the padding is
done the `last_date_in_output_for_sid` will be equal to `date` |
def __format_error(self, error_list_tag):
    """Format this error into a JSON response dict.

    Args:
        error_list_tag: name of the tag holding the error list.

    Returns:
        A dict containing the reformatted JSON error response.
    """
    error_entry = {'domain': self.domain(),
                   'reason': self.reason(),
                   'message': self.message()}
    error_entry.update(self.extra_fields() or {})
    return {'error': {error_list_tag: [error_entry],
                      'code': self.status_code(),
                      'message': self.message()}}
Args:
error_list_tag: A string specifying the name of the tag to use for the
error list.
Returns:
A dict containing the reformatted JSON error response. |
def cache_node_list(nodes, provider, opts):
    '''
    If configured to do so, update the cloud cachedir with the current list of
    nodes. Also fires configured events pertaining to the node list.

    .. versionadded:: 2014.7.0
    '''
    # Caching is opt-in via the ``update_cachedir`` option.
    if 'update_cachedir' not in opts or not opts['update_cachedir']:
        return
    base = os.path.join(init_cachedir(), 'active')
    # The first key of the provider's config block names the driver module.
    driver = next(six.iterkeys(opts['providers'][provider]))
    prov_dir = os.path.join(base, driver, provider)
    if not os.path.exists(prov_dir):
        os.makedirs(prov_dir)
    # NOTE(review): presumably handles nodes present in the cache but absent
    # from `nodes` (fires "missing" events) -- confirm against its definition.
    missing_node_cache(prov_dir, nodes, provider, opts)
    for node in nodes:
        # NOTE(review): presumably fires a diff event when cached data
        # differs from the current node data -- confirm.
        diff_node_cache(prov_dir, node, nodes[node], opts)
        # One msgpack file per node: <prov_dir>/<node>.p
        path = os.path.join(prov_dir, '{0}.p'.format(node))
        # msgpack output is bytes, so binary mode is required on PY3.
        mode = 'wb' if six.PY3 else 'w'
        with salt.utils.files.fopen(path, mode) as fh_:
            salt.utils.msgpack.dump(nodes[node], fh_, encoding=MSGPACK_ENCODING)
def copy(self):
    """Return a copy of this JunctionTree.

    Edges and nodes are copied into a fresh JunctionTree; attached
    factors, if any, are copied individually so the duplicate shares no
    factor objects with the original.

    Returns
    -------
    JunctionTree : copy of JunctionTree
    """
    duplicate = JunctionTree(self.edges())
    duplicate.add_nodes_from(self.nodes())
    if self.factors:
        duplicate.add_factors(*[factor.copy() for factor in self.factors])
    return duplicate
def play_sound(self, sound_file):
    """Play *sound_file* with the first available player, if possible."""
    self.stop_sound()
    if not sound_file:
        return
    player = self.check_commands(["ffplay", "paplay", "play"])
    if not player:
        return
    if player == "ffplay":
        # ffplay needs extra flags to behave like a one-shot player.
        player = "ffplay -autoexit -nodisp -loglevel 0"
    path = os.path.expanduser(sound_file)
    self._audio = Popen(shlex.split("{} {}".format(player, path)))
def from_array(cls, arr, index=None, name=None, dtype=None, copy=False,
               fastpath=False):
    """Construct Series from array.

    .. deprecated :: 0.23.0
        Use pd.Series(..) constructor instead.
    """
    warnings.warn("'from_array' is deprecated and will be removed in a "
                  "future version. Please use the pd.Series(..) "
                  "constructor instead.", FutureWarning, stacklevel=2)
    target = cls
    if isinstance(arr, ABCSparseArray):
        # Sparse input gets routed to the sparse subclass.
        from pandas.core.sparse.series import SparseSeries
        target = SparseSeries
    return target(arr, index=index, name=name, dtype=dtype,
                  copy=copy, fastpath=fastpath)
def countdown_timer(seconds=10):
    """Show a simple countdown progress bar.

    Parameters
    ----------
    seconds
        Period of time the progress bar takes to reach zero.
    """
    tick = 0.1
    total_ticks = int(seconds / tick)
    bar = progressbar.ProgressBar(
        widgets=['Pause for panic: ', progressbar.ETA(), ' ',
                 progressbar.Bar()],
        max_value=total_ticks,
    ).start()
    for step in range(total_ticks):
        bar.update(step)
        sleep(tick)
    bar.finish()
def rm_field(self, name):
    """Remove a field from the datamat.

    Parameters:
        name : string
            Name of the field to be removed

    Raises:
        ValueError: if `name` is not an existing field.
    """
    # Idiomatic membership test (`not in`); include the offending name in
    # the message so callers can tell which field was missing.
    if name not in self._fields:
        raise ValueError('no such field: {0}'.format(name))
    self._fields.remove(name)
    del self.__dict__[name]
def createproject(self, name, **kwargs):
    """Creates a new project owned by the authenticated user.

    :param name: new project name
    :param path: custom repository name for new project. By default generated based on name
    :param namespace_id: namespace for the new project (defaults to user)
    :param description: short project description
    :param issues_enabled:
    :param merge_requests_enabled:
    :param wiki_enabled:
    :param snippets_enabled:
    :param public: if true same as setting visibility_level = 20
    :param visibility_level:
    :param sudo:
    :param import_url:
    :return: dict of the created project on success, False on failure
    """
    data = {'name': name}
    data.update(kwargs)
    request = requests.post(
        self.projects_url, headers=self.headers, data=data,
        verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
    if request.status_code == 201:
        return request.json()
    if (request.status_code == 403
            and 'Your own projects limit is 0' in request.text):
        # Surface the quota error to the user before failing.
        print(request.text)
    # BUG FIX: a 403 without the quota message previously fell through and
    # returned None implicitly; every failure now returns False.
    return False
def int_input(message, low, high, show_range = True):
    """Ask the user for an integer between two values.

    args:
        message (str): Prompt for the user
        low (int): Lowest accepted value
        high (int): Highest accepted value
        show_range (boolean, Default True): Print a range hint in the prompt

    returns:
        int: the accepted integer
    """
    while True:
        if show_range:
            suffix = ' (integer between ' + str(low) + ' and ' + str(high) + ')'
        else:
            suffix = ''
        raw = input('Enter a ' + message + suffix + ': ')
        if re.match('^-?[0-9]+$', raw) is None:
            print(colored('Must be an integer, try again!', 'red'))
            continue
        value = int(raw)
        if low <= value <= high:
            return value
def setup_logging(self):
    """Setup of application logging."""
    config_path = self.options.logging_config
    use_custom = (len(config_path) > 0
                  and os.path.isfile(config_path)
                  and not self.options.dry_run)
    if use_custom:
        Logger.configure_by_file(config_path)
        return
    # Dry runs drop the timestamp for cleaner output.
    if self.options.dry_run:
        fmt = "%(name)s - %(message)s"
    else:
        fmt = "%(asctime)-15s - %(name)s - %(message)s"
    Logger.configure_default(fmt, self.logging_level)
def append_row(self, index, value):
    """Append a single (index, value) pair to the end of the data.

    Be very careful: for sorted Series this does NOT enforce sort order.
    Use only for speed when needed.

    :param index: index
    :param value: value
    :return: nothing
    """
    already_present = index in self._index
    if already_present:
        raise IndexError('index already in Series')
    self._index.append(index)
    self._data.append(value)
def view_as_consumer(
        wrapped_view: typing.Callable[[HttpRequest], HttpResponse],
        mapped_actions: typing.Optional[
            typing.Dict[str, str]
        ]=None) -> Type[AsyncConsumer]:
    """Wrap a Django view so that it is triggered by actions over this
    JSON websocket consumer."""
    if mapped_actions is None:
        # Default CRUD-style action -> HTTP method mapping.
        mapped_actions = {
            'create': 'PUT',
            'update': 'PATCH',
            'list': 'GET',
            'retrieve': 'GET',
        }

    class DjangoViewWrapper(DjangoViewAsConsumer):
        view = wrapped_view
        actions = mapped_actions

    return DjangoViewWrapper
def get_sourcefile(self):
    """The name of the file this class was compiled from, or None if not
    indicated.

    reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.10
    """
    attr = self.get_attribute("SourceFile")
    if attr is None:
        return None
    with unpack(attr) as unpacker:
        (const_ref,) = unpacker.unpack_struct(_H)
        return self.deref_const(const_ref)
def on_person_new(self, people):
    """Handle new people joining the audience.

    :param people: People that just joined the audience
    :type people: list[paps.person.Person]
    :rtype: None
    """
    self.debug("()")
    changed = []
    with self._people_lock:
        for raw in people:
            person = Person.from_person(raw)
            if person.id in self._people:
                # Duplicate join: log it, then overwrite the entry anyway.
                self.warning(
                    u"{} already in audience".format(person.id)
                )
            self._people[person.id] = person
            changed.append(person)
    # Notify plugins outside the lock; one failing plugin must not
    # prevent delivery to the others.
    for plugin in self.plugins:
        try:
            plugin.on_person_new(changed)
        except Exception:
            # BUG FIX: was a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt; only real errors are now
            # logged and suppressed.
            self.exception(
                u"Failed to send new people to {}".format(plugin.name)
            )
def suspendJustTabProviders(installation):
    """Replace INavigableElements with facades that indicate their suspension."""
    if installation.suspended:
        raise RuntimeError("Installation already suspended")
    # Snapshot the powerups before mutating powerup registrations.
    powerups = list(installation.allPowerups)
    for p in powerups:
        if INavigableElement.providedBy(p):
            # Order matters: power down the real element first, then
            # install the facade for both interfaces.
            p.store.powerDown(p, INavigableElement)
            sne = SuspendedNavigableElement(store=p.store, originalNE=p)
            p.store.powerUp(sne, INavigableElement)
            p.store.powerUp(sne, ISuspender)
    installation.suspended = True
def sendto(self, transport, addr):
    """Send this request to a given address via the given transport.

    Args:
        transport (asyncio.DatagramTransport):
            Write transport to send the message on.
        addr (Tuple[str, int]):
            IP address and port pair to send the message to.
    """
    payload = bytes(self) + b'\r\n'
    logger.debug("%s:%s < %s", *(addr + (self,)))
    transport.sendto(payload, addr)
def readFILTER(self):
    """Read and parse a single SWFFilter from the stream."""
    filter_id = self.readUI8()
    swf_filter = SWFFilterFactory.create(filter_id)
    swf_filter.parse(self)
    return swf_filter
def load_HEP_data(
    ROOT_filename = "output.root",
    tree_name = "nominal",
    maximum_number_of_events = None
    ):
    """Load HEP data and return dataset.

    NOTE: this module targets Python 2 (print statement below).
    """
    ROOT_file = open_ROOT_file(ROOT_filename)
    tree = ROOT_file.Get(tree_name)
    number_of_events = tree.GetEntries()
    data = datavision.Dataset()
    # Progress reporting; quick mode trades accuracy for speed.
    progress = shijian.Progress()
    progress.engage_quick_calculation_mode()
    number_of_events_loaded = 0
    log.info("")
    index = 0
    for event in tree:
        if maximum_number_of_events is not None and\
        number_of_events_loaded >= int(maximum_number_of_events):
            log.info(
                "loaded maximum requested number of events " +
                "({maximum_number_of_events})\r".format(
                    maximum_number_of_events = maximum_number_of_events
                )
            )
            break
        # NOTE(review): the fraction uses (index + 2) -- presumably to keep
        # the bar slightly ahead of the loop; confirm intent.
        print progress.add_datum(fraction = (index + 2) / number_of_events),
        if select_event(event):
            index += 1
            data.variable(index = index, name = "el_1_pt", value = event.el_pt[0])
        number_of_events_loaded += 1
    log.info("")
    return data
def _get_fname_len(self, bufflen=128):
    """Returns the number of bytes designated for the filename.

    Peeks at most *bufflen* bytes of ``self.meta`` and measures the
    NUL-terminated filename plus its trailing NUL padding.
    """
    buff = self.meta.peek(bufflen)
    strlen = buff.find('\0')
    # NOTE(review): if no NUL is found, find() returns -1 and the loop
    # below scans only the final byte -- confirm callers guarantee a
    # terminator within `bufflen` bytes.
    for i, b in enumerate(buff[strlen:]):
        if b != '\0':
            # First non-NUL after the terminator ends the padded field.
            return strlen+i
    return bufflen
def export(self, id, exclude_captures=False):
    """Export a result.

    :param id: Result ID as an int.
    :param exclude_captures: If bool `True`, don't export capture files
    :rtype: tuple `(io.BytesIO, 'filename')`
    """
    params = {'exclude_captures': exclude_captures}
    return self.service.export(self.base, id, params=params)
def delete(gandi, domain, zone_id, name, type, value):
    """Delete a record entry for a domain."""
    if not zone_id:
        result = gandi.domain.info(domain)
        zone_id = result['zone_id']
    if not zone_id:
        # FIX: grammar in the user-facing message
        # ("doesn't seems" -> "doesn't seem").
        gandi.echo('No zone records found, domain %s doesn\'t seem to be '
                   'managed at Gandi.' % domain)
        return
    if not name and not type and not value:
        # Removing everything is destructive; require confirmation.
        # FIX: spelling/grammar in the prompt ("Are you sur to perform
        # this action ?" -> "Are you sure you want to perform this
        # action?").
        proceed = click.confirm('This command without parameters --type, '
                                '--name or --value will remove all records'
                                ' in this zone file. Are you sure you want'
                                ' to perform this action?')
        if not proceed:
            return
    record = {'name': name, 'type': type, 'value': value}
    return gandi.record.delete(zone_id, record)
def check_errors(self, response):
    """Check the response for common error conditions.

    On success, stores the parsed content back on ``response._content``;
    otherwise raises the error class matching the first ERROR message.
    """
    content = response.content
    if 'status' not in content:
        raise self.GeneralError('We expect a status field.')
    if content['status'] == 'success':
        response._content = content
        return
    if 'msgs' not in content:
        # FIX: typo in the error message ("expcet" -> "expect").
        raise self.GeneralError('We expect messages in case of error.')
    try:
        messages = list(content['msgs'])
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt.
        raise self.GeneralError("Messages must be a list.")
    for msg in messages:
        if 'LVL' in msg and msg['LVL'] == 'ERROR':
            if msg['ERR_CD'] == 'NOT_FOUND':
                raise self.NotFoundError(msg['INFO'])
            if msg['ERR_CD'] == 'TARGET_EXISTS':
                raise self.TargetExistsError(msg['INFO'])
            raise self.DynectError(msg['INFO'])
    raise self.GeneralError("We need at least one error message.")
async def on_isupport_excepts(self, value):
    """Server allows ban exceptions."""
    # An empty ISUPPORT value means the standard ban-except mode.
    mode = value or BAN_EXCEPT_MODE
    self._channel_modes.add(mode)
    self._channel_modes_behaviour[rfc1459.protocol.BEHAVIOUR_LIST].add(mode)
def remove_network_from_dhcp_agent(self, dhcp_agent, network_id):
    """Remove a network from a DHCP agent."""
    url = (self.agent_path + self.DHCP_NETS + "/%s") % (dhcp_agent,
                                                        network_id)
    return self.delete(url)
def save_token(self, access_token):
    """Store the access token and additional data in redis.

    See :class:`oauth2.store.AccessTokenStore`.
    """
    data = access_token.__dict__
    self.write(access_token.token, data)
    unique_key = self._unique_token_key(access_token.client_id,
                                        access_token.grant_type,
                                        access_token.user_id)
    self.write(unique_key, data)
    if access_token.refresh_token is not None:
        self.write(access_token.refresh_token, data)
def mark_good(self, server_addr):
    """Mark a server address as good.

    :param server_addr: (ip, port) tuple
    :type server_addr: :class:`tuple`
    """
    entry = self.list[server_addr]
    entry.update({'quality': CMServerList.Good, 'timestamp': time()})
def get_par_box(domain, last=False):
    """Return the bounding box of the surface parametric domain in ccw
    direction.

    :param domain: parametric domain
    :type domain: list, tuple
    :param last: if True, adds the first vertex to the end of the return list
    :type last: bool
    :return: edges of the parametric domain
    :rtype: tuple
    """
    u_range, v_range = domain[0], domain[1]
    corners = [
        (u_range[0], v_range[0]),
        (u_range[1], v_range[0]),
        (u_range[1], v_range[1]),
        (u_range[0], v_range[1]),
    ]
    if last:
        # Close the loop by repeating the first vertex.
        corners.append(corners[0])
    return tuple(corners)
def delete_service_settings_on_service_delete(sender, instance, **kwargs):
    """Delete not-shared service settings when their service is deleted."""
    try:
        settings = instance.settings
    except ServiceSettings.DoesNotExist:
        # Settings already gone; nothing to clean up.
        return
    if not settings.shared:
        settings.delete()
def get_nodesitemtypeinsertion(cls, itemgroup, indent) -> str:
    """Return a string defining the required types for the given
    combination of an exchange item group and |Node| objects.

    ``indent`` counts indentation units of four spaces that prefix
    every emitted line; ``itemgroup`` selects between the get-item and
    set-item element types.
    """
    blanks = ' ' * (indent * 4)
    lines = [
        f'{blanks}<complexType name="nodes_{itemgroup}Type">',
        f'{blanks}    <sequence>',
        f'{blanks}        <element ref="hpcb:selections"',
        f'{blanks}                 minOccurs="0"/>',
        f'{blanks}        <element ref="hpcb:devices"',
        f'{blanks}                 minOccurs="0"/>',
    ]
    if itemgroup == 'getitems':
        type_ = 'getitemType'
    else:
        type_ = 'setitemType'
    # One optional, repeatable element per supported node sequence.
    for name in ('sim', 'obs', 'sim.series', 'obs.series'):
        lines.append(f'{blanks}        <element name="{name}"')
        lines.append(f'{blanks}                 type="hpcb:{type_}"')
        lines.append(f'{blanks}                 minOccurs="0"')
        lines.append(f'{blanks}                 maxOccurs="unbounded"/>')
    lines.append(f'{blanks}    </sequence>')
    lines.append(f'{blanks}</complexType>')
    lines.append('')
    return '\n'.join(lines)
def dump_image_data(dataset_dir, data_dir, dataset, color_array_info, root=None, compress=True):
    """Dump an image data object to vtkjs."""
    container = {} if root is None else root
    container['vtkClass'] = 'vtkImageData'
    container['spacing'] = dataset.GetSpacing()
    container['origin'] = dataset.GetOrigin()
    container['extent'] = dataset.GetExtent()
    dump_all_arrays(dataset_dir, data_dir, dataset, container, compress)
    return container
def sanitize_mimetype(mimetype, filename=None):
    """Sanitize a MIME type so the browser does not render the file."""
    if mimetype in MIMETYPE_WHITELIST:
        return mimetype
    is_plain = mimetype in MIMETYPE_PLAINTEXT
    if not is_plain and filename:
        is_plain = filename.lower() in MIMETYPE_TEXTFILES
    # Anything else is forced to a download-only type.
    return 'text/plain' if is_plain else 'application/octet-stream'
def check_hash(path, checksum, hash_type='md5'):
    """Validate a file using a cryptographic checksum.

    :param str checksum: Value of the checksum used to validate the file.
    :param str hash_type: Hash algorithm used to generate `checksum`.
        Can be any hash algorithm supported by :mod:`hashlib`,
        such as md5, sha1, sha256, sha512, etc.
    :raises ChecksumError: If the file fails the checksum
    """
    actual = file_hash(path, hash_type)
    if actual != checksum:
        raise ChecksumError("'%s' != '%s'" % (checksum, actual))
def _create_path(self):
    """Create the path to hold the database, if one was specified."""
    # Only a file-backed sqlite DSN needs a directory on disk.
    if (self.driver != 'sqlite' or 'memory' in self.dsn
            or self.dsn == 'sqlite://'):
        return
    dir_ = os.path.dirname(self.path)
    if not dir_ or os.path.exists(dir_):
        return
    try:
        os.makedirs(dir_)
    except Exception:
        # Best effort; the existence check below decides success.
        pass
    if not os.path.exists(dir_):
        raise Exception("Couldn't create directory " + dir_)
def set_list(self, mutagen_file, values):
    """Set all values for the field using this style; ``values`` should
    be an iterable."""
    serialized = [self.serialize(value) for value in values]
    self.store(mutagen_file, serialized)
def detect(self):
    """Detect and return the IP address."""
    # `commands` is the Python 2 predecessor of subprocess.getoutput.
    if PY3:
        import subprocess
    else:
        import commands as subprocess
    try:
        address = subprocess.getoutput(self.opts_command)
    except Exception:
        # Best effort: any failure yields an unknown address.
        address = None
    self.set_current_value(address)
    return address
def fullName(self):
    """A full name, intended to uniquely identify a parameter."""
    parent = self.parentName
    own = self.name
    if parent and own:
        return parent + '_' + own
    return own or parent
def getGrid(self, use_mask=True):
    """Return a GDALGrid object of the GSSHA model bounds.

    Parameters:
        use_mask (bool): If True, uses the watershed mask. Otherwise,
            uses the elevation grid.

    Returns:
        GDALGrid
    """
    card = "WATERSHED_MASK" if use_mask else "ELEVATION"
    return self.getGridByCard(card)
def substitute(search, replace, text):
    """Regex substitution function. Replaces regex ``search`` with
    ``replace`` in ``text``."""
    pattern = re.compile(str(search))
    return pattern.sub(replace, text)
def to_database(self, manager=None):
    """Send the model to the PyBEL database.

    This function wraps :py:func:`pybel.to_database`.

    Parameters
    ----------
    manager : Optional[pybel.manager.Manager]
        A PyBEL database manager. If None, PyBEL resolves the connection
        from its own configuration/environment.

    Returns
    -------
    network : Optional[pybel.manager.models.Network]
        The SQLAlchemy model representing the uploaded network, or None
        if the upload fails.
    """
    return pybel.to_database(self.model, manager=manager)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.