code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def get_user_information(self):
"""Gets the current user information, including sensor ID
Args:
None
Returns:
dictionary object containing information about the current user
"""
url = "https://api.neur.io/v1/users/current"
headers = self.__gen_headers()
headers["Content-Type"] = "application/json"
r = requests.get(url, headers=headers)
return r.json() | Gets the current user information, including sensor ID
Args:
None
Returns:
dictionary object containing information about the current user | Below is the the instruction that describes the task:
### Input:
Gets the current user information, including sensor ID
Args:
None
Returns:
dictionary object containing information about the current user
### Response:
def get_user_information(self):
"""Gets the current user information, including sensor ID
Args:
None
Returns:
dictionary object containing information about the current user
"""
url = "https://api.neur.io/v1/users/current"
headers = self.__gen_headers()
headers["Content-Type"] = "application/json"
r = requests.get(url, headers=headers)
return r.json() |
def prog(text):
"""
Decorator used to specify the program name for the console script
help message.
:param text: The text to use for the program name.
"""
def decorator(func):
adaptor = ScriptAdaptor._get_adaptor(func)
adaptor.prog = text
return func
return decorator | Decorator used to specify the program name for the console script
help message.
:param text: The text to use for the program name. | Below is the the instruction that describes the task:
### Input:
Decorator used to specify the program name for the console script
help message.
:param text: The text to use for the program name.
### Response:
def prog(text):
"""
Decorator used to specify the program name for the console script
help message.
:param text: The text to use for the program name.
"""
def decorator(func):
adaptor = ScriptAdaptor._get_adaptor(func)
adaptor.prog = text
return func
return decorator |
def linseg(self, value, samples):
'''
Create a linear section moving from current value to new value over acertain number of
samples.
:param value: New value
:param samples: Length of segment in samples
:return:
'''
if self.params.length > self.pos and samples > 0:
len = min(samples, self.params.length - self.pos)
end = value if len == samples else self.latest + (value - self.latest)*len/samples
self.data[self.pos:self.pos + len] = np.linspace(self.latest, end, num=len, endpoint=False, dtype=np.float)
self.pos += len
self.latest = value
return self | Create a linear section moving from current value to new value over acertain number of
samples.
:param value: New value
:param samples: Length of segment in samples
:return: | Below is the the instruction that describes the task:
### Input:
Create a linear section moving from current value to new value over acertain number of
samples.
:param value: New value
:param samples: Length of segment in samples
:return:
### Response:
def linseg(self, value, samples):
'''
Create a linear section moving from current value to new value over acertain number of
samples.
:param value: New value
:param samples: Length of segment in samples
:return:
'''
if self.params.length > self.pos and samples > 0:
len = min(samples, self.params.length - self.pos)
end = value if len == samples else self.latest + (value - self.latest)*len/samples
self.data[self.pos:self.pos + len] = np.linspace(self.latest, end, num=len, endpoint=False, dtype=np.float)
self.pos += len
self.latest = value
return self |
def flat_data(self):
"""
Function to pass our modified values to the original ones
"""
def flat_field(value):
"""
Flat item
"""
try:
value.flat_data()
return value
except AttributeError:
return value
modified_data = self.__modified_data__ if self.__modified_data__ is not None else self.__original_data__
if modified_data is not None:
self.__original_data__ = [flat_field(value) for value in modified_data]
self.__modified_data__ = None | Function to pass our modified values to the original ones | Below is the the instruction that describes the task:
### Input:
Function to pass our modified values to the original ones
### Response:
def flat_data(self):
"""
Function to pass our modified values to the original ones
"""
def flat_field(value):
"""
Flat item
"""
try:
value.flat_data()
return value
except AttributeError:
return value
modified_data = self.__modified_data__ if self.__modified_data__ is not None else self.__original_data__
if modified_data is not None:
self.__original_data__ = [flat_field(value) for value in modified_data]
self.__modified_data__ = None |
def email_addresses595(self, key, value):
"""Populates the ``email_addresses`` field using the 595 MARCXML field.
Also populates ``_private_notes`` as a side effect.
"""
emails = self.get('email_addresses', [])
if value.get('o'):
emails.append({
'value': value.get('o'),
'current': False,
'hidden': True,
})
if value.get('m'):
emails.append({
'value': value.get('m'),
'current': True,
'hidden': True,
})
notes = self.get('_private_notes', [])
new_note = (
{
'source': value.get('9'),
'value': _private_note,
} for _private_note in force_list(value.get('a'))
)
notes.extend(new_note)
self['_private_notes'] = notes
return emails | Populates the ``email_addresses`` field using the 595 MARCXML field.
Also populates ``_private_notes`` as a side effect. | Below is the the instruction that describes the task:
### Input:
Populates the ``email_addresses`` field using the 595 MARCXML field.
Also populates ``_private_notes`` as a side effect.
### Response:
def email_addresses595(self, key, value):
"""Populates the ``email_addresses`` field using the 595 MARCXML field.
Also populates ``_private_notes`` as a side effect.
"""
emails = self.get('email_addresses', [])
if value.get('o'):
emails.append({
'value': value.get('o'),
'current': False,
'hidden': True,
})
if value.get('m'):
emails.append({
'value': value.get('m'),
'current': True,
'hidden': True,
})
notes = self.get('_private_notes', [])
new_note = (
{
'source': value.get('9'),
'value': _private_note,
} for _private_note in force_list(value.get('a'))
)
notes.extend(new_note)
self['_private_notes'] = notes
return emails |
def set_rtscts(self, enable):
'''enable/disable RTS/CTS if applicable'''
try:
self.port.setRtsCts(enable)
except Exception:
self.port.rtscts = enable
self.rtscts = enable | enable/disable RTS/CTS if applicable | Below is the the instruction that describes the task:
### Input:
enable/disable RTS/CTS if applicable
### Response:
def set_rtscts(self, enable):
'''enable/disable RTS/CTS if applicable'''
try:
self.port.setRtsCts(enable)
except Exception:
self.port.rtscts = enable
self.rtscts = enable |
def ListFiles(self, ext_attrs=None):
"""List all the files in the directory."""
del ext_attrs # Unused.
if not self.IsDirectory():
raise IOError("%s is not a directory" % self.pathspec.CollapsePath())
for f in self.fd.as_directory():
try:
name = _DecodeUTF8WithWarning(f.info.name.name)
# Drop these useless entries.
if name in [".", ".."] or name in self.BLACKLIST_FILES:
continue
# First we yield a standard response using the default attributes.
yield self.MakeStatResponse(f, tsk_attribute=None, append_name=name)
# Now send back additional named attributes for the ADS.
for attribute in f:
if attribute.info.type in [
pytsk3.TSK_FS_ATTR_TYPE_NTFS_DATA, pytsk3.TSK_FS_ATTR_TYPE_DEFAULT
]:
if attribute.info.name:
yield self.MakeStatResponse(
f, append_name=name, tsk_attribute=attribute)
except AttributeError:
pass | List all the files in the directory. | Below is the the instruction that describes the task:
### Input:
List all the files in the directory.
### Response:
def ListFiles(self, ext_attrs=None):
"""List all the files in the directory."""
del ext_attrs # Unused.
if not self.IsDirectory():
raise IOError("%s is not a directory" % self.pathspec.CollapsePath())
for f in self.fd.as_directory():
try:
name = _DecodeUTF8WithWarning(f.info.name.name)
# Drop these useless entries.
if name in [".", ".."] or name in self.BLACKLIST_FILES:
continue
# First we yield a standard response using the default attributes.
yield self.MakeStatResponse(f, tsk_attribute=None, append_name=name)
# Now send back additional named attributes for the ADS.
for attribute in f:
if attribute.info.type in [
pytsk3.TSK_FS_ATTR_TYPE_NTFS_DATA, pytsk3.TSK_FS_ATTR_TYPE_DEFAULT
]:
if attribute.info.name:
yield self.MakeStatResponse(
f, append_name=name, tsk_attribute=attribute)
except AttributeError:
pass |
def _get_file(src):
""" Return content from local or remote file. """
try:
if '://' in src or src[0:2] == '//': # Most likely this is remote file
response = urllib2.urlopen(src)
return response.read()
else:
with open(src, 'rb') as fh:
return fh.read()
except Exception as e:
raise RuntimeError('Error generating base64image: {}'.format(e)) | Return content from local or remote file. | Below is the the instruction that describes the task:
### Input:
Return content from local or remote file.
### Response:
def _get_file(src):
""" Return content from local or remote file. """
try:
if '://' in src or src[0:2] == '//': # Most likely this is remote file
response = urllib2.urlopen(src)
return response.read()
else:
with open(src, 'rb') as fh:
return fh.read()
except Exception as e:
raise RuntimeError('Error generating base64image: {}'.format(e)) |
def format_vars(args):
"""Format the given vars in the form: 'flag=value'"""
variables = []
for key, value in args.items():
if value:
variables += ['{0}={1}'.format(key, value)]
return variables | Format the given vars in the form: 'flag=value | Below is the the instruction that describes the task:
### Input:
Format the given vars in the form: 'flag=value
### Response:
def format_vars(args):
"""Format the given vars in the form: 'flag=value'"""
variables = []
for key, value in args.items():
if value:
variables += ['{0}={1}'.format(key, value)]
return variables |
def check_extensions(extensions: Set[str], allow_multifile: bool = False):
"""
Utility method to check that all extensions in the provided set are valid
:param extensions:
:param allow_multifile:
:return:
"""
check_var(extensions, var_types=set, var_name='extensions')
# -- check them one by one
for ext in extensions:
check_extension(ext, allow_multifile=allow_multifile) | Utility method to check that all extensions in the provided set are valid
:param extensions:
:param allow_multifile:
:return: | Below is the the instruction that describes the task:
### Input:
Utility method to check that all extensions in the provided set are valid
:param extensions:
:param allow_multifile:
:return:
### Response:
def check_extensions(extensions: Set[str], allow_multifile: bool = False):
"""
Utility method to check that all extensions in the provided set are valid
:param extensions:
:param allow_multifile:
:return:
"""
check_var(extensions, var_types=set, var_name='extensions')
# -- check them one by one
for ext in extensions:
check_extension(ext, allow_multifile=allow_multifile) |
def center_data(data, vmin, vmax):
"""Clips data on [vmin, vmax]; then rescales to [0,1]"""
ans = data - vmin
ans /= (vmax - vmin)
return np.clip(ans, 0, 1) | Clips data on [vmin, vmax]; then rescales to [0,1] | Below is the the instruction that describes the task:
### Input:
Clips data on [vmin, vmax]; then rescales to [0,1]
### Response:
def center_data(data, vmin, vmax):
"""Clips data on [vmin, vmax]; then rescales to [0,1]"""
ans = data - vmin
ans /= (vmax - vmin)
return np.clip(ans, 0, 1) |
def _get_relative_pythonpath(self):
"""Return PYTHONPATH list as relative paths"""
# Workaround to replace os.path.relpath (new in Python v2.6):
offset = len(self.root_path)+len(os.pathsep)
return [path[offset:] for path in self.pythonpath] | Return PYTHONPATH list as relative paths | Below is the the instruction that describes the task:
### Input:
Return PYTHONPATH list as relative paths
### Response:
def _get_relative_pythonpath(self):
"""Return PYTHONPATH list as relative paths"""
# Workaround to replace os.path.relpath (new in Python v2.6):
offset = len(self.root_path)+len(os.pathsep)
return [path[offset:] for path in self.pythonpath] |
def ridgecircle(self, x, expo=0.5):
"""happy cat by HG Beyer"""
a = len(x)
s = sum(x**2)
return ((s - a)**2)**(expo / 2) + s / a + sum(x) / a | happy cat by HG Beyer | Below is the the instruction that describes the task:
### Input:
happy cat by HG Beyer
### Response:
def ridgecircle(self, x, expo=0.5):
"""happy cat by HG Beyer"""
a = len(x)
s = sum(x**2)
return ((s - a)**2)**(expo / 2) + s / a + sum(x) / a |
def encode(raw):
"""Encode SLIP message."""
return raw \
.replace(bytes([SLIP_ESC]), bytes([SLIP_ESC, SLIP_ESC_ESC])) \
.replace(bytes([SLIP_END]), bytes([SLIP_ESC, SLIP_ESC_END])) | Encode SLIP message. | Below is the the instruction that describes the task:
### Input:
Encode SLIP message.
### Response:
def encode(raw):
"""Encode SLIP message."""
return raw \
.replace(bytes([SLIP_ESC]), bytes([SLIP_ESC, SLIP_ESC_ESC])) \
.replace(bytes([SLIP_END]), bytes([SLIP_ESC, SLIP_ESC_END])) |
def datetime2gtd(time: Union[str, datetime.datetime, np.datetime64],
glon: Union[float, List[float], np.ndarray] = np.nan) -> Tuple[int, float, float]:
"""
Inputs:
time: Numpy 1-D array of datetime.datetime OR string for dateutil.parser.parse
glon: Numpy 2-D array of geodetic longitudes (degrees)
Outputs:
iyd: day of year
utsec: seconds from midnight utc
stl: local solar time
"""
# %%
T = np.atleast_1d(time)
glon = np.asarray(glon)
doy = np.empty_like(T, int)
utsec = np.empty_like(T, float)
stl = np.empty((T.size, *glon.shape))
for i, t in enumerate(T):
if isinstance(t, str):
t = parse(t)
elif isinstance(t, np.datetime64):
t = t.astype(datetime.datetime)
elif isinstance(t, (datetime.datetime, datetime.date)):
pass
else:
raise TypeError('unknown time datatype {}'.format(type(t)))
# %% Day of year
doy[i] = int(t.strftime('%j'))
# %% seconds since utc midnight
utsec[i] = datetime2utsec(t)
stl[i, ...] = utsec[i] / 3600. + glon / 15.
return doy, utsec, stl | Inputs:
time: Numpy 1-D array of datetime.datetime OR string for dateutil.parser.parse
glon: Numpy 2-D array of geodetic longitudes (degrees)
Outputs:
iyd: day of year
utsec: seconds from midnight utc
stl: local solar time | Below is the the instruction that describes the task:
### Input:
Inputs:
time: Numpy 1-D array of datetime.datetime OR string for dateutil.parser.parse
glon: Numpy 2-D array of geodetic longitudes (degrees)
Outputs:
iyd: day of year
utsec: seconds from midnight utc
stl: local solar time
### Response:
def datetime2gtd(time: Union[str, datetime.datetime, np.datetime64],
glon: Union[float, List[float], np.ndarray] = np.nan) -> Tuple[int, float, float]:
"""
Inputs:
time: Numpy 1-D array of datetime.datetime OR string for dateutil.parser.parse
glon: Numpy 2-D array of geodetic longitudes (degrees)
Outputs:
iyd: day of year
utsec: seconds from midnight utc
stl: local solar time
"""
# %%
T = np.atleast_1d(time)
glon = np.asarray(glon)
doy = np.empty_like(T, int)
utsec = np.empty_like(T, float)
stl = np.empty((T.size, *glon.shape))
for i, t in enumerate(T):
if isinstance(t, str):
t = parse(t)
elif isinstance(t, np.datetime64):
t = t.astype(datetime.datetime)
elif isinstance(t, (datetime.datetime, datetime.date)):
pass
else:
raise TypeError('unknown time datatype {}'.format(type(t)))
# %% Day of year
doy[i] = int(t.strftime('%j'))
# %% seconds since utc midnight
utsec[i] = datetime2utsec(t)
stl[i, ...] = utsec[i] / 3600. + glon / 15.
return doy, utsec, stl |
def decode(stream, strict=True):
"""
Decodes a SOL stream. L{strict} mode ensures that the sol stream is as spec
compatible as possible.
@return: A C{tuple} containing the C{root_name} and a C{dict} of name,
value pairs.
"""
if not isinstance(stream, util.BufferedByteStream):
stream = util.BufferedByteStream(stream)
# read the version
version = stream.read(2)
if version != HEADER_VERSION:
raise pyamf.DecodeError('Unknown SOL version in header')
# read the length
length = stream.read_ulong()
if strict and stream.remaining() != length:
raise pyamf.DecodeError('Inconsistent stream header length')
# read the signature
signature = stream.read(10)
if signature != HEADER_SIGNATURE:
raise pyamf.DecodeError('Invalid signature')
length = stream.read_ushort()
root_name = stream.read_utf8_string(length)
# read padding
if stream.read(3) != PADDING_BYTE * 3:
raise pyamf.DecodeError('Invalid padding read')
decoder = pyamf.get_decoder(stream.read_uchar())
decoder.stream = stream
values = {}
while 1:
if stream.at_eof():
break
name = decoder.readString()
value = decoder.readElement()
# read the padding
if stream.read(1) != PADDING_BYTE:
raise pyamf.DecodeError('Missing padding byte')
values[name] = value
return (root_name, values) | Decodes a SOL stream. L{strict} mode ensures that the sol stream is as spec
compatible as possible.
@return: A C{tuple} containing the C{root_name} and a C{dict} of name,
value pairs. | Below is the the instruction that describes the task:
### Input:
Decodes a SOL stream. L{strict} mode ensures that the sol stream is as spec
compatible as possible.
@return: A C{tuple} containing the C{root_name} and a C{dict} of name,
value pairs.
### Response:
def decode(stream, strict=True):
"""
Decodes a SOL stream. L{strict} mode ensures that the sol stream is as spec
compatible as possible.
@return: A C{tuple} containing the C{root_name} and a C{dict} of name,
value pairs.
"""
if not isinstance(stream, util.BufferedByteStream):
stream = util.BufferedByteStream(stream)
# read the version
version = stream.read(2)
if version != HEADER_VERSION:
raise pyamf.DecodeError('Unknown SOL version in header')
# read the length
length = stream.read_ulong()
if strict and stream.remaining() != length:
raise pyamf.DecodeError('Inconsistent stream header length')
# read the signature
signature = stream.read(10)
if signature != HEADER_SIGNATURE:
raise pyamf.DecodeError('Invalid signature')
length = stream.read_ushort()
root_name = stream.read_utf8_string(length)
# read padding
if stream.read(3) != PADDING_BYTE * 3:
raise pyamf.DecodeError('Invalid padding read')
decoder = pyamf.get_decoder(stream.read_uchar())
decoder.stream = stream
values = {}
while 1:
if stream.at_eof():
break
name = decoder.readString()
value = decoder.readElement()
# read the padding
if stream.read(1) != PADDING_BYTE:
raise pyamf.DecodeError('Missing padding byte')
values[name] = value
return (root_name, values) |
def init(*args, **kwargs):
"""Returns an initialized instance of the Batch class"""
# set up cellpy logger
default_log_level = kwargs.pop("default_log_level", None)
import cellpy.log as log
log.setup_logging(custom_log_dir=prms.Paths["filelogdir"],
default_level=default_log_level)
b = Batch(*args, **kwargs)
return b | Returns an initialized instance of the Batch class | Below is the the instruction that describes the task:
### Input:
Returns an initialized instance of the Batch class
### Response:
def init(*args, **kwargs):
"""Returns an initialized instance of the Batch class"""
# set up cellpy logger
default_log_level = kwargs.pop("default_log_level", None)
import cellpy.log as log
log.setup_logging(custom_log_dir=prms.Paths["filelogdir"],
default_level=default_log_level)
b = Batch(*args, **kwargs)
return b |
def _check_file_exists_unix(self, remote_cmd=""):
"""Check if the dest_file already exists on the file system (return boolean)."""
if self.direction == "put":
self.ssh_ctl_chan._enter_shell()
remote_cmd = "ls {}".format(self.file_system)
remote_out = self.ssh_ctl_chan.send_command(
remote_cmd, expect_string=r"[\$#]"
)
self.ssh_ctl_chan._return_cli()
return self.dest_file in remote_out
elif self.direction == "get":
return os.path.exists(self.dest_file) | Check if the dest_file already exists on the file system (return boolean). | Below is the the instruction that describes the task:
### Input:
Check if the dest_file already exists on the file system (return boolean).
### Response:
def _check_file_exists_unix(self, remote_cmd=""):
"""Check if the dest_file already exists on the file system (return boolean)."""
if self.direction == "put":
self.ssh_ctl_chan._enter_shell()
remote_cmd = "ls {}".format(self.file_system)
remote_out = self.ssh_ctl_chan.send_command(
remote_cmd, expect_string=r"[\$#]"
)
self.ssh_ctl_chan._return_cli()
return self.dest_file in remote_out
elif self.direction == "get":
return os.path.exists(self.dest_file) |
def _clauses(lexer, varname, nvars):
"""Return a tuple of DIMACS CNF clauses."""
tok = next(lexer)
toktype = type(tok)
if toktype is OP_not or toktype is IntegerToken:
lexer.unpop_token(tok)
first = _clause(lexer, varname, nvars)
rest = _clauses(lexer, varname, nvars)
return (first, ) + rest
# null
else:
lexer.unpop_token(tok)
return tuple() | Return a tuple of DIMACS CNF clauses. | Below is the the instruction that describes the task:
### Input:
Return a tuple of DIMACS CNF clauses.
### Response:
def _clauses(lexer, varname, nvars):
"""Return a tuple of DIMACS CNF clauses."""
tok = next(lexer)
toktype = type(tok)
if toktype is OP_not or toktype is IntegerToken:
lexer.unpop_token(tok)
first = _clause(lexer, varname, nvars)
rest = _clauses(lexer, varname, nvars)
return (first, ) + rest
# null
else:
lexer.unpop_token(tok)
return tuple() |
def find_object(self, object_type):
"""Finds the closest object of a given type."""
node = self
while node is not None:
if isinstance(node.obj, object_type):
return node.obj
node = node.parent | Finds the closest object of a given type. | Below is the the instruction that describes the task:
### Input:
Finds the closest object of a given type.
### Response:
def find_object(self, object_type):
"""Finds the closest object of a given type."""
node = self
while node is not None:
if isinstance(node.obj, object_type):
return node.obj
node = node.parent |
def create_or_update_tags(self, tags):
"""
Creates new tags or updates existing tags for an Auto Scaling group.
:type tags: List of :class:`boto.ec2.autoscale.tag.Tag`
:param tags: The new or updated tags.
"""
params = {}
for i, tag in enumerate(tags):
tag.build_params(params, i+1)
return self.get_status('CreateOrUpdateTags', params, verb='POST') | Creates new tags or updates existing tags for an Auto Scaling group.
:type tags: List of :class:`boto.ec2.autoscale.tag.Tag`
:param tags: The new or updated tags. | Below is the the instruction that describes the task:
### Input:
Creates new tags or updates existing tags for an Auto Scaling group.
:type tags: List of :class:`boto.ec2.autoscale.tag.Tag`
:param tags: The new or updated tags.
### Response:
def create_or_update_tags(self, tags):
"""
Creates new tags or updates existing tags for an Auto Scaling group.
:type tags: List of :class:`boto.ec2.autoscale.tag.Tag`
:param tags: The new or updated tags.
"""
params = {}
for i, tag in enumerate(tags):
tag.build_params(params, i+1)
return self.get_status('CreateOrUpdateTags', params, verb='POST') |
def read_file(fname, *args, **kwargs):
"""Read data from a file saved in the standard IAMC format
or a table with year/value columns
"""
if not isstr(fname):
raise ValueError('reading multiple files not supported, '
'please use `pyam.IamDataFrame.append()`')
logger().info('Reading `{}`'.format(fname))
format_kwargs = {}
# extract kwargs that are intended for `format_data`
for c in [i for i in IAMC_IDX + ['year', 'time', 'value'] if i in kwargs]:
format_kwargs[c] = kwargs.pop(c)
return format_data(read_pandas(fname, *args, **kwargs), **format_kwargs) | Read data from a file saved in the standard IAMC format
or a table with year/value columns | Below is the the instruction that describes the task:
### Input:
Read data from a file saved in the standard IAMC format
or a table with year/value columns
### Response:
def read_file(fname, *args, **kwargs):
"""Read data from a file saved in the standard IAMC format
or a table with year/value columns
"""
if not isstr(fname):
raise ValueError('reading multiple files not supported, '
'please use `pyam.IamDataFrame.append()`')
logger().info('Reading `{}`'.format(fname))
format_kwargs = {}
# extract kwargs that are intended for `format_data`
for c in [i for i in IAMC_IDX + ['year', 'time', 'value'] if i in kwargs]:
format_kwargs[c] = kwargs.pop(c)
return format_data(read_pandas(fname, *args, **kwargs), **format_kwargs) |
def clean_text(self, address, **kwargs):
"""Basic clean-up."""
address = self.LINE_BREAKS.sub(', ', address)
address = self.COMMATA.sub(', ', address)
address = collapse_spaces(address)
if len(address):
return address | Basic clean-up. | Below is the the instruction that describes the task:
### Input:
Basic clean-up.
### Response:
def clean_text(self, address, **kwargs):
"""Basic clean-up."""
address = self.LINE_BREAKS.sub(', ', address)
address = self.COMMATA.sub(', ', address)
address = collapse_spaces(address)
if len(address):
return address |
def bounds(self):
"""
Return the overall bounding box of the scene.
Returns
--------
bounds: (2,3) float points for min, max corner
"""
corners = self.bounds_corners
bounds = np.array([corners.min(axis=0),
corners.max(axis=0)])
return bounds | Return the overall bounding box of the scene.
Returns
--------
bounds: (2,3) float points for min, max corner | Below is the the instruction that describes the task:
### Input:
Return the overall bounding box of the scene.
Returns
--------
bounds: (2,3) float points for min, max corner
### Response:
def bounds(self):
"""
Return the overall bounding box of the scene.
Returns
--------
bounds: (2,3) float points for min, max corner
"""
corners = self.bounds_corners
bounds = np.array([corners.min(axis=0),
corners.max(axis=0)])
return bounds |
def _handle_retryable_error(self, e, retry_count):
"""Sleep based on the type of :class:`RetryableAsanaError`"""
if isinstance(e, error.RateLimitEnforcedError):
time.sleep(e.retry_after)
else:
time.sleep(self.RETRY_DELAY * (self.RETRY_BACKOFF ** retry_count)) | Sleep based on the type of :class:`RetryableAsanaError` | Below is the the instruction that describes the task:
### Input:
Sleep based on the type of :class:`RetryableAsanaError`
### Response:
def _handle_retryable_error(self, e, retry_count):
"""Sleep based on the type of :class:`RetryableAsanaError`"""
if isinstance(e, error.RateLimitEnforcedError):
time.sleep(e.retry_after)
else:
time.sleep(self.RETRY_DELAY * (self.RETRY_BACKOFF ** retry_count)) |
def record(self):
# type: () -> bytes
'''
Generate a string representing the Rock Ridge Relocated Directory
record.
Parameters:
None.
Returns:
String containing the Rock Ridge record.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('RE record not yet initialized!')
return b'RE' + struct.pack('=BB', RRRERecord.length(), SU_ENTRY_VERSION) | Generate a string representing the Rock Ridge Relocated Directory
record.
Parameters:
None.
Returns:
String containing the Rock Ridge record. | Below is the the instruction that describes the task:
### Input:
Generate a string representing the Rock Ridge Relocated Directory
record.
Parameters:
None.
Returns:
String containing the Rock Ridge record.
### Response:
def record(self):
# type: () -> bytes
'''
Generate a string representing the Rock Ridge Relocated Directory
record.
Parameters:
None.
Returns:
String containing the Rock Ridge record.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('RE record not yet initialized!')
return b'RE' + struct.pack('=BB', RRRERecord.length(), SU_ENTRY_VERSION) |
def inspect_workers(self):
"""Updates the workers status.
Returns the workers which have unexpectedly ended.
"""
workers = tuple(self.workers.values())
expired = tuple(w for w in workers if not w.is_alive())
for worker in expired:
self.workers.pop(worker.pid)
return ((w.pid, w.exitcode) for w in expired if w.exitcode != 0) | Updates the workers status.
Returns the workers which have unexpectedly ended. | Below is the the instruction that describes the task:
### Input:
Updates the workers status.
Returns the workers which have unexpectedly ended.
### Response:
def inspect_workers(self):
"""Updates the workers status.
Returns the workers which have unexpectedly ended.
"""
workers = tuple(self.workers.values())
expired = tuple(w for w in workers if not w.is_alive())
for worker in expired:
self.workers.pop(worker.pid)
return ((w.pid, w.exitcode) for w in expired if w.exitcode != 0) |
def _clear_context():
'''
Clear any lxc variables set in __context__
'''
for var in [x for x in __context__ if x.startswith('lxc.')]:
log.trace('Clearing __context__[\'%s\']', var)
__context__.pop(var, None) | Clear any lxc variables set in __context__ | Below is the the instruction that describes the task:
### Input:
Clear any lxc variables set in __context__
### Response:
def _clear_context():
'''
Clear any lxc variables set in __context__
'''
for var in [x for x in __context__ if x.startswith('lxc.')]:
log.trace('Clearing __context__[\'%s\']', var)
__context__.pop(var, None) |
def _parse_search_results_html(self, doc):
""" parse search result html, return subgroups
subgroups: [{ 'title': title, 'link': link}]
"""
subgroups = []
soup = bs4.BeautifulSoup(doc, 'lxml')
ele_divs = soup.select('div.item.prel')
if not ele_divs:
return subgroups
for item in ele_divs:
ele_a = item.select('p.tt > a')
if not ele_a:
continue
link = ele_a[0].get('href')
title = ele_a[0].get_text().strip()
subgroups.append({
'title': title,
'link': link,
})
return subgroups | parse search result html, return subgroups
subgroups: [{ 'title': title, 'link': link}] | Below is the the instruction that describes the task:
### Input:
parse search result html, return subgroups
subgroups: [{ 'title': title, 'link': link}]
### Response:
def _parse_search_results_html(self, doc):
""" parse search result html, return subgroups
subgroups: [{ 'title': title, 'link': link}]
"""
subgroups = []
soup = bs4.BeautifulSoup(doc, 'lxml')
ele_divs = soup.select('div.item.prel')
if not ele_divs:
return subgroups
for item in ele_divs:
ele_a = item.select('p.tt > a')
if not ele_a:
continue
link = ele_a[0].get('href')
title = ele_a[0].get_text().strip()
subgroups.append({
'title': title,
'link': link,
})
return subgroups |
def reload(self):
"""Reloades the configuration
This method will reload the configuration instance using the last
known filename. Note this method will initially clear the
configuration and reload all entries.
"""
for section in self.sections():
self.remove_section(section)
self.autoload() | Reloades the configuration
This method will reload the configuration instance using the last
known filename. Note this method will initially clear the
configuration and reload all entries. | Below is the the instruction that describes the task:
### Input:
Reloades the configuration
This method will reload the configuration instance using the last
known filename. Note this method will initially clear the
configuration and reload all entries.
### Response:
def reload(self):
"""Reloades the configuration
This method will reload the configuration instance using the last
known filename. Note this method will initially clear the
configuration and reload all entries.
"""
for section in self.sections():
self.remove_section(section)
self.autoload() |
def p_qualifier(p):
"""qualifier : qualifierName
| qualifierName ':' flavorList
| qualifierName qualifierParameter
| qualifierName qualifierParameter ':' flavorList
"""
# pylint: disable=too-many-branches
qname = p[1]
ns = p.parser.handle.default_namespace
qval = None
flavorlist = []
if len(p) == 3:
qval = p[2]
elif len(p) == 4:
flavorlist = p[3]
elif len(p) == 5:
qval = p[2]
flavorlist = p[4]
try:
qualdecl = p.parser.qualcache[ns][qname]
except KeyError:
try:
quals = p.parser.handle.EnumerateQualifiers()
except CIMError as ce:
if ce.status_code != CIM_ERR_INVALID_NAMESPACE:
ce.file_line = (p.parser.file, p.lexer.lineno)
raise
if p.parser.verbose:
p.parser.log(
_format("Creating namespace {0!A}", ns))
p.parser.server.create_namespace(ns)
quals = None
if quals:
for qual in quals:
p.parser.qualcache[ns][qual.name] = qual
else:
for fname in ['qualifiers', 'qualifiers_optional']:
qualfile = p.parser.mofcomp.find_mof(fname)
if qualfile:
p.parser.mofcomp.compile_file(qualfile, ns)
try:
qualdecl = p.parser.qualcache[ns][qname]
except KeyError:
ce = CIMError(
CIM_ERR_FAILED,
_format("Unknown Qualifier: {0!A}", qname))
ce.file_line = (p.parser.file, p.lexer.lineno)
raise ce
flavors = _build_flavors(p[0], flavorlist, qualdecl)
if qval is None:
if qualdecl.type == 'boolean':
qval = True
else:
qval = qualdecl.value # default value
else:
qval = cimvalue(qval, qualdecl.type)
p[0] = CIMQualifier(qname, qval, type=qualdecl.type, **flavors) | qualifier : qualifierName
| qualifierName ':' flavorList
| qualifierName qualifierParameter
| qualifierName qualifierParameter ':' flavorList | Below is the instruction that describes the task:
### Input:
qualifier : qualifierName
| qualifierName ':' flavorList
| qualifierName qualifierParameter
| qualifierName qualifierParameter ':' flavorList
### Response:
def p_qualifier(p):
"""qualifier : qualifierName
| qualifierName ':' flavorList
| qualifierName qualifierParameter
| qualifierName qualifierParameter ':' flavorList
"""
# pylint: disable=too-many-branches
qname = p[1]
ns = p.parser.handle.default_namespace
qval = None
flavorlist = []
if len(p) == 3:
qval = p[2]
elif len(p) == 4:
flavorlist = p[3]
elif len(p) == 5:
qval = p[2]
flavorlist = p[4]
try:
qualdecl = p.parser.qualcache[ns][qname]
except KeyError:
try:
quals = p.parser.handle.EnumerateQualifiers()
except CIMError as ce:
if ce.status_code != CIM_ERR_INVALID_NAMESPACE:
ce.file_line = (p.parser.file, p.lexer.lineno)
raise
if p.parser.verbose:
p.parser.log(
_format("Creating namespace {0!A}", ns))
p.parser.server.create_namespace(ns)
quals = None
if quals:
for qual in quals:
p.parser.qualcache[ns][qual.name] = qual
else:
for fname in ['qualifiers', 'qualifiers_optional']:
qualfile = p.parser.mofcomp.find_mof(fname)
if qualfile:
p.parser.mofcomp.compile_file(qualfile, ns)
try:
qualdecl = p.parser.qualcache[ns][qname]
except KeyError:
ce = CIMError(
CIM_ERR_FAILED,
_format("Unknown Qualifier: {0!A}", qname))
ce.file_line = (p.parser.file, p.lexer.lineno)
raise ce
flavors = _build_flavors(p[0], flavorlist, qualdecl)
if qval is None:
if qualdecl.type == 'boolean':
qval = True
else:
qval = qualdecl.value # default value
else:
qval = cimvalue(qval, qualdecl.type)
p[0] = CIMQualifier(qname, qval, type=qualdecl.type, **flavors) |
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extracting dictionary of coefficients specific to required
# intensity measure type.
C = self.COEFFS[imt]
imean = self._get_mean(C, rup, dists, sites)
if imt.name in "SA PGA":
# Convert units to g,
# but only for PGA and SA (not PGV):
mean = np.log((10.0 ** (imean - 2.0)) / g)
else:
# PGV:
mean = np.log(10.0 ** imean)
istddevs = self._get_stddevs(C, stddev_types, len(sites.vs30))
stddevs = np.log(10.0 ** np.array(istddevs))
return mean + self.adjustment_factor, stddevs | See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values. | Below is the instruction that describes the task:
### Input:
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
### Response:
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extracting dictionary of coefficients specific to required
# intensity measure type.
C = self.COEFFS[imt]
imean = self._get_mean(C, rup, dists, sites)
if imt.name in "SA PGA":
# Convert units to g,
# but only for PGA and SA (not PGV):
mean = np.log((10.0 ** (imean - 2.0)) / g)
else:
# PGV:
mean = np.log(10.0 ** imean)
istddevs = self._get_stddevs(C, stddev_types, len(sites.vs30))
stddevs = np.log(10.0 ** np.array(istddevs))
return mean + self.adjustment_factor, stddevs |
def grant(self, fail_on_found=False, **kwargs):
"""Add a user or a team to a role. Required information:
1) Type of the role
2) Resource of the role, inventory, credential, or any other
3) A user or a team to add to the role
=====API DOCS=====
Add a user or a team to a role. Required information:
* Type of the role.
* Resource of the role, inventory, credential, or any other.
* A user or a team to add to the role.
:param fail_on_found: Flag that if set, the operation fails if a user/team already has the role.
:type fail_on_found: bool
:param `**kwargs`: The user to be associated and the role to associate.
:returns: parsed JSON of role grant.
:rtype: dict
=====API DOCS=====
"""
return self.role_write(fail_on_found=fail_on_found, **kwargs) | Add a user or a team to a role. Required information:
1) Type of the role
2) Resource of the role, inventory, credential, or any other
3) A user or a team to add to the role
=====API DOCS=====
Add a user or a team to a role. Required information:
* Type of the role.
* Resource of the role, inventory, credential, or any other.
* A user or a team to add to the role.
:param fail_on_found: Flag that if set, the operation fails if a user/team already has the role.
:type fail_on_found: bool
:param `**kwargs`: The user to be associated and the role to associate.
:returns: parsed JSON of role grant.
:rtype: dict
=====API DOCS===== | Below is the instruction that describes the task:
### Input:
Add a user or a team to a role. Required information:
1) Type of the role
2) Resource of the role, inventory, credential, or any other
3) A user or a team to add to the role
=====API DOCS=====
Add a user or a team to a role. Required information:
* Type of the role.
* Resource of the role, inventory, credential, or any other.
* A user or a team to add to the role.
:param fail_on_found: Flag that if set, the operation fails if a user/team already has the role.
:type fail_on_found: bool
:param `**kwargs`: The user to be associated and the role to associate.
:returns: parsed JSON of role grant.
:rtype: dict
=====API DOCS=====
### Response:
def grant(self, fail_on_found=False, **kwargs):
"""Add a user or a team to a role. Required information:
1) Type of the role
2) Resource of the role, inventory, credential, or any other
3) A user or a team to add to the role
=====API DOCS=====
Add a user or a team to a role. Required information:
* Type of the role.
* Resource of the role, inventory, credential, or any other.
* A user or a team to add to the role.
:param fail_on_found: Flag that if set, the operation fails if a user/team already has the role.
:type fail_on_found: bool
:param `**kwargs`: The user to be associated and the role to associate.
:returns: parsed JSON of role grant.
:rtype: dict
=====API DOCS=====
"""
return self.role_write(fail_on_found=fail_on_found, **kwargs) |
def visit_str(self, node, _):
"""Regex rule for quoted string allowing escaped quotes inside.
Arguments
---------
node : parsimonious.nodes.Node.
_ (children) : list, unused
Result
------
str
The wanted string, with quoted characters unquoted.
Example
-------
>>> BaseParser('"foo"', default_rule='STR').data
'foo'
>>> BaseParser("'foo'", default_rule='STR').data
'foo'
>>> BaseParser('''"foo b'ar"''', default_rule='STR').data
"foo b'ar"
>>> BaseParser('''"foo b\'ar"''', default_rule='STR').data
"foo b'ar"
>>> BaseParser(r"'foo b\\'ar'", default_rule='STR').data
"foo b'ar"
Notes
-----
The regex works this way:
Two quotes (single or double, the starting one and the ending one should be the same)
surrounding zero or more of "any character that's not a quote (same as the
starting/ending ones) or a backslash" or "a backslash followed by any character".
"""
# remove surrounding quotes and remove single backslashes
return self.visit_str.re_single_backslash.sub('', node.text[1:-1]) | Regex rule for quoted string allowing escaped quotes inside.
Arguments
---------
node : parsimonious.nodes.Node.
_ (children) : list, unused
Result
------
str
The wanted string, with quoted characters unquoted.
Example
-------
>>> BaseParser('"foo"', default_rule='STR').data
'foo'
>>> BaseParser("'foo'", default_rule='STR').data
'foo'
>>> BaseParser('''"foo b'ar"''', default_rule='STR').data
"foo b'ar"
>>> BaseParser('''"foo b\'ar"''', default_rule='STR').data
"foo b'ar"
>>> BaseParser(r"'foo b\\'ar'", default_rule='STR').data
"foo b'ar"
Notes
-----
The regex works this way:
Two quotes (single or double, the starting one and the ending one should be the same)
surrounding zero or more of "any character that's not a quote (same as the
starting/ending ones) or a backslash" or "a backslash followed by any character". | Below is the instruction that describes the task:
### Input:
Regex rule for quoted string allowing escaped quotes inside.
Arguments
---------
node : parsimonious.nodes.Node.
_ (children) : list, unused
Result
------
str
The wanted string, with quoted characters unquoted.
Example
-------
>>> BaseParser('"foo"', default_rule='STR').data
'foo'
>>> BaseParser("'foo'", default_rule='STR').data
'foo'
>>> BaseParser('''"foo b'ar"''', default_rule='STR').data
"foo b'ar"
>>> BaseParser('''"foo b\'ar"''', default_rule='STR').data
"foo b'ar"
>>> BaseParser(r"'foo b\\'ar'", default_rule='STR').data
"foo b'ar"
Notes
-----
The regex works this way:
Two quotes (single or double, the starting one and the ending one should be the same)
surrounding zero or more of "any character that's not a quote (same as the
starting/ending ones) or a backslash" or "a backslash followed by any character".
### Response:
def visit_str(self, node, _):
"""Regex rule for quoted string allowing escaped quotes inside.
Arguments
---------
node : parsimonious.nodes.Node.
_ (children) : list, unused
Result
------
str
The wanted string, with quoted characters unquoted.
Example
-------
>>> BaseParser('"foo"', default_rule='STR').data
'foo'
>>> BaseParser("'foo'", default_rule='STR').data
'foo'
>>> BaseParser('''"foo b'ar"''', default_rule='STR').data
"foo b'ar"
>>> BaseParser('''"foo b\'ar"''', default_rule='STR').data
"foo b'ar"
>>> BaseParser(r"'foo b\\'ar'", default_rule='STR').data
"foo b'ar"
Notes
-----
The regex works this way:
Two quotes (single or double, the starting one and the ending one should be the same)
surrounding zero or more of "any character that's not a quote (same as the
starting/ending ones) or a backslash" or "a backslash followed by any character".
"""
# remove surrounding quotes and remove single backslashes
return self.visit_str.re_single_backslash.sub('', node.text[1:-1]) |
def convert_point(self, point):
'''
Converts the relative position of @point into an absolute position. To
be used for event considerations, blitting is handled directly by the
Container().
'''
return self.container.convert_point(Vector(point) + self.pos) | Converts the relative position of @point into an absolute position. To
be used for event considerations, blitting is handled directly by the
Container(). | Below is the instruction that describes the task:
### Input:
Converts the relative position of @point into an absolute position. To
be used for event considerations, blitting is handled directly by the
Container().
### Response:
def convert_point(self, point):
'''
Converts the relative position of @point into an absolute position. To
be used for event considerations, blitting is handled directly by the
Container().
'''
return self.container.convert_point(Vector(point) + self.pos) |
def dump(self):
"""
Dump topology to disk
"""
try:
topo = project_to_topology(self)
path = self._topology_file()
log.debug("Write %s", path)
with open(path + ".tmp", "w+", encoding="utf-8") as f:
json.dump(topo, f, indent=4, sort_keys=True)
shutil.move(path + ".tmp", path)
except OSError as e:
raise aiohttp.web.HTTPInternalServerError(text="Could not write topology: {}".format(e)) | Dump topology to disk | Below is the the instruction that describes the task:
### Input:
Dump topology to disk
### Response:
def dump(self):
"""
Dump topology to disk
"""
try:
topo = project_to_topology(self)
path = self._topology_file()
log.debug("Write %s", path)
with open(path + ".tmp", "w+", encoding="utf-8") as f:
json.dump(topo, f, indent=4, sort_keys=True)
shutil.move(path + ".tmp", path)
except OSError as e:
raise aiohttp.web.HTTPInternalServerError(text="Could not write topology: {}".format(e)) |
def set_thumbnail(self, **kwargs):
"""
set thumbnail of embed
:keyword url: source url of thumbnail (only supports http(s) and attachments)
:keyword proxy_url: a proxied thumbnail of the image
:keyword height: height of thumbnail
:keyword width: width of thumbnail
"""
self.thumbnail = {
'url': kwargs.get('url'),
'proxy_url': kwargs.get('proxy_url'),
'height': kwargs.get('height'),
'width': kwargs.get('width'),
} | set thumbnail of embed
:keyword url: source url of thumbnail (only supports http(s) and attachments)
:keyword proxy_url: a proxied thumbnail of the image
:keyword height: height of thumbnail
:keyword width: width of thumbnail | Below is the instruction that describes the task:
### Input:
set thumbnail of embed
:keyword url: source url of thumbnail (only supports http(s) and attachments)
:keyword proxy_url: a proxied thumbnail of the image
:keyword height: height of thumbnail
:keyword width: width of thumbnail
### Response:
def set_thumbnail(self, **kwargs):
"""
set thumbnail of embed
:keyword url: source url of thumbnail (only supports http(s) and attachments)
:keyword proxy_url: a proxied thumbnail of the image
:keyword height: height of thumbnail
:keyword width: width of thumbnail
"""
self.thumbnail = {
'url': kwargs.get('url'),
'proxy_url': kwargs.get('proxy_url'),
'height': kwargs.get('height'),
'width': kwargs.get('width'),
} |
def by_symbol(symbol, country_code=None):
"""Get list of possible currencies for symbol; filter by country_code
Look for all currencies that use the `symbol`. If there are currencies used
in the country of `country_code`, return only those; otherwise return all
found currencies.
Parameters:
symbol: unicode Currency symbol.
country_code: Optional[unicode] Iso3166 alpha2 country code.
Returns:
List[Currency]: Currency objects for `symbol`; filter by country_code.
"""
res = _data()['symbol'].get(symbol)
if res:
tmp_res = []
for d in res:
if country_code in d.countries:
tmp_res += [d]
if tmp_res:
return tmp_res
if country_code is None:
return res | Get list of possible currencies for symbol; filter by country_code
Look for all currencies that use the `symbol`. If there are currencies used
in the country of `country_code`, return only those; otherwise return all
found currencies.
Parameters:
symbol: unicode Currency symbol.
country_code: Optional[unicode] Iso3166 alpha2 country code.
Returns:
List[Currency]: Currency objects for `symbol`; filter by country_code. | Below is the instruction that describes the task:
### Input:
Get list of possible currencies for symbol; filter by country_code
Look for all currencies that use the `symbol`. If there are currencies used
in the country of `country_code`, return only those; otherwise return all
found currencies.
Parameters:
symbol: unicode Currency symbol.
country_code: Optional[unicode] Iso3166 alpha2 country code.
Returns:
List[Currency]: Currency objects for `symbol`; filter by country_code.
### Response:
def by_symbol(symbol, country_code=None):
"""Get list of possible currencies for symbol; filter by country_code
Look for all currencies that use the `symbol`. If there are currencies used
in the country of `country_code`, return only those; otherwise return all
found currencies.
Parameters:
symbol: unicode Currency symbol.
country_code: Optional[unicode] Iso3166 alpha2 country code.
Returns:
List[Currency]: Currency objects for `symbol`; filter by country_code.
"""
res = _data()['symbol'].get(symbol)
if res:
tmp_res = []
for d in res:
if country_code in d.countries:
tmp_res += [d]
if tmp_res:
return tmp_res
if country_code is None:
return res |
def get_max_tail_check(y_Arai, y_tail, t_Arai, tail_temps, n_tail):
"""
input: y_Arai, y_tail, t_Arai, tail_temps, n_tail
output: max_check, diffs
"""
if not n_tail:
return float('nan'), []
tail_compare = []
y_Arai_compare = []
for temp in tail_temps[:n_tail]:
tail_index = list(tail_temps).index(temp)
tail_check = y_tail[tail_index]
tail_compare.append(tail_check)
arai_index = list(t_Arai).index(temp)
nrm_orig = y_Arai[arai_index]
y_Arai_compare.append(nrm_orig)
diffs = numpy.array(y_Arai_compare) - numpy.array(tail_compare)
abs_diffs = abs(diffs)
max_check = max(abs_diffs)
return max_check, diffs | input: y_Arai, y_tail, t_Arai, tail_temps, n_tail
output: max_check, diffs | Below is the instruction that describes the task:
### Input:
input: y_Arai, y_tail, t_Arai, tail_temps, n_tail
output: max_check, diffs
### Response:
def get_max_tail_check(y_Arai, y_tail, t_Arai, tail_temps, n_tail):
"""
input: y_Arai, y_tail, t_Arai, tail_temps, n_tail
output: max_check, diffs
"""
if not n_tail:
return float('nan'), []
tail_compare = []
y_Arai_compare = []
for temp in tail_temps[:n_tail]:
tail_index = list(tail_temps).index(temp)
tail_check = y_tail[tail_index]
tail_compare.append(tail_check)
arai_index = list(t_Arai).index(temp)
nrm_orig = y_Arai[arai_index]
y_Arai_compare.append(nrm_orig)
diffs = numpy.array(y_Arai_compare) - numpy.array(tail_compare)
abs_diffs = abs(diffs)
max_check = max(abs_diffs)
return max_check, diffs |
def from_wif_file(path: str) -> SigningKeyType:
"""
Return SigningKey instance from Duniter WIF file
:param path: Path to WIF file
"""
with open(path, 'r') as fh:
wif_content = fh.read()
# check data field
regex = compile('Data: ([1-9A-HJ-NP-Za-km-z]+)', MULTILINE)
match = search(regex, wif_content)
if not match:
raise Exception('Error: Bad format WIF v1 file')
# capture hexa wif key
wif_hex = match.groups()[0]
return SigningKey.from_wif_hex(wif_hex) | Return SigningKey instance from Duniter WIF file
:param path: Path to WIF file | Below is the instruction that describes the task:
### Input:
Return SigningKey instance from Duniter WIF file
:param path: Path to WIF file
### Response:
def from_wif_file(path: str) -> SigningKeyType:
"""
Return SigningKey instance from Duniter WIF file
:param path: Path to WIF file
"""
with open(path, 'r') as fh:
wif_content = fh.read()
# check data field
regex = compile('Data: ([1-9A-HJ-NP-Za-km-z]+)', MULTILINE)
match = search(regex, wif_content)
if not match:
raise Exception('Error: Bad format WIF v1 file')
# capture hexa wif key
wif_hex = match.groups()[0]
return SigningKey.from_wif_hex(wif_hex) |
def getFieldMax(self, fieldName):
"""
If underlying implementation does not support min/max stats collection,
or if a field type does not support min/max (non scalars), the return
value will be None.
:param fieldName: (string) name of field to get max
:returns: current maximum value for the field ``fieldName``.
"""
stats = self.getStats()
if stats == None:
return None
maxValues = stats.get('max', None)
if maxValues == None:
return None
index = self.getFieldNames().index(fieldName)
return maxValues[index] | If underlying implementation does not support min/max stats collection,
or if a field type does not support min/max (non scalars), the return
value will be None.
:param fieldName: (string) name of field to get max
:returns: current maximum value for the field ``fieldName``. | Below is the instruction that describes the task:
### Input:
If underlying implementation does not support min/max stats collection,
or if a field type does not support min/max (non scalars), the return
value will be None.
:param fieldName: (string) name of field to get max
:returns: current maximum value for the field ``fieldName``.
### Response:
def getFieldMax(self, fieldName):
"""
If underlying implementation does not support min/max stats collection,
or if a field type does not support min/max (non scalars), the return
value will be None.
:param fieldName: (string) name of field to get max
:returns: current maximum value for the field ``fieldName``.
"""
stats = self.getStats()
if stats == None:
return None
maxValues = stats.get('max', None)
if maxValues == None:
return None
index = self.getFieldNames().index(fieldName)
return maxValues[index] |
def resolve_absolute_name(self, name):
'''
Resolve a field from an absolute name.
An absolute name is just like unix absolute path,
starts with '/' and each name component is separated by '/'.
:param name: absolute name, e.g. "/container/subcontainer/field"
:return: field with this absolute name
:raises: KittyException if field could not be resolved
'''
current = self
while current.enclosing:
current = current.enclosing
if name != '/':
components = name.split('/')[1:]
for component in components:
current = current.get_field_by_name(component)
return current | Resolve a field from an absolute name.
An absolute name is just like unix absolute path,
starts with '/' and each name component is separated by '/'.
:param name: absolute name, e.g. "/container/subcontainer/field"
:return: field with this absolute name
:raises: KittyException if field could not be resolved | Below is the instruction that describes the task:
### Input:
Resolve a field from an absolute name.
An absolute name is just like unix absolute path,
starts with '/' and each name component is separated by '/'.
:param name: absolute name, e.g. "/container/subcontainer/field"
:return: field with this absolute name
:raises: KittyException if field could not be resolved
### Response:
def resolve_absolute_name(self, name):
'''
Resolve a field from an absolute name.
An absolute name is just like unix absolute path,
starts with '/' and each name component is separated by '/'.
:param name: absolute name, e.g. "/container/subcontainer/field"
:return: field with this absolute name
:raises: KittyException if field could not be resolved
'''
current = self
while current.enclosing:
current = current.enclosing
if name != '/':
components = name.split('/')[1:]
for component in components:
current = current.get_field_by_name(component)
return current |
def parse_message(self, tup_tree):
"""
::
<!ELEMENT MESSAGE (SIMPLEREQ | MULTIREQ | SIMPLERSP | MULTIRSP |
SIMPLEEXPREQ | MULTIEXPREQ | SIMPLEEXPRSP |
MULTIEXPRSP)
<!ATTLIST MESSAGE
ID CDATA #REQUIRED
PROTOCOLVERSION CDATA #REQUIRED>
"""
self.check_node(tup_tree, 'MESSAGE', ('ID', 'PROTOCOLVERSION'))
child = self.one_child(tup_tree,
('SIMPLEREQ', 'MULTIREQ', 'SIMPLERSP',
'MULTIRSP', 'SIMPLEEXPREQ', 'MULTIEXPREQ',
'SIMPLEEXPRSP', 'MULTIEXPRSP'))
return name(tup_tree), attrs(tup_tree), child | ::
<!ELEMENT MESSAGE (SIMPLEREQ | MULTIREQ | SIMPLERSP | MULTIRSP |
SIMPLEEXPREQ | MULTIEXPREQ | SIMPLEEXPRSP |
MULTIEXPRSP)
<!ATTLIST MESSAGE
ID CDATA #REQUIRED
PROTOCOLVERSION CDATA #REQUIRED> | Below is the instruction that describes the task:
### Input:
::
<!ELEMENT MESSAGE (SIMPLEREQ | MULTIREQ | SIMPLERSP | MULTIRSP |
SIMPLEEXPREQ | MULTIEXPREQ | SIMPLEEXPRSP |
MULTIEXPRSP)
<!ATTLIST MESSAGE
ID CDATA #REQUIRED
PROTOCOLVERSION CDATA #REQUIRED>
### Response:
def parse_message(self, tup_tree):
"""
::
<!ELEMENT MESSAGE (SIMPLEREQ | MULTIREQ | SIMPLERSP | MULTIRSP |
SIMPLEEXPREQ | MULTIEXPREQ | SIMPLEEXPRSP |
MULTIEXPRSP)
<!ATTLIST MESSAGE
ID CDATA #REQUIRED
PROTOCOLVERSION CDATA #REQUIRED>
"""
self.check_node(tup_tree, 'MESSAGE', ('ID', 'PROTOCOLVERSION'))
child = self.one_child(tup_tree,
('SIMPLEREQ', 'MULTIREQ', 'SIMPLERSP',
'MULTIRSP', 'SIMPLEEXPREQ', 'MULTIEXPREQ',
'SIMPLEEXPRSP', 'MULTIEXPRSP'))
return name(tup_tree), attrs(tup_tree), child |
def save_shared_file(self, sharekey=None):
"""
Save a SharedFile to your Shake.
Args:
sharekey (str): Sharekey for the file to save.
Returns:
SharedFile saved to your shake.
"""
endpoint = '/api/sharedfile/{sharekey}/save'.format(sharekey=sharekey)
data = self._make_request("POST", endpoint=endpoint, data=None)
try:
sf = SharedFile.NewFromJSON(data)
sf.saved = True
return sf
except:
raise Exception("{0}".format(data['error'])) | Save a SharedFile to your Shake.
Args:
sharekey (str): Sharekey for the file to save.
Returns:
SharedFile saved to your shake. | Below is the instruction that describes the task:
### Input:
Save a SharedFile to your Shake.
Args:
sharekey (str): Sharekey for the file to save.
Returns:
SharedFile saved to your shake.
### Response:
def save_shared_file(self, sharekey=None):
"""
Save a SharedFile to your Shake.
Args:
sharekey (str): Sharekey for the file to save.
Returns:
SharedFile saved to your shake.
"""
endpoint = '/api/sharedfile/{sharekey}/save'.format(sharekey=sharekey)
data = self._make_request("POST", endpoint=endpoint, data=None)
try:
sf = SharedFile.NewFromJSON(data)
sf.saved = True
return sf
except:
raise Exception("{0}".format(data['error'])) |
def check_encoder_decoder_args(args) -> None:
"""
Check possible encoder-decoder argument conflicts.
:param args: Arguments as returned by argparse.
"""
encoder_embed_dropout, decoder_embed_dropout = args.embed_dropout
encoder_rnn_dropout_inputs, decoder_rnn_dropout_inputs = args.rnn_dropout_inputs
encoder_rnn_dropout_states, decoder_rnn_dropout_states = args.rnn_dropout_states
if encoder_embed_dropout > 0 and encoder_rnn_dropout_inputs > 0:
logger.warning("Setting encoder RNN AND source embedding dropout > 0 leads to "
"two dropout layers on top of each other.")
if decoder_embed_dropout > 0 and decoder_rnn_dropout_inputs > 0:
logger.warning("Setting encoder RNN AND source embedding dropout > 0 leads to "
"two dropout layers on top of each other.")
encoder_rnn_dropout_recurrent, decoder_rnn_dropout_recurrent = args.rnn_dropout_recurrent
if encoder_rnn_dropout_recurrent > 0 or decoder_rnn_dropout_recurrent > 0:
check_condition(args.rnn_cell_type == C.LSTM_TYPE,
"Recurrent dropout without memory loss only supported for LSTMs right now.") | Check possible encoder-decoder argument conflicts.
:param args: Arguments as returned by argparse. | Below is the instruction that describes the task:
### Input:
Check possible encoder-decoder argument conflicts.
:param args: Arguments as returned by argparse.
### Response:
def check_encoder_decoder_args(args) -> None:
"""
Check possible encoder-decoder argument conflicts.
:param args: Arguments as returned by argparse.
"""
encoder_embed_dropout, decoder_embed_dropout = args.embed_dropout
encoder_rnn_dropout_inputs, decoder_rnn_dropout_inputs = args.rnn_dropout_inputs
encoder_rnn_dropout_states, decoder_rnn_dropout_states = args.rnn_dropout_states
if encoder_embed_dropout > 0 and encoder_rnn_dropout_inputs > 0:
logger.warning("Setting encoder RNN AND source embedding dropout > 0 leads to "
"two dropout layers on top of each other.")
if decoder_embed_dropout > 0 and decoder_rnn_dropout_inputs > 0:
logger.warning("Setting encoder RNN AND source embedding dropout > 0 leads to "
"two dropout layers on top of each other.")
encoder_rnn_dropout_recurrent, decoder_rnn_dropout_recurrent = args.rnn_dropout_recurrent
if encoder_rnn_dropout_recurrent > 0 or decoder_rnn_dropout_recurrent > 0:
check_condition(args.rnn_cell_type == C.LSTM_TYPE,
"Recurrent dropout without memory loss only supported for LSTMs right now.") |
def nunique(expr):
"""
The distinct count.
:param expr:
:return:
"""
output_type = types.int64
if isinstance(expr, SequenceExpr):
return NUnique(_value_type=output_type, _inputs=[expr])
elif isinstance(expr, SequenceGroupBy):
return GroupedNUnique(_data_type=output_type, _inputs=[expr.to_column()], _grouped=expr.input)
elif isinstance(expr, CollectionExpr):
unique_input = _extract_unique_input(expr)
if unique_input:
return nunique(unique_input)
else:
return NUnique(_value_type=types.int64, _inputs=expr._project_fields)
elif isinstance(expr, GroupBy):
if expr._to_agg:
inputs = expr.input[expr._to_agg.names]._project_fields
else:
inputs = expr.input._project_fields
return GroupedNUnique(_data_type=types.int64, _inputs=inputs,
_grouped=expr) | The distinct count.
:param expr:
:return: | Below is the instruction that describes the task:
### Input:
The distinct count.
:param expr:
:return:
### Response:
def nunique(expr):
"""
The distinct count.
:param expr:
:return:
"""
output_type = types.int64
if isinstance(expr, SequenceExpr):
return NUnique(_value_type=output_type, _inputs=[expr])
elif isinstance(expr, SequenceGroupBy):
return GroupedNUnique(_data_type=output_type, _inputs=[expr.to_column()], _grouped=expr.input)
elif isinstance(expr, CollectionExpr):
unique_input = _extract_unique_input(expr)
if unique_input:
return nunique(unique_input)
else:
return NUnique(_value_type=types.int64, _inputs=expr._project_fields)
elif isinstance(expr, GroupBy):
if expr._to_agg:
inputs = expr.input[expr._to_agg.names]._project_fields
else:
inputs = expr.input._project_fields
return GroupedNUnique(_data_type=types.int64, _inputs=inputs,
_grouped=expr) |
def init(self, key_value_pairs):
"""Initialize datastore.
Only sets values for keys that are not in the datastore already.
:param dict key_value_pairs:
A set of key value pairs to use to initialize the datastore.
"""
for k, v in key_value_pairs.items():
if k not in DatastoreLegacy.store[self.domain]:
DatastoreLegacy.store[self.domain][k] = v | Initialize datastore.
Only sets values for keys that are not in the datastore already.
:param dict key_value_pairs:
A set of key value pairs to use to initialize the datastore. | Below is the instruction that describes the task:
### Input:
Initialize datastore.
Only sets values for keys that are not in the datastore already.
:param dict key_value_pairs:
A set of key value pairs to use to initialize the datastore.
### Response:
def init(self, key_value_pairs):
"""Initialize datastore.
Only sets values for keys that are not in the datastore already.
:param dict key_value_pairs:
A set of key value pairs to use to initialize the datastore.
"""
for k, v in key_value_pairs.items():
if k not in DatastoreLegacy.store[self.domain]:
DatastoreLegacy.store[self.domain][k] = v |
def vamp_e_score(K, C00_train, C0t_train, Ctt_train, C00_test, C0t_test, Ctt_test, k=None):
""" Computes the VAMP-E score of a kinetic model.
Ranks the kinetic model described by the estimation of covariances C00, C0t and Ctt,
defined by:
:math:`C_{0t}^{train} = E_t[x_t x_{t+\tau}^T]`
:math:`C_{tt}^{train} = E_t[x_{t+\tau} x_{t+\tau}^T]`
These model covariances might have been subject to symmetrization or reweighting,
depending on the type of model used.
The covariances C00, C0t and Ctt of the test data are direct empirical estimates.
singular vectors U and V using the test data
with covariances C00, C0t, Ctt. U and V should come from the SVD of the symmetrized
transition matrix or Koopman matrix:
:math:`(C00^{train})^{-(1/2)} C0t^{train} (Ctt^{train})^{-(1/2)} = U S V.T`
Parameters:
-----------
K : ndarray(n, k)
left singular vectors of the symmetrized transition matrix or Koopman matrix
C00_train : ndarray(n, n)
covariance matrix of the training data, defined by
:math:`C_{00}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_t^T`
C0t_train : ndarray(n, n)
time-lagged covariance matrix of the training data, defined by
:math:`C_{0t}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_{t+\tau}^T`
Ctt_train : ndarray(n, n)
covariance matrix of the training data, defined by
:math:`C_{tt}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_{t+\tau} x_{t+\tau}^T`
C00_test : ndarray(n, n)
covariance matrix of the test data, defined by
:math:`C_{00}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_t^T`
C0t_test : ndarray(n, n)
time-lagged covariance matrix of the test data, defined by
:math:`C_{0t}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_{t+\tau}^T`
Ctt_test : ndarray(n, n)
covariance matrix of the test data, defined by
:math:`C_{tt}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_{t+\tau} x_{t+\tau}^T`
k : int
number of slow processes to consider in the score
Returns:
--------
vampE : float
VAMP-E score
"""
# SVD of symmetrized operator in empirical distribution
U, s, V = _svd_sym_koopman(K, C00_train, Ctt_train)
# Optionally truncate to the k leading singular values/vectors.
if k is not None:
U = U[:, :k]
S = np.diag(s[:k])
V = V[:, :k]
# VAMP-E = trace(2 V S U^T C0t_test - V S U^T C00_test U S V^T Ctt_test)
score = np.trace(2.0 * mdot(V, S, U.T, C0t_test) - mdot(V, S, U.T, C00_test, U, S, V.T, Ctt_test))
return score |
Ranks the kinetic model described by the estimation of covariances C00, C0t and Ctt,
defined by:
:math:`C_{0t}^{train} = E_t[x_t x_{t+\tau}^T]`
:math:`C_{tt}^{train} = E_t[x_{t+\tau} x_{t+\tau}^T]`
These model covariances might have been subject to symmetrization or reweighting,
depending on the type of model used.
The covariances C00, C0t and Ctt of the test data are direct empirical estimates.
singular vectors U and V using the test data
with covariances C00, C0t, Ctt. U and V should come from the SVD of the symmetrized
transition matrix or Koopman matrix:
:math:`(C00^{train})^{-(1/2)} C0t^{train} (Ctt^{train})^{-(1/2)} = U S V.T`
Parameters:
-----------
K : ndarray(n, k)
left singular vectors of the symmetrized transition matrix or Koopman matrix
C00_train : ndarray(n, n)
covariance matrix of the training data, defined by
:math:`C_{00}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_t^T`
C0t_train : ndarray(n, n)
time-lagged covariance matrix of the training data, defined by
:math:`C_{0t}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_{t+\tau}^T`
Ctt_train : ndarray(n, n)
covariance matrix of the training data, defined by
:math:`C_{tt}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_{t+\tau} x_{t+\tau}^T`
C00_test : ndarray(n, n)
covariance matrix of the test data, defined by
:math:`C_{00}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_t^T`
C0t_test : ndarray(n, n)
time-lagged covariance matrix of the test data, defined by
:math:`C_{0t}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_{t+\tau}^T`
Ctt_test : ndarray(n, n)
covariance matrix of the test data, defined by
:math:`C_{tt}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_{t+\tau} x_{t+\tau}^T`
k : int
number of slow processes to consider in the score
Returns:
--------
vampE : float
VAMP-E score | Below is the instruction that describes the task:
### Input:
Computes the VAMP-E score of a kinetic model.
Ranks the kinetic model described by the estimation of covariances C00, C0t and Ctt,
defined by:
:math:`C_{0t}^{train} = E_t[x_t x_{t+\tau}^T]`
:math:`C_{tt}^{train} = E_t[x_{t+\tau} x_{t+\tau}^T]`
These model covariances might have been subject to symmetrization or reweighting,
depending on the type of model used.
The covariances C00, C0t and Ctt of the test data are direct empirical estimates.
singular vectors U and V using the test data
with covariances C00, C0t, Ctt. U and V should come from the SVD of the symmetrized
transition matrix or Koopman matrix:
:math:`(C00^{train})^{-(1/2)} C0t^{train} (Ctt^{train})^{-(1/2)} = U S V.T`
Parameters:
-----------
K : ndarray(n, k)
left singular vectors of the symmetrized transition matrix or Koopman matrix
C00_train : ndarray(n, n)
covariance matrix of the training data, defined by
:math:`C_{00}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_t^T`
C0t_train : ndarray(n, n)
time-lagged covariance matrix of the training data, defined by
:math:`C_{0t}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_{t+\tau}^T`
Ctt_train : ndarray(n, n)
covariance matrix of the training data, defined by
:math:`C_{tt}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_{t+\tau} x_{t+\tau}^T`
C00_test : ndarray(n, n)
covariance matrix of the test data, defined by
:math:`C_{00}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_t^T`
C0t_test : ndarray(n, n)
time-lagged covariance matrix of the test data, defined by
:math:`C_{0t}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_{t+\tau}^T`
Ctt_test : ndarray(n, n)
covariance matrix of the test data, defined by
:math:`C_{tt}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_{t+\tau} x_{t+\tau}^T`
k : int
number of slow processes to consider in the score
Returns:
--------
vampE : float
VAMP-E score
### Response:
def vamp_e_score(K, C00_train, C0t_train, Ctt_train, C00_test, C0t_test, Ctt_test, k=None):
""" Computes the VAMP-E score of a kinetic model.
Ranks the kinetic model described by the estimation of covariances C00, C0t and Ctt,
defined by:
:math:`C_{0t}^{train} = E_t[x_t x_{t+\tau}^T]`
:math:`C_{tt}^{train} = E_t[x_{t+\tau} x_{t+\tau}^T]`
These model covariances might have been subject to symmetrization or reweighting,
depending on the type of model used.
The covariances C00, C0t and Ctt of the test data are direct empirical estimates.
singular vectors U and V using the test data
with covariances C00, C0t, Ctt. U and V should come from the SVD of the symmetrized
transition matrix or Koopman matrix:
:math:`(C00^{train})^{-(1/2)} C0t^{train} (Ctt^{train})^{-(1/2)} = U S V.T`
Parameters:
-----------
K : ndarray(n, k)
left singular vectors of the symmetrized transition matrix or Koopman matrix
C00_train : ndarray(n, n)
covariance matrix of the training data, defined by
:math:`C_{00}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_t^T`
C0t_train : ndarray(n, n)
time-lagged covariance matrix of the training data, defined by
:math:`C_{0t}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_{t+\tau}^T`
Ctt_train : ndarray(n, n)
covariance matrix of the training data, defined by
:math:`C_{tt}^{train} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_{t+\tau} x_{t+\tau}^T`
C00_test : ndarray(n, n)
covariance matrix of the test data, defined by
:math:`C_{00}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_t^T`
C0t_test : ndarray(n, n)
time-lagged covariance matrix of the test data, defined by
:math:`C_{0t}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_t x_{t+\tau}^T`
Ctt_test : ndarray(n, n)
covariance matrix of the test data, defined by
:math:`C_{tt}^{test} = (T-\tau)^{-1} \sum_{t=0}^{T-\tau} x_{t+\tau} x_{t+\tau}^T`
k : int
number of slow processes to consider in the score
Returns:
--------
vampE : float
VAMP-E score
"""
# SVD of symmetrized operator in empirical distribution
U, s, V = _svd_sym_koopman(K, C00_train, Ctt_train)
# Optionally truncate to the k leading singular values/vectors.
if k is not None:
U = U[:, :k]
S = np.diag(s[:k])
V = V[:, :k]
# VAMP-E = trace(2 V S U^T C0t_test - V S U^T C00_test U S V^T Ctt_test)
score = np.trace(2.0 * mdot(V, S, U.T, C0t_test) - mdot(V, S, U.T, C00_test, U, S, V.T, Ctt_test))
return score
def MakeDynamicPotentialFunc(kBT_Gamma, density, SpringPotnlFunc):
"""
Creates the function that calculates the potential given
the position (in volts) and the radius of the particle.
Parameters
----------
kBT_Gamma : float
Value of kB*T/Gamma
density : float
density of the nanoparticle
SpringPotnlFunc : function
Function which takes the value of position (in volts)
and returns the spring potential
Returns
-------
PotentialFunc : function
function that calculates the potential given
the position (in volts) and the radius of the
particle.
"""
def PotentialFunc(xdata, Radius):
"""
calculates the potential given the position (in volts)
and the radius of the particle.
Parameters
----------
xdata : ndarray
Positon data (in volts)
Radius : float
Radius in units of nm
Returns
-------
Potential : ndarray
Dynamical Spring Potential at positions given by xdata
"""
# Mass of a solid sphere: (4/3)*pi*r^3*density, with Radius converted nm -> m.
mass = ((4/3)*np.pi*((Radius*10**-9)**3))*density
# Scale the spring potential by kB*T/(Gamma*mass).
yfit=(kBT_Gamma/mass)
Y = yfit*SpringPotnlFunc(xdata)
return Y
return PotentialFunc | Creates the function that calculates the potential given
the position (in volts) and the radius of the particle.
Parameters
----------
kBT_Gamma : float
Value of kB*T/Gamma
density : float
density of the nanoparticle
SpringPotnlFunc : function
Function which takes the value of position (in volts)
and returns the spring potential
Returns
-------
PotentialFunc : function
function that calculates the potential given
the position (in volts) and the radius of the
particle. | Below is the the instruction that describes the task:
### Input:
Creates the function that calculates the potential given
the position (in volts) and the radius of the particle.
Parameters
----------
kBT_Gamma : float
Value of kB*T/Gamma
density : float
density of the nanoparticle
SpringPotnlFunc : function
Function which takes the value of position (in volts)
and returns the spring potential
Returns
-------
PotentialFunc : function
function that calculates the potential given
the position (in volts) and the radius of the
particle.
### Response:
def MakeDynamicPotentialFunc(kBT_Gamma, density, SpringPotnlFunc):
"""
Creates the function that calculates the potential given
the position (in volts) and the radius of the particle.
Parameters
----------
kBT_Gamma : float
Value of kB*T/Gamma
density : float
density of the nanoparticle
SpringPotnlFunc : function
Function which takes the value of position (in volts)
and returns the spring potential
Returns
-------
PotentialFunc : function
function that calculates the potential given
the position (in volts) and the radius of the
particle.
"""
def PotentialFunc(xdata, Radius):
"""
calculates the potential given the position (in volts)
and the radius of the particle.
Parameters
----------
xdata : ndarray
Positon data (in volts)
Radius : float
Radius in units of nm
Returns
-------
Potential : ndarray
Dynamical Spring Potential at positions given by xdata
"""
mass = ((4/3)*np.pi*((Radius*10**-9)**3))*density
yfit=(kBT_Gamma/mass)
Y = yfit*SpringPotnlFunc(xdata)
return Y
return PotentialFunc |
def step_undefined_step_snippets_should_exist_for_table(context):
"""
Checks if undefined-step snippets are provided.
EXAMPLE:
Then undefined-step snippets should exist for:
| Step |
| When an undefined step is used |
| Then another undefined step is used |
"""
# A table of steps is mandatory; each row is checked individually.
assert context.table, "REQUIRES: table"
for row in context.table.rows:
step = row["Step"]
step_undefined_step_snippet_should_exist_for(context, step) | Checks if undefined-step snippets are provided.
EXAMPLE:
Then undefined-step snippets should exist for:
| Step |
| When an undefined step is used |
| Then another undefined step is used | | Below is the instruction that describes the task:
### Input:
Checks if undefined-step snippets are provided.
EXAMPLE:
Then undefined-step snippets should exist for:
| Step |
| When an undefined step is used |
| Then another undefined step is used |
### Response:
def step_undefined_step_snippets_should_exist_for_table(context):
    """Assert that an undefined-step snippet exists for every step in the table.

    EXAMPLE:
        Then undefined-step snippets should exist for:
            | Step                                |
            | When an undefined step is used      |
            | Then another undefined step is used |
    """
    assert context.table, "REQUIRES: table"
    # Delegate the actual snippet check to the single-step assertion, row by row.
    for table_row in context.table.rows:
        step_undefined_step_snippet_should_exist_for(context, table_row["Step"])
def do_interact(self, arg):
"""
interact
Start an interative interpreter whose global namespace
contains all the names found in the current scope.
"""
# Copy globals so the interactive session cannot mutate the frame's real globals dict.
ns = self.curframe.f_globals.copy()
# Locals layered on top so they shadow same-named globals.
ns.update(self.curframe_locals)
code.interact("*interactive*", local=ns) | interact
Start an interative interpreter whose global namespace
contains all the names found in the current scope. | Below is the the instruction that describes the task:
### Input:
interact
Start an interative interpreter whose global namespace
contains all the names found in the current scope.
### Response:
def do_interact(self, arg):
    """
    interact
    Start an interative interpreter whose global namespace
    contains all the names found in the current scope.
    """
    # Build a fresh namespace: globals first, locals layered on top so
    # they shadow same-named globals; the frame's own dicts are untouched.
    namespace = dict(self.curframe.f_globals)
    namespace.update(self.curframe_locals)
    code.interact("*interactive*", local=namespace)
def get_content_item_inlines(plugins=None, base=BaseContentItemInline):
"""
Dynamically generate genuine django inlines for all registered content item types.
When the `plugins` parameter is ``None``, all plugin inlines are returned.
"""
# Admin options that are mirrored from each plugin onto its generated inline.
COPY_FIELDS = (
'form', 'raw_id_fields', 'filter_vertical', 'filter_horizontal',
'radio_fields', 'prepopulated_fields', 'formfield_overrides', 'readonly_fields',
)
if plugins is None:
plugins = extensions.plugin_pool.get_plugins()
inlines = []
for plugin in plugins: # self.model._supported_...()
# Avoid errors that are hard to trace
if not isinstance(plugin, extensions.ContentPlugin):
raise TypeError("get_content_item_inlines() expects to receive ContentPlugin instances, not {0}".format(plugin))
ContentItemType = plugin.model
# Create a new Type that inherits CmsPageItemInline
# Read the static fields of the ItemType to override default appearance.
# This code is based on FeinCMS, (c) Simon Meers, BSD licensed
class_name = '%s_AutoInline' % ContentItemType.__name__
attrs = {
'__module__': plugin.__class__.__module__,
'model': ContentItemType,
# Add metadata properties for template
'name': plugin.verbose_name,
'plugin': plugin,
'type_name': plugin.type_name,
'extra_fieldsets': plugin.fieldsets,
'cp_admin_form_template': plugin.admin_form_template,
'cp_admin_init_template': plugin.admin_init_template,
}
# Copy a restricted set of admin fields to the inline model too.
for name in COPY_FIELDS:
if getattr(plugin, name):
attrs[name] = getattr(plugin, name)
inlines.append(type(class_name, (base,), attrs))
# For consistency, enforce ordering
inlines.sort(key=lambda inline: inline.name.lower())
return inlines | Dynamically generate genuine django inlines for all registered content item types.
When the `plugins` parameter is ``None``, all plugin inlines are returned. | Below is the the instruction that describes the task:
### Input:
Dynamically generate genuine django inlines for all registered content item types.
When the `plugins` parameter is ``None``, all plugin inlines are returned.
### Response:
def get_content_item_inlines(plugins=None, base=BaseContentItemInline):
    """
    Dynamically generate genuine django inlines for all registered content item types.
    When the `plugins` parameter is ``None``, all plugin inlines are returned.
    """
    # Admin options that are safe to mirror from the plugin onto the inline.
    COPY_FIELDS = (
        'form', 'raw_id_fields', 'filter_vertical', 'filter_horizontal',
        'radio_fields', 'prepopulated_fields', 'formfield_overrides', 'readonly_fields',
    )
    if plugins is None:
        plugins = extensions.plugin_pool.get_plugins()

    generated = []
    for plugin in plugins:
        # Reject non-plugin objects early; errors later are hard to trace.
        if not isinstance(plugin, extensions.ContentPlugin):
            raise TypeError("get_content_item_inlines() expects to receive ContentPlugin instances, not {0}".format(plugin))

        item_model = plugin.model
        # Build a fresh inline subclass of `base`, mirroring the plugin's admin
        # configuration (approach adapted from FeinCMS, (c) Simon Meers, BSD licensed).
        attrs = {
            '__module__': plugin.__class__.__module__,
            'model': item_model,
            # Metadata properties used by the admin templates
            'name': plugin.verbose_name,
            'plugin': plugin,
            'type_name': plugin.type_name,
            'extra_fieldsets': plugin.fieldsets,
            'cp_admin_form_template': plugin.admin_form_template,
            'cp_admin_init_template': plugin.admin_init_template,
        }
        # Mirror only the whitelisted admin fields that the plugin actually sets.
        attrs.update(
            (field, getattr(plugin, field))
            for field in COPY_FIELDS if getattr(plugin, field)
        )
        generated.append(type('%s_AutoInline' % item_model.__name__, (base,), attrs))

    # Deterministic, case-insensitive ordering by display name.
    generated.sort(key=lambda inline: inline.name.lower())
    return generated
def _item_to_blob(iterator, item):
"""Convert a JSON blob to the native object.
.. note::
This assumes that the ``bucket`` attribute has been
added to the iterator after being created.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that has retrieved the item.
:type item: dict
:param item: An item to be converted to a blob.
:rtype: :class:`.Blob`
:returns: The next blob in the page.
"""
# The blob is bound to the iterator's bucket (attached post-construction, see note).
name = item.get("name")
blob = Blob(name, bucket=iterator.bucket)
blob._set_properties(item)
return blob | Convert a JSON blob to the native object.
.. note::
This assumes that the ``bucket`` attribute has been
added to the iterator after being created.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that has retrieved the item.
:type item: dict
:param item: An item to be converted to a blob.
:rtype: :class:`.Blob`
:returns: The next blob in the page. | Below is the the instruction that describes the task:
### Input:
Convert a JSON blob to the native object.
.. note::
This assumes that the ``bucket`` attribute has been
added to the iterator after being created.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that has retrieved the item.
:type item: dict
:param item: An item to be converted to a blob.
:rtype: :class:`.Blob`
:returns: The next blob in the page.
### Response:
def _item_to_blob(iterator, item):
    """Convert a JSON blob to the native object.
    .. note::
        This assumes that the ``bucket`` attribute has been
        added to the iterator after being created.
    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that has retrieved the item.
    :type item: dict
    :param item: An item to be converted to a blob.
    :rtype: :class:`.Blob`
    :returns: The next blob in the page.
    """
    # Bind the new blob to the iterator's bucket, then load its properties.
    new_blob = Blob(item.get("name"), bucket=iterator.bucket)
    new_blob._set_properties(item)
    return new_blob
def create_scans(urls_file):
"""
This method is rather simple, it will group the urls to be scanner together
based on (protocol, domain and port).
:param urls_file: The filename with all the URLs
:return: A list of scans to be run
"""
cli_logger.debug('Starting to process batch input file')
created_scans = []
for line in urls_file:
line = line.strip()
if line.startswith('#'):
continue
if not line:
continue
# NOTE(review): "except ValueError, ve" is Python-2-only syntax;
# Python 3 requires "except ValueError as ve".
try:
protocol, domain, port, path = parse_url(line)
except ValueError, ve:
cli_logger.debug(str(ve))
continue
# Merge the path into an existing scan for the same (protocol, domain, port), if any.
for scan in created_scans:
if scan.matches(protocol, domain, port):
scan.add_path(path)
args = (path, scan.get_root_url())
cli_logger.debug('Added %s to %s' % args)
break
else:
# No existing scan matched: start a new one.
scan = BatchScan(protocol, domain, port, path)
created_scans.append(scan)
cli_logger.debug('Added a new scan to %s' % scan.get_root_url())
cli_logger.debug('Created a total of %s scans' % len(created_scans))
return created_scans | This method is rather simple, it will group the urls to be scanner together
based on (protocol, domain and port).
:param urls_file: The filename with all the URLs
:return: A list of scans to be run | Below is the instruction that describes the task:
### Input:
This method is rather simple, it will group the urls to be scanner together
based on (protocol, domain and port).
:param urls_file: The filename with all the URLs
:return: A list of scans to be run
### Response:
def create_scans(urls_file):
    """
    Group the URLs to be scanned by (protocol, domain, port).

    Each line of ``urls_file`` is parsed; blank lines and ``#`` comments
    are skipped, and unparseable URLs are logged and ignored.  URLs that
    share a (protocol, domain, port) triple are merged into one scan.

    :param urls_file: The file (iterable of lines) with all the URLs.
    :return: A list of BatchScan objects to be run.
    """
    cli_logger.debug('Starting to process batch input file')
    created_scans = []
    for line in urls_file:
        line = line.strip()
        # Skip comments and empty lines.
        if not line or line.startswith('#'):
            continue
        try:
            protocol, domain, port, path = parse_url(line)
        # "except E as name" works on Python 2.6+ and 3.x; the original
        # "except ValueError, ve" form is a SyntaxError on Python 3.
        except ValueError as ve:
            cli_logger.debug(str(ve))
            continue
        for scan in created_scans:
            if scan.matches(protocol, domain, port):
                scan.add_path(path)
                args = (path, scan.get_root_url())
                cli_logger.debug('Added %s to %s' % args)
                break
        else:
            # No existing scan matched this (protocol, domain, port) triple.
            scan = BatchScan(protocol, domain, port, path)
            created_scans.append(scan)
            cli_logger.debug('Added a new scan to %s' % scan.get_root_url())
    cli_logger.debug('Created a total of %s scans' % len(created_scans))
    return created_scans
def _notify_delete(self, index_or_slice):
"""Notify about a deletion at an index_or_slice.
:return: a function that notifies about an add at the same place.
"""
# Single index: notify only when the index is actually in range;
# otherwise (and for unsupported key types) None is returned implicitly.
if isinstance(index_or_slice, int):
length = len(self)
if -length <= index_or_slice < length:
self._notify_remove_at(index_or_slice)
return lambda: self._notify_add_at(index_or_slice)
elif isinstance(index_or_slice, slice):
# Normalise the slice to absolute bounds before notifying the removal.
slice_ = slice(*index_or_slice.indices(len(self)))
self._notify_remove(slice_)
# NOTE(review): the undo lambda re-adds using the raw slice rather than
# the normalised slice_ -- confirm that is intended.
return lambda: self._notify_add(index_or_slice) | Notify about a deletion at an index_or_slice.
:return: a function that notifies about an add at the same place. | Below is the the instruction that describes the task:
### Input:
Notify about a deletion at an index_or_slice.
:return: a function that notifies about an add at the same place.
### Response:
def _notify_delete(self, index_or_slice):
    """Notify about a deletion at an index_or_slice.
    :return: a function that notifies about an add at the same place.
    """
    if isinstance(index_or_slice, int):
        size = len(self)
        # Only in-range indices trigger a notification.
        if -size <= index_or_slice < size:
            self._notify_remove_at(index_or_slice)
            return lambda: self._notify_add_at(index_or_slice)
    elif isinstance(index_or_slice, slice):
        # Normalise to absolute bounds for the removal notification.
        normalized = slice(*index_or_slice.indices(len(self)))
        self._notify_remove(normalized)
        return lambda: self._notify_add(index_or_slice)
def notify_badge_added_certified(sender, kind=''):
'''
Send an email when a `CERTIFIED` badge is added to an `Organization`
Parameters
----------
sender
The object that emitted the event.
kind: str
The kind of `Badge` object awarded.
'''
# Only react to the CERTIFIED badge on Organization senders.
if kind == CERTIFIED and isinstance(sender, Organization):
# Every member of the organization receives the notification.
recipients = [member.user for member in sender.members]
subject = _(
'Your organization "%(name)s" has been certified',
name=sender.name
)
mail.send(
subject,
recipients,
'badge_added_certified',
organization=sender,
badge=sender.get_badge(kind)
) | Send an email when a `CERTIFIED` badge is added to an `Organization`
Parameters
----------
sender
The object that emitted the event.
kind: str
The kind of `Badge` object awarded. | Below is the the instruction that describes the task:
### Input:
Send an email when a `CERTIFIED` badge is added to an `Organization`
Parameters
----------
sender
The object that emitted the event.
kind: str
The kind of `Badge` object awarded.
### Response:
def notify_badge_added_certified(sender, kind=''):
    '''
    Send an email when a `CERTIFIED` badge is added to an `Organization`
    Parameters
    ----------
    sender
        The object that emitted the event.
    kind: str
        The kind of `Badge` object awarded.
    '''
    # Guard: only the CERTIFIED badge on an Organization triggers mail.
    if kind != CERTIFIED or not isinstance(sender, Organization):
        return
    subject = _(
        'Your organization "%(name)s" has been certified',
        name=sender.name
    )
    # Every member of the organization receives the notification.
    mail.send(
        subject,
        [member.user for member in sender.members],
        'badge_added_certified',
        organization=sender,
        badge=sender.get_badge(kind)
    )
def _ensure_core_connections(self):
"""
If any host has fewer than the configured number of core connections
open, attempt to open connections until that number is met.
"""
# tuple(...) snapshots both collections so they may change while we
# iterate (presumably from other threads -- confirm).
for session in tuple(self.sessions):
for pool in tuple(session._pools.values()):
pool.ensure_core_connections() | If any host has fewer than the configured number of core connections
open, attempt to open connections until that number is met. | Below is the the instruction that describes the task:
### Input:
If any host has fewer than the configured number of core connections
open, attempt to open connections until that number is met.
### Response:
def _ensure_core_connections(self):
    """
    If any host has fewer than the configured number of core connections
    open, attempt to open connections until that number is met.
    """
    # Iterate over snapshots so the underlying collections may change safely.
    sessions_snapshot = tuple(self.sessions)
    for session in sessions_snapshot:
        for pool in tuple(session._pools.values()):
            pool.ensure_core_connections()
def paint( self, painter, option, widget ):
"""
Paints this item.
:param painter | <QPainter>
option | <QGraphicsOption>
widget | <QWidget>
"""
painter.save()
# Stroke the base path with a 2px pen in this item's colour.
pen = QPen(self.color())
pen.setWidth(2)
painter.setPen(pen)
painter.drawPath(self._basePath)
# Ellipses: antialiased, white outline, filled with the item colour.
painter.setRenderHint(painter.Antialiasing)
pen.setColor(QColor('white'))
painter.setPen(pen)
painter.setBrush(self.color())
for ellipse in self._ellipses:
painter.drawEllipse(ellipse, 6, 6)
painter.restore() | Paints this item.
:param painter | <QPainter>
option | <QGraphicsOption>
widget | <QWidget> | Below is the the instruction that describes the task:
### Input:
Paints this item.
:param painter | <QPainter>
option | <QGraphicsOption>
widget | <QWidget>
### Response:
def paint( self, painter, option, widget ):
    """
    Paints this item.
    :param painter | <QPainter>
    option | <QGraphicsOption>
    widget | <QWidget>
    """
    painter.save()
    # Stroke the base path with a 2px pen in this item's colour.
    outline_pen = QPen(self.color())
    outline_pen.setWidth(2)
    painter.setPen(outline_pen)
    painter.drawPath(self._basePath)
    # Draw each ellipse antialiased, with a white outline and colour fill.
    painter.setRenderHint(painter.Antialiasing)
    outline_pen.setColor(QColor('white'))
    painter.setPen(outline_pen)
    painter.setBrush(self.color())
    for ellipse_point in self._ellipses:
        painter.drawEllipse(ellipse_point, 6, 6)
    painter.restore()
def _access_user_info(self):
"""
Accesses the :attr:`.user_info_url`.
:returns:
:class:`.UserInfoResponse`
"""
# Fill the URL template's placeholders from the user's attributes.
url = self.user_info_url.format(**self.user.__dict__)
return self.access(url) | Accesses the :attr:`.user_info_url`.
:returns:
:class:`.UserInfoResponse` | Below is the the instruction that describes the task:
### Input:
Accesses the :attr:`.user_info_url`.
:returns:
:class:`.UserInfoResponse`
### Response:
def _access_user_info(self):
"""
Accesses the :attr:`.user_info_url`.
:returns:
:class:`.UserInfoResponse`
"""
url = self.user_info_url.format(**self.user.__dict__)
return self.access(url) |
def FindDevice(self, address):
'''Find a specific device by bluetooth address.
'''
# Device object paths live under /org/bluez/ and contain "dev_".
for obj in mockobject.objects.keys():
if obj.startswith('/org/bluez/') and 'dev_' in obj:
o = mockobject.objects[obj]
if o.props[DEVICE_IFACE]['Address'] \
== dbus.String(address, variant_level=1):
return obj
# No registered device carries the requested address.
raise dbus.exceptions.DBusException('No such device.',
name='org.bluez.Error.NoSuchDevice') | Find a specific device by bluetooth address.
### Input:
Find a specific device by bluetooth address.
### Response:
def FindDevice(self, address):
    '''Find a specific device by bluetooth address.
    '''
    wanted = dbus.String(address, variant_level=1)
    for path, mock_obj in mockobject.objects.items():
        # Device object paths live under /org/bluez/ and contain "dev_".
        if path.startswith('/org/bluez/') and 'dev_' in path:
            if mock_obj.props[DEVICE_IFACE]['Address'] == wanted:
                return path
    raise dbus.exceptions.DBusException('No such device.',
                                        name='org.bluez.Error.NoSuchDevice')
def get(self, identity):
"""
Constructs a EntityContext
:param identity: Unique identity of the Entity
:returns: twilio.rest.authy.v1.service.entity.EntityContext
:rtype: twilio.rest.authy.v1.service.entity.EntityContext
"""
# The parent service's sid is carried over from the stored solution dict.
return EntityContext(self._version, service_sid=self._solution['service_sid'], identity=identity, ) | Constructs a EntityContext
:param identity: Unique identity of the Entity
:returns: twilio.rest.authy.v1.service.entity.EntityContext
:rtype: twilio.rest.authy.v1.service.entity.EntityContext | Below is the instruction that describes the task:
### Input:
Constructs a EntityContext
:param identity: Unique identity of the Entity
:returns: twilio.rest.authy.v1.service.entity.EntityContext
:rtype: twilio.rest.authy.v1.service.entity.EntityContext
### Response:
def get(self, identity):
    """
    Constructs a EntityContext
    :param identity: Unique identity of the Entity
    :returns: twilio.rest.authy.v1.service.entity.EntityContext
    :rtype: twilio.rest.authy.v1.service.entity.EntityContext
    """
    # Carry the parent service's sid over from the stored solution dict.
    service_sid = self._solution['service_sid']
    return EntityContext(self._version, service_sid=service_sid, identity=identity)
def query_for_observations(mjd, observable, runid_list):
"""Do a QUERY on the TAP service for all observations that are part of runid,
where taken after mjd and have calibration 'observable'.
Schema is at: http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/tap/tables
mjd : float
observable: str ( 2 or 1 )
runid: tuple eg. ('13AP05', '13AP06')
"""
# ADQL query; the TAP service returns the result set as a VOTable.
data = {"QUERY": ("SELECT Observation.target_name as TargetName, "
"COORD1(CENTROID(Plane.position_bounds)) AS RA,"
"COORD2(CENTROID(Plane.position_bounds)) AS DEC, "
"Plane.time_bounds_lower AS StartDate, "
"Plane.time_exposure AS ExposureTime, "
"Observation.instrument_name AS Instrument, "
"Plane.energy_bandpassName AS Filter, "
"Observation.observationID AS dataset_name, "
"Observation.proposal_id AS ProposalID, "
"Observation.proposal_pi AS PI "
"FROM caom2.Observation AS Observation "
"JOIN caom2.Plane AS Plane ON "
"Observation.obsID = Plane.obsID "
"WHERE ( Observation.collection = 'CFHT' ) "
"AND Plane.time_bounds_lower > %d "
"AND Plane.calibrationLevel=%s "
"AND Observation.proposal_id IN %s ") % (mjd, observable, str(runid_list)),
"REQUEST": "doQuery",
"LANG": "ADQL",
"FORMAT": "votable"}
# NOTE(review): verify=False disables TLS certificate verification -- confirm intended.
result = requests.get(storage.TAP_WEB_SERVICE, params=data, verify=False)
assert isinstance(result, requests.Response)
logging.debug("Doing TAP Query using url: %s" % (str(result.url)))
# Spool the response body to a temp file so parse() can read it by name.
temp_file = tempfile.NamedTemporaryFile()
with open(temp_file.name, 'w') as outfile:
outfile.write(result.text)
try:
vot = parse(temp_file.name).get_first_table()
except Exception as ex:
logging.error(str(ex))
logging.error(result.text)
raise ex
# Rows are returned ordered by observation start date.
vot.array.sort(order='StartDate')
t = vot.array
temp_file.close()
logging.debug("Got {} lines from tap query".format(len(t)))
return t | Do a QUERY on the TAP service for all observations that are part of runid,
where taken after mjd and have calibration 'observable'.
Schema is at: http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/tap/tables
mjd : float
observable: str ( 2 or 1 )
runid: tuple eg. ('13AP05', '13AP06') | Below is the the instruction that describes the task:
### Input:
Do a QUERY on the TAP service for all observations that are part of runid,
where taken after mjd and have calibration 'observable'.
Schema is at: http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/tap/tables
mjd : float
observable: str ( 2 or 1 )
runid: tuple eg. ('13AP05', '13AP06')
### Response:
def query_for_observations(mjd, observable, runid_list):
    """Do a QUERY on the TAP service for all observations that are part of runid,
    where taken after mjd and have calibration 'observable'.

    Schema is at: http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/tap/tables

    :param mjd: float -- only rows with Plane.time_bounds_lower > mjd are returned
    :param observable: str ( 2 or 1 ) -- calibration level interpolated into the query
    :param runid_list: tuple eg. ('13AP05', '13AP06') -- proposal IDs for the IN clause
    :return: numpy structured array of result rows, sorted by StartDate
    """
    # Build the ADQL query with %-interpolation.
    # NOTE(review): %d truncates a fractional mjd, and the IN clause relies on
    # the tuple's repr matching ADQL syntax (breaks for a 1-tuple) -- confirm.
    data = {"QUERY": ("SELECT Observation.target_name as TargetName, "
                      "COORD1(CENTROID(Plane.position_bounds)) AS RA,"
                      "COORD2(CENTROID(Plane.position_bounds)) AS DEC, "
                      "Plane.time_bounds_lower AS StartDate, "
                      "Plane.time_exposure AS ExposureTime, "
                      "Observation.instrument_name AS Instrument, "
                      "Plane.energy_bandpassName AS Filter, "
                      "Observation.observationID AS dataset_name, "
                      "Observation.proposal_id AS ProposalID, "
                      "Observation.proposal_pi AS PI "
                      "FROM caom2.Observation AS Observation "
                      "JOIN caom2.Plane AS Plane ON "
                      "Observation.obsID = Plane.obsID "
                      "WHERE ( Observation.collection = 'CFHT' ) "
                      "AND Plane.time_bounds_lower > %d "
                      "AND Plane.calibrationLevel=%s "
                      "AND Observation.proposal_id IN %s ") % (mjd, observable, str(runid_list)),
            "REQUEST": "doQuery",
            "LANG": "ADQL",
            "FORMAT": "votable"}
    # HACK: verify=False disables TLS certificate verification -- flagged for review.
    result = requests.get(storage.TAP_WEB_SERVICE, params=data, verify=False)
    assert isinstance(result, requests.Response)
    logging.debug("Doing TAP Query using url: %s" % (str(result.url)))
    # The VOTable parser wants a file path, so spool the response body to a
    # named temporary file first.
    temp_file = tempfile.NamedTemporaryFile()
    with open(temp_file.name, 'w') as outfile:
        outfile.write(result.text)
    try:
        vot = parse(temp_file.name).get_first_table()
    except Exception as ex:
        # Log the raw response so a malformed VOTable can be diagnosed.
        logging.error(str(ex))
        logging.error(result.text)
        raise ex
    # Sort in place by observation start date before returning.
    vot.array.sort(order='StartDate')
    t = vot.array
    temp_file.close()
    logging.debug("Got {} lines from tap query".format(len(t)))
    return t
def delete(self, symbol):
    """
    Deletes a Symbol.

    Looks up the Symbol (when given a name), drops its backing data table
    if one exists, then removes the Symbol row and commits the session.

    Parameters
    ----------
    symbol : str or Symbol
        The name of a Symbol (resolved via ``self.get``) or a Symbol
        instance to delete.
    """
    # Accept either a name or a Symbol instance (Python 2: str/unicode).
    if isinstance(symbol, (str, unicode)):
        sym = self.get(symbol)
    elif isinstance(symbol, Symbol):
        sym = symbol
    else:
        raise Exception("Invalid symbol {}".format((repr(symbol))))
    # Has to handle the case where the table would exist already
    # and where it wouldn't.
    try:
        # Reflect the symbol's data table from the database and drop it.
        sym.datatable = Table(sym.name, Base.metadata, autoload=True)
        sym.datatable.drop(self.eng, checkfirst=True)
    except NoSuchTableError:
        # Table never existed; nothing to drop.
        print "No worries, {} never existed to begin with.".format(sym.name)
    self.ses.delete(sym)
    self.ses.commit()
Parameters
----------
symbol : str or Symbol | Below is the the instruction that describes the task:
### Input:
Deletes a Symbol.
Parameters
----------
symbol : str or Symbol
### Response:
def delete(self, symbol):
"""
Deletes a Symbol.
Parameters
----------
symbol : str or Symbol
"""
if isinstance(symbol, (str, unicode)):
sym = self.get(symbol)
elif isinstance(symbol, Symbol):
sym = symbol
else:
raise Exception("Invalid symbol {}".format((repr(symbol))))
# Has to handle the case where the table would exist already
# and where it wouldn't.
try:
sym.datatable = Table(sym.name, Base.metadata, autoload=True)
sym.datatable.drop(self.eng, checkfirst=True)
except NoSuchTableError:
print "No worries, {} never existed to begin with.".format(sym.name)
self.ses.delete(sym)
self.ses.commit() |
def wait_stop(self):
    """
    Stop the stream and wait for it to stop.

    See :meth:`stop` for the general stopping conditions. You can assume
    that :meth:`stop` is the first thing this coroutine calls.
    """
    # Nothing to wait for if the stream was never started or already stopped.
    if not self.running:
        return
    self.stop()
    try:
        # Wait for the worker task to finish; its cancellation counts as a
        # clean stop, so CancelledError is deliberately swallowed.
        yield from self._task
    except asyncio.CancelledError:
        pass
See :meth:`stop` for the general stopping conditions. You can assume
that :meth:`stop` is the first thing this coroutine calls. | Below is the the instruction that describes the task:
### Input:
Stop the stream and wait for it to stop.
See :meth:`stop` for the general stopping conditions. You can assume
that :meth:`stop` is the first thing this coroutine calls.
### Response:
def wait_stop(self):
"""
Stop the stream and wait for it to stop.
See :meth:`stop` for the general stopping conditions. You can assume
that :meth:`stop` is the first thing this coroutine calls.
"""
if not self.running:
return
self.stop()
try:
yield from self._task
except asyncio.CancelledError:
pass |
def logger(self):
""":class:`logging.Logger` of this instance"""
if not self.is_main:
return self.main.logger
try:
return self._logger
except AttributeError:
name = '%s.%s.%s' % (self.__module__, self.__class__.__name__,
self.num)
self._logger = logging.getLogger(name)
self.logger.debug('Initializing...')
return self._logger | :class:`logging.Logger` of this instance | Below is the the instruction that describes the task:
### Input:
:class:`logging.Logger` of this instance
### Response:
def logger(self):
""":class:`logging.Logger` of this instance"""
if not self.is_main:
return self.main.logger
try:
return self._logger
except AttributeError:
name = '%s.%s.%s' % (self.__module__, self.__class__.__name__,
self.num)
self._logger = logging.getLogger(name)
self.logger.debug('Initializing...')
return self._logger |
def get_tpm_status(d_info):
    """Get the TPM support status.

    Get the TPM support status of the node.

    :param d_info: the list of ipmitool parameters for accessing a node.
    :returns: True if TPM is supported, False otherwise.
    :raises IPMIFailure: if the raw command fails or returns a non-zero
        completion code.
    """
    # note:
    # Get TPM support status : ipmi cmd '0xF5', valid flags '0xC0'
    #
    # $ ipmitool raw 0x2E 0xF5 0x80 0x28 0x00 0x81 0xC0
    #
    # Raw response:
    # 80 28 00 C0 C0: True
    # 80 28 00 -- --: False (other values than 'C0 C0')
    ipmicmd = ipmi_command.Command(bmc=d_info['irmc_address'],
                                   userid=d_info['irmc_username'],
                                   password=d_info['irmc_password'])
    try:
        response = _send_raw_command(ipmicmd, GET_TPM_STATUS)
        if response['code'] != 0:
            # Fixed typo in the operation name ("TMP" -> "TPM").
            raise IPMIFailure(
                "IPMI operation '%(operation)s' failed: %(error)s" %
                {'operation': "GET TPM status",
                 'error': response.get('error')})
        out = ' '.join('{:02X}'.format(x) for x in response['data'])
        # TPM is supported when the response data ends with 'C0 C0'.
        # (join() always returns a str, so no None check is needed.)
        return out.endswith('C0 C0')
    except ipmi_exception.IpmiException as e:
        raise IPMIFailure(
            "IPMI operation '%(operation)s' failed: %(error)s" %
            {'operation': "GET TPM status", 'error': e})
Get the TPM support status of the node.
:param d_info: the list of ipmitool parameters for accessing a node.
:returns: TPM support status | Below is the the instruction that describes the task:
### Input:
Get the TPM support status.
Get the TPM support status of the node.
:param d_info: the list of ipmitool parameters for accessing a node.
:returns: TPM support status
### Response:
def get_tpm_status(d_info):
"""Get the TPM support status.
Get the TPM support status of the node.
:param d_info: the list of ipmitool parameters for accessing a node.
:returns: TPM support status
"""
# note:
# Get TPM support status : ipmi cmd '0xF5', valid flags '0xC0'
#
# $ ipmitool raw 0x2E 0xF5 0x80 0x28 0x00 0x81 0xC0
#
# Raw response:
# 80 28 00 C0 C0: True
# 80 28 00 -- --: False (other values than 'C0 C0')
ipmicmd = ipmi_command.Command(bmc=d_info['irmc_address'],
userid=d_info['irmc_username'],
password=d_info['irmc_password'])
try:
response = _send_raw_command(ipmicmd, GET_TPM_STATUS)
if response['code'] != 0:
raise IPMIFailure(
"IPMI operation '%(operation)s' failed: %(error)s" %
{'operation': "GET TMP status",
'error': response.get('error')})
out = ' '.join('{:02X}'.format(x) for x in response['data'])
return out is not None and out[-5:] == 'C0 C0'
except ipmi_exception.IpmiException as e:
raise IPMIFailure(
"IPMI operation '%(operation)s' failed: %(error)s" %
{'operation': "GET TMP status", 'error': e}) |
def calc_db(peak, refval, mphonecaldb=0):
    u"""
    Converts voltage difference into decibels : 20*log10(peak/refval)

    :param peak: amplitude
    :type peak: float or np.array
    :param refval: This can be either a another sound peak(or RMS val), to get the dB difference, or the microphone mphone_sensitivity
    :type refval: float
    :param mphonecaldb: If using the microphone sensitivity for refval, provide the dB SPL the microphone was calibrated at. Otherwise, leave as 0
    :type mphonecaldb: int
    :returns: float -- decibels difference (comparision), or dB SPL (using microphone sensitivity)
    """
    if refval == 0:
        # Division by zero reference is undefined; signal with NaN.
        return np.nan
    if hasattr(peak, '__iter__'):
        # Work on a float copy so the caller's array/list is never mutated.
        # (The old in-place `peak[peak == 0] = nan` clobbered the caller's
        # data and, for a plain list, overwrote element 0 via peak[False].)
        peak = np.array(peak, dtype=float)
        peak[peak == 0] = np.nan
    pbdB = mphonecaldb + (20. * np.log10(peak / refval))
    return pbdB
Converts voltage difference into decibels : 20*log10(peak/refval)
:param peak: amplitude
:type peak: float or np.array
:param refval: This can be either a another sound peak(or RMS val), to get the dB difference, or the microphone mphone_sensitivity
:type refval: float
:param mphonecaldb: If using the microphone sensitivity for refval, provide the dB SPL the microphone was calibrated at. Otherwise, leave as 0
:type mphonecaldb: int
:returns: float -- decibels difference (comparision), or dB SPL (using microphone sensitivity) | Below is the the instruction that describes the task:
### Input:
u"""
Converts voltage difference into decibels : 20*log10(peak/refval)
:param peak: amplitude
:type peak: float or np.array
:param refval: This can be either a another sound peak(or RMS val), to get the dB difference, or the microphone mphone_sensitivity
:type refval: float
:param mphonecaldb: If using the microphone sensitivity for refval, provide the dB SPL the microphone was calibrated at. Otherwise, leave as 0
:type mphonecaldb: int
:returns: float -- decibels difference (comparision), or dB SPL (using microphone sensitivity)
### Response:
def calc_db(peak, refval, mphonecaldb=0):
u"""
Converts voltage difference into decibels : 20*log10(peak/refval)
:param peak: amplitude
:type peak: float or np.array
:param refval: This can be either a another sound peak(or RMS val), to get the dB difference, or the microphone mphone_sensitivity
:type refval: float
:param mphonecaldb: If using the microphone sensitivity for refval, provide the dB SPL the microphone was calibrated at. Otherwise, leave as 0
:type mphonecaldb: int
:returns: float -- decibels difference (comparision), or dB SPL (using microphone sensitivity)
"""
if refval == 0:
return np.nan
if hasattr(peak, '__iter__'):
peak[peak == 0] = np.nan
pbdB = mphonecaldb + (20. * np.log10(peak / refval))
return pbdB |
def get_dimension_array(array):
    """
    Get dimension of an array getting the number of rows and the max num of
    columns.

    Returns ``[rows, cols]``.  A flat (non-nested) array reports one column,
    and an empty array reports ``[0, 1]`` instead of raising ``ValueError``.
    """
    # Only treat the input as two-dimensional when it is non-empty and every
    # element is itself a list (all() is vacuously True for [], which used to
    # send the empty case into max() and crash).
    if array and all(isinstance(el, list) for el in array):
        return [len(array), len(max(array, key=len))]
    return [len(array), 1]
columns. | Below is the the instruction that describes the task:
### Input:
Get dimension of an array getting the number of rows and the max num of
columns.
### Response:
def get_dimension_array(array):
"""
Get dimension of an array getting the number of rows and the max num of
columns.
"""
if all(isinstance(el, list) for el in array):
result = [len(array), len(max([x for x in array], key=len,))]
# elif array and isinstance(array, list):
else:
result = [len(array), 1]
return result |
def get_property_func(key):
    """
    Build an accessor for ``key`` on an instance.

    The returned callable first tries plain attribute access and, when no
    such attribute exists, falls back to the instance's ``tags`` mapping.
    """
    def accessor(instance):
        try:
            return getattr(instance, key)
        except AttributeError:
            return instance.tags.get(key)
    return accessor
Look for it as an attribute, and if that does not work, look to see if it
is a tag. | Below is the the instruction that describes the task:
### Input:
Get the accessor function for an instance to look for `key`.
Look for it as an attribute, and if that does not work, look to see if it
is a tag.
### Response:
def get_property_func(key):
"""
Get the accessor function for an instance to look for `key`.
Look for it as an attribute, and if that does not work, look to see if it
is a tag.
"""
def get_it(obj):
try:
return getattr(obj, key)
except AttributeError:
return obj.tags.get(key)
return get_it |
def get_data(self, file_id):
    """
    Return the values for the table identified by *file_id*.

    The backing file (``cwr_<file_id>.csv``) is read at most once;
    subsequent calls return the cached collection.

    :param file_id: identifier for the table
    :return: all the values from the table
    """
    try:
        return self._file_values[file_id]
    except KeyError:
        values = self._reader.read_csv_file('cwr_%s.csv' % file_id)
        self._file_values[file_id] = values
        return values
The file is read only once, consecutive calls to this method will
return the sale collection.
:param file_id: identifier for the table
:return: all the values from the table | Below is the the instruction that describes the task:
### Input:
Acquires the data from the table identified by the id.
The file is read only once, consecutive calls to this method will
return the sale collection.
:param file_id: identifier for the table
:return: all the values from the table
### Response:
def get_data(self, file_id):
"""
Acquires the data from the table identified by the id.
The file is read only once, consecutive calls to this method will
return the sale collection.
:param file_id: identifier for the table
:return: all the values from the table
"""
if file_id not in self._file_values:
file_contents = 'cwr_%s.csv' % file_id
self._file_values[file_id] = self._reader.read_csv_file(
file_contents)
return self._file_values[file_id] |
def _swap_slice_indices(self, slc, make_slice=False):
    '''Swap slice indices

    Change slice indices from Verilog slicing (e.g. IEEE 1800-2012) to Python slicing.

    :param slc: a ``slice`` object or a single integer index; negative
        values are taken relative to ``self.length()``.
    :param make_slice: when True, wrap a plain integer index into a
        one-element ``slice`` instead of returning it unchanged.
    :return: a Python-style ``slice`` (or the original integer index).
    '''
    try:
        start = slc.start
        stop = slc.stop
        slc_step = slc.step
    except AttributeError:
        # ``slc`` has no .start/.stop/.step: it is a plain index.
        if make_slice:
            if slc < 0:
                slc += self.length()
            return slice(slc, slc + 1)
        else:
            return slc
    else:
        # Verilog's inclusive, descending [msb:lsb] means the Verilog
        # ``start`` (msb) becomes the exclusive Python ``stop`` ...
        if not start and start != 0:
            # start is None (open-ended): run to the full length.
            slc_stop = self.length()
        elif start < 0:
            slc_stop = self.length() + start + 1
        else:
            slc_stop = start + 1
        # ... and the Verilog ``stop`` (lsb) becomes the Python ``start``.
        if not stop and stop != 0:
            # stop is None (open-ended): begin at 0.
            slc_start = 0
        elif stop < 0:
            slc_start = self.length() + stop
        else:
            slc_start = stop
        return slice(slc_start, slc_stop, slc_step)
Change slice indices from Verilog slicing (e.g. IEEE 1800-2012) to Python slicing. | Below is the the instruction that describes the task:
### Input:
Swap slice indices
Change slice indices from Verilog slicing (e.g. IEEE 1800-2012) to Python slicing.
### Response:
def _swap_slice_indices(self, slc, make_slice=False):
'''Swap slice indices
Change slice indices from Verilog slicing (e.g. IEEE 1800-2012) to Python slicing.
'''
try:
start = slc.start
stop = slc.stop
slc_step = slc.step
except AttributeError:
if make_slice:
if slc < 0:
slc += self.length()
return slice(slc, slc + 1)
else:
return slc
else:
if not start and start != 0:
slc_stop = self.length()
elif start < 0:
slc_stop = self.length() + start + 1
else:
slc_stop = start + 1
if not stop and stop != 0:
slc_start = 0
elif stop < 0:
slc_start = self.length() + stop
else:
slc_start = stop
return slice(slc_start, slc_stop, slc_step) |
def prompt_user_to_select_link(self, links):
    """
    Prompt the user to select a link from a list to open.

    Links are shown one page at a time; 'j'/'k' page forward/backward and
    a digit selects a link on the current page.

    Return the link that was selected, or ``None`` if no link was selected.
    """
    link_pages = self.get_link_pages(links)
    n = 0
    while n in range(len(link_pages)):
        link_page = link_pages[n]
        text = 'Select a link to open (page {} of {}):\n\n'
        text = text.format(n+1, len(link_pages))
        text += self.get_link_page_text(link_page)
        # Offer paging hints only where a next/previous page exists.
        if link_page is not link_pages[-1]:
            text += '[j] next page...'
        if link_page is not link_pages[0]:
            if link_page is not link_pages[-1]:
                text += '\n'
            text += '[k] ...previous page'
        try:
            # show_notification() presumably returns a key code usable with
            # chr() -- TODO confirm against its implementation.
            choice = chr(self.show_notification(text))
            try:
                # Digits become ints (link selection); 'j'/'k' stay strings.
                choice = int(choice)
            except ValueError:
                pass
        except ValueError:
            # chr() rejected the returned code; treat it as a cancel.
            return None
        if choice == 'j':
            if link_page is not link_pages[-1]:
                n += 1
            continue
        elif choice == 'k':
            if link_page is not link_pages[0]:
                n -= 1
            continue
        elif choice not in range(len(link_page)):
            # Any other key, or an out-of-range digit, cancels the prompt.
            return None
        return link_page[choice]['href']
Return the link that was selected, or ``None`` if no link was selected. | Below is the the instruction that describes the task:
### Input:
Prompt the user to select a link from a list to open.
Return the link that was selected, or ``None`` if no link was selected.
### Response:
def prompt_user_to_select_link(self, links):
"""
Prompt the user to select a link from a list to open.
Return the link that was selected, or ``None`` if no link was selected.
"""
link_pages = self.get_link_pages(links)
n = 0
while n in range(len(link_pages)):
link_page = link_pages[n]
text = 'Select a link to open (page {} of {}):\n\n'
text = text.format(n+1, len(link_pages))
text += self.get_link_page_text(link_page)
if link_page is not link_pages[-1]:
text += '[j] next page...'
if link_page is not link_pages[0]:
if link_page is not link_pages[-1]:
text += '\n'
text += '[k] ...previous page'
try:
choice = chr(self.show_notification(text))
try:
choice = int(choice)
except ValueError:
pass
except ValueError:
return None
if choice == 'j':
if link_page is not link_pages[-1]:
n += 1
continue
elif choice == 'k':
if link_page is not link_pages[0]:
n -= 1
continue
elif choice not in range(len(link_page)):
return None
return link_page[choice]['href'] |
def track_request(self, name: str, url: str, success: bool, start_time: str=None,
                  duration: int=None, response_code: str =None, http_method: str=None,
                  properties: Dict[str, object]=None, measurements: Dict[str, object]=None,
                  request_id: str=None) -> None:
    """
    Sends a single request that was captured for the application.

    :param name: The name for this request. All requests with the same name will be grouped together.
    :param url: The actual URL for this request (to show in individual request instances).
    :param success: True if the request ended in success, False otherwise.
    :param start_time: the start time of the request. The value should look the same as the one returned by :func:`datetime.isoformat()` (defaults to: None)
    :param duration: the number of milliseconds that this request lasted. (defaults to: None)
    :param response_code: the response code that this request returned. (defaults to: None)
    :param http_method: the HTTP method that triggered this request. (defaults to: None)
    :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
    :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
    :param request_id: the id for this request. If None, a new uuid will be generated. (defaults to: None)
    :raises NotImplementedError: always; concrete BotTelemetryClient
        implementations must override this method.
    """
    # Abstract interface method: subclasses provide the real implementation.
    raise NotImplementedError('BotTelemetryClient.track_request(): is not implemented.')
:param name: The name for this request. All requests with the same name will be grouped together.
:param url: The actual URL for this request (to show in individual request instances).
:param success: True if the request ended in success, False otherwise.
:param start_time: the start time of the request. The value should look the same as the one returned by :func:`datetime.isoformat()` (defaults to: None)
:param duration: the number of milliseconds that this request lasted. (defaults to: None)
:param response_code: the response code that this request returned. (defaults to: None)
:param http_method: the HTTP method that triggered this request. (defaults to: None)
:param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
:param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
:param request_id: the id for this request. If None, a new uuid will be generated. (defaults to: None) | Below is the the instruction that describes the task:
### Input:
Sends a single request that was captured for the application.
:param name: The name for this request. All requests with the same name will be grouped together.
:param url: The actual URL for this request (to show in individual request instances).
:param success: True if the request ended in success, False otherwise.
:param start_time: the start time of the request. The value should look the same as the one returned by :func:`datetime.isoformat()` (defaults to: None)
:param duration: the number of milliseconds that this request lasted. (defaults to: None)
:param response_code: the response code that this request returned. (defaults to: None)
:param http_method: the HTTP method that triggered this request. (defaults to: None)
:param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
:param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
:param request_id: the id for this request. If None, a new uuid will be generated. (defaults to: None)
### Response:
def track_request(self, name: str, url: str, success: bool, start_time: str=None,
duration: int=None, response_code: str =None, http_method: str=None,
properties: Dict[str, object]=None, measurements: Dict[str, object]=None,
request_id: str=None):
"""
Sends a single request that was captured for the application.
:param name: The name for this request. All requests with the same name will be grouped together.
:param url: The actual URL for this request (to show in individual request instances).
:param success: True if the request ended in success, False otherwise.
:param start_time: the start time of the request. The value should look the same as the one returned by :func:`datetime.isoformat()` (defaults to: None)
:param duration: the number of milliseconds that this request lasted. (defaults to: None)
:param response_code: the response code that this request returned. (defaults to: None)
:param http_method: the HTTP method that triggered this request. (defaults to: None)
:param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
:param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
:param request_id: the id for this request. If None, a new uuid will be generated. (defaults to: None)
"""
raise NotImplementedError('BotTelemetryClient.track_request(): is not implemented.') |
def state():
    '''Get The playback state: 'playing', 'paused', or 'stopped'.

    If PLAYING or PAUSED, show information on current track.

    Calls PlaybackController.get_state(), and if state is PLAYING or PAUSED, get
    PlaybackController.get_current_track() and
    PlaybackController.get_time_position()'''
    server = getServer()
    state = server.core.playback.get_state()
    logging.debug('Got playback state: %r', state)
    if state.upper() == 'STOPPED':
        print('Playback is currently stopped')
    else:
        track = server.core.playback.get_current_track()
        logging.debug('Track is %r', track)
        # jsonclass.load() deserializes the whole track object; only pay
        # that cost when DEBUG logging is actually enabled.
        if logging.getLogger().isEnabledFor(logging.DEBUG):
            logging.debug('Track loaded is %r', jsonrpclib.jsonclass.load(track))
        pos = server.core.playback.get_time_position()
        logging.debug('Pos is %r', pos)
        print('{} track: "{}", by {} (at {})'.format(state.title(),
                                                     track['name'],
                                                     ','.join([a['name'] for a in track['artists']]),
                                                     formatTimeposition(pos))
              )
If PLAYING or PAUSED, show information on current track.
Calls PlaybackController.get_state(), and if state is PLAYING or PAUSED, get
PlaybackController.get_current_track() and
PlaybackController.get_time_position() | Below is the the instruction that describes the task:
### Input:
Get The playback state: 'playing', 'paused', or 'stopped'.
If PLAYING or PAUSED, show information on current track.
Calls PlaybackController.get_state(), and if state is PLAYING or PAUSED, get
PlaybackController.get_current_track() and
PlaybackController.get_time_position()
### Response:
def state():
'''Get The playback state: 'playing', 'paused', or 'stopped'.
If PLAYING or PAUSED, show information on current track.
Calls PlaybackController.get_state(), and if state is PLAYING or PAUSED, get
PlaybackController.get_current_track() and
PlaybackController.get_time_position()'''
server = getServer()
state = server.core.playback.get_state()
logging.debug('Got playback state: %r', state)
if state.upper() == 'STOPPED':
print('Playback is currently stopped')
else:
track = server.core.playback.get_current_track()
logging.debug('Track is %r', track)
logging.debug('Track loaded is %r', jsonrpclib.jsonclass.load(track))
pos = server.core.playback.get_time_position()
logging.debug('Pos is %r', pos)
print('{} track: "{}", by {} (at {})'.format(state.title(),
track['name'],
','.join([a['name'] for a in track['artists']]),
formatTimeposition(pos))
) |
def fit(self, x, y=None):
    """Do nothing and return the estimator unchanged

    This method is just there to implement the usual API and hence work in
    pipelines; the input is only validated via ``iter2array``.
    """
    kwargs = {'dtype': self._dtype} if self._dtype is not None else {}
    iter2array(x, **kwargs)
    return self
return self | Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence work in pipelines. | Below is the the instruction that describes the task:
### Input:
Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence work in pipelines.
### Response:
def fit(self, x, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence work in pipelines.
"""
if self._dtype is not None:
iter2array(x, dtype=self._dtype)
else:
iter2array(x)
return self |
def get_map_location(self):
    """Get the location of the player, converted to world coordinates.

    :return: a tuple (x, y, z).
    """
    map_data = self.get_map()
    # Both rectangles unpack as ((east, north), (west, south)).
    (bounds_e, bounds_n), (bounds_w, bounds_s) = map_data["continent_rect"]
    (map_e, map_n), (map_w, map_s) = map_data["map_rect"]
    # Sanity checks on rectangle orientation (AssertionError if violated;
    # stripped under python -O).
    assert bounds_w < bounds_e
    assert bounds_n < bounds_s
    assert map_w < map_e
    assert map_n < map_s
    # fAvatarPosition is in meters; the rectangles are in inches.
    meters_to_inches = 39.3701
    x, y, z = self.fAvatarPosition
    # Linearly map the avatar position from the map rectangle into the
    # continent (world) rectangle.  NOTE(review): y appears to be the
    # vertical axis (so -z supplies north/south) -- confirm.
    map_x = bounds_w + ((x * meters_to_inches - map_w) /
                        (map_e - map_w) * (bounds_e - bounds_w))
    map_y = bounds_n + ((-z * meters_to_inches - map_n) /
                        (map_s - map_n) * (bounds_s - bounds_n))
    map_z = y * meters_to_inches
    return map_x, map_y, map_z
:return: a tuple (x, y, z). | Below is the the instruction that describes the task:
### Input:
Get the location of the player, converted to world coordinates.
:return: a tuple (x, y, z).
### Response:
def get_map_location(self):
"""Get the location of the player, converted to world coordinates.
:return: a tuple (x, y, z).
"""
map_data = self.get_map()
(bounds_e, bounds_n), (bounds_w, bounds_s) = map_data["continent_rect"]
(map_e, map_n), (map_w, map_s) = map_data["map_rect"]
assert bounds_w < bounds_e
assert bounds_n < bounds_s
assert map_w < map_e
assert map_n < map_s
meters_to_inches = 39.3701
x, y, z = self.fAvatarPosition
map_x = bounds_w + ((x * meters_to_inches - map_w) /
(map_e - map_w) * (bounds_e - bounds_w))
map_y = bounds_n + ((-z * meters_to_inches - map_n) /
(map_s - map_n) * (bounds_s - bounds_n))
map_z = y * meters_to_inches
return map_x, map_y, map_z |
def get_queryset(self):
    """ Returns all the approved topics or posts. """
    return super().get_queryset().filter(approved=True)
### Input:
Returns all the approved topics or posts.
### Response:
def get_queryset(self):
""" Returns all the approved topics or posts. """
qs = super().get_queryset()
qs = qs.filter(approved=True)
return qs |
def on_state_changed(self, state):
    """Connect/disconnect sig_key_pressed signal."""
    signal = self.editor.sig_key_pressed
    handler = self._on_key_pressed
    if state:
        signal.connect(handler)
    else:
        signal.disconnect(handler)
### Input:
Connect/disconnect sig_key_pressed signal.
### Response:
def on_state_changed(self, state):
"""Connect/disconnect sig_key_pressed signal."""
if state:
self.editor.sig_key_pressed.connect(self._on_key_pressed)
else:
self.editor.sig_key_pressed.disconnect(self._on_key_pressed) |
def collect_output(self, out_file=None):
    """
    Run :func:`collect_output` on the job's output directory.

    :param out_file: optional destination, passed through to the
        module-level ``collect_output`` function.
    :return: whatever the module-level ``collect_output`` returns.
    """
    if self.logger.isEnabledFor(logging.INFO):
        self.logger.info(
            "collecting output %s", " to %s" % out_file if out_file else ''
        )
        self.logger.info("self.output %s", self.output)
    # Delegates to the module-level collect_output() function; same name as
    # this method, but NOT a recursive call.
    return collect_output(self.output, out_file)
### Input:
Run :func:`collect_output` on the job's output directory.
### Response:
def collect_output(self, out_file=None):
"""
Run :func:`collect_output` on the job's output directory.
"""
if self.logger.isEnabledFor(logging.INFO):
self.logger.info(
"collecting output %s", " to %s" % out_file if out_file else ''
)
self.logger.info("self.output %s", self.output)
return collect_output(self.output, out_file) |
def autocommand(func):
    """ A simplified decorator for making a single function a Command
    instance. In the future this will leverage PEP0484 to do really smart
    function parsing and conversion to argparse actions. """
    title, desc = command.parse_docstring(func)
    # Fall back to a generated title when the docstring has none; a single
    # space for desc prevents Command from using AutoCommand's docstring.
    return AutoCommand(
        title=title or 'Auto command for: %s' % func.__name__,
        desc=desc or ' ',
        name=func.__name__,
        func=func,
    )
instance. In the future this will leverage PEP0484 to do really smart
function parsing and conversion to argparse actions. | Below is the the instruction that describes the task:
### Input:
A simplified decorator for making a single function a Command
instance. In the future this will leverage PEP0484 to do really smart
function parsing and conversion to argparse actions.
### Response:
def autocommand(func):
""" A simplified decorator for making a single function a Command
instance. In the future this will leverage PEP0484 to do really smart
function parsing and conversion to argparse actions. """
name = func.__name__
title, desc = command.parse_docstring(func)
if not title:
title = 'Auto command for: %s' % name
if not desc:
# Prevent Command from using docstring of AutoCommand
desc = ' '
return AutoCommand(title=title, desc=desc, name=name, func=func) |
def enable_alarm_actions(self, alarm_names):
    """
    Enables actions for the specified alarms.

    :type alarm_names: list
    :param alarm_names: List of alarm names.
    """
    request_params = {}
    self.build_list_params(request_params, alarm_names,
                           'AlarmNames.member.%s')
    return self.get_status('EnableAlarmActions', request_params)
:type alarms: list
:param alarms: List of alarm names. | Below is the the instruction that describes the task:
### Input:
Enables actions for the specified alarms.
:type alarms: list
:param alarms: List of alarm names.
### Response:
def enable_alarm_actions(self, alarm_names):
"""
Enables actions for the specified alarms.
:type alarms: list
:param alarms: List of alarm names.
"""
params = {}
self.build_list_params(params, alarm_names, 'AlarmNames.member.%s')
return self.get_status('EnableAlarmActions', params) |
def _get_module(self, module):
"""Get module."""
if isinstance(module, str):
mod = importlib.import_module(module)
for name in ('get_plugin', 'get_filter'):
attr = getattr(mod, name, None)
if attr is not None:
break
if name == 'get_filter':
util.warn_deprecated("'get_filter' is deprecated. Plugins should use 'get_plugin'.")
if not attr:
raise ValueError("Could not find the 'get_plugin' function in module '%s'!" % module)
return attr() | Get module. | Below is the the instruction that describes the task:
### Input:
Get module.
### Response:
def _get_module(self, module):
"""Get module."""
if isinstance(module, str):
mod = importlib.import_module(module)
for name in ('get_plugin', 'get_filter'):
attr = getattr(mod, name, None)
if attr is not None:
break
if name == 'get_filter':
util.warn_deprecated("'get_filter' is deprecated. Plugins should use 'get_plugin'.")
if not attr:
raise ValueError("Could not find the 'get_plugin' function in module '%s'!" % module)
return attr() |
def create(self, alpha_sender):
"""
Create a new AlphaSenderInstance
:param unicode alpha_sender: An Alphanumeric Sender ID string, up to 11 characters.
:returns: Newly created AlphaSenderInstance
:rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderInstance
"""
data = values.of({'AlphaSender': alpha_sender, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return AlphaSenderInstance(self._version, payload, service_sid=self._solution['service_sid'], ) | Create a new AlphaSenderInstance
:param unicode alpha_sender: An Alphanumeric Sender ID string, up to 11 characters.
:returns: Newly created AlphaSenderInstance
:rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderInstance | Below is the instruction that describes the task:
### Input:
Create a new AlphaSenderInstance
:param unicode alpha_sender: An Alphanumeric Sender ID string, up to 11 characters.
:returns: Newly created AlphaSenderInstance
:rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderInstance
### Response:
def create(self, alpha_sender):
"""
Create a new AlphaSenderInstance
:param unicode alpha_sender: An Alphanumeric Sender ID string, up to 11 characters.
:returns: Newly created AlphaSenderInstance
:rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderInstance
"""
data = values.of({'AlphaSender': alpha_sender, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return AlphaSenderInstance(self._version, payload, service_sid=self._solution['service_sid'], ) |
def do_dimension_value_list(mc, args):
'''List names of metric dimensions.'''
fields = {}
fields['dimension_name'] = args.dimension_name
if args.metric_name:
fields['metric_name'] = args.metric_name
if args.limit:
fields['limit'] = args.limit
if args.offset:
fields['offset'] = args.offset
if args.tenant_id:
fields['tenant_id'] = args.tenant_id
try:
dimension_values = mc.metrics.list_dimension_values(**fields)
except (osc_exc.ClientException, k_exc.HttpError) as he:
raise osc_exc.CommandError('%s\n%s' % (he.message, he.details))
if args.json:
print(utils.json_formatter(dimension_values))
return
if isinstance(dimension_values, list):
utils.print_list(dimension_values, ['Dimension Values'], formatters={
'Dimension Values': lambda x: x['dimension_value']}) | List names of metric dimensions. | Below is the instruction that describes the task:
### Input:
List names of metric dimensions.
### Response:
def do_dimension_value_list(mc, args):
'''List names of metric dimensions.'''
fields = {}
fields['dimension_name'] = args.dimension_name
if args.metric_name:
fields['metric_name'] = args.metric_name
if args.limit:
fields['limit'] = args.limit
if args.offset:
fields['offset'] = args.offset
if args.tenant_id:
fields['tenant_id'] = args.tenant_id
try:
dimension_values = mc.metrics.list_dimension_values(**fields)
except (osc_exc.ClientException, k_exc.HttpError) as he:
raise osc_exc.CommandError('%s\n%s' % (he.message, he.details))
if args.json:
print(utils.json_formatter(dimension_values))
return
if isinstance(dimension_values, list):
utils.print_list(dimension_values, ['Dimension Values'], formatters={
'Dimension Values': lambda x: x['dimension_value']}) |
def ddx(data, axis=0, dx=None, x=None, axis_x=0, boundary='forward-backward'):
'''
Calculates a second-order centered finite difference derivative of data
along the specified axis.
Parameters
----------
data : ndarray
Data on which we are taking a derivative.
axis : int
Index of the data array on which to take the derivative.
dx : float, optional
Constant grid spacing value. Will assume constant grid spacing if
given. May not be used with argument x. Default value is 1 unless
x is given.
x : ndarray, optional
Values of the axis along which we are taking a derivative to allow
variable grid spacing. May not be given with argument dx.
axis_x : int, optional
Index of the x array on which to take the derivative. Does nothing if
x is not given as an argument.
boundary : string, optional
Boundary condition. If 'periodic', assume periodic boundary condition
for centered difference. If 'forward-backward', take first-order
forward or backward derivatives at boundary.
Returns
-------
derivative : ndarray
Derivative of the data along the specified axis.
Raises
------
ValueError:
If an invalid boundary condition choice is given, if both dx and x are
specified, if axis is out of the valid range for the shape of the data,
or if x is specified and axis_x is out of the valid range for the shape
of x.
'''
if abs(axis) >= len(data.shape):
raise ValueError('axis is out of bounds for the shape of data')
if x is not None and abs(axis_x) > len(x.shape):
raise ValueError('axis_x is out of bounds for the shape of x')
if dx is not None and x is not None:
raise ValueError('may not give both x and dx as keyword arguments')
if boundary == 'periodic':
deriv = (np.roll(data, -1, axis) - np.roll(data, 1, axis)
)/(np.roll(x, -1, axis_x) - np.roll(x, 1, axis_x))
elif boundary == 'forward-backward':
# We will take forward-backward differencing at edges
# need some fancy indexing to handle arbitrary derivative axis
# Initialize our index lists
front = [slice(s) for s in data.shape]
back = [slice(s) for s in data.shape]
target = [slice(s) for s in data.shape]
# Set our index values for the derivative axis
# front is the +1 index for derivative
front[axis] = np.array([1, -1])
# back is the -1 index for derivative
back[axis] = np.array([0, -2])
# target is the position where the derivative is being calculated
target[axis] = np.array([0, -1])
if dx is not None: # grid spacing is constant
deriv = (np.roll(data, -1, axis) - np.roll(data, 1, axis))/(2.*dx)
deriv[target] = (data[front]-data[back])/dx
else: # grid spacing is arbitrary
# Need to calculate differences for our grid positions, too!
# first take care of the centered differences
dx = (np.roll(x, -1, axis_x) - np.roll(x, 1, axis_x))
# now handle those annoying edge points, like with the data above
front_x = [slice(s) for s in x.shape]
back_x = [slice(s) for s in x.shape]
target_x = [slice(s) for s in x.shape]
front_x[axis_x] = np.array([1, -1])
back_x[axis_x] = np.array([0, -2])
target_x[axis] = np.array([0, -1])
dx[target_x] = (x[front_x] - x[back_x])
# Here dx spans two grid indices, no need for *2
deriv = (np.roll(data, -1, axis) - np.roll(data, 1, axis))/dx
deriv[target] = (data[front] - data[back])/dx
else: # invalid boundary condition was given
raise ValueError('Invalid option {} for boundary '
'condition.'.format(boundary))
return deriv | Calculates a second-order centered finite difference derivative of data
along the specified axis.
Parameters
----------
data : ndarray
Data on which we are taking a derivative.
axis : int
Index of the data array on which to take the derivative.
dx : float, optional
Constant grid spacing value. Will assume constant grid spacing if
given. May not be used with argument x. Default value is 1 unless
x is given.
x : ndarray, optional
Values of the axis along which we are taking a derivative to allow
variable grid spacing. May not be given with argument dx.
axis_x : int, optional
Index of the x array on which to take the derivative. Does nothing if
x is not given as an argument.
boundary : string, optional
Boundary condition. If 'periodic', assume periodic boundary condition
for centered difference. If 'forward-backward', take first-order
forward or backward derivatives at boundary.
Returns
-------
derivative : ndarray
Derivative of the data along the specified axis.
Raises
------
ValueError:
If an invalid boundary condition choice is given, if both dx and x are
specified, if axis is out of the valid range for the shape of the data,
or if x is specified and axis_x is out of the valid range for the shape
of x. | Below is the instruction that describes the task:
### Input:
Calculates a second-order centered finite difference derivative of data
along the specified axis.
Parameters
----------
data : ndarray
Data on which we are taking a derivative.
axis : int
Index of the data array on which to take the derivative.
dx : float, optional
Constant grid spacing value. Will assume constant grid spacing if
given. May not be used with argument x. Default value is 1 unless
x is given.
x : ndarray, optional
Values of the axis along which we are taking a derivative to allow
variable grid spacing. May not be given with argument dx.
axis_x : int, optional
Index of the x array on which to take the derivative. Does nothing if
x is not given as an argument.
boundary : string, optional
Boundary condition. If 'periodic', assume periodic boundary condition
for centered difference. If 'forward-backward', take first-order
forward or backward derivatives at boundary.
Returns
-------
derivative : ndarray
Derivative of the data along the specified axis.
Raises
------
ValueError:
If an invalid boundary condition choice is given, if both dx and x are
specified, if axis is out of the valid range for the shape of the data,
or if x is specified and axis_x is out of the valid range for the shape
of x.
### Response:
def ddx(data, axis=0, dx=None, x=None, axis_x=0, boundary='forward-backward'):
'''
Calculates a second-order centered finite difference derivative of data
along the specified axis.
Parameters
----------
data : ndarray
Data on which we are taking a derivative.
axis : int
Index of the data array on which to take the derivative.
dx : float, optional
Constant grid spacing value. Will assume constant grid spacing if
given. May not be used with argument x. Default value is 1 unless
x is given.
x : ndarray, optional
Values of the axis along which we are taking a derivative to allow
variable grid spacing. May not be given with argument dx.
axis_x : int, optional
Index of the x array on which to take the derivative. Does nothing if
x is not given as an argument.
boundary : string, optional
Boundary condition. If 'periodic', assume periodic boundary condition
for centered difference. If 'forward-backward', take first-order
forward or backward derivatives at boundary.
Returns
-------
derivative : ndarray
Derivative of the data along the specified axis.
Raises
------
ValueError:
If an invalid boundary condition choice is given, if both dx and x are
specified, if axis is out of the valid range for the shape of the data,
or if x is specified and axis_x is out of the valid range for the shape
of x.
'''
if abs(axis) >= len(data.shape):
raise ValueError('axis is out of bounds for the shape of data')
if x is not None and abs(axis_x) > len(x.shape):
raise ValueError('axis_x is out of bounds for the shape of x')
if dx is not None and x is not None:
raise ValueError('may not give both x and dx as keyword arguments')
if boundary == 'periodic':
deriv = (np.roll(data, -1, axis) - np.roll(data, 1, axis)
)/(np.roll(x, -1, axis_x) - np.roll(x, 1, axis_x))
elif boundary == 'forward-backward':
# We will take forward-backward differencing at edges
# need some fancy indexing to handle arbitrary derivative axis
# Initialize our index lists
front = [slice(s) for s in data.shape]
back = [slice(s) for s in data.shape]
target = [slice(s) for s in data.shape]
# Set our index values for the derivative axis
# front is the +1 index for derivative
front[axis] = np.array([1, -1])
# back is the -1 index for derivative
back[axis] = np.array([0, -2])
# target is the position where the derivative is being calculated
target[axis] = np.array([0, -1])
if dx is not None: # grid spacing is constant
deriv = (np.roll(data, -1, axis) - np.roll(data, 1, axis))/(2.*dx)
deriv[target] = (data[front]-data[back])/dx
else: # grid spacing is arbitrary
# Need to calculate differences for our grid positions, too!
# first take care of the centered differences
dx = (np.roll(x, -1, axis_x) - np.roll(x, 1, axis_x))
# now handle those annoying edge points, like with the data above
front_x = [slice(s) for s in x.shape]
back_x = [slice(s) for s in x.shape]
target_x = [slice(s) for s in x.shape]
front_x[axis_x] = np.array([1, -1])
back_x[axis_x] = np.array([0, -2])
target_x[axis] = np.array([0, -1])
dx[target_x] = (x[front_x] - x[back_x])
# Here dx spans two grid indices, no need for *2
deriv = (np.roll(data, -1, axis) - np.roll(data, 1, axis))/dx
deriv[target] = (data[front] - data[back])/dx
else: # invalid boundary condition was given
raise ValueError('Invalid option {} for boundary '
'condition.'.format(boundary))
return deriv |
def DeleteFile(target_filename):
'''
Deletes the given local filename.
.. note:: If file doesn't exist this method has no effect.
:param unicode target_filename:
A local filename
:raises NotImplementedForRemotePathError:
If trying to delete a non-local path
:raises FileOnlyActionError:
Raised when filename refers to a directory.
'''
_AssertIsLocal(target_filename)
try:
if IsLink(target_filename):
DeleteLink(target_filename)
elif IsFile(target_filename):
os.remove(target_filename)
elif IsDir(target_filename):
from ._exceptions import FileOnlyActionError
raise FileOnlyActionError(target_filename)
except Exception as e:
reraise(e, 'While executing filesystem.DeleteFile(%s)' % (target_filename)) | Deletes the given local filename.
.. note:: If file doesn't exist this method has no effect.
:param unicode target_filename:
A local filename
:raises NotImplementedForRemotePathError:
If trying to delete a non-local path
:raises FileOnlyActionError:
Raised when filename refers to a directory. | Below is the instruction that describes the task:
### Input:
Deletes the given local filename.
.. note:: If file doesn't exist this method has no effect.
:param unicode target_filename:
A local filename
:raises NotImplementedForRemotePathError:
If trying to delete a non-local path
:raises FileOnlyActionError:
Raised when filename refers to a directory.
### Response:
def DeleteFile(target_filename):
'''
Deletes the given local filename.
.. note:: If file doesn't exist this method has no effect.
:param unicode target_filename:
A local filename
:raises NotImplementedForRemotePathError:
If trying to delete a non-local path
:raises FileOnlyActionError:
Raised when filename refers to a directory.
'''
_AssertIsLocal(target_filename)
try:
if IsLink(target_filename):
DeleteLink(target_filename)
elif IsFile(target_filename):
os.remove(target_filename)
elif IsDir(target_filename):
from ._exceptions import FileOnlyActionError
raise FileOnlyActionError(target_filename)
except Exception as e:
reraise(e, 'While executing filesystem.DeleteFile(%s)' % (target_filename)) |
def terminate(self):
"""Terminate DMESG job"""
if self.__thread:
cmd = ["who am i"]
status, output, _ = cij.util.execute(cmd, shell=True, echo=True)
if status:
cij.warn("cij.dmesg.terminate: who am i failed")
return 1
tty = output.split()[1]
cmd = ["pkill -f '{}' -t '{}'".format(" ".join(self.__prefix), tty)]
status, _, _ = cij.util.execute(cmd, shell=True, echo=True)
if status:
cij.warn("cij.dmesg.terminate: pkill failed")
return 1
self.__thread.join()
self.__thread = None
return 0 | Terminate DMESG job | Below is the instruction that describes the task:
### Input:
Terminate DMESG job
### Response:
def terminate(self):
"""Terminate DMESG job"""
if self.__thread:
cmd = ["who am i"]
status, output, _ = cij.util.execute(cmd, shell=True, echo=True)
if status:
cij.warn("cij.dmesg.terminate: who am i failed")
return 1
tty = output.split()[1]
cmd = ["pkill -f '{}' -t '{}'".format(" ".join(self.__prefix), tty)]
status, _, _ = cij.util.execute(cmd, shell=True, echo=True)
if status:
cij.warn("cij.dmesg.terminate: pkill failed")
return 1
self.__thread.join()
self.__thread = None
return 0 |
def remove_rows_containing(df, column, match):
"""
Return a ``DataFrame`` with rows where `column` values containing `match` are removed.
The selected `column` series of values from the supplied Pandas ``DataFrame`` is compared
to `match`, and those rows that contain it are removed from the DataFrame.
:param df: Pandas ``DataFrame``
:param column: Column indexer
:param match: ``str`` match target
:return: Pandas ``DataFrame`` filtered
"""
df = df.copy()
mask = [match not in str(v) for v in df[column].values]
return df.iloc[mask, :] | Return a ``DataFrame`` with rows where `column` values containing `match` are removed.
The selected `column` series of values from the supplied Pandas ``DataFrame`` is compared
to `match`, and those rows that contain it are removed from the DataFrame.
:param df: Pandas ``DataFrame``
:param column: Column indexer
:param match: ``str`` match target
:return: Pandas ``DataFrame`` filtered | Below is the instruction that describes the task:
### Input:
Return a ``DataFrame`` with rows where `column` values containing `match` are removed.
The selected `column` series of values from the supplied Pandas ``DataFrame`` is compared
to `match`, and those rows that contain it are removed from the DataFrame.
:param df: Pandas ``DataFrame``
:param column: Column indexer
:param match: ``str`` match target
:return: Pandas ``DataFrame`` filtered
### Response:
def remove_rows_containing(df, column, match):
"""
Return a ``DataFrame`` with rows where `column` values containing `match` are removed.
The selected `column` series of values from the supplied Pandas ``DataFrame`` is compared
to `match`, and those rows that contain it are removed from the DataFrame.
:param df: Pandas ``DataFrame``
:param column: Column indexer
:param match: ``str`` match target
:return: Pandas ``DataFrame`` filtered
"""
df = df.copy()
mask = [match not in str(v) for v in df[column].values]
return df.iloc[mask, :] |
def update_descriptor_le(self, lineedit, tf):
"""Update the given line edit to show the descriptor that is stored in the index
:param lineedit: the line edit to update with the descriptor
:type lineedit: QLineEdit
:param tf: the selected taskfileinfo
:type tf: :class:`TaskFileInfo` | None
:returns: None
:rtype: None
:raises: None
"""
if tf:
descriptor = tf.descriptor
lineedit.setText(descriptor)
else:
lineedit.setText("") | Update the given line edit to show the descriptor that is stored in the index
:param lineedit: the line edit to update with the descriptor
:type lineedit: QLineEdit
:param tf: the selected taskfileinfo
:type tf: :class:`TaskFileInfo` | None
:returns: None
:rtype: None
:raises: None | Below is the instruction that describes the task:
### Input:
Update the given line edit to show the descriptor that is stored in the index
:param lineedit: the line edit to update with the descriptor
:type lineedit: QLineEdit
:param tf: the selected taskfileinfo
:type tf: :class:`TaskFileInfo` | None
:returns: None
:rtype: None
:raises: None
### Response:
def update_descriptor_le(self, lineedit, tf):
"""Update the given line edit to show the descriptor that is stored in the index
:param lineedit: the line edit to update with the descriptor
:type lineedit: QLineEdit
:param tf: the selected taskfileinfo
:type tf: :class:`TaskFileInfo` | None
:returns: None
:rtype: None
:raises: None
"""
if tf:
descriptor = tf.descriptor
lineedit.setText(descriptor)
else:
lineedit.setText("") |
def hashes_get(versions_file, base_path):
"""
Gets hashes for currently checked out version.
@param versions_file: a common.VersionsFile instance to check against.
@param base_path: where to look for files. e.g. './.update-workspace/silverstripe/'
@return: checksums {'file1': 'hash1'}
"""
files = versions_file.files_get_all()
result = {}
for f in files:
try:
result[f] = functions.md5_file(base_path + f)
except IOError:
# Not all files exist for all versions.
pass
return result | Gets hashes for currently checked out version.
@param versions_file: a common.VersionsFile instance to check against.
@param base_path: where to look for files. e.g. './.update-workspace/silverstripe/'
@return: checksums {'file1': 'hash1'} | Below is the instruction that describes the task:
### Input:
Gets hashes for currently checked out version.
@param versions_file: a common.VersionsFile instance to check against.
@param base_path: where to look for files. e.g. './.update-workspace/silverstripe/'
@return: checksums {'file1': 'hash1'}
### Response:
def hashes_get(versions_file, base_path):
"""
Gets hashes for currently checked out version.
@param versions_file: a common.VersionsFile instance to check against.
@param base_path: where to look for files. e.g. './.update-workspace/silverstripe/'
@return: checksums {'file1': 'hash1'}
"""
files = versions_file.files_get_all()
result = {}
for f in files:
try:
result[f] = functions.md5_file(base_path + f)
except IOError:
# Not all files exist for all versions.
pass
return result |
def __security_definitions_descriptor(self, issuers):
"""Create a descriptor for the security definitions.
Args:
issuers: dict, mapping issuer names to Issuer tuples
Returns:
The dict representing the security definitions descriptor.
"""
if not issuers:
result = {
_DEFAULT_SECURITY_DEFINITION: {
'authorizationUrl': '',
'flow': 'implicit',
'type': 'oauth2',
'x-google-issuer': 'https://accounts.google.com',
'x-google-jwks_uri': 'https://www.googleapis.com/oauth2/v3/certs',
}
}
return result
result = {}
for issuer_key, issuer_value in issuers.items():
result[issuer_key] = {
'authorizationUrl': '',
'flow': 'implicit',
'type': 'oauth2',
'x-google-issuer': issuer_value.issuer,
}
# If jwks_uri is omitted, the auth library will use OpenID discovery
# to find it. Otherwise, include it in the descriptor explicitly.
if issuer_value.jwks_uri:
result[issuer_key]['x-google-jwks_uri'] = issuer_value.jwks_uri
return result | Create a descriptor for the security definitions.
Args:
issuers: dict, mapping issuer names to Issuer tuples
Returns:
The dict representing the security definitions descriptor. | Below is the instruction that describes the task:
### Input:
Create a descriptor for the security definitions.
Args:
issuers: dict, mapping issuer names to Issuer tuples
Returns:
The dict representing the security definitions descriptor.
### Response:
def __security_definitions_descriptor(self, issuers):
"""Create a descriptor for the security definitions.
Args:
issuers: dict, mapping issuer names to Issuer tuples
Returns:
The dict representing the security definitions descriptor.
"""
if not issuers:
result = {
_DEFAULT_SECURITY_DEFINITION: {
'authorizationUrl': '',
'flow': 'implicit',
'type': 'oauth2',
'x-google-issuer': 'https://accounts.google.com',
'x-google-jwks_uri': 'https://www.googleapis.com/oauth2/v3/certs',
}
}
return result
result = {}
for issuer_key, issuer_value in issuers.items():
result[issuer_key] = {
'authorizationUrl': '',
'flow': 'implicit',
'type': 'oauth2',
'x-google-issuer': issuer_value.issuer,
}
# If jwks_uri is omitted, the auth library will use OpenID discovery
# to find it. Otherwise, include it in the descriptor explicitly.
if issuer_value.jwks_uri:
result[issuer_key]['x-google-jwks_uri'] = issuer_value.jwks_uri
return result |
def get_target_dimensions(self):
"""
Returns the target dimensions and calculates them if necessary.
The target dimensions are display independent.
:return: Target dimensions as a tuple (width, height)
:rtype: (int, int)
"""
if self.target_height is None:
self._calculate_target_dimensions()
return int(self.target_width), int(self.target_height) | Returns the target dimensions and calculates them if necessary.
The target dimensions are display independent.
:return: Target dimensions as a tuple (width, height)
:rtype: (int, int) | Below is the instruction that describes the task:
### Input:
Returns the target dimensions and calculates them if necessary.
The target dimensions are display independent.
:return: Target dimensions as a tuple (width, height)
:rtype: (int, int)
### Response:
def get_target_dimensions(self):
"""
Returns the target dimensions and calculates them if necessary.
The target dimensions are display independent.
:return: Target dimensions as a tuple (width, height)
:rtype: (int, int)
"""
if self.target_height is None:
self._calculate_target_dimensions()
return int(self.target_width), int(self.target_height) |
def upload_image(self, path=None, url=None, title=None, description=None,
album=None):
"""
Upload the image at either path or url.
:param path: The path to the image you want to upload.
:param url: The url to the image you want to upload.
:param title: The title the image will have when uploaded.
:param description: The description the image will have when uploaded.
:param album: The album the image will be added to when uploaded. Can
be either a Album object or it's id. Leave at None to upload
without adding to an Album, adding it later is possible.
Authentication as album owner is necessary to upload to an album
with this function.
:returns: An Image object representing the uploaded image.
"""
if bool(path) == bool(url):
raise LookupError("Either path or url must be given.")
if path:
with open(path, 'rb') as image_file:
binary_data = image_file.read()
image = b64encode(binary_data)
else:
image = url
payload = {'album_id': album, 'image': image,
'title': title, 'description': description}
resp = self._send_request(self._base_url + "/3/image",
params=payload, method='POST')
# TEMPORARY HACK:
# On 5-08-2013 I noticed Imgur now returned enough information from
# this call to fully populate the Image object. However those variables
# that matched arguments were always None, even if they had been given.
# See https://groups.google.com/forum/#!topic/imgur/F3uVb55TMGo
resp['title'] = title
resp['description'] = description
if album is not None:
resp['album'] = (Album({'id': album}, self, False) if not
isinstance(album, Album) else album)
return Image(resp, self) | Upload the image at either path or url.
:param path: The path to the image you want to upload.
:param url: The url to the image you want to upload.
:param title: The title the image will have when uploaded.
:param description: The description the image will have when uploaded.
:param album: The album the image will be added to when uploaded. Can
be either a Album object or it's id. Leave at None to upload
without adding to an Album, adding it later is possible.
Authentication as album owner is necessary to upload to an album
with this function.
:returns: An Image object representing the uploaded image. | Below is the instruction that describes the task:
### Input:
Upload the image at either path or url.
:param path: The path to the image you want to upload.
:param url: The url to the image you want to upload.
:param title: The title the image will have when uploaded.
:param description: The description the image will have when uploaded.
:param album: The album the image will be added to when uploaded. Can
be either a Album object or it's id. Leave at None to upload
without adding to an Album, adding it later is possible.
Authentication as album owner is necessary to upload to an album
with this function.
:returns: An Image object representing the uploaded image.
### Response:
def upload_image(self, path=None, url=None, title=None, description=None,
album=None):
"""
Upload the image at either path or url.
:param path: The path to the image you want to upload.
:param url: The url to the image you want to upload.
:param title: The title the image will have when uploaded.
:param description: The description the image will have when uploaded.
:param album: The album the image will be added to when uploaded. Can
be either a Album object or it's id. Leave at None to upload
without adding to an Album, adding it later is possible.
Authentication as album owner is necessary to upload to an album
with this function.
:returns: An Image object representing the uploaded image.
"""
if bool(path) == bool(url):
raise LookupError("Either path or url must be given.")
if path:
with open(path, 'rb') as image_file:
binary_data = image_file.read()
image = b64encode(binary_data)
else:
image = url
payload = {'album_id': album, 'image': image,
'title': title, 'description': description}
resp = self._send_request(self._base_url + "/3/image",
params=payload, method='POST')
# TEMPORARY HACK:
# On 5-08-2013 I noticed Imgur now returned enough information from
# this call to fully populate the Image object. However those variables
# that matched arguments were always None, even if they had been given.
# See https://groups.google.com/forum/#!topic/imgur/F3uVb55TMGo
resp['title'] = title
resp['description'] = description
if album is not None:
resp['album'] = (Album({'id': album}, self, False) if not
isinstance(album, Album) else album)
return Image(resp, self) |
def parse_control_options(controls, variable_defaults=None):
""" Parse a set of control options.
Args:
controls: The dictionary of control options.
variable_defaults: If the controls are for a Query with variables, then this is the
default variable values defined in the Query module. The options in the controls
parameter can override these but if a variable has no 'value' property then we
fall back to these.
Returns:
- the HTML for the controls.
- the default values for the controls as a dict.
- the list of DIV IDs of the controls.
"""
controls_html = ''
control_defaults = {}
control_ids = []
div_id = _html.Html.next_id()
if variable_defaults is None:
variable_defaults = {}
for varname, control in list(controls.items()):
label = control.get('label', varname)
control_id = div_id + '__' + varname
control_ids.append(control_id)
value = control.get('value', variable_defaults.get(varname, None))
# The user should usually specify the type but we will default to 'textbox' for strings
# and 'set' for lists.
if isinstance(value, basestring):
type = 'textbox'
elif isinstance(value, list):
type = 'set'
else:
type = None
type = control.get('type', type)
if type == 'picker':
choices = control.get('choices', value)
if not isinstance(choices, list) or len(choices) == 0:
raise Exception('picker control must specify a nonempty set of choices')
if value is None:
value = choices[0]
choices_html = ''
for i, choice in enumerate(choices):
choices_html += "<option value=\"%s\" %s>%s</option>" % \
(choice, ("selected=\"selected\"" if choice == value else ''), choice)
control_html = "{label}<select disabled id=\"{id}\">{choices}</select>" \
.format(label=label, id=control_id, choices=choices_html)
elif type == 'set': # Multi-picker; implemented as checkboxes.
# TODO(gram): consider using "name" property of the control to group checkboxes. That
# way we can save the code of constructing and parsing control Ids with sequential
# numbers in it. Multiple checkboxes can share the same name.
choices = control.get('choices', value)
if not isinstance(choices, list) or len(choices) == 0:
raise Exception('set control must specify a nonempty set of choices')
if value is None:
value = choices
choices_html = ''
control_ids[-1] = '%s:%d' % (control_id, len(choices)) # replace ID to include count.
for i, choice in enumerate(choices):
checked = choice in value
choice_id = '%s:%d' % (control_id, i)
# TODO(gram): we may want a 'Submit/Refresh button as we may not want to rerun
# query on each checkbox change.
choices_html += """
<div>
<label>
<input type="checkbox" id="{id}" value="{choice}" {checked} disabled>
{choice}
</label>
</div>
""".format(id=choice_id, choice=choice, checked="checked" if checked else '')
control_html = "{label}<div>{choices}</div>".format(label=label, choices=choices_html)
elif type == 'checkbox':
control_html = """
<label>
<input type="checkbox" id="{id}" {checked} disabled>
{label}
</label>
""".format(label=label, id=control_id, checked="checked" if value else '')
elif type == 'slider':
min_ = control.get('min', None)
max_ = control.get('max', None)
if min_ is None or max_ is None:
raise Exception('slider control must specify a min and max value')
if max_ <= min_:
raise Exception('slider control must specify a min value less than max value')
step = control.get('step', 1 if isinstance(min_, int) and isinstance(max_, int)
else (float(max_ - min_) / 10.0))
if value is None:
value = min_
control_html = """
{label}
<input type="text" class="gchart-slider_value" id="{id}_value" value="{value}" disabled/>
<input type="range" class="gchart-slider" id="{id}" min="{min}" max="{max}" step="{step}"
value="{value}" disabled/>
""".format(label=label, id=control_id, value=value, min=min_, max=max_, step=step)
elif type == 'textbox':
if value is None:
value = ''
control_html = "{label}<input type=\"text\" value=\"{value}\" id=\"{id}\" disabled/>" \
.format(label=label, value=value, id=control_id)
else:
raise Exception(
'Unknown control type %s (expected picker, slider, checkbox, textbox or set)' % type)
control_defaults[varname] = value
controls_html += "<div class=\"gchart-control\">{control}</div>\n" \
.format(control=control_html)
controls_html = "<div class=\"gchart-controls\">{controls}</div>".format(controls=controls_html)
return controls_html, control_defaults, control_ids | Parse a set of control options.
Args:
controls: The dictionary of control options.
variable_defaults: If the controls are for a Query with variables, then this is the
default variable values defined in the Query module. The options in the controls
parameter can override these but if a variable has no 'value' property then we
fall back to these.
Returns:
- the HTML for the controls.
- the default values for the controls as a dict.
- the list of DIV IDs of the controls. | Below is the instruction that describes the task:
### Input:
Parse a set of control options.
Args:
controls: The dictionary of control options.
variable_defaults: If the controls are for a Query with variables, then this is the
default variable values defined in the Query module. The options in the controls
parameter can override these but if a variable has no 'value' property then we
fall back to these.
Returns:
- the HTML for the controls.
- the default values for the controls as a dict.
- the list of DIV IDs of the controls.
### Response:
def parse_control_options(controls, variable_defaults=None):
""" Parse a set of control options.
Args:
controls: The dictionary of control options.
variable_defaults: If the controls are for a Query with variables, then this is the
default variable values defined in the Query module. The options in the controls
parameter can override these but if a variable has no 'value' property then we
fall back to these.
Returns:
- the HTML for the controls.
- the default values for the controls as a dict.
- the list of DIV IDs of the controls.
"""
controls_html = ''
control_defaults = {}
control_ids = []
div_id = _html.Html.next_id()
if variable_defaults is None:
variable_defaults = {}
for varname, control in list(controls.items()):
label = control.get('label', varname)
control_id = div_id + '__' + varname
control_ids.append(control_id)
value = control.get('value', variable_defaults.get(varname, None))
# The user should usually specify the type but we will default to 'textbox' for strings
# and 'set' for lists.
if isinstance(value, basestring):
type = 'textbox'
elif isinstance(value, list):
type = 'set'
else:
type = None
type = control.get('type', type)
if type == 'picker':
choices = control.get('choices', value)
if not isinstance(choices, list) or len(choices) == 0:
raise Exception('picker control must specify a nonempty set of choices')
if value is None:
value = choices[0]
choices_html = ''
for i, choice in enumerate(choices):
choices_html += "<option value=\"%s\" %s>%s</option>" % \
(choice, ("selected=\"selected\"" if choice == value else ''), choice)
control_html = "{label}<select disabled id=\"{id}\">{choices}</select>" \
.format(label=label, id=control_id, choices=choices_html)
elif type == 'set': # Multi-picker; implemented as checkboxes.
# TODO(gram): consider using "name" property of the control to group checkboxes. That
# way we can save the code of constructing and parsing control Ids with sequential
# numbers in it. Multiple checkboxes can share the same name.
choices = control.get('choices', value)
if not isinstance(choices, list) or len(choices) == 0:
raise Exception('set control must specify a nonempty set of choices')
if value is None:
value = choices
choices_html = ''
control_ids[-1] = '%s:%d' % (control_id, len(choices)) # replace ID to include count.
for i, choice in enumerate(choices):
checked = choice in value
choice_id = '%s:%d' % (control_id, i)
# TODO(gram): we may want a 'Submit/Refresh button as we may not want to rerun
# query on each checkbox change.
choices_html += """
<div>
<label>
<input type="checkbox" id="{id}" value="{choice}" {checked} disabled>
{choice}
</label>
</div>
""".format(id=choice_id, choice=choice, checked="checked" if checked else '')
control_html = "{label}<div>{choices}</div>".format(label=label, choices=choices_html)
elif type == 'checkbox':
control_html = """
<label>
<input type="checkbox" id="{id}" {checked} disabled>
{label}
</label>
""".format(label=label, id=control_id, checked="checked" if value else '')
elif type == 'slider':
min_ = control.get('min', None)
max_ = control.get('max', None)
if min_ is None or max_ is None:
raise Exception('slider control must specify a min and max value')
if max_ <= min_:
raise Exception('slider control must specify a min value less than max value')
step = control.get('step', 1 if isinstance(min_, int) and isinstance(max_, int)
else (float(max_ - min_) / 10.0))
if value is None:
value = min_
control_html = """
{label}
<input type="text" class="gchart-slider_value" id="{id}_value" value="{value}" disabled/>
<input type="range" class="gchart-slider" id="{id}" min="{min}" max="{max}" step="{step}"
value="{value}" disabled/>
""".format(label=label, id=control_id, value=value, min=min_, max=max_, step=step)
elif type == 'textbox':
if value is None:
value = ''
control_html = "{label}<input type=\"text\" value=\"{value}\" id=\"{id}\" disabled/>" \
.format(label=label, value=value, id=control_id)
else:
raise Exception(
'Unknown control type %s (expected picker, slider, checkbox, textbox or set)' % type)
control_defaults[varname] = value
controls_html += "<div class=\"gchart-control\">{control}</div>\n" \
.format(control=control_html)
controls_html = "<div class=\"gchart-controls\">{controls}</div>".format(controls=controls_html)
return controls_html, control_defaults, control_ids |
def move_to(self, thing, destination):
"Move a thing to a new location."
thing.bump = self.some_things_at(destination, Obstacle)
if not thing.bump:
thing.location = destination
for o in self.observers:
o.thing_moved(thing) | Move a thing to a new location. | Below is the instruction that describes the task:
### Input:
Move a thing to a new location.
### Response:
def move_to(self, thing, destination):
"Move a thing to a new location."
thing.bump = self.some_things_at(destination, Obstacle)
if not thing.bump:
thing.location = destination
for o in self.observers:
o.thing_moved(thing) |
def print_model(self, include_unsigned_edges=False):
"""Return a SIF string of the assembled model.
Parameters
----------
include_unsigned_edges : bool
If True, includes edges with an unknown activating/inactivating
relationship (e.g., most PTMs). Default is False.
"""
sif_str = ''
for edge in self.graph.edges(data=True):
n1 = edge[0]
n2 = edge[1]
data = edge[2]
polarity = data.get('polarity')
if polarity == 'negative':
rel = '-1'
elif polarity == 'positive':
rel = '1'
elif include_unsigned_edges:
rel = '0'
else:
continue
sif_str += '%s %s %s\n' % (n1, rel, n2)
return sif_str | Return a SIF string of the assembled model.
Parameters
----------
include_unsigned_edges : bool
If True, includes edges with an unknown activating/inactivating
relationship (e.g., most PTMs). Default is False. | Below is the instruction that describes the task:
### Input:
Return a SIF string of the assembled model.
Parameters
----------
include_unsigned_edges : bool
If True, includes edges with an unknown activating/inactivating
relationship (e.g., most PTMs). Default is False.
### Response:
def print_model(self, include_unsigned_edges=False):
"""Return a SIF string of the assembled model.
Parameters
----------
include_unsigned_edges : bool
If True, includes edges with an unknown activating/inactivating
relationship (e.g., most PTMs). Default is False.
"""
sif_str = ''
for edge in self.graph.edges(data=True):
n1 = edge[0]
n2 = edge[1]
data = edge[2]
polarity = data.get('polarity')
if polarity == 'negative':
rel = '-1'
elif polarity == 'positive':
rel = '1'
elif include_unsigned_edges:
rel = '0'
else:
continue
sif_str += '%s %s %s\n' % (n1, rel, n2)
return sif_str |
def setup_core_catalogs(portal):
"""Setup core catalogs
"""
logger.info("*** Setup Core Catalogs ***")
to_reindex = []
for catalog, name, attribute, meta_type in INDEXES:
c = api.get_tool(catalog)
indexes = c.indexes()
if name in indexes:
logger.info("*** Index '%s' already in Catalog [SKIP]" % name)
continue
logger.info("*** Adding Index '%s' for field '%s' to catalog ..."
% (meta_type, name))
# do we still need ZCTextIndexes?
if meta_type == "ZCTextIndex":
addZCTextIndex(c, name)
else:
c.addIndex(name, meta_type)
# get the new created index
index = c._catalog.getIndex(name)
# set the indexed attributes
if hasattr(index, "indexed_attrs"):
index.indexed_attrs = [attribute or name]
to_reindex.append((c, name))
logger.info("*** Added Index '%s' for field '%s' to catalog [DONE]"
% (meta_type, name))
# catalog columns
for catalog, name in COLUMNS:
c = api.get_tool(catalog)
if name not in c.schema():
logger.info("*** Adding Column '%s' to catalog '%s' ..."
% (name, catalog))
c.addColumn(name)
logger.info("*** Added Column '%s' to catalog '%s' [DONE]"
% (name, catalog))
else:
logger.info("*** Column '%s' already in catalog '%s' [SKIP]"
% (name, catalog))
continue
for catalog, name in to_reindex:
logger.info("*** Indexing new index '%s' ..." % name)
catalog.manage_reindexIndex(name)
logger.info("*** Indexing new index '%s' [DONE]" % name) | Setup core catalogs | Below is the instruction that describes the task:
### Input:
Setup core catalogs
### Response:
def setup_core_catalogs(portal):
"""Setup core catalogs
"""
logger.info("*** Setup Core Catalogs ***")
to_reindex = []
for catalog, name, attribute, meta_type in INDEXES:
c = api.get_tool(catalog)
indexes = c.indexes()
if name in indexes:
logger.info("*** Index '%s' already in Catalog [SKIP]" % name)
continue
logger.info("*** Adding Index '%s' for field '%s' to catalog ..."
% (meta_type, name))
# do we still need ZCTextIndexes?
if meta_type == "ZCTextIndex":
addZCTextIndex(c, name)
else:
c.addIndex(name, meta_type)
# get the new created index
index = c._catalog.getIndex(name)
# set the indexed attributes
if hasattr(index, "indexed_attrs"):
index.indexed_attrs = [attribute or name]
to_reindex.append((c, name))
logger.info("*** Added Index '%s' for field '%s' to catalog [DONE]"
% (meta_type, name))
# catalog columns
for catalog, name in COLUMNS:
c = api.get_tool(catalog)
if name not in c.schema():
logger.info("*** Adding Column '%s' to catalog '%s' ..."
% (name, catalog))
c.addColumn(name)
logger.info("*** Added Column '%s' to catalog '%s' [DONE]"
% (name, catalog))
else:
logger.info("*** Column '%s' already in catalog '%s' [SKIP]"
% (name, catalog))
continue
for catalog, name in to_reindex:
logger.info("*** Indexing new index '%s' ..." % name)
catalog.manage_reindexIndex(name)
logger.info("*** Indexing new index '%s' [DONE]" % name) |
def boot_priority(self, boot_priority):
"""
Sets the boot priority for this QEMU VM.
:param boot_priority: QEMU boot priority
"""
self._boot_priority = boot_priority
log.info('QEMU VM "{name}" [{id}] has set the boot priority to {boot_priority}'.format(name=self._name,
id=self._id,
boot_priority=self._boot_priority)) | Sets the boot priority for this QEMU VM.
:param boot_priority: QEMU boot priority | Below is the instruction that describes the task:
### Input:
Sets the boot priority for this QEMU VM.
:param boot_priority: QEMU boot priority
### Response:
def boot_priority(self, boot_priority):
"""
Sets the boot priority for this QEMU VM.
:param boot_priority: QEMU boot priority
"""
self._boot_priority = boot_priority
log.info('QEMU VM "{name}" [{id}] has set the boot priority to {boot_priority}'.format(name=self._name,
id=self._id,
boot_priority=self._boot_priority)) |
def check_for_lane_permission(self):
"""
One or more permissions can be associated with a lane
of a workflow. In a similar way, a lane can be
restricted with relation to other lanes of the workflow.
This method called on lane changes and checks user has
required permissions and relations.
Raises:
HTTPForbidden: if the current user hasn't got the
required permissions and proper relations
"""
# TODO: Cache lane_data in app memory
if self.current.lane_permission:
log.debug("HAS LANE PERM: %s" % self.current.lane_permission)
perm = self.current.lane_permission
if not self.current.has_permission(perm):
raise HTTPError(403, "You don't have required lane permission: %s" % perm)
if self.current.lane_relations:
context = self.get_pool_context()
log.debug("HAS LANE RELS: %s" % self.current.lane_relations)
try:
cond_result = eval(self.current.lane_relations, context)
except:
log.exception("CONDITION EVAL ERROR : %s || %s" % (
self.current.lane_relations, context))
raise
if not cond_result:
log.debug("LANE RELATION ERR: %s %s" % (self.current.lane_relations, context))
raise HTTPError(403, "You aren't qualified for this lane: %s" %
self.current.lane_relations) | One or more permissions can be associated with a lane
of a workflow. In a similar way, a lane can be
restricted with relation to other lanes of the workflow.
This method called on lane changes and checks user has
required permissions and relations.
Raises:
HTTPForbidden: if the current user hasn't got the
required permissions and proper relations | Below is the instruction that describes the task:
### Input:
One or more permissions can be associated with a lane
of a workflow. In a similar way, a lane can be
restricted with relation to other lanes of the workflow.
This method called on lane changes and checks user has
required permissions and relations.
Raises:
HTTPForbidden: if the current user hasn't got the
required permissions and proper relations
### Response:
def check_for_lane_permission(self):
"""
One or more permissions can be associated with a lane
of a workflow. In a similar way, a lane can be
restricted with relation to other lanes of the workflow.
This method called on lane changes and checks user has
required permissions and relations.
Raises:
HTTPForbidden: if the current user hasn't got the
required permissions and proper relations
"""
# TODO: Cache lane_data in app memory
if self.current.lane_permission:
log.debug("HAS LANE PERM: %s" % self.current.lane_permission)
perm = self.current.lane_permission
if not self.current.has_permission(perm):
raise HTTPError(403, "You don't have required lane permission: %s" % perm)
if self.current.lane_relations:
context = self.get_pool_context()
log.debug("HAS LANE RELS: %s" % self.current.lane_relations)
try:
cond_result = eval(self.current.lane_relations, context)
except:
log.exception("CONDITION EVAL ERROR : %s || %s" % (
self.current.lane_relations, context))
raise
if not cond_result:
log.debug("LANE RELATION ERR: %s %s" % (self.current.lane_relations, context))
raise HTTPError(403, "You aren't qualified for this lane: %s" %
self.current.lane_relations) |
def index(self, slide_layout):
"""Return zero-based index of *slide_layout* in this collection.
Raises ValueError if *slide_layout* is not present in this collection.
"""
for idx, this_layout in enumerate(self):
if slide_layout == this_layout:
return idx
raise ValueError("layout not in this SlideLayouts collection") | Return zero-based index of *slide_layout* in this collection.
Raises ValueError if *slide_layout* is not present in this collection. | Below is the instruction that describes the task:
### Input:
Return zero-based index of *slide_layout* in this collection.
Raises ValueError if *slide_layout* is not present in this collection.
### Response:
def index(self, slide_layout):
"""Return zero-based index of *slide_layout* in this collection.
Raises ValueError if *slide_layout* is not present in this collection.
"""
for idx, this_layout in enumerate(self):
if slide_layout == this_layout:
return idx
raise ValueError("layout not in this SlideLayouts collection") |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.