code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def derive_coordinates(self):
"""
Depending on the compilation source, some members of the SourceRef
object may be incomplete.
Calling this function performs the necessary derivations to complete the
object.
"""
if self._coordinates_resolved:
# Coordinates were already resolved. Skip
return
if self.seg_map is not None:
# Translate coordinates
self.start, self.filename, include_ref = self.seg_map.derive_source_offset(self.start)
self.end, end_filename, _ = self.seg_map.derive_source_offset(self.end, is_end=True)
else:
end_filename = self.filename
line_start = 0
lineno = 1
file_pos = 0
# Skip deriving end coordinate if selection spans multiple files
if self.filename != end_filename:
get_end = False
elif self.end is None:
get_end = False
else:
get_end = True
if (self.filename is not None) and (self.start is not None):
with open(self.filename, 'r', newline='', encoding='utf_8') as fp:
while True:
line_text = fp.readline()
file_pos += len(line_text)
if line_text == "":
break
if (self.start_line is None) and (self.start < file_pos):
self.start_line = lineno
self.start_col = self.start - line_start
self.start_line_text = line_text.rstrip("\n").rstrip("\r")
if not get_end:
break
if get_end and (self.end_line is None) and (self.end < file_pos):
self.end_line = lineno
self.end_col = self.end - line_start
break
lineno += 1
line_start = file_pos
# If no end coordinate was derived, just do a single char selection
if not get_end:
self.end_line = self.start_line
self.end_col = self.start_col
self.end = self.start
self._coordinates_resolved = True | Depending on the compilation source, some members of the SourceRef
object may be incomplete.
Calling this function performs the necessary derivations to complete the
object. | Below is the instruction that describes the task:
### Input:
Depending on the compilation source, some members of the SourceRef
object may be incomplete.
Calling this function performs the necessary derivations to complete the
object.
### Response:
def derive_coordinates(self):
"""
Depending on the compilation source, some members of the SourceRef
object may be incomplete.
Calling this function performs the necessary derivations to complete the
object.
"""
if self._coordinates_resolved:
# Coordinates were already resolved. Skip
return
if self.seg_map is not None:
# Translate coordinates
self.start, self.filename, include_ref = self.seg_map.derive_source_offset(self.start)
self.end, end_filename, _ = self.seg_map.derive_source_offset(self.end, is_end=True)
else:
end_filename = self.filename
line_start = 0
lineno = 1
file_pos = 0
# Skip deriving end coordinate if selection spans multiple files
if self.filename != end_filename:
get_end = False
elif self.end is None:
get_end = False
else:
get_end = True
if (self.filename is not None) and (self.start is not None):
with open(self.filename, 'r', newline='', encoding='utf_8') as fp:
while True:
line_text = fp.readline()
file_pos += len(line_text)
if line_text == "":
break
if (self.start_line is None) and (self.start < file_pos):
self.start_line = lineno
self.start_col = self.start - line_start
self.start_line_text = line_text.rstrip("\n").rstrip("\r")
if not get_end:
break
if get_end and (self.end_line is None) and (self.end < file_pos):
self.end_line = lineno
self.end_col = self.end - line_start
break
lineno += 1
line_start = file_pos
# If no end coordinate was derived, just do a single char selection
if not get_end:
self.end_line = self.start_line
self.end_col = self.start_col
self.end = self.start
self._coordinates_resolved = True |
def create(cls, object_type=None, object_uuid=None, **kwargs):
"""Create a new record identifier.
Note: if the object_type and object_uuid values are passed, then the
PID status will be automatically setted to
:attr:`invenio_pidstore.models.PIDStatus.REGISTERED`.
:param object_type: The object type. (Default: None.)
:param object_uuid: The object identifier. (Default: None).
:param kwargs: You specify the pid_value.
"""
# Request next integer in recid sequence.
assert 'pid_value' not in kwargs
kwargs['pid_value'] = str(RecordIdentifier.next())
kwargs.setdefault('status', cls.default_status)
if object_type and object_uuid:
kwargs['status'] = PIDStatus.REGISTERED
return super(RecordIdProvider, cls).create(
object_type=object_type, object_uuid=object_uuid, **kwargs) | Create a new record identifier.
Note: if the object_type and object_uuid values are passed, then the
PID status will be automatically setted to
:attr:`invenio_pidstore.models.PIDStatus.REGISTERED`.
:param object_type: The object type. (Default: None.)
:param object_uuid: The object identifier. (Default: None).
:param kwargs: You specify the pid_value. | Below is the instruction that describes the task:
### Input:
Create a new record identifier.
Note: if the object_type and object_uuid values are passed, then the
PID status will be automatically setted to
:attr:`invenio_pidstore.models.PIDStatus.REGISTERED`.
:param object_type: The object type. (Default: None.)
:param object_uuid: The object identifier. (Default: None).
:param kwargs: You specify the pid_value.
### Response:
def create(cls, object_type=None, object_uuid=None, **kwargs):
"""Create a new record identifier.
Note: if the object_type and object_uuid values are passed, then the
PID status will be automatically setted to
:attr:`invenio_pidstore.models.PIDStatus.REGISTERED`.
:param object_type: The object type. (Default: None.)
:param object_uuid: The object identifier. (Default: None).
:param kwargs: You specify the pid_value.
"""
# Request next integer in recid sequence.
assert 'pid_value' not in kwargs
kwargs['pid_value'] = str(RecordIdentifier.next())
kwargs.setdefault('status', cls.default_status)
if object_type and object_uuid:
kwargs['status'] = PIDStatus.REGISTERED
return super(RecordIdProvider, cls).create(
object_type=object_type, object_uuid=object_uuid, **kwargs) |
def to_gds(self, multiplier):
"""
Convert this object to a series of GDSII elements.
Parameters
----------
multiplier : number
A number that multiplies all dimensions written in the GDSII
elements.
Returns
-------
out : string
The GDSII binary string that represents this object.
"""
data = []
for ii in range(len(self.polygons)):
if len(self.polygons[ii]) > 4094:
raise ValueError("[GDSPY] Polygons with more than 4094 are "
"not supported by the GDSII format.")
data.append(
struct.pack('>10h', 4, 0x0800, 6, 0x0D02, self.layers[ii], 6,
0x0E02, self.datatypes[ii],
12 + 8 * len(self.polygons[ii]), 0x1003))
data.extend(
struct.pack('>2l', int(round(point[0] * multiplier)),
int(round(point[1] * multiplier)))
for point in self.polygons[ii])
data.append(
struct.pack('>2l2h',
int(round(self.polygons[ii][0][0] * multiplier)),
int(round(self.polygons[ii][0][1] * multiplier)),
4, 0x1100))
return b''.join(data) | Convert this object to a series of GDSII elements.
Parameters
----------
multiplier : number
A number that multiplies all dimensions written in the GDSII
elements.
Returns
-------
out : string
The GDSII binary string that represents this object. | Below is the instruction that describes the task:
### Input:
Convert this object to a series of GDSII elements.
Parameters
----------
multiplier : number
A number that multiplies all dimensions written in the GDSII
elements.
Returns
-------
out : string
The GDSII binary string that represents this object.
### Response:
def to_gds(self, multiplier):
"""
Convert this object to a series of GDSII elements.
Parameters
----------
multiplier : number
A number that multiplies all dimensions written in the GDSII
elements.
Returns
-------
out : string
The GDSII binary string that represents this object.
"""
data = []
for ii in range(len(self.polygons)):
if len(self.polygons[ii]) > 4094:
raise ValueError("[GDSPY] Polygons with more than 4094 are "
"not supported by the GDSII format.")
data.append(
struct.pack('>10h', 4, 0x0800, 6, 0x0D02, self.layers[ii], 6,
0x0E02, self.datatypes[ii],
12 + 8 * len(self.polygons[ii]), 0x1003))
data.extend(
struct.pack('>2l', int(round(point[0] * multiplier)),
int(round(point[1] * multiplier)))
for point in self.polygons[ii])
data.append(
struct.pack('>2l2h',
int(round(self.polygons[ii][0][0] * multiplier)),
int(round(self.polygons[ii][0][1] * multiplier)),
4, 0x1100))
return b''.join(data) |
def get(self, name, typ):
"""
Gets a counter specified by its name.
It counter does not exist or its type doesn't match the specified type
it creates a new one.
:param name: a counter name to retrieve.
:param typ: a counter type.
:return: an existing or newly created counter of the specified type.
"""
if name == None or len(name) == 0:
raise Exception("Counter name was not set")
self._lock.acquire()
try:
counter = self._cache[name] if name in self._cache else None
if counter == None or counter.type != typ:
counter = Counter(name, typ)
self._cache[name] = counter
return counter
finally:
self._lock.release() | Gets a counter specified by its name.
It counter does not exist or its type doesn't match the specified type
it creates a new one.
:param name: a counter name to retrieve.
:param typ: a counter type.
:return: an existing or newly created counter of the specified type. | Below is the instruction that describes the task:
### Input:
Gets a counter specified by its name.
It counter does not exist or its type doesn't match the specified type
it creates a new one.
:param name: a counter name to retrieve.
:param typ: a counter type.
:return: an existing or newly created counter of the specified type.
### Response:
def get(self, name, typ):
"""
Gets a counter specified by its name.
It counter does not exist or its type doesn't match the specified type
it creates a new one.
:param name: a counter name to retrieve.
:param typ: a counter type.
:return: an existing or newly created counter of the specified type.
"""
if name == None or len(name) == 0:
raise Exception("Counter name was not set")
self._lock.acquire()
try:
counter = self._cache[name] if name in self._cache else None
if counter == None or counter.type != typ:
counter = Counter(name, typ)
self._cache[name] = counter
return counter
finally:
self._lock.release() |
def start(self):
"""Starts the delayed execution"""
if self._timer:
self._timer.cancel()
self._timer = Timer(self._timeout, self._fire)
    self._timer.start() | Starts the delayed execution | Below is the instruction that describes the task:
### Input:
Starts the delayed execution
### Response:
def start(self):
"""Starts the delayed execution"""
if self._timer:
self._timer.cancel()
self._timer = Timer(self._timeout, self._fire)
self._timer.start() |
def percentile(self, p):
"""
Computes the percentile of a specific value in [0,100].
"""
if not (0 <= p <= 100):
raise ValueError("p must be between 0 and 100, inclusive.")
p = float(p)/100.
p *= self.n
c_i = None
t = 0
if p == 0:
return self.C.min_item()[1].mean
for i, key in enumerate(self.C.keys()):
c_i_plus_one = self.C[key]
if i == 0:
k = c_i_plus_one.count / 2
else:
k = (c_i_plus_one.count + c_i.count) / 2.
if p < t + k:
z1 = p - t
z2 = t + k - p
return (c_i.mean * z2 + c_i_plus_one.mean * z1) / (z1 + z2)
c_i = c_i_plus_one
t += k
    return self.C.max_item()[1].mean | Computes the percentile of a specific value in [0,100]. | Below is the instruction that describes the task:
### Input:
Computes the percentile of a specific value in [0,100].
### Response:
def percentile(self, p):
"""
Computes the percentile of a specific value in [0,100].
"""
if not (0 <= p <= 100):
raise ValueError("p must be between 0 and 100, inclusive.")
p = float(p)/100.
p *= self.n
c_i = None
t = 0
if p == 0:
return self.C.min_item()[1].mean
for i, key in enumerate(self.C.keys()):
c_i_plus_one = self.C[key]
if i == 0:
k = c_i_plus_one.count / 2
else:
k = (c_i_plus_one.count + c_i.count) / 2.
if p < t + k:
z1 = p - t
z2 = t + k - p
return (c_i.mean * z2 + c_i_plus_one.mean * z1) / (z1 + z2)
c_i = c_i_plus_one
t += k
return self.C.max_item()[1].mean |
def _ConvertStructMessage(value, message):
"""Convert a JSON representation into Struct message."""
if not isinstance(value, dict):
raise ParseError(
'Struct must be in a dict which is {0}.'.format(value))
for key in value:
_ConvertValueMessage(value[key], message.fields[key])
    return | Convert a JSON representation into Struct message. | Below is the instruction that describes the task:
### Input:
Convert a JSON representation into Struct message.
### Response:
def _ConvertStructMessage(value, message):
"""Convert a JSON representation into Struct message."""
if not isinstance(value, dict):
raise ParseError(
'Struct must be in a dict which is {0}.'.format(value))
for key in value:
_ConvertValueMessage(value[key], message.fields[key])
return |
def _plot_rprof_list(sdat, lovs, rprofs, metas, stepstr, rads=None):
"""Plot requested profiles"""
if rads is None:
rads = {}
for vfig in lovs:
fig, axes = plt.subplots(ncols=len(vfig), sharey=True)
axes = [axes] if len(vfig) == 1 else axes
fname = 'rprof_'
for iplt, vplt in enumerate(vfig):
xlabel = None
for ivar, rvar in enumerate(vplt):
fname += rvar + '_'
rad = rads[rvar] if rvar in rads else rprofs['r']
if conf.rprof.depth:
rad = rprofs['bounds'][1] - rad
axes[iplt].plot(rprofs[rvar], rad,
conf.rprof.style,
label=metas[rvar].description)
if conf.rprof.depth:
axes[iplt].invert_yaxis()
if xlabel is None:
xlabel = metas[rvar].kind
elif xlabel != metas[rvar].kind:
xlabel = ''
if ivar == 0:
xlabel = metas[rvar].description
if xlabel:
_, unit = sdat.scale(1, metas[rvar].dim)
if unit:
xlabel += ' ({})'.format(unit)
axes[iplt].set_xlabel(xlabel)
if vplt[0][:3] == 'eta': # list of log variables
axes[iplt].set_xscale('log')
axes[iplt].set_xlim(left=conf.plot.vmin, right=conf.plot.vmax)
if ivar:
axes[iplt].legend()
ylabel = 'Depth' if conf.rprof.depth else 'Radius'
_, unit = sdat.scale(1, 'm')
if unit:
ylabel += ' ({})'.format(unit)
axes[0].set_ylabel(ylabel)
        misc.saveplot(fig, fname + stepstr) | Plot requested profiles | Below is the instruction that describes the task:
### Input:
Plot requested profiles
### Response:
def _plot_rprof_list(sdat, lovs, rprofs, metas, stepstr, rads=None):
"""Plot requested profiles"""
if rads is None:
rads = {}
for vfig in lovs:
fig, axes = plt.subplots(ncols=len(vfig), sharey=True)
axes = [axes] if len(vfig) == 1 else axes
fname = 'rprof_'
for iplt, vplt in enumerate(vfig):
xlabel = None
for ivar, rvar in enumerate(vplt):
fname += rvar + '_'
rad = rads[rvar] if rvar in rads else rprofs['r']
if conf.rprof.depth:
rad = rprofs['bounds'][1] - rad
axes[iplt].plot(rprofs[rvar], rad,
conf.rprof.style,
label=metas[rvar].description)
if conf.rprof.depth:
axes[iplt].invert_yaxis()
if xlabel is None:
xlabel = metas[rvar].kind
elif xlabel != metas[rvar].kind:
xlabel = ''
if ivar == 0:
xlabel = metas[rvar].description
if xlabel:
_, unit = sdat.scale(1, metas[rvar].dim)
if unit:
xlabel += ' ({})'.format(unit)
axes[iplt].set_xlabel(xlabel)
if vplt[0][:3] == 'eta': # list of log variables
axes[iplt].set_xscale('log')
axes[iplt].set_xlim(left=conf.plot.vmin, right=conf.plot.vmax)
if ivar:
axes[iplt].legend()
ylabel = 'Depth' if conf.rprof.depth else 'Radius'
_, unit = sdat.scale(1, 'm')
if unit:
ylabel += ' ({})'.format(unit)
axes[0].set_ylabel(ylabel)
misc.saveplot(fig, fname + stepstr) |
def unescape(value):
"""
Inverse of escape.
"""
pattern = ESCAPE_FMT.replace('%02X', '(?P<code>[0-9A-Fa-f]{2})')
# the pattern must be bytes to operate on bytes
pattern_bytes = pattern.encode('ascii')
re_esc = re.compile(pattern_bytes)
    return re_esc.sub(_unescape_code, value.encode('ascii')).decode('utf-8') | Inverse of escape. | Below is the instruction that describes the task:
### Input:
Inverse of escape.
### Response:
def unescape(value):
"""
Inverse of escape.
"""
pattern = ESCAPE_FMT.replace('%02X', '(?P<code>[0-9A-Fa-f]{2})')
# the pattern must be bytes to operate on bytes
pattern_bytes = pattern.encode('ascii')
re_esc = re.compile(pattern_bytes)
return re_esc.sub(_unescape_code, value.encode('ascii')).decode('utf-8') |
def _getFromDate(l, date):
'''
returns the index of given or best fitting date
'''
try:
date = _toDate(date)
i = _insertDateIndex(date, l) - 1
if i == -1:
return l[0]
return l[i]
except (ValueError, TypeError):
# ValueError: date invalid / TypeError: date = None
        return l[0] | returns the index of given or best fitting date | Below is the instruction that describes the task:
### Input:
returns the index of given or best fitting date
### Response:
def _getFromDate(l, date):
'''
returns the index of given or best fitting date
'''
try:
date = _toDate(date)
i = _insertDateIndex(date, l) - 1
if i == -1:
return l[0]
return l[i]
except (ValueError, TypeError):
# ValueError: date invalid / TypeError: date = None
return l[0] |
def validate_clabe(clabe: str) -> bool:
"""
Validate CLABE according to
https://es.wikipedia.org/wiki/CLABE#D.C3.ADgito_control
"""
return (clabe.isdigit() and
len(clabe) == CLABE_LENGTH and
clabe[:3] in BANKS.keys() and
clabe[-1] == compute_control_digit(clabe)) | Validate CLABE according to
https://es.wikipedia.org/wiki/CLABE#D.C3.ADgito_control | Below is the instruction that describes the task:
### Input:
Validate CLABE according to
https://es.wikipedia.org/wiki/CLABE#D.C3.ADgito_control
### Response:
def validate_clabe(clabe: str) -> bool:
"""
Validate CLABE according to
https://es.wikipedia.org/wiki/CLABE#D.C3.ADgito_control
"""
return (clabe.isdigit() and
len(clabe) == CLABE_LENGTH and
clabe[:3] in BANKS.keys() and
clabe[-1] == compute_control_digit(clabe)) |
def get_kind(self):
"""
Return the 'kind' argument of the instruction
:rtype: int
"""
if self.OP > 0xff:
if self.OP >= 0xf2ff:
return DALVIK_OPCODES_OPTIMIZED[self.OP][1][1]
return DALVIK_OPCODES_EXTENDED_WIDTH[self.OP][1][1]
return DALVIK_OPCODES_FORMAT[self.OP][1][1] | Return the 'kind' argument of the instruction
:rtype: int | Below is the instruction that describes the task:
### Input:
Return the 'kind' argument of the instruction
:rtype: int
### Response:
def get_kind(self):
"""
Return the 'kind' argument of the instruction
:rtype: int
"""
if self.OP > 0xff:
if self.OP >= 0xf2ff:
return DALVIK_OPCODES_OPTIMIZED[self.OP][1][1]
return DALVIK_OPCODES_EXTENDED_WIDTH[self.OP][1][1]
return DALVIK_OPCODES_FORMAT[self.OP][1][1] |
def cancel(self, job_ids):
''' Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False.
'''
for job in job_ids:
logger.debug("Terminating job/proc_id : {0}".format(job))
# Here we are assuming that for local, the job_ids are the process id's
proc = self.resources[job]['proc']
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
self.resources[job]['status'] = 'CANCELLED'
rets = [True for i in job_ids]
return rets | Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False. | Below is the instruction that describes the task:
### Input:
Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False.
### Response:
def cancel(self, job_ids):
''' Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False.
'''
for job in job_ids:
logger.debug("Terminating job/proc_id : {0}".format(job))
# Here we are assuming that for local, the job_ids are the process id's
proc = self.resources[job]['proc']
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
self.resources[job]['status'] = 'CANCELLED'
rets = [True for i in job_ids]
return rets |
def geometrize_stops(stops: List[str], *, use_utm: bool = False) -> DataFrame:
"""
Given a stops DataFrame, convert it to a GeoPandas GeoDataFrame
and return the result.
Parameters
----------
stops : DataFrame
A GTFS stops table
use_utm : boolean
If ``True``, then convert the output to local UTM coordinates;
otherwise use WGS84 coordinates
Returns
-------
GeoPandas GeoDataFrame
Looks like the given stops DataFrame, but has a ``'geometry'``
column of Shapely Point objects that replaces
the ``'stop_lon'`` and ``'stop_lat'`` columns.
Notes
-----
Requires GeoPandas.
"""
import geopandas as gpd
g = (
stops.assign(
geometry=lambda x: [
sg.Point(p) for p in x[["stop_lon", "stop_lat"]].values
]
)
.drop(["stop_lon", "stop_lat"], axis=1)
.pipe(lambda x: gpd.GeoDataFrame(x, crs=cs.WGS84))
)
if use_utm:
lat, lon = stops.loc[0, ["stop_lat", "stop_lon"]].values
crs = hp.get_utm_crs(lat, lon)
g = g.to_crs(crs)
return g | Given a stops DataFrame, convert it to a GeoPandas GeoDataFrame
and return the result.
Parameters
----------
stops : DataFrame
A GTFS stops table
use_utm : boolean
If ``True``, then convert the output to local UTM coordinates;
otherwise use WGS84 coordinates
Returns
-------
GeoPandas GeoDataFrame
Looks like the given stops DataFrame, but has a ``'geometry'``
column of Shapely Point objects that replaces
the ``'stop_lon'`` and ``'stop_lat'`` columns.
Notes
-----
Requires GeoPandas. | Below is the instruction that describes the task:
### Input:
Given a stops DataFrame, convert it to a GeoPandas GeoDataFrame
and return the result.
Parameters
----------
stops : DataFrame
A GTFS stops table
use_utm : boolean
If ``True``, then convert the output to local UTM coordinates;
otherwise use WGS84 coordinates
Returns
-------
GeoPandas GeoDataFrame
Looks like the given stops DataFrame, but has a ``'geometry'``
column of Shapely Point objects that replaces
the ``'stop_lon'`` and ``'stop_lat'`` columns.
Notes
-----
Requires GeoPandas.
### Response:
def geometrize_stops(stops: List[str], *, use_utm: bool = False) -> DataFrame:
"""
Given a stops DataFrame, convert it to a GeoPandas GeoDataFrame
and return the result.
Parameters
----------
stops : DataFrame
A GTFS stops table
use_utm : boolean
If ``True``, then convert the output to local UTM coordinates;
otherwise use WGS84 coordinates
Returns
-------
GeoPandas GeoDataFrame
Looks like the given stops DataFrame, but has a ``'geometry'``
column of Shapely Point objects that replaces
the ``'stop_lon'`` and ``'stop_lat'`` columns.
Notes
-----
Requires GeoPandas.
"""
import geopandas as gpd
g = (
stops.assign(
geometry=lambda x: [
sg.Point(p) for p in x[["stop_lon", "stop_lat"]].values
]
)
.drop(["stop_lon", "stop_lat"], axis=1)
.pipe(lambda x: gpd.GeoDataFrame(x, crs=cs.WGS84))
)
if use_utm:
lat, lon = stops.loc[0, ["stop_lat", "stop_lon"]].values
crs = hp.get_utm_crs(lat, lon)
g = g.to_crs(crs)
return g |
def floating_ip_pool_list(self):
'''
List all floating IP pools
.. versionadded:: 2016.3.0
'''
nt_ks = self.compute_conn
pools = nt_ks.floating_ip_pools.list()
response = {}
for pool in pools:
response[pool.name] = {
'name': pool.name,
}
return response | List all floating IP pools
.. versionadded:: 2016.3.0 | Below is the instruction that describes the task:
### Input:
List all floating IP pools
.. versionadded:: 2016.3.0
### Response:
def floating_ip_pool_list(self):
'''
List all floating IP pools
.. versionadded:: 2016.3.0
'''
nt_ks = self.compute_conn
pools = nt_ks.floating_ip_pools.list()
response = {}
for pool in pools:
response[pool.name] = {
'name': pool.name,
}
return response |
def authorization_url(self, client_id, redirect_uri, approval_prompt='auto',
scope=None, state=None):
"""
Get the URL needed to authorize your application to access a Strava user's information.
:param client_id: The numeric developer client id.
:type client_id: int
:param redirect_uri: The URL that Strava will redirect to after successful (or failed) authorization.
:type redirect_uri: str
:param approval_prompt: Whether to prompt for approval even if approval already granted to app.
Choices are 'auto' or 'force'. (Default is 'auto')
:type approval_prompt: str
:param scope: The access scope required. Omit to imply "public".
Valid values are 'read', 'read_all', 'profile:read_all', 'profile:write', 'profile:read_all',
'activity:read_all', 'activity:write'
:type scope: str
:param state: An arbitrary variable that will be returned to your application in the redirect URI.
:type state: str
:return: The URL to use for authorization link.
:rtype: str
"""
return self.protocol.authorization_url(client_id=client_id,
redirect_uri=redirect_uri,
approval_prompt=approval_prompt,
scope=scope, state=state) | Get the URL needed to authorize your application to access a Strava user's information.
:param client_id: The numeric developer client id.
:type client_id: int
:param redirect_uri: The URL that Strava will redirect to after successful (or failed) authorization.
:type redirect_uri: str
:param approval_prompt: Whether to prompt for approval even if approval already granted to app.
Choices are 'auto' or 'force'. (Default is 'auto')
:type approval_prompt: str
:param scope: The access scope required. Omit to imply "public".
Valid values are 'read', 'read_all', 'profile:read_all', 'profile:write', 'profile:read_all',
'activity:read_all', 'activity:write'
:type scope: str
:param state: An arbitrary variable that will be returned to your application in the redirect URI.
:type state: str
:return: The URL to use for authorization link.
:rtype: str | Below is the instruction that describes the task:
### Input:
Get the URL needed to authorize your application to access a Strava user's information.
:param client_id: The numeric developer client id.
:type client_id: int
:param redirect_uri: The URL that Strava will redirect to after successful (or failed) authorization.
:type redirect_uri: str
:param approval_prompt: Whether to prompt for approval even if approval already granted to app.
Choices are 'auto' or 'force'. (Default is 'auto')
:type approval_prompt: str
:param scope: The access scope required. Omit to imply "public".
Valid values are 'read', 'read_all', 'profile:read_all', 'profile:write', 'profile:read_all',
'activity:read_all', 'activity:write'
:type scope: str
:param state: An arbitrary variable that will be returned to your application in the redirect URI.
:type state: str
:return: The URL to use for authorization link.
:rtype: str
### Response:
def authorization_url(self, client_id, redirect_uri, approval_prompt='auto',
scope=None, state=None):
"""
Get the URL needed to authorize your application to access a Strava user's information.
:param client_id: The numeric developer client id.
:type client_id: int
:param redirect_uri: The URL that Strava will redirect to after successful (or failed) authorization.
:type redirect_uri: str
:param approval_prompt: Whether to prompt for approval even if approval already granted to app.
Choices are 'auto' or 'force'. (Default is 'auto')
:type approval_prompt: str
:param scope: The access scope required. Omit to imply "public".
Valid values are 'read', 'read_all', 'profile:read_all', 'profile:write', 'profile:read_all',
'activity:read_all', 'activity:write'
:type scope: str
:param state: An arbitrary variable that will be returned to your application in the redirect URI.
:type state: str
:return: The URL to use for authorization link.
:rtype: str
"""
return self.protocol.authorization_url(client_id=client_id,
redirect_uri=redirect_uri,
approval_prompt=approval_prompt,
scope=scope, state=state) |
def mapper(class_, local_table=None, id_attribute='id', slug_expression=None,
*args, **kwargs):
"""
Convenience wrapper around the SA mapper which will set up the hybrid
"id" and "slug" attributes required by everest after calling the SA
mapper.
If you (e.g., for testing purposes) want to clear mappers created with
this function, use the :func:`clear_mappers` function in this module.
:param str id_attribute: the name of the column in the table to use as
ID column (will be aliased to a new "id" attribute in the mapped class)
:param slug_expression: function to generate a slug SQL expression given
the mapped class as argument.
"""
mpr = sa_mapper(class_, local_table=local_table, *args, **kwargs)
# Set up the ID attribute as a hybrid property, if necessary.
if id_attribute != 'id':
# Make sure we are not overwriting an already mapped or customized
# 'id' attribute.
if 'id' in mpr.columns:
mpr.dispose()
raise ValueError('Attempting to overwrite the mapped "id" '
'attribute.')
elif isdatadescriptor(getattr(class_, 'id', None)):
mpr.dispose()
raise ValueError('Attempting to overwrite the custom data '
'descriptor defined for the "id" attribute.')
class_.id = synonym(id_attribute)
# If this is a polymorphic class, a base class may already have a
# hybrid descriptor set as slug attribute.
slug_descr = None
for base_cls in class_.__mro__:
try:
slug_descr = object.__getattribute__(base_cls, 'slug')
except AttributeError:
pass
else:
break
if isinstance(slug_descr, hybrid_descriptor):
if not slug_expression is None:
raise ValueError('Attempting to overwrite the expression for '
'an inherited slug hybrid descriptor.')
hyb_descr = slug_descr
else:
# Set up the slug attribute as a hybrid property.
if slug_expression is None:
cls_expr = lambda cls: cast(getattr(cls, 'id'), String)
else:
cls_expr = slug_expression
hyb_descr = hybrid_descriptor(slug_descr, expr=cls_expr)
class_.slug = hyb_descr
return mpr | Convenience wrapper around the SA mapper which will set up the hybrid
"id" and "slug" attributes required by everest after calling the SA
mapper.
If you (e.g., for testing purposes) want to clear mappers created with
this function, use the :func:`clear_mappers` function in this module.
:param str id_attribute: the name of the column in the table to use as
ID column (will be aliased to a new "id" attribute in the mapped class)
:param slug_expression: function to generate a slug SQL expression given
the mapped class as argument. | Below is the instruction that describes the task:
### Input:
Convenience wrapper around the SA mapper which will set up the hybrid
"id" and "slug" attributes required by everest after calling the SA
mapper.
If you (e.g., for testing purposes) want to clear mappers created with
this function, use the :func:`clear_mappers` function in this module.
:param str id_attribute: the name of the column in the table to use as
ID column (will be aliased to a new "id" attribute in the mapped class)
:param slug_expression: function to generate a slug SQL expression given
the mapped class as argument.
### Response:
def mapper(class_, local_table=None, id_attribute='id', slug_expression=None,
*args, **kwargs):
"""
Convenience wrapper around the SA mapper which will set up the hybrid
"id" and "slug" attributes required by everest after calling the SA
mapper.
If you (e.g., for testing purposes) want to clear mappers created with
this function, use the :func:`clear_mappers` function in this module.
:param str id_attribute: the name of the column in the table to use as
ID column (will be aliased to a new "id" attribute in the mapped class)
:param slug_expression: function to generate a slug SQL expression given
the mapped class as argument.
"""
mpr = sa_mapper(class_, local_table=local_table, *args, **kwargs)
# Set up the ID attribute as a hybrid property, if necessary.
if id_attribute != 'id':
# Make sure we are not overwriting an already mapped or customized
# 'id' attribute.
if 'id' in mpr.columns:
mpr.dispose()
raise ValueError('Attempting to overwrite the mapped "id" '
'attribute.')
elif isdatadescriptor(getattr(class_, 'id', None)):
mpr.dispose()
raise ValueError('Attempting to overwrite the custom data '
'descriptor defined for the "id" attribute.')
class_.id = synonym(id_attribute)
# If this is a polymorphic class, a base class may already have a
# hybrid descriptor set as slug attribute.
slug_descr = None
for base_cls in class_.__mro__:
try:
slug_descr = object.__getattribute__(base_cls, 'slug')
except AttributeError:
pass
else:
break
if isinstance(slug_descr, hybrid_descriptor):
if not slug_expression is None:
raise ValueError('Attempting to overwrite the expression for '
'an inherited slug hybrid descriptor.')
hyb_descr = slug_descr
else:
# Set up the slug attribute as a hybrid property.
if slug_expression is None:
cls_expr = lambda cls: cast(getattr(cls, 'id'), String)
else:
cls_expr = slug_expression
hyb_descr = hybrid_descriptor(slug_descr, expr=cls_expr)
class_.slug = hyb_descr
return mpr |
def _match(self, p, value):
"""
Calls either _match_operator or _match_operand depending on the pattern (p) provided.
"""
if self._is_operator(p):
return self._match_operator(p, value)
else:
try:
if self._is_value_filter(p):
return self._match_value_filter(p, value)
else:
return self._match_key_filter(p, value)
except KeyError:
if self._suppress_key_errors or self._suppress_exceptions:
return False
else:
raise
except TypeError:
if self._suppress_exceptions:
return False
else:
                raise | Calls either _match_operator or _match_operand depending on the pattern (p) provided. | Below is the instruction that describes the task:
### Input:
Calls either _match_operator or _match_operand depending on the pattern (p) provided.
### Response:
def _match(self, p, value):
"""
Calls either _match_operator or _match_operand depending on the pattern (p) provided.
"""
if self._is_operator(p):
return self._match_operator(p, value)
else:
try:
if self._is_value_filter(p):
return self._match_value_filter(p, value)
else:
return self._match_key_filter(p, value)
except KeyError:
if self._suppress_key_errors or self._suppress_exceptions:
return False
else:
raise
except TypeError:
if self._suppress_exceptions:
return False
else:
raise |
def get_standard_dicomdir_info(self):
"""
Read DICOMDIR, crate if necessary.
:return:
"""
dicomdir_filepath = os.path.join(self.dirpath, self.standard_dicomdir_filename)
if not os.path.exists(dicomdir_filepath):
self.create_standard_dicomdir()
return self.read_standard_dicomdir_info() | Read DICOMDIR, crate if necessary.
    :return: | Below is the instruction that describes the task:
### Input:
Read DICOMDIR, crate if necessary.
:return:
### Response:
def get_standard_dicomdir_info(self):
"""
Read DICOMDIR, crate if necessary.
:return:
"""
dicomdir_filepath = os.path.join(self.dirpath, self.standard_dicomdir_filename)
if not os.path.exists(dicomdir_filepath):
self.create_standard_dicomdir()
return self.read_standard_dicomdir_info() |
def reflect_db(self):
"""
No-op to reflect db info.
This is available as a method so the database can be reflected
outside initialization (such bootstrapping unihan during CLI usage).
"""
self.metadata.reflect(views=True, extend_existing=True)
self.base = automap_base(metadata=self.metadata)
self.base.prepare() | No-op to reflect db info.
This is available as a method so the database can be reflected
    outside initialization (such bootstrapping unihan during CLI usage). | Below is the instruction that describes the task:
### Input:
No-op to reflect db info.
This is available as a method so the database can be reflected
outside initialization (such bootstrapping unihan during CLI usage).
### Response:
def reflect_db(self):
"""
No-op to reflect db info.
This is available as a method so the database can be reflected
outside initialization (such bootstrapping unihan during CLI usage).
"""
self.metadata.reflect(views=True, extend_existing=True)
self.base = automap_base(metadata=self.metadata)
self.base.prepare() |
def retrieve(payment, refund_id):
"""
Retrieve a refund from a payment and the refund id.
:param payment: The payment id or the payment object
:type payment: resources.Payment|string
:param refund_id: The refund id
:type refund_id: string
:return: The refund resource
:rtype: resources.Refund
"""
if isinstance(payment, resources.Payment):
payment = payment.id
http_client = HttpClient()
response, _ = http_client.get(routes.url(routes.REFUND_RESOURCE, resource_id=refund_id, payment_id=payment))
return resources.Refund(**response) | Retrieve a refund from a payment and the refund id.
:param payment: The payment id or the payment object
:type payment: resources.Payment|string
:param refund_id: The refund id
:type refund_id: string
:return: The refund resource
    :rtype: resources.Refund | Below is the instruction that describes the task:
### Input:
Retrieve a refund from a payment and the refund id.
:param payment: The payment id or the payment object
:type payment: resources.Payment|string
:param refund_id: The refund id
:type refund_id: string
:return: The refund resource
:rtype: resources.Refund
### Response:
def retrieve(payment, refund_id):
"""
Retrieve a refund from a payment and the refund id.
:param payment: The payment id or the payment object
:type payment: resources.Payment|string
:param refund_id: The refund id
:type refund_id: string
:return: The refund resource
:rtype: resources.Refund
"""
if isinstance(payment, resources.Payment):
payment = payment.id
http_client = HttpClient()
response, _ = http_client.get(routes.url(routes.REFUND_RESOURCE, resource_id=refund_id, payment_id=payment))
return resources.Refund(**response) |
def make_new_subdomain_history(self, cursor, subdomain_rec):
"""
Recalculate the history for this subdomain from genesis up until this record.
Returns the list of subdomain records we need to save.
"""
# what's the subdomain's history up until this subdomain record?
hist = self.subdomain_db.get_subdomain_history(subdomain_rec.get_fqn(), include_unaccepted=True, end_sequence=subdomain_rec.n+1, end_zonefile_index=subdomain_rec.parent_zonefile_index+1, cur=cursor)
assert len(hist) > 0, 'BUG: not yet stored: {}'.format(subdomain_rec)
for i in range(0, len(hist)):
hist[i].accepted = False
hist.sort(lambda h1, h2: -1 if h1.n < h2.n or (h1.n == h2.n and h1.parent_zonefile_index < h2.parent_zonefile_index) \
else 0 if h1.n == h2.n and h1.parent_zonefile_index == h2.parent_zonefile_index \
else 1)
if not self.check_initial_subdomain(hist[0]):
log.debug("Reject initial {}".format(hist[0]))
return hist
else:
log.debug("Accept initial {}".format(hist[0]))
pass
hist[0].accepted = True
last_accepted = 0
for i in xrange(1, len(hist)):
if self.check_subdomain_transition(hist[last_accepted], hist[i]):
log.debug("Accept historic update {}".format(hist[i]))
hist[i].accepted = True
last_accepted = i
else:
log.debug("Reject historic update {}".format(hist[i]))
hist[i].accepted = False
return hist | Recalculate the history for this subdomain from genesis up until this record.
    Returns the list of subdomain records we need to save. | Below is the instruction that describes the task:
### Input:
Recalculate the history for this subdomain from genesis up until this record.
Returns the list of subdomain records we need to save.
### Response:
def make_new_subdomain_history(self, cursor, subdomain_rec):
"""
Recalculate the history for this subdomain from genesis up until this record.
Returns the list of subdomain records we need to save.
"""
# what's the subdomain's history up until this subdomain record?
hist = self.subdomain_db.get_subdomain_history(subdomain_rec.get_fqn(), include_unaccepted=True, end_sequence=subdomain_rec.n+1, end_zonefile_index=subdomain_rec.parent_zonefile_index+1, cur=cursor)
assert len(hist) > 0, 'BUG: not yet stored: {}'.format(subdomain_rec)
for i in range(0, len(hist)):
hist[i].accepted = False
hist.sort(lambda h1, h2: -1 if h1.n < h2.n or (h1.n == h2.n and h1.parent_zonefile_index < h2.parent_zonefile_index) \
else 0 if h1.n == h2.n and h1.parent_zonefile_index == h2.parent_zonefile_index \
else 1)
if not self.check_initial_subdomain(hist[0]):
log.debug("Reject initial {}".format(hist[0]))
return hist
else:
log.debug("Accept initial {}".format(hist[0]))
pass
hist[0].accepted = True
last_accepted = 0
for i in xrange(1, len(hist)):
if self.check_subdomain_transition(hist[last_accepted], hist[i]):
log.debug("Accept historic update {}".format(hist[i]))
hist[i].accepted = True
last_accepted = i
else:
log.debug("Reject historic update {}".format(hist[i]))
hist[i].accepted = False
return hist |
def has_segment_tables(xmldoc, name = None):
"""
Return True if the document contains a complete set of segment
tables. Returns False otherwise. If name is given and not None
then the return value is True only if the document's segment
tables, if present, contain a segment list by that name.
"""
try:
names = lsctables.SegmentDefTable.get_table(xmldoc).getColumnByName("name")
lsctables.SegmentTable.get_table(xmldoc)
lsctables.SegmentSumTable.get_table(xmldoc)
except (ValueError, KeyError):
return False
return name is None or name in names | Return True if the document contains a complete set of segment
tables. Returns False otherwise. If name is given and not None
then the return value is True only if the document's segment
    tables, if present, contain a segment list by that name. | Below is the instruction that describes the task:
### Input:
Return True if the document contains a complete set of segment
tables. Returns False otherwise. If name is given and not None
then the return value is True only if the document's segment
tables, if present, contain a segment list by that name.
### Response:
def has_segment_tables(xmldoc, name = None):
"""
Return True if the document contains a complete set of segment
tables. Returns False otherwise. If name is given and not None
then the return value is True only if the document's segment
tables, if present, contain a segment list by that name.
"""
try:
names = lsctables.SegmentDefTable.get_table(xmldoc).getColumnByName("name")
lsctables.SegmentTable.get_table(xmldoc)
lsctables.SegmentSumTable.get_table(xmldoc)
except (ValueError, KeyError):
return False
return name is None or name in names |
def kalman_filter(cls, p_A, p_Q, p_H, p_R, Y, index=None, m_init=None,
P_init=None, p_kalman_filter_type='regular',
calc_log_likelihood=False,
calc_grad_log_likelihood=False, grad_params_no=None,
grad_calc_params=None):
"""
This function implements the basic Kalman Filter algorithm
These notations for the State-Space model are assumed:
x_{k} = A_{k} * x_{k-1} + q_{k-1}; q_{k-1} ~ N(0, Q_{k-1})
y_{k} = H_{k} * x_{k} + r_{k}; r_{k-1} ~ N(0, R_{k})
Returns estimated filter distributions x_{k} ~ N(m_{k}, P(k))
Current Features:
----------------------------------------
1) The function generaly do not modify the passed parameters. If
it happens then it is an error. There are several exeprions: scalars
can be modified into a matrix, in some rare cases shapes of
the derivatives matrices may be changed, it is ignored for now.
2) Copies of p_A, p_Q, index are created in memory to be used later
in smoother. References to copies are kept in "matrs_for_smoother"
return parameter.
3) Function support "multiple time series mode" which means that exactly
the same State-Space model is used to filter several sets of measurements.
In this case third dimension of Y should include these state-space measurements
Log_likelihood and Grad_log_likelihood have the corresponding dimensions then.
4) Calculation of Grad_log_likelihood is not supported if matrices A,Q,
H, or R changes over time. (later may be changed)
5) Measurement may include missing values. In this case update step is
not done for this measurement. (later may be changed)
Input:
-----------------
p_A: scalar, square matrix, 3D array
A_{k} in the model. If matrix then A_{k} = A - constant.
If it is 3D array then A_{k} = p_A[:,:, index[0,k]]
p_Q: scalar, square symmetric matrix, 3D array
Q_{k-1} in the model. If matrix then Q_{k-1} = Q - constant.
If it is 3D array then Q_{k-1} = p_Q[:,:, index[1,k]]
p_H: scalar, matrix (measurement_dim, state_dim) , 3D array
H_{k} in the model. If matrix then H_{k} = H - constant.
If it is 3D array then H_{k} = p_Q[:,:, index[2,k]]
p_R: scalar, square symmetric matrix, 3D array
R_{k} in the model. If matrix then R_{k} = R - constant.
If it is 3D array then R_{k} = p_R[:,:, index[3,k]]
Y: matrix or vector or 3D array
Data. If Y is matrix then samples are along 0-th dimension and
features along the 1-st. If 3D array then third dimension
correspond to "multiple time series mode".
index: vector
Which indices (on 3-rd dimension) from arrays p_A, p_Q,p_H, p_R to use
on every time step. If this parameter is None then it is assumed
that p_A, p_Q, p_H, p_R do not change over time and indices are not needed.
index[0,:] - correspond to A, index[1,:] - correspond to Q
index[2,:] - correspond to H, index[3,:] - correspond to R.
If index.shape[0] == 1, it is assumed that indides for all matrices
are the same.
m_init: vector or matrix
Initial distribution mean. If None it is assumed to be zero.
For "multiple time series mode" it is matrix, second dimension of
which correspond to different time series. In regular case ("one
time series mode") it is a vector.
P_init: square symmetric matrix or scalar
Initial covariance of the states. If the parameter is scalar
then it is assumed that initial covariance matrix is unit matrix
multiplied by this scalar. If None the unit matrix is used instead.
"multiple time series mode" does not affect it, since it does not
affect anything related to state variaces.
calc_log_likelihood: boolean
Whether to calculate marginal likelihood of the state-space model.
calc_grad_log_likelihood: boolean
Whether to calculate gradient of the marginal likelihood
of the state-space model. If true then "grad_calc_params" parameter must
provide the extra parameters for gradient calculation.
grad_params_no: int
If previous parameter is true, then this parameters gives the
total number of parameters in the gradient.
grad_calc_params: dictionary
Dictionary with derivatives of model matrices with respect
to parameters "dA", "dQ", "dH", "dR", "dm_init", "dP_init".
They can be None, in this case zero matrices (no dependence on parameters)
is assumed. If there is only one parameter then third dimension is
automatically added.
Output:
--------------
M: (no_steps+1,state_dim) matrix or (no_steps+1,state_dim, time_series_no) 3D array
Filter estimates of the state means. In the extra step the initial
value is included. In the "multiple time series mode" third dimension
correspond to different timeseries.
P: (no_steps+1, state_dim, state_dim) 3D array
Filter estimates of the state covariances. In the extra step the initial
value is included.
log_likelihood: double or (1, time_series_no) 3D array.
If the parameter calc_log_likelihood was set to true, return
logarithm of marginal likelihood of the state-space model. If
the parameter was false, return None. In the "multiple time series mode" it is a vector
providing log_likelihood for each time series.
grad_log_likelihood: column vector or (grad_params_no, time_series_no) matrix
If calc_grad_log_likelihood is true, return gradient of log likelihood
with respect to parameters. It returns it column wise, so in
"multiple time series mode" gradients for each time series is in the
corresponding column.
matrs_for_smoother: dict
Dictionary with model functions for smoother. The intrinsic model
functions are computed in this functions and they are returned to
use in smoother for convenience. They are: 'p_a', 'p_f_A', 'p_f_Q'
The dictionary contains the same fields.
"""
#import pdb; pdb.set_trace()
# Parameters checking ->
# index
p_A = np.atleast_1d(p_A)
p_Q = np.atleast_1d(p_Q)
p_H = np.atleast_1d(p_H)
p_R = np.atleast_1d(p_R)
# Reshape and check measurements:
Y.shape, old_Y_shape = cls._reshape_input_data(Y.shape)
measurement_dim = Y.shape[1]
time_series_no = Y.shape[2] # multiple time series mode
if ((len(p_A.shape) == 3) and (len(p_A.shape[2]) != 1)) or\
((len(p_Q.shape) == 3) and (len(p_Q.shape[2]) != 1)) or\
((len(p_H.shape) == 3) and (len(p_H.shape[2]) != 1)) or\
((len(p_R.shape) == 3) and (len(p_R.shape[2]) != 1)):
model_matrices_chage_with_time = True
else:
model_matrices_chage_with_time = False
# Check index
old_index_shape = None
if index is None:
if (len(p_A.shape) == 3) or (len(p_Q.shape) == 3) or\
(len(p_H.shape) == 3) or (len(p_R.shape) == 3):
raise ValueError("Parameter index can not be None for time varying matrices (third dimension is present)")
else: # matrices do not change in time, so form dummy zero indices.
index = np.zeros((1,Y.shape[0]))
else:
if len(index.shape) == 1:
index.shape = (1,index.shape[0])
old_index_shape = (index.shape[0],)
if (index.shape[1] != Y.shape[0]):
raise ValueError("Number of measurements must be equal the number of A_{k}, Q_{k}, H_{k}, R_{k}")
if (index.shape[0] == 1):
A_time_var_index = 0; Q_time_var_index = 0
H_time_var_index = 0; R_time_var_index = 0
elif (index.shape[0] == 4):
A_time_var_index = 0; Q_time_var_index = 1
H_time_var_index = 2; R_time_var_index = 3
else:
raise ValueError("First Dimension of index must be either 1 or 4.")
state_dim = p_A.shape[0]
# Check and make right shape for model matrices. On exit they all are 3 dimensional. Last dimension
# correspond to change in time.
(p_A, old_A_shape) = cls._check_SS_matrix(p_A, state_dim, measurement_dim, which='A')
(p_Q, old_Q_shape) = cls._check_SS_matrix(p_Q, state_dim, measurement_dim, which='Q')
(p_H, old_H_shape) = cls._check_SS_matrix(p_H, state_dim, measurement_dim, which='H')
(p_R, old_R_shape) = cls._check_SS_matrix(p_R, state_dim, measurement_dim, which='R')
# m_init
if m_init is None:
m_init = np.zeros((state_dim, time_series_no))
else:
m_init = np.atleast_2d(m_init).T
# P_init
if P_init is None:
P_init = np.eye(state_dim)
elif not isinstance(P_init, collections.Iterable): #scalar
P_init = P_init*np.eye(state_dim)
if p_kalman_filter_type not in ('regular', 'svd'):
raise ValueError("Kalman filer type neither 'regular nor 'svd'.")
# Functions to pass to the kalman_filter algorithm:
# Parameters:
# k - number of Kalman filter iteration
# m - vector for calculating matrices. Required for EKF. Not used here.
c_p_A = p_A.copy() # create a copy because this object is passed to the smoother
c_p_Q = p_Q.copy() # create a copy because this object is passed to the smoother
c_index = index.copy() # create a copy because this object is passed to the smoother
if calc_grad_log_likelihood:
if model_matrices_chage_with_time:
raise ValueError("When computing likelihood gradient A and Q can not change over time.")
dA = cls._check_grad_state_matrices(grad_calc_params.get('dA'), state_dim, grad_params_no, which = 'dA')
dQ = cls._check_grad_state_matrices(grad_calc_params.get('dQ'), state_dim, grad_params_no, which = 'dQ')
dH = cls._check_grad_measurement_matrices(grad_calc_params.get('dH'), state_dim, grad_params_no, measurement_dim, which = 'dH')
dR = cls._check_grad_measurement_matrices(grad_calc_params.get('dR'), state_dim, grad_params_no, measurement_dim, which = 'dR')
dm_init = grad_calc_params.get('dm_init')
if dm_init is None:
# multiple time series mode. Keep grad_params always as a last dimension
dm_init = np.zeros((state_dim, time_series_no, grad_params_no))
dP_init = grad_calc_params.get('dP_init')
if dP_init is None:
dP_init = np.zeros((state_dim,state_dim,grad_params_no))
else:
dA = None
dQ = None
dH = None
dR = None
dm_init = None
dP_init = None
dynamic_callables = Std_Dynamic_Callables_Class(c_p_A, A_time_var_index, c_p_Q, c_index, Q_time_var_index, 20, dA, dQ)
measurement_callables = Std_Measurement_Callables_Class(p_H, H_time_var_index, p_R, index, R_time_var_index, 20, dH, dR)
(M, P,log_likelihood, grad_log_likelihood, dynamic_callables) = \
cls._kalman_algorithm_raw(state_dim, dynamic_callables,
measurement_callables, Y, m_init,
P_init, p_kalman_filter_type = p_kalman_filter_type,
calc_log_likelihood=calc_log_likelihood,
calc_grad_log_likelihood=calc_grad_log_likelihood,
grad_params_no=grad_params_no,
dm_init=dm_init, dP_init=dP_init)
# restore shapes so that input parameters are unchenged
if old_index_shape is not None:
index.shape = old_index_shape
if old_Y_shape is not None:
Y.shape = old_Y_shape
if old_A_shape is not None:
p_A.shape = old_A_shape
if old_Q_shape is not None:
p_Q.shape = old_Q_shape
if old_H_shape is not None:
p_H.shape = old_H_shape
if old_R_shape is not None:
p_R.shape = old_R_shape
# Return values
return (M, P,log_likelihood, grad_log_likelihood, dynamic_callables) | This function implements the basic Kalman Filter algorithm
These notations for the State-Space model are assumed:
x_{k} = A_{k} * x_{k-1} + q_{k-1}; q_{k-1} ~ N(0, Q_{k-1})
y_{k} = H_{k} * x_{k} + r_{k}; r_{k-1} ~ N(0, R_{k})
Returns estimated filter distributions x_{k} ~ N(m_{k}, P(k))
Current Features:
----------------------------------------
1) The function generaly do not modify the passed parameters. If
it happens then it is an error. There are several exeprions: scalars
can be modified into a matrix, in some rare cases shapes of
the derivatives matrices may be changed, it is ignored for now.
2) Copies of p_A, p_Q, index are created in memory to be used later
in smoother. References to copies are kept in "matrs_for_smoother"
return parameter.
3) Function support "multiple time series mode" which means that exactly
the same State-Space model is used to filter several sets of measurements.
In this case third dimension of Y should include these state-space measurements
Log_likelihood and Grad_log_likelihood have the corresponding dimensions then.
4) Calculation of Grad_log_likelihood is not supported if matrices A,Q,
H, or R changes over time. (later may be changed)
5) Measurement may include missing values. In this case update step is
not done for this measurement. (later may be changed)
Input:
-----------------
p_A: scalar, square matrix, 3D array
A_{k} in the model. If matrix then A_{k} = A - constant.
If it is 3D array then A_{k} = p_A[:,:, index[0,k]]
p_Q: scalar, square symmetric matrix, 3D array
Q_{k-1} in the model. If matrix then Q_{k-1} = Q - constant.
If it is 3D array then Q_{k-1} = p_Q[:,:, index[1,k]]
p_H: scalar, matrix (measurement_dim, state_dim) , 3D array
H_{k} in the model. If matrix then H_{k} = H - constant.
If it is 3D array then H_{k} = p_Q[:,:, index[2,k]]
p_R: scalar, square symmetric matrix, 3D array
R_{k} in the model. If matrix then R_{k} = R - constant.
If it is 3D array then R_{k} = p_R[:,:, index[3,k]]
Y: matrix or vector or 3D array
Data. If Y is matrix then samples are along 0-th dimension and
features along the 1-st. If 3D array then third dimension
correspond to "multiple time series mode".
index: vector
Which indices (on 3-rd dimension) from arrays p_A, p_Q,p_H, p_R to use
on every time step. If this parameter is None then it is assumed
that p_A, p_Q, p_H, p_R do not change over time and indices are not needed.
index[0,:] - correspond to A, index[1,:] - correspond to Q
index[2,:] - correspond to H, index[3,:] - correspond to R.
If index.shape[0] == 1, it is assumed that indides for all matrices
are the same.
m_init: vector or matrix
Initial distribution mean. If None it is assumed to be zero.
For "multiple time series mode" it is matrix, second dimension of
which correspond to different time series. In regular case ("one
time series mode") it is a vector.
P_init: square symmetric matrix or scalar
Initial covariance of the states. If the parameter is scalar
then it is assumed that initial covariance matrix is unit matrix
multiplied by this scalar. If None the unit matrix is used instead.
"multiple time series mode" does not affect it, since it does not
affect anything related to state variaces.
calc_log_likelihood: boolean
Whether to calculate marginal likelihood of the state-space model.
calc_grad_log_likelihood: boolean
Whether to calculate gradient of the marginal likelihood
of the state-space model. If true then "grad_calc_params" parameter must
provide the extra parameters for gradient calculation.
grad_params_no: int
If previous parameter is true, then this parameters gives the
total number of parameters in the gradient.
grad_calc_params: dictionary
Dictionary with derivatives of model matrices with respect
to parameters "dA", "dQ", "dH", "dR", "dm_init", "dP_init".
They can be None, in this case zero matrices (no dependence on parameters)
is assumed. If there is only one parameter then third dimension is
automatically added.
Output:
--------------
M: (no_steps+1,state_dim) matrix or (no_steps+1,state_dim, time_series_no) 3D array
Filter estimates of the state means. In the extra step the initial
value is included. In the "multiple time series mode" third dimension
correspond to different timeseries.
P: (no_steps+1, state_dim, state_dim) 3D array
Filter estimates of the state covariances. In the extra step the initial
value is included.
log_likelihood: double or (1, time_series_no) 3D array.
If the parameter calc_log_likelihood was set to true, return
logarithm of marginal likelihood of the state-space model. If
the parameter was false, return None. In the "multiple time series mode" it is a vector
providing log_likelihood for each time series.
grad_log_likelihood: column vector or (grad_params_no, time_series_no) matrix
If calc_grad_log_likelihood is true, return gradient of log likelihood
with respect to parameters. It returns it column wise, so in
"multiple time series mode" gradients for each time series is in the
corresponding column.
matrs_for_smoother: dict
Dictionary with model functions for smoother. The intrinsic model
functions are computed in this functions and they are returned to
use in smoother for convenience. They are: 'p_a', 'p_f_A', 'p_f_Q'
    The dictionary contains the same fields. | Below is the instruction that describes the task:
### Input:
This function implements the basic Kalman Filter algorithm
These notations for the State-Space model are assumed:
x_{k} = A_{k} * x_{k-1} + q_{k-1}; q_{k-1} ~ N(0, Q_{k-1})
y_{k} = H_{k} * x_{k} + r_{k}; r_{k-1} ~ N(0, R_{k})
Returns estimated filter distributions x_{k} ~ N(m_{k}, P(k))
Current Features:
----------------------------------------
1) The function generaly do not modify the passed parameters. If
it happens then it is an error. There are several exeprions: scalars
can be modified into a matrix, in some rare cases shapes of
the derivatives matrices may be changed, it is ignored for now.
2) Copies of p_A, p_Q, index are created in memory to be used later
in smoother. References to copies are kept in "matrs_for_smoother"
return parameter.
3) Function support "multiple time series mode" which means that exactly
the same State-Space model is used to filter several sets of measurements.
In this case third dimension of Y should include these state-space measurements
Log_likelihood and Grad_log_likelihood have the corresponding dimensions then.
4) Calculation of Grad_log_likelihood is not supported if matrices A,Q,
H, or R changes over time. (later may be changed)
5) Measurement may include missing values. In this case update step is
not done for this measurement. (later may be changed)
Input:
-----------------
p_A: scalar, square matrix, 3D array
A_{k} in the model. If matrix then A_{k} = A - constant.
If it is 3D array then A_{k} = p_A[:,:, index[0,k]]
p_Q: scalar, square symmetric matrix, 3D array
Q_{k-1} in the model. If matrix then Q_{k-1} = Q - constant.
If it is 3D array then Q_{k-1} = p_Q[:,:, index[1,k]]
p_H: scalar, matrix (measurement_dim, state_dim) , 3D array
H_{k} in the model. If matrix then H_{k} = H - constant.
If it is 3D array then H_{k} = p_Q[:,:, index[2,k]]
p_R: scalar, square symmetric matrix, 3D array
R_{k} in the model. If matrix then R_{k} = R - constant.
If it is 3D array then R_{k} = p_R[:,:, index[3,k]]
Y: matrix or vector or 3D array
Data. If Y is matrix then samples are along 0-th dimension and
features along the 1-st. If 3D array then third dimension
correspond to "multiple time series mode".
index: vector
Which indices (on 3-rd dimension) from arrays p_A, p_Q,p_H, p_R to use
on every time step. If this parameter is None then it is assumed
that p_A, p_Q, p_H, p_R do not change over time and indices are not needed.
index[0,:] - correspond to A, index[1,:] - correspond to Q
index[2,:] - correspond to H, index[3,:] - correspond to R.
If index.shape[0] == 1, it is assumed that indides for all matrices
are the same.
m_init: vector or matrix
Initial distribution mean. If None it is assumed to be zero.
For "multiple time series mode" it is matrix, second dimension of
which correspond to different time series. In regular case ("one
time series mode") it is a vector.
P_init: square symmetric matrix or scalar
Initial covariance of the states. If the parameter is scalar
then it is assumed that initial covariance matrix is unit matrix
multiplied by this scalar. If None the unit matrix is used instead.
"multiple time series mode" does not affect it, since it does not
affect anything related to state variaces.
calc_log_likelihood: boolean
Whether to calculate marginal likelihood of the state-space model.
calc_grad_log_likelihood: boolean
Whether to calculate gradient of the marginal likelihood
of the state-space model. If true then "grad_calc_params" parameter must
provide the extra parameters for gradient calculation.
grad_params_no: int
If previous parameter is true, then this parameters gives the
total number of parameters in the gradient.
grad_calc_params: dictionary
Dictionary with derivatives of model matrices with respect
to parameters "dA", "dQ", "dH", "dR", "dm_init", "dP_init".
They can be None, in this case zero matrices (no dependence on parameters)
is assumed. If there is only one parameter then third dimension is
automatically added.
Output:
--------------
M: (no_steps+1,state_dim) matrix or (no_steps+1,state_dim, time_series_no) 3D array
Filter estimates of the state means. In the extra step the initial
value is included. In the "multiple time series mode" third dimension
correspond to different timeseries.
P: (no_steps+1, state_dim, state_dim) 3D array
Filter estimates of the state covariances. In the extra step the initial
value is included.
log_likelihood: double or (1, time_series_no) 3D array.
If the parameter calc_log_likelihood was set to true, return
logarithm of marginal likelihood of the state-space model. If
the parameter was false, return None. In the "multiple time series mode" it is a vector
providing log_likelihood for each time series.
grad_log_likelihood: column vector or (grad_params_no, time_series_no) matrix
If calc_grad_log_likelihood is true, return gradient of log likelihood
with respect to parameters. It returns it column wise, so in
"multiple time series mode" gradients for each time series is in the
corresponding column.
matrs_for_smoother: dict
Dictionary with model functions for smoother. The intrinsic model
functions are computed in this functions and they are returned to
use in smoother for convenience. They are: 'p_a', 'p_f_A', 'p_f_Q'
The dictionary contains the same fields.
### Response:
def kalman_filter(cls, p_A, p_Q, p_H, p_R, Y, index=None, m_init=None,
P_init=None, p_kalman_filter_type='regular',
calc_log_likelihood=False,
calc_grad_log_likelihood=False, grad_params_no=None,
grad_calc_params=None):
"""
This function implements the basic Kalman Filter algorithm
These notations for the State-Space model are assumed:
x_{k} = A_{k} * x_{k-1} + q_{k-1}; q_{k-1} ~ N(0, Q_{k-1})
y_{k} = H_{k} * x_{k} + r_{k}; r_{k-1} ~ N(0, R_{k})
Returns estimated filter distributions x_{k} ~ N(m_{k}, P(k))
Current Features:
----------------------------------------
1) The function generaly do not modify the passed parameters. If
it happens then it is an error. There are several exeprions: scalars
can be modified into a matrix, in some rare cases shapes of
the derivatives matrices may be changed, it is ignored for now.
2) Copies of p_A, p_Q, index are created in memory to be used later
in smoother. References to copies are kept in "matrs_for_smoother"
return parameter.
3) Function support "multiple time series mode" which means that exactly
the same State-Space model is used to filter several sets of measurements.
In this case third dimension of Y should include these state-space measurements
Log_likelihood and Grad_log_likelihood have the corresponding dimensions then.
4) Calculation of Grad_log_likelihood is not supported if matrices A,Q,
H, or R changes over time. (later may be changed)
5) Measurement may include missing values. In this case update step is
not done for this measurement. (later may be changed)
Input:
-----------------
p_A: scalar, square matrix, 3D array
A_{k} in the model. If matrix then A_{k} = A - constant.
If it is 3D array then A_{k} = p_A[:,:, index[0,k]]
p_Q: scalar, square symmetric matrix, 3D array
Q_{k-1} in the model. If matrix then Q_{k-1} = Q - constant.
If it is 3D array then Q_{k-1} = p_Q[:,:, index[1,k]]
p_H: scalar, matrix (measurement_dim, state_dim) , 3D array
H_{k} in the model. If matrix then H_{k} = H - constant.
If it is 3D array then H_{k} = p_Q[:,:, index[2,k]]
p_R: scalar, square symmetric matrix, 3D array
R_{k} in the model. If matrix then R_{k} = R - constant.
If it is 3D array then R_{k} = p_R[:,:, index[3,k]]
Y: matrix or vector or 3D array
Data. If Y is matrix then samples are along 0-th dimension and
features along the 1-st. If 3D array then third dimension
correspond to "multiple time series mode".
index: vector
Which indices (on 3-rd dimension) from arrays p_A, p_Q,p_H, p_R to use
on every time step. If this parameter is None then it is assumed
that p_A, p_Q, p_H, p_R do not change over time and indices are not needed.
index[0,:] - correspond to A, index[1,:] - correspond to Q
index[2,:] - correspond to H, index[3,:] - correspond to R.
If index.shape[0] == 1, it is assumed that indides for all matrices
are the same.
m_init: vector or matrix
Initial distribution mean. If None it is assumed to be zero.
For "multiple time series mode" it is matrix, second dimension of
which correspond to different time series. In regular case ("one
time series mode") it is a vector.
P_init: square symmetric matrix or scalar
Initial covariance of the states. If the parameter is scalar
then it is assumed that initial covariance matrix is unit matrix
multiplied by this scalar. If None the unit matrix is used instead.
"multiple time series mode" does not affect it, since it does not
affect anything related to state variaces.
calc_log_likelihood: boolean
Whether to calculate marginal likelihood of the state-space model.
calc_grad_log_likelihood: boolean
Whether to calculate gradient of the marginal likelihood
of the state-space model. If true then "grad_calc_params" parameter must
provide the extra parameters for gradient calculation.
grad_params_no: int
If previous parameter is true, then this parameters gives the
total number of parameters in the gradient.
grad_calc_params: dictionary
Dictionary with derivatives of model matrices with respect
to parameters "dA", "dQ", "dH", "dR", "dm_init", "dP_init".
They can be None, in this case zero matrices (no dependence on parameters)
is assumed. If there is only one parameter then third dimension is
automatically added.
Output:
--------------
M: (no_steps+1,state_dim) matrix or (no_steps+1,state_dim, time_series_no) 3D array
Filter estimates of the state means. In the extra step the initial
value is included. In the "multiple time series mode" third dimension
correspond to different timeseries.
P: (no_steps+1, state_dim, state_dim) 3D array
Filter estimates of the state covariances. In the extra step the initial
value is included.
log_likelihood: double or (1, time_series_no) 3D array.
If the parameter calc_log_likelihood was set to true, return
logarithm of marginal likelihood of the state-space model. If
the parameter was false, return None. In the "multiple time series mode" it is a vector
providing log_likelihood for each time series.
grad_log_likelihood: column vector or (grad_params_no, time_series_no) matrix
If calc_grad_log_likelihood is true, return gradient of log likelihood
with respect to parameters. It returns it column wise, so in
"multiple time series mode" gradients for each time series is in the
corresponding column.
matrs_for_smoother: dict
Dictionary with model functions for smoother. The intrinsic model
functions are computed in this functions and they are returned to
use in smoother for convenience. They are: 'p_a', 'p_f_A', 'p_f_Q'
The dictionary contains the same fields.
"""
#import pdb; pdb.set_trace()
# Parameters checking ->
# index
p_A = np.atleast_1d(p_A)
p_Q = np.atleast_1d(p_Q)
p_H = np.atleast_1d(p_H)
p_R = np.atleast_1d(p_R)
# Reshape and check measurements:
Y.shape, old_Y_shape = cls._reshape_input_data(Y.shape)
measurement_dim = Y.shape[1]
time_series_no = Y.shape[2] # multiple time series mode
if ((len(p_A.shape) == 3) and (len(p_A.shape[2]) != 1)) or\
((len(p_Q.shape) == 3) and (len(p_Q.shape[2]) != 1)) or\
((len(p_H.shape) == 3) and (len(p_H.shape[2]) != 1)) or\
((len(p_R.shape) == 3) and (len(p_R.shape[2]) != 1)):
model_matrices_chage_with_time = True
else:
model_matrices_chage_with_time = False
# Check index
old_index_shape = None
if index is None:
if (len(p_A.shape) == 3) or (len(p_Q.shape) == 3) or\
(len(p_H.shape) == 3) or (len(p_R.shape) == 3):
raise ValueError("Parameter index can not be None for time varying matrices (third dimension is present)")
else: # matrices do not change in time, so form dummy zero indices.
index = np.zeros((1,Y.shape[0]))
else:
if len(index.shape) == 1:
index.shape = (1,index.shape[0])
old_index_shape = (index.shape[0],)
if (index.shape[1] != Y.shape[0]):
raise ValueError("Number of measurements must be equal the number of A_{k}, Q_{k}, H_{k}, R_{k}")
if (index.shape[0] == 1):
A_time_var_index = 0; Q_time_var_index = 0
H_time_var_index = 0; R_time_var_index = 0
elif (index.shape[0] == 4):
A_time_var_index = 0; Q_time_var_index = 1
H_time_var_index = 2; R_time_var_index = 3
else:
raise ValueError("First Dimension of index must be either 1 or 4.")
state_dim = p_A.shape[0]
# Check and make right shape for model matrices. On exit they all are 3 dimensional. Last dimension
# correspond to change in time.
(p_A, old_A_shape) = cls._check_SS_matrix(p_A, state_dim, measurement_dim, which='A')
(p_Q, old_Q_shape) = cls._check_SS_matrix(p_Q, state_dim, measurement_dim, which='Q')
(p_H, old_H_shape) = cls._check_SS_matrix(p_H, state_dim, measurement_dim, which='H')
(p_R, old_R_shape) = cls._check_SS_matrix(p_R, state_dim, measurement_dim, which='R')
# m_init
if m_init is None:
m_init = np.zeros((state_dim, time_series_no))
else:
m_init = np.atleast_2d(m_init).T
# P_init
if P_init is None:
P_init = np.eye(state_dim)
elif not isinstance(P_init, collections.Iterable): #scalar
P_init = P_init*np.eye(state_dim)
if p_kalman_filter_type not in ('regular', 'svd'):
raise ValueError("Kalman filer type neither 'regular nor 'svd'.")
# Functions to pass to the kalman_filter algorithm:
# Parameters:
# k - number of Kalman filter iteration
# m - vector for calculating matrices. Required for EKF. Not used here.
c_p_A = p_A.copy() # create a copy because this object is passed to the smoother
c_p_Q = p_Q.copy() # create a copy because this object is passed to the smoother
c_index = index.copy() # create a copy because this object is passed to the smoother
if calc_grad_log_likelihood:
if model_matrices_chage_with_time:
raise ValueError("When computing likelihood gradient A and Q can not change over time.")
dA = cls._check_grad_state_matrices(grad_calc_params.get('dA'), state_dim, grad_params_no, which = 'dA')
dQ = cls._check_grad_state_matrices(grad_calc_params.get('dQ'), state_dim, grad_params_no, which = 'dQ')
dH = cls._check_grad_measurement_matrices(grad_calc_params.get('dH'), state_dim, grad_params_no, measurement_dim, which = 'dH')
dR = cls._check_grad_measurement_matrices(grad_calc_params.get('dR'), state_dim, grad_params_no, measurement_dim, which = 'dR')
dm_init = grad_calc_params.get('dm_init')
if dm_init is None:
# multiple time series mode. Keep grad_params always as a last dimension
dm_init = np.zeros((state_dim, time_series_no, grad_params_no))
dP_init = grad_calc_params.get('dP_init')
if dP_init is None:
dP_init = np.zeros((state_dim,state_dim,grad_params_no))
else:
dA = None
dQ = None
dH = None
dR = None
dm_init = None
dP_init = None
dynamic_callables = Std_Dynamic_Callables_Class(c_p_A, A_time_var_index, c_p_Q, c_index, Q_time_var_index, 20, dA, dQ)
measurement_callables = Std_Measurement_Callables_Class(p_H, H_time_var_index, p_R, index, R_time_var_index, 20, dH, dR)
(M, P,log_likelihood, grad_log_likelihood, dynamic_callables) = \
cls._kalman_algorithm_raw(state_dim, dynamic_callables,
measurement_callables, Y, m_init,
P_init, p_kalman_filter_type = p_kalman_filter_type,
calc_log_likelihood=calc_log_likelihood,
calc_grad_log_likelihood=calc_grad_log_likelihood,
grad_params_no=grad_params_no,
dm_init=dm_init, dP_init=dP_init)
# restore shapes so that input parameters are unchenged
if old_index_shape is not None:
index.shape = old_index_shape
if old_Y_shape is not None:
Y.shape = old_Y_shape
if old_A_shape is not None:
p_A.shape = old_A_shape
if old_Q_shape is not None:
p_Q.shape = old_Q_shape
if old_H_shape is not None:
p_H.shape = old_H_shape
if old_R_shape is not None:
p_R.shape = old_R_shape
# Return values
return (M, P,log_likelihood, grad_log_likelihood, dynamic_callables) |
def _parse_edge_weight(f, nodes):
"""Parse TSPLIB EDGE_WEIGHT_SECTION from file f
Supports only FULL_MATRIX for now
"""
matrix = []
n = 0
for line in f:
line = strip(line)
regex = re.compile(r'\s+')
row = regex.split(line)
matrix.append(row)
n = n + 1
if n == nodes:
break
if n != nodes:
raise ParseException('Missing {} nodes definition from section EDGE_WEIGHT_SECTION'.format(nodes - n))
return matrix | Parse TSPLIB EDGE_WEIGHT_SECTION from file f
Supports only FULL_MATRIX for now | Below is the the instruction that describes the task:
### Input:
Parse TSPLIB EDGE_WEIGHT_SECTION from file f
Supports only FULL_MATRIX for now
### Response:
def _parse_edge_weight(f, nodes):
"""Parse TSPLIB EDGE_WEIGHT_SECTION from file f
Supports only FULL_MATRIX for now
"""
matrix = []
n = 0
for line in f:
line = strip(line)
regex = re.compile(r'\s+')
row = regex.split(line)
matrix.append(row)
n = n + 1
if n == nodes:
break
if n != nodes:
raise ParseException('Missing {} nodes definition from section EDGE_WEIGHT_SECTION'.format(nodes - n))
return matrix |
def before(self):
"""Run This Middleware Before The Route Executes."""
user = self.request.user()
if user and user.verified_at is None:
self.request.redirect('/email/verify') | Run This Middleware Before The Route Executes. | Below is the the instruction that describes the task:
### Input:
Run This Middleware Before The Route Executes.
### Response:
def before(self):
"""Run This Middleware Before The Route Executes."""
user = self.request.user()
if user and user.verified_at is None:
self.request.redirect('/email/verify') |
def logging_from_debugplot(debugplot):
"""Set debugging level based on debugplot value.
Parameters
----------
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
"""
if isinstance(debugplot, int):
if abs(debugplot) >= 10:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
else:
raise ValueError("Unexpected debugplot=" + str(debugplot)) | Set debugging level based on debugplot value.
Parameters
----------
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'. | Below is the the instruction that describes the task:
### Input:
Set debugging level based on debugplot value.
Parameters
----------
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
### Response:
def logging_from_debugplot(debugplot):
"""Set debugging level based on debugplot value.
Parameters
----------
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
"""
if isinstance(debugplot, int):
if abs(debugplot) >= 10:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
else:
raise ValueError("Unexpected debugplot=" + str(debugplot)) |
def static_checker(workflow_inputs, workflow_outputs, step_inputs, step_outputs, param_to_step):
# type: (List[Dict[Text, Any]], List[Dict[Text, Any]], List[Dict[Text, Any]], List[Dict[Text, Any]], Dict[Text, Dict[Text, Any]]) -> None
"""Check if all source and sink types of a workflow are compatible before run time.
"""
# source parameters: workflow_inputs and step_outputs
# sink parameters: step_inputs and workflow_outputs
# make a dictionary of source parameters, indexed by the "id" field
src_parms = workflow_inputs + step_outputs
src_dict = {}
for parm in src_parms:
src_dict[parm["id"]] = parm
step_inputs_val = check_all_types(src_dict, step_inputs, "source")
workflow_outputs_val = check_all_types(src_dict, workflow_outputs, "outputSource")
warnings = step_inputs_val["warning"] + workflow_outputs_val["warning"]
exceptions = step_inputs_val["exception"] + workflow_outputs_val["exception"]
warning_msgs = []
exception_msgs = []
for warning in warnings:
src = warning.src
sink = warning.sink
linkMerge = warning.linkMerge
sinksf = sorted([p["pattern"] for p in sink.get("secondaryFiles", []) if p.get("required", True)])
srcsf = sorted([p["pattern"] for p in src.get("secondaryFiles", [])])
# Every secondaryFile required by the sink, should be declared
# by the source
missing = missing_subset(srcsf, sinksf)
if missing:
msg1 = "Parameter '%s' requires secondaryFiles %s but" % (shortname(sink["id"]), missing)
msg3 = SourceLine(src, "id").makeError(
"source '%s' does not provide those secondaryFiles." % (shortname(src["id"])))
msg4 = SourceLine(src.get("_tool_entry", src), "secondaryFiles").makeError("To resolve, add missing secondaryFiles patterns to definition of '%s' or" % (shortname(src["id"])))
msg5 = SourceLine(sink.get("_tool_entry", sink), "secondaryFiles").makeError("mark missing secondaryFiles in definition of '%s' as optional." % shortname(sink["id"]))
msg = SourceLine(sink).makeError("%s\n%s" % (msg1, bullets([msg3, msg4, msg5], " ")))
elif sink.get("not_connected"):
msg = SourceLine(sink, "type").makeError(
"'%s' is not an input parameter of %s, expected %s"
% (shortname(sink["id"]), param_to_step[sink["id"]]["run"],
", ".join(shortname(s["id"])
for s in param_to_step[sink["id"]]["inputs"]
if not s.get("not_connected"))))
else:
msg = SourceLine(src, "type").makeError(
"Source '%s' of type %s may be incompatible"
% (shortname(src["id"]), json_dumps(src["type"]))) + "\n" + \
SourceLine(sink, "type").makeError(
" with sink '%s' of type %s"
% (shortname(sink["id"]), json_dumps(sink["type"])))
if linkMerge is not None:
msg += "\n" + SourceLine(sink).makeError(" source has linkMerge method %s" % linkMerge)
warning_msgs.append(msg)
for exception in exceptions:
src = exception.src
sink = exception.sink
linkMerge = exception.linkMerge
msg = SourceLine(src, "type").makeError(
"Source '%s' of type %s is incompatible"
% (shortname(src["id"]), json_dumps(src["type"]))) + "\n" + \
SourceLine(sink, "type").makeError(
" with sink '%s' of type %s"
% (shortname(sink["id"]), json_dumps(sink["type"])))
if linkMerge is not None:
msg += "\n" + SourceLine(sink).makeError(" source has linkMerge method %s" % linkMerge)
exception_msgs.append(msg)
for sink in step_inputs:
if ('null' != sink["type"] and 'null' not in sink["type"]
and "source" not in sink and "default" not in sink and "valueFrom" not in sink):
msg = SourceLine(sink).makeError(
"Required parameter '%s' does not have source, default, or valueFrom expression"
% shortname(sink["id"]))
exception_msgs.append(msg)
all_warning_msg = strip_dup_lineno("\n".join(warning_msgs))
all_exception_msg = strip_dup_lineno("\n".join(exception_msgs))
if warnings:
_logger.warning("Workflow checker warning:\n%s", all_warning_msg)
if exceptions:
raise validate.ValidationException(all_exception_msg) | Check if all source and sink types of a workflow are compatible before run time. | Below is the the instruction that describes the task:
### Input:
Check if all source and sink types of a workflow are compatible before run time.
### Response:
def static_checker(workflow_inputs, workflow_outputs, step_inputs, step_outputs, param_to_step):
# type: (List[Dict[Text, Any]], List[Dict[Text, Any]], List[Dict[Text, Any]], List[Dict[Text, Any]], Dict[Text, Dict[Text, Any]]) -> None
"""Check if all source and sink types of a workflow are compatible before run time.
"""
# source parameters: workflow_inputs and step_outputs
# sink parameters: step_inputs and workflow_outputs
# make a dictionary of source parameters, indexed by the "id" field
src_parms = workflow_inputs + step_outputs
src_dict = {}
for parm in src_parms:
src_dict[parm["id"]] = parm
step_inputs_val = check_all_types(src_dict, step_inputs, "source")
workflow_outputs_val = check_all_types(src_dict, workflow_outputs, "outputSource")
warnings = step_inputs_val["warning"] + workflow_outputs_val["warning"]
exceptions = step_inputs_val["exception"] + workflow_outputs_val["exception"]
warning_msgs = []
exception_msgs = []
for warning in warnings:
src = warning.src
sink = warning.sink
linkMerge = warning.linkMerge
sinksf = sorted([p["pattern"] for p in sink.get("secondaryFiles", []) if p.get("required", True)])
srcsf = sorted([p["pattern"] for p in src.get("secondaryFiles", [])])
# Every secondaryFile required by the sink, should be declared
# by the source
missing = missing_subset(srcsf, sinksf)
if missing:
msg1 = "Parameter '%s' requires secondaryFiles %s but" % (shortname(sink["id"]), missing)
msg3 = SourceLine(src, "id").makeError(
"source '%s' does not provide those secondaryFiles." % (shortname(src["id"])))
msg4 = SourceLine(src.get("_tool_entry", src), "secondaryFiles").makeError("To resolve, add missing secondaryFiles patterns to definition of '%s' or" % (shortname(src["id"])))
msg5 = SourceLine(sink.get("_tool_entry", sink), "secondaryFiles").makeError("mark missing secondaryFiles in definition of '%s' as optional." % shortname(sink["id"]))
msg = SourceLine(sink).makeError("%s\n%s" % (msg1, bullets([msg3, msg4, msg5], " ")))
elif sink.get("not_connected"):
msg = SourceLine(sink, "type").makeError(
"'%s' is not an input parameter of %s, expected %s"
% (shortname(sink["id"]), param_to_step[sink["id"]]["run"],
", ".join(shortname(s["id"])
for s in param_to_step[sink["id"]]["inputs"]
if not s.get("not_connected"))))
else:
msg = SourceLine(src, "type").makeError(
"Source '%s' of type %s may be incompatible"
% (shortname(src["id"]), json_dumps(src["type"]))) + "\n" + \
SourceLine(sink, "type").makeError(
" with sink '%s' of type %s"
% (shortname(sink["id"]), json_dumps(sink["type"])))
if linkMerge is not None:
msg += "\n" + SourceLine(sink).makeError(" source has linkMerge method %s" % linkMerge)
warning_msgs.append(msg)
for exception in exceptions:
src = exception.src
sink = exception.sink
linkMerge = exception.linkMerge
msg = SourceLine(src, "type").makeError(
"Source '%s' of type %s is incompatible"
% (shortname(src["id"]), json_dumps(src["type"]))) + "\n" + \
SourceLine(sink, "type").makeError(
" with sink '%s' of type %s"
% (shortname(sink["id"]), json_dumps(sink["type"])))
if linkMerge is not None:
msg += "\n" + SourceLine(sink).makeError(" source has linkMerge method %s" % linkMerge)
exception_msgs.append(msg)
for sink in step_inputs:
if ('null' != sink["type"] and 'null' not in sink["type"]
and "source" not in sink and "default" not in sink and "valueFrom" not in sink):
msg = SourceLine(sink).makeError(
"Required parameter '%s' does not have source, default, or valueFrom expression"
% shortname(sink["id"]))
exception_msgs.append(msg)
all_warning_msg = strip_dup_lineno("\n".join(warning_msgs))
all_exception_msg = strip_dup_lineno("\n".join(exception_msgs))
if warnings:
_logger.warning("Workflow checker warning:\n%s", all_warning_msg)
if exceptions:
raise validate.ValidationException(all_exception_msg) |
def create_snappy_message(message_set):
"""
Construct a Snappy-compressed message containing multiple messages
The given messages will be encoded, compressed, and sent as a single atomic
message to Kafka.
:param list message_set: a list of :class:`Message` instances
"""
encoded_message_set = KafkaCodec._encode_message_set(message_set)
snapped = snappy_encode(encoded_message_set)
return Message(0, CODEC_SNAPPY, None, snapped) | Construct a Snappy-compressed message containing multiple messages
The given messages will be encoded, compressed, and sent as a single atomic
message to Kafka.
:param list message_set: a list of :class:`Message` instances | Below is the the instruction that describes the task:
### Input:
Construct a Snappy-compressed message containing multiple messages
The given messages will be encoded, compressed, and sent as a single atomic
message to Kafka.
:param list message_set: a list of :class:`Message` instances
### Response:
def create_snappy_message(message_set):
"""
Construct a Snappy-compressed message containing multiple messages
The given messages will be encoded, compressed, and sent as a single atomic
message to Kafka.
:param list message_set: a list of :class:`Message` instances
"""
encoded_message_set = KafkaCodec._encode_message_set(message_set)
snapped = snappy_encode(encoded_message_set)
return Message(0, CODEC_SNAPPY, None, snapped) |
def _tmp_html_file(
self,
content):
"""*create a tmp html file with some content used for the header or footer of the ebook*
**Key Arguments:**
- ``content`` -- the content to include in the HTML file.
"""
self.log.debug('starting the ``_tmp_html_file`` method')
content = """
<hr>
<div style="text-align: center">
%(content)s
</div>
<hr>
""" % locals()
now = datetime.now()
now = now.strftime("%Y%m%dt%H%M%S%f")
pathToWriteFile = "/tmp/%(now)s.html" % locals()
try:
self.log.debug("attempting to open the file %s" %
(pathToWriteFile,))
writeFile = codecs.open(
pathToWriteFile, encoding='utf-8', mode='w')
except IOError, e:
message = 'could not open the file %s' % (pathToWriteFile,)
self.log.critical(message)
raise IOError(message)
writeFile.write(content)
writeFile.close()
self.log.debug('completed the ``_tmp_html_file`` method')
return pathToWriteFile | *create a tmp html file with some content used for the header or footer of the ebook*
**Key Arguments:**
- ``content`` -- the content to include in the HTML file. | Below is the the instruction that describes the task:
### Input:
*create a tmp html file with some content used for the header or footer of the ebook*
**Key Arguments:**
- ``content`` -- the content to include in the HTML file.
### Response:
def _tmp_html_file(
self,
content):
"""*create a tmp html file with some content used for the header or footer of the ebook*
**Key Arguments:**
- ``content`` -- the content to include in the HTML file.
"""
self.log.debug('starting the ``_tmp_html_file`` method')
content = """
<hr>
<div style="text-align: center">
%(content)s
</div>
<hr>
""" % locals()
now = datetime.now()
now = now.strftime("%Y%m%dt%H%M%S%f")
pathToWriteFile = "/tmp/%(now)s.html" % locals()
try:
self.log.debug("attempting to open the file %s" %
(pathToWriteFile,))
writeFile = codecs.open(
pathToWriteFile, encoding='utf-8', mode='w')
except IOError, e:
message = 'could not open the file %s' % (pathToWriteFile,)
self.log.critical(message)
raise IOError(message)
writeFile.write(content)
writeFile.close()
self.log.debug('completed the ``_tmp_html_file`` method')
return pathToWriteFile |
def primitive_structure_from_cif(cif, parse_engine, symprec, site_tolerance):
"""
This calcfunction will take a CifData node, attempt to create a StructureData object from it
using the 'parse_engine' and pass it through SeeKpath to try and get the primitive cell. Finally, it will
store several keys from the SeeKpath output parameters dictionary directly on the structure data as attributes,
which are otherwise difficult if not impossible to query for.
:param cif: the CifData node
:param parse_engine: the parsing engine, supported libraries 'ase' and 'pymatgen'
:param symprec: a Float node with symmetry precision for determining primitive cell in SeeKpath
:param site_tolerance: a Float node with the fractional coordinate distance tolerance for finding overlapping sites
This will only be used if the parse_engine is pymatgen
:returns: the primitive StructureData as determined by SeeKpath
"""
CifCleanWorkChain = WorkflowFactory('codtools.cif_clean') # pylint: disable=invalid-name
try:
structure = cif.get_structure(converter=parse_engine.value, site_tolerance=site_tolerance, store=False)
except exceptions.UnsupportedSpeciesError:
return CifCleanWorkChain.exit_codes.ERROR_CIF_HAS_UNKNOWN_SPECIES
except InvalidOccupationsError:
return CifCleanWorkChain.exit_codes.ERROR_CIF_HAS_INVALID_OCCUPANCIES
except Exception: # pylint: disable=broad-except
return CifCleanWorkChain.exit_codes.ERROR_CIF_STRUCTURE_PARSING_FAILED
try:
seekpath_results = get_kpoints_path(structure, symprec=symprec)
except ValueError:
return CifCleanWorkChain.exit_codes.ERROR_SEEKPATH_INCONSISTENT_SYMMETRY
except SymmetryDetectionError:
return CifCleanWorkChain.exit_codes.ERROR_SEEKPATH_SYMMETRY_DETECTION_FAILED
# Store important information that should be easily queryable as attributes in the StructureData
parameters = seekpath_results['parameters'].get_dict()
structure = seekpath_results['primitive_structure']
for key in ['spacegroup_international', 'spacegroup_number', 'bravais_lattice', 'bravais_lattice_extended']:
try:
value = parameters[key]
structure.set_extra(key, value)
except KeyError:
pass
# Store the formula as a string, in both hill as well as hill-compact notation, so it can be easily queried for
structure.set_extra('formula_hill', structure.get_formula(mode='hill'))
structure.set_extra('formula_hill_compact', structure.get_formula(mode='hill_compact'))
structure.set_extra('chemical_system', '-{}-'.format('-'.join(sorted(structure.get_symbols_set()))))
return structure | This calcfunction will take a CifData node, attempt to create a StructureData object from it
using the 'parse_engine' and pass it through SeeKpath to try and get the primitive cell. Finally, it will
store several keys from the SeeKpath output parameters dictionary directly on the structure data as attributes,
which are otherwise difficult if not impossible to query for.
:param cif: the CifData node
:param parse_engine: the parsing engine, supported libraries 'ase' and 'pymatgen'
:param symprec: a Float node with symmetry precision for determining primitive cell in SeeKpath
:param site_tolerance: a Float node with the fractional coordinate distance tolerance for finding overlapping sites
This will only be used if the parse_engine is pymatgen
:returns: the primitive StructureData as determined by SeeKpath | Below is the the instruction that describes the task:
### Input:
This calcfunction will take a CifData node, attempt to create a StructureData object from it
using the 'parse_engine' and pass it through SeeKpath to try and get the primitive cell. Finally, it will
store several keys from the SeeKpath output parameters dictionary directly on the structure data as attributes,
which are otherwise difficult if not impossible to query for.
:param cif: the CifData node
:param parse_engine: the parsing engine, supported libraries 'ase' and 'pymatgen'
:param symprec: a Float node with symmetry precision for determining primitive cell in SeeKpath
:param site_tolerance: a Float node with the fractional coordinate distance tolerance for finding overlapping sites
This will only be used if the parse_engine is pymatgen
:returns: the primitive StructureData as determined by SeeKpath
### Response:
def primitive_structure_from_cif(cif, parse_engine, symprec, site_tolerance):
"""
This calcfunction will take a CifData node, attempt to create a StructureData object from it
using the 'parse_engine' and pass it through SeeKpath to try and get the primitive cell. Finally, it will
store several keys from the SeeKpath output parameters dictionary directly on the structure data as attributes,
which are otherwise difficult if not impossible to query for.
:param cif: the CifData node
:param parse_engine: the parsing engine, supported libraries 'ase' and 'pymatgen'
:param symprec: a Float node with symmetry precision for determining primitive cell in SeeKpath
:param site_tolerance: a Float node with the fractional coordinate distance tolerance for finding overlapping sites
This will only be used if the parse_engine is pymatgen
:returns: the primitive StructureData as determined by SeeKpath
"""
CifCleanWorkChain = WorkflowFactory('codtools.cif_clean') # pylint: disable=invalid-name
try:
structure = cif.get_structure(converter=parse_engine.value, site_tolerance=site_tolerance, store=False)
except exceptions.UnsupportedSpeciesError:
return CifCleanWorkChain.exit_codes.ERROR_CIF_HAS_UNKNOWN_SPECIES
except InvalidOccupationsError:
return CifCleanWorkChain.exit_codes.ERROR_CIF_HAS_INVALID_OCCUPANCIES
except Exception: # pylint: disable=broad-except
return CifCleanWorkChain.exit_codes.ERROR_CIF_STRUCTURE_PARSING_FAILED
try:
seekpath_results = get_kpoints_path(structure, symprec=symprec)
except ValueError:
return CifCleanWorkChain.exit_codes.ERROR_SEEKPATH_INCONSISTENT_SYMMETRY
except SymmetryDetectionError:
return CifCleanWorkChain.exit_codes.ERROR_SEEKPATH_SYMMETRY_DETECTION_FAILED
# Store important information that should be easily queryable as attributes in the StructureData
parameters = seekpath_results['parameters'].get_dict()
structure = seekpath_results['primitive_structure']
for key in ['spacegroup_international', 'spacegroup_number', 'bravais_lattice', 'bravais_lattice_extended']:
try:
value = parameters[key]
structure.set_extra(key, value)
except KeyError:
pass
# Store the formula as a string, in both hill as well as hill-compact notation, so it can be easily queried for
structure.set_extra('formula_hill', structure.get_formula(mode='hill'))
structure.set_extra('formula_hill_compact', structure.get_formula(mode='hill_compact'))
structure.set_extra('chemical_system', '-{}-'.format('-'.join(sorted(structure.get_symbols_set()))))
return structure |
def avg_pool(self, block_size):
"""
Resize the heatmap(s) array using average pooling of a given block/kernel size.
Parameters
----------
block_size : int or tuple of int
Size of each block of values to pool, aka kernel size. See :func:`imgaug.pool` for details.
Returns
-------
imgaug.HeatmapsOnImage
Heatmaps after average pooling.
"""
arr_0to1_reduced = ia.avg_pool(self.arr_0to1, block_size, cval=0.0)
return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value,
max_value=self.max_value) | Resize the heatmap(s) array using average pooling of a given block/kernel size.
Parameters
----------
block_size : int or tuple of int
Size of each block of values to pool, aka kernel size. See :func:`imgaug.pool` for details.
Returns
-------
imgaug.HeatmapsOnImage
Heatmaps after average pooling. | Below is the the instruction that describes the task:
### Input:
Resize the heatmap(s) array using average pooling of a given block/kernel size.
Parameters
----------
block_size : int or tuple of int
Size of each block of values to pool, aka kernel size. See :func:`imgaug.pool` for details.
Returns
-------
imgaug.HeatmapsOnImage
Heatmaps after average pooling.
### Response:
def avg_pool(self, block_size):
    """Downscale the heatmap array via average pooling.

    Parameters
    ----------
    block_size : int or tuple of int
        Kernel size of each pooling block. See :func:`imgaug.pool` for details.

    Returns
    -------
    imgaug.HeatmapsOnImage
        A new heatmaps object holding the pooled array.
    """
    # Pool the normalized [0, 1] array, padding with 0.0 where the kernel
    # overhangs the border, then rebuild a heatmaps object that keeps the
    # original image shape and value range.
    pooled = ia.avg_pool(self.arr_0to1, block_size, cval=0.0)
    return HeatmapsOnImage.from_0to1(
        pooled,
        shape=self.shape,
        min_value=self.min_value,
        max_value=self.max_value,
    )
def load_yaml(yaml_file: str) -> Any:
"""
Load YAML from file.
:param yaml_file: path to YAML file
:return: content of the YAML as dict/list
"""
with open(yaml_file, 'r') as file:
return ruamel.yaml.load(file, ruamel.yaml.RoundTripLoader) | Load YAML from file.
:param yaml_file: path to YAML file
:return: content of the YAML as dict/list | Below is the instruction that describes the task:
### Input:
Load YAML from file.
:param yaml_file: path to YAML file
:return: content of the YAML as dict/list
### Response:
def load_yaml(yaml_file: str) -> Any:
    """
    Load YAML from file, preserving comments and ordering (round-trip mode).

    :param yaml_file: path to YAML file
    :return: content of the YAML as dict/list
    """
    # The old-style ruamel.yaml.load(stream, RoundTripLoader) call is
    # deprecated and removed in ruamel.yaml >= 0.18.  YAML() defaults to
    # typ='rt' (round-trip), which matches the previous RoundTripLoader
    # behaviour and return types.
    yaml = ruamel.yaml.YAML()
    with open(yaml_file, 'r') as file:
        return yaml.load(file)
def get_consistent_edges(graph: BELGraph) -> Iterable[Tuple[BaseEntity, BaseEntity]]:
"""Yield pairs of (source node, target node) for which all of their edges have the same type of relation.
:return: An iterator over (source, target) node pairs corresponding to edges with many inconsistent relations
"""
for u, v in graph.edges():
if pair_is_consistent(graph, u, v):
yield u, v | Yield pairs of (source node, target node) for which all of their edges have the same type of relation.
:return: An iterator over (source, target) node pairs corresponding to edges with many inconsistent relations | Below is the instruction that describes the task:
### Input:
Yield pairs of (source node, target node) for which all of their edges have the same type of relation.
:return: An iterator over (source, target) node pairs corresponding to edges with many inconsistent relations
### Response:
def get_consistent_edges(graph: BELGraph) -> Iterable[Tuple[BaseEntity, BaseEntity]]:
    """Yield pairs of (source node, target node) for which all of their edges have the same type of relation.

    :return: An iterator over (source, target) node pairs whose edges all share one consistent relation type
    """
    # NOTE(review): if the graph is a multigraph, graph.edges() yields one
    # entry per parallel edge, so a consistent pair may be yielded more than
    # once -- confirm whether callers need deduplication.
    for u, v in graph.edges():
        if pair_is_consistent(graph, u, v):
            yield u, v
def graph_draw(self, mode):
""" Draws grid graph using networkx
This method is for debugging purposes only.
Use ding0.tools.plots.plot_mv_topology() for advanced plotting.
Parameters
----------
mode : str
Mode selection 'MV' or 'LV'.
Notes
-----
The geo coords (for used crs see database import in class `NetworkDing0`)
are used as positions for drawing but networkx uses cartesian crs.
Since no coordinate transformation is performed, the drawn graph representation is falsified!
"""
g = self._graph
if mode == 'MV':
# get draw params from nodes and edges (coordinates, colors, demands, etc.)
nodes_pos = {}; demands = {}; demands_pos = {}
nodes_color = []
for node in g.nodes():
if isinstance(node, (StationDing0,
LVLoadAreaCentreDing0,
CableDistributorDing0,
GeneratorDing0,
CircuitBreakerDing0)):
nodes_pos[node] = (node.geo_data.x, node.geo_data.y)
# TODO: MOVE draw/color settings to config
if node == self.station():
nodes_color.append((1, 0.5, 0.5))
else:
#demands[node] = 'd=' + '{:.3f}'.format(node.grid.region.peak_load_sum)
#demands_pos[node] = tuple([a+b for a, b in zip(nodes_pos[node], [0.003]*len(nodes_pos[node]))])
nodes_color.append((0.5, 0.5, 1))
edges_color = []
for edge in self.graph_edges():
if edge['branch'].critical:
edges_color.append((1, 0, 0))
else:
edges_color.append((0, 0, 0))
plt.figure()
nx.draw_networkx(g, nodes_pos, node_color=nodes_color, edge_color=edges_color, font_size=8)
#nx.draw_networkx_labels(g, demands_pos, labels=demands, font_size=8)
plt.show()
elif mode == 'LV':
nodes_pos = {}
nodes_color = []
for node in g.nodes():
# get neighbors of station (=first node of each branch)
station_neighbors = sorted(
g.neighbors(self.station()), key=lambda _: repr(_))
# set x-offset according to count of branches
if len(station_neighbors) % 2 == 0:
x_pos_start = -(len(station_neighbors) // 2 - 0.5)
else:
x_pos_start = -(len(station_neighbors) // 2)
# set positions
if isinstance(node, CableDistributorDing0):
if node.in_building:
nodes_pos[node] = (x_pos_start + node.branch_no - 1 + 0.25, -node.load_no - 2)
nodes_color.append((0.5, 0.5, 0.5))
else:
nodes_pos[node] = (x_pos_start + node.branch_no - 1, -node.load_no - 2)
nodes_color.append((0.5, 0.5, 0.5))
elif isinstance(node, LoadDing0):
nodes_pos[node] = (x_pos_start + node.branch_no - 1 + 0.5, -node.load_no - 2 - 0.25)
nodes_color.append((0.5, 0.5, 1))
elif isinstance(node, GeneratorDing0):
# get neighbor of geno
neighbor = list(g.neighbors(node))[0]
# neighbor is cable distributor of building
if isinstance(neighbor, CableDistributorDing0):
nodes_pos[node] = (x_pos_start + neighbor.branch_no - 1 + 0.5, -neighbor.load_no - 2 + 0.25)
else:
nodes_pos[node] = (1,1)
nodes_color.append((0.5, 1, 0.5))
elif isinstance(node, StationDing0):
nodes_pos[node] = (0, 0)
nodes_color.append((1, 0.5, 0.5))
plt.figure()
nx.draw_networkx(g, nodes_pos, node_color=nodes_color, font_size=8, node_size=100)
plt.show() | Draws grid graph using networkx
This method is for debugging purposes only.
Use ding0.tools.plots.plot_mv_topology() for advanced plotting.
Parameters
----------
mode : str
Mode selection 'MV' or 'LV'.
Notes
-----
The geo coords (for used crs see database import in class `NetworkDing0`)
are used as positions for drawing but networkx uses cartesian crs.
Since no coordinate transformation is performed, the drawn graph representation is falsified! | Below is the the instruction that describes the task:
### Input:
Draws grid graph using networkx
This method is for debugging purposes only.
Use ding0.tools.plots.plot_mv_topology() for advanced plotting.
Parameters
----------
mode : str
Mode selection 'MV' or 'LV'.
Notes
-----
The geo coords (for used crs see database import in class `NetworkDing0`)
are used as positions for drawing but networkx uses cartesian crs.
Since no coordinate transformation is performed, the drawn graph representation is falsified!
### Response:
def graph_draw(self, mode):
    """ Draws grid graph using networkx

    This method is for debugging purposes only.
    Use ding0.tools.plots.plot_mv_topology() for advanced plotting.

    Parameters
    ----------
    mode : str
        Mode selection 'MV' or 'LV'.

    Notes
    -----
    The geo coords (for used crs see database import in class `NetworkDing0`)
    are used as positions for drawing but networkx uses cartesian crs.
    Since no coordinate transformation is performed, the drawn graph representation is falsified!
    """
    g = self._graph
    if mode == 'MV':
        # get draw params from nodes and edges (coordinates and colors)
        nodes_pos = {}
        nodes_color = []
        for node in g.nodes():
            if isinstance(node, (StationDing0,
                                 LVLoadAreaCentreDing0,
                                 CableDistributorDing0,
                                 GeneratorDing0,
                                 CircuitBreakerDing0)):
                nodes_pos[node] = (node.geo_data.x, node.geo_data.y)
            # TODO: MOVE draw/color settings to config
            if node == self.station():
                # highlight this grid's own station
                nodes_color.append((1, 0.5, 0.5))
            else:
                nodes_color.append((0.5, 0.5, 1))
        # draw critical branches in red, all others in black
        edges_color = []
        for edge in self.graph_edges():
            if edge['branch'].critical:
                edges_color.append((1, 0, 0))
            else:
                edges_color.append((0, 0, 0))
        plt.figure()
        nx.draw_networkx(g, nodes_pos, node_color=nodes_color, edge_color=edges_color, font_size=8)
        plt.show()
    elif mode == 'LV':
        nodes_pos = {}
        nodes_color = []
        # Neighbors of the station (= first node of each branch) do not depend
        # on the node being drawn, so compute them once instead of once per
        # node as before.
        # NOTE(review): assumes self.station() and the neighbor set are
        # constant while drawing -- confirm.
        station_neighbors = sorted(
            g.neighbors(self.station()), key=lambda _: repr(_))
        # set x-offset according to count of branches
        if len(station_neighbors) % 2 == 0:
            x_pos_start = -(len(station_neighbors) // 2 - 0.5)
        else:
            x_pos_start = -(len(station_neighbors) // 2)
        # set positions per node type
        for node in g.nodes():
            if isinstance(node, CableDistributorDing0):
                if node.in_building:
                    nodes_pos[node] = (x_pos_start + node.branch_no - 1 + 0.25, -node.load_no - 2)
                    nodes_color.append((0.5, 0.5, 0.5))
                else:
                    nodes_pos[node] = (x_pos_start + node.branch_no - 1, -node.load_no - 2)
                    nodes_color.append((0.5, 0.5, 0.5))
            elif isinstance(node, LoadDing0):
                nodes_pos[node] = (x_pos_start + node.branch_no - 1 + 0.5, -node.load_no - 2 - 0.25)
                nodes_color.append((0.5, 0.5, 1))
            elif isinstance(node, GeneratorDing0):
                # get neighbor of geno
                neighbor = list(g.neighbors(node))[0]
                if isinstance(neighbor, CableDistributorDing0):
                    # neighbor is cable distributor of building
                    nodes_pos[node] = (x_pos_start + neighbor.branch_no - 1 + 0.5, -neighbor.load_no - 2 + 0.25)
                else:
                    nodes_pos[node] = (1, 1)
                nodes_color.append((0.5, 1, 0.5))
            elif isinstance(node, StationDing0):
                nodes_pos[node] = (0, 0)
                nodes_color.append((1, 0.5, 0.5))
        plt.figure()
        nx.draw_networkx(g, nodes_pos, node_color=nodes_color, font_size=8, node_size=100)
        plt.show()
def tail(self, stream='stdout', num_lines=10):
""" Tail a specified stream (stdout or stderr) by num_lines. """
target = self._map_string_to_file(stream)
if not target: # no current temp file
last_run = self.backend.get_latest_run_log(self.parent_job.job_id,
self.name)
if not last_run:
return None
return self._tail_string(last_run['tasks'][self.name][stream],
num_lines)
else:
return self._tail_temp_file(target, num_lines) | Tail a specified stream (stdout or stderr) by num_lines. | Below is the the instruction that describes the task:
### Input:
Tail a specified stream (stdout or stderr) by num_lines.
### Response:
def tail(self, stream='stdout', num_lines=10):
    """Return the last ``num_lines`` lines of the given stream ('stdout' or 'stderr')."""
    temp_file = self._map_string_to_file(stream)
    if temp_file:
        # A live temp file exists for this stream; tail it directly.
        return self._tail_temp_file(temp_file, num_lines)
    # No current temp file: fall back to the most recent persisted run log.
    last_run = self.backend.get_latest_run_log(self.parent_job.job_id,
                                               self.name)
    if not last_run:
        return None
    return self._tail_string(last_run['tasks'][self.name][stream],
                             num_lines)
def gen_csv(sc, filename, field_list, source, filters):
'''csv SecurityCenterObj, AssetListName, CSVFields, EmailAddress
'''
# First thing we need to do is initialize the csvfile and build the header
# for the file.
datafile = open(filename, 'wb')
csvfile = csv.writer(datafile)
header = []
for field in field_list:
header.append(fields.fields[field]['name'])
csvfile.writerow(header)
debug.write('Generating %s: ' % filename)
# Next we will run the Security Center query. because this could be a
# potentially very large dataset that is returned, we don't want to run out
# of memory. To get around this, we will pass the query function the writer
# function with the appropriate fields so that it is parsed inline.
fparams = {'fobj': csvfile, 'flist': field_list}
sc.query('vulndetails', source=source,
func=writer, func_params=fparams, **filters)
debug.write('\n')
# Lastly we need to close the datafile.
datafile.close() | csv SecurityCenterObj, AssetListName, CSVFields, EmailAddress | Below is the the instruction that describes the task:
### Input:
csv SecurityCenterObj, AssetListName, CSVFields, EmailAddress
### Response:
def gen_csv(sc, filename, field_list, source, filters):
    '''csv SecurityCenterObj, AssetListName, CSVFields, EmailAddress
    '''
    # Use a context manager so the file is closed even if the query below
    # raises (the original left the handle open on error).
    # NOTE(review): 'wb' matches the Python 2 csv idiom; under Python 3 the
    # csv module requires text mode (open(filename, 'w', newline='')) --
    # confirm the target runtime before changing.
    with open(filename, 'wb') as datafile:
        csvfile = csv.writer(datafile)
        # Header row: human-readable names for each requested field.
        header = [fields.fields[field]['name'] for field in field_list]
        csvfile.writerow(header)
        debug.write('Generating %s: ' % filename)
        # The result set can be very large; pass the row-writer callback into
        # the query so rows are emitted as they stream in instead of being
        # accumulated in memory.
        fparams = {'fobj': csvfile, 'flist': field_list}
        sc.query('vulndetails', source=source,
                 func=writer, func_params=fparams, **filters)
        debug.write('\n')
def evaluate(self, verbose=False, decode=True, passes=None, num_threads=1, apply_experimental=True):
"""Evaluates by creating an Index containing evaluated data.
See `LazyResult`
Returns
-------
Index
Index with evaluated data.
"""
evaluated_data = super(Index, self).evaluate(verbose, decode, passes, num_threads, apply_experimental)
return Index(evaluated_data, self.dtype, self.name) | Evaluates by creating an Index containing evaluated data.
See `LazyResult`
Returns
-------
Index
Index with evaluated data. | Below is the the instruction that describes the task:
### Input:
Evaluates by creating an Index containing evaluated data.
See `LazyResult`
Returns
-------
Index
Index with evaluated data.
### Response:
def evaluate(self, verbose=False, decode=True, passes=None, num_threads=1, apply_experimental=True):
    """Materialize this lazy Index into a new Index holding evaluated data.

    See `LazyResult`

    Returns
    -------
    Index
        Index wrapping the evaluated data, preserving this Index's dtype and name.
    """
    # Delegate the actual evaluation to the parent class, then re-wrap the
    # raw result so the dtype and name metadata carry over.
    data = super(Index, self).evaluate(verbose, decode, passes,
                                       num_threads, apply_experimental)
    return Index(data, self.dtype, self.name)
def _add_options_to_parser(self, opts_dict, parser):
"""Add options to a parser."""
store_bool = ('store_true', 'store_false')
for opt, sct in opts_dict.items():
meta = self._conf[sct].def_[opt]
kwargs = copy.deepcopy(meta.cmd_kwargs)
action = kwargs.get('action')
if action is internal.Switch:
kwargs.update(nargs=0)
elif meta.default is not None and action not in store_bool:
kwargs.setdefault('type', type(meta.default))
kwargs.update(help=meta.help)
kwargs.setdefault('default', self._conf[sct][opt])
parser.add_argument(*_names(self._conf[sct], opt), **kwargs) | Add options to a parser. | Below is the the instruction that describes the task:
### Input:
Add options to a parser.
### Response:
def _add_options_to_parser(self, opts_dict, parser):
    """Register every option in ``opts_dict`` on the given argparse parser."""
    for opt, sct in opts_dict.items():
        meta = self._conf[sct].def_[opt]
        # Deep-copy so per-option tweaks never leak back into the config.
        arg_kwargs = copy.deepcopy(meta.cmd_kwargs)
        action = arg_kwargs.get('action')
        if action is internal.Switch:
            # Switches are pure flags: they consume no command-line value.
            arg_kwargs.update(nargs=0)
        elif meta.default is not None and action not in ('store_true', 'store_false'):
            # Infer the value type from the default unless explicitly given.
            arg_kwargs.setdefault('type', type(meta.default))
        arg_kwargs.update(help=meta.help)
        # Current config value wins as default unless one was provided.
        arg_kwargs.setdefault('default', self._conf[sct][opt])
        parser.add_argument(*_names(self._conf[sct], opt), **arg_kwargs)
def initialize(self, cfg_file=None, mode=None):
"""Initialize the model for simulation, possibly given a config file.
Parameters
----------
cfg_file : Optional[str]
The name of the configuration file to load, optional.
"""
self.sim = ScipyOdeSimulator(self.model)
self.state = numpy.array(copy.copy(self.sim.initials)[0])
self.time = numpy.array(0.0)
self.status = 'initialized' | Initialize the model for simulation, possibly given a config file.
Parameters
----------
cfg_file : Optional[str]
The name of the configuration file to load, optional. | Below is the the instruction that describes the task:
### Input:
Initialize the model for simulation, possibly given a config file.
Parameters
----------
cfg_file : Optional[str]
The name of the configuration file to load, optional.
### Response:
def initialize(self, cfg_file=None, mode=None):
    """Initialize the model for simulation, possibly given a config file.

    Parameters
    ----------
    cfg_file : Optional[str]
        The name of the configuration file to load, optional.
        NOTE(review): currently accepted but never read by this
        implementation -- confirm whether loading it is still planned.
    """
    # 'mode' is likewise accepted for interface compatibility but unused here.
    self.sim = ScipyOdeSimulator(self.model)
    # Start from the simulator's initial conditions at t = 0.
    self.state = numpy.array(copy.copy(self.sim.initials)[0])
    self.time = numpy.array(0.0)
    self.status = 'initialized'
def surfdens(self,R,z,phi=0.,t=0.,forcepoisson=False):
"""
NAME:
surfdens
PURPOSE:
evaluate the surface density :math:`\\Sigma(R,z,\\phi,t) = \\int_{-z}^{+z} dz' \\rho(R,z',\\phi,t)`
INPUT:
R - Cylindrical Galactocentric radius (can be Quantity)
z - vertical height (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
KEYWORDS:
forcepoisson= if True, calculate the surface density through the Poisson equation, even if an explicit expression for the surface density exists
OUTPUT:
Sigma (R,z,phi,t)
HISTORY:
2018-08-19 - Written - Bovy (UofT)
"""
try:
if forcepoisson: raise AttributeError #Hack!
return self._amp*self._surfdens(R,z,phi=phi,t=t)
except AttributeError:
#Use the Poisson equation to get the surface density
return (-self.zforce(R,nu.fabs(z),phi=phi,t=t,use_physical=False)
+integrate.quad(\
lambda x: -self.Rforce(R,x,phi=phi,t=t,use_physical=False)/R
+self.R2deriv(R,x,phi=phi,t=t,use_physical=False)
+self.phi2deriv(R,x,phi=phi,t=t,use_physical=False)/R**2.,
0.,nu.fabs(z))[0])/2./nu.pi | NAME:
surfdens
PURPOSE:
evaluate the surface density :math:`\\Sigma(R,z,\\phi,t) = \\int_{-z}^{+z} dz' \\rho(R,z',\\phi,t)`
INPUT:
R - Cylindrical Galactocentric radius (can be Quantity)
z - vertical height (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
KEYWORDS:
forcepoisson= if True, calculate the surface density through the Poisson equation, even if an explicit expression for the surface density exists
OUTPUT:
Sigma (R,z,phi,t)
HISTORY:
2018-08-19 - Written - Bovy (UofT) | Below is the the instruction that describes the task:
### Input:
NAME:
surfdens
PURPOSE:
evaluate the surface density :math:`\\Sigma(R,z,\\phi,t) = \\int_{-z}^{+z} dz' \\rho(R,z',\\phi,t)`
INPUT:
R - Cylindrical Galactocentric radius (can be Quantity)
z - vertical height (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
KEYWORDS:
forcepoisson= if True, calculate the surface density through the Poisson equation, even if an explicit expression for the surface density exists
OUTPUT:
Sigma (R,z,phi,t)
HISTORY:
2018-08-19 - Written - Bovy (UofT)
### Response:
def surfdens(self,R,z,phi=0.,t=0.,forcepoisson=False):
    """
    NAME:
       surfdens
    PURPOSE:
       evaluate the surface density :math:`\\Sigma(R,z,\\phi,t) = \\int_{-z}^{+z} dz' \\rho(R,z',\\phi,t)`
    INPUT:
       R - Cylindrical Galactocentric radius (can be Quantity)
       z - vertical height (can be Quantity)
       phi - azimuth (optional; can be Quantity)
       t - time (optional; can be Quantity)
    KEYWORDS:
       forcepoisson= if True, calculate the surface density through the Poisson equation, even if an explicit expression for the surface density exists
    OUTPUT:
       Sigma (R,z,phi,t)
    HISTORY:
       2018-08-19 - Written - Bovy (UofT)
    """
    try:
        # EAFP dispatch: use the analytic self._surfdens when the subclass
        # provides one; raising AttributeError deliberately re-routes to the
        # generic Poisson-equation fallback when forcepoisson is set.
        if forcepoisson: raise AttributeError #Hack!
        return self._amp*self._surfdens(R,z,phi=phi,t=t)
    except AttributeError:
        #Use the Poisson equation to get the surface density
        # Sigma = [ -F_z(R,|z|) + int_0^|z| ( -F_R/R + d2Phi/dR2
        #           + d2Phi/dphi2 / R^2 ) dz' ] / (2 pi),
        # i.e. the integrand below is the in-plane part of the Laplacian.
        return (-self.zforce(R,nu.fabs(z),phi=phi,t=t,use_physical=False)
                +integrate.quad(\
            lambda x: -self.Rforce(R,x,phi=phi,t=t,use_physical=False)/R
            +self.R2deriv(R,x,phi=phi,t=t,use_physical=False)
            +self.phi2deriv(R,x,phi=phi,t=t,use_physical=False)/R**2.,
            0.,nu.fabs(z))[0])/2./nu.pi
def _verify_field_spec(self, spec, path):
"""Verifies a given field specification is valid, recursing into nested schemas if required."""
# Required should be a boolean
if 'required' in spec and not isinstance(spec['required'], bool):
raise SchemaFormatException("{} required declaration should be True or False", path)
# Required should be a boolean
if 'nullable' in spec and not isinstance(spec['nullable'], bool):
raise SchemaFormatException("{} nullable declaration should be True or False", path)
# Must have a type specified
if 'type' not in spec:
raise SchemaFormatException("{} has no type declared.", path)
self._verify_type(spec, path)
# Validations should be either a single function or array of functions
if 'validates' in spec:
self._verify_validates(spec, path)
# Defaults must be of the correct type or a function
if 'default' in spec:
self._verify_default(spec, path)
# Only expected spec keys are supported
if not set(spec.keys()).issubset(set(['type', 'required', 'validates', 'default', 'nullable'])):
raise SchemaFormatException("Unsupported field spec item at {}. Items: "+repr(spec.keys()), path) | Verifies a given field specification is valid, recursing into nested schemas if required. | Below is the the instruction that describes the task:
### Input:
Verifies a given field specification is valid, recursing into nested schemas if required.
### Response:
def _verify_field_spec(self, spec, path):
    """Verify one field specification, recursing into nested schemas where needed."""
    # 'required' must be a boolean when present
    if 'required' in spec and not isinstance(spec['required'], bool):
        raise SchemaFormatException("{} required declaration should be True or False", path)
    # 'nullable' must be a boolean when present
    if 'nullable' in spec and not isinstance(spec['nullable'], bool):
        raise SchemaFormatException("{} nullable declaration should be True or False", path)
    # A type declaration is mandatory
    if 'type' not in spec:
        raise SchemaFormatException("{} has no type declared.", path)
    self._verify_type(spec, path)
    # 'validates' may be a single function or an array of functions
    if 'validates' in spec:
        self._verify_validates(spec, path)
    # Defaults must be of the correct type or a function
    if 'default' in spec:
        self._verify_default(spec, path)
    # Reject any keys outside the supported vocabulary
    supported_keys = {'type', 'required', 'validates', 'default', 'nullable'}
    if not set(spec.keys()).issubset(supported_keys):
        raise SchemaFormatException("Unsupported field spec item at {}. Items: "+repr(spec.keys()), path)
def clean(self):
"""Remove services without host object linked to
Note that this should not happen!
:return: None
"""
to_del = []
for serv in self:
if not serv.host:
to_del.append(serv.uuid)
for service_uuid in to_del:
del self.items[service_uuid] | Remove services without host object linked to
Note that this should not happen!
:return: None | Below is the the instruction that describes the task:
### Input:
Remove services without host object linked to
Note that this should not happen!
:return: None
### Response:
def clean(self):
    """Remove services that have no host object linked to them.

    Note that this should not happen!

    :return: None
    """
    # Collect the orphan uuids first, then delete, so self.items is never
    # mutated while the collection is being scanned.
    orphan_uuids = [svc.uuid for svc in self if not svc.host]
    for service_uuid in orphan_uuids:
        del self.items[service_uuid]
def main():
"""
Setup consumer
"""
config = loader.load_config()
if config.version:
show_version()
if config.show_rules:
show_rules()
if not config.configfile and not (hasattr(config, "status") or hasattr(config, "stop")):
show_configfile_warning()
# Check if we have permissions to open the log file.
check_write_permissions(config.logfile)
start_proxy(config) | Setup consumer | Below is the the instruction that describes the task:
### Input:
Setup consumer
### Response:
def main():
    """
    Setup consumer
    """
    config = loader.load_config()
    if config.version:
        show_version()
    if config.show_rules:
        show_rules()
    # A config file is mandatory unless we are only querying status or
    # stopping a running instance.
    managing_daemon = hasattr(config, "status") or hasattr(config, "stop")
    if not config.configfile and not managing_daemon:
        show_configfile_warning()
    # Fail early if the log file is not writable.
    check_write_permissions(config.logfile)
    start_proxy(config)
def simulate_to_target_signal_to_noise(cls, array, pixel_scale, target_signal_to_noise, exposure_time_map,
psf=None, background_sky_map=None, seed=-1):
"""
Create a realistic simulated image by applying effects to a plain simulated image.
Parameters
----------
target_signal_to_noise
array : ndarray
The image before simulating (e.g. the lens and source galaxies before optics blurring and CCD read-out).
pixel_scale: float
The scale of each pixel in arc seconds
exposure_time_map : ndarray
An array representing the effective exposure time of each pixel.
psf: PSF
An array describing the PSF the simulated image is blurred with.
background_sky_map : ndarray
The value of background sky in every image pixel (electrons per second).
seed: int
A seed for random noise_maps generation
"""
max_index = np.unravel_index(array.argmax(), array.shape)
max_image = array[max_index]
max_effective_exposure_time = exposure_time_map[max_index]
max_array_counts = np.multiply(max_image, max_effective_exposure_time)
if background_sky_map is not None:
max_background_sky_map = background_sky_map[max_index]
max_background_sky_map_counts = np.multiply(max_background_sky_map, max_effective_exposure_time)
else:
max_background_sky_map_counts = None
scale_factor = 1.
if background_sky_map is None:
scale_factor = target_signal_to_noise ** 2.0 / max_array_counts
elif background_sky_map is not None:
scale_factor = (max_array_counts + max_background_sky_map_counts) * target_signal_to_noise ** 2.0 \
/ max_array_counts ** 2.0
scaled_effective_exposure_time = np.multiply(scale_factor, exposure_time_map)
return cls.simulate_variable_arrays(array=array, pixel_scale=pixel_scale,
exposure_time_map=scaled_effective_exposure_time,
psf=psf, background_sky_map=background_sky_map,
add_noise=True, noise_seed=seed) | Create a realistic simulated image by applying effects to a plain simulated image.
Parameters
----------
target_signal_to_noise
array : ndarray
The image before simulating (e.g. the lens and source galaxies before optics blurring and CCD read-out).
pixel_scale: float
The scale of each pixel in arc seconds
exposure_time_map : ndarray
An array representing the effective exposure time of each pixel.
psf: PSF
An array describing the PSF the simulated image is blurred with.
background_sky_map : ndarray
The value of background sky in every image pixel (electrons per second).
seed: int
A seed for random noise_maps generation | Below is the the instruction that describes the task:
### Input:
Create a realistic simulated image by applying effects to a plain simulated image.
Parameters
----------
target_signal_to_noise
array : ndarray
The image before simulating (e.g. the lens and source galaxies before optics blurring and CCD read-out).
pixel_scale: float
The scale of each pixel in arc seconds
exposure_time_map : ndarray
An array representing the effective exposure time of each pixel.
psf: PSF
An array describing the PSF the simulated image is blurred with.
background_sky_map : ndarray
The value of background sky in every image pixel (electrons per second).
seed: int
A seed for random noise_maps generation
### Response:
def simulate_to_target_signal_to_noise(cls, array, pixel_scale, target_signal_to_noise, exposure_time_map,
                                       psf=None, background_sky_map=None, seed=-1):
    """
    Create a realistic simulated image by applying effects to a plain simulated image, rescaling the
    exposure time map so the brightest pixel reaches the target signal-to-noise ratio.

    Parameters
    ----------
    target_signal_to_noise : float
        The signal-to-noise ratio the brightest image pixel should have after simulation.
    array : ndarray
        The image before simulating (e.g. the lens and source galaxies before optics blurring and CCD read-out).
    pixel_scale: float
        The scale of each pixel in arc seconds
    exposure_time_map : ndarray
        An array representing the effective exposure time of each pixel.
    psf: PSF
        An array describing the PSF the simulated image is blurred with.
    background_sky_map : ndarray
        The value of background sky in every image pixel (electrons per second).
    seed: int
        A seed for random noise_maps generation
    """
    # Work from the brightest pixel: its counts determine the achievable S/N.
    max_index = np.unravel_index(array.argmax(), array.shape)
    max_image = array[max_index]
    max_effective_exposure_time = exposure_time_map[max_index]
    max_array_counts = np.multiply(max_image, max_effective_exposure_time)
    # Scale the exposure so the brightest pixel's counts satisfy the target
    # S/N: S/N^2 / counts without a sky, or
    # (counts + sky counts) * S/N^2 / counts^2 with a sky.
    # (The original also assigned a dead scale_factor = 1. and used a
    # redundant elif; both removed.)
    if background_sky_map is None:
        scale_factor = target_signal_to_noise ** 2.0 / max_array_counts
    else:
        max_background_sky_map_counts = np.multiply(background_sky_map[max_index],
                                                    max_effective_exposure_time)
        scale_factor = (max_array_counts + max_background_sky_map_counts) * target_signal_to_noise ** 2.0 \
                       / max_array_counts ** 2.0
    scaled_effective_exposure_time = np.multiply(scale_factor, exposure_time_map)
    return cls.simulate_variable_arrays(array=array, pixel_scale=pixel_scale,
                                        exposure_time_map=scaled_effective_exposure_time,
                                        psf=psf, background_sky_map=background_sky_map,
                                        add_noise=True, noise_seed=seed)
def reset(self, mode=InputMode.INSERT):
"""
Reset state, go back to the given mode. INSERT by default.
"""
# Go back to insert mode.
self.input_mode = mode
self.waiting_for_digraph = False
self.operator_func = None
self.operator_arg = None | Reset state, go back to the given mode. INSERT by default. | Below is the the instruction that describes the task:
### Input:
Reset state, go back to the given mode. INSERT by default.
### Response:
def reset(self, mode=InputMode.INSERT):
    """
    Reset state, go back to the given mode. INSERT by default.
    """
    self.input_mode = mode
    # Clear any pending digraph / operator state left over from the
    # previous mode.
    self.waiting_for_digraph = False
    self.operator_func = self.operator_arg = None
def create(self):
"""Create the widget layout with all the information."""
b0 = QGroupBox('Dataset')
form = QFormLayout()
b0.setLayout(form)
open_rec = QPushButton('Open Dataset...')
open_rec.clicked.connect(self.open_dataset)
open_rec.setToolTip('Click here to open a new recording')
self.idx_filename = open_rec
self.idx_s_freq = QLabel('')
self.idx_n_chan = QLabel('')
self.idx_start_time = QLabel('')
self.idx_end_time = QLabel('')
form.addRow('Filename:', self.idx_filename)
form.addRow('Sampl. Freq:', self.idx_s_freq)
form.addRow('N. Channels:', self.idx_n_chan)
form.addRow('Start Time: ', self.idx_start_time)
form.addRow('End Time: ', self.idx_end_time)
b1 = QGroupBox('View')
form = QFormLayout()
b1.setLayout(form)
self.idx_start = QLabel('')
self.idx_start.setToolTip('Start time in seconds from the beginning of'
' the recordings')
self.idx_length = QLabel('')
self.idx_length.setToolTip('Duration of the time window in seconds')
self.idx_scaling = QLabel('')
self.idx_scaling.setToolTip('Global scaling for all the channels')
self.idx_distance = QLabel('')
self.idx_distance.setToolTip('Visual distances between the traces of '
'individual channels')
form.addRow('Start Time:', self.idx_start)
form.addRow('Length:', self.idx_length)
form.addRow('Scaling:', self.idx_scaling)
form.addRow('Distance:', self.idx_distance)
layout = QVBoxLayout()
layout.addWidget(b0)
layout.addWidget(b1)
self.setLayout(layout) | Create the widget layout with all the information. | Below is the the instruction that describes the task:
### Input:
Create the widget layout with all the information.
### Response:
def create(self):
    """Create the widget layout with all the information."""
    # --- Dataset group: file metadata, filled in once a recording is opened ---
    b0 = QGroupBox('Dataset')
    form = QFormLayout()
    b0.setLayout(form)
    open_rec = QPushButton('Open Dataset...')
    open_rec.clicked.connect(self.open_dataset)
    open_rec.setToolTip('Click here to open a new recording')
    # The button doubles as the "filename" row widget.
    self.idx_filename = open_rec
    # Info labels start empty; they are populated after a dataset is loaded.
    self.idx_s_freq = QLabel('')
    self.idx_n_chan = QLabel('')
    self.idx_start_time = QLabel('')
    self.idx_end_time = QLabel('')
    form.addRow('Filename:', self.idx_filename)
    form.addRow('Sampl. Freq:', self.idx_s_freq)
    form.addRow('N. Channels:', self.idx_n_chan)
    form.addRow('Start Time: ', self.idx_start_time)
    form.addRow('End Time: ', self.idx_end_time)
    # --- View group: current window position / display parameters ---
    b1 = QGroupBox('View')
    form = QFormLayout()
    b1.setLayout(form)
    self.idx_start = QLabel('')
    self.idx_start.setToolTip('Start time in seconds from the beginning of'
                              ' the recordings')
    self.idx_length = QLabel('')
    self.idx_length.setToolTip('Duration of the time window in seconds')
    self.idx_scaling = QLabel('')
    self.idx_scaling.setToolTip('Global scaling for all the channels')
    self.idx_distance = QLabel('')
    self.idx_distance.setToolTip('Visual distances between the traces of '
                                 'individual channels')
    form.addRow('Start Time:', self.idx_start)
    form.addRow('Length:', self.idx_length)
    form.addRow('Scaling:', self.idx_scaling)
    form.addRow('Distance:', self.idx_distance)
    # Stack the two group boxes vertically as this widget's layout.
    layout = QVBoxLayout()
    layout.addWidget(b0)
    layout.addWidget(b1)
    self.setLayout(layout)
def generate_nhs_number_from_first_9_digits(first9digits: str) -> Optional[int]:
"""
Returns a valid NHS number, as an ``int``, given the first 9 digits.
The particular purpose is to make NHS numbers that *look* fake (rather
than truly random NHS numbers which might accidentally be real).
For example:
.. code-block:: none
123456789_ : no; checksum 10
987654321_ : yes, valid if completed to 9876543210
999999999_ : yes, valid if completed to 9999999999
"""
if len(first9digits) != 9:
log.warning("Not 9 digits")
return None
try:
first9int = int(first9digits)
except (TypeError, ValueError):
log.warning("Not an integer")
return None # not an int
if len(str(first9int)) != len(first9digits):
# e.g. leading zeros, or some such
log.warning("Leading zeros?")
return None
check_digit = nhs_check_digit(first9digits)
if check_digit == 10: # NHS numbers with this check digit are all invalid
log.warning("Can't have check digit of 10")
return None
return int(first9digits + str(check_digit)) | Returns a valid NHS number, as an ``int``, given the first 9 digits.
The particular purpose is to make NHS numbers that *look* fake (rather
than truly random NHS numbers which might accidentally be real).
For example:
.. code-block:: none
123456789_ : no; checksum 10
987654321_ : yes, valid if completed to 9876543210
999999999_ : yes, valid if completed to 9999999999 | Below is the the instruction that describes the task:
### Input:
Returns a valid NHS number, as an ``int``, given the first 9 digits.
The particular purpose is to make NHS numbers that *look* fake (rather
than truly random NHS numbers which might accidentally be real).
For example:
.. code-block:: none
123456789_ : no; checksum 10
987654321_ : yes, valid if completed to 9876543210
999999999_ : yes, valid if completed to 9999999999
### Response:
def generate_nhs_number_from_first_9_digits(first9digits: str) -> Optional[int]:
"""
Returns a valid NHS number, as an ``int``, given the first 9 digits.
The particular purpose is to make NHS numbers that *look* fake (rather
than truly random NHS numbers which might accidentally be real).
For example:
.. code-block:: none
123456789_ : no; checksum 10
987654321_ : yes, valid if completed to 9876543210
999999999_ : yes, valid if completed to 9999999999
"""
if len(first9digits) != 9:
log.warning("Not 9 digits")
return None
try:
first9int = int(first9digits)
except (TypeError, ValueError):
log.warning("Not an integer")
return None # not an int
if len(str(first9int)) != len(first9digits):
# e.g. leading zeros, or some such
log.warning("Leading zeros?")
return None
check_digit = nhs_check_digit(first9digits)
if check_digit == 10: # NHS numbers with this check digit are all invalid
log.warning("Can't have check digit of 10")
return None
return int(first9digits + str(check_digit)) |
def ConvertFromWireFormat(self, value, container=None):
"""The wire format is an AnyValue message."""
result = AnyValue()
ReadIntoObject(value[2], 0, result)
if self._type is not None:
converted_value = self._type(container)
else:
converted_value = self._TypeFromAnyValue(result)
# If one of the protobuf library wrapper classes is used, unwrap the value.
if result.type_url.startswith("type.googleapis.com/google.protobuf."):
wrapper_cls = self.__class__.WRAPPER_BY_TYPE[
converted_value.data_store_type]
wrapper_value = wrapper_cls()
wrapper_value.ParseFromString(result.value)
return converted_value.FromDatastoreValue(wrapper_value.value)
else:
# TODO(user): Type stored in type_url is currently ignored when value
# is decoded. We should use it to deserialize the value and then check
# that value type and dynamic type are compatible.
return converted_value.FromSerializedString(result.value) | The wire format is an AnyValue message. | Below is the the instruction that describes the task:
### Input:
The wire format is an AnyValue message.
### Response:
def ConvertFromWireFormat(self, value, container=None):
"""The wire format is an AnyValue message."""
result = AnyValue()
ReadIntoObject(value[2], 0, result)
if self._type is not None:
converted_value = self._type(container)
else:
converted_value = self._TypeFromAnyValue(result)
# If one of the protobuf library wrapper classes is used, unwrap the value.
if result.type_url.startswith("type.googleapis.com/google.protobuf."):
wrapper_cls = self.__class__.WRAPPER_BY_TYPE[
converted_value.data_store_type]
wrapper_value = wrapper_cls()
wrapper_value.ParseFromString(result.value)
return converted_value.FromDatastoreValue(wrapper_value.value)
else:
# TODO(user): Type stored in type_url is currently ignored when value
# is decoded. We should use it to deserialize the value and then check
# that value type and dynamic type are compatible.
return converted_value.FromSerializedString(result.value) |
def _construct_field_operator_expression_dict(expression_list):
"""Construct a mapping from local fields to specified operators, and corresponding expressions.
Args:
expression_list: list of expressions to analyze
Returns:
local_field_to_expressions:
dict mapping local field names to "operator -> list of BinaryComposition" dictionaries,
for each BinaryComposition operator involving the LocalField
remaining_expression_list:
list of remaining expressions that were *not*
BinaryCompositions on a LocalField using any of the between operators
"""
between_operators = (u'<=', u'>=')
inverse_operator = {u'>=': u'<=', u'<=': u'>='}
local_field_to_expressions = {}
remaining_expression_list = deque([])
for expression in expression_list:
if all((
isinstance(expression, BinaryComposition),
expression.operator in between_operators,
isinstance(expression.left, LocalField) or isinstance(expression.right, LocalField)
)):
if isinstance(expression.right, LocalField):
new_operator = inverse_operator[expression.operator]
new_expression = BinaryComposition(new_operator, expression.right, expression.left)
else:
new_expression = expression
field_name = new_expression.left.field_name
expressions_dict = local_field_to_expressions.setdefault(field_name, {})
expressions_dict.setdefault(new_expression.operator, []).append(new_expression)
else:
remaining_expression_list.append(expression)
return local_field_to_expressions, remaining_expression_list | Construct a mapping from local fields to specified operators, and corresponding expressions.
Args:
expression_list: list of expressions to analyze
Returns:
local_field_to_expressions:
dict mapping local field names to "operator -> list of BinaryComposition" dictionaries,
for each BinaryComposition operator involving the LocalField
remaining_expression_list:
list of remaining expressions that were *not*
BinaryCompositions on a LocalField using any of the between operators | Below is the the instruction that describes the task:
### Input:
Construct a mapping from local fields to specified operators, and corresponding expressions.
Args:
expression_list: list of expressions to analyze
Returns:
local_field_to_expressions:
dict mapping local field names to "operator -> list of BinaryComposition" dictionaries,
for each BinaryComposition operator involving the LocalField
remaining_expression_list:
list of remaining expressions that were *not*
BinaryCompositions on a LocalField using any of the between operators
### Response:
def _construct_field_operator_expression_dict(expression_list):
"""Construct a mapping from local fields to specified operators, and corresponding expressions.
Args:
expression_list: list of expressions to analyze
Returns:
local_field_to_expressions:
dict mapping local field names to "operator -> list of BinaryComposition" dictionaries,
for each BinaryComposition operator involving the LocalField
remaining_expression_list:
list of remaining expressions that were *not*
BinaryCompositions on a LocalField using any of the between operators
"""
between_operators = (u'<=', u'>=')
inverse_operator = {u'>=': u'<=', u'<=': u'>='}
local_field_to_expressions = {}
remaining_expression_list = deque([])
for expression in expression_list:
if all((
isinstance(expression, BinaryComposition),
expression.operator in between_operators,
isinstance(expression.left, LocalField) or isinstance(expression.right, LocalField)
)):
if isinstance(expression.right, LocalField):
new_operator = inverse_operator[expression.operator]
new_expression = BinaryComposition(new_operator, expression.right, expression.left)
else:
new_expression = expression
field_name = new_expression.left.field_name
expressions_dict = local_field_to_expressions.setdefault(field_name, {})
expressions_dict.setdefault(new_expression.operator, []).append(new_expression)
else:
remaining_expression_list.append(expression)
return local_field_to_expressions, remaining_expression_list |
def WideResnet(num_blocks=3, hidden_size=64, num_output_classes=10,
mode='train'):
"""WideResnet from https://arxiv.org/pdf/1605.07146.pdf.
Args:
num_blocks: int, number of blocks in a group.
hidden_size: the size of the first hidden layer (multiplied later).
num_output_classes: int, number of classes to distinguish.
mode: is it training or eval.
Returns:
The WideResnet model with given layer and output sizes.
"""
del mode
return layers.Serial(
layers.Conv(hidden_size, (3, 3), padding='SAME'),
WideResnetGroup(num_blocks, hidden_size),
WideResnetGroup(num_blocks, hidden_size * 2, (2, 2)),
WideResnetGroup(num_blocks, hidden_size * 4, (2, 2)), layers.BatchNorm(),
layers.Relu(), layers.AvgPool(pool_size=(8, 8)), layers.Flatten(),
layers.Dense(num_output_classes), layers.LogSoftmax()) | WideResnet from https://arxiv.org/pdf/1605.07146.pdf.
Args:
num_blocks: int, number of blocks in a group.
hidden_size: the size of the first hidden layer (multiplied later).
num_output_classes: int, number of classes to distinguish.
mode: is it training or eval.
Returns:
The WideResnet model with given layer and output sizes. | Below is the the instruction that describes the task:
### Input:
WideResnet from https://arxiv.org/pdf/1605.07146.pdf.
Args:
num_blocks: int, number of blocks in a group.
hidden_size: the size of the first hidden layer (multiplied later).
num_output_classes: int, number of classes to distinguish.
mode: is it training or eval.
Returns:
The WideResnet model with given layer and output sizes.
### Response:
def WideResnet(num_blocks=3, hidden_size=64, num_output_classes=10,
mode='train'):
"""WideResnet from https://arxiv.org/pdf/1605.07146.pdf.
Args:
num_blocks: int, number of blocks in a group.
hidden_size: the size of the first hidden layer (multiplied later).
num_output_classes: int, number of classes to distinguish.
mode: is it training or eval.
Returns:
The WideResnet model with given layer and output sizes.
"""
del mode
return layers.Serial(
layers.Conv(hidden_size, (3, 3), padding='SAME'),
WideResnetGroup(num_blocks, hidden_size),
WideResnetGroup(num_blocks, hidden_size * 2, (2, 2)),
WideResnetGroup(num_blocks, hidden_size * 4, (2, 2)), layers.BatchNorm(),
layers.Relu(), layers.AvgPool(pool_size=(8, 8)), layers.Flatten(),
layers.Dense(num_output_classes), layers.LogSoftmax()) |
def create_method(self):
"""
Build the estimator method or function.
Returns
-------
:return : string
The built method as string.
"""
temp_method = self.temp('method', skipping=True, n_indents=1)
return temp_method.format(class_name=self.class_name,
method_name=self.method_name,
n_features=self.n_inputs,
n_classes=self.n_outputs) | Build the estimator method or function.
Returns
-------
:return : string
The built method as string. | Below is the the instruction that describes the task:
### Input:
Build the estimator method or function.
Returns
-------
:return : string
The built method as string.
### Response:
def create_method(self):
"""
Build the estimator method or function.
Returns
-------
:return : string
The built method as string.
"""
temp_method = self.temp('method', skipping=True, n_indents=1)
return temp_method.format(class_name=self.class_name,
method_name=self.method_name,
n_features=self.n_inputs,
n_classes=self.n_outputs) |
def replace_note(self, player, text):
"""Replace note text with text. (Overwrites previous note!)"""
note = self._find_note(player)
note.text = text | Replace note text with text. (Overwrites previous note!) | Below is the the instruction that describes the task:
### Input:
Replace note text with text. (Overwrites previous note!)
### Response:
def replace_note(self, player, text):
"""Replace note text with text. (Overwrites previous note!)"""
note = self._find_note(player)
note.text = text |
def get_ns2goids(self, goids):
"""Group GO IDs by namespace."""
ns2goids = cx.defaultdict(set)
go2nt = self.go2nt
for goid in goids:
ns2goids[go2nt[goid].NS].add(goid)
return {ns:gos for ns, gos in ns2goids.items()} | Group GO IDs by namespace. | Below is the the instruction that describes the task:
### Input:
Group GO IDs by namespace.
### Response:
def get_ns2goids(self, goids):
"""Group GO IDs by namespace."""
ns2goids = cx.defaultdict(set)
go2nt = self.go2nt
for goid in goids:
ns2goids[go2nt[goid].NS].add(goid)
return {ns:gos for ns, gos in ns2goids.items()} |
def get_proficiency_query_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the proficiency query service for the given objective bank.
:param objective_bank_id: the ``Id`` of the obective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ProficiencyQuerySession``
:rtype: ``osid.learning.ProficiencyQuerySession``
:raise: ``NotFound`` -- no ``ObjectiveBank`` found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_query()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_proficiency_query()`` and ``supports_visible_federation()`` are ``true``*
"""
if not objective_bank_id:
raise NullArgument
if not self.supports_proficiency_query():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.ProficiencyQuerySession(objective_bank_id=objective_bank_id, proxy=proxy, runtime=self._runtime)
except AttributeError:
raise OperationFailed()
return session | Gets the ``OsidSession`` associated with the proficiency query service for the given objective bank.
:param objective_bank_id: the ``Id`` of the obective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ProficiencyQuerySession``
:rtype: ``osid.learning.ProficiencyQuerySession``
:raise: ``NotFound`` -- no ``ObjectiveBank`` found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_query()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_proficiency_query()`` and ``supports_visible_federation()`` are ``true``* | Below is the the instruction that describes the task:
### Input:
Gets the ``OsidSession`` associated with the proficiency query service for the given objective bank.
:param objective_bank_id: the ``Id`` of the obective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ProficiencyQuerySession``
:rtype: ``osid.learning.ProficiencyQuerySession``
:raise: ``NotFound`` -- no ``ObjectiveBank`` found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_query()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_proficiency_query()`` and ``supports_visible_federation()`` are ``true``*
### Response:
def get_proficiency_query_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the proficiency query service for the given objective bank.
:param objective_bank_id: the ``Id`` of the obective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ProficiencyQuerySession``
:rtype: ``osid.learning.ProficiencyQuerySession``
:raise: ``NotFound`` -- no ``ObjectiveBank`` found by the given ``Id``
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_query()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_proficiency_query()`` and ``supports_visible_federation()`` are ``true``*
"""
if not objective_bank_id:
raise NullArgument
if not self.supports_proficiency_query():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.ProficiencyQuerySession(objective_bank_id=objective_bank_id, proxy=proxy, runtime=self._runtime)
except AttributeError:
raise OperationFailed()
return session |
def increment(self):
"increment the counter, and wake anyone waiting for the new value"
self._count += 1
waiters = self._waiters.pop(self._count, [])
if waiters:
scheduler.state.awoken_from_events.update(waiters) | increment the counter, and wake anyone waiting for the new value | Below is the the instruction that describes the task:
### Input:
increment the counter, and wake anyone waiting for the new value
### Response:
def increment(self):
"increment the counter, and wake anyone waiting for the new value"
self._count += 1
waiters = self._waiters.pop(self._count, [])
if waiters:
scheduler.state.awoken_from_events.update(waiters) |
def keys(self, full_grid=False):
"""Returns the keys of the GridSpace
Args:
full_grid (bool, optional): Return full cross-product of keys
Returns:
List of keys
"""
keys = super(GridSpace, self).keys()
if self.ndims == 1 or not full_grid:
return keys
dim1_keys = sorted(set(k[0] for k in keys))
dim2_keys = sorted(set(k[1] for k in keys))
return [(d1, d2) for d1 in dim1_keys for d2 in dim2_keys] | Returns the keys of the GridSpace
Args:
full_grid (bool, optional): Return full cross-product of keys
Returns:
List of keys | Below is the the instruction that describes the task:
### Input:
Returns the keys of the GridSpace
Args:
full_grid (bool, optional): Return full cross-product of keys
Returns:
List of keys
### Response:
def keys(self, full_grid=False):
"""Returns the keys of the GridSpace
Args:
full_grid (bool, optional): Return full cross-product of keys
Returns:
List of keys
"""
keys = super(GridSpace, self).keys()
if self.ndims == 1 or not full_grid:
return keys
dim1_keys = sorted(set(k[0] for k in keys))
dim2_keys = sorted(set(k[1] for k in keys))
return [(d1, d2) for d1 in dim1_keys for d2 in dim2_keys] |
def add(self, obj):
"""
Add an instance of :class:`SubComponent <hl7apy.core.SubComponent>` to the list of children
:param obj: an instance of :class:`SubComponent <hl7apy.core.SubComponent>`
>>> c = Component('CX_10')
>>> s = SubComponent(name='CWE_1', value='EXAMPLE_ID')
>>> s2 = SubComponent(name='CWE_4', value='ALT_ID')
>>> c.add(s)
>>> c.add(s2)
>>> print(c.to_er7())
EXAMPLE_ID&&&ALT_ID
"""
# base datatype components can't have more than one child
if self.name and is_base_datatype(self.datatype, self.version) and \
len(self.children) >= 1:
raise MaxChildLimitReached(self, obj, 1)
# the name is different from the datatype (i.e. the name has been forced to be equal to the datatype)
try:
if obj.name and obj.name != obj.datatype:
try:
if not _valid_child_name(obj.name, self.datatype):
raise ChildNotValid(obj.name, self)
except AttributeError:
pass
except ChildNotFound: # obj.datatype causes ChildNotFound for some Elements (Message, Groups etc)
raise ChildNotValid(obj, self)
return super(Component, self).add(obj) | Add an instance of :class:`SubComponent <hl7apy.core.SubComponent>` to the list of children
:param obj: an instance of :class:`SubComponent <hl7apy.core.SubComponent>`
>>> c = Component('CX_10')
>>> s = SubComponent(name='CWE_1', value='EXAMPLE_ID')
>>> s2 = SubComponent(name='CWE_4', value='ALT_ID')
>>> c.add(s)
>>> c.add(s2)
>>> print(c.to_er7())
EXAMPLE_ID&&&ALT_ID | Below is the the instruction that describes the task:
### Input:
Add an instance of :class:`SubComponent <hl7apy.core.SubComponent>` to the list of children
:param obj: an instance of :class:`SubComponent <hl7apy.core.SubComponent>`
>>> c = Component('CX_10')
>>> s = SubComponent(name='CWE_1', value='EXAMPLE_ID')
>>> s2 = SubComponent(name='CWE_4', value='ALT_ID')
>>> c.add(s)
>>> c.add(s2)
>>> print(c.to_er7())
EXAMPLE_ID&&&ALT_ID
### Response:
def add(self, obj):
"""
Add an instance of :class:`SubComponent <hl7apy.core.SubComponent>` to the list of children
:param obj: an instance of :class:`SubComponent <hl7apy.core.SubComponent>`
>>> c = Component('CX_10')
>>> s = SubComponent(name='CWE_1', value='EXAMPLE_ID')
>>> s2 = SubComponent(name='CWE_4', value='ALT_ID')
>>> c.add(s)
>>> c.add(s2)
>>> print(c.to_er7())
EXAMPLE_ID&&&ALT_ID
"""
# base datatype components can't have more than one child
if self.name and is_base_datatype(self.datatype, self.version) and \
len(self.children) >= 1:
raise MaxChildLimitReached(self, obj, 1)
# the name is different from the datatype (i.e. the name has been forced to be equal to the datatype)
try:
if obj.name and obj.name != obj.datatype:
try:
if not _valid_child_name(obj.name, self.datatype):
raise ChildNotValid(obj.name, self)
except AttributeError:
pass
except ChildNotFound: # obj.datatype causes ChildNotFound for some Elements (Message, Groups etc)
raise ChildNotValid(obj, self)
return super(Component, self).add(obj) |
def accept_connection(self, name=None, alias=None, timeout=0):
"""Accepts a connection to server identified by `name` or the latest
server if `name` is empty.
If given an `alias`, the connection is named and can be later referenced
with that name.
If `timeout` is > 0, the connection times out after the time specified.
`timeout` defaults to 0 which will wait indefinitely.
Empty value or None will use socket default timeout.
Examples:
| Accept connection |
| Accept connection | Server1 | my_connection |
| Accept connection | Server1 | my_connection | timeout=5 |
"""
server = self._servers.get(name)
server.accept_connection(alias, timeout) | Accepts a connection to server identified by `name` or the latest
server if `name` is empty.
If given an `alias`, the connection is named and can be later referenced
with that name.
If `timeout` is > 0, the connection times out after the time specified.
`timeout` defaults to 0 which will wait indefinitely.
Empty value or None will use socket default timeout.
Examples:
| Accept connection |
| Accept connection | Server1 | my_connection |
| Accept connection | Server1 | my_connection | timeout=5 | | Below is the the instruction that describes the task:
### Input:
Accepts a connection to server identified by `name` or the latest
server if `name` is empty.
If given an `alias`, the connection is named and can be later referenced
with that name.
If `timeout` is > 0, the connection times out after the time specified.
`timeout` defaults to 0 which will wait indefinitely.
Empty value or None will use socket default timeout.
Examples:
| Accept connection |
| Accept connection | Server1 | my_connection |
| Accept connection | Server1 | my_connection | timeout=5 |
### Response:
def accept_connection(self, name=None, alias=None, timeout=0):
"""Accepts a connection to server identified by `name` or the latest
server if `name` is empty.
If given an `alias`, the connection is named and can be later referenced
with that name.
If `timeout` is > 0, the connection times out after the time specified.
`timeout` defaults to 0 which will wait indefinitely.
Empty value or None will use socket default timeout.
Examples:
| Accept connection |
| Accept connection | Server1 | my_connection |
| Accept connection | Server1 | my_connection | timeout=5 |
"""
server = self._servers.get(name)
server.accept_connection(alias, timeout) |
def get_range(self, address=None):
"""
Returns a Range instance from whitin this worksheet
:param str address: Optional, the range address you want
:return: a Range instance
"""
url = self.build_url(self._endpoints.get('get_range'))
if address is not None:
url = "{}(address='{}')".format(url, address)
response = self.session.get(url)
if not response:
return None
return self.range_constructor(parent=self, **{self._cloud_data_key: response.json()}) | Returns a Range instance from whitin this worksheet
:param str address: Optional, the range address you want
:return: a Range instance | Below is the the instruction that describes the task:
### Input:
Returns a Range instance from whitin this worksheet
:param str address: Optional, the range address you want
:return: a Range instance
### Response:
def get_range(self, address=None):
"""
Returns a Range instance from whitin this worksheet
:param str address: Optional, the range address you want
:return: a Range instance
"""
url = self.build_url(self._endpoints.get('get_range'))
if address is not None:
url = "{}(address='{}')".format(url, address)
response = self.session.get(url)
if not response:
return None
return self.range_constructor(parent=self, **{self._cloud_data_key: response.json()}) |
def _calcEnergyStretchTwist(self, diff, es, which):
r"""Calculate energy for ``estype='ST'`` using a difference vector.
It is called in :meth:`dnaEY.getGlobalDeformationEnergy` for energy calculation of each frame.
Parameters
----------
diff : numpy.ndarray
Array of difference between minimum and current parameter values.
.. math::
\mathbf{x} = \begin{bmatrix}
(L_i - L_0) & (\phi_i - \phi_0)
\end{bmatrix}
es : numpy.ndarray
Elastic matrix. See in :meth:`dnaEY.getStretchTwistModulus` about elastic matrix.
which : str
For which type of motions, energy will be calculated.
See ``which`` parameter in :meth:`dnaEY.getGlobalDeformationEnergy` for keywords.
Return
------
energy : float
Deformation free energy value
"""
if which not in self.enGlobalTypes[:5]:
raise ValueError('{0} is not a supported energy keywords.\n Use any of the following: \n {1}'.format(
which, self.enGlobalTypes[:5]))
energy = None
if which == 'full':
temp = np.matrix(diff)
energy = 0.5 * ((temp * es) * temp.T)
energy = energy[0,0]
if which == 'diag':
energy = 0.5 * ((diff[0] ** 2 * es[0][0])
+ (diff[1] ** 2 * es[1][1]))
if which == 'stretch':
energy = 0.5 * (diff[0] ** 2 * es[0][0])
if which == 'twist':
energy = 0.5 * (diff[1] ** 2 * es[1][1])
if which == 'st_coupling':
energy = 0.5 * (diff[0] * diff[1] * es[0][1])
return energy | r"""Calculate energy for ``estype='ST'`` using a difference vector.
It is called in :meth:`dnaEY.getGlobalDeformationEnergy` for energy calculation of each frame.
Parameters
----------
diff : numpy.ndarray
Array of difference between minimum and current parameter values.
.. math::
\mathbf{x} = \begin{bmatrix}
(L_i - L_0) & (\phi_i - \phi_0)
\end{bmatrix}
es : numpy.ndarray
Elastic matrix. See in :meth:`dnaEY.getStretchTwistModulus` about elastic matrix.
which : str
For which type of motions, energy will be calculated.
See ``which`` parameter in :meth:`dnaEY.getGlobalDeformationEnergy` for keywords.
Return
------
energy : float
Deformation free energy value | Below is the the instruction that describes the task:
### Input:
r"""Calculate energy for ``estype='ST'`` using a difference vector.
It is called in :meth:`dnaEY.getGlobalDeformationEnergy` for energy calculation of each frame.
Parameters
----------
diff : numpy.ndarray
Array of difference between minimum and current parameter values.
.. math::
\mathbf{x} = \begin{bmatrix}
(L_i - L_0) & (\phi_i - \phi_0)
\end{bmatrix}
es : numpy.ndarray
Elastic matrix. See in :meth:`dnaEY.getStretchTwistModulus` about elastic matrix.
which : str
For which type of motions, energy will be calculated.
See ``which`` parameter in :meth:`dnaEY.getGlobalDeformationEnergy` for keywords.
Return
------
energy : float
Deformation free energy value
### Response:
def _calcEnergyStretchTwist(self, diff, es, which):
r"""Calculate energy for ``estype='ST'`` using a difference vector.
It is called in :meth:`dnaEY.getGlobalDeformationEnergy` for energy calculation of each frame.
Parameters
----------
diff : numpy.ndarray
Array of difference between minimum and current parameter values.
.. math::
\mathbf{x} = \begin{bmatrix}
(L_i - L_0) & (\phi_i - \phi_0)
\end{bmatrix}
es : numpy.ndarray
Elastic matrix. See in :meth:`dnaEY.getStretchTwistModulus` about elastic matrix.
which : str
For which type of motions, energy will be calculated.
See ``which`` parameter in :meth:`dnaEY.getGlobalDeformationEnergy` for keywords.
Return
------
energy : float
Deformation free energy value
"""
if which not in self.enGlobalTypes[:5]:
raise ValueError('{0} is not a supported energy keywords.\n Use any of the following: \n {1}'.format(
which, self.enGlobalTypes[:5]))
energy = None
if which == 'full':
temp = np.matrix(diff)
energy = 0.5 * ((temp * es) * temp.T)
energy = energy[0,0]
if which == 'diag':
energy = 0.5 * ((diff[0] ** 2 * es[0][0])
+ (diff[1] ** 2 * es[1][1]))
if which == 'stretch':
energy = 0.5 * (diff[0] ** 2 * es[0][0])
if which == 'twist':
energy = 0.5 * (diff[1] ** 2 * es[1][1])
if which == 'st_coupling':
energy = 0.5 * (diff[0] * diff[1] * es[0][1])
return energy |
def sort(self, column, order=Qt.AscendingOrder):
"""Reimplement Qt method"""
self.sourceModel().sort(column, order) | Reimplement Qt method | Below is the the instruction that describes the task:
### Input:
Reimplement Qt method
### Response:
def sort(self, column, order=Qt.AscendingOrder):
"""Reimplement Qt method"""
self.sourceModel().sort(column, order) |
def g_reuss(self):
"""
returns the G_r shear modulus
"""
return 15. / (8. * self.compliance_tensor.voigt[:3, :3].trace() -
4. * np.triu(self.compliance_tensor.voigt[:3, :3]).sum() +
3. * self.compliance_tensor.voigt[3:, 3:].trace()) | returns the G_r shear modulus | Below is the the instruction that describes the task:
### Input:
returns the G_r shear modulus
### Response:
def g_reuss(self):
"""
returns the G_r shear modulus
"""
return 15. / (8. * self.compliance_tensor.voigt[:3, :3].trace() -
4. * np.triu(self.compliance_tensor.voigt[:3, :3]).sum() +
3. * self.compliance_tensor.voigt[3:, 3:].trace()) |
def tsqr(a):
"""Perform a QR decomposition of a tall-skinny matrix.
Args:
a: A distributed matrix with shape MxN (suppose K = min(M, N)).
Returns:
A tuple of q (a DistArray) and r (a numpy array) satisfying the
following.
- If q_full = ray.get(DistArray, q).assemble(), then
q_full.shape == (M, K).
- np.allclose(np.dot(q_full.T, q_full), np.eye(K)) == True.
- If r_val = ray.get(np.ndarray, r), then r_val.shape == (K, N).
- np.allclose(r, np.triu(r)) == True.
"""
if len(a.shape) != 2:
raise Exception("tsqr requires len(a.shape) == 2, but a.shape is "
"{}".format(a.shape))
if a.num_blocks[1] != 1:
raise Exception("tsqr requires a.num_blocks[1] == 1, but a.num_blocks "
"is {}".format(a.num_blocks))
num_blocks = a.num_blocks[0]
K = int(np.ceil(np.log2(num_blocks))) + 1
q_tree = np.empty((num_blocks, K), dtype=object)
current_rs = []
for i in range(num_blocks):
block = a.objectids[i, 0]
q, r = ra.linalg.qr.remote(block)
q_tree[i, 0] = q
current_rs.append(r)
for j in range(1, K):
new_rs = []
for i in range(int(np.ceil(1.0 * len(current_rs) / 2))):
stacked_rs = ra.vstack.remote(*current_rs[(2 * i):(2 * i + 2)])
q, r = ra.linalg.qr.remote(stacked_rs)
q_tree[i, j] = q
new_rs.append(r)
current_rs = new_rs
assert len(current_rs) == 1, "len(current_rs) = " + str(len(current_rs))
# handle the special case in which the whole DistArray "a" fits in one
# block and has fewer rows than columns, this is a bit ugly so think about
# how to remove it
if a.shape[0] >= a.shape[1]:
q_shape = a.shape
else:
q_shape = [a.shape[0], a.shape[0]]
q_num_blocks = core.DistArray.compute_num_blocks(q_shape)
q_objectids = np.empty(q_num_blocks, dtype=object)
q_result = core.DistArray(q_shape, q_objectids)
# reconstruct output
for i in range(num_blocks):
q_block_current = q_tree[i, 0]
ith_index = i
for j in range(1, K):
if np.mod(ith_index, 2) == 0:
lower = [0, 0]
upper = [a.shape[1], core.BLOCK_SIZE]
else:
lower = [a.shape[1], 0]
upper = [2 * a.shape[1], core.BLOCK_SIZE]
ith_index //= 2
q_block_current = ra.dot.remote(
q_block_current,
ra.subarray.remote(q_tree[ith_index, j], lower, upper))
q_result.objectids[i] = q_block_current
r = current_rs[0]
return q_result, ray.get(r) | Perform a QR decomposition of a tall-skinny matrix.
Args:
a: A distributed matrix with shape MxN (suppose K = min(M, N)).
Returns:
A tuple of q (a DistArray) and r (a numpy array) satisfying the
following.
- If q_full = ray.get(DistArray, q).assemble(), then
q_full.shape == (M, K).
- np.allclose(np.dot(q_full.T, q_full), np.eye(K)) == True.
- If r_val = ray.get(np.ndarray, r), then r_val.shape == (K, N).
- np.allclose(r, np.triu(r)) == True. | Below is the instruction that describes the task:
### Input:
Perform a QR decomposition of a tall-skinny matrix.
Args:
a: A distributed matrix with shape MxN (suppose K = min(M, N)).
Returns:
A tuple of q (a DistArray) and r (a numpy array) satisfying the
following.
- If q_full = ray.get(DistArray, q).assemble(), then
q_full.shape == (M, K).
- np.allclose(np.dot(q_full.T, q_full), np.eye(K)) == True.
- If r_val = ray.get(np.ndarray, r), then r_val.shape == (K, N).
- np.allclose(r, np.triu(r)) == True.
### Response:
def tsqr(a):
"""Perform a QR decomposition of a tall-skinny matrix.
Args:
a: A distributed matrix with shape MxN (suppose K = min(M, N)).
Returns:
A tuple of q (a DistArray) and r (a numpy array) satisfying the
following.
- If q_full = ray.get(DistArray, q).assemble(), then
q_full.shape == (M, K).
- np.allclose(np.dot(q_full.T, q_full), np.eye(K)) == True.
- If r_val = ray.get(np.ndarray, r), then r_val.shape == (K, N).
- np.allclose(r, np.triu(r)) == True.
"""
if len(a.shape) != 2:
raise Exception("tsqr requires len(a.shape) == 2, but a.shape is "
"{}".format(a.shape))
if a.num_blocks[1] != 1:
raise Exception("tsqr requires a.num_blocks[1] == 1, but a.num_blocks "
"is {}".format(a.num_blocks))
num_blocks = a.num_blocks[0]
K = int(np.ceil(np.log2(num_blocks))) + 1
q_tree = np.empty((num_blocks, K), dtype=object)
current_rs = []
for i in range(num_blocks):
block = a.objectids[i, 0]
q, r = ra.linalg.qr.remote(block)
q_tree[i, 0] = q
current_rs.append(r)
for j in range(1, K):
new_rs = []
for i in range(int(np.ceil(1.0 * len(current_rs) / 2))):
stacked_rs = ra.vstack.remote(*current_rs[(2 * i):(2 * i + 2)])
q, r = ra.linalg.qr.remote(stacked_rs)
q_tree[i, j] = q
new_rs.append(r)
current_rs = new_rs
assert len(current_rs) == 1, "len(current_rs) = " + str(len(current_rs))
# handle the special case in which the whole DistArray "a" fits in one
# block and has fewer rows than columns, this is a bit ugly so think about
# how to remove it
if a.shape[0] >= a.shape[1]:
q_shape = a.shape
else:
q_shape = [a.shape[0], a.shape[0]]
q_num_blocks = core.DistArray.compute_num_blocks(q_shape)
q_objectids = np.empty(q_num_blocks, dtype=object)
q_result = core.DistArray(q_shape, q_objectids)
# reconstruct output
for i in range(num_blocks):
q_block_current = q_tree[i, 0]
ith_index = i
for j in range(1, K):
if np.mod(ith_index, 2) == 0:
lower = [0, 0]
upper = [a.shape[1], core.BLOCK_SIZE]
else:
lower = [a.shape[1], 0]
upper = [2 * a.shape[1], core.BLOCK_SIZE]
ith_index //= 2
q_block_current = ra.dot.remote(
q_block_current,
ra.subarray.remote(q_tree[ith_index, j], lower, upper))
q_result.objectids[i] = q_block_current
r = current_rs[0]
return q_result, ray.get(r) |
def _build_dummy_calls(self):
"""
Generate false if branch with dummy calls
Requires kerncraft.h to be included, which defines dummy(...) and var_false.
:return: dummy statement
"""
# Make sure nothing gets removed by inserting dummy calls
dummy_calls = []
for d in self.kernel_ast.block_items:
# Only consider toplevel declarations from kernel ast
if type(d) is not c_ast.Decl: continue
if type(d.type) is c_ast.ArrayDecl:
dummy_calls.append(c_ast.FuncCall(
c_ast.ID('dummy'),
c_ast.ExprList([c_ast.ID(d.name)])))
else:
dummy_calls.append(c_ast.FuncCall(
c_ast.ID('dummy'),
c_ast.ExprList([c_ast.UnaryOp('&', c_ast.ID(d.name))])))
dummy_stmt = c_ast.If(
cond=c_ast.ID('var_false'),
iftrue=c_ast.Compound(dummy_calls),
iffalse=None)
return dummy_stmt | Generate false if branch with dummy calls
Requires kerncraft.h to be included, which defines dummy(...) and var_false.
:return: dummy statement | Below is the instruction that describes the task:
### Input:
Generate false if branch with dummy calls
Requires kerncraft.h to be included, which defines dummy(...) and var_false.
:return: dummy statement
### Response:
def _build_dummy_calls(self):
"""
Generate false if branch with dummy calls
Requires kerncraft.h to be included, which defines dummy(...) and var_false.
:return: dummy statement
"""
# Make sure nothing gets removed by inserting dummy calls
dummy_calls = []
for d in self.kernel_ast.block_items:
# Only consider toplevel declarations from kernel ast
if type(d) is not c_ast.Decl: continue
if type(d.type) is c_ast.ArrayDecl:
dummy_calls.append(c_ast.FuncCall(
c_ast.ID('dummy'),
c_ast.ExprList([c_ast.ID(d.name)])))
else:
dummy_calls.append(c_ast.FuncCall(
c_ast.ID('dummy'),
c_ast.ExprList([c_ast.UnaryOp('&', c_ast.ID(d.name))])))
dummy_stmt = c_ast.If(
cond=c_ast.ID('var_false'),
iftrue=c_ast.Compound(dummy_calls),
iffalse=None)
return dummy_stmt |
def create_function(self, func, name=None, database=None):
"""
Creates a function within Impala
Parameters
----------
func : ImpalaUDF or ImpalaUDA
Created with wrap_udf or wrap_uda
name : string (optional)
database : string (optional)
"""
if name is None:
name = func.name
database = database or self.current_database
if isinstance(func, udf.ImpalaUDF):
stmt = ddl.CreateUDF(func, name=name, database=database)
elif isinstance(func, udf.ImpalaUDA):
stmt = ddl.CreateUDA(func, name=name, database=database)
else:
raise TypeError(func)
self._execute(stmt) | Creates a function within Impala
Parameters
----------
func : ImpalaUDF or ImpalaUDA
Created with wrap_udf or wrap_uda
name : string (optional)
database : string (optional) | Below is the instruction that describes the task:
### Input:
Creates a function within Impala
Parameters
----------
func : ImpalaUDF or ImpalaUDA
Created with wrap_udf or wrap_uda
name : string (optional)
database : string (optional)
### Response:
def create_function(self, func, name=None, database=None):
"""
Creates a function within Impala
Parameters
----------
func : ImpalaUDF or ImpalaUDA
Created with wrap_udf or wrap_uda
name : string (optional)
database : string (optional)
"""
if name is None:
name = func.name
database = database or self.current_database
if isinstance(func, udf.ImpalaUDF):
stmt = ddl.CreateUDF(func, name=name, database=database)
elif isinstance(func, udf.ImpalaUDA):
stmt = ddl.CreateUDA(func, name=name, database=database)
else:
raise TypeError(func)
self._execute(stmt) |
def de_duplicate(items):
"""Remove any duplicate item, preserving order
>>> de_duplicate([1, 2, 1, 2])
[1, 2]
"""
result = []
for item in items:
if item not in result:
result.append(item)
return result | Remove any duplicate item, preserving order
>>> de_duplicate([1, 2, 1, 2])
[1, 2] | Below is the instruction that describes the task:
### Input:
Remove any duplicate item, preserving order
>>> de_duplicate([1, 2, 1, 2])
[1, 2]
### Response:
def de_duplicate(items):
"""Remove any duplicate item, preserving order
>>> de_duplicate([1, 2, 1, 2])
[1, 2]
"""
result = []
for item in items:
if item not in result:
result.append(item)
return result |
def risearch(self):
"Instance of :class:`eulfedora.api.ResourceIndex`, with the same root url and credentials"
if self._risearch is None:
self._risearch = ResourceIndex(self.api.base_url, self.api.username, self.api.password)
return self._risearch | Instance of :class:`eulfedora.api.ResourceIndex`, with the same root url and credentials | Below is the instruction that describes the task:
### Input:
Instance of :class:`eulfedora.api.ResourceIndex`, with the same root url and credentials
### Response:
def risearch(self):
"Instance of :class:`eulfedora.api.ResourceIndex`, with the same root url and credentials"
if self._risearch is None:
self._risearch = ResourceIndex(self.api.base_url, self.api.username, self.api.password)
return self._risearch |
def save_data(self, trigger_id, **data):
"""
let's save the data
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean
"""
from th_joplin.models import Joplin
status = False
data['output_format'] = 'markdown_github'
title, content = super(ServiceJoplin, self).save_data(trigger_id, **data)
# get the data of this trigger
trigger = Joplin.objects.get(trigger_id=trigger_id)
status = self.joplin.create_note(title=title, body=content, parent_id=trigger.folder).status_code
if status == 200:
status = True
return status | let's save the data
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean | Below is the instruction that describes the task:
### Input:
let's save the data
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean
### Response:
def save_data(self, trigger_id, **data):
"""
let's save the data
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean
"""
from th_joplin.models import Joplin
status = False
data['output_format'] = 'markdown_github'
title, content = super(ServiceJoplin, self).save_data(trigger_id, **data)
# get the data of this trigger
trigger = Joplin.objects.get(trigger_id=trigger_id)
status = self.joplin.create_note(title=title, body=content, parent_id=trigger.folder).status_code
if status == 200:
status = True
return status |
def write(self):
"""Write the configuration."""
if self.dirty:
with open(self._cache_path, "w") as output_file:
self._config.write(output_file) | Write the configuration. | Below is the instruction that describes the task:
### Input:
Write the configuration.
### Response:
def write(self):
"""Write the configuration."""
if self.dirty:
with open(self._cache_path, "w") as output_file:
self._config.write(output_file) |
def _set_pos(self, pos):
"""
Set current position for scroll bar.
"""
if self._h < len(self._options):
pos *= len(self._options) - self._h
pos = int(round(max(0, pos), 0))
self._start_line = pos | Set current position for scroll bar. | Below is the instruction that describes the task:
### Input:
Set current position for scroll bar.
### Response:
def _set_pos(self, pos):
"""
Set current position for scroll bar.
"""
if self._h < len(self._options):
pos *= len(self._options) - self._h
pos = int(round(max(0, pos), 0))
self._start_line = pos |
def reset(self):
"""Resets references."""
self.indchar = None
self.comments = {}
self.refs = []
self.set_skips([])
self.docstring = ""
self.ichain_count = 0
self.tre_store_count = 0
self.case_check_count = 0
self.stmt_lambdas = []
if self.strict:
self.unused_imports = set()
self.bind() | Resets references. | Below is the instruction that describes the task:
### Input:
Resets references.
### Response:
def reset(self):
"""Resets references."""
self.indchar = None
self.comments = {}
self.refs = []
self.set_skips([])
self.docstring = ""
self.ichain_count = 0
self.tre_store_count = 0
self.case_check_count = 0
self.stmt_lambdas = []
if self.strict:
self.unused_imports = set()
self.bind() |
def run_container(image,
name=None,
skip_translate=None,
ignore_collisions=False,
validate_ip_addrs=True,
client_timeout=salt.utils.docker.CLIENT_TIMEOUT,
bg=False,
replace=False,
force=False,
networks=None,
**kwargs):
'''
.. versionadded:: 2018.3.0
Equivalent to ``docker run`` on the Docker CLI. Runs the container, waits
for it to exit, and returns the container's logs when complete.
.. note::
Not to be confused with :py:func:`docker.run
<salt.modules.dockermod.run>`, which provides a :py:func:`cmd.run
<salt.modules.cmdmod.run>`-like interface for executing commands in a
running container.
This function accepts the same arguments as :py:func:`docker.create
<salt.modules.dockermod.create>`, with the exception of ``start``. In
addition, it accepts the arguments from :py:func:`docker.logs
<salt.modules.dockermod.logs>`, with the exception of ``follow``, to
control how logs are returned. Finally, the ``bg`` argument described below
can be used to optionally run the container in the background (the default
behavior is to block until the container exits).
bg : False
If ``True``, this function will not wait for the container to exit and
will not return its logs. It will however return the container's name
and ID, allowing for :py:func:`docker.logs
<salt.modules.dockermod.logs>` to be used to view the logs.
.. note::
The logs will be inaccessible once the container exits if
``auto_remove`` is set to ``True``, so keep this in mind.
replace : False
If ``True``, and if the named container already exists, this will
remove the existing container. The default behavior is to return a
``False`` result when the container already exists.
force : False
If ``True``, and the named container already exists, *and* ``replace``
is also set to ``True``, then the container will be forcibly removed.
Otherwise, the state will not proceed and will return a ``False``
result.
networks
Networks to which the container should be connected. If automatic IP
configuration is being used, the networks can be a simple list of
network names. If custom IP configuration is being used, then this
argument must be passed as a dictionary.
CLI Examples:
.. code-block:: bash
salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh
# Run container in the background
salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh bg=True
# Connecting to two networks using automatic IP configuration
salt myminion docker.run_container myuser/myimage command='perl /scripts/sync.py' networks=net1,net2
# net1 using automatic IP, net2 using static IPv4 address
salt myminion docker.run_container myuser/myimage command='perl /scripts/sync.py' networks='{"net1": {}, "net2": {"ipv4_address": "192.168.27.12"}}'
'''
if kwargs.pop('inspect', True) and not resolve_image_id(image):
pull(image, client_timeout=client_timeout)
removed_ids = None
if name is not None:
try:
pre_state = __salt__['docker.state'](name)
except CommandExecutionError:
pass
else:
if pre_state == 'running' and not (replace and force):
raise CommandExecutionError(
'Container \'{0}\' exists and is running. Run with '
'replace=True and force=True to force removal of the '
'existing container.'.format(name)
)
elif not replace:
raise CommandExecutionError(
'Container \'{0}\' exists. Run with replace=True to '
'remove the existing container'.format(name)
)
else:
# We don't have to try/except this, we want it to raise a
# CommandExecutionError if we fail to remove the existing
# container so that we gracefully abort before attempting to go
# any further.
removed_ids = rm_(name, force=force)
log_kwargs = {}
for argname in get_client_args('logs')['logs']:
try:
log_kwargs[argname] = kwargs.pop(argname)
except KeyError:
pass
# Ignore the stream argument if passed
log_kwargs.pop('stream', None)
kwargs, unused_kwargs = _get_create_kwargs(
skip_translate=skip_translate,
ignore_collisions=ignore_collisions,
validate_ip_addrs=validate_ip_addrs,
**kwargs)
# _get_create_kwargs() will have processed auto_remove and put it into the
# host_config, so check the host_config to see whether or not auto_remove
# was enabled.
auto_remove = kwargs.get('host_config', {}).get('AutoRemove', False)
if unused_kwargs:
log.warning(
'The following arguments were ignored because they are not '
'recognized by docker-py: %s', sorted(unused_kwargs)
)
if networks:
if isinstance(networks, six.string_types):
networks = {x: {} for x in networks.split(',')}
if not isinstance(networks, dict) \
or not all(isinstance(x, dict)
for x in six.itervalues(networks)):
raise SaltInvocationError('Invalid format for networks argument')
log.debug(
'docker.create: creating container %susing the following '
'arguments: %s',
'with name \'{0}\' '.format(name) if name is not None else '',
kwargs
)
time_started = time.time()
# Create the container
ret = _client_wrapper('create_container', image, name=name, **kwargs)
if removed_ids:
ret['Replaces'] = removed_ids
if name is None:
name = inspect_container(ret['Id'])['Name'].lstrip('/')
ret['Name'] = name
def _append_warning(ret, msg):
warnings = ret.pop('Warnings', None)
if warnings is None:
warnings = [msg]
elif isinstance(ret, list):
warnings.append(msg)
else:
warnings = [warnings, msg]
ret['Warnings'] = warnings
exc_info = {'return': ret}
try:
if networks:
try:
for net_name, net_conf in six.iteritems(networks):
__salt__['docker.connect_container_to_network'](
ret['Id'],
net_name,
**net_conf)
except CommandExecutionError as exc:
# Make an effort to remove the container if auto_remove was
# enabled
if auto_remove:
try:
rm_(name)
except CommandExecutionError as rm_exc:
exc_info.setdefault('other_errors', []).append(
'Failed to auto_remove container: {0}'.format(
rm_exc)
)
# Raise original exception with additonal info
raise CommandExecutionError(exc.__str__(), info=exc_info)
# Start the container
output = []
start_(ret['Id'])
if not bg:
# Can't use logs() here because we've disabled "stream" in that
# function. Also, note that if you want to troubleshoot this for loop
# in a debugger like pdb or pudb, you'll want to use auto_remove=False
# when running the function, since the container will likely exit
# before you finish stepping through with a debugger. If the container
# exits during iteration, the next iteration of the generator will
# raise an exception since the container will no longer exist.
try:
for line in _client_wrapper('logs',
ret['Id'],
stream=True,
timestamps=False):
output.append(salt.utils.stringutils.to_unicode(line))
except CommandExecutionError:
msg = (
'Failed to get logs from container. This may be because '
'the container exited before Salt was able to attach to '
'it to retrieve the logs. Consider setting auto_remove '
'to False.'
)
_append_warning(ret, msg)
# Container has exited, note the elapsed time
ret['Time_Elapsed'] = time.time() - time_started
_clear_context()
if not bg:
ret['Logs'] = ''.join(output)
if not auto_remove:
try:
cinfo = inspect_container(ret['Id'])
except CommandExecutionError:
_append_warning(
ret, 'Failed to inspect container after running')
else:
cstate = cinfo.get('State', {})
cstatus = cstate.get('Status')
if cstatus != 'exited':
_append_warning(
ret, 'Container state is not \'exited\'')
ret['ExitCode'] = cstate.get('ExitCode')
except CommandExecutionError as exc:
try:
exc_info.update(exc.info)
except (TypeError, ValueError):
# In the event exc.info wasn't a dict (extremely unlikely), append
# it to other_errors as a fallback.
exc_info.setdefault('other_errors', []).append(exc.info)
# Re-raise with all of the available additional info
raise CommandExecutionError(exc.__str__(), info=exc_info)
return ret | .. versionadded:: 2018.3.0
Equivalent to ``docker run`` on the Docker CLI. Runs the container, waits
for it to exit, and returns the container's logs when complete.
.. note::
Not to be confused with :py:func:`docker.run
<salt.modules.dockermod.run>`, which provides a :py:func:`cmd.run
<salt.modules.cmdmod.run>`-like interface for executing commands in a
running container.
This function accepts the same arguments as :py:func:`docker.create
<salt.modules.dockermod.create>`, with the exception of ``start``. In
addition, it accepts the arguments from :py:func:`docker.logs
<salt.modules.dockermod.logs>`, with the exception of ``follow``, to
control how logs are returned. Finally, the ``bg`` argument described below
can be used to optionally run the container in the background (the default
behavior is to block until the container exits).
bg : False
If ``True``, this function will not wait for the container to exit and
will not return its logs. It will however return the container's name
and ID, allowing for :py:func:`docker.logs
<salt.modules.dockermod.logs>` to be used to view the logs.
.. note::
The logs will be inaccessible once the container exits if
``auto_remove`` is set to ``True``, so keep this in mind.
replace : False
If ``True``, and if the named container already exists, this will
remove the existing container. The default behavior is to return a
``False`` result when the container already exists.
force : False
If ``True``, and the named container already exists, *and* ``replace``
is also set to ``True``, then the container will be forcibly removed.
Otherwise, the state will not proceed and will return a ``False``
result.
networks
Networks to which the container should be connected. If automatic IP
configuration is being used, the networks can be a simple list of
network names. If custom IP configuration is being used, then this
argument must be passed as a dictionary.
CLI Examples:
.. code-block:: bash
salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh
# Run container in the background
salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh bg=True
# Connecting to two networks using automatic IP configuration
salt myminion docker.run_container myuser/myimage command='perl /scripts/sync.py' networks=net1,net2
# net1 using automatic IP, net2 using static IPv4 address
salt myminion docker.run_container myuser/myimage command='perl /scripts/sync.py' networks='{"net1": {}, "net2": {"ipv4_address": "192.168.27.12"}}' | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2018.3.0
Equivalent to ``docker run`` on the Docker CLI. Runs the container, waits
for it to exit, and returns the container's logs when complete.
.. note::
Not to be confused with :py:func:`docker.run
<salt.modules.dockermod.run>`, which provides a :py:func:`cmd.run
<salt.modules.cmdmod.run>`-like interface for executing commands in a
running container.
This function accepts the same arguments as :py:func:`docker.create
<salt.modules.dockermod.create>`, with the exception of ``start``. In
addition, it accepts the arguments from :py:func:`docker.logs
<salt.modules.dockermod.logs>`, with the exception of ``follow``, to
control how logs are returned. Finally, the ``bg`` argument described below
can be used to optionally run the container in the background (the default
behavior is to block until the container exits).
bg : False
If ``True``, this function will not wait for the container to exit and
will not return its logs. It will however return the container's name
and ID, allowing for :py:func:`docker.logs
<salt.modules.dockermod.logs>` to be used to view the logs.
.. note::
The logs will be inaccessible once the container exits if
``auto_remove`` is set to ``True``, so keep this in mind.
replace : False
If ``True``, and if the named container already exists, this will
remove the existing container. The default behavior is to return a
``False`` result when the container already exists.
force : False
If ``True``, and the named container already exists, *and* ``replace``
is also set to ``True``, then the container will be forcibly removed.
Otherwise, the state will not proceed and will return a ``False``
result.
networks
Networks to which the container should be connected. If automatic IP
configuration is being used, the networks can be a simple list of
network names. If custom IP configuration is being used, then this
argument must be passed as a dictionary.
CLI Examples:
.. code-block:: bash
salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh
# Run container in the background
salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh bg=True
# Connecting to two networks using automatic IP configuration
salt myminion docker.run_container myuser/myimage command='perl /scripts/sync.py' networks=net1,net2
# net1 using automatic IP, net2 using static IPv4 address
salt myminion docker.run_container myuser/myimage command='perl /scripts/sync.py' networks='{"net1": {}, "net2": {"ipv4_address": "192.168.27.12"}}'
### Response:
def run_container(image,
name=None,
skip_translate=None,
ignore_collisions=False,
validate_ip_addrs=True,
client_timeout=salt.utils.docker.CLIENT_TIMEOUT,
bg=False,
replace=False,
force=False,
networks=None,
**kwargs):
'''
.. versionadded:: 2018.3.0
Equivalent to ``docker run`` on the Docker CLI. Runs the container, waits
for it to exit, and returns the container's logs when complete.
.. note::
Not to be confused with :py:func:`docker.run
<salt.modules.dockermod.run>`, which provides a :py:func:`cmd.run
<salt.modules.cmdmod.run>`-like interface for executing commands in a
running container.
This function accepts the same arguments as :py:func:`docker.create
<salt.modules.dockermod.create>`, with the exception of ``start``. In
addition, it accepts the arguments from :py:func:`docker.logs
<salt.modules.dockermod.logs>`, with the exception of ``follow``, to
control how logs are returned. Finally, the ``bg`` argument described below
can be used to optionally run the container in the background (the default
behavior is to block until the container exits).
bg : False
If ``True``, this function will not wait for the container to exit and
will not return its logs. It will however return the container's name
and ID, allowing for :py:func:`docker.logs
<salt.modules.dockermod.logs>` to be used to view the logs.
.. note::
The logs will be inaccessible once the container exits if
``auto_remove`` is set to ``True``, so keep this in mind.
replace : False
If ``True``, and if the named container already exists, this will
remove the existing container. The default behavior is to return a
``False`` result when the container already exists.
force : False
If ``True``, and the named container already exists, *and* ``replace``
is also set to ``True``, then the container will be forcibly removed.
Otherwise, the state will not proceed and will return a ``False``
result.
networks
Networks to which the container should be connected. If automatic IP
configuration is being used, the networks can be a simple list of
network names. If custom IP configuration is being used, then this
argument must be passed as a dictionary.
CLI Examples:
.. code-block:: bash
salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh
# Run container in the background
salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh bg=True
# Connecting to two networks using automatic IP configuration
salt myminion docker.run_container myuser/myimage command='perl /scripts/sync.py' networks=net1,net2
# net1 using automatic IP, net2 using static IPv4 address
salt myminion docker.run_container myuser/myimage command='perl /scripts/sync.py' networks='{"net1": {}, "net2": {"ipv4_address": "192.168.27.12"}}'
'''
if kwargs.pop('inspect', True) and not resolve_image_id(image):
pull(image, client_timeout=client_timeout)
removed_ids = None
if name is not None:
try:
pre_state = __salt__['docker.state'](name)
except CommandExecutionError:
pass
else:
if pre_state == 'running' and not (replace and force):
raise CommandExecutionError(
'Container \'{0}\' exists and is running. Run with '
'replace=True and force=True to force removal of the '
'existing container.'.format(name)
)
elif not replace:
raise CommandExecutionError(
'Container \'{0}\' exists. Run with replace=True to '
'remove the existing container'.format(name)
)
else:
# We don't have to try/except this, we want it to raise a
# CommandExecutionError if we fail to remove the existing
# container so that we gracefully abort before attempting to go
# any further.
removed_ids = rm_(name, force=force)
log_kwargs = {}
for argname in get_client_args('logs')['logs']:
try:
log_kwargs[argname] = kwargs.pop(argname)
except KeyError:
pass
# Ignore the stream argument if passed
log_kwargs.pop('stream', None)
kwargs, unused_kwargs = _get_create_kwargs(
skip_translate=skip_translate,
ignore_collisions=ignore_collisions,
validate_ip_addrs=validate_ip_addrs,
**kwargs)
# _get_create_kwargs() will have processed auto_remove and put it into the
# host_config, so check the host_config to see whether or not auto_remove
# was enabled.
auto_remove = kwargs.get('host_config', {}).get('AutoRemove', False)
if unused_kwargs:
log.warning(
'The following arguments were ignored because they are not '
'recognized by docker-py: %s', sorted(unused_kwargs)
)
if networks:
if isinstance(networks, six.string_types):
networks = {x: {} for x in networks.split(',')}
if not isinstance(networks, dict) \
or not all(isinstance(x, dict)
for x in six.itervalues(networks)):
raise SaltInvocationError('Invalid format for networks argument')
log.debug(
'docker.create: creating container %susing the following '
'arguments: %s',
'with name \'{0}\' '.format(name) if name is not None else '',
kwargs
)
time_started = time.time()
# Create the container
ret = _client_wrapper('create_container', image, name=name, **kwargs)
if removed_ids:
ret['Replaces'] = removed_ids
if name is None:
name = inspect_container(ret['Id'])['Name'].lstrip('/')
ret['Name'] = name
def _append_warning(ret, msg):
warnings = ret.pop('Warnings', None)
if warnings is None:
warnings = [msg]
elif isinstance(ret, list):
warnings.append(msg)
else:
warnings = [warnings, msg]
ret['Warnings'] = warnings
exc_info = {'return': ret}
try:
if networks:
try:
for net_name, net_conf in six.iteritems(networks):
__salt__['docker.connect_container_to_network'](
ret['Id'],
net_name,
**net_conf)
except CommandExecutionError as exc:
# Make an effort to remove the container if auto_remove was
# enabled
if auto_remove:
try:
rm_(name)
except CommandExecutionError as rm_exc:
exc_info.setdefault('other_errors', []).append(
'Failed to auto_remove container: {0}'.format(
rm_exc)
)
# Raise original exception with additonal info
raise CommandExecutionError(exc.__str__(), info=exc_info)
# Start the container
output = []
start_(ret['Id'])
if not bg:
# Can't use logs() here because we've disabled "stream" in that
# function. Also, note that if you want to troubleshoot this for loop
# in a debugger like pdb or pudb, you'll want to use auto_remove=False
# when running the function, since the container will likely exit
# before you finish stepping through with a debugger. If the container
# exits during iteration, the next iteration of the generator will
# raise an exception since the container will no longer exist.
try:
for line in _client_wrapper('logs',
ret['Id'],
stream=True,
timestamps=False):
output.append(salt.utils.stringutils.to_unicode(line))
except CommandExecutionError:
msg = (
'Failed to get logs from container. This may be because '
'the container exited before Salt was able to attach to '
'it to retrieve the logs. Consider setting auto_remove '
'to False.'
)
_append_warning(ret, msg)
# Container has exited, note the elapsed time
ret['Time_Elapsed'] = time.time() - time_started
_clear_context()
if not bg:
ret['Logs'] = ''.join(output)
if not auto_remove:
try:
cinfo = inspect_container(ret['Id'])
except CommandExecutionError:
_append_warning(
ret, 'Failed to inspect container after running')
else:
cstate = cinfo.get('State', {})
cstatus = cstate.get('Status')
if cstatus != 'exited':
_append_warning(
ret, 'Container state is not \'exited\'')
ret['ExitCode'] = cstate.get('ExitCode')
except CommandExecutionError as exc:
try:
exc_info.update(exc.info)
except (TypeError, ValueError):
# In the event exc.info wasn't a dict (extremely unlikely), append
# it to other_errors as a fallback.
exc_info.setdefault('other_errors', []).append(exc.info)
# Re-raise with all of the available additional info
raise CommandExecutionError(exc.__str__(), info=exc_info)
return ret |
def AddToLayout(self, layout):
    """
    Add one form row per parameter to the given layout.

    Arguments:
    layout -- a QFormLayout instance; receives a (caption, widget) row
        for every entry in ``self.params``.
    """
    for param in self.params:
        # Each parameter builds its own editor widget.
        widget = param.RenderWidget()
layout.addRow(param.caption, widget) | Arguments:
layout -- a QFormLayout instance | Below is the instruction that describes the task:
### Input:
Arguments:
layout -- a QFormLayout instance
### Response:
def AddToLayout(self, layout):
"""
Arguments:
layout -- a QFormLayout instance
"""
for param in self.params:
widget = param.RenderWidget()
layout.addRow(param.caption, widget) |
def add_option(self, name, *args, **kwargs):
    """add an option to the namespace. This can take two forms:
    'name' is a string representing the name of an option and the
    kwargs are its parameters, or 'name' is an instance of an Option
    object

    Dotted names (e.g. 'a.b.opt') create intermediate Namespace objects
    as needed; the option is stored on the leaf namespace under its
    final path component.
    """
    if isinstance(name, Option):
        # Caller supplied a ready-made Option; take its name as the path.
        an_option = name
        name = an_option.name
    else:
        an_option = Option(name, *args, **kwargs)
    current_namespace = self
    name_parts = name.split('.')
    # Walk every path component except the last, creating namespaces
    # along the way so nested lookups never fail.
    for a_path_component in name_parts[:-1]:
        if a_path_component not in current_namespace:
            current_namespace[a_path_component] = Namespace()
        current_namespace = current_namespace[a_path_component]
    # The option's stored name is only the leaf component.
    an_option.name = name_parts[-1]
    setattr(current_namespace, an_option.name, an_option)
return an_option | add an option to the namespace. This can take two forms:
'name' is a string representing the name of an option and the
kwargs are its parameters, or 'name' is an instance of an Option
object | Below is the the instruction that describes the task:
### Input:
add an option to the namespace. This can take two forms:
'name' is a string representing the name of an option and the
kwargs are its parameters, or 'name' is an instance of an Option
object
### Response:
def add_option(self, name, *args, **kwargs):
"""add an option to the namespace. This can take two forms:
'name' is a string representing the name of an option and the
kwargs are its parameters, or 'name' is an instance of an Option
object
"""
if isinstance(name, Option):
an_option = name
name = an_option.name
else:
an_option = Option(name, *args, **kwargs)
current_namespace = self
name_parts = name.split('.')
for a_path_component in name_parts[:-1]:
if a_path_component not in current_namespace:
current_namespace[a_path_component] = Namespace()
current_namespace = current_namespace[a_path_component]
an_option.name = name_parts[-1]
setattr(current_namespace, an_option.name, an_option)
return an_option |
async def disconnect(self, sid, namespace=None):
    """Disconnect a client.

    :param sid: Session ID of the client.
    :param namespace: The Socket.IO namespace to disconnect. If this
                      argument is omitted the default namespace is used.

    Note: this method is a coroutine.
    """
    namespace = namespace or '/'
    # Only act when the client is actually connected to that namespace;
    # disconnecting an unknown sid is a silent no-op.
    if self.manager.is_connected(sid, namespace=namespace):
        self.logger.info('Disconnecting %s [%s]', sid, namespace)
        # Mark the disconnect as in progress before notifying the client.
        self.manager.pre_disconnect(sid, namespace=namespace)
        await self._send_packet(sid, packet.Packet(packet.DISCONNECT,
                                                   namespace=namespace))
        # Fire the application-level 'disconnect' handler before the
        # manager drops the client.
        await self._trigger_event('disconnect', namespace, sid)
self.manager.disconnect(sid, namespace=namespace) | Disconnect a client.
:param sid: Session ID of the client.
:param namespace: The Socket.IO namespace to disconnect. If this
argument is omitted the default namespace is used.
Note: this method is a coroutine. | Below is the instruction that describes the task:
### Input:
Disconnect a client.
:param sid: Session ID of the client.
:param namespace: The Socket.IO namespace to disconnect. If this
argument is omitted the default namespace is used.
Note: this method is a coroutine.
### Response:
async def disconnect(self, sid, namespace=None):
"""Disconnect a client.
:param sid: Session ID of the client.
:param namespace: The Socket.IO namespace to disconnect. If this
argument is omitted the default namespace is used.
Note: this method is a coroutine.
"""
namespace = namespace or '/'
if self.manager.is_connected(sid, namespace=namespace):
self.logger.info('Disconnecting %s [%s]', sid, namespace)
self.manager.pre_disconnect(sid, namespace=namespace)
await self._send_packet(sid, packet.Packet(packet.DISCONNECT,
namespace=namespace))
await self._trigger_event('disconnect', namespace, sid)
self.manager.disconnect(sid, namespace=namespace) |
def update(self, sample_sid=values.unset, status=values.unset):
    """
    Update the QueryInstance

    Delegates to the context proxy; parameters left as ``values.unset``
    are not modified on the remote resource.

    :param unicode sample_sid: The SID of an optional reference to the Sample created from the query
    :param unicode status: The new status of the resource

    :returns: Updated QueryInstance
    :rtype: twilio.rest.autopilot.v1.assistant.query.QueryInstance
    """
return self._proxy.update(sample_sid=sample_sid, status=status, ) | Update the QueryInstance
:param unicode sample_sid: The SID of an optional reference to the Sample created from the query
:param unicode status: The new status of the resource
:returns: Updated QueryInstance
:rtype: twilio.rest.autopilot.v1.assistant.query.QueryInstance | Below is the instruction that describes the task:
### Input:
Update the QueryInstance
:param unicode sample_sid: The SID of an optional reference to the Sample created from the query
:param unicode status: The new status of the resource
:returns: Updated QueryInstance
:rtype: twilio.rest.autopilot.v1.assistant.query.QueryInstance
### Response:
def update(self, sample_sid=values.unset, status=values.unset):
"""
Update the QueryInstance
:param unicode sample_sid: The SID of an optional reference to the Sample created from the query
:param unicode status: The new status of the resource
:returns: Updated QueryInstance
:rtype: twilio.rest.autopilot.v1.assistant.query.QueryInstance
"""
return self._proxy.update(sample_sid=sample_sid, status=status, ) |
def get_file(self,
             path,
             dest='',
             makedirs=False,
             saltenv='base',
             gzip=None,
             cachedir=None):
    '''
    Copies a file from the local files or master depending on
    implementation

    Abstract: concrete client classes must override this method.
    '''
raise NotImplementedError | Copies a file from the local files or master depending on
implementation | Below is the instruction that describes the task:
### Input:
Copies a file from the local files or master depending on
implementation
### Response:
def get_file(self,
path,
dest='',
makedirs=False,
saltenv='base',
gzip=None,
cachedir=None):
'''
Copies a file from the local files or master depending on
implementation
'''
raise NotImplementedError |
def get_alter_table_sql(self, diff):
    """
    Get the ALTER TABLE SQL statement

    SQLite cannot alter or drop columns in place, so when the simple
    ADD-COLUMN-only path does not apply this emulates the change by
    copying the data into a temporary table, dropping and recreating the
    table with the new definition, and copying the rows back.

    :param diff: The table diff
    :type diff: orator.dbal.table_diff.TableDiff

    :rtype: list
    """
    # Fast path: pure column additions can be expressed with ALTER TABLE.
    sql = self._get_simple_alter_table_sql(diff)
    if sql is not False:
        return sql

    from_table = diff.from_table
    if not isinstance(from_table, Table):
        raise DBALException(
            "SQLite platform requires for the alter table the table diff "
            "referencing the original table"
        )

    table = from_table.clone()

    # Build the new column set plus old->new quoted-name mappings; the
    # mappings drive the data-copy SELECT/INSERT statements below.
    columns = OrderedDict()
    old_column_names = OrderedDict()
    new_column_names = OrderedDict()

    for column_name, column in table.get_columns().items():
        column_name = column_name.lower()
        columns[column_name] = column
        old_column_names[column_name] = column.get_quoted_name(self)
        new_column_names[column_name] = column.get_quoted_name(self)

    # Dropped columns disappear from the new table and from both mappings.
    for column_name, column in diff.removed_columns.items():
        column_name = column_name.lower()
        if column_name in columns:
            del columns[column_name]
            del old_column_names[column_name]
            del new_column_names[column_name]

    # Renamed columns keep their old name on the source side of the copy
    # but use the new quoted name in the destination column list.
    for old_column_name, column in diff.renamed_columns.items():
        old_column_name = old_column_name.lower()
        if old_column_name in columns:
            del columns[old_column_name]

        columns[column.get_name().lower()] = column

        if old_column_name in new_column_names:
            new_column_names[old_column_name] = column.get_quoted_name(self)

    # Changed columns are replaced by their new definition.
    for old_column_name, column_diff in diff.changed_columns.items():
        if old_column_name in columns:
            del columns[old_column_name]

        columns[column_diff.column.get_name().lower()] = column_diff.column

        if old_column_name in new_column_names:
            new_column_names[old_column_name] = column_diff.column.get_quoted_name(
                self
            )

    for column_name, column in diff.added_columns.items():
        columns[column_name.lower()] = column

    data_table = Table("__temp__" + table.get_name())

    new_table = Table(
        table.get_quoted_name(self),
        columns,
        self._get_primary_index_in_altered_table(diff),
        self._get_foreign_keys_in_altered_table(diff),
        table.get_options(),
    )
    new_table.add_option("alter", True)

    sql = self.get_pre_alter_table_index_foreign_key_sql(diff)
    # Stash the existing rows, drop and recreate the table, then copy the
    # rows back using the old->new column-name mappings.
    sql.append(
        "CREATE TEMPORARY TABLE %s AS SELECT %s FROM %s"
        % (
            data_table.get_quoted_name(self),
            ", ".join(old_column_names.values()),
            table.get_quoted_name(self),
        )
    )
    sql.append(self.get_drop_table_sql(from_table))

    sql += self.get_create_table_sql(new_table)
    sql.append(
        "INSERT INTO %s (%s) SELECT %s FROM %s"
        % (
            new_table.get_quoted_name(self),
            ", ".join(new_column_names.values()),
            ", ".join(old_column_names.values()),
            data_table.get_name(),
        )
    )
    sql.append(self.get_drop_table_sql(data_table))

    sql += self.get_post_alter_table_index_foreign_key_sql(diff)
return sql | Get the ALTER TABLE SQL statement
:param diff: The table diff
:type diff: orator.dbal.table_diff.TableDiff
:rtype: list | Below is the instruction that describes the task:
### Input:
Get the ALTER TABLE SQL statement
:param diff: The table diff
:type diff: orator.dbal.table_diff.TableDiff
:rtype: list
### Response:
def get_alter_table_sql(self, diff):
"""
Get the ALTER TABLE SQL statement
:param diff: The table diff
:type diff: orator.dbal.table_diff.TableDiff
:rtype: list
"""
sql = self._get_simple_alter_table_sql(diff)
if sql is not False:
return sql
from_table = diff.from_table
if not isinstance(from_table, Table):
raise DBALException(
"SQLite platform requires for the alter table the table diff "
"referencing the original table"
)
table = from_table.clone()
columns = OrderedDict()
old_column_names = OrderedDict()
new_column_names = OrderedDict()
column_sql = []
for column_name, column in table.get_columns().items():
column_name = column_name.lower()
columns[column_name] = column
old_column_names[column_name] = column.get_quoted_name(self)
new_column_names[column_name] = column.get_quoted_name(self)
for column_name, column in diff.removed_columns.items():
column_name = column_name.lower()
if column_name in columns:
del columns[column_name]
del old_column_names[column_name]
del new_column_names[column_name]
for old_column_name, column in diff.renamed_columns.items():
old_column_name = old_column_name.lower()
if old_column_name in columns:
del columns[old_column_name]
columns[column.get_name().lower()] = column
if old_column_name in new_column_names:
new_column_names[old_column_name] = column.get_quoted_name(self)
for old_column_name, column_diff in diff.changed_columns.items():
if old_column_name in columns:
del columns[old_column_name]
columns[column_diff.column.get_name().lower()] = column_diff.column
if old_column_name in new_column_names:
new_column_names[old_column_name] = column_diff.column.get_quoted_name(
self
)
for column_name, column in diff.added_columns.items():
columns[column_name.lower()] = column
table_sql = []
data_table = Table("__temp__" + table.get_name())
new_table = Table(
table.get_quoted_name(self),
columns,
self._get_primary_index_in_altered_table(diff),
self._get_foreign_keys_in_altered_table(diff),
table.get_options(),
)
new_table.add_option("alter", True)
sql = self.get_pre_alter_table_index_foreign_key_sql(diff)
sql.append(
"CREATE TEMPORARY TABLE %s AS SELECT %s FROM %s"
% (
data_table.get_quoted_name(self),
", ".join(old_column_names.values()),
table.get_quoted_name(self),
)
)
sql.append(self.get_drop_table_sql(from_table))
sql += self.get_create_table_sql(new_table)
sql.append(
"INSERT INTO %s (%s) SELECT %s FROM %s"
% (
new_table.get_quoted_name(self),
", ".join(new_column_names.values()),
", ".join(old_column_names.values()),
data_table.get_name(),
)
)
sql.append(self.get_drop_table_sql(data_table))
sql += self.get_post_alter_table_index_foreign_key_sql(diff)
return sql |
def HasWarnings(self):
  """Determines if a store contains extraction warnings.

  Returns:
    bool: True if the store contains extraction warnings.
  """
  # To support older storage versions, check for the now deprecated
  # extraction errors; they count as warnings for backward compatibility.
  has_errors = self._HasAttributeContainers(
      self._CONTAINER_TYPE_EXTRACTION_ERROR)
  if has_errors:
    return True
return self._HasAttributeContainers(self._CONTAINER_TYPE_EXTRACTION_WARNING) | Determines if a store contains extraction warnings.
Returns:
bool: True if the store contains extraction warnings. | Below is the instruction that describes the task:
### Input:
Determines if a store contains extraction warnings.
Returns:
bool: True if the store contains extraction warnings.
### Response:
def HasWarnings(self):
"""Determines if a store contains extraction warnings.
Returns:
bool: True if the store contains extraction warnings.
"""
# To support older storage versions, check for the now deprecated
# extraction errors.
has_errors = self._HasAttributeContainers(
self._CONTAINER_TYPE_EXTRACTION_ERROR)
if has_errors:
return True
return self._HasAttributeContainers(self._CONTAINER_TYPE_EXTRACTION_WARNING) |
def percentile(sorted_values, p):
    """Calculate the percentile using the nearest rank method.

    ``sorted_values`` must already be sorted in ascending order.

    >>> percentile([15, 20, 35, 40, 50], 50)
    35
    >>> percentile([15, 20, 35, 40, 50], 40)
    20
    >>> percentile([], 90)
    Traceback (most recent call last):
    ...
    ValueError: Too few data points (0) for 90th percentile
    """
    size = len(sorted_values)
    # Nearest-rank 0-based index: p percent of the way through the data,
    # shifted by 0.5 so int() truncation picks the nearest rank.
    idx = (p / 100.0) * size - 0.5
    if idx < 0 or idx > size:
        raise ValueError('Too few data points ({}) for {}th percentile'.format(size, p))
return sorted_values[int(idx)] | Calculate the percentile using the nearest rank method.
>>> percentile([15, 20, 35, 40, 50], 50)
35
>>> percentile([15, 20, 35, 40, 50], 40)
20
>>> percentile([], 90)
Traceback (most recent call last):
...
ValueError: Too few data points (0) for 90th percentile | Below is the instruction that describes the task:
### Input:
Calculate the percentile using the nearest rank method.
>>> percentile([15, 20, 35, 40, 50], 50)
35
>>> percentile([15, 20, 35, 40, 50], 40)
20
>>> percentile([], 90)
Traceback (most recent call last):
...
ValueError: Too few data points (0) for 90th percentile
### Response:
def percentile(sorted_values, p):
"""Calculate the percentile using the nearest rank method.
>>> percentile([15, 20, 35, 40, 50], 50)
35
>>> percentile([15, 20, 35, 40, 50], 40)
20
>>> percentile([], 90)
Traceback (most recent call last):
...
ValueError: Too few data points (0) for 90th percentile
"""
size = len(sorted_values)
idx = (p / 100.0) * size - 0.5
if idx < 0 or idx > size:
raise ValueError('Too few data points ({}) for {}th percentile'.format(size, p))
return sorted_values[int(idx)] |
def _set_id_from_xml_frameid(self, xml, xmlpath, var):
    '''
    Set a single variable with the frameids of matching entity

    Looks up the first element matching ``xmlpath`` under ``xml`` and,
    if found, stores its ``frameid`` attribute on ``self`` under the
    attribute name ``var``. Does nothing when no element matches.
    '''
    e = xml.find(xmlpath)
    if e is not None:
setattr(self, var, e.attrib['frameid']) | Set a single variable with the frameids of matching entity | Below is the the instruction that describes the task:
### Input:
Set a single variable with the frameids of matching entity
### Response:
def _set_id_from_xml_frameid(self, xml, xmlpath, var):
'''
Set a single variable with the frameids of matching entity
'''
e = xml.find(xmlpath)
if e is not None:
setattr(self, var, e.attrib['frameid']) |
def pad(self, minibatch):
    """Pad a batch of examples using this field.

    If ``self.nesting_field.sequential`` is ``False``, each example in the batch must
    be a list of string tokens, and pads them as if by a ``Field`` with
    ``sequential=True``. Otherwise, each example must be a list of list of tokens.
    Using ``self.nesting_field``, pads the list of tokens to
    ``self.nesting_field.fix_length`` if provided, or otherwise to the length of the
    longest list of tokens in the batch. Next, using this field, pads the result by
    filling short examples with ``self.nesting_field.pad_token``.

    Example:
        >>> import pprint
        >>> pp = pprint.PrettyPrinter(indent=4)
        >>>
        >>> nesting_field = Field(pad_token='<c>', init_token='<w>', eos_token='</w>')
        >>> field = NestedField(nesting_field, init_token='<s>', eos_token='</s>')
        >>> minibatch = [
        ...     [list('john'), list('loves'), list('mary')],
        ...     [list('mary'), list('cries')],
        ... ]
        >>> padded = field.pad(minibatch)
        >>> pp.pprint(padded)
        [   [   ['<w>', '<s>', '</w>', '<c>', '<c>', '<c>', '<c>'],
                ['<w>', 'j', 'o', 'h', 'n', '</w>', '<c>'],
                ['<w>', 'l', 'o', 'v', 'e', 's', '</w>'],
                ['<w>', 'm', 'a', 'r', 'y', '</w>', '<c>'],
                ['<w>', '</s>', '</w>', '<c>', '<c>', '<c>', '<c>']],
            [   ['<w>', '<s>', '</w>', '<c>', '<c>', '<c>', '<c>'],
                ['<w>', 'm', 'a', 'r', 'y', '</w>', '<c>'],
                ['<w>', 'c', 'r', 'i', 'e', 's', '</w>'],
                ['<w>', '</s>', '</w>', '<c>', '<c>', '<c>', '<c>'],
                ['<c>', '<c>', '<c>', '<c>', '<c>', '<c>', '<c>']]]

    Arguments:
        minibatch (list): Each element is a list of string if
            ``self.nesting_field.sequential`` is ``False``, a list of list of string
            otherwise.

    Returns:
        list: The padded minibatch. or (padded, sentence_lens, word_lengths)
    """
    minibatch = list(minibatch)
    if not self.nesting_field.sequential:
        return super(NestedField, self).pad(minibatch)

    # Save values of attributes to be monkeypatched
    old_pad_token = self.pad_token
    old_init_token = self.init_token
    old_eos_token = self.eos_token
    old_fix_len = self.nesting_field.fix_length
    # Monkeypatch the attributes so the superclass pad() operates on
    # whole word-lists: pad/init/eos tokens become full-length token rows.
    if self.nesting_field.fix_length is None:
        max_len = max(len(xs) for ex in minibatch for xs in ex)
        # +2 reserves room for the nesting field's init/eos tokens when set.
        fix_len = max_len + 2 - (self.nesting_field.init_token,
                                 self.nesting_field.eos_token).count(None)
        self.nesting_field.fix_length = fix_len
    self.pad_token = [self.pad_token] * self.nesting_field.fix_length
    if self.init_token is not None:
        # self.init_token = self.nesting_field.pad([[self.init_token]])[0]
        self.init_token = [self.init_token]
    if self.eos_token is not None:
        # self.eos_token = self.nesting_field.pad([[self.eos_token]])[0]
        self.eos_token = [self.eos_token]
    # Do padding: first sentence-level (this field), then word-level
    # (nesting field), collecting lengths at both levels.
    old_include_lengths = self.include_lengths
    self.include_lengths = True
    self.nesting_field.include_lengths = True
    padded, sentence_lengths = super(NestedField, self).pad(minibatch)
    padded_with_lengths = [self.nesting_field.pad(ex) for ex in padded]
    word_lengths = []
    final_padded = []
    max_sen_len = len(padded[0])
    # Word-level padding may have replaced the sentence-level pad rows;
    # re-insert full pad rows (and zero lengths) for short examples.
    for (pad, lens), sentence_len in zip(padded_with_lengths, sentence_lengths):
        if sentence_len == max_sen_len:
            lens = lens
            pad = pad
        elif self.pad_first:
            lens[:(max_sen_len - sentence_len)] = (
                [0] * (max_sen_len - sentence_len))
            pad[:(max_sen_len - sentence_len)] = (
                [self.pad_token] * (max_sen_len - sentence_len))
        else:
            lens[-(max_sen_len - sentence_len):] = (
                [0] * (max_sen_len - sentence_len))
            pad[-(max_sen_len - sentence_len):] = (
                [self.pad_token] * (max_sen_len - sentence_len))
        word_lengths.append(lens)
        final_padded.append(pad)
    padded = final_padded

    # Restore monkeypatched attributes
    self.nesting_field.fix_length = old_fix_len
    self.pad_token = old_pad_token
    self.init_token = old_init_token
    self.eos_token = old_eos_token
    self.include_lengths = old_include_lengths
    if self.include_lengths:
        return padded, sentence_lengths, word_lengths
return padded | Pad a batch of examples using this field.
If ``self.nesting_field.sequential`` is ``False``, each example in the batch must
be a list of string tokens, and pads them as if by a ``Field`` with
``sequential=True``. Otherwise, each example must be a list of list of tokens.
Using ``self.nesting_field``, pads the list of tokens to
``self.nesting_field.fix_length`` if provided, or otherwise to the length of the
longest list of tokens in the batch. Next, using this field, pads the result by
filling short examples with ``self.nesting_field.pad_token``.
Example:
>>> import pprint
>>> pp = pprint.PrettyPrinter(indent=4)
>>>
>>> nesting_field = Field(pad_token='<c>', init_token='<w>', eos_token='</w>')
>>> field = NestedField(nesting_field, init_token='<s>', eos_token='</s>')
>>> minibatch = [
... [list('john'), list('loves'), list('mary')],
... [list('mary'), list('cries')],
... ]
>>> padded = field.pad(minibatch)
>>> pp.pprint(padded)
[ [ ['<w>', '<s>', '</w>', '<c>', '<c>', '<c>', '<c>'],
['<w>', 'j', 'o', 'h', 'n', '</w>', '<c>'],
['<w>', 'l', 'o', 'v', 'e', 's', '</w>'],
['<w>', 'm', 'a', 'r', 'y', '</w>', '<c>'],
['<w>', '</s>', '</w>', '<c>', '<c>', '<c>', '<c>']],
[ ['<w>', '<s>', '</w>', '<c>', '<c>', '<c>', '<c>'],
['<w>', 'm', 'a', 'r', 'y', '</w>', '<c>'],
['<w>', 'c', 'r', 'i', 'e', 's', '</w>'],
['<w>', '</s>', '</w>', '<c>', '<c>', '<c>', '<c>'],
['<c>', '<c>', '<c>', '<c>', '<c>', '<c>', '<c>']]]
Arguments:
minibatch (list): Each element is a list of string if
``self.nesting_field.sequential`` is ``False``, a list of list of string
otherwise.
Returns:
            list: The padded minibatch. or (padded, sentence_lens, word_lengths) | Below is the instruction that describes the task:
### Input:
Pad a batch of examples using this field.
If ``self.nesting_field.sequential`` is ``False``, each example in the batch must
be a list of string tokens, and pads them as if by a ``Field`` with
``sequential=True``. Otherwise, each example must be a list of list of tokens.
Using ``self.nesting_field``, pads the list of tokens to
``self.nesting_field.fix_length`` if provided, or otherwise to the length of the
longest list of tokens in the batch. Next, using this field, pads the result by
filling short examples with ``self.nesting_field.pad_token``.
Example:
>>> import pprint
>>> pp = pprint.PrettyPrinter(indent=4)
>>>
>>> nesting_field = Field(pad_token='<c>', init_token='<w>', eos_token='</w>')
>>> field = NestedField(nesting_field, init_token='<s>', eos_token='</s>')
>>> minibatch = [
... [list('john'), list('loves'), list('mary')],
... [list('mary'), list('cries')],
... ]
>>> padded = field.pad(minibatch)
>>> pp.pprint(padded)
[ [ ['<w>', '<s>', '</w>', '<c>', '<c>', '<c>', '<c>'],
['<w>', 'j', 'o', 'h', 'n', '</w>', '<c>'],
['<w>', 'l', 'o', 'v', 'e', 's', '</w>'],
['<w>', 'm', 'a', 'r', 'y', '</w>', '<c>'],
['<w>', '</s>', '</w>', '<c>', '<c>', '<c>', '<c>']],
[ ['<w>', '<s>', '</w>', '<c>', '<c>', '<c>', '<c>'],
['<w>', 'm', 'a', 'r', 'y', '</w>', '<c>'],
['<w>', 'c', 'r', 'i', 'e', 's', '</w>'],
['<w>', '</s>', '</w>', '<c>', '<c>', '<c>', '<c>'],
['<c>', '<c>', '<c>', '<c>', '<c>', '<c>', '<c>']]]
Arguments:
minibatch (list): Each element is a list of string if
``self.nesting_field.sequential`` is ``False``, a list of list of string
otherwise.
Returns:
list: The padded minibatch. or (padded, sentence_lens, word_lengths)
### Response:
def pad(self, minibatch):
"""Pad a batch of examples using this field.
If ``self.nesting_field.sequential`` is ``False``, each example in the batch must
be a list of string tokens, and pads them as if by a ``Field`` with
``sequential=True``. Otherwise, each example must be a list of list of tokens.
Using ``self.nesting_field``, pads the list of tokens to
``self.nesting_field.fix_length`` if provided, or otherwise to the length of the
longest list of tokens in the batch. Next, using this field, pads the result by
filling short examples with ``self.nesting_field.pad_token``.
Example:
>>> import pprint
>>> pp = pprint.PrettyPrinter(indent=4)
>>>
>>> nesting_field = Field(pad_token='<c>', init_token='<w>', eos_token='</w>')
>>> field = NestedField(nesting_field, init_token='<s>', eos_token='</s>')
>>> minibatch = [
... [list('john'), list('loves'), list('mary')],
... [list('mary'), list('cries')],
... ]
>>> padded = field.pad(minibatch)
>>> pp.pprint(padded)
[ [ ['<w>', '<s>', '</w>', '<c>', '<c>', '<c>', '<c>'],
['<w>', 'j', 'o', 'h', 'n', '</w>', '<c>'],
['<w>', 'l', 'o', 'v', 'e', 's', '</w>'],
['<w>', 'm', 'a', 'r', 'y', '</w>', '<c>'],
['<w>', '</s>', '</w>', '<c>', '<c>', '<c>', '<c>']],
[ ['<w>', '<s>', '</w>', '<c>', '<c>', '<c>', '<c>'],
['<w>', 'm', 'a', 'r', 'y', '</w>', '<c>'],
['<w>', 'c', 'r', 'i', 'e', 's', '</w>'],
['<w>', '</s>', '</w>', '<c>', '<c>', '<c>', '<c>'],
['<c>', '<c>', '<c>', '<c>', '<c>', '<c>', '<c>']]]
Arguments:
minibatch (list): Each element is a list of string if
``self.nesting_field.sequential`` is ``False``, a list of list of string
otherwise.
Returns:
list: The padded minibatch. or (padded, sentence_lens, word_lengths)
"""
minibatch = list(minibatch)
if not self.nesting_field.sequential:
return super(NestedField, self).pad(minibatch)
# Save values of attributes to be monkeypatched
old_pad_token = self.pad_token
old_init_token = self.init_token
old_eos_token = self.eos_token
old_fix_len = self.nesting_field.fix_length
# Monkeypatch the attributes
if self.nesting_field.fix_length is None:
max_len = max(len(xs) for ex in minibatch for xs in ex)
fix_len = max_len + 2 - (self.nesting_field.init_token,
self.nesting_field.eos_token).count(None)
self.nesting_field.fix_length = fix_len
self.pad_token = [self.pad_token] * self.nesting_field.fix_length
if self.init_token is not None:
# self.init_token = self.nesting_field.pad([[self.init_token]])[0]
self.init_token = [self.init_token]
if self.eos_token is not None:
# self.eos_token = self.nesting_field.pad([[self.eos_token]])[0]
self.eos_token = [self.eos_token]
# Do padding
old_include_lengths = self.include_lengths
self.include_lengths = True
self.nesting_field.include_lengths = True
padded, sentence_lengths = super(NestedField, self).pad(minibatch)
padded_with_lengths = [self.nesting_field.pad(ex) for ex in padded]
word_lengths = []
final_padded = []
max_sen_len = len(padded[0])
for (pad, lens), sentence_len in zip(padded_with_lengths, sentence_lengths):
if sentence_len == max_sen_len:
lens = lens
pad = pad
elif self.pad_first:
lens[:(max_sen_len - sentence_len)] = (
[0] * (max_sen_len - sentence_len))
pad[:(max_sen_len - sentence_len)] = (
[self.pad_token] * (max_sen_len - sentence_len))
else:
lens[-(max_sen_len - sentence_len):] = (
[0] * (max_sen_len - sentence_len))
pad[-(max_sen_len - sentence_len):] = (
[self.pad_token] * (max_sen_len - sentence_len))
word_lengths.append(lens)
final_padded.append(pad)
padded = final_padded
# Restore monkeypatched attributes
self.nesting_field.fix_length = old_fix_len
self.pad_token = old_pad_token
self.init_token = old_init_token
self.eos_token = old_eos_token
self.include_lengths = old_include_lengths
if self.include_lengths:
return padded, sentence_lengths, word_lengths
return padded |
def get_custom_views(name=None):
    """
    Issues a RESTful call to get a list of custom views from HPE IMC.
    Optional name input will return only the specified view.

    :param name: string containing the name of the desired custom view
    :return: list of dictionaries containing attributes of the custom views.
    """
    if auth is None or url is None:  # checks to see if the imc credentials are already available
        set_imc_creds()
    if name is None:
        get_custom_views_url = '/imcrs/plat/res/view/custom?resPrivilegeFilter=false&desc=false&total=false'
    elif name is not None:
        get_custom_views_url = '/imcrs/plat/res/view/custom?resPrivilegeFilter=false&name='+ name + '&desc=false&total=false'
    f_url = url + get_custom_views_url
    r = requests.get(f_url, auth=auth, headers=headers)  # creates the URL using the payload variable as the contents
    if r.status_code == 200:
        customviewlist = (json.loads(r.text))['customView']
        # A single view comes back as a bare dict; normalize it to a list.
        if type(customviewlist) is dict:
            customviewlist = [customviewlist]
            return customviewlist
        else:
            return customviewlist
    else:
        # NOTE(review): failures are printed rather than raised, so the
        # caller receives None on error.
        print(r.status_code)
        print("An Error has occured") | function takes no input and issues a RESTFUL call to get a list of custom views from HPE IMC. Optional Name input
will return only the specified view.
:param name: string containing the name of the desired custom view
:return: list of dictionaries containing attributes of the custom views. | Below is the instruction that describes the task:
### Input:
function takes no input and issues a RESTFUL call to get a list of custom views from HPE IMC. Optioanl Name input
will return only the specified view.
:param name: string containg the name of the desired custom view
:return: list of dictionaries containing attributes of the custom views.
### Response:
def get_custom_views(name=None):
"""
function takes no input and issues a RESTFUL call to get a list of custom views from HPE IMC. Optioanl Name input
will return only the specified view.
:param name: string containg the name of the desired custom view
:return: list of dictionaries containing attributes of the custom views.
"""
if auth is None or url is None: # checks to see if the imc credentials are already available
set_imc_creds()
if name is None:
get_custom_views_url = '/imcrs/plat/res/view/custom?resPrivilegeFilter=false&desc=false&total=false'
elif name is not None:
get_custom_views_url = '/imcrs/plat/res/view/custom?resPrivilegeFilter=false&name='+ name + '&desc=false&total=false'
f_url = url + get_custom_views_url
r = requests.get(f_url, auth=auth, headers=headers) # creates the URL using the payload variable as the contents
if r.status_code == 200:
customviewlist = (json.loads(r.text))['customView']
if type(customviewlist) is dict:
customviewlist = [customviewlist]
return customviewlist
else:
return customviewlist
else:
print(r.status_code)
print("An Error has occured") |
def comment(self, format, *args):
"""
Add a comment to hash table before saving to disk. You can add as many
comment lines as you like. These comment lines are discarded when loading
the file. If you use a null format, all comments are deleted.
"""
return lib.zhashx_comment(self._as_parameter_, format, *args) | Add a comment to hash table before saving to disk. You can add as many
comment lines as you like. These comment lines are discarded when loading
the file. If you use a null format, all comments are deleted. | Below is the the instruction that describes the task:
### Input:
Add a comment to hash table before saving to disk. You can add as many
comment lines as you like. These comment lines are discarded when loading
the file. If you use a null format, all comments are deleted.
### Response:
    def comment(self, format, *args):
        """
        Add a comment to hash table before saving to disk. You can add as many
        comment lines as you like. These comment lines are discarded when loading
        the file. If you use a null format, all comments are deleted.

        :param format: printf-style format string for the comment line; a null
            format deletes all existing comments.
        :param args: values substituted into ``format`` by the C library.
        :returns: result of the underlying ``zhashx_comment`` C call.
        """
        # Thin wrapper: delegate straight to the CZMQ C function, passing this
        # object's C handle (``_as_parameter_``) as the hash-table pointer.
        return lib.zhashx_comment(self._as_parameter_, format, *args)
def calculate_timeout(http_date):
"""Extract request timeout from e.g. ``Retry-After`` header.
Notes:
Per :rfc:`2616#section-14.37`, the ``Retry-After`` header can
be either an integer number of seconds or an HTTP date. This
function can handle either.
Arguments:
http_date (:py:class:`str`): The date to parse.
Returns:
:py:class:`int`: The timeout, in seconds.
"""
try:
return int(http_date)
except ValueError:
date_after = parse(http_date)
utc_now = datetime.now(tz=timezone.utc)
return int((date_after - utc_now).total_seconds()) | Extract request timeout from e.g. ``Retry-After`` header.
Notes:
Per :rfc:`2616#section-14.37`, the ``Retry-After`` header can
be either an integer number of seconds or an HTTP date. This
function can handle either.
Arguments:
http_date (:py:class:`str`): The date to parse.
Returns:
:py:class:`int`: The timeout, in seconds. | Below is the the instruction that describes the task:
### Input:
Extract request timeout from e.g. ``Retry-After`` header.
Notes:
Per :rfc:`2616#section-14.37`, the ``Retry-After`` header can
be either an integer number of seconds or an HTTP date. This
function can handle either.
Arguments:
http_date (:py:class:`str`): The date to parse.
Returns:
:py:class:`int`: The timeout, in seconds.
### Response:
def calculate_timeout(http_date):
    """Extract request timeout from e.g. ``Retry-After`` header.
    Notes:
        Per :rfc:`2616#section-14.37`, the ``Retry-After`` header can
        be either an integer number of seconds or an HTTP date. This
        function can handle either.
    Arguments:
        http_date (:py:class:`str`): The date to parse.
    Returns:
        :py:class:`int`: The timeout, in seconds.
    """
    try:
        seconds = int(http_date)
    except ValueError:
        # Not a plain integer: treat the value as an HTTP date and compute
        # the delta between it and the current UTC time.
        retry_at = parse(http_date)
        seconds = int((retry_at - datetime.now(tz=timezone.utc)).total_seconds())
    return seconds
def port_ranges():
"""
Returns a list of ephemeral port ranges for current machine.
"""
try:
return _linux_ranges()
except (OSError, IOError): # not linux, try BSD
try:
ranges = _bsd_ranges()
if ranges:
return ranges
except (OSError, IOError):
pass
# fallback
return [DEFAULT_EPHEMERAL_PORT_RANGE] | Returns a list of ephemeral port ranges for current machine. | Below is the the instruction that describes the task:
### Input:
Returns a list of ephemeral port ranges for current machine.
### Response:
def port_ranges():
    """
    Returns a list of ephemeral port ranges for current machine.
    """
    # Prefer the Linux procfs source, then the BSD sysctl source, then the
    # built-in default — same precedence as before, expressed as guard blocks.
    try:
        return _linux_ranges()
    except (OSError, IOError):
        pass  # not linux, try BSD
    try:
        bsd = _bsd_ranges()
        if bsd:
            return bsd
    except (OSError, IOError):
        pass
    # fallback
    return [DEFAULT_EPHEMERAL_PORT_RANGE]
def Wilke(ys, mus, MWs):
r'''Calculates viscosity of a gas mixture according to
mixing rules in [1]_.
.. math::
\eta_{mix} = \sum_{i=1}^n \frac{y_i \eta_i}{\sum_{j=1}^n y_j \phi_{ij}}
\phi_{ij} = \frac{(1 + \sqrt{\eta_i/\eta_j}(MW_j/MW_i)^{0.25})^2}
{\sqrt{8(1+MW_i/MW_j)}}
Parameters
----------
ys : float
Mole fractions of gas components
mus : float
Gas viscosities of all components, [Pa*S]
MWs : float
Molecular weights of all components, [g/mol]
Returns
-------
mug : float
Viscosity of gas mixture, Pa*S]
Notes
-----
This equation is entirely dimensionless; all dimensions cancel.
The original source has not been reviewed or found.
Examples
--------
>>> Wilke([0.05, 0.95], [1.34E-5, 9.5029E-6], [64.06, 46.07])
9.701614885866193e-06
References
----------
.. [1] TODO
'''
if not none_and_length_check([ys, mus, MWs]): # check same-length inputs
raise Exception('Function inputs are incorrect format')
cmps = range(len(ys))
phis = [[(1 + (mus[i]/mus[j])**0.5*(MWs[j]/MWs[i])**0.25)**2/(8*(1 + MWs[i]/MWs[j]))**0.5
for j in cmps] for i in cmps]
return sum([ys[i]*mus[i]/sum([ys[j]*phis[i][j] for j in cmps]) for i in cmps]) | r'''Calculates viscosity of a gas mixture according to
mixing rules in [1]_.
.. math::
\eta_{mix} = \sum_{i=1}^n \frac{y_i \eta_i}{\sum_{j=1}^n y_j \phi_{ij}}
\phi_{ij} = \frac{(1 + \sqrt{\eta_i/\eta_j}(MW_j/MW_i)^{0.25})^2}
{\sqrt{8(1+MW_i/MW_j)}}
Parameters
----------
ys : float
Mole fractions of gas components
mus : float
Gas viscosities of all components, [Pa*S]
MWs : float
Molecular weights of all components, [g/mol]
Returns
-------
mug : float
Viscosity of gas mixture, Pa*S]
Notes
-----
This equation is entirely dimensionless; all dimensions cancel.
The original source has not been reviewed or found.
Examples
--------
>>> Wilke([0.05, 0.95], [1.34E-5, 9.5029E-6], [64.06, 46.07])
9.701614885866193e-06
References
----------
.. [1] TODO | Below is the the instruction that describes the task:
### Input:
r'''Calculates viscosity of a gas mixture according to
mixing rules in [1]_.
.. math::
\eta_{mix} = \sum_{i=1}^n \frac{y_i \eta_i}{\sum_{j=1}^n y_j \phi_{ij}}
\phi_{ij} = \frac{(1 + \sqrt{\eta_i/\eta_j}(MW_j/MW_i)^{0.25})^2}
{\sqrt{8(1+MW_i/MW_j)}}
Parameters
----------
ys : float
Mole fractions of gas components
mus : float
Gas viscosities of all components, [Pa*S]
MWs : float
Molecular weights of all components, [g/mol]
Returns
-------
mug : float
Viscosity of gas mixture, Pa*S]
Notes
-----
This equation is entirely dimensionless; all dimensions cancel.
The original source has not been reviewed or found.
Examples
--------
>>> Wilke([0.05, 0.95], [1.34E-5, 9.5029E-6], [64.06, 46.07])
9.701614885866193e-06
References
----------
.. [1] TODO
### Response:
def Wilke(ys, mus, MWs):
    r'''Calculates viscosity of a gas mixture according to
    mixing rules in [1]_.
    .. math::
        \eta_{mix} = \sum_{i=1}^n \frac{y_i \eta_i}{\sum_{j=1}^n y_j \phi_{ij}}
        \phi_{ij} = \frac{(1 + \sqrt{\eta_i/\eta_j}(MW_j/MW_i)^{0.25})^2}
        {\sqrt{8(1+MW_i/MW_j)}}
    Parameters
    ----------
    ys : float
        Mole fractions of gas components
    mus : float
        Gas viscosities of all components, [Pa*S]
    MWs : float
        Molecular weights of all components, [g/mol]
    Returns
    -------
    mug : float
        Viscosity of gas mixture, Pa*S]
    Notes
    -----
    This equation is entirely dimensionless; all dimensions cancel.
    The original source has not been reviewed or found.
    Examples
    --------
    >>> Wilke([0.05, 0.95], [1.34E-5, 9.5029E-6], [64.06, 46.07])
    9.701614885866193e-06
    References
    ----------
    .. [1] TODO
    '''
    if not none_and_length_check([ys, mus, MWs]):  # check same-length inputs
        raise Exception('Function inputs are incorrect format')
    n = len(ys)
    mu_mix = 0.0
    # Accumulate term-by-term instead of building the full phi matrix up
    # front; addition order matches the original, so results are identical.
    for i in range(n):
        denom = 0.0
        for j in range(n):
            phi_ij = ((1 + (mus[i]/mus[j])**0.5*(MWs[j]/MWs[i])**0.25)**2
                      / (8*(1 + MWs[i]/MWs[j]))**0.5)
            denom += ys[j]*phi_ij
        mu_mix += ys[i]*mus[i]/denom
    return mu_mix
def set_bookmark(self, slot_num):
"""Bookmark current position to given slot."""
if self.data:
editor = self.get_current_editor()
editor.add_bookmark(slot_num) | Bookmark current position to given slot. | Below is the the instruction that describes the task:
### Input:
Bookmark current position to given slot.
### Response:
def set_bookmark(self, slot_num):
"""Bookmark current position to given slot."""
if self.data:
editor = self.get_current_editor()
editor.add_bookmark(slot_num) |
def clean(args):
"""
%prog clean fastafile
Remove irregular chars in FASTA seqs.
"""
p = OptionParser(clean.__doc__)
p.add_option("--fancy", default=False, action="store_true",
help="Pretty print the sequence [default: %default]")
p.add_option("--canonical", default=False, action="store_true",
help="Use only acgtnACGTN [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
fw = must_open(opts.outfile, "w")
if opts.fancy:
for header, seq in iter_clean_fasta(fastafile):
print(">" + header, file=fw)
fancyprint(fw, seq)
return 0
iterator = iter_canonical_fasta if opts.canonical else iter_clean_fasta
for header, seq in iterator(fastafile):
seq = Seq(seq)
s = SeqRecord(seq, id=header, description="")
SeqIO.write([s], fw, "fasta") | %prog clean fastafile
Remove irregular chars in FASTA seqs. | Below is the the instruction that describes the task:
### Input:
%prog clean fastafile
Remove irregular chars in FASTA seqs.
### Response:
def clean(args):
    """
    %prog clean fastafile
    Remove irregular chars in FASTA seqs.
    """
    parser = OptionParser(clean.__doc__)
    parser.add_option("--fancy", default=False, action="store_true",
                      help="Pretty print the sequence [default: %default]")
    parser.add_option("--canonical", default=False, action="store_true",
                      help="Use only acgtnACGTN [default: %default]")
    parser.set_outfile()
    opts, args = parser.parse_args(args)
    if len(args) != 1:
        sys.exit(not parser.print_help())
    fastafile, = args
    out_fh = must_open(opts.outfile, "w")
    if opts.fancy:
        # Pretty-printed mode: emit header then wrapped sequence, nothing else.
        for header, seq in iter_clean_fasta(fastafile):
            print(">" + header, file=out_fh)
            fancyprint(out_fh, seq)
        return 0
    record_iter = iter_canonical_fasta if opts.canonical else iter_clean_fasta
    for header, seq in record_iter(fastafile):
        record = SeqRecord(Seq(seq), id=header, description="")
        SeqIO.write([record], out_fh, "fasta")
def search(self, q, language=None, current_site_only=True):
"""Simple search function
Plugins can define a 'search_fields' tuple similar to ModelAdmin classes
"""
from cms.plugin_pool import plugin_pool
qs = self.get_queryset()
qs = qs.public()
if current_site_only:
site = Site.objects.get_current()
qs = qs.filter(tree__site=site)
qt = Q(title_set__title__icontains=q)
# find 'searchable' plugins and build query
qp = Q()
plugins = plugin_pool.get_all_plugins()
for plugin in plugins:
cmsplugin = plugin.model
if not (
hasattr(cmsplugin, 'search_fields') and
hasattr(cmsplugin, 'cmsplugin_ptr')
):
continue
field = cmsplugin.cmsplugin_ptr.field
related_query_name = field.related_query_name()
if related_query_name and not related_query_name.startswith('+'):
for field in cmsplugin.search_fields:
qp |= Q(**{
'placeholders__cmsplugin__{0}__{1}__icontains'.format(
related_query_name,
field,
): q})
if language:
qt &= Q(title_set__language=language)
qp &= Q(cmsplugin__language=language)
qs = qs.filter(qt | qp)
return qs.distinct() | Simple search function
Plugins can define a 'search_fields' tuple similar to ModelAdmin classes | Below is the the instruction that describes the task:
### Input:
Simple search function
Plugins can define a 'search_fields' tuple similar to ModelAdmin classes
### Response:
    def search(self, q, language=None, current_site_only=True):
        """Simple search function
        Plugins can define a 'search_fields' tuple similar to ModelAdmin classes

        :param q: search term, matched case-insensitively against page titles
            and any searchable plugin fields.
        :param language: if given, restrict both title and plugin matches to
            this language code.
        :param current_site_only: when True, limit results to pages of the
            current ``Site``.
        :returns: distinct queryset of public pages matching ``q``.
        """
        from cms.plugin_pool import plugin_pool
        qs = self.get_queryset()
        qs = qs.public()
        if current_site_only:
            site = Site.objects.get_current()
            qs = qs.filter(tree__site=site)
        qt = Q(title_set__title__icontains=q)
        # find 'searchable' plugins and build query
        qp = Q()
        plugins = plugin_pool.get_all_plugins()
        for plugin in plugins:
            cmsplugin = plugin.model
            # Only plugins that opt in via 'search_fields' and are concrete
            # CMSPlugin subclasses (have the cmsplugin_ptr FK) are searched.
            if not (
                hasattr(cmsplugin, 'search_fields') and
                hasattr(cmsplugin, 'cmsplugin_ptr')
            ):
                continue
            field = cmsplugin.cmsplugin_ptr.field
            related_query_name = field.related_query_name()
            # A related name starting with '+' disables the reverse relation,
            # so such plugins cannot be traversed in a query.
            if related_query_name and not related_query_name.startswith('+'):
                # NOTE(review): this inner loop reuses the name ``field`` for
                # the plugin's searchable field names, shadowing the FK field
                # object above (harmless — it is not used again afterwards).
                for field in cmsplugin.search_fields:
                    qp |= Q(**{
                        'placeholders__cmsplugin__{0}__{1}__icontains'.format(
                            related_query_name,
                            field,
                        ): q})
        if language:
            qt &= Q(title_set__language=language)
            qp &= Q(cmsplugin__language=language)
        qs = qs.filter(qt | qp)
        return qs.distinct()
def download_image(self, img_url):
""" Downloads a single image.
Downloads img_url using self.page_url as base.
Also, raises the appropriate exception if required.
"""
img_request = None
try:
img_request = requests.request(
'get', img_url, stream=True, proxies=self.proxies)
if img_request.status_code != 200:
raise ImageDownloadError(img_request.status_code)
except:
raise ImageDownloadError()
if img_url[-3:] == "svg" or (int(img_request.headers['content-length']) > self.min_filesize and\
int(img_request.headers['content-length']) < self.max_filesize):
img_content = img_request.content
with open(os.path.join(self.download_path, img_url.split('/')[-1]), 'wb') as f:
byte_image = bytes(img_content)
f.write(byte_image)
else:
raise ImageSizeError(img_request.headers['content-length'])
return True | Downloads a single image.
Downloads img_url using self.page_url as base.
Also, raises the appropriate exception if required. | Below is the the instruction that describes the task:
### Input:
Downloads a single image.
Downloads img_url using self.page_url as base.
Also, raises the appropriate exception if required.
### Response:
def download_image(self, img_url):
""" Downloads a single image.
Downloads img_url using self.page_url as base.
Also, raises the appropriate exception if required.
"""
img_request = None
try:
img_request = requests.request(
'get', img_url, stream=True, proxies=self.proxies)
if img_request.status_code != 200:
raise ImageDownloadError(img_request.status_code)
except:
raise ImageDownloadError()
if img_url[-3:] == "svg" or (int(img_request.headers['content-length']) > self.min_filesize and\
int(img_request.headers['content-length']) < self.max_filesize):
img_content = img_request.content
with open(os.path.join(self.download_path, img_url.split('/')[-1]), 'wb') as f:
byte_image = bytes(img_content)
f.write(byte_image)
else:
raise ImageSizeError(img_request.headers['content-length'])
return True |
def nodes_to_object(self, node, object):
"Map all child nodes to one object's attributes"
for n in list(node):
self.node_to_object(n, object) | Map all child nodes to one object's attributes | Below is the the instruction that describes the task:
### Input:
Map all child nodes to one object's attributes
### Response:
def nodes_to_object(self, node, object):
"Map all child nodes to one object's attributes"
for n in list(node):
self.node_to_object(n, object) |
def validate_signature(filename):
""" Remove invalid signatures from a binary file
If the file signature is missing or valid then it will be ignored
Invalid signatures are replaced with an ad-hoc signature. This is the
closest you can get to removing a signature on MacOS
Parameters
----------
filename : str
Filepath to a binary file
"""
out, err = back_tick(['codesign', '--verify', filename],
ret_err=True, as_str=True, raise_err=False)
if not err:
return # The existing signature is valid
if 'code object is not signed at all' in err:
return # File has no signature, and adding a new one isn't necessary
# This file's signature is invalid and needs to be replaced
replace_signature(filename, '-') | Remove invalid signatures from a binary file
If the file signature is missing or valid then it will be ignored
Invalid signatures are replaced with an ad-hoc signature. This is the
closest you can get to removing a signature on MacOS
Parameters
----------
filename : str
Filepath to a binary file | Below is the the instruction that describes the task:
### Input:
Remove invalid signatures from a binary file
If the file signature is missing or valid then it will be ignored
Invalid signatures are replaced with an ad-hoc signature. This is the
closest you can get to removing a signature on MacOS
Parameters
----------
filename : str
Filepath to a binary file
### Response:
def validate_signature(filename):
    """ Remove invalid signatures from a binary file
    If the file signature is missing or valid then it will be ignored
    Invalid signatures are replaced with an ad-hoc signature. This is the
    closest you can get to removing a signature on MacOS
    Parameters
    ----------
    filename : str
        Filepath to a binary file
    """
    # Only the stderr output of `codesign --verify` matters here.
    _, err = back_tick(['codesign', '--verify', filename],
                       ret_err=True, as_str=True, raise_err=False)
    if not err:
        return  # The existing signature is valid
    if 'code object is not signed at all' in err:
        return  # File has no signature, and adding a new one isn't necessary
    # This file's signature is invalid and needs to be replaced
    replace_signature(filename, '-')
def buildASNList(rootnames, asnname, check_for_duplicates=True):
"""
Return the list of filenames for a given set of rootnames
"""
# Recognize when multiple valid inputs with the same rootname are present
# this would happen when both CTE-corrected (_flc) and non-CTE-corrected (_flt)
# products are in the same directory as an ASN table
filelist, duplicates = checkForDuplicateInputs(rootnames)
if check_for_duplicates and duplicates:
# Build new ASN tables for each set of input files
origasn = changeSuffixinASN(asnname, 'flt')
dupasn = changeSuffixinASN(asnname, 'flc')
errstr = 'ERROR:\nMultiple valid input files found:\n'
for fname, dname in zip(filelist, duplicates):
errstr += ' %s %s\n' % (fname, dname)
errstr += ('\nNew association files have been generated for each '
'version of these files.\n %s\n %s\n\nPlease '
're-start astrodrizzle using of these new ASN files or '
'use widlcards for the input to only select one type of '
'input file.' % (dupasn, origasn))
print(textutil.textbox(errstr), file=sys.stderr)
# generate new ASN files for each case,
# report this case of duplicate inputs to the user then quit
raise ValueError
return filelist | Return the list of filenames for a given set of rootnames | Below is the the instruction that describes the task:
### Input:
Return the list of filenames for a given set of rootnames
### Response:
def buildASNList(rootnames, asnname, check_for_duplicates=True):
    """
    Return the list of filenames for a given set of rootnames

    :param rootnames: exposure rootnames to resolve into filenames.
    :param asnname: name of the association (ASN) table the rootnames came
        from; used to generate per-suffix ASN files when duplicates are found.
    :param check_for_duplicates: when True, abort if both '_flt' and '_flc'
        versions of an input are present.
    :returns: list of filenames.
    :raises ValueError: when duplicate (flt/flc) inputs are detected; the
        exception message carries the full diagnostic text.
    """
    # Recognize when multiple valid inputs with the same rootname are present
    # this would happen when both CTE-corrected (_flc) and non-CTE-corrected (_flt)
    # products are in the same directory as an ASN table
    filelist, duplicates = checkForDuplicateInputs(rootnames)
    if check_for_duplicates and duplicates:
        # Build new ASN tables for each set of input files
        origasn = changeSuffixinASN(asnname, 'flt')
        dupasn = changeSuffixinASN(asnname, 'flc')
        errstr = 'ERROR:\nMultiple valid input files found:\n'
        for fname, dname in zip(filelist, duplicates):
            errstr += ' %s %s\n' % (fname, dname)
        errstr += ('\nNew association files have been generated for each '
                   'version of these files.\n %s\n %s\n\nPlease '
                   're-start astrodrizzle using of these new ASN files or '
                   'use widlcards for the input to only select one type of '
                   'input file.' % (dupasn, origasn))
        print(textutil.textbox(errstr), file=sys.stderr)
        # generate new ASN files for each case,
        # report this case of duplicate inputs to the user then quit.
        # Bug fix: attach the diagnostic to the exception instead of raising
        # a bare ValueError, so programmatic callers see the reason too.
        raise ValueError(errstr)
    return filelist
def _get_response(**kwargs):
"""Get a template response
Use kwargs to add things to the dictionary
"""
if 'code' not in kwargs:
kwargs['code'] = 200
if 'headers' not in kwargs:
kwargs['headers'] = dict()
if 'version' not in kwargs:
kwargs['version'] = 'HTTP/1.1'
return dict(**kwargs) | Get a template response
Use kwargs to add things to the dictionary | Below is the the instruction that describes the task:
### Input:
Get a template response
Use kwargs to add things to the dictionary
### Response:
def _get_response(**kwargs):
"""Get a template response
Use kwargs to add things to the dictionary
"""
if 'code' not in kwargs:
kwargs['code'] = 200
if 'headers' not in kwargs:
kwargs['headers'] = dict()
if 'version' not in kwargs:
kwargs['version'] = 'HTTP/1.1'
return dict(**kwargs) |
def get_session_data(self, request):
"""
Return a tuple ``(user, profile, client)`` from the session.
"""
user = request.session['%suser' % SESSION_KEY]
profile = request.session['%sprofile' % SESSION_KEY]
client = request.session['%sclient' % SESSION_KEY]
return user, profile, client | Return a tuple ``(user, profile, client)`` from the session. | Below is the the instruction that describes the task:
### Input:
Return a tuple ``(user, profile, client)`` from the session.
### Response:
def get_session_data(self, request):
"""
Return a tuple ``(user, profile, client)`` from the session.
"""
user = request.session['%suser' % SESSION_KEY]
profile = request.session['%sprofile' % SESSION_KEY]
client = request.session['%sclient' % SESSION_KEY]
return user, profile, client |
def link(self, link, title, text):
"""Rendering a given link with content and title.
:param link: href link for ``<a>`` tag.
:param title: title content for `title` attribute.
:param text: text content for description.
"""
link = escape_link(link)
if not title:
return '<a href="%s">%s</a>' % (link, text)
title = escape(title, quote=True)
return '<a href="%s" title="%s">%s</a>' % (link, title, text) | Rendering a given link with content and title.
:param link: href link for ``<a>`` tag.
:param title: title content for `title` attribute.
:param text: text content for description. | Below is the the instruction that describes the task:
### Input:
Rendering a given link with content and title.
:param link: href link for ``<a>`` tag.
:param title: title content for `title` attribute.
:param text: text content for description.
### Response:
def link(self, link, title, text):
"""Rendering a given link with content and title.
:param link: href link for ``<a>`` tag.
:param title: title content for `title` attribute.
:param text: text content for description.
"""
link = escape_link(link)
if not title:
return '<a href="%s">%s</a>' % (link, text)
title = escape(title, quote=True)
return '<a href="%s" title="%s">%s</a>' % (link, title, text) |
def are_cmddicts_same(dict1, dict2):
"""
Checks to see if two cmddicts are the same.
Two cmddicts are defined to be the same if they have the same callbacks/
helptexts/children/summaries for all nodes.
"""
# If the set of all keys are not the same, they must not be the same.
if set(dict1.keys()) != set(dict2.keys()):
return False
# Everything in dict1 should be in dict2
for key in dict1:
# Check everything except children; Check for children recursively
for propertytype in dict1[key]:
if (not propertytype in dict2[key] or
dict1[key][propertytype] != dict2[key][propertytype]):
return False
# Check children
if not are_cmddicts_same(dict1[key]['children'], dict2[key]['children']):
return False
return True | Checks to see if two cmddicts are the same.
Two cmddicts are defined to be the same if they have the same callbacks/
helptexts/children/summaries for all nodes. | Below is the the instruction that describes the task:
### Input:
Checks to see if two cmddicts are the same.
Two cmddicts are defined to be the same if they have the same callbacks/
helptexts/children/summaries for all nodes.
### Response:
def are_cmddicts_same(dict1, dict2):
    """
    Checks to see if two cmddicts are the same.
    Two cmddicts are defined to be the same if they have the same callbacks/
    helptexts/children/summaries for all nodes.
    """
    # Both dicts must cover exactly the same command names.
    if set(dict1) != set(dict2):
        return False
    for key, props in dict1.items():
        other = dict2[key]
        # Every property (callback, helptext, summary, children, ...) must
        # exist in the counterpart and compare equal.
        for prop in props:
            if prop not in other or props[prop] != other[prop]:
                return False
        # Recurse into subcommands.
        if not are_cmddicts_same(props['children'], other['children']):
            return False
    return True
def add(key, value):
"""Adds context to the currently executing request.
:key: Any String identifying the request context.
Example: "user_ip", "plan", "alert_count"
:value: Any json-serializable type.
Example: "1.1.1.1", "free", 100
:returns: nothing.
"""
tr = TrackedRequest.instance()
tr.tag(key, value) | Adds context to the currently executing request.
:key: Any String identifying the request context.
Example: "user_ip", "plan", "alert_count"
:value: Any json-serializable type.
Example: "1.1.1.1", "free", 100
:returns: nothing. | Below is the the instruction that describes the task:
### Input:
Adds context to the currently executing request.
:key: Any String identifying the request context.
Example: "user_ip", "plan", "alert_count"
:value: Any json-serializable type.
Example: "1.1.1.1", "free", 100
:returns: nothing.
### Response:
def add(key, value):
    """Adds context to the currently executing request.
    :key: Any String identifying the request context.
    Example: "user_ip", "plan", "alert_count"
    :value: Any json-serializable type.
    Example: "1.1.1.1", "free", 100
    :returns: nothing.
    """
    # Tag the singleton TrackedRequest in a single expression.
    TrackedRequest.instance().tag(key, value)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.