code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def deleteObserver(self, observer):
"""Remove an observer."""
Observable.deleteObserver(self, observer)
# If self.startOnDemand is True, the reader monitoring
# thread is stopped when there are no more observers.
if self.startOnDemand:
if 0 == self.countObservers():
self.rmthread.stop()
del self.rmthread
self.rmthread = None | Remove an observer. | Below is the the instruction that describes the task:
### Input:
Remove an observer.
### Response:
def deleteObserver(self, observer):
"""Remove an observer."""
Observable.deleteObserver(self, observer)
# If self.startOnDemand is True, the reader monitoring
# thread is stopped when there are no more observers.
if self.startOnDemand:
if 0 == self.countObservers():
self.rmthread.stop()
del self.rmthread
self.rmthread = None |
def s_res(self, components=None):
"""
Get resulting apparent power in kVA at line(s) and transformer(s).
The apparent power at a line (or transformer) is determined from the
maximum values of active power P and reactive power Q.
.. math::
S = max(\sqrt{p_0^2 + q_0^2}, \sqrt{p_1^2 + q_1^2})
Parameters
----------
components : :obj:`list`
List with all components (of type :class:`~.grid.components.Line`
or :class:`~.grid.components.Transformer`) to get apparent power
for. If not provided defaults to return apparent power of all lines
and transformers in the grid.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Apparent power in kVA for lines and/or transformers.
"""
if components is not None:
labels_included = []
labels_not_included = []
labels = [repr(l) for l in components]
for label in labels:
if (label in list(self.pfa_p.columns) and
label in list(self.pfa_q.columns)):
labels_included.append(label)
else:
labels_not_included.append(label)
if labels_not_included:
logging.warning(
"Apparent power for {lines} are not returned from "
"PFA".format(lines=labels_not_included))
else:
labels_included = self.pfa_p.columns
s_res = ((self.pfa_p[labels_included] ** 2 + self.pfa_q[
labels_included] ** 2)).applymap(sqrt)
return s_res | Get resulting apparent power in kVA at line(s) and transformer(s).
The apparent power at a line (or transformer) is determined from the
maximum values of active power P and reactive power Q.
.. math::
S = max(\sqrt{p_0^2 + q_0^2}, \sqrt{p_1^2 + q_1^2})
Parameters
----------
components : :obj:`list`
List with all components (of type :class:`~.grid.components.Line`
or :class:`~.grid.components.Transformer`) to get apparent power
for. If not provided defaults to return apparent power of all lines
and transformers in the grid.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Apparent power in kVA for lines and/or transformers. | Below is the the instruction that describes the task:
### Input:
Get resulting apparent power in kVA at line(s) and transformer(s).
The apparent power at a line (or transformer) is determined from the
maximum values of active power P and reactive power Q.
.. math::
S = max(\sqrt{p_0^2 + q_0^2}, \sqrt{p_1^2 + q_1^2})
Parameters
----------
components : :obj:`list`
List with all components (of type :class:`~.grid.components.Line`
or :class:`~.grid.components.Transformer`) to get apparent power
for. If not provided defaults to return apparent power of all lines
and transformers in the grid.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Apparent power in kVA for lines and/or transformers.
### Response:
def s_res(self, components=None):
"""
Get resulting apparent power in kVA at line(s) and transformer(s).
The apparent power at a line (or transformer) is determined from the
maximum values of active power P and reactive power Q.
.. math::
S = max(\sqrt{p_0^2 + q_0^2}, \sqrt{p_1^2 + q_1^2})
Parameters
----------
components : :obj:`list`
List with all components (of type :class:`~.grid.components.Line`
or :class:`~.grid.components.Transformer`) to get apparent power
for. If not provided defaults to return apparent power of all lines
and transformers in the grid.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Apparent power in kVA for lines and/or transformers.
"""
if components is not None:
labels_included = []
labels_not_included = []
labels = [repr(l) for l in components]
for label in labels:
if (label in list(self.pfa_p.columns) and
label in list(self.pfa_q.columns)):
labels_included.append(label)
else:
labels_not_included.append(label)
if labels_not_included:
logging.warning(
"Apparent power for {lines} are not returned from "
"PFA".format(lines=labels_not_included))
else:
labels_included = self.pfa_p.columns
s_res = ((self.pfa_p[labels_included] ** 2 + self.pfa_q[
labels_included] ** 2)).applymap(sqrt)
return s_res |
def _ready_gzip_fastq(in_files, data, require_bgzip=False):
"""Check if we have gzipped fastq and don't need format conversion or splitting.
Avoid forcing bgzip if we don't need indexed files.
"""
all_gzipped = all([not x or x.endswith(".gz") for x in in_files])
if require_bgzip and all_gzipped:
all_gzipped = all([not x or not _check_gzipped_input(x, data)[0] for x in in_files])
needs_convert = dd.get_quality_format(data).lower() == "illumina"
needs_trim = dd.get_trim_ends(data)
do_splitting = dd.get_align_split_size(data) is not False
return (all_gzipped and not needs_convert and not do_splitting and
not objectstore.is_remote(in_files[0]) and not needs_trim and not get_downsample_params(data)) | Check if we have gzipped fastq and don't need format conversion or splitting.
Avoid forcing bgzip if we don't need indexed files. | Below is the the instruction that describes the task:
### Input:
Check if we have gzipped fastq and don't need format conversion or splitting.
Avoid forcing bgzip if we don't need indexed files.
### Response:
def _ready_gzip_fastq(in_files, data, require_bgzip=False):
"""Check if we have gzipped fastq and don't need format conversion or splitting.
Avoid forcing bgzip if we don't need indexed files.
"""
all_gzipped = all([not x or x.endswith(".gz") for x in in_files])
if require_bgzip and all_gzipped:
all_gzipped = all([not x or not _check_gzipped_input(x, data)[0] for x in in_files])
needs_convert = dd.get_quality_format(data).lower() == "illumina"
needs_trim = dd.get_trim_ends(data)
do_splitting = dd.get_align_split_size(data) is not False
return (all_gzipped and not needs_convert and not do_splitting and
not objectstore.is_remote(in_files[0]) and not needs_trim and not get_downsample_params(data)) |
def sanity_check(self):
"Check the underlying data in the training set can be properly loaded."
final_message = "You can deactivate this warning by passing `no_check=True`."
if not hasattr(self.train_ds, 'items') or len(self.train_ds.items) == 0 or not hasattr(self.train_dl, 'batch_sampler'): return
if len(self.train_dl) == 0:
warn(f"""Your training dataloader is empty, you have only {len(self.train_dl.dataset)} items in your training set.
Your batch size is {self.train_dl.batch_size}, you should lower it.""")
print(final_message)
return
idx = next(iter(self.train_dl.batch_sampler))
samples,fails = [],[]
for i in idx:
try: samples.append(self.train_dl.dataset[i])
except: fails.append(i)
if len(fails) > 0:
warn_msg = "There seems to be something wrong with your dataset, for example, in the first batch can't access"
if len(fails) == len(idx):
warn_msg += f" any element of self.train_ds.\nTried: {show_some(idx)}"
else:
warn_msg += f" these elements in self.train_ds: {show_some(fails)}"
warn(warn_msg)
print(final_message)
return
try: batch = self.collate_fn(samples)
except:
message = "It's not possible to collate samples of your dataset together in a batch."
try:
shapes = [[o[i].data.shape for o in samples] for i in range(2)]
message += f'\nShapes of the inputs/targets:\n{shapes}'
except: pass
warn(message)
print(final_message) | Check the underlying data in the training set can be properly loaded. | Below is the the instruction that describes the task:
### Input:
Check the underlying data in the training set can be properly loaded.
### Response:
def sanity_check(self):
"Check the underlying data in the training set can be properly loaded."
final_message = "You can deactivate this warning by passing `no_check=True`."
if not hasattr(self.train_ds, 'items') or len(self.train_ds.items) == 0 or not hasattr(self.train_dl, 'batch_sampler'): return
if len(self.train_dl) == 0:
warn(f"""Your training dataloader is empty, you have only {len(self.train_dl.dataset)} items in your training set.
Your batch size is {self.train_dl.batch_size}, you should lower it.""")
print(final_message)
return
idx = next(iter(self.train_dl.batch_sampler))
samples,fails = [],[]
for i in idx:
try: samples.append(self.train_dl.dataset[i])
except: fails.append(i)
if len(fails) > 0:
warn_msg = "There seems to be something wrong with your dataset, for example, in the first batch can't access"
if len(fails) == len(idx):
warn_msg += f" any element of self.train_ds.\nTried: {show_some(idx)}"
else:
warn_msg += f" these elements in self.train_ds: {show_some(fails)}"
warn(warn_msg)
print(final_message)
return
try: batch = self.collate_fn(samples)
except:
message = "It's not possible to collate samples of your dataset together in a batch."
try:
shapes = [[o[i].data.shape for o in samples] for i in range(2)]
message += f'\nShapes of the inputs/targets:\n{shapes}'
except: pass
warn(message)
print(final_message) |
def __calculate_clusters(self, k):
"""!
@brief Performs cluster analysis using specified K value.
@param[in] k (uint): Amount of clusters that should be allocated.
@return (array_like) Allocated clusters.
"""
initial_values = kmeans_plusplus_initializer(self.__data, k).initialize(return_index=self.__return_index)
algorithm_type = self.__algorithm.get_type()
return algorithm_type(self.__data, initial_values).process().get_clusters() | !
@brief Performs cluster analysis using specified K value.
@param[in] k (uint): Amount of clusters that should be allocated.
@return (array_like) Allocated clusters. | Below is the the instruction that describes the task:
### Input:
!
@brief Performs cluster analysis using specified K value.
@param[in] k (uint): Amount of clusters that should be allocated.
@return (array_like) Allocated clusters.
### Response:
def __calculate_clusters(self, k):
"""!
@brief Performs cluster analysis using specified K value.
@param[in] k (uint): Amount of clusters that should be allocated.
@return (array_like) Allocated clusters.
"""
initial_values = kmeans_plusplus_initializer(self.__data, k).initialize(return_index=self.__return_index)
algorithm_type = self.__algorithm.get_type()
return algorithm_type(self.__data, initial_values).process().get_clusters() |
def register_peer(self, connection_id, endpoint):
"""Registers a connected connection_id.
Args:
connection_id (str): A unique identifier which identifies an
connection on the network server socket.
endpoint (str): The publically reachable endpoint of the new
peer
"""
with self._lock:
if len(self._peers) < self._maximum_peer_connectivity:
self._peers[connection_id] = endpoint
self._topology.set_connection_status(connection_id,
PeerStatus.PEER)
LOGGER.debug("Added connection_id %s with endpoint %s, "
"connected identities are now %s",
connection_id, endpoint, self._peers)
else:
raise PeeringException(
"At maximum configured number of peers: {} "
"Rejecting peering request from {}.".format(
self._maximum_peer_connectivity,
endpoint))
public_key = self.peer_to_public_key(connection_id)
if public_key:
self._consensus_notifier.notify_peer_connected(public_key) | Registers a connected connection_id.
Args:
connection_id (str): A unique identifier which identifies an
connection on the network server socket.
endpoint (str): The publically reachable endpoint of the new
peer | Below is the the instruction that describes the task:
### Input:
Registers a connected connection_id.
Args:
connection_id (str): A unique identifier which identifies an
connection on the network server socket.
endpoint (str): The publically reachable endpoint of the new
peer
### Response:
def register_peer(self, connection_id, endpoint):
"""Registers a connected connection_id.
Args:
connection_id (str): A unique identifier which identifies an
connection on the network server socket.
endpoint (str): The publically reachable endpoint of the new
peer
"""
with self._lock:
if len(self._peers) < self._maximum_peer_connectivity:
self._peers[connection_id] = endpoint
self._topology.set_connection_status(connection_id,
PeerStatus.PEER)
LOGGER.debug("Added connection_id %s with endpoint %s, "
"connected identities are now %s",
connection_id, endpoint, self._peers)
else:
raise PeeringException(
"At maximum configured number of peers: {} "
"Rejecting peering request from {}.".format(
self._maximum_peer_connectivity,
endpoint))
public_key = self.peer_to_public_key(connection_id)
if public_key:
self._consensus_notifier.notify_peer_connected(public_key) |
def _clear(cls):
"""This is only for testing pourposes, resets the DepotManager status
This is to simplify writing test fixtures, resets the DepotManager global
status and removes the informations related to the current configured depots
and middleware.
"""
cls._default_depot = None
cls._depots = {}
cls._middleware = None
cls._aliases = {} | This is only for testing pourposes, resets the DepotManager status
This is to simplify writing test fixtures, resets the DepotManager global
status and removes the informations related to the current configured depots
and middleware. | Below is the the instruction that describes the task:
### Input:
This is only for testing pourposes, resets the DepotManager status
This is to simplify writing test fixtures, resets the DepotManager global
status and removes the informations related to the current configured depots
and middleware.
### Response:
def _clear(cls):
"""This is only for testing pourposes, resets the DepotManager status
This is to simplify writing test fixtures, resets the DepotManager global
status and removes the informations related to the current configured depots
and middleware.
"""
cls._default_depot = None
cls._depots = {}
cls._middleware = None
cls._aliases = {} |
def start_server(socket, projectname, xmlfilename: str) -> None:
"""Start the *HydPy* server using the given socket.
The folder with the given `projectname` must be available within the
current working directory. The XML configuration file must be placed
within the project folder unless `xmlfilename` is an absolute file path.
The XML configuration file must be valid concerning the schema file
`HydPyConfigMultipleRuns.xsd` (see method |ServerState.initialise|
for further information).
"""
state.initialise(projectname, xmlfilename)
server = http.server.HTTPServer(('', int(socket)), HydPyServer)
server.serve_forever() | Start the *HydPy* server using the given socket.
The folder with the given `projectname` must be available within the
current working directory. The XML configuration file must be placed
within the project folder unless `xmlfilename` is an absolute file path.
The XML configuration file must be valid concerning the schema file
`HydPyConfigMultipleRuns.xsd` (see method |ServerState.initialise|
for further information). | Below is the the instruction that describes the task:
### Input:
Start the *HydPy* server using the given socket.
The folder with the given `projectname` must be available within the
current working directory. The XML configuration file must be placed
within the project folder unless `xmlfilename` is an absolute file path.
The XML configuration file must be valid concerning the schema file
`HydPyConfigMultipleRuns.xsd` (see method |ServerState.initialise|
for further information).
### Response:
def start_server(socket, projectname, xmlfilename: str) -> None:
"""Start the *HydPy* server using the given socket.
The folder with the given `projectname` must be available within the
current working directory. The XML configuration file must be placed
within the project folder unless `xmlfilename` is an absolute file path.
The XML configuration file must be valid concerning the schema file
`HydPyConfigMultipleRuns.xsd` (see method |ServerState.initialise|
for further information).
"""
state.initialise(projectname, xmlfilename)
server = http.server.HTTPServer(('', int(socket)), HydPyServer)
server.serve_forever() |
def scanResource(uri = None, listRegexp = None, verbosity=1, logFolder= "./logs"):
'''
[Optionally] recursive method to scan the files in a given folder.
:param uri: the URI to be scanned.
:param listRegexp: listRegexp is an array of <RegexpObject>.
:return: a dictionary where the key is the name of the file.
'''
i3visiotools.logger.setupLogger(loggerName="entify", verbosity=verbosity, logFolder=logFolder)
logger = logging.getLogger("entify")
results = {}
logger.debug("Looking for regular expressions in: " + uri)
import urllib2
foundExpr = getEntitiesByRegexp(data = urllib2.urlopen(uri).read(), listRegexp = listRegexp)
logger.debug("Updating the " + str(len(foundExpr)) + " results found on: " + uri)
results[uri] = foundExpr
return results | [Optionally] recursive method to scan the files in a given folder.
:param uri: the URI to be scanned.
:param listRegexp: listRegexp is an array of <RegexpObject>.
:return: a dictionary where the key is the name of the file. | Below is the the instruction that describes the task:
### Input:
[Optionally] recursive method to scan the files in a given folder.
:param uri: the URI to be scanned.
:param listRegexp: listRegexp is an array of <RegexpObject>.
:return: a dictionary where the key is the name of the file.
### Response:
def scanResource(uri = None, listRegexp = None, verbosity=1, logFolder= "./logs"):
'''
[Optionally] recursive method to scan the files in a given folder.
:param uri: the URI to be scanned.
:param listRegexp: listRegexp is an array of <RegexpObject>.
:return: a dictionary where the key is the name of the file.
'''
i3visiotools.logger.setupLogger(loggerName="entify", verbosity=verbosity, logFolder=logFolder)
logger = logging.getLogger("entify")
results = {}
logger.debug("Looking for regular expressions in: " + uri)
import urllib2
foundExpr = getEntitiesByRegexp(data = urllib2.urlopen(uri).read(), listRegexp = listRegexp)
logger.debug("Updating the " + str(len(foundExpr)) + " results found on: " + uri)
results[uri] = foundExpr
return results |
def _get_variables(self, rule, p_selectors, p_parents, p_children, scope, media, c_lineno, c_property, c_codestr):
"""
Implements @variables and @vars
"""
_rule = list(rule)
_rule[CODESTR] = c_codestr
_rule[PROPERTIES] = rule[CONTEXT]
self.manage_children(
_rule, p_selectors, p_parents, p_children, scope, media) | Implements @variables and @vars | Below is the the instruction that describes the task:
### Input:
Implements @variables and @vars
### Response:
def _get_variables(self, rule, p_selectors, p_parents, p_children, scope, media, c_lineno, c_property, c_codestr):
"""
Implements @variables and @vars
"""
_rule = list(rule)
_rule[CODESTR] = c_codestr
_rule[PROPERTIES] = rule[CONTEXT]
self.manage_children(
_rule, p_selectors, p_parents, p_children, scope, media) |
def set_maximum(self, q_data, marked, center, bin_lower, foothills):
"""
Grow a region at a certain bin level and check if the region has reached the maximum size.
Args:
q_data: Quantized data array
marked: Array marking points that are objects
center: Coordinates of the center pixel of the region being grown
bin_lower: Intensity level of lower bin being evaluated
foothills: List of points that are associated with a center but fall outside the the size or
intensity criteria
Returns:
True if the object is finished growing and False if the object should be grown again at the next
threshold level.
"""
as_bin = [] # pixels to be included in peak
as_glob = [] # pixels to be globbed up as part of foothills
marked_so_far = [] # pixels that have already been marked
will_be_considered_again = False
as_bin.append(center)
center_data = q_data[center]
while len(as_bin) > 0:
p = as_bin.pop(-1) # remove and return last pixel in as_bin
if marked[p] != self.UNMARKED: # already processed
continue
marked[p] = q_data[center]
marked_so_far.append(p)
# check neighbors
for index,val in np.ndenumerate(marked[p[0] - 1:p[0] + 2, p[1] - 1:p[1] + 2]):
# is neighbor part of peak or part of mountain?
if val == self.UNMARKED:
pixel = (index[0] - 1 + p[0],index[1] - 1 + p[1])
p_data = q_data[pixel]
if (not will_be_considered_again) and (p_data >= 0) and (p_data < center_data):
will_be_considered_again = True
if p_data >= bin_lower and (np.abs(center_data - p_data) <= self.delta):
as_bin.append(pixel)
# Do not check that this is the closest: this way, a narrow channel of globbed pixels form
elif p_data >= 0:
as_glob.append(pixel)
if bin_lower == 0:
will_be_considered_again = False
big_enough = len(marked_so_far) >= self.max_size
if big_enough:
# remove lower values within region of influence
foothills.append((center, as_glob))
elif will_be_considered_again: # remove the check if you want to ignore regions smaller than max_size
for m in marked_so_far:
marked[m] = self.UNMARKED
del as_bin[:]
del as_glob[:]
del marked_so_far[:]
return big_enough or (not will_be_considered_again) | Grow a region at a certain bin level and check if the region has reached the maximum size.
Args:
q_data: Quantized data array
marked: Array marking points that are objects
center: Coordinates of the center pixel of the region being grown
bin_lower: Intensity level of lower bin being evaluated
foothills: List of points that are associated with a center but fall outside the the size or
intensity criteria
Returns:
True if the object is finished growing and False if the object should be grown again at the next
threshold level. | Below is the the instruction that describes the task:
### Input:
Grow a region at a certain bin level and check if the region has reached the maximum size.
Args:
q_data: Quantized data array
marked: Array marking points that are objects
center: Coordinates of the center pixel of the region being grown
bin_lower: Intensity level of lower bin being evaluated
foothills: List of points that are associated with a center but fall outside the the size or
intensity criteria
Returns:
True if the object is finished growing and False if the object should be grown again at the next
threshold level.
### Response:
def set_maximum(self, q_data, marked, center, bin_lower, foothills):
"""
Grow a region at a certain bin level and check if the region has reached the maximum size.
Args:
q_data: Quantized data array
marked: Array marking points that are objects
center: Coordinates of the center pixel of the region being grown
bin_lower: Intensity level of lower bin being evaluated
foothills: List of points that are associated with a center but fall outside the the size or
intensity criteria
Returns:
True if the object is finished growing and False if the object should be grown again at the next
threshold level.
"""
as_bin = [] # pixels to be included in peak
as_glob = [] # pixels to be globbed up as part of foothills
marked_so_far = [] # pixels that have already been marked
will_be_considered_again = False
as_bin.append(center)
center_data = q_data[center]
while len(as_bin) > 0:
p = as_bin.pop(-1) # remove and return last pixel in as_bin
if marked[p] != self.UNMARKED: # already processed
continue
marked[p] = q_data[center]
marked_so_far.append(p)
# check neighbors
for index,val in np.ndenumerate(marked[p[0] - 1:p[0] + 2, p[1] - 1:p[1] + 2]):
# is neighbor part of peak or part of mountain?
if val == self.UNMARKED:
pixel = (index[0] - 1 + p[0],index[1] - 1 + p[1])
p_data = q_data[pixel]
if (not will_be_considered_again) and (p_data >= 0) and (p_data < center_data):
will_be_considered_again = True
if p_data >= bin_lower and (np.abs(center_data - p_data) <= self.delta):
as_bin.append(pixel)
# Do not check that this is the closest: this way, a narrow channel of globbed pixels form
elif p_data >= 0:
as_glob.append(pixel)
if bin_lower == 0:
will_be_considered_again = False
big_enough = len(marked_so_far) >= self.max_size
if big_enough:
# remove lower values within region of influence
foothills.append((center, as_glob))
elif will_be_considered_again: # remove the check if you want to ignore regions smaller than max_size
for m in marked_so_far:
marked[m] = self.UNMARKED
del as_bin[:]
del as_glob[:]
del marked_so_far[:]
return big_enough or (not will_be_considered_again) |
def getdebug(environ=os.environ, true_values=TRUE_VALUES):
'''
Get if app is expected to be ran in debug mode looking at environment
variables.
:param environ: environment dict-like object
:type environ: collections.abc.Mapping
:returns: True if debug contains a true-like string, False otherwise
:rtype: bool
'''
return environ.get('DEBUG', '').lower() in true_values | Get if app is expected to be ran in debug mode looking at environment
variables.
:param environ: environment dict-like object
:type environ: collections.abc.Mapping
:returns: True if debug contains a true-like string, False otherwise
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Get if app is expected to be ran in debug mode looking at environment
variables.
:param environ: environment dict-like object
:type environ: collections.abc.Mapping
:returns: True if debug contains a true-like string, False otherwise
:rtype: bool
### Response:
def getdebug(environ=os.environ, true_values=TRUE_VALUES):
'''
Get if app is expected to be ran in debug mode looking at environment
variables.
:param environ: environment dict-like object
:type environ: collections.abc.Mapping
:returns: True if debug contains a true-like string, False otherwise
:rtype: bool
'''
return environ.get('DEBUG', '').lower() in true_values |
def post_command(self, sender, name, result, args, kwargs):
"""
Call after we got the result of a redis command.
By default, let the instance manage the post_modify signal
"""
return self._instance.post_command(
sender=self,
name=name,
result=result,
args=args,
kwargs=kwargs
) | Call after we got the result of a redis command.
By default, let the instance manage the post_modify signal | Below is the the instruction that describes the task:
### Input:
Call after we got the result of a redis command.
By default, let the instance manage the post_modify signal
### Response:
def post_command(self, sender, name, result, args, kwargs):
"""
Call after we got the result of a redis command.
By default, let the instance manage the post_modify signal
"""
return self._instance.post_command(
sender=self,
name=name,
result=result,
args=args,
kwargs=kwargs
) |
def yaml_conf_as_dict(file_path, encoding=None):
"""
读入 yaml 配置文件,返回根据配置文件内容生成的字典类型变量
:param:
* file_path: (string) 需要读入的 yaml 配置文件长文件名
* encoding: (string) 文件编码
* msg: (string) 读取配置信息
:return:
* flag: (bool) 读取配置文件是否正确,正确返回 True,错误返回 False
* d: (dict) 如果读取配置文件正确返回的包含配置文件内容的字典,字典内容顺序与配置文件顺序保持一致
举例如下::
print('--- yaml_conf_as_dict demo---')
# 定义配置文件名
conf_filename = 'test_conf.yaml'
# 读取配置文件
ds = yaml_conf_as_dict(conf_filename, encoding='utf-8')
# 显示是否成功,所有 dict 的内容,dict 的 key 数量
print('flag:', ds[0])
print('dict length:', len(ds[1]))
print('msg:', len(ds[1]))
print('conf info: ', ds[1].get('tree'))
print('---')
执行结果::
--- yaml_conf_as_dict demo---
flag: True
dict length: 2
msg: Success
conf info: ['README.md', 'requirements.txt', {'hellopackage': ['__init__.py']},
{'test': ['__init__.py']}, {'doc': ['doc.rst']}]
---
"""
if not pathlib.Path(file_path).is_file():
return False, {}, 'File not exist'
try:
if sys.version > '3':
with open(file_path, 'r', encoding=encoding) as f:
d = OrderedDict(yaml.load(f.read()))
return True, d, 'Success'
else:
with open(file_path, 'r') as f:
d = OrderedDict(yaml.load(f.read()))
return True, d, 'Success'
except:
return False, {}, 'Unknow error' | 读入 yaml 配置文件,返回根据配置文件内容生成的字典类型变量
:param:
* file_path: (string) 需要读入的 yaml 配置文件长文件名
* encoding: (string) 文件编码
* msg: (string) 读取配置信息
:return:
* flag: (bool) 读取配置文件是否正确,正确返回 True,错误返回 False
* d: (dict) 如果读取配置文件正确返回的包含配置文件内容的字典,字典内容顺序与配置文件顺序保持一致
举例如下::
print('--- yaml_conf_as_dict demo---')
# 定义配置文件名
conf_filename = 'test_conf.yaml'
# 读取配置文件
ds = yaml_conf_as_dict(conf_filename, encoding='utf-8')
# 显示是否成功,所有 dict 的内容,dict 的 key 数量
print('flag:', ds[0])
print('dict length:', len(ds[1]))
print('msg:', len(ds[1]))
print('conf info: ', ds[1].get('tree'))
print('---')
执行结果::
--- yaml_conf_as_dict demo---
flag: True
dict length: 2
msg: Success
conf info: ['README.md', 'requirements.txt', {'hellopackage': ['__init__.py']},
{'test': ['__init__.py']}, {'doc': ['doc.rst']}]
--- | Below is the the instruction that describes the task:
### Input:
读入 yaml 配置文件,返回根据配置文件内容生成的字典类型变量
:param:
* file_path: (string) 需要读入的 yaml 配置文件长文件名
* encoding: (string) 文件编码
* msg: (string) 读取配置信息
:return:
* flag: (bool) 读取配置文件是否正确,正确返回 True,错误返回 False
* d: (dict) 如果读取配置文件正确返回的包含配置文件内容的字典,字典内容顺序与配置文件顺序保持一致
举例如下::
print('--- yaml_conf_as_dict demo---')
# 定义配置文件名
conf_filename = 'test_conf.yaml'
# 读取配置文件
ds = yaml_conf_as_dict(conf_filename, encoding='utf-8')
# 显示是否成功,所有 dict 的内容,dict 的 key 数量
print('flag:', ds[0])
print('dict length:', len(ds[1]))
print('msg:', len(ds[1]))
print('conf info: ', ds[1].get('tree'))
print('---')
执行结果::
--- yaml_conf_as_dict demo---
flag: True
dict length: 2
msg: Success
conf info: ['README.md', 'requirements.txt', {'hellopackage': ['__init__.py']},
{'test': ['__init__.py']}, {'doc': ['doc.rst']}]
---
### Response:
def yaml_conf_as_dict(file_path, encoding=None):
"""
读入 yaml 配置文件,返回根据配置文件内容生成的字典类型变量
:param:
* file_path: (string) 需要读入的 yaml 配置文件长文件名
* encoding: (string) 文件编码
* msg: (string) 读取配置信息
:return:
* flag: (bool) 读取配置文件是否正确,正确返回 True,错误返回 False
* d: (dict) 如果读取配置文件正确返回的包含配置文件内容的字典,字典内容顺序与配置文件顺序保持一致
举例如下::
print('--- yaml_conf_as_dict demo---')
# 定义配置文件名
conf_filename = 'test_conf.yaml'
# 读取配置文件
ds = yaml_conf_as_dict(conf_filename, encoding='utf-8')
# 显示是否成功,所有 dict 的内容,dict 的 key 数量
print('flag:', ds[0])
print('dict length:', len(ds[1]))
print('msg:', len(ds[1]))
print('conf info: ', ds[1].get('tree'))
print('---')
执行结果::
--- yaml_conf_as_dict demo---
flag: True
dict length: 2
msg: Success
conf info: ['README.md', 'requirements.txt', {'hellopackage': ['__init__.py']},
{'test': ['__init__.py']}, {'doc': ['doc.rst']}]
---
"""
if not pathlib.Path(file_path).is_file():
return False, {}, 'File not exist'
try:
if sys.version > '3':
with open(file_path, 'r', encoding=encoding) as f:
d = OrderedDict(yaml.load(f.read()))
return True, d, 'Success'
else:
with open(file_path, 'r') as f:
d = OrderedDict(yaml.load(f.read()))
return True, d, 'Success'
except:
return False, {}, 'Unknow error' |
def from_flag(cls, flagname, flag_values, other_flag_values=None):
"""Create a DuplicateFlagError by providing flag name and values.
Args:
flagname: Name of the flag being redefined.
flag_values: FlagValues object containing the first definition of
flagname.
other_flag_values: If this argument is not None, it should be the
FlagValues object where the second definition of flagname occurs.
If it is None, we assume that we're being called when attempting
to create the flag a second time, and we use the module calling
this one as the source of the second definition.
Returns:
An instance of DuplicateFlagError.
"""
first_module = flag_values.FindModuleDefiningFlag(
flagname, default='<unknown>')
if other_flag_values is None:
second_module = _helpers.GetCallingModule()
else:
second_module = other_flag_values.FindModuleDefiningFlag(
flagname, default='<unknown>')
flag_summary = flag_values[flagname].help
msg = ("The flag '%s' is defined twice. First from %s, Second from %s. "
"Description from first occurrence: %s") % (
flagname, first_module, second_module, flag_summary)
return cls(msg) | Create a DuplicateFlagError by providing flag name and values.
Args:
flagname: Name of the flag being redefined.
flag_values: FlagValues object containing the first definition of
flagname.
other_flag_values: If this argument is not None, it should be the
FlagValues object where the second definition of flagname occurs.
If it is None, we assume that we're being called when attempting
to create the flag a second time, and we use the module calling
this one as the source of the second definition.
Returns:
An instance of DuplicateFlagError. | Below is the the instruction that describes the task:
### Input:
Create a DuplicateFlagError by providing flag name and values.
Args:
flagname: Name of the flag being redefined.
flag_values: FlagValues object containing the first definition of
flagname.
other_flag_values: If this argument is not None, it should be the
FlagValues object where the second definition of flagname occurs.
If it is None, we assume that we're being called when attempting
to create the flag a second time, and we use the module calling
this one as the source of the second definition.
Returns:
An instance of DuplicateFlagError.
### Response:
def from_flag(cls, flagname, flag_values, other_flag_values=None):
"""Create a DuplicateFlagError by providing flag name and values.
Args:
flagname: Name of the flag being redefined.
flag_values: FlagValues object containing the first definition of
flagname.
other_flag_values: If this argument is not None, it should be the
FlagValues object where the second definition of flagname occurs.
If it is None, we assume that we're being called when attempting
to create the flag a second time, and we use the module calling
this one as the source of the second definition.
Returns:
An instance of DuplicateFlagError.
"""
first_module = flag_values.FindModuleDefiningFlag(
flagname, default='<unknown>')
if other_flag_values is None:
second_module = _helpers.GetCallingModule()
else:
second_module = other_flag_values.FindModuleDefiningFlag(
flagname, default='<unknown>')
flag_summary = flag_values[flagname].help
msg = ("The flag '%s' is defined twice. First from %s, Second from %s. "
"Description from first occurrence: %s") % (
flagname, first_module, second_module, flag_summary)
return cls(msg) |
def upload(self, f):
"""Upload a file to the Puush account.
Parameters:
* f: The file. Either a path to a file or a file-like object.
"""
if hasattr(f, 'read'):
needs_closing = False
else:
f = open(f, 'rb')
needs_closing = True
# The Puush server can't handle non-ASCII filenames.
# The official Puush desktop app actually substitutes ? for
# non-ISO-8859-1 characters, which helps some Unicode filenames,
# but some are still let through and encounter server errors.
# Try uploading a file named åäö.txt through the desktop app -
# it won't work. It's better to let this Python API do that,
# however, with the behavior probably intended in the desktop app.
filename = os.path.basename(f.name).encode('ascii', 'replace')
filename = filename.decode('ascii') # Requests doesn't like bytes
md5 = md5_file(f)
data = {
'z': 'meaningless',
'c': md5
}
files = {
'f': (filename, f)
}
res = self._api_request('up', data=data, files=files)[0]
if res[0] == '-1':
raise PuushError("File upload failed.")
elif res[0] == '-3':
raise PuushError("File upload failed: hash didn't match with "
"the file the server received.")
if needs_closing:
f.close()
_, url, id, size = res
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
return self._File(id, url, filename, now, 0) | Upload a file to the Puush account.
Parameters:
* f: The file. Either a path to a file or a file-like object. | Below is the the instruction that describes the task:
### Input:
Upload a file to the Puush account.
Parameters:
* f: The file. Either a path to a file or a file-like object.
### Response:
def upload(self, f):
"""Upload a file to the Puush account.
Parameters:
* f: The file. Either a path to a file or a file-like object.
"""
if hasattr(f, 'read'):
needs_closing = False
else:
f = open(f, 'rb')
needs_closing = True
# The Puush server can't handle non-ASCII filenames.
# The official Puush desktop app actually substitutes ? for
# non-ISO-8859-1 characters, which helps some Unicode filenames,
# but some are still let through and encounter server errors.
# Try uploading a file named åäö.txt through the desktop app -
# it won't work. It's better to let this Python API do that,
# however, with the behavior probably intended in the desktop app.
filename = os.path.basename(f.name).encode('ascii', 'replace')
filename = filename.decode('ascii') # Requests doesn't like bytes
md5 = md5_file(f)
data = {
'z': 'meaningless',
'c': md5
}
files = {
'f': (filename, f)
}
res = self._api_request('up', data=data, files=files)[0]
if res[0] == '-1':
raise PuushError("File upload failed.")
elif res[0] == '-3':
raise PuushError("File upload failed: hash didn't match with "
"the file the server received.")
if needs_closing:
f.close()
_, url, id, size = res
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
return self._File(id, url, filename, now, 0) |
def getnodes(fnods, up=None, verbose=False):
"""
Reads t.1.nodes, returns a list of nodes.
Example:
>>> self.getnodes("t.1.node", MyBar("nodes:"))
[(0.0, 0.0, 0.0), (4.0, 0.0, 0.0), (0.0, 4.0, 0.0), (-4.0, 0.0, 0.0),
(0.0, 0.0, 4.0), (0.0, -4.0, 0.0), (0.0, -0.0, -4.0), (-2.0, 0.0,
-2.0), (-2.0, 2.0, 0.0), (0.0, 2.0, -2.0), (0.0, -2.0, -2.0), (2.0,
0.0, -2.0), (2.0, 2.0, 0.0), ... ]
"""
f=open(fnods)
l=[int(x) for x in f.readline().split()]
npoints,dim,nattrib,nbound=l
if dim == 2:
ndapp = [0.0]
else:
ndapp = []
if verbose and up is not None: up.init(npoints)
nodes=[]
for line in f:
if line[0]=="#": continue
l=[float(x) for x in line.split()]
l = l[:(dim + 1)]
assert_( int(l[0])==len(nodes)+1 )
l = l[1:]
nodes.append(tuple(l + ndapp))
if verbose and up is not None: up.update(len(nodes))
assert_( npoints==len(nodes) )
return nodes | Reads t.1.nodes, returns a list of nodes.
Example:
>>> self.getnodes("t.1.node", MyBar("nodes:"))
[(0.0, 0.0, 0.0), (4.0, 0.0, 0.0), (0.0, 4.0, 0.0), (-4.0, 0.0, 0.0),
(0.0, 0.0, 4.0), (0.0, -4.0, 0.0), (0.0, -0.0, -4.0), (-2.0, 0.0,
-2.0), (-2.0, 2.0, 0.0), (0.0, 2.0, -2.0), (0.0, -2.0, -2.0), (2.0,
0.0, -2.0), (2.0, 2.0, 0.0), ... ] | Below is the the instruction that describes the task:
### Input:
Reads t.1.nodes, returns a list of nodes.
Example:
>>> self.getnodes("t.1.node", MyBar("nodes:"))
[(0.0, 0.0, 0.0), (4.0, 0.0, 0.0), (0.0, 4.0, 0.0), (-4.0, 0.0, 0.0),
(0.0, 0.0, 4.0), (0.0, -4.0, 0.0), (0.0, -0.0, -4.0), (-2.0, 0.0,
-2.0), (-2.0, 2.0, 0.0), (0.0, 2.0, -2.0), (0.0, -2.0, -2.0), (2.0,
0.0, -2.0), (2.0, 2.0, 0.0), ... ]
### Response:
def getnodes(fnods, up=None, verbose=False):
"""
Reads t.1.nodes, returns a list of nodes.
Example:
>>> self.getnodes("t.1.node", MyBar("nodes:"))
[(0.0, 0.0, 0.0), (4.0, 0.0, 0.0), (0.0, 4.0, 0.0), (-4.0, 0.0, 0.0),
(0.0, 0.0, 4.0), (0.0, -4.0, 0.0), (0.0, -0.0, -4.0), (-2.0, 0.0,
-2.0), (-2.0, 2.0, 0.0), (0.0, 2.0, -2.0), (0.0, -2.0, -2.0), (2.0,
0.0, -2.0), (2.0, 2.0, 0.0), ... ]
"""
f=open(fnods)
l=[int(x) for x in f.readline().split()]
npoints,dim,nattrib,nbound=l
if dim == 2:
ndapp = [0.0]
else:
ndapp = []
if verbose and up is not None: up.init(npoints)
nodes=[]
for line in f:
if line[0]=="#": continue
l=[float(x) for x in line.split()]
l = l[:(dim + 1)]
assert_( int(l[0])==len(nodes)+1 )
l = l[1:]
nodes.append(tuple(l + ndapp))
if verbose and up is not None: up.update(len(nodes))
assert_( npoints==len(nodes) )
return nodes |
def doi_exists(self, doi):
"""
This method retrieve a boolean according to the existence of a crossref
DOI number. It returns False if the API results a 404 status code.
args: Crossref DOI id (String)
return: Boolean
Example 1:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001')
True
Example 2:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001_invalid_doi')
False
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, doi])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
only_headers=True,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return False
return True | This method retrieve a boolean according to the existence of a crossref
DOI number. It returns False if the API results a 404 status code.
args: Crossref DOI id (String)
return: Boolean
Example 1:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001')
True
Example 2:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001_invalid_doi')
False | Below is the the instruction that describes the task:
### Input:
This method retrieve a boolean according to the existence of a crossref
DOI number. It returns False if the API results a 404 status code.
args: Crossref DOI id (String)
return: Boolean
Example 1:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001')
True
Example 2:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001_invalid_doi')
False
### Response:
def doi_exists(self, doi):
"""
This method retrieve a boolean according to the existence of a crossref
DOI number. It returns False if the API results a 404 status code.
args: Crossref DOI id (String)
return: Boolean
Example 1:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001')
True
Example 2:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.doi_exists('10.1590/S0004-28032013005000001_invalid_doi')
False
"""
request_url = build_url_endpoint(
'/'.join([self.ENDPOINT, doi])
)
request_params = {}
result = self.do_http_request(
'get',
request_url,
data=request_params,
only_headers=True,
custom_header=str(self.etiquette)
)
if result.status_code == 404:
return False
return True |
def return_locals(func):
'''Modifies decorated function to return its locals'''
@functools.wraps(func)
def wrap(*args, **kwargs):
frames = []
def tracer(frame, event, arg): # pragma: no cover
# coverage does not work in this function because the tracer
# is deactivated here
frames.append(frame)
sys.settrace(old_tracer)
if old_tracer is not None:
return old_tracer(frame, event, arg)
old_tracer = sys.gettrace()
# tracer is activated on next call, return or exception
sys.settrace(tracer)
try:
func(*args, **kwargs)
finally:
sys.settrace(old_tracer)
assert len(frames) == 1
argspec = inspect.getargspec(func)
argnames = list(argspec.args)
if argspec.varargs is not None:
argnames.append(argspec.varargs)
if argspec.keywords is not None:
argnames.append(argspec.keywords)
return {name: value for name, value in frames.pop(0).f_locals.items()
if name not in argnames}
return wrap | Modifies decorated function to return its locals | Below is the the instruction that describes the task:
### Input:
Modifies decorated function to return its locals
### Response:
def return_locals(func):
'''Modifies decorated function to return its locals'''
@functools.wraps(func)
def wrap(*args, **kwargs):
frames = []
def tracer(frame, event, arg): # pragma: no cover
# coverage does not work in this function because the tracer
# is deactivated here
frames.append(frame)
sys.settrace(old_tracer)
if old_tracer is not None:
return old_tracer(frame, event, arg)
old_tracer = sys.gettrace()
# tracer is activated on next call, return or exception
sys.settrace(tracer)
try:
func(*args, **kwargs)
finally:
sys.settrace(old_tracer)
assert len(frames) == 1
argspec = inspect.getargspec(func)
argnames = list(argspec.args)
if argspec.varargs is not None:
argnames.append(argspec.varargs)
if argspec.keywords is not None:
argnames.append(argspec.keywords)
return {name: value for name, value in frames.pop(0).f_locals.items()
if name not in argnames}
return wrap |
def print_hierarchy(self, level: int = 0, file: IO[str] = sys.stdout) \
-> None:
"""
Print this comparison and its children with indentation to represent
nesting.
:param level: The level of indentation to use. This is mostly for
internal use, but you can use it to inset the root
comparison.
:param file: The stream to print to. Defaults to stdout.
"""
print(' ' * self._INDENT_SIZE * level + str(self), file=file) | Print this comparison and its children with indentation to represent
nesting.
:param level: The level of indentation to use. This is mostly for
internal use, but you can use it to inset the root
comparison.
:param file: The stream to print to. Defaults to stdout. | Below is the the instruction that describes the task:
### Input:
Print this comparison and its children with indentation to represent
nesting.
:param level: The level of indentation to use. This is mostly for
internal use, but you can use it to inset the root
comparison.
:param file: The stream to print to. Defaults to stdout.
### Response:
def print_hierarchy(self, level: int = 0, file: IO[str] = sys.stdout) \
-> None:
"""
Print this comparison and its children with indentation to represent
nesting.
:param level: The level of indentation to use. This is mostly for
internal use, but you can use it to inset the root
comparison.
:param file: The stream to print to. Defaults to stdout.
"""
print(' ' * self._INDENT_SIZE * level + str(self), file=file) |
def inorder(self, funct, stopOn=None):
""" Iterates in order, calling the function with the current node.
If stopOn is set to True or False, it will stop on true or false.
"""
if stopOn is None:
for i in self.children:
i.inorder(funct)
else:
for i in self.children:
if i.inorder(funct) == stopOn:
return stopOn
return funct(self) | Iterates in order, calling the function with the current node.
If stopOn is set to True or False, it will stop on true or false. | Below is the the instruction that describes the task:
### Input:
Iterates in order, calling the function with the current node.
If stopOn is set to True or False, it will stop on true or false.
### Response:
def inorder(self, funct, stopOn=None):
""" Iterates in order, calling the function with the current node.
If stopOn is set to True or False, it will stop on true or false.
"""
if stopOn is None:
for i in self.children:
i.inorder(funct)
else:
for i in self.children:
if i.inorder(funct) == stopOn:
return stopOn
return funct(self) |
def equals(self, other):
"""
Ensures :attr:`subject` is equal to *other*.
"""
self._run(unittest_case.assertEqual, (self._subject, other))
return ChainInspector(self._subject) | Ensures :attr:`subject` is equal to *other*. | Below is the the instruction that describes the task:
### Input:
Ensures :attr:`subject` is equal to *other*.
### Response:
def equals(self, other):
"""
Ensures :attr:`subject` is equal to *other*.
"""
self._run(unittest_case.assertEqual, (self._subject, other))
return ChainInspector(self._subject) |
def process_post_tag(self, bulk_mode, api_tag):
"""
Create or update a Tag related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_tag: the API data for the Tag
:return: the Tag object
"""
tag = None
# try to get from the ref data map if in bulk mode
if bulk_mode:
tag = self.ref_data_map["tags"].get(api_tag["ID"])
# double check the db before giving up, we may have sync'd it in a previous run
if not tag:
tag, created = Tag.objects.get_or_create(site_id=self.site_id,
wp_id=api_tag["ID"],
defaults=self.api_object_data("tag", api_tag))
if tag and not created:
self.update_existing_tag(tag, api_tag)
# add to ref data map so later lookups work
if tag:
self.ref_data_map["tags"][api_tag["ID"]] = tag
return tag | Create or update a Tag related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_tag: the API data for the Tag
:return: the Tag object | Below is the the instruction that describes the task:
### Input:
Create or update a Tag related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_tag: the API data for the Tag
:return: the Tag object
### Response:
def process_post_tag(self, bulk_mode, api_tag):
"""
Create or update a Tag related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_tag: the API data for the Tag
:return: the Tag object
"""
tag = None
# try to get from the ref data map if in bulk mode
if bulk_mode:
tag = self.ref_data_map["tags"].get(api_tag["ID"])
# double check the db before giving up, we may have sync'd it in a previous run
if not tag:
tag, created = Tag.objects.get_or_create(site_id=self.site_id,
wp_id=api_tag["ID"],
defaults=self.api_object_data("tag", api_tag))
if tag and not created:
self.update_existing_tag(tag, api_tag)
# add to ref data map so later lookups work
if tag:
self.ref_data_map["tags"][api_tag["ID"]] = tag
return tag |
def create_secgroup(self, name, desc):
"""
Creates a new server security group.
:param str name: The name of the security group to create.
:param str desc: A short description of the group.
"""
self.nova.security_groups.create(name, desc) | Creates a new server security group.
:param str name: The name of the security group to create.
:param str desc: A short description of the group. | Below is the the instruction that describes the task:
### Input:
Creates a new server security group.
:param str name: The name of the security group to create.
:param str desc: A short description of the group.
### Response:
def create_secgroup(self, name, desc):
"""
Creates a new server security group.
:param str name: The name of the security group to create.
:param str desc: A short description of the group.
"""
self.nova.security_groups.create(name, desc) |
def update(self, data=None, timeout=-1, custom_headers=None, force=False):
"""Makes a PUT request to update a resource when a request body is required.
Args:
data: Data to update the resource.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
custom_headers: Allows to add custom HTTP headers.
force: Force the update operation.
Returns:
A dict with the updated resource data.
"""
uri = self.data['uri']
resource = deepcopy(self.data)
resource.update(data)
logger.debug('Update async (uri = %s, resource = %s)' %
(uri, str(resource)))
self.data = self._helper.update(resource, uri, force, timeout, custom_headers)
return self | Makes a PUT request to update a resource when a request body is required.
Args:
data: Data to update the resource.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
custom_headers: Allows to add custom HTTP headers.
force: Force the update operation.
Returns:
A dict with the updated resource data. | Below is the the instruction that describes the task:
### Input:
Makes a PUT request to update a resource when a request body is required.
Args:
data: Data to update the resource.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
custom_headers: Allows to add custom HTTP headers.
force: Force the update operation.
Returns:
A dict with the updated resource data.
### Response:
def update(self, data=None, timeout=-1, custom_headers=None, force=False):
"""Makes a PUT request to update a resource when a request body is required.
Args:
data: Data to update the resource.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
custom_headers: Allows to add custom HTTP headers.
force: Force the update operation.
Returns:
A dict with the updated resource data.
"""
uri = self.data['uri']
resource = deepcopy(self.data)
resource.update(data)
logger.debug('Update async (uri = %s, resource = %s)' %
(uri, str(resource)))
self.data = self._helper.update(resource, uri, force, timeout, custom_headers)
return self |
def __graceful_shutdown(self):
""" call shutdown routines """
retcode = 1
self.log.info("Trying to shutdown gracefully...")
retcode = self.core.plugins_end_test(retcode)
retcode = self.core.plugins_post_process(retcode)
self.log.info("Done graceful shutdown")
return retcode | call shutdown routines | Below is the the instruction that describes the task:
### Input:
call shutdown routines
### Response:
def __graceful_shutdown(self):
""" call shutdown routines """
retcode = 1
self.log.info("Trying to shutdown gracefully...")
retcode = self.core.plugins_end_test(retcode)
retcode = self.core.plugins_post_process(retcode)
self.log.info("Done graceful shutdown")
return retcode |
def extend_service_volume(self, stack, service, volume, args):
"""扩容存储卷
为指定名称的服务增加存储卷资源,并挂载到部署的容器中。
Args:
- stack: 服务所属的服务组名称
- service: 服务名
- volume: 存储卷名
- args: 请求参数(json),参考 http://kirk-docs.qiniu.com/apidocs/
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回空dict{},失败返回{"error": "<errMsg string>"}
- ResponseInfo 请求的Response信息
"""
url = '{0}/v3/stacks/{1}/services/{2}/volumes/{3}/extend'.format(self.host, stack, service, volume)
return self.__post(url, args) | 扩容存储卷
为指定名称的服务增加存储卷资源,并挂载到部署的容器中。
Args:
- stack: 服务所属的服务组名称
- service: 服务名
- volume: 存储卷名
- args: 请求参数(json),参考 http://kirk-docs.qiniu.com/apidocs/
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回空dict{},失败返回{"error": "<errMsg string>"}
- ResponseInfo 请求的Response信息 | Below is the the instruction that describes the task:
### Input:
扩容存储卷
为指定名称的服务增加存储卷资源,并挂载到部署的容器中。
Args:
- stack: 服务所属的服务组名称
- service: 服务名
- volume: 存储卷名
- args: 请求参数(json),参考 http://kirk-docs.qiniu.com/apidocs/
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回空dict{},失败返回{"error": "<errMsg string>"}
- ResponseInfo 请求的Response信息
### Response:
def extend_service_volume(self, stack, service, volume, args):
"""扩容存储卷
为指定名称的服务增加存储卷资源,并挂载到部署的容器中。
Args:
- stack: 服务所属的服务组名称
- service: 服务名
- volume: 存储卷名
- args: 请求参数(json),参考 http://kirk-docs.qiniu.com/apidocs/
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回空dict{},失败返回{"error": "<errMsg string>"}
- ResponseInfo 请求的Response信息
"""
url = '{0}/v3/stacks/{1}/services/{2}/volumes/{3}/extend'.format(self.host, stack, service, volume)
return self.__post(url, args) |
def _handle_watch_message(self, message):
"""
Processes a binary message received from the watch and broadcasts the relevant events.
:param message: A raw message from the watch, without any transport framing.
:type message: bytes
"""
if self.log_protocol_level is not None:
logger.log(self.log_protocol_level, "<- %s", hexlify(message).decode())
message = self.pending_bytes + message
while len(message) >= 4:
try:
packet, length = PebblePacket.parse_message(message)
except IncompleteMessage:
self.pending_bytes = message
break
except:
# At this point we've failed to deconstruct the message via normal means, but we don't want to end
# up permanently desynced (because we wiped a partial message), nor do we want to get stuck (because
# we didn't wipe anything). We therefore parse the packet length manually and skip ahead that far.
# If the expected length is 0, we wipe everything to ensure forward motion (but we are quite probably
# screwed).
expected_length, = struct.unpack('!H', message[:2])
if expected_length == 0:
self.pending_bytes = b''
else:
self.pending_bytes = message[expected_length + 4:]
raise
self.event_handler.broadcast_event("raw_inbound", message[:length])
if self.log_packet_level is not None:
logger.log(self.log_packet_level, "<- %s", packet)
message = message[length:]
self.event_handler.broadcast_event((_EventType.Watch, type(packet)), packet)
if length == 0:
break
self.pending_bytes = message | Processes a binary message received from the watch and broadcasts the relevant events.
:param message: A raw message from the watch, without any transport framing.
:type message: bytes | Below is the the instruction that describes the task:
### Input:
Processes a binary message received from the watch and broadcasts the relevant events.
:param message: A raw message from the watch, without any transport framing.
:type message: bytes
### Response:
def _handle_watch_message(self, message):
"""
Processes a binary message received from the watch and broadcasts the relevant events.
:param message: A raw message from the watch, without any transport framing.
:type message: bytes
"""
if self.log_protocol_level is not None:
logger.log(self.log_protocol_level, "<- %s", hexlify(message).decode())
message = self.pending_bytes + message
while len(message) >= 4:
try:
packet, length = PebblePacket.parse_message(message)
except IncompleteMessage:
self.pending_bytes = message
break
except:
# At this point we've failed to deconstruct the message via normal means, but we don't want to end
# up permanently desynced (because we wiped a partial message), nor do we want to get stuck (because
# we didn't wipe anything). We therefore parse the packet length manually and skip ahead that far.
# If the expected length is 0, we wipe everything to ensure forward motion (but we are quite probably
# screwed).
expected_length, = struct.unpack('!H', message[:2])
if expected_length == 0:
self.pending_bytes = b''
else:
self.pending_bytes = message[expected_length + 4:]
raise
self.event_handler.broadcast_event("raw_inbound", message[:length])
if self.log_packet_level is not None:
logger.log(self.log_packet_level, "<- %s", packet)
message = message[length:]
self.event_handler.broadcast_event((_EventType.Watch, type(packet)), packet)
if length == 0:
break
self.pending_bytes = message |
def _CollectHistoryAgg_(contactHist, fieldHistObj, fieldName):
"""
Return updated history dictionary with new field change
:param dict contactHist: Existing contact history dictionary
:param dict fieldHistObj: Output of _CollectHistory_
:param string fieldName: field name
"""
if fieldHistObj!={}:
if fieldName not in contactHist.keys():
contactHist[fieldName] = {}
for lookupType in fieldHistObj.keys():
contactHist[fieldName][lookupType] = fieldHistObj[lookupType]
return contactHist | Return updated history dictionary with new field change
:param dict contactHist: Existing contact history dictionary
:param dict fieldHistObj: Output of _CollectHistory_
:param string fieldName: field name | Below is the the instruction that describes the task:
### Input:
Return updated history dictionary with new field change
:param dict contactHist: Existing contact history dictionary
:param dict fieldHistObj: Output of _CollectHistory_
:param string fieldName: field name
### Response:
def _CollectHistoryAgg_(contactHist, fieldHistObj, fieldName):
"""
Return updated history dictionary with new field change
:param dict contactHist: Existing contact history dictionary
:param dict fieldHistObj: Output of _CollectHistory_
:param string fieldName: field name
"""
if fieldHistObj!={}:
if fieldName not in contactHist.keys():
contactHist[fieldName] = {}
for lookupType in fieldHistObj.keys():
contactHist[fieldName][lookupType] = fieldHistObj[lookupType]
return contactHist |
def get_decoders(decoder_num,
input_layer,
encoded_layer,
head_num,
hidden_dim,
attention_activation=None,
feed_forward_activation='relu',
dropout_rate=0.0,
trainable=True):
"""Get decoders.
:param decoder_num: Number of decoder components.
:param input_layer: Input layer.
:param encoded_layer: Encoded layer from encoder.
:param head_num: Number of heads in multi-head self-attention.
:param hidden_dim: Hidden dimension of feed forward layer.
:param attention_activation: Activation for multi-head self-attention.
:param feed_forward_activation: Activation for feed-forward layer.
:param dropout_rate: Dropout rate.
:param trainable: Whether the layers are trainable.
:return: Output layer.
"""
last_layer = input_layer
for i in range(decoder_num):
last_layer = get_decoder_component(
name='Decoder-%d' % (i + 1),
input_layer=last_layer,
encoded_layer=encoded_layer,
head_num=head_num,
hidden_dim=hidden_dim,
attention_activation=attention_activation,
feed_forward_activation=feed_forward_activation,
dropout_rate=dropout_rate,
trainable=trainable,
)
return last_layer | Get decoders.
:param decoder_num: Number of decoder components.
:param input_layer: Input layer.
:param encoded_layer: Encoded layer from encoder.
:param head_num: Number of heads in multi-head self-attention.
:param hidden_dim: Hidden dimension of feed forward layer.
:param attention_activation: Activation for multi-head self-attention.
:param feed_forward_activation: Activation for feed-forward layer.
:param dropout_rate: Dropout rate.
:param trainable: Whether the layers are trainable.
:return: Output layer. | Below is the the instruction that describes the task:
### Input:
Get decoders.
:param decoder_num: Number of decoder components.
:param input_layer: Input layer.
:param encoded_layer: Encoded layer from encoder.
:param head_num: Number of heads in multi-head self-attention.
:param hidden_dim: Hidden dimension of feed forward layer.
:param attention_activation: Activation for multi-head self-attention.
:param feed_forward_activation: Activation for feed-forward layer.
:param dropout_rate: Dropout rate.
:param trainable: Whether the layers are trainable.
:return: Output layer.
### Response:
def get_decoders(decoder_num,
input_layer,
encoded_layer,
head_num,
hidden_dim,
attention_activation=None,
feed_forward_activation='relu',
dropout_rate=0.0,
trainable=True):
"""Get decoders.
:param decoder_num: Number of decoder components.
:param input_layer: Input layer.
:param encoded_layer: Encoded layer from encoder.
:param head_num: Number of heads in multi-head self-attention.
:param hidden_dim: Hidden dimension of feed forward layer.
:param attention_activation: Activation for multi-head self-attention.
:param feed_forward_activation: Activation for feed-forward layer.
:param dropout_rate: Dropout rate.
:param trainable: Whether the layers are trainable.
:return: Output layer.
"""
last_layer = input_layer
for i in range(decoder_num):
last_layer = get_decoder_component(
name='Decoder-%d' % (i + 1),
input_layer=last_layer,
encoded_layer=encoded_layer,
head_num=head_num,
hidden_dim=hidden_dim,
attention_activation=attention_activation,
feed_forward_activation=feed_forward_activation,
dropout_rate=dropout_rate,
trainable=trainable,
)
return last_layer |
def iter_sources(self):
"""Iterates over all source names and IDs."""
for src_id in xrange(self.get_source_count()):
yield src_id, self.get_source_name(src_id) | Iterates over all source names and IDs. | Below is the the instruction that describes the task:
### Input:
Iterates over all source names and IDs.
### Response:
def iter_sources(self):
"""Iterates over all source names and IDs."""
for src_id in xrange(self.get_source_count()):
yield src_id, self.get_source_name(src_id) |
def get_terms(self):
''' GROUP BY is a shortcut to only getting the first in every list of group '''
if not self.terms.empty:
return self.terms
if self.from_backup:
self.terms = open_pickle(TERMS_BACKUP_PATH)
return self.terms
engine = create_engine(self.db_url)
data = """
SELECT t.id as tid, t.ilx, t.label, t.definition, t.type, t.comment, t.version
FROM terms t
GROUP BY t.ilx
"""
self.terms = pd.read_sql(data, engine)
create_pickle(self.terms, TERMS_BACKUP_PATH)
return self.terms | GROUP BY is a shortcut to only getting the first in every list of group | Below is the the instruction that describes the task:
### Input:
GROUP BY is a shortcut to only getting the first in every list of group
### Response:
def get_terms(self):
''' GROUP BY is a shortcut to only getting the first in every list of group '''
if not self.terms.empty:
return self.terms
if self.from_backup:
self.terms = open_pickle(TERMS_BACKUP_PATH)
return self.terms
engine = create_engine(self.db_url)
data = """
SELECT t.id as tid, t.ilx, t.label, t.definition, t.type, t.comment, t.version
FROM terms t
GROUP BY t.ilx
"""
self.terms = pd.read_sql(data, engine)
create_pickle(self.terms, TERMS_BACKUP_PATH)
return self.terms |
def listar_por_equip(self, equip_id):
    """List every environment associated with a specific equipment.

    :param equip_id: Identifier of the equipment to query.

    :return: Dictionary with the following structure:

    ::

        {'ambiente': {'id': < id_ambiente >,
        'link': < link >,
        'id_divisao': < id_divisao >,
        'nome_divisao': < nome_divisao >,
        'id_ambiente_logico': < id_ambiente_logico >,
        'nome_ambiente_logico': < nome_ambiente_logico >,
        'id_grupo_l3': < id_grupo_l3 >,
        'nome_grupo_l3': < nome_grupo_l3 >,
        'id_filter': < id_filter >,
        'filter_name': < filter_name >,
        'ambiente_rede': < ambiente_rede >}}

    :raise InvalidParameterError: ``equip_id`` was not provided.
    :raise DataBaseError: networkapi failed while accessing the database.
    :raise XMLError: networkapi failed while building the response XML.
    """
    # Guard clause: the API requires an equipment id.
    if equip_id is None:
        raise InvalidParameterError(
            u'O id do equipamento não foi informado.')

    endpoint = 'ambiente/equip/' + str(equip_id) + '/'
    code, xml = self.submit(None, 'GET', endpoint)
    return self.response(code, xml)
:return: Dicionário com a seguinte estrutura:
::
{'ambiente': {'id': < id_ambiente >,
'link': < link >,
'id_divisao': < id_divisao >,
'nome_divisao': < nome_divisao >,
'id_ambiente_logico': < id_ambiente_logico >,
'nome_ambiente_logico': < nome_ambiente_logico >,
'id_grupo_l3': < id_grupo_l3 >,
'nome_grupo_l3': < nome_grupo_l3 >,
'id_filter': < id_filter >,
'filter_name': < filter_name >,
'ambiente_rede': < ambiente_rede >}}
:raise DataBaseError: Falha na networkapi ao acessar o banco de dados.
:raise XMLError: Falha na networkapi ao gerar o XML de resposta. | Below is the the instruction that describes the task:
### Input:
Lista todos os ambientes por equipamento especifico.
:return: Dicionário com a seguinte estrutura:
::
{'ambiente': {'id': < id_ambiente >,
'link': < link >,
'id_divisao': < id_divisao >,
'nome_divisao': < nome_divisao >,
'id_ambiente_logico': < id_ambiente_logico >,
'nome_ambiente_logico': < nome_ambiente_logico >,
'id_grupo_l3': < id_grupo_l3 >,
'nome_grupo_l3': < nome_grupo_l3 >,
'id_filter': < id_filter >,
'filter_name': < filter_name >,
'ambiente_rede': < ambiente_rede >}}
:raise DataBaseError: Falha na networkapi ao acessar o banco de dados.
:raise XMLError: Falha na networkapi ao gerar o XML de resposta.
### Response:
def listar_por_equip(self, equip_id):
"""Lista todos os ambientes por equipamento especifico.
:return: Dicionário com a seguinte estrutura:
::
{'ambiente': {'id': < id_ambiente >,
'link': < link >,
'id_divisao': < id_divisao >,
'nome_divisao': < nome_divisao >,
'id_ambiente_logico': < id_ambiente_logico >,
'nome_ambiente_logico': < nome_ambiente_logico >,
'id_grupo_l3': < id_grupo_l3 >,
'nome_grupo_l3': < nome_grupo_l3 >,
'id_filter': < id_filter >,
'filter_name': < filter_name >,
'ambiente_rede': < ambiente_rede >}}
:raise DataBaseError: Falha na networkapi ao acessar o banco de dados.
:raise XMLError: Falha na networkapi ao gerar o XML de resposta.
"""
if equip_id is None:
raise InvalidParameterError(
u'O id do equipamento não foi informado.')
url = 'ambiente/equip/' + str(equip_id) + '/'
code, xml = self.submit(None, 'GET', url)
return self.response(code, xml) |
def moreland_adjusthue(msh, m_unsat):
    """Moreland's AdjustHue procedure to adjust the hue value of an Msh
    color based on ... some criterion.

    *msh* should be of shape (3, ). *m_unsat* is a scalar.

    Return value is the adjusted h (hue) value.
    """
    magnitude = msh[M]
    if magnitude >= m_unsat:
        # Already at least as saturated as the target: "Best we can do".
        return msh[H]

    spin = msh[S] * np.sqrt(m_unsat ** 2 - magnitude ** 2) / (magnitude * np.sin(msh[S]))
    # "Spin away from purple": direction depends on which side of -pi/3
    # the hue lies on.
    if msh[H] > -np.pi / 3:
        return msh[H] + spin
    return msh[H] - spin
based on ... some criterion.
*msh* should be of of shape (3, ). *m_unsat* is a scalar.
Return value is the adjusted h (hue) value. | Below is the the instruction that describes the task:
### Input:
Moreland's AdjustHue procedure to adjust the hue value of an Msh color
based on ... some criterion.
*msh* should be of of shape (3, ). *m_unsat* is a scalar.
Return value is the adjusted h (hue) value.
### Response:
def moreland_adjusthue (msh, m_unsat):
"""Moreland's AdjustHue procedure to adjust the hue value of an Msh color
based on ... some criterion.
*msh* should be of of shape (3, ). *m_unsat* is a scalar.
Return value is the adjusted h (hue) value.
"""
if msh[M] >= m_unsat:
return msh[H] # "Best we can do"
hspin = (msh[S] * np.sqrt (m_unsat**2 - msh[M]**2) /
(msh[M] * np.sin (msh[S])))
if msh[H] > -np.pi / 3: # "Spin away from purple"
return msh[H] + hspin
return msh[H] - hspin |
def set_xlim_cb(self, redraw=True):
    """Set plot limit based on user values.

    Reads the X lower/upper bounds from the ``x_lo``/``x_hi`` text
    widgets.  A bound whose text does not parse as a float is treated as
    "auto": the plot is redrawn so autoscaled limits exist and the text
    widgets are refreshed with them.

    :param redraw: When True, redraw the plot after applying the limits.
    """
    try:
        xmin = float(self.w.x_lo.get_text())
        set_min = False
    except Exception:
        set_min = True

    try:
        xmax = float(self.w.x_hi.get_text())
        set_max = False
    except Exception:
        set_max = True

    if set_min or set_max:
        # Redraw first so the autoscaled limits are computed before we
        # push them back into the text widgets.
        self.tab_plot.draw()
        self.set_xlimits_widgets(set_min=set_min, set_max=set_max)

    if not (set_min and set_max):
        # BUG FIX: the original referenced xmin/xmax even when the
        # corresponding parse failed, raising UnboundLocalError whenever
        # exactly one bound was valid.  Fill any missing bound from the
        # current (autoscaled) axis limits instead.
        cur_min, cur_max = self.tab_plot.ax.get_xlim()
        if set_min:
            xmin = cur_min
        if set_max:
            xmax = cur_max
        self.tab_plot.ax.set_xlim(xmin, xmax)

    if redraw:
        self.tab_plot.draw()
### Input:
Set plot limit based on user values.
### Response:
def set_xlim_cb(self, redraw=True):
"""Set plot limit based on user values."""
try:
xmin = float(self.w.x_lo.get_text())
except Exception:
set_min = True
else:
set_min = False
try:
xmax = float(self.w.x_hi.get_text())
except Exception:
set_max = True
else:
set_max = False
if set_min or set_max:
self.tab_plot.draw()
self.set_xlimits_widgets(set_min=set_min, set_max=set_max)
if not (set_min and set_max):
self.tab_plot.ax.set_xlim(xmin, xmax)
if redraw:
self.tab_plot.draw() |
def send_msg_async(self, *, message_type, user_id=None, group_id=None, discuss_id=None, message, auto_escape=False):
    """
    Send a message (asynchronous version).

    :param str message_type: Message type; one of ``private``, ``group``,
        ``discuss`` (private chat, group chat, discussion group).
    :param int user_id: Target QQ number (required when type is ``private``).
    :param int group_id: Group number (required when type is ``group``).
    :param int discuss_id: Discussion-group ID, obtained from a reported
        message (required when type is ``discuss``).
    :param str | list[ dict[ str, unknown ] ] message: Content to send.
    :param bool auto_escape: Send the content as plain text (i.e. do not
        parse CQ codes); ignored when ``message`` is of type ``list``.
    :return: None
    :rtype: None
    """
    # Delegate to the dynamically resolved API method on the parent
    # proxy (the base class resolves attribute names to API calls).
    return super().__getattr__('send_msg_async') \
        (message_type=message_type, user_id=user_id, group_id=group_id,
         discuss_id=discuss_id, message=message, auto_escape=auto_escape)
------------
:param str message_type: 消息类型,支持 `private`、`group`、`discuss`,分别对应私聊、群组、讨论组
:param int user_id: 对方 QQ 号(消息类型为 `private` 时需要)
:param int group_id: 群号(消息类型为 `group` 时需要)
:param int discuss_id: 讨论组 ID(需要从上报消息中获取,消息类型为 `discuss` 时需要)
:param str | list[ dict[ str, unknown ] ] message: 要发送的内容
:param bool auto_escape: 消息内容是否作为纯文本发送(即不解析 CQ 码),`message` 数据类型为 `list` 时无效
:return: None
:rtype: None | Below is the the instruction that describes the task:
### Input:
发送消息 (异步版本)
------------
:param str message_type: 消息类型,支持 `private`、`group`、`discuss`,分别对应私聊、群组、讨论组
:param int user_id: 对方 QQ 号(消息类型为 `private` 时需要)
:param int group_id: 群号(消息类型为 `group` 时需要)
:param int discuss_id: 讨论组 ID(需要从上报消息中获取,消息类型为 `discuss` 时需要)
:param str | list[ dict[ str, unknown ] ] message: 要发送的内容
:param bool auto_escape: 消息内容是否作为纯文本发送(即不解析 CQ 码),`message` 数据类型为 `list` 时无效
:return: None
:rtype: None
### Response:
def send_msg_async(self, *, message_type, user_id=None, group_id=None, discuss_id=None, message, auto_escape=False):
"""
发送消息 (异步版本)
------------
:param str message_type: 消息类型,支持 `private`、`group`、`discuss`,分别对应私聊、群组、讨论组
:param int user_id: 对方 QQ 号(消息类型为 `private` 时需要)
:param int group_id: 群号(消息类型为 `group` 时需要)
:param int discuss_id: 讨论组 ID(需要从上报消息中获取,消息类型为 `discuss` 时需要)
:param str | list[ dict[ str, unknown ] ] message: 要发送的内容
:param bool auto_escape: 消息内容是否作为纯文本发送(即不解析 CQ 码),`message` 数据类型为 `list` 时无效
:return: None
:rtype: None
"""
return super().__getattr__('send_msg_async') \
(message_type=message_type, user_id=user_id, group_id=group_id,
discuss_id=discuss_id, message=message, auto_escape=auto_escape) |
def cartesian(nodes, order='C'):
    '''
    Cartesian product of a list of arrays

    Parameters
    ----------
    nodes : list(array_like(ndim=1))
    order : str, optional(default='C')
        ('C' or 'F') order in which the product is enumerated

    Returns
    -------
    out : ndarray(ndim=2)
        each line corresponds to one point of the product space
    '''
    arrays = [np.array(grid) for grid in nodes]
    sizes = [arr.shape[0] for arr in arrays]
    n_dims = len(arrays)
    n_points = np.prod(sizes)
    out = np.zeros((n_points, n_dims), dtype=arrays[0].dtype)

    if order == 'C':
        # Column i repeats each of its values prod(sizes[:i]) times.
        reps = list(np.cumprod([1] + sizes[:-1]))
    else:
        # Fortran enumeration: repetition counts are computed over the
        # reversed sizes, then reversed back into column order.
        rev_sizes = list(reversed(sizes))
        reps = list(np.cumprod([1] + rev_sizes[:-1]))
        reps.reverse()

    for i, arr in enumerate(arrays):
        _repeat_1d(arr, reps[i], out[:, i])
    return out
Parameters
----------
nodes : list(array_like(ndim=1))
order : str, optional(default='C')
('C' or 'F') order in which the product is enumerated
Returns
-------
out : ndarray(ndim=2)
each line corresponds to one point of the product space | Below is the the instruction that describes the task:
### Input:
Cartesian product of a list of arrays
Parameters
----------
nodes : list(array_like(ndim=1))
order : str, optional(default='C')
('C' or 'F') order in which the product is enumerated
Returns
-------
out : ndarray(ndim=2)
each line corresponds to one point of the product space
### Response:
def cartesian(nodes, order='C'):
'''
Cartesian product of a list of arrays
Parameters
----------
nodes : list(array_like(ndim=1))
order : str, optional(default='C')
('C' or 'F') order in which the product is enumerated
Returns
-------
out : ndarray(ndim=2)
each line corresponds to one point of the product space
'''
nodes = [np.array(e) for e in nodes]
shapes = [e.shape[0] for e in nodes]
dtype = nodes[0].dtype
n = len(nodes)
l = np.prod(shapes)
out = np.zeros((l, n), dtype=dtype)
if order == 'C':
repetitions = np.cumprod([1] + shapes[:-1])
else:
shapes.reverse()
sh = [1] + shapes[:-1]
repetitions = np.cumprod(sh)
repetitions = repetitions.tolist()
repetitions.reverse()
for i in range(n):
_repeat_1d(nodes[i], repetitions[i], out[:, i])
return out |
def get_closest_points(self, mesh):
    """
    For each point in ``mesh`` find the closest surface element, and return
    the corresponding closest point.

    See :meth:`superclass method
    <.base.BaseSurface.get_closest_points>`
    for spec of input and result values.
    """
    # first, for each point in mesh compute minimum distance to each
    # surface. The distance matrix is flattend, because mesh can be of
    # an arbitrary shape. By flattening we obtain a ``distances`` matrix
    # for which the first dimension represents the different surfaces
    # and the second dimension the mesh points.
    dists = numpy.array(
        [surf.get_min_distance(mesh).flatten() for surf in self.surfaces]
    )
    # find for each point in mesh the index of closest surface.
    # Boolean mask of shape (n_surfaces, n_points); if two surfaces tie
    # for a point, both rows are True and the later surface in the loop
    # below overwrites the earlier one.
    idx = dists == numpy.min(dists, axis=0)
    # loop again over surfaces. For each surface compute the closest
    # points, and associate them to the mesh points for which the surface
    # is the closest. Note that if a surface is not the closest to any of
    # the mesh points then the calculation is skipped
    lons = numpy.empty_like(mesh.lons.flatten())
    lats = numpy.empty_like(mesh.lats.flatten())
    depths = None if mesh.depths is None else \
        numpy.empty_like(mesh.depths.flatten())
    for i, surf in enumerate(self.surfaces):
        if not idx[i, :].any():
            continue
        cps = surf.get_closest_points(mesh)
        lons[idx[i, :]] = cps.lons.flatten()[idx[i, :]]
        lats[idx[i, :]] = cps.lats.flatten()[idx[i, :]]
        if depths is not None:
            depths[idx[i, :]] = cps.depths.flatten()[idx[i, :]]
    # restore the caller's original mesh shape before building the result
    lons = lons.reshape(mesh.lons.shape)
    lats = lats.reshape(mesh.lats.shape)
    if depths is not None:
        depths = depths.reshape(mesh.depths.shape)
    return Mesh(lons, lats, depths)
the corresponding closest point.
See :meth:`superclass method
<.base.BaseSurface.get_closest_points>`
for spec of input and result values. | Below is the the instruction that describes the task:
### Input:
For each point in ``mesh`` find the closest surface element, and return
the corresponding closest point.
See :meth:`superclass method
<.base.BaseSurface.get_closest_points>`
for spec of input and result values.
### Response:
def get_closest_points(self, mesh):
"""
For each point in ``mesh`` find the closest surface element, and return
the corresponding closest point.
See :meth:`superclass method
<.base.BaseSurface.get_closest_points>`
for spec of input and result values.
"""
# first, for each point in mesh compute minimum distance to each
# surface. The distance matrix is flattend, because mesh can be of
# an arbitrary shape. By flattening we obtain a ``distances`` matrix
# for which the first dimension represents the different surfaces
# and the second dimension the mesh points.
dists = numpy.array(
[surf.get_min_distance(mesh).flatten() for surf in self.surfaces]
)
# find for each point in mesh the index of closest surface
idx = dists == numpy.min(dists, axis=0)
# loop again over surfaces. For each surface compute the closest
# points, and associate them to the mesh points for which the surface
# is the closest. Note that if a surface is not the closest to any of
# the mesh points then the calculation is skipped
lons = numpy.empty_like(mesh.lons.flatten())
lats = numpy.empty_like(mesh.lats.flatten())
depths = None if mesh.depths is None else \
numpy.empty_like(mesh.depths.flatten())
for i, surf in enumerate(self.surfaces):
if not idx[i, :].any():
continue
cps = surf.get_closest_points(mesh)
lons[idx[i, :]] = cps.lons.flatten()[idx[i, :]]
lats[idx[i, :]] = cps.lats.flatten()[idx[i, :]]
if depths is not None:
depths[idx[i, :]] = cps.depths.flatten()[idx[i, :]]
lons = lons.reshape(mesh.lons.shape)
lats = lats.reshape(mesh.lats.shape)
if depths is not None:
depths = depths.reshape(mesh.depths.shape)
return Mesh(lons, lats, depths) |
def download_rsr(**kwargs):
    """Download the pre-compiled hdf5 formatted relative spectral response
    functions from the internet.

    :keyword dest_dir: Directory to store the extracted RSR files in
        (default ``LOCAL_RSR_DIR``).
    :keyword dry_run: When True, only log what would be done and return
        without downloading anything (default ``False``).
    """
    #
    # Local imports so the module can be used without these dependencies
    # when downloading is not needed.
    import tarfile
    import requests
    TQDM_LOADED = True
    try:
        from tqdm import tqdm
    except ImportError:
        # tqdm is optional; fall back to a download without progress bar.
        TQDM_LOADED = False
    dest_dir = kwargs.get('dest_dir', LOCAL_RSR_DIR)
    dry_run = kwargs.get('dry_run', False)
    LOG.info("Download RSR files and store in directory %s", dest_dir)
    filename = os.path.join(dest_dir, "pyspectral_rsr_data.tgz")
    LOG.debug("Get data. URL: %s", HTTP_PYSPECTRAL_RSR)
    LOG.debug("Destination = %s", dest_dir)
    if dry_run:
        return
    # NOTE(review): the HTTP status is never checked and the request is
    # not streamed (no ``stream=True``), so a failed download is written
    # to disk and then fed to tarfile -- confirm this is acceptable.
    response = requests.get(HTTP_PYSPECTRAL_RSR)
    if TQDM_LOADED:
        with open(filename, "wb") as handle:
            for data in tqdm(response.iter_content()):
                handle.write(data)
    else:
        with open(filename, "wb") as handle:
            for data in response.iter_content():
                handle.write(data)
    # Unpack the archive into dest_dir, then remove the tarball.
    tar = tarfile.open(filename)
    tar.extractall(dest_dir)
    tar.close()
    os.remove(filename)
from the internet | Below is the the instruction that describes the task:
### Input:
Download the pre-compiled hdf5 formatet relative spectral response functions
from the internet
### Response:
def download_rsr(**kwargs):
"""Download the pre-compiled hdf5 formatet relative spectral response functions
from the internet
"""
#
import tarfile
import requests
TQDM_LOADED = True
try:
from tqdm import tqdm
except ImportError:
TQDM_LOADED = False
dest_dir = kwargs.get('dest_dir', LOCAL_RSR_DIR)
dry_run = kwargs.get('dry_run', False)
LOG.info("Download RSR files and store in directory %s", dest_dir)
filename = os.path.join(dest_dir, "pyspectral_rsr_data.tgz")
LOG.debug("Get data. URL: %s", HTTP_PYSPECTRAL_RSR)
LOG.debug("Destination = %s", dest_dir)
if dry_run:
return
response = requests.get(HTTP_PYSPECTRAL_RSR)
if TQDM_LOADED:
with open(filename, "wb") as handle:
for data in tqdm(response.iter_content()):
handle.write(data)
else:
with open(filename, "wb") as handle:
for data in response.iter_content():
handle.write(data)
tar = tarfile.open(filename)
tar.extractall(dest_dir)
tar.close()
os.remove(filename) |
def generate_sleepy_cat_graph(filepath, prefixes=None, identifier="NautilusSparql"):
    """Create and open a Sleepycat-backed graph and make it the global graph.

    :param filepath: A Uri for the graph
    :param prefixes: A dictionary of prefixes and namespaces to bind to the graph
    :param identifier: An identifier that will identify the Graph root
    """
    registerplugins()
    graph = Graph('Sleepycat', identifier=URIRef(identifier))
    graph.open(filepath, create=True)

    bindings = prefixes or GRAPH_BINDINGS
    for prefix, namespace in bindings.items():
        # Fix until ALchemy Store accepts empty prefixes
        bound_prefix = "cts" if prefix == "" else prefix
        graph.bind(bound_prefix, namespace)
    return graph, identifier, filepath
:param filepath: A Uri for the graph
:param prefixes: A dictionary of prefixes and namespaces to bind to the graph
:param identifier: An identifier that will identify the Graph root | Below is the the instruction that describes the task:
### Input:
Generate a graph and change the global graph to this one
:param filepath: A Uri for the graph
:param prefixes: A dictionary of prefixes and namespaces to bind to the graph
:param identifier: An identifier that will identify the Graph root
### Response:
def generate_sleepy_cat_graph(filepath, prefixes=None, identifier="NautilusSparql"):
""" Generate a graph and change the global graph to this one
:param filepath: A Uri for the graph
:param prefixes: A dictionary of prefixes and namespaces to bind to the graph
:param identifier: An identifier that will identify the Graph root
"""
registerplugins()
ident = URIRef(identifier)
graph = Graph('Sleepycat', identifier=ident)
graph.open(filepath, create=True)
for prefix, ns in (prefixes or GRAPH_BINDINGS).items():
if prefix == "":
prefix = "cts" # Fix until ALchemy Store accepts empty prefixes
graph.bind(prefix, ns)
return graph, identifier, filepath |
def get_plugin_meta(plugins):
    """
    Returns meta data about plugins.

    :param plugins: A list of plugins.
    :type plugins: list
    :returns: A list of dicts containing plugin meta data.
    :rtype: list
    """
    meta = []
    for plugin in plugins:
        # Skip entries that are not recognised as plugins.
        if not is_plugin(plugin):
            continue
        meta.append({
            "name": plugin.name,
            "version": plugin.version,
            "homepage": plugin.homepage,
            "enabled": plugin.enabled,
        })
    return meta
:param plugins: A list of plugins.
:type plugins: list
:returns: A list of dicts containing plugin meta data.
:rtype: list | Below is the the instruction that describes the task:
### Input:
Returns meta data about plugins.
:param plugins: A list of plugins.
:type plugins: list
:returns: A list of dicts containing plugin meta data.
:rtype: list
### Response:
def get_plugin_meta(plugins):
"""
Returns meta data about plugins.
:param plugins: A list of plugins.
:type plugins: list
:returns: A list of dicts containing plugin meta data.
:rtype: list
"""
return [
{
"name": p.name,
"version": p.version,
"homepage": p.homepage,
"enabled": p.enabled,
}
for p in plugins
if is_plugin(p)
] |
def _api_key_patch_replace(conn, apiKey, path, value):
'''
the replace patch operation on an ApiKey resource
'''
response = conn.update_api_key(apiKey=apiKey,
patchOperations=[{'op': 'replace', 'path': path, 'value': value}])
return response | the replace patch operation on an ApiKey resource | Below is the the instruction that describes the task:
### Input:
the replace patch operation on an ApiKey resource
### Response:
def _api_key_patch_replace(conn, apiKey, path, value):
'''
the replace patch operation on an ApiKey resource
'''
response = conn.update_api_key(apiKey=apiKey,
patchOperations=[{'op': 'replace', 'path': path, 'value': value}])
return response |
def reload_including_local(module):
    """
    Reload a module. If it isn't found, try to include the local service
    directory. This must be called from a thread that has acquired the
    import lock.

    :param module: the module to reload.
    :raises ImportError: if the module cannot be found even with the
        service directory on ``sys.path``.
    """
    try:
        reload(module)
    except ImportError:
        # This can happen if the module was loaded in the immediate script
        # directory. Add the service path and try again.
        if not hasattr(cherrypy.thread_data, "modulepath"):
            raise
        path = os.path.abspath(cherrypy.thread_data.modulepath)
        root = os.path.abspath(cherrypy.config.get("webroot"))
        # Only trust module paths that live inside the configured webroot
        # and are not already on sys.path.
        if path not in sys.path and (path == root or path.startswith(root + os.path.sep)):
            oldpath = sys.path
            try:
                # Prepend so the service directory wins the module lookup.
                sys.path = [path] + sys.path
                reload(module)
            finally:
                # Always restore the original search path.
                sys.path = oldpath
        else:
            raise
directory. This must be called from a thread that has acquired the import
lock.
:param module: the module to reload. | Below is the the instruction that describes the task:
### Input:
Reload a module. If it isn"t found, try to include the local service
directory. This must be called from a thread that has acquired the import
lock.
:param module: the module to reload.
### Response:
def reload_including_local(module):
"""
Reload a module. If it isn"t found, try to include the local service
directory. This must be called from a thread that has acquired the import
lock.
:param module: the module to reload.
"""
try:
reload(module)
except ImportError:
# This can happen if the module was loaded in the immediate script
# directory. Add the service path and try again.
if not hasattr(cherrypy.thread_data, "modulepath"):
raise
path = os.path.abspath(cherrypy.thread_data.modulepath)
root = os.path.abspath(cherrypy.config.get("webroot"))
if path not in sys.path and (path == root or path.startswith(root + os.path.sep)):
oldpath = sys.path
try:
sys.path = [path] + sys.path
reload(module)
finally:
sys.path = oldpath
else:
raise |
def _serialize(
    self, obj, fields_dict, error_store, many=False,
    accessor=None, dict_class=dict, index_errors=True,
    index=None,
):
    """Takes raw data (a dict, list, or other object) and a dict of
    fields to output and serializes the data based on those fields.

    :param obj: The actual object(s) from which the fields are taken from
    :param dict fields_dict: Mapping of field names to :class:`Field` objects.
    :param ErrorStore error_store: Structure to store errors.
    :param bool many: Set to `True` if ``data`` should be serialized as
        a collection.
    :param callable accessor: Function to use for getting values from ``obj``.
    :param type dict_class: Dictionary class used to construct the output.
    :param bool index_errors: Whether to store the index of invalid items in
        ``self.errors`` when ``many=True``.
    :param int index: Index of the item being serialized (for storing errors) if
        serializing a collection, otherwise `None`.
    :return: A dictionary of the marshalled data

    .. versionchanged:: 1.0.0
        Renamed from ``marshal``.
    """
    # Only track per-item positions when the caller wants indexed errors.
    index = index if index_errors else None
    if many and obj is not None:
        # Collection: serialize each element individually, passing its
        # position so errors can be attributed to the right item.
        self._pending = True
        ret = [
            self._serialize(
                d, fields_dict, error_store, many=False,
                dict_class=dict_class, accessor=accessor,
                index=idx, index_errors=index_errors,
            )
            for idx, d in enumerate(obj)
        ]
        self._pending = False
        return ret
    items = []
    for attr_name, field_obj in iteritems(fields_dict):
        # load-only fields are never included in serialized output.
        if getattr(field_obj, 'load_only', False):
            continue
        key = field_obj.data_key or attr_name
        # NOTE: the lambda is invoked immediately by _call_and_store
        # below, so capturing the loop variables here is safe.
        getter = lambda d: field_obj.serialize(attr_name, d, accessor=accessor)
        value = self._call_and_store(
            getter_func=getter,
            data=obj,
            field_name=key,
            error_store=error_store,
            index=index,
        )
        # ``missing`` sentinel means the field omitted itself.
        if value is missing:
            continue
        items.append((key, value))
    ret = dict_class(items)
    return ret
fields to output and serializes the data based on those fields.
:param obj: The actual object(s) from which the fields are taken from
:param dict fields_dict: Mapping of field names to :class:`Field` objects.
:param ErrorStore error_store: Structure to store errors.
:param bool many: Set to `True` if ``data`` should be serialized as
a collection.
:param callable accessor: Function to use for getting values from ``obj``.
:param type dict_class: Dictionary class used to construct the output.
:param bool index_errors: Whether to store the index of invalid items in
``self.errors`` when ``many=True``.
:param int index: Index of the item being serialized (for storing errors) if
serializing a collection, otherwise `None`.
:return: A dictionary of the marshalled data
.. versionchanged:: 1.0.0
Renamed from ``marshal``. | Below is the the instruction that describes the task:
### Input:
Takes raw data (a dict, list, or other object) and a dict of
fields to output and serializes the data based on those fields.
:param obj: The actual object(s) from which the fields are taken from
:param dict fields_dict: Mapping of field names to :class:`Field` objects.
:param ErrorStore error_store: Structure to store errors.
:param bool many: Set to `True` if ``data`` should be serialized as
a collection.
:param callable accessor: Function to use for getting values from ``obj``.
:param type dict_class: Dictionary class used to construct the output.
:param bool index_errors: Whether to store the index of invalid items in
``self.errors`` when ``many=True``.
:param int index: Index of the item being serialized (for storing errors) if
serializing a collection, otherwise `None`.
:return: A dictionary of the marshalled data
.. versionchanged:: 1.0.0
Renamed from ``marshal``.
### Response:
def _serialize(
self, obj, fields_dict, error_store, many=False,
accessor=None, dict_class=dict, index_errors=True,
index=None,
):
"""Takes raw data (a dict, list, or other object) and a dict of
fields to output and serializes the data based on those fields.
:param obj: The actual object(s) from which the fields are taken from
:param dict fields_dict: Mapping of field names to :class:`Field` objects.
:param ErrorStore error_store: Structure to store errors.
:param bool many: Set to `True` if ``data`` should be serialized as
a collection.
:param callable accessor: Function to use for getting values from ``obj``.
:param type dict_class: Dictionary class used to construct the output.
:param bool index_errors: Whether to store the index of invalid items in
``self.errors`` when ``many=True``.
:param int index: Index of the item being serialized (for storing errors) if
serializing a collection, otherwise `None`.
:return: A dictionary of the marshalled data
.. versionchanged:: 1.0.0
Renamed from ``marshal``.
"""
index = index if index_errors else None
if many and obj is not None:
self._pending = True
ret = [
self._serialize(
d, fields_dict, error_store, many=False,
dict_class=dict_class, accessor=accessor,
index=idx, index_errors=index_errors,
)
for idx, d in enumerate(obj)
]
self._pending = False
return ret
items = []
for attr_name, field_obj in iteritems(fields_dict):
if getattr(field_obj, 'load_only', False):
continue
key = field_obj.data_key or attr_name
getter = lambda d: field_obj.serialize(attr_name, d, accessor=accessor)
value = self._call_and_store(
getter_func=getter,
data=obj,
field_name=key,
error_store=error_store,
index=index,
)
if value is missing:
continue
items.append((key, value))
ret = dict_class(items)
return ret |
def iter_walk_menus(self):
    """
    Yield this menu, then every nested sub-menu, depth-first.

    Useful for example to spread changes to the prompt, e.g. enabling or
    disabling colors.
    """
    yield self
    for entry in self.name_to_command.values():
        if isinstance(entry, _Menu):
            # Recurse into sub-menus via generator delegation (PEP 380,
            # https://docs.python.org/3/whatsnew/3.3.html#pep-380).
            yield from entry.iter_walk_menus()
disabling colors. | Below is the the instruction that describes the task:
### Input:
Useful for example to spread changes to the prompt, e.g. enabling or
disabling colors.
### Response:
def iter_walk_menus(self):
"""
Useful for example to spread changes to the prompt, e.g. enabling or
disabling colors.
"""
yield self
for item in self.name_to_command.values():
if isinstance(item, _Menu):
# "Generator delegation" ;)
# https://docs.python.org/3/whatsnew/3.3.html#pep-380
yield from item.iter_walk_menus() |
def should_copy(column):
    """
    Determine if a column should be copied.

    Only non-nullable serial columns with a server default are skipped:
    do not create temporary serial values; they will be defaulted on
    upsert/insert.
    """
    is_generated_serial = (
        isinstance(column.type, Serial)
        and not column.nullable
        and bool(column.server_default)
    )
    return not is_generated_serial
### Input:
Determine if a column should be copied.
### Response:
def should_copy(column):
"""
Determine if a column should be copied.
"""
if not isinstance(column.type, Serial):
return True
if column.nullable:
return True
if not column.server_default:
return True
# do not create temporary serial values; they will be defaulted on upsert/insert
return False |
def setattr_at_path(obj, path, val):
    """
    Traverses a set of nested attributes to set a value on an object.

    Each element of ``path`` except the last selects either an attribute
    (string names present on the current target) or an item
    (``target[attr]``).  The final element is assigned as an attribute
    when possible, falling back to item assignment.

    :param mixed obj: The object to set the attribute on
    :param tuple path: The path to the attribute on the object
    :param mixed val: The value at the attribute
    :rtype None:
    """
    target = obj
    last_attr = path[-1]
    for attr in path[:-1]:
        try:
            # BUG FIX: the original compared type(attr) against the
            # Python 2 name `unicode`, a NameError on Python 3 that was
            # silently swallowed by a bare except, skipping attribute
            # traversal entirely.
            if isinstance(attr, str) and target and hasattr(target, attr):
                target = getattr(target, attr)
            else:
                target = target[attr]
        except (TypeError, KeyError, IndexError):
            # Preserve the original best-effort behaviour: a missing hop
            # leaves `target` unchanged rather than raising.
            pass
    # Ensures we set by reference: prefer attribute assignment, fall
    # back to item assignment (e.g. for dicts and lists).
    try:
        setattr(target, last_attr, val)
    except (AttributeError, TypeError):
        target[last_attr] = val
:param mixed obj: The object to set the attribute on
:param tuple path: The path to the attribute on the object
:param mixed val: The value at the attribute
:rtype None: | Below is the the instruction that describes the task:
### Input:
Traverses a set of nested attributes to the value on an object
:param mixed obj: The object to set the attribute on
:param tuple path: The path to the attribute on the object
:param mixed val: The value at the attribute
:rtype None:
### Response:
def setattr_at_path( obj, path, val ):
"""
Traverses a set of nested attributes to the value on an object
:param mixed obj: The object to set the attribute on
:param tuple path: The path to the attribute on the object
:param mixed val: The value at the attribute
:rtype None:
"""
target = obj
last_attr = path[-1]
for attr in path[0:-1]:
try:
if type(attr) in ( str, unicode ) and target and hasattr( target, attr ):
target = getattr( target, attr )
else:
target = target[attr]
except:
pass
# Ensures we set by reference
try:
setattr( target, last_attr, val )
except:
target[last_attr] = val |
def quotation(self, origin, target):
    """Return the quotation between two currencies (origin, target).

    :raise ExchangeBackendNotInstalled: when no exchange backend is set.
    """
    backend = self._backend
    if not backend:
        raise ExchangeBackendNotInstalled()
    return backend.quotation(origin, target)
### Input:
Return quotation between two currencies (origin, target)
### Response:
def quotation(self, origin, target):
"""Return quotation between two currencies (origin, target)"""
if not self._backend:
raise ExchangeBackendNotInstalled()
return self._backend.quotation(origin, target) |
def descending(self, name):
    '''Add a descending index for ``name`` to this index.

    :param name: Name to be used in the index
    :returns: this index, to allow call chaining
    '''
    component = (name, Index.DESCENDING)
    self.components.append(component)
    return self
:param name: Name to be used in the index | Below is the the instruction that describes the task:
### Input:
Add a descending index for ``name`` to this index.
:param name: Name to be used in the index
### Response:
def descending(self, name):
''' Add a descending index for ``name`` to this index.
:param name: Name to be used in the index
'''
self.components.append((name, Index.DESCENDING))
return self |
def clean_missing_exponent(pst_filename,clean_filename="clean.pst"):
"""fixes the issue where some terrible fortran program may have
written a floating point format without the 'e' - like 1.0-3, really?!
Parameters
----------
pst_filename : str
the pest control file
clean_filename : str
the new pest control file to write. Default is "clean.pst"
Returns
-------
None
"""
lines = []
with open(pst_filename,'r') as f:
for line in f:
line = line.lower().strip()
if '+' in line:
raw = line.split('+')
for i,r in enumerate(raw[:-1]):
if r[-1] != 'e':
r = r + 'e'
raw[i] = r
lines.append('+'.join(raw))
else:
lines.append(line)
with open(clean_filename,'w') as f:
for line in lines:
f.write(line+'\n') | fixes the issue where some terrible fortran program may have
written a floating point format without the 'e' - like 1.0-3, really?!
Parameters
----------
pst_filename : str
the pest control file
clean_filename : str
the new pest control file to write. Default is "clean.pst"
Returns
-------
None | Below is the the instruction that describes the task:
### Input:
fixes the issue where some terrible fortran program may have
written a floating point format without the 'e' - like 1.0-3, really?!
Parameters
----------
pst_filename : str
the pest control file
clean_filename : str
the new pest control file to write. Default is "clean.pst"
Returns
-------
None
### Response:
def clean_missing_exponent(pst_filename,clean_filename="clean.pst"):
"""fixes the issue where some terrible fortran program may have
written a floating point format without the 'e' - like 1.0-3, really?!
Parameters
----------
pst_filename : str
the pest control file
clean_filename : str
the new pest control file to write. Default is "clean.pst"
Returns
-------
None
"""
lines = []
with open(pst_filename,'r') as f:
for line in f:
line = line.lower().strip()
if '+' in line:
raw = line.split('+')
for i,r in enumerate(raw[:-1]):
if r[-1] != 'e':
r = r + 'e'
raw[i] = r
lines.append('+'.join(raw))
else:
lines.append(line)
with open(clean_filename,'w') as f:
for line in lines:
f.write(line+'\n') |
def _set_interface_loopback_ospf_conf(self, v, load=False):
"""
Setter method for interface_loopback_ospf_conf, mapped from YANG variable /routing_system/interface/loopback/ip/interface_loopback_ospf_conf (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_loopback_ospf_conf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_loopback_ospf_conf() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=interface_loopback_ospf_conf.interface_loopback_ospf_conf, is_container='container', presence=False, yang_name="interface-loopback-ospf-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'OSPFLoopbackInterfaceCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_loopback_ospf_conf must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=interface_loopback_ospf_conf.interface_loopback_ospf_conf, is_container='container', presence=False, yang_name="interface-loopback-ospf-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'OSPFLoopbackInterfaceCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""",
})
self.__interface_loopback_ospf_conf = t
if hasattr(self, '_set'):
self._set() | Setter method for interface_loopback_ospf_conf, mapped from YANG variable /routing_system/interface/loopback/ip/interface_loopback_ospf_conf (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_loopback_ospf_conf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_loopback_ospf_conf() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for interface_loopback_ospf_conf, mapped from YANG variable /routing_system/interface/loopback/ip/interface_loopback_ospf_conf (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_loopback_ospf_conf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_loopback_ospf_conf() directly.
### Response:
def _set_interface_loopback_ospf_conf(self, v, load=False):
"""
Setter method for interface_loopback_ospf_conf, mapped from YANG variable /routing_system/interface/loopback/ip/interface_loopback_ospf_conf (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_loopback_ospf_conf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_loopback_ospf_conf() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=interface_loopback_ospf_conf.interface_loopback_ospf_conf, is_container='container', presence=False, yang_name="interface-loopback-ospf-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'OSPFLoopbackInterfaceCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_loopback_ospf_conf must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=interface_loopback_ospf_conf.interface_loopback_ospf_conf, is_container='container', presence=False, yang_name="interface-loopback-ospf-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'OSPFLoopbackInterfaceCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""",
})
self.__interface_loopback_ospf_conf = t
if hasattr(self, '_set'):
self._set() |
def unreduce(array, shape, axis, keepdims):
"""Reverse summing over a dimension.
Args:
array: The array that was reduced.
shape: The original shape of the array before reduction.
axis: The axis or axes that were summed.
keepdims: Whether these axes were kept as singleton axes.
Returns:
An array with axes broadcast to match the shape of the original array.
"""
unreducer = unreducers[type(array)]
return unreducer(array, shape, axis, keepdims) | Reverse summing over a dimension.
Args:
array: The array that was reduced.
shape: The original shape of the array before reduction.
axis: The axis or axes that were summed.
keepdims: Whether these axes were kept as singleton axes.
Returns:
An array with axes broadcast to match the shape of the original array. | Below is the the instruction that describes the task:
### Input:
Reverse summing over a dimension.
Args:
array: The array that was reduced.
shape: The original shape of the array before reduction.
axis: The axis or axes that were summed.
keepdims: Whether these axes were kept as singleton axes.
Returns:
An array with axes broadcast to match the shape of the original array.
### Response:
def unreduce(array, shape, axis, keepdims):
"""Reverse summing over a dimension.
Args:
array: The array that was reduced.
shape: The original shape of the array before reduction.
axis: The axis or axes that were summed.
keepdims: Whether these axes were kept as singleton axes.
Returns:
An array with axes broadcast to match the shape of the original array.
"""
unreducer = unreducers[type(array)]
return unreducer(array, shape, axis, keepdims) |
def learn(self, grad_arr, fix_opt_flag=False):
'''
Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
fix_opt_flag: If `False`, no optimization in this model will be done.
Returns:
`np.ndarray` of delta or gradients.
'''
if grad_arr.ndim > 3:
grad_arr = grad_arr.reshape((
grad_arr.shape[0],
grad_arr.shape[1],
-1
))
delta_arr, grads_list = self.__lstm_model.back_propagation(self.__pred_arr, grad_arr)
if fix_opt_flag is False:
self.__lstm_model.optimize(
grads_list,
self.__learning_rate,
1
)
return delta_arr | Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
fix_opt_flag: If `False`, no optimization in this model will be done.
Returns:
`np.ndarray` of delta or gradients. | Below is the the instruction that describes the task:
### Input:
Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
fix_opt_flag: If `False`, no optimization in this model will be done.
Returns:
`np.ndarray` of delta or gradients.
### Response:
def learn(self, grad_arr, fix_opt_flag=False):
'''
Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
fix_opt_flag: If `False`, no optimization in this model will be done.
Returns:
`np.ndarray` of delta or gradients.
'''
if grad_arr.ndim > 3:
grad_arr = grad_arr.reshape((
grad_arr.shape[0],
grad_arr.shape[1],
-1
))
delta_arr, grads_list = self.__lstm_model.back_propagation(self.__pred_arr, grad_arr)
if fix_opt_flag is False:
self.__lstm_model.optimize(
grads_list,
self.__learning_rate,
1
)
return delta_arr |
def _raise_for_status(response):
"""Raises stored :class:`HTTPError`, if one occurred.
This is the :meth:`requests.models.Response.raise_for_status` method,
modified to add the response from Space-Track, if given.
"""
http_error_msg = ''
if 400 <= response.status_code < 500:
http_error_msg = '%s Client Error: %s for url: %s' % (
response.status_code, response.reason, response.url)
elif 500 <= response.status_code < 600:
http_error_msg = '%s Server Error: %s for url: %s' % (
response.status_code, response.reason, response.url)
if http_error_msg:
spacetrack_error_msg = None
try:
json = response.json()
if isinstance(json, Mapping):
spacetrack_error_msg = json['error']
except (ValueError, KeyError):
pass
if not spacetrack_error_msg:
spacetrack_error_msg = response.text
if spacetrack_error_msg:
http_error_msg += '\nSpace-Track response:\n' + spacetrack_error_msg
raise requests.HTTPError(http_error_msg, response=response) | Raises stored :class:`HTTPError`, if one occurred.
This is the :meth:`requests.models.Response.raise_for_status` method,
modified to add the response from Space-Track, if given. | Below is the the instruction that describes the task:
### Input:
Raises stored :class:`HTTPError`, if one occurred.
This is the :meth:`requests.models.Response.raise_for_status` method,
modified to add the response from Space-Track, if given.
### Response:
def _raise_for_status(response):
"""Raises stored :class:`HTTPError`, if one occurred.
This is the :meth:`requests.models.Response.raise_for_status` method,
modified to add the response from Space-Track, if given.
"""
http_error_msg = ''
if 400 <= response.status_code < 500:
http_error_msg = '%s Client Error: %s for url: %s' % (
response.status_code, response.reason, response.url)
elif 500 <= response.status_code < 600:
http_error_msg = '%s Server Error: %s for url: %s' % (
response.status_code, response.reason, response.url)
if http_error_msg:
spacetrack_error_msg = None
try:
json = response.json()
if isinstance(json, Mapping):
spacetrack_error_msg = json['error']
except (ValueError, KeyError):
pass
if not spacetrack_error_msg:
spacetrack_error_msg = response.text
if spacetrack_error_msg:
http_error_msg += '\nSpace-Track response:\n' + spacetrack_error_msg
raise requests.HTTPError(http_error_msg, response=response) |
def get_dag(self, dag_id):
"""
Gets the DAG out of the dictionary, and refreshes it if expired
"""
from airflow.models.dag import DagModel # Avoid circular import
# If asking for a known subdag, we want to refresh the parent
root_dag_id = dag_id
if dag_id in self.dags:
dag = self.dags[dag_id]
if dag.is_subdag:
root_dag_id = dag.parent_dag.dag_id
# If the dag corresponding to root_dag_id is absent or expired
orm_dag = DagModel.get_current(root_dag_id)
if orm_dag and (
root_dag_id not in self.dags or
(
orm_dag.last_expired and
dag.last_loaded < orm_dag.last_expired
)
):
# Reprocess source file
found_dags = self.process_file(
filepath=orm_dag.fileloc, only_if_updated=False)
# If the source file no longer exports `dag_id`, delete it from self.dags
if found_dags and dag_id in [found_dag.dag_id for found_dag in found_dags]:
return self.dags[dag_id]
elif dag_id in self.dags:
del self.dags[dag_id]
return self.dags.get(dag_id) | Gets the DAG out of the dictionary, and refreshes it if expired | Below is the the instruction that describes the task:
### Input:
Gets the DAG out of the dictionary, and refreshes it if expired
### Response:
def get_dag(self, dag_id):
"""
Gets the DAG out of the dictionary, and refreshes it if expired
"""
from airflow.models.dag import DagModel # Avoid circular import
# If asking for a known subdag, we want to refresh the parent
root_dag_id = dag_id
if dag_id in self.dags:
dag = self.dags[dag_id]
if dag.is_subdag:
root_dag_id = dag.parent_dag.dag_id
# If the dag corresponding to root_dag_id is absent or expired
orm_dag = DagModel.get_current(root_dag_id)
if orm_dag and (
root_dag_id not in self.dags or
(
orm_dag.last_expired and
dag.last_loaded < orm_dag.last_expired
)
):
# Reprocess source file
found_dags = self.process_file(
filepath=orm_dag.fileloc, only_if_updated=False)
# If the source file no longer exports `dag_id`, delete it from self.dags
if found_dags and dag_id in [found_dag.dag_id for found_dag in found_dags]:
return self.dags[dag_id]
elif dag_id in self.dags:
del self.dags[dag_id]
return self.dags.get(dag_id) |
def upload(self, remote_path, file_content, ondup=None, **kwargs):
"""上传单个文件(<2G).
| 百度PCS服务目前支持最大2G的单个文件上传。
| 如需支持超大文件(>2G)的断点续传,请参考下面的“分片文件上传”方法。
:param remote_path: 网盘中文件的保存路径(包含文件名)。
必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param file_content: 上传文件的内容/文件对象 。
(e.g. ``open('foobar', 'rb')`` )
:param ondup: (可选)
* 'overwrite':表示覆盖同名文件;
* 'newcopy':表示生成文件副本并进行重命名,命名规则为“
文件名_日期.后缀”。
:return: Response 对象
"""
params = {
'path': remote_path,
'ondup': ondup
}
files = {'file': ('file', file_content, '')}
url = 'https://c.pcs.baidu.com/rest/2.0/pcs/file'
return self._request('file', 'upload', url=url, extra_params=params,
files=files, **kwargs) | 上传单个文件(<2G).
| 百度PCS服务目前支持最大2G的单个文件上传。
| 如需支持超大文件(>2G)的断点续传,请参考下面的“分片文件上传”方法。
:param remote_path: 网盘中文件的保存路径(包含文件名)。
必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param file_content: 上传文件的内容/文件对象 。
(e.g. ``open('foobar', 'rb')`` )
:param ondup: (可选)
* 'overwrite':表示覆盖同名文件;
* 'newcopy':表示生成文件副本并进行重命名,命名规则为“
文件名_日期.后缀”。
:return: Response 对象 | Below is the the instruction that describes the task:
### Input:
上传单个文件(<2G).
| 百度PCS服务目前支持最大2G的单个文件上传。
| 如需支持超大文件(>2G)的断点续传,请参考下面的“分片文件上传”方法。
:param remote_path: 网盘中文件的保存路径(包含文件名)。
必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param file_content: 上传文件的内容/文件对象 。
(e.g. ``open('foobar', 'rb')`` )
:param ondup: (可选)
* 'overwrite':表示覆盖同名文件;
* 'newcopy':表示生成文件副本并进行重命名,命名规则为“
文件名_日期.后缀”。
:return: Response 对象
### Response:
def upload(self, remote_path, file_content, ondup=None, **kwargs):
"""上传单个文件(<2G).
| 百度PCS服务目前支持最大2G的单个文件上传。
| 如需支持超大文件(>2G)的断点续传,请参考下面的“分片文件上传”方法。
:param remote_path: 网盘中文件的保存路径(包含文件名)。
必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:param file_content: 上传文件的内容/文件对象 。
(e.g. ``open('foobar', 'rb')`` )
:param ondup: (可选)
* 'overwrite':表示覆盖同名文件;
* 'newcopy':表示生成文件副本并进行重命名,命名规则为“
文件名_日期.后缀”。
:return: Response 对象
"""
params = {
'path': remote_path,
'ondup': ondup
}
files = {'file': ('file', file_content, '')}
url = 'https://c.pcs.baidu.com/rest/2.0/pcs/file'
return self._request('file', 'upload', url=url, extra_params=params,
files=files, **kwargs) |
def fave_dashboards_by_username(self, username):
"""This lets us use a user's username to pull favourite dashboards"""
user = security_manager.find_user(username=username)
return self.fave_dashboards(user.get_id()) | This lets us use a user's username to pull favourite dashboards | Below is the the instruction that describes the task:
### Input:
This lets us use a user's username to pull favourite dashboards
### Response:
def fave_dashboards_by_username(self, username):
"""This lets us use a user's username to pull favourite dashboards"""
user = security_manager.find_user(username=username)
return self.fave_dashboards(user.get_id()) |
def relative_symlink(target, link_name):
"""Make a symlink to target using the shortest possible relative path."""
link_name = os.path.abspath(link_name)
abs_target = os.path.abspath(target)
rel_target = os.path.relpath(target, os.path.dirname(link_name))
if os.path.exists(link_name):
os.remove(link_name)
os.symlink(rel_target, link_name) | Make a symlink to target using the shortest possible relative path. | Below is the the instruction that describes the task:
### Input:
Make a symlink to target using the shortest possible relative path.
### Response:
def relative_symlink(target, link_name):
"""Make a symlink to target using the shortest possible relative path."""
link_name = os.path.abspath(link_name)
abs_target = os.path.abspath(target)
rel_target = os.path.relpath(target, os.path.dirname(link_name))
if os.path.exists(link_name):
os.remove(link_name)
os.symlink(rel_target, link_name) |
def pub_close(self):
'''
Disconnect an existing publisher socket and remove it from the local
thread's cache.
'''
if hasattr(self._sock_data, 'sock'):
self._sock_data.sock.close()
delattr(self._sock_data, 'sock') | Disconnect an existing publisher socket and remove it from the local
thread's cache. | Below is the the instruction that describes the task:
### Input:
Disconnect an existing publisher socket and remove it from the local
thread's cache.
### Response:
def pub_close(self):
'''
Disconnect an existing publisher socket and remove it from the local
thread's cache.
'''
if hasattr(self._sock_data, 'sock'):
self._sock_data.sock.close()
delattr(self._sock_data, 'sock') |
def trmm(self,B,trans='N'):
r"""
Multiplication with product-form Cholesky factor. Computes
.. math::
B &:= L B \text{ if trans is 'N'}
B &:= L^T B \text{ if trans is 'T'}
"""
if trans=='N':
pftrmm(self._V,self._L,self._B,B,trans='N')
cp.trmm(self._L0,B)
elif trans=='T':
cp.trmm(self._L0,B,trans='T')
pftrmm(self._V,self._L,self._B,B,trans='T')
elif type(trans) is str:
raise ValueError("trans must be 'N' or 'T'")
else:
raise TypeError("trans must be 'N' or 'T'")
return | r"""
Multiplication with product-form Cholesky factor. Computes
.. math::
B &:= L B \text{ if trans is 'N'}
B &:= L^T B \text{ if trans is 'T'} | Below is the the instruction that describes the task:
### Input:
r"""
Multiplication with product-form Cholesky factor. Computes
.. math::
B &:= L B \text{ if trans is 'N'}
B &:= L^T B \text{ if trans is 'T'}
### Response:
def trmm(self,B,trans='N'):
r"""
Multiplication with product-form Cholesky factor. Computes
.. math::
B &:= L B \text{ if trans is 'N'}
B &:= L^T B \text{ if trans is 'T'}
"""
if trans=='N':
pftrmm(self._V,self._L,self._B,B,trans='N')
cp.trmm(self._L0,B)
elif trans=='T':
cp.trmm(self._L0,B,trans='T')
pftrmm(self._V,self._L,self._B,B,trans='T')
elif type(trans) is str:
raise ValueError("trans must be 'N' or 'T'")
else:
raise TypeError("trans must be 'N' or 'T'")
return |
def content_location(self) -> Optional[UnstructuredHeader]:
"""The ``Content-Location`` header."""
try:
return cast(UnstructuredHeader, self[b'content-location'][0])
except (KeyError, IndexError):
return None | The ``Content-Location`` header. | Below is the the instruction that describes the task:
### Input:
The ``Content-Location`` header.
### Response:
def content_location(self) -> Optional[UnstructuredHeader]:
"""The ``Content-Location`` header."""
try:
return cast(UnstructuredHeader, self[b'content-location'][0])
except (KeyError, IndexError):
return None |
def get(key, profile=None):
'''
Get a value from memcached
'''
conn = salt.utils.memcached.get_conn(profile)
return salt.utils.memcached.get(conn, key) | Get a value from memcached | Below is the the instruction that describes the task:
### Input:
Get a value from memcached
### Response:
def get(key, profile=None):
'''
Get a value from memcached
'''
conn = salt.utils.memcached.get_conn(profile)
return salt.utils.memcached.get(conn, key) |
def unit_tangent(self, t):
"""returns the unit tangent vector of the segment at t (centered at
the origin and expressed as a complex number)."""
dseg = self.derivative(t)
return dseg/abs(dseg) | returns the unit tangent vector of the segment at t (centered at
the origin and expressed as a complex number). | Below is the the instruction that describes the task:
### Input:
returns the unit tangent vector of the segment at t (centered at
the origin and expressed as a complex number).
### Response:
def unit_tangent(self, t):
"""returns the unit tangent vector of the segment at t (centered at
the origin and expressed as a complex number)."""
dseg = self.derivative(t)
return dseg/abs(dseg) |
def rsky_distribution(self,rmax=None,smooth=0.1,nbins=100):
"""
Distribution of projected separations
Returns a :class:`simpledists.Hist_Distribution` object.
:param rmax: (optional)
Maximum radius to calculate distribution.
:param dr: (optional)
Bin width for histogram
:param smooth: (optional)
Smoothing parameter for :class:`simpledists.Hist_Distribution`
:param nbins: (optional)
Number of bins for histogram
:return:
:class:`simpledists.Hist_Distribution` describing Rsky distribution
"""
if rmax is None:
if hasattr(self,'maxrad'):
rmax = self.maxrad
else:
rmax = np.percentile(self.Rsky,99)
dist = dists.Hist_Distribution(self.Rsky.value,bins=nbins,maxval=rmax,smooth=smooth)
return dist | Distribution of projected separations
Returns a :class:`simpledists.Hist_Distribution` object.
:param rmax: (optional)
Maximum radius to calculate distribution.
:param dr: (optional)
Bin width for histogram
:param smooth: (optional)
Smoothing parameter for :class:`simpledists.Hist_Distribution`
:param nbins: (optional)
Number of bins for histogram
:return:
:class:`simpledists.Hist_Distribution` describing Rsky distribution | Below is the the instruction that describes the task:
### Input:
Distribution of projected separations
Returns a :class:`simpledists.Hist_Distribution` object.
:param rmax: (optional)
Maximum radius to calculate distribution.
:param dr: (optional)
Bin width for histogram
:param smooth: (optional)
Smoothing parameter for :class:`simpledists.Hist_Distribution`
:param nbins: (optional)
Number of bins for histogram
:return:
:class:`simpledists.Hist_Distribution` describing Rsky distribution
### Response:
def rsky_distribution(self,rmax=None,smooth=0.1,nbins=100):
"""
Distribution of projected separations
Returns a :class:`simpledists.Hist_Distribution` object.
:param rmax: (optional)
Maximum radius to calculate distribution.
:param dr: (optional)
Bin width for histogram
:param smooth: (optional)
Smoothing parameter for :class:`simpledists.Hist_Distribution`
:param nbins: (optional)
Number of bins for histogram
:return:
:class:`simpledists.Hist_Distribution` describing Rsky distribution
"""
if rmax is None:
if hasattr(self,'maxrad'):
rmax = self.maxrad
else:
rmax = np.percentile(self.Rsky,99)
dist = dists.Hist_Distribution(self.Rsky.value,bins=nbins,maxval=rmax,smooth=smooth)
return dist |
def _join_chemical(query, cas_rn, chemical_id, chemical_name, chemical_definition):
"""helper function to add a query join to Chemical model
:param `sqlalchemy.orm.query.Query` query: SQL Alchemy query
:param cas_rn:
:param chemical_id:
:param chemical_name:
:param chemical_definition:
:return: `sqlalchemy.orm.query.Query` object
"""
if cas_rn or chemical_id or chemical_name or chemical_definition:
query = query.join(models.Chemical)
if cas_rn:
query = query.filter(models.Chemical.cas_rn.like(cas_rn))
if chemical_id:
query = query.filter(models.Chemical.chemical_id == chemical_id)
if chemical_name:
query = query.filter(models.Chemical.chemical_name.like(chemical_name))
if chemical_definition:
query = query.filter(models.Chemical.definition.like(chemical_definition))
return query | helper function to add a query join to Chemical model
:param `sqlalchemy.orm.query.Query` query: SQL Alchemy query
:param cas_rn:
:param chemical_id:
:param chemical_name:
:param chemical_definition:
:return: `sqlalchemy.orm.query.Query` object | Below is the the instruction that describes the task:
### Input:
helper function to add a query join to Chemical model
:param `sqlalchemy.orm.query.Query` query: SQL Alchemy query
:param cas_rn:
:param chemical_id:
:param chemical_name:
:param chemical_definition:
:return: `sqlalchemy.orm.query.Query` object
### Response:
def _join_chemical(query, cas_rn, chemical_id, chemical_name, chemical_definition):
"""helper function to add a query join to Chemical model
:param `sqlalchemy.orm.query.Query` query: SQL Alchemy query
:param cas_rn:
:param chemical_id:
:param chemical_name:
:param chemical_definition:
:return: `sqlalchemy.orm.query.Query` object
"""
if cas_rn or chemical_id or chemical_name or chemical_definition:
query = query.join(models.Chemical)
if cas_rn:
query = query.filter(models.Chemical.cas_rn.like(cas_rn))
if chemical_id:
query = query.filter(models.Chemical.chemical_id == chemical_id)
if chemical_name:
query = query.filter(models.Chemical.chemical_name.like(chemical_name))
if chemical_definition:
query = query.filter(models.Chemical.definition.like(chemical_definition))
return query |
def get_manager(self, model):
"""
Return the active manager for the given model.
:param model: Model class to look up the manager instance for.
:return: Manager instance for the model associated with this client.
"""
if isinstance(model, six.string_types):
# undocumented string lookup
for k, m in self._manager_map.items():
if k.__name__ == model:
return m
else:
raise KeyError(model)
return self._manager_map[model] | Return the active manager for the given model.
:param model: Model class to look up the manager instance for.
:return: Manager instance for the model associated with this client. | Below is the the instruction that describes the task:
### Input:
Return the active manager for the given model.
:param model: Model class to look up the manager instance for.
:return: Manager instance for the model associated with this client.
### Response:
def get_manager(self, model):
"""
Return the active manager for the given model.
:param model: Model class to look up the manager instance for.
:return: Manager instance for the model associated with this client.
"""
if isinstance(model, six.string_types):
# undocumented string lookup
for k, m in self._manager_map.items():
if k.__name__ == model:
return m
else:
raise KeyError(model)
return self._manager_map[model] |
def addField(self, name, label, type=None, draw=None, info=None, #@ReservedAssignment
extinfo=None, colour=None, negative=None, graph=None,
min=None, max=None, cdef=None, line=None, #@ReservedAssignment
warning=None, critical=None):
"""Add field to Munin Graph
@param name: Field Name
@param label: Field Label
@param type: Stat Type:
'COUNTER' / 'ABSOLUTE' / 'DERIVE' / 'GAUGE'
@param draw: Graph Type:
'AREA' / 'LINE{1,2,3}' /
'STACK' / 'LINESTACK{1,2,3}' / 'AREASTACK'
@param info: Detailed Field Info
@param extinfo: Extended Field Info
@param colour: Field Colour
@param negative: Mirror Value
@param graph: Draw on Graph - True / False (Default: True)
@param min: Minimum Valid Value
@param max: Maximum Valid Value
@param cdef: CDEF
@param line: Adds horizontal line at value defined for field.
@param warning: Warning Value
@param critical: Critical Value
"""
if self._autoFixNames:
name = self._fixName(name)
if negative is not None:
negative = self._fixName(negative)
self._fieldAttrDict[name] = dict(((k,v) for (k,v) in locals().iteritems()
if (v is not None
and k not in ('self',))))
self._fieldNameList.append(name) | Add field to Munin Graph
@param name: Field Name
@param label: Field Label
@param type: Stat Type:
'COUNTER' / 'ABSOLUTE' / 'DERIVE' / 'GAUGE'
@param draw: Graph Type:
'AREA' / 'LINE{1,2,3}' /
'STACK' / 'LINESTACK{1,2,3}' / 'AREASTACK'
@param info: Detailed Field Info
@param extinfo: Extended Field Info
@param colour: Field Colour
@param negative: Mirror Value
@param graph: Draw on Graph - True / False (Default: True)
@param min: Minimum Valid Value
@param max: Maximum Valid Value
@param cdef: CDEF
@param line: Adds horizontal line at value defined for field.
@param warning: Warning Value
@param critical: Critical Value | Below is the the instruction that describes the task:
### Input:
Add field to Munin Graph
@param name: Field Name
@param label: Field Label
@param type: Stat Type:
'COUNTER' / 'ABSOLUTE' / 'DERIVE' / 'GAUGE'
@param draw: Graph Type:
'AREA' / 'LINE{1,2,3}' /
'STACK' / 'LINESTACK{1,2,3}' / 'AREASTACK'
@param info: Detailed Field Info
@param extinfo: Extended Field Info
@param colour: Field Colour
@param negative: Mirror Value
@param graph: Draw on Graph - True / False (Default: True)
@param min: Minimum Valid Value
@param max: Maximum Valid Value
@param cdef: CDEF
@param line: Adds horizontal line at value defined for field.
@param warning: Warning Value
@param critical: Critical Value
### Response:
def addField(self, name, label, type=None, draw=None, info=None, #@ReservedAssignment
extinfo=None, colour=None, negative=None, graph=None,
min=None, max=None, cdef=None, line=None, #@ReservedAssignment
warning=None, critical=None):
"""Add field to Munin Graph
@param name: Field Name
@param label: Field Label
@param type: Stat Type:
'COUNTER' / 'ABSOLUTE' / 'DERIVE' / 'GAUGE'
@param draw: Graph Type:
'AREA' / 'LINE{1,2,3}' /
'STACK' / 'LINESTACK{1,2,3}' / 'AREASTACK'
@param info: Detailed Field Info
@param extinfo: Extended Field Info
@param colour: Field Colour
@param negative: Mirror Value
@param graph: Draw on Graph - True / False (Default: True)
@param min: Minimum Valid Value
@param max: Maximum Valid Value
@param cdef: CDEF
@param line: Adds horizontal line at value defined for field.
@param warning: Warning Value
@param critical: Critical Value
"""
if self._autoFixNames:
name = self._fixName(name)
if negative is not None:
negative = self._fixName(negative)
self._fieldAttrDict[name] = dict(((k,v) for (k,v) in locals().iteritems()
if (v is not None
and k not in ('self',))))
self._fieldNameList.append(name) |
def get_sub_dim(src_ds, scale=None, maxdim=1024):
"""Compute dimensions of subsampled dataset
Parameters
----------
ds : gdal.Dataset
Input GDAL Datset
scale : int, optional
Scaling factor
maxdim : int, optional
Maximum dimension along either axis, in pixels
Returns
-------
ns
Numper of samples in subsampled output
nl
Numper of lines in subsampled output
scale
Final scaling factor
"""
ns = src_ds.RasterXSize
nl = src_ds.RasterYSize
maxdim = float(maxdim)
if scale is None:
scale_ns = ns/maxdim
scale_nl = nl/maxdim
scale = max(scale_ns, scale_nl)
#Need to check to make sure scale is positive real
if scale > 1:
ns = int(round(ns/scale))
nl = int(round(nl/scale))
return ns, nl, scale | Compute dimensions of subsampled dataset
Parameters
----------
ds : gdal.Dataset
Input GDAL Datset
scale : int, optional
Scaling factor
maxdim : int, optional
Maximum dimension along either axis, in pixels
Returns
-------
ns
Numper of samples in subsampled output
nl
Numper of lines in subsampled output
scale
Final scaling factor | Below is the the instruction that describes the task:
### Input:
Compute dimensions of subsampled dataset
Parameters
----------
ds : gdal.Dataset
Input GDAL Datset
scale : int, optional
Scaling factor
maxdim : int, optional
Maximum dimension along either axis, in pixels
Returns
-------
ns
Numper of samples in subsampled output
nl
Numper of lines in subsampled output
scale
Final scaling factor
### Response:
def get_sub_dim(src_ds, scale=None, maxdim=1024):
"""Compute dimensions of subsampled dataset
Parameters
----------
ds : gdal.Dataset
Input GDAL Datset
scale : int, optional
Scaling factor
maxdim : int, optional
Maximum dimension along either axis, in pixels
Returns
-------
ns
Numper of samples in subsampled output
nl
Numper of lines in subsampled output
scale
Final scaling factor
"""
ns = src_ds.RasterXSize
nl = src_ds.RasterYSize
maxdim = float(maxdim)
if scale is None:
scale_ns = ns/maxdim
scale_nl = nl/maxdim
scale = max(scale_ns, scale_nl)
#Need to check to make sure scale is positive real
if scale > 1:
ns = int(round(ns/scale))
nl = int(round(nl/scale))
return ns, nl, scale |
def make_inst1():
"""creates example data set 1"""
I,d = multidict({1:80, 2:270, 3:250 , 4:160, 5:180}) # demand
J,M = multidict({1:500, 2:500, 3:500}) # capacity
c = {(1,1):4, (1,2):6, (1,3):9, # cost
(2,1):5, (2,2):4, (2,3):7,
(3,1):6, (3,2):3, (3,3):4,
(4,1):8, (4,2):5, (4,3):3,
(5,1):10, (5,2):8, (5,3):4,
}
return I,J,c,d,M | creates example data set 1 | Below is the the instruction that describes the task:
### Input:
creates example data set 1
### Response:
def make_inst1():
"""creates example data set 1"""
I,d = multidict({1:80, 2:270, 3:250 , 4:160, 5:180}) # demand
J,M = multidict({1:500, 2:500, 3:500}) # capacity
c = {(1,1):4, (1,2):6, (1,3):9, # cost
(2,1):5, (2,2):4, (2,3):7,
(3,1):6, (3,2):3, (3,3):4,
(4,1):8, (4,2):5, (4,3):3,
(5,1):10, (5,2):8, (5,3):4,
}
return I,J,c,d,M |
def _get_solver(self, **kwargs):
"""Return a new :class:`psamm.lpsolver.lp.Solver` instance"""
solver_args = dict(kwargs)
solver_args.update(self._solver_args)
return generic.Solver(**solver_args) | Return a new :class:`psamm.lpsolver.lp.Solver` instance | Below is the the instruction that describes the task:
### Input:
Return a new :class:`psamm.lpsolver.lp.Solver` instance
### Response:
def _get_solver(self, **kwargs):
"""Return a new :class:`psamm.lpsolver.lp.Solver` instance"""
solver_args = dict(kwargs)
solver_args.update(self._solver_args)
return generic.Solver(**solver_args) |
def image_create(self, disk, label=None, description=None):
"""
Creates a new Image from a disk you own.
:param disk: The Disk to imagize.
:type disk: Disk or int
:param label: The label for the resulting Image (defaults to the disk's
label.
:type label: str
:param description: The description for the new Image.
:type description: str
:returns: The new Image.
:rtype: Image
"""
params = {
"disk_id": disk.id if issubclass(type(disk), Base) else disk,
}
if label is not None:
params["label"] = label
if description is not None:
params["description"] = description
result = self.post('/images', data=params)
if not 'id' in result:
raise UnexpectedResponseError('Unexpected response when creating an '
'Image from disk {}'.format(disk))
return Image(self, result['id'], result) | Creates a new Image from a disk you own.
:param disk: The Disk to imagize.
:type disk: Disk or int
:param label: The label for the resulting Image (defaults to the disk's
label.
:type label: str
:param description: The description for the new Image.
:type description: str
:returns: The new Image.
:rtype: Image | Below is the the instruction that describes the task:
### Input:
Creates a new Image from a disk you own.
:param disk: The Disk to imagize.
:type disk: Disk or int
:param label: The label for the resulting Image (defaults to the disk's
label.
:type label: str
:param description: The description for the new Image.
:type description: str
:returns: The new Image.
:rtype: Image
### Response:
def image_create(self, disk, label=None, description=None):
"""
Creates a new Image from a disk you own.
:param disk: The Disk to imagize.
:type disk: Disk or int
:param label: The label for the resulting Image (defaults to the disk's
label.
:type label: str
:param description: The description for the new Image.
:type description: str
:returns: The new Image.
:rtype: Image
"""
params = {
"disk_id": disk.id if issubclass(type(disk), Base) else disk,
}
if label is not None:
params["label"] = label
if description is not None:
params["description"] = description
result = self.post('/images', data=params)
if not 'id' in result:
raise UnexpectedResponseError('Unexpected response when creating an '
'Image from disk {}'.format(disk))
return Image(self, result['id'], result) |
def to_string(self):
"""Returns a string representation of the edge in dot language.
"""
src = self.parse_node_ref( self.get_source() )
dst = self.parse_node_ref( self.get_destination() )
if isinstance(src, frozendict):
edge = [ Subgraph(obj_dict=src).to_string() ]
elif isinstance(src, (int, long)):
edge = [ str(src) ]
else:
edge = [ src ]
if (self.get_parent_graph() and
self.get_parent_graph().get_top_graph_type() and
self.get_parent_graph().get_top_graph_type() == 'digraph' ):
edge.append( '->' )
else:
edge.append( '--' )
if isinstance(dst, frozendict):
edge.append( Subgraph(obj_dict=dst).to_string() )
elif isinstance(dst, (int, long)):
edge.append( str(dst) )
else:
edge.append( dst )
edge_attr = list()
for attr, value in self.obj_dict['attributes'].iteritems():
if value is not None:
edge_attr.append( '%s=%s' % (attr, quote_if_necessary(value) ) )
else:
edge_attr.append( attr )
edge_attr = ', '.join(edge_attr)
if edge_attr:
edge.append( ' [' + edge_attr + ']' )
return ' '.join(edge) + ';' | Returns a string representation of the edge in dot language. | Below is the the instruction that describes the task:
### Input:
Returns a string representation of the edge in dot language.
### Response:
def to_string(self):
"""Returns a string representation of the edge in dot language.
"""
src = self.parse_node_ref( self.get_source() )
dst = self.parse_node_ref( self.get_destination() )
if isinstance(src, frozendict):
edge = [ Subgraph(obj_dict=src).to_string() ]
elif isinstance(src, (int, long)):
edge = [ str(src) ]
else:
edge = [ src ]
if (self.get_parent_graph() and
self.get_parent_graph().get_top_graph_type() and
self.get_parent_graph().get_top_graph_type() == 'digraph' ):
edge.append( '->' )
else:
edge.append( '--' )
if isinstance(dst, frozendict):
edge.append( Subgraph(obj_dict=dst).to_string() )
elif isinstance(dst, (int, long)):
edge.append( str(dst) )
else:
edge.append( dst )
edge_attr = list()
for attr, value in self.obj_dict['attributes'].iteritems():
if value is not None:
edge_attr.append( '%s=%s' % (attr, quote_if_necessary(value) ) )
else:
edge_attr.append( attr )
edge_attr = ', '.join(edge_attr)
if edge_attr:
edge.append( ' [' + edge_attr + ']' )
return ' '.join(edge) + ';' |
def add_device_override(self, addr, cat, subcat, firmware=None):
"""Add a device override to the PLM."""
self.plm.devices.add_override(addr, 'cat', cat)
self.plm.devices.add_override(addr, 'subcat', subcat)
if firmware:
self.plm.devices.add_override(addr, 'firmware', firmware) | Add a device override to the PLM. | Below is the the instruction that describes the task:
### Input:
Add a device override to the PLM.
### Response:
def add_device_override(self, addr, cat, subcat, firmware=None):
"""Add a device override to the PLM."""
self.plm.devices.add_override(addr, 'cat', cat)
self.plm.devices.add_override(addr, 'subcat', subcat)
if firmware:
self.plm.devices.add_override(addr, 'firmware', firmware) |
def get_safe_label(self):
"""Returns a label that is safe to add to a path in the mountpoint for this volume."""
if self.info.get('label') == '/':
return 'root'
suffix = re.sub(r"[/ \(\)]+", "_", self.info.get('label')) if self.info.get('label') else ""
if suffix and suffix[0] == '_':
suffix = suffix[1:]
if len(suffix) > 2 and suffix[-1] == '_':
suffix = suffix[:-1]
return suffix | Returns a label that is safe to add to a path in the mountpoint for this volume. | Below is the the instruction that describes the task:
### Input:
Returns a label that is safe to add to a path in the mountpoint for this volume.
### Response:
def get_safe_label(self):
"""Returns a label that is safe to add to a path in the mountpoint for this volume."""
if self.info.get('label') == '/':
return 'root'
suffix = re.sub(r"[/ \(\)]+", "_", self.info.get('label')) if self.info.get('label') else ""
if suffix and suffix[0] == '_':
suffix = suffix[1:]
if len(suffix) > 2 and suffix[-1] == '_':
suffix = suffix[:-1]
return suffix |
def objects_reachable_from(obj):
"""
Return graph of objects reachable from *obj* via ``gc.get_referrers``.
Returns an :class:`~refcycle.object_graph.ObjectGraph` object holding all
objects reachable from the given one by following the output of
``gc.get_referrers``. Note that unlike the
:func:`~refcycle.creators.snapshot` function, the output graph may
include non-gc-tracked objects.
"""
# Depth-first search.
found = ObjectGraph.vertex_set()
to_process = [obj]
while to_process:
obj = to_process.pop()
found.add(obj)
for referent in gc.get_referents(obj):
if referent not in found:
to_process.append(referent)
return ObjectGraph(found) | Return graph of objects reachable from *obj* via ``gc.get_referrers``.
Returns an :class:`~refcycle.object_graph.ObjectGraph` object holding all
objects reachable from the given one by following the output of
``gc.get_referrers``. Note that unlike the
:func:`~refcycle.creators.snapshot` function, the output graph may
include non-gc-tracked objects. | Below is the the instruction that describes the task:
### Input:
Return graph of objects reachable from *obj* via ``gc.get_referrers``.
Returns an :class:`~refcycle.object_graph.ObjectGraph` object holding all
objects reachable from the given one by following the output of
``gc.get_referrers``. Note that unlike the
:func:`~refcycle.creators.snapshot` function, the output graph may
include non-gc-tracked objects.
### Response:
def objects_reachable_from(obj):
"""
Return graph of objects reachable from *obj* via ``gc.get_referrers``.
Returns an :class:`~refcycle.object_graph.ObjectGraph` object holding all
objects reachable from the given one by following the output of
``gc.get_referrers``. Note that unlike the
:func:`~refcycle.creators.snapshot` function, the output graph may
include non-gc-tracked objects.
"""
# Depth-first search.
found = ObjectGraph.vertex_set()
to_process = [obj]
while to_process:
obj = to_process.pop()
found.add(obj)
for referent in gc.get_referents(obj):
if referent not in found:
to_process.append(referent)
return ObjectGraph(found) |
def zpoppush(self, source, destination, count, score, new_score,
client=None, withscores=False, on_success=None,
if_exists=None):
"""
Pops the first ``count`` members from the ZSET ``source`` and adds them
to the ZSET ``destination`` with a score of ``new_score``. If ``score``
is not None, only members up to a score of ``score`` are used. Returns
the members that were moved and, if ``withscores`` is True, their
original scores.
If items were moved, the action defined in ``on_success`` is executed.
The only implemented option is a tuple in the form ('update_sets',
``set_value``, ``remove_from_set``, ``add_to_set``
[, ``add_to_set_if_exists``]).
If no items are left in the ``source`` ZSET, the ``set_value`` is
removed from ``remove_from_set``. If any items were moved to the
``destination`` ZSET, the ``set_value`` is added to ``add_to_set``. If
any items were moved to the ``if_exists_key`` ZSET (see below), the
``set_value`` is added to the ``add_to_set_if_exists`` set.
If ``if_exists`` is specified as a tuple ('add', if_exists_key,
if_exists_score, if_exists_mode), then members that are already in the
``destination`` set will not be returned or updated, but they will be
added to a ZSET ``if_exists_key`` with a score of ``if_exists_score``
and the given behavior specified in ``if_exists_mode`` for members that
already exist in the ``if_exists_key`` ZSET. ``if_exists_mode`` can be
one of the following:
- "nx": Don't update the score
- "min": Use the smaller of the given and existing score
- "max": Use the larger of the given and existing score
If ``if_exists`` is specified as a tuple ('noupdate',), then no action
will be taken for members that are already in the ``destination`` ZSET
(their score will not be updated).
"""
if score is None:
score = '+inf' # Include all elements.
if withscores:
if on_success:
raise NotImplementedError()
return self._zpoppush_withscores(
keys=[source, destination],
args=[score, count, new_score],
client=client)
else:
if if_exists and if_exists[0] == 'add':
_, if_exists_key, if_exists_score, if_exists_mode = if_exists
if if_exists_mode != 'min':
raise NotImplementedError()
if not on_success or on_success[0] != 'update_sets':
raise NotImplementedError()
set_value, remove_from_set, add_to_set, add_to_set_if_exists \
= on_success[1:]
return self._zpoppush_exists_min_update_sets(
keys=[source, destination, remove_from_set, add_to_set,
add_to_set_if_exists, if_exists_key],
args=[score, count, new_score, set_value, if_exists_score],
)
elif if_exists and if_exists[0] == 'noupdate':
if not on_success or on_success[0] != 'update_sets':
raise NotImplementedError()
set_value, remove_from_set, add_to_set \
= on_success[1:]
return self._zpoppush_exists_ignore_update_sets(
keys=[source, destination, remove_from_set, add_to_set],
args=[score, count, new_score, set_value],
)
if on_success:
if on_success[0] != 'update_sets':
raise NotImplementedError()
else:
set_value, remove_from_set, add_to_set = on_success[1:]
return self._zpoppush_update_sets(
keys=[source, destination, remove_from_set, add_to_set],
args=[score, count, new_score, set_value],
client=client)
else:
return self._zpoppush(
keys=[source, destination],
args=[score, count, new_score],
client=client) | Pops the first ``count`` members from the ZSET ``source`` and adds them
to the ZSET ``destination`` with a score of ``new_score``. If ``score``
is not None, only members up to a score of ``score`` are used. Returns
the members that were moved and, if ``withscores`` is True, their
original scores.
If items were moved, the action defined in ``on_success`` is executed.
The only implemented option is a tuple in the form ('update_sets',
``set_value``, ``remove_from_set``, ``add_to_set``
[, ``add_to_set_if_exists``]).
If no items are left in the ``source`` ZSET, the ``set_value`` is
removed from ``remove_from_set``. If any items were moved to the
``destination`` ZSET, the ``set_value`` is added to ``add_to_set``. If
any items were moved to the ``if_exists_key`` ZSET (see below), the
``set_value`` is added to the ``add_to_set_if_exists`` set.
If ``if_exists`` is specified as a tuple ('add', if_exists_key,
if_exists_score, if_exists_mode), then members that are already in the
``destination`` set will not be returned or updated, but they will be
added to a ZSET ``if_exists_key`` with a score of ``if_exists_score``
and the given behavior specified in ``if_exists_mode`` for members that
already exist in the ``if_exists_key`` ZSET. ``if_exists_mode`` can be
one of the following:
- "nx": Don't update the score
- "min": Use the smaller of the given and existing score
- "max": Use the larger of the given and existing score
If ``if_exists`` is specified as a tuple ('noupdate',), then no action
will be taken for members that are already in the ``destination`` ZSET
(their score will not be updated). | Below is the the instruction that describes the task:
### Input:
Pops the first ``count`` members from the ZSET ``source`` and adds them
to the ZSET ``destination`` with a score of ``new_score``. If ``score``
is not None, only members up to a score of ``score`` are used. Returns
the members that were moved and, if ``withscores`` is True, their
original scores.
If items were moved, the action defined in ``on_success`` is executed.
The only implemented option is a tuple in the form ('update_sets',
``set_value``, ``remove_from_set``, ``add_to_set``
[, ``add_to_set_if_exists``]).
If no items are left in the ``source`` ZSET, the ``set_value`` is
removed from ``remove_from_set``. If any items were moved to the
``destination`` ZSET, the ``set_value`` is added to ``add_to_set``. If
any items were moved to the ``if_exists_key`` ZSET (see below), the
``set_value`` is added to the ``add_to_set_if_exists`` set.
If ``if_exists`` is specified as a tuple ('add', if_exists_key,
if_exists_score, if_exists_mode), then members that are already in the
``destination`` set will not be returned or updated, but they will be
added to a ZSET ``if_exists_key`` with a score of ``if_exists_score``
and the given behavior specified in ``if_exists_mode`` for members that
already exist in the ``if_exists_key`` ZSET. ``if_exists_mode`` can be
one of the following:
- "nx": Don't update the score
- "min": Use the smaller of the given and existing score
- "max": Use the larger of the given and existing score
If ``if_exists`` is specified as a tuple ('noupdate',), then no action
will be taken for members that are already in the ``destination`` ZSET
(their score will not be updated).
### Response:
def zpoppush(self, source, destination, count, score, new_score,
client=None, withscores=False, on_success=None,
if_exists=None):
"""
Pops the first ``count`` members from the ZSET ``source`` and adds them
to the ZSET ``destination`` with a score of ``new_score``. If ``score``
is not None, only members up to a score of ``score`` are used. Returns
the members that were moved and, if ``withscores`` is True, their
original scores.
If items were moved, the action defined in ``on_success`` is executed.
The only implemented option is a tuple in the form ('update_sets',
``set_value``, ``remove_from_set``, ``add_to_set``
[, ``add_to_set_if_exists``]).
If no items are left in the ``source`` ZSET, the ``set_value`` is
removed from ``remove_from_set``. If any items were moved to the
``destination`` ZSET, the ``set_value`` is added to ``add_to_set``. If
any items were moved to the ``if_exists_key`` ZSET (see below), the
``set_value`` is added to the ``add_to_set_if_exists`` set.
If ``if_exists`` is specified as a tuple ('add', if_exists_key,
if_exists_score, if_exists_mode), then members that are already in the
``destination`` set will not be returned or updated, but they will be
added to a ZSET ``if_exists_key`` with a score of ``if_exists_score``
and the given behavior specified in ``if_exists_mode`` for members that
already exist in the ``if_exists_key`` ZSET. ``if_exists_mode`` can be
one of the following:
- "nx": Don't update the score
- "min": Use the smaller of the given and existing score
- "max": Use the larger of the given and existing score
If ``if_exists`` is specified as a tuple ('noupdate',), then no action
will be taken for members that are already in the ``destination`` ZSET
(their score will not be updated).
"""
if score is None:
score = '+inf' # Include all elements.
if withscores:
if on_success:
raise NotImplementedError()
return self._zpoppush_withscores(
keys=[source, destination],
args=[score, count, new_score],
client=client)
else:
if if_exists and if_exists[0] == 'add':
_, if_exists_key, if_exists_score, if_exists_mode = if_exists
if if_exists_mode != 'min':
raise NotImplementedError()
if not on_success or on_success[0] != 'update_sets':
raise NotImplementedError()
set_value, remove_from_set, add_to_set, add_to_set_if_exists \
= on_success[1:]
return self._zpoppush_exists_min_update_sets(
keys=[source, destination, remove_from_set, add_to_set,
add_to_set_if_exists, if_exists_key],
args=[score, count, new_score, set_value, if_exists_score],
)
elif if_exists and if_exists[0] == 'noupdate':
if not on_success or on_success[0] != 'update_sets':
raise NotImplementedError()
set_value, remove_from_set, add_to_set \
= on_success[1:]
return self._zpoppush_exists_ignore_update_sets(
keys=[source, destination, remove_from_set, add_to_set],
args=[score, count, new_score, set_value],
)
if on_success:
if on_success[0] != 'update_sets':
raise NotImplementedError()
else:
set_value, remove_from_set, add_to_set = on_success[1:]
return self._zpoppush_update_sets(
keys=[source, destination, remove_from_set, add_to_set],
args=[score, count, new_score, set_value],
client=client)
else:
return self._zpoppush(
keys=[source, destination],
args=[score, count, new_score],
client=client) |
def load_yaml(fname):
"""Load a YAML file."""
yaml = YAML(typ="safe")
# Compat with HASS
yaml.allow_duplicate_keys = True
# Stub HASS constructors
HassSafeConstructor.name = fname
yaml.Constructor = HassSafeConstructor
with open(fname, encoding="utf-8") as conf_file:
# If configuration file is empty YAML returns None
# We convert that to an empty dict
return yaml.load(conf_file) or {} | Load a YAML file. | Below is the the instruction that describes the task:
### Input:
Load a YAML file.
### Response:
def load_yaml(fname):
"""Load a YAML file."""
yaml = YAML(typ="safe")
# Compat with HASS
yaml.allow_duplicate_keys = True
# Stub HASS constructors
HassSafeConstructor.name = fname
yaml.Constructor = HassSafeConstructor
with open(fname, encoding="utf-8") as conf_file:
# If configuration file is empty YAML returns None
# We convert that to an empty dict
return yaml.load(conf_file) or {} |
def QA_fetch_get_globalfuture_list(ip=None, port=None):
"""[summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
14 3 伦敦金属 LM
15 3 伦敦石油 IP
16 3 纽约商品 CO
17 3 纽约石油 NY
18 3 芝加哥谷 CB
19 3 东京工业品 TO
20 3 纽约期货 NB
77 3 新加坡期货 SX
39 3 马来期货 ML
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query(
'market==14 or market==15 or market==16 or market==17 or market==18 or market==19 or market==20 or market==77 or market==39') | [summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
14 3 伦敦金属 LM
15 3 伦敦石油 IP
16 3 纽约商品 CO
17 3 纽约石油 NY
18 3 芝加哥谷 CB
19 3 东京工业品 TO
20 3 纽约期货 NB
77 3 新加坡期货 SX
39 3 马来期货 ML | Below is the the instruction that describes the task:
### Input:
[summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
14 3 伦敦金属 LM
15 3 伦敦石油 IP
16 3 纽约商品 CO
17 3 纽约石油 NY
18 3 芝加哥谷 CB
19 3 东京工业品 TO
20 3 纽约期货 NB
77 3 新加坡期货 SX
39 3 马来期货 ML
### Response:
def QA_fetch_get_globalfuture_list(ip=None, port=None):
"""[summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
14 3 伦敦金属 LM
15 3 伦敦石油 IP
16 3 纽约商品 CO
17 3 纽约石油 NY
18 3 芝加哥谷 CB
19 3 东京工业品 TO
20 3 纽约期货 NB
77 3 新加坡期货 SX
39 3 马来期货 ML
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query(
'market==14 or market==15 or market==16 or market==17 or market==18 or market==19 or market==20 or market==77 or market==39') |
def ask_user(prompt: str, default: str = None) -> Optional[str]:
"""
Prompts the user, with a default. Returns user input from ``stdin``.
"""
if default is None:
prompt += ": "
else:
prompt += " [" + default + "]: "
result = input(prompt)
return result if len(result) > 0 else default | Prompts the user, with a default. Returns user input from ``stdin``. | Below is the the instruction that describes the task:
### Input:
Prompts the user, with a default. Returns user input from ``stdin``.
### Response:
def ask_user(prompt: str, default: str = None) -> Optional[str]:
"""
Prompts the user, with a default. Returns user input from ``stdin``.
"""
if default is None:
prompt += ": "
else:
prompt += " [" + default + "]: "
result = input(prompt)
return result if len(result) > 0 else default |
def is_real(arg):
'''
is_real(x) yields True if x is a non-complex numeric object and False otherwise.
Note that is_real(i) will yield True for an integer or bool i; to check for floating-point
representations of numbers, use is_array(x, numpy.floating) or similar.
'''
return (is_real(mag(arg)) if is_quantity(arg) else
True if isinstance(arg, float) else
is_npscalar(arg, 'real') or is_npvalue(arg, 'real')) | is_real(x) yields True if x is a non-complex numeric object and False otherwise.
Note that is_real(i) will yield True for an integer or bool i; to check for floating-point
representations of numbers, use is_array(x, numpy.floating) or similar. | Below is the the instruction that describes the task:
### Input:
is_real(x) yields True if x is a non-complex numeric object and False otherwise.
Note that is_real(i) will yield True for an integer or bool i; to check for floating-point
representations of numbers, use is_array(x, numpy.floating) or similar.
### Response:
def is_real(arg):
'''
is_real(x) yields True if x is a non-complex numeric object and False otherwise.
Note that is_real(i) will yield True for an integer or bool i; to check for floating-point
representations of numbers, use is_array(x, numpy.floating) or similar.
'''
return (is_real(mag(arg)) if is_quantity(arg) else
True if isinstance(arg, float) else
is_npscalar(arg, 'real') or is_npvalue(arg, 'real')) |
def _scalar_coef_op_left(func):
"""decorator for operator overloading when ScalarCoef is on the
left"""
@wraps(func)
def verif(self, scoef):
if isinstance(scoef, ScalarCoefs):
if len(self._vec) == len(scoef._vec):
return ScalarCoefs(func(self, self._vec, scoef._vec),
self.nmax,
self.mmax)
else:
raise ValueError(err_msg['SC_sz_msmtch'] % \
(self.nmax, self.mmax,
scoef.nmax, scoef.mmax))
elif isinstance(scoef, numbers.Number):
return ScalarCoefs(func(self, self._vec, scoef), self.nmax,
self.mmax)
else:
raise TypeError(err_msg['no_combi_SC'])
return verif | decorator for operator overloading when ScalarCoef is on the
left | Below is the the instruction that describes the task:
### Input:
decorator for operator overloading when ScalarCoef is on the
left
### Response:
def _scalar_coef_op_left(func):
"""decorator for operator overloading when ScalarCoef is on the
left"""
@wraps(func)
def verif(self, scoef):
if isinstance(scoef, ScalarCoefs):
if len(self._vec) == len(scoef._vec):
return ScalarCoefs(func(self, self._vec, scoef._vec),
self.nmax,
self.mmax)
else:
raise ValueError(err_msg['SC_sz_msmtch'] % \
(self.nmax, self.mmax,
scoef.nmax, scoef.mmax))
elif isinstance(scoef, numbers.Number):
return ScalarCoefs(func(self, self._vec, scoef), self.nmax,
self.mmax)
else:
raise TypeError(err_msg['no_combi_SC'])
return verif |
def get_resources_nodes(call=None, resFilter=None):
'''
Retrieve all hypervisors (nodes) available on this environment
CLI Example:
.. code-block:: bash
salt-cloud -f get_resources_nodes my-proxmox-config
'''
log.debug('Getting resource: nodes.. (filter: %s)', resFilter)
resources = query('get', 'cluster/resources')
ret = {}
for resource in resources:
if 'type' in resource and resource['type'] == 'node':
name = resource['node']
ret[name] = resource
if resFilter is not None:
log.debug('Filter given: %s, returning requested '
'resource: nodes', resFilter)
return ret[resFilter]
log.debug('Filter not given: %s, returning all resource: nodes', ret)
return ret | Retrieve all hypervisors (nodes) available on this environment
CLI Example:
.. code-block:: bash
salt-cloud -f get_resources_nodes my-proxmox-config | Below is the the instruction that describes the task:
### Input:
Retrieve all hypervisors (nodes) available on this environment
CLI Example:
.. code-block:: bash
salt-cloud -f get_resources_nodes my-proxmox-config
### Response:
def get_resources_nodes(call=None, resFilter=None):
'''
Retrieve all hypervisors (nodes) available on this environment
CLI Example:
.. code-block:: bash
salt-cloud -f get_resources_nodes my-proxmox-config
'''
log.debug('Getting resource: nodes.. (filter: %s)', resFilter)
resources = query('get', 'cluster/resources')
ret = {}
for resource in resources:
if 'type' in resource and resource['type'] == 'node':
name = resource['node']
ret[name] = resource
if resFilter is not None:
log.debug('Filter given: %s, returning requested '
'resource: nodes', resFilter)
return ret[resFilter]
log.debug('Filter not given: %s, returning all resource: nodes', ret)
return ret |
def BoolEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a boolean field."""
false_byte = b'\x00'
true_byte = b'\x01'
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value))
for element in value:
if element:
write(true_byte)
else:
write(false_byte)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
if element:
write(true_byte)
else:
write(false_byte)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
def EncodeField(write, value):
write(tag_bytes)
if value:
return write(true_byte)
return write(false_byte)
return EncodeField | Returns an encoder for a boolean field. | Below is the the instruction that describes the task:
### Input:
Returns an encoder for a boolean field.
### Response:
def BoolEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a boolean field."""
false_byte = b'\x00'
true_byte = b'\x01'
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value))
for element in value:
if element:
write(true_byte)
else:
write(false_byte)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
if element:
write(true_byte)
else:
write(false_byte)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
def EncodeField(write, value):
write(tag_bytes)
if value:
return write(true_byte)
return write(false_byte)
return EncodeField |
def export_records(self, bucket_name, export_path='', overwrite=True):
'''
a method to export all the records from a bucket to local files
:param bucket_name: string with name of bucket
:param export_path: [optional] string with path to root directory for record dump
:param overwrite: [optional] boolean to overwrite existing files matching records
:return: True
'''
title = '%s.export_records' % self.__class__.__name__
# validate inputs
input_fields = {
'bucket_name': bucket_name,
'export_path': export_path
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# validate path
from os import path, makedirs
if not export_path:
export_path = './'
if not path.exists(export_path):
raise ValueError('%s(export_path="%s") is not a valid path.' % (title, export_path))
elif not path.isdir(export_path):
raise ValueError('%s(export_path="%s") must be a directory.' % (title, export_path))
# verify existence of bucket
if not bucket_name in self.bucket_list:
if not bucket_name in self.list_buckets():
raise ValueError('S3 bucket "%s" does not exist in aws region %s.' % (bucket_name, self.iam.region_name))
# retrieve list of records in bucket
record_list, next_key = self.list_records(bucket_name)
if next_key:
record_number = 'first %s' % str(len(record_list))
else:
record_number = str(len(record_list))
plural = ''
if len(record_list) != 1:
plural = 's'
self.iam.printer('Exporting %s record%s from bucket "%s" to path "%s"' % (record_number, plural, bucket_name, export_path), flush=True)
# define local save function
def save_to_file(_export_path, _bucket_name, _record_key, _overwrite):
try:
response = self.connection.get_object(Bucket=_bucket_name,Key=_record_key)
except:
raise AWSConnectionError(title)
record_data = response['Body'].read()
file_path = path.join(_export_path, _record_key)
dir_path = path.dirname(file_path)
if not path.exists(dir_path):
makedirs(dir_path)
if path.exists(file_path) and not _overwrite:
self.iam.printer('.\n%s already exists. File skipped. Continuing.' % file_path, flush=True)
else:
with open(file_path, 'wb') as file:
file.write(record_data)
file.close()
self.iam.printer('.', flush=True)
# retrieve data for records in bucket
for record in record_list:
save_to_file(export_path, bucket_name, record['key'], overwrite)
# continue exporting records in bucket until all exported
if next_key:
while next_key or record_list:
record_list, next_key = self.list_records(bucket_name)
if next_key:
record_number = 'next %s' % str(len(record_list))
else:
record_number = 'last %s' % str(len(record_list))
self.iam.printer('.')
plural = ''
if len(record_list) != 1:
plural = 's'
self.iam.printer('Exporting %s record%s from bucket "%s" to path "%s"' % (record_number, plural, bucket_name, export_path), flush=True)
for record in record_list:
save_to_file(export_path, bucket_name, record['key'], overwrite)
# report completion and return true
self.iam.printer(' done.')
return True | a method to export all the records from a bucket to local files
:param bucket_name: string with name of bucket
:param export_path: [optional] string with path to root directory for record dump
:param overwrite: [optional] boolean to overwrite existing files matching records
:return: True | Below is the instruction that describes the task:
### Input:
a method to export all the records from a bucket to local files
:param bucket_name: string with name of bucket
:param export_path: [optional] string with path to root directory for record dump
:param overwrite: [optional] boolean to overwrite existing files matching records
:return: True
### Response:
def export_records(self, bucket_name, export_path='', overwrite=True):
    '''
        a method to export all the records from a bucket to local files

        Record keys become file paths relative to export_path; intermediate
        directories are created as needed. Listing is paginated: the first
        page is fetched up front and further pages are pulled in a loop.

        :param bucket_name: string with name of bucket
        :param export_path: [optional] string with path to root directory for record dump
        :param overwrite: [optional] boolean to overwrite existing files matching records
        :return: True
    '''
    title = '%s.export_records' % self.__class__.__name__
    # validate inputs
    input_fields = {
        'bucket_name': bucket_name,
        'export_path': export_path
    }
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)
    # validate path
    # an empty export_path defaults to the current working directory
    from os import path, makedirs
    if not export_path:
        export_path = './'
    if not path.exists(export_path):
        raise ValueError('%s(export_path="%s") is not a valid path.' % (title, export_path))
    elif not path.isdir(export_path):
        raise ValueError('%s(export_path="%s") must be a directory.' % (title, export_path))
    # verify existence of bucket
    # check the cached bucket list first to avoid an extra API round-trip
    if not bucket_name in self.bucket_list:
        if not bucket_name in self.list_buckets():
            raise ValueError('S3 bucket "%s" does not exist in aws region %s.' % (bucket_name, self.iam.region_name))
    # retrieve list of records in bucket
    # next_key is truthy when the listing is paginated and more pages remain
    record_list, next_key = self.list_records(bucket_name)
    if next_key:
        record_number = 'first %s' % str(len(record_list))
    else:
        record_number = str(len(record_list))
    plural = ''
    if len(record_list) != 1:
        plural = 's'
    self.iam.printer('Exporting %s record%s from bucket "%s" to path "%s"' % (record_number, plural, bucket_name, export_path), flush=True)
    # define local save function
    # fetches one record body from S3 and writes it under _export_path,
    # creating parent directories; existing files are skipped unless _overwrite
    def save_to_file(_export_path, _bucket_name, _record_key, _overwrite):
        try:
            response = self.connection.get_object(Bucket=_bucket_name,Key=_record_key)
        except:
            raise AWSConnectionError(title)
        record_data = response['Body'].read()
        file_path = path.join(_export_path, _record_key)
        dir_path = path.dirname(file_path)
        if not path.exists(dir_path):
            makedirs(dir_path)
        if path.exists(file_path) and not _overwrite:
            self.iam.printer('.\n%s already exists. File skipped. Continuing.' % file_path, flush=True)
        else:
            with open(file_path, 'wb') as file:
                file.write(record_data)
                file.close()  # redundant inside 'with' (harmless)
            self.iam.printer('.', flush=True)
    # retrieve data for records in bucket
    for record in record_list:
        save_to_file(export_path, bucket_name, record['key'], overwrite)
    # continue exporting records in bucket until all exported
    # NOTE(review): list_records is re-called without an explicit continuation
    # marker; presumably the client resumes from an internal cursor -- confirm,
    # otherwise this loop would re-read the first page indefinitely.
    if next_key:
        while next_key or record_list:
            record_list, next_key = self.list_records(bucket_name)
            if next_key:
                record_number = 'next %s' % str(len(record_list))
            else:
                record_number = 'last %s' % str(len(record_list))
            self.iam.printer('.')
            plural = ''
            if len(record_list) != 1:
                plural = 's'
            self.iam.printer('Exporting %s record%s from bucket "%s" to path "%s"' % (record_number, plural, bucket_name, export_path), flush=True)
            for record in record_list:
                save_to_file(export_path, bucket_name, record['key'], overwrite)
    # report completion and return true
    self.iam.printer(' done.')
    return True
def calling_convention(self):
"""function calling convention. See
:class:CALLING_CONVENTION_TYPES class for possible values"""
if self._calling_convention is None:
self._calling_convention = \
calldef_types.CALLING_CONVENTION_TYPES.extract(self.attributes)
if not self._calling_convention:
self._calling_convention = self.guess_calling_convention()
return self._calling_convention | function calling convention. See
:class:CALLING_CONVENTION_TYPES class for possible values | Below is the instruction that describes the task:
### Input:
function calling convention. See
:class:CALLING_CONVENTION_TYPES class for possible values
### Response:
def calling_convention(self):
    """Function calling convention; see :class:CALLING_CONVENTION_TYPES
    for the possible values. Computed once and cached on the instance."""
    if self._calling_convention is None:
        # Try to read the convention straight from the declaration attributes.
        extracted = calldef_types.CALLING_CONVENTION_TYPES.extract(self.attributes)
        # Fall back to a heuristic guess when nothing could be extracted.
        self._calling_convention = extracted or self.guess_calling_convention()
    return self._calling_convention
def merge_infos(info1, info2):
"""We often need to aggregate together multiple infos. Most keys can
just be clobbered by the new info, but e.g. any keys which contain
counts should be added. The merge schema is indicated by the key
namespace.
Namespaces:
- stats.timers: Timing
- stats.gauges: Gauge values
- stats.*: Counts of a quantity
"""
for key, value in six.iteritems(info2):
if key in info1 and key.startswith('stats'):
if key.startswith('stats.timers'):
# timer
info1[key] += value
elif key.startswith('stats.gauges'):
# gauge
info1[key] = value
else:
# counter
info1[key] += value
else:
info1[key] = value | We often need to aggregate together multiple infos. Most keys can
just be clobbered by the new info, but e.g. any keys which contain
counts should be added. The merge schema is indicated by the key
namespace.
Namespaces:
- stats.timers: Timing
- stats.gauges: Gauge values
- stats.*: Counts of a quantity | Below is the instruction that describes the task:
### Input:
We often need to aggregate together multiple infos. Most keys can
just be clobbered by the new info, but e.g. any keys which contain
counts should be added. The merge schema is indicated by the key
namespace.
Namespaces:
- stats.timers: Timing
- stats.gauges: Gauge values
- stats.*: Counts of a quantity
### Response:
def merge_infos(info1, info2):
    """Fold the entries of ``info2`` into ``info1`` in place.

    Merge behaviour is keyed on the namespace of each key:
      - stats.timers.*: values accumulate (added together)
      - stats.gauges.*: the newest value replaces the old one
      - stats.* (anything else): treated as counters and added
    Keys absent from ``info1`` -- or outside the ``stats`` namespace --
    are simply copied over.
    """
    for key, incoming in six.iteritems(info2):
        if key not in info1 or not key.startswith('stats'):
            # new key, or not a stats quantity: plain copy
            info1[key] = incoming
        elif key.startswith('stats.gauges'):
            # gauge: latest reading wins
            info1[key] = incoming
        else:
            # timers and counters accumulate
            info1[key] += incoming
def datetime(past=True, random=random):
"""
Returns a random datetime from the past... or the future!
>>> mock_random.seed(0)
>>> datetime(random=mock_random).isoformat()
'1950-02-03T03:04:05'
"""
def year():
if past:
return random.choice(range(1950,2005))
else:
return _datetime.datetime.now().year + random.choice(range(1, 50))
def month():
return random.choice(range(1,12))
def day():
return random.choice(range(1,31))
def hour():
return random.choice(range(0,23))
def minute():
return random.choice(range(0,59))
def second():
return random.choice(range(0,59))
try:
return _datetime.datetime(year=year(),
month=month(),
day=day(),
hour=hour(),
minute=minute(),
second=second())
except ValueError:
return datetime(past=past) | Returns a random datetime from the past... or the future!
>>> mock_random.seed(0)
>>> datetime(random=mock_random).isoformat()
'1950-02-03T03:04:05' | Below is the instruction that describes the task:
### Input:
Returns a random datetime from the past... or the future!
>>> mock_random.seed(0)
>>> datetime(random=mock_random).isoformat()
'1950-02-03T03:04:05'
### Response:
def datetime(past=True, random=random):
    """
    Returns a random datetime from the past... or the future!

    :param past: when True, draw a year from 1950-2004; otherwise a year
        1 to 49 years after the current one.
    :param random: source of randomness; anything with a ``choice`` method.
    :raises: nothing -- impossible dates (e.g. Feb 30) are retried.

    >>> mock_random.seed(0)
    >>> datetime(random=mock_random).isoformat()
    '1950-02-03T03:04:05'
    """
    def year():
        if past:
            return random.choice(range(1950, 2005))
        else:
            return _datetime.datetime.now().year + random.choice(range(1, 50))
    def month():
        # NOTE(review): range(1, 12) never yields 12, so December is never
        # generated; kept as-is to preserve existing seeded outputs.
        return random.choice(range(1, 12))
    def day():
        # NOTE(review): likewise, day 31 is never generated.
        return random.choice(range(1, 31))
    def hour():
        return random.choice(range(0, 23))
    def minute():
        return random.choice(range(0, 59))
    def second():
        return random.choice(range(0, 59))
    try:
        return _datetime.datetime(year=year(),
                                  month=month(),
                                  day=day(),
                                  hour=hour(),
                                  minute=minute(),
                                  second=second())
    except ValueError:
        # An impossible calendar date was drawn; retry. Bug fix: the retry
        # previously dropped the caller's ``random`` source and fell back to
        # the module-level default, breaking deterministic seeding.
        return datetime(past=past, random=random)
def make_parser():
"""
Make an ArgumentParser that accepts DNS RRs
"""
line_parser = ZonefileLineParser()
subparsers = line_parser.add_subparsers()
# parse $ORIGIN
sp = subparsers.add_parser("$ORIGIN")
sp.add_argument("$ORIGIN", type=str)
# parse $TTL
sp = subparsers.add_parser("$TTL")
sp.add_argument("$TTL", type=int)
# parse each RR
args_and_types = [
("mname", str), ("rname", str), ("serial", int), ("refresh", int),
("retry", int), ("expire", int), ("minimum", int)
]
make_rr_subparser(subparsers, "SOA", args_and_types)
make_rr_subparser(subparsers, "NS", [("host", str)])
make_rr_subparser(subparsers, "A", [("ip", str)])
make_rr_subparser(subparsers, "AAAA", [("ip", str)])
make_rr_subparser(subparsers, "CNAME", [("alias", str)])
make_rr_subparser(subparsers, "ALIAS", [("host", str)])
make_rr_subparser(subparsers, "MX", [("preference", str), ("host", str)])
make_txt_subparser(subparsers)
make_rr_subparser(subparsers, "PTR", [("host", str)])
make_rr_subparser(subparsers, "SRV", [("priority", int), ("weight", int), ("port", int), ("target", str)])
make_rr_subparser(subparsers, "SPF", [("data", str)])
make_rr_subparser(subparsers, "URI", [("priority", int), ("weight", int), ("target", str)])
return line_parser | Make an ArgumentParser that accepts DNS RRs | Below is the instruction that describes the task:
### Input:
Make an ArgumentParser that accepts DNS RRs
### Response:
def make_parser():
    """
    Build an ArgumentParser that accepts DNS RRs, one subparser per
    directive ($ORIGIN, $TTL) and per resource-record type.
    """
    line_parser = ZonefileLineParser()
    subparsers = line_parser.add_subparsers()
    # Directives take a single typed value keyed by their own name.
    for directive, value_type in (("$ORIGIN", str), ("$TTL", int)):
        sp = subparsers.add_parser(directive)
        sp.add_argument(directive, type=value_type)
    # Resource-record types registered before TXT, fields in rdata order.
    for record_type, fields in (
        ("SOA", [("mname", str), ("rname", str), ("serial", int),
                 ("refresh", int), ("retry", int), ("expire", int),
                 ("minimum", int)]),
        ("NS", [("host", str)]),
        ("A", [("ip", str)]),
        ("AAAA", [("ip", str)]),
        ("CNAME", [("alias", str)]),
        ("ALIAS", [("host", str)]),
        ("MX", [("preference", str), ("host", str)]),
    ):
        make_rr_subparser(subparsers, record_type, fields)
    # TXT records carry free-form data and get a dedicated parser.
    make_txt_subparser(subparsers)
    # Remaining resource-record types, registered after TXT.
    for record_type, fields in (
        ("PTR", [("host", str)]),
        ("SRV", [("priority", int), ("weight", int), ("port", int),
                 ("target", str)]),
        ("SPF", [("data", str)]),
        ("URI", [("priority", int), ("weight", int), ("target", str)]),
    ):
        make_rr_subparser(subparsers, record_type, fields)
    return line_parser
def scope_required(*scopes):
"""
Test for specific scopes that the access token has been authenticated for before
processing the request and eventual response.
The scopes that are passed in determine how the decorator will respond to incoming
requests:
- If no scopes are passed in the arguments, the decorator will test for any available
scopes and determine the response based on that.
- If specific scopes are passed, the access token will be checked to make sure it has
all of the scopes that were requested.
This decorator will change the response if the access toke does not have the scope:
- If an invalid scope is requested (one that does not exist), all requests will be
denied, as no access tokens will be able to fulfill the scope request and the
request will be denied.
- If the access token does not have one of the requested scopes, the request will be
denied and the user will be returned one of two responses:
- A 400 response (Bad Request) will be returned if an unauthenticated user tries to
access the resource.
- A 403 response (Forbidden) will be returned if an authenticated user ties to access
the resource but does not have the correct scope.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
from django.http import HttpResponseBadRequest, HttpResponseForbidden
from .exceptions.base import InvalidRequest, InsufficientScope
from .models import Scope
from .utils import request_error_header
try:
if not hasattr(request, "access_token"):
raise CredentialsNotProvided()
access_token = request.access_token
for scope_name in scopes:
try:
scope = access_token.scope.for_short_name(scope_name)
except Scope.DoesNotExist:
raise ScopeNotEnough()
except InvalidRequest as e:
response = HttpResponseBadRequest()
response["WWW-Authenticate"] = request_error_header(e)
return response
except InsufficientScope as e:
response = HttpResponseForbidden()
response["WWW-Authenticate"] = request_error_header(e)
return response
return view_func(request, *args, **kwargs)
return _wrapped_view
if scopes and hasattr(scopes[0], "__call__"):
func = scopes[0]
scopes = scopes[1:]
return decorator(func)
return decorator | Test for specific scopes that the access token has been authenticated for before
processing the request and eventual response.
The scopes that are passed in determine how the decorator will respond to incoming
requests:
- If no scopes are passed in the arguments, the decorator will test for any available
scopes and determine the response based on that.
- If specific scopes are passed, the access token will be checked to make sure it has
all of the scopes that were requested.
This decorator will change the response if the access toke does not have the scope:
- If an invalid scope is requested (one that does not exist), all requests will be
denied, as no access tokens will be able to fulfill the scope request and the
request will be denied.
- If the access token does not have one of the requested scopes, the request will be
denied and the user will be returned one of two responses:
- A 400 response (Bad Request) will be returned if an unauthenticated user tries to
access the resource.
- A 403 response (Forbidden) will be returned if an authenticated user ties to access
the resource but does not have the correct scope. | Below is the instruction that describes the task:
### Input:
Test for specific scopes that the access token has been authenticated for before
processing the request and eventual response.
The scopes that are passed in determine how the decorator will respond to incoming
requests:
- If no scopes are passed in the arguments, the decorator will test for any available
scopes and determine the response based on that.
- If specific scopes are passed, the access token will be checked to make sure it has
all of the scopes that were requested.
This decorator will change the response if the access toke does not have the scope:
- If an invalid scope is requested (one that does not exist), all requests will be
denied, as no access tokens will be able to fulfill the scope request and the
request will be denied.
- If the access token does not have one of the requested scopes, the request will be
denied and the user will be returned one of two responses:
- A 400 response (Bad Request) will be returned if an unauthenticated user tries to
access the resource.
- A 403 response (Forbidden) will be returned if an authenticated user ties to access
the resource but does not have the correct scope.
### Response:
def scope_required(*scopes):
    """
    Test for specific scopes that the access token has been authenticated for before
    processing the request and eventual response.
    The scopes that are passed in determine how the decorator will respond to incoming
    requests:
    - If no scopes are passed in the arguments, the decorator will test for any available
      scopes and determine the response based on that.
    - If specific scopes are passed, the access token will be checked to make sure it has
      all of the scopes that were requested.
    This decorator will change the response if the access token does not have the scope:
    - If an invalid scope is requested (one that does not exist), all requests will be
      denied, as no access tokens will be able to fulfill the scope request and the
      request will be denied.
    - If the access token does not have one of the requested scopes, the request will be
      denied and the user will be returned one of two responses:
      - A 400 response (Bad Request) will be returned if an unauthenticated user tries to
        access the resource.
      - A 403 response (Forbidden) will be returned if an authenticated user tries to access
        the resource but does not have the correct scope.
    """
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            # Imports are deferred to request time -- presumably to avoid
            # import cycles / app-loading ordering issues at decoration time.
            from django.http import HttpResponseBadRequest, HttpResponseForbidden
            from .exceptions.base import InvalidRequest, InsufficientScope
            from .models import Scope
            from .utils import request_error_header
            try:
                if not hasattr(request, "access_token"):
                    raise CredentialsNotProvided()
                access_token = request.access_token
                for scope_name in scopes:
                    try:
                        # 'scope' is assigned only to probe for existence;
                        # the value itself is never used.
                        scope = access_token.scope.for_short_name(scope_name)
                    except Scope.DoesNotExist:
                        raise ScopeNotEnough()
            # NOTE(review): this relies on CredentialsNotProvided being a
            # subclass of InvalidRequest and ScopeNotEnough a subclass of
            # InsufficientScope -- otherwise they escape uncaught. Confirm.
            except InvalidRequest as e:
                response = HttpResponseBadRequest()
                response["WWW-Authenticate"] = request_error_header(e)
                return response
            except InsufficientScope as e:
                response = HttpResponseForbidden()
                response["WWW-Authenticate"] = request_error_header(e)
                return response
            return view_func(request, *args, **kwargs)
        return _wrapped_view
    # Support bare usage (@scope_required with no parentheses): when the
    # first positional argument is callable it is treated as the view and
    # the remaining arguments (if any) stay in 'scopes'.
    if scopes and hasattr(scopes[0], "__call__"):
        func = scopes[0]
        scopes = scopes[1:]
        return decorator(func)
    return decorator
def add_analyses(cls, source):
"""Dynamically add new analysis methods to the Timeseries class.
Args:
source: Can be a function, module or the filename of a python file.
If a filename or a module is given, then all functions defined
inside not starting with _ will be added as methods.
The only restriction on the functions is that they can accept a
Timeseries as their first argument. So existing functions that
take a ndarray or array or even a list will usually also work.
"""
if isinstance(source, types.FunctionType):
_add_single_method(source.__name__, source)
else:
if isinstance(source, types.ModuleType):
mod = source
elif isinstance(source, types.StringTypes):
import os
import imp
path = os.path.abspath(source)
if os.path.isfile(path) and path[-3:] == '.py':
dir, file = os.path.split(path)
name = file[:-3]
module_info = imp.find_module(name, [dir])
mod = imp.load_module('nsim.' + name, *module_info)
elif (os.path.isdir(path) and
'__init__.py' in os.listdir(path)):
module_info = imp.find_module('__init__', [path])
name = os.path.basename(path)
mod = imp.load_module('nsim.' + name, *module_info)
else:
raise Error('"%s" is not a file or directory' % source)
else:
raise ValueError('`source` argument not a function or module')
for name, obj in mod.__dict__.items():
if name[0] != '_' and isinstance(obj, types.FunctionType):
cls._add_single_method(name, obj) | Dynamically add new analysis methods to the Timeseries class.
Args:
source: Can be a function, module or the filename of a python file.
If a filename or a module is given, then all functions defined
inside not starting with _ will be added as methods.
The only restriction on the functions is that they can accept a
Timeseries as their first argument. So existing functions that
take a ndarray or array or even a list will usually also work. | Below is the instruction that describes the task:
### Input:
Dynamically add new analysis methods to the Timeseries class.
Args:
source: Can be a function, module or the filename of a python file.
If a filename or a module is given, then all functions defined
inside not starting with _ will be added as methods.
The only restriction on the functions is that they can accept a
Timeseries as their first argument. So existing functions that
take a ndarray or array or even a list will usually also work.
### Response:
def add_analyses(cls, source):
    """Dynamically add new analysis methods to the Timeseries class.

    Args:
        source: Can be a function, module or the filename of a python file.
            If a filename or a module is given, then all functions defined
            inside not starting with _ will be added as methods.
            The only restriction on the functions is that they can accept a
            Timeseries as their first argument. So existing functions that
            take a ndarray or array or even a list will usually also work.
    """
    if isinstance(source, types.FunctionType):
        # NOTE(review): this branch calls the module-level _add_single_method,
        # while the loop at the bottom calls cls._add_single_method -- confirm
        # the bare name exists at module scope, otherwise this is a NameError.
        _add_single_method(source.__name__, source)
    else:
        if isinstance(source, types.ModuleType):
            mod = source
        elif isinstance(source, types.StringTypes):
            # Python 2-only APIs (types.StringTypes, imp). The string may
            # name a .py file or a package directory with an __init__.py.
            import os
            import imp
            path = os.path.abspath(source)
            if os.path.isfile(path) and path[-3:] == '.py':
                dir, file = os.path.split(path)
                name = file[:-3]
                module_info = imp.find_module(name, [dir])
                # Loaded under the 'nsim.' namespace to avoid clashing with
                # an unrelated module of the same name.
                mod = imp.load_module('nsim.' + name, *module_info)
            elif (os.path.isdir(path) and
                  '__init__.py' in os.listdir(path)):
                module_info = imp.find_module('__init__', [path])
                name = os.path.basename(path)
                mod = imp.load_module('nsim.' + name, *module_info)
            else:
                raise Error('"%s" is not a file or directory' % source)
        else:
            raise ValueError('`source` argument not a function or module')
        # Attach every public (non-underscore) function found in the module.
        for name, obj in mod.__dict__.items():
            if name[0] != '_' and isinstance(obj, types.FunctionType):
                cls._add_single_method(name, obj)
def thread_safe(method):
""" wraps method with lock acquire/release cycle
decorator requires class instance to have field self.lock of type threading.Lock or threading.RLock """
@functools.wraps(method)
def _locker(self, *args, **kwargs):
assert hasattr(self, 'lock'), \
'thread_safe decorator applied to method {0}.{1}: missing required field {0}.lock'\
.format(self.__class__.__name__, method.__name__)
try:
self.lock.acquire()
return method(self, *args, **kwargs)
finally:
try:
self.lock.release()
except:
sys.stderr.write('Exception on releasing lock at method {0}'.format(method.__name__))
traceback.print_exc(file=sys.stderr)
return _locker | wraps method with lock acquire/release cycle
decorator requires class instance to have field self.lock of type threading.Lock or threading.RLock | Below is the instruction that describes the task:
### Input:
wraps method with lock acquire/release cycle
decorator requires class instance to have field self.lock of type threading.Lock or threading.RLock
### Response:
def thread_safe(method):
    """ wraps method with lock acquire/release cycle
    decorator requires class instance to have field self.lock of type threading.Lock or threading.RLock """
    @functools.wraps(method)
    def _locker(self, *args, **kwargs):
        assert hasattr(self, 'lock'), \
            'thread_safe decorator applied to method {0}.{1}: missing required field {0}.lock'\
            .format(self.__class__.__name__, method.__name__)
        # 'with' acquires before entering the body and always releases on
        # exit. This fixes two defects in the original implementation:
        # (1) acquire() ran inside the try block, so a failed acquire still
        #     triggered release() on an unheld lock, and
        # (2) a bare 'except:' silently swallowed release() errors.
        with self.lock:
            return method(self, *args, **kwargs)
    return _locker
def apply(self, event = None):
"""Before self.onOk closes the window, it calls this function to sync the config changes from the GUI back to self.config."""
for section in self.config.sections():
# Run through the sections to check all the option values:
for option, o in self.config.config[section].items():
# Check the actual values against the validators and complain if necessary:
if not o['include']:
continue # This value is hidden, so there's no control for it.
control = self._controls[section][option] # Get the actual control for GetValue
try:
value = type(o['value'])(control.GetValue()) # Try and convert the value
except ValueError as msg:
self.displayError(section, option, str(msg)) # Woops, something went wrong
return False # Tells self.onOk not to close the window
problem = None # Set up the problem variable.
try:
problem = o['validate'](value) # See if it passes the test
except Exception as e:
problem = str(e) # The lambda raised an exception.
if problem:
self.displayError(section, option, problem) # It didn't
return False # Tells self.onOk not to close the window
self.config.set(section, option, value) # All clear
return True | Before self.onOk closes the window, it calls this function to sync the config changes from the GUI back to self.config. | Below is the instruction that describes the task:
### Input:
Before self.onOk closes the window, it calls this function to sync the config changes from the GUI back to self.config.
### Response:
def apply(self, event = None):
    """Before self.onOk closes the window, it calls this function to sync the config changes from the GUI back to self.config.

    Returns True when every option validates (and has been written back),
    False as soon as any option fails conversion or validation -- the
    caller (self.onOk) uses False to keep the window open.
    """
    for section in self.config.sections():
        # Run through the sections to check all the option values:
        for option, o in self.config.config[section].items():
            # Check the actual values against the validators and complain if necessary:
            if not o['include']:
                continue # This value is hidden, so there's no control for it.
            control = self._controls[section][option] # Get the actual control for GetValue
            try:
                # Coerce the control's value to the type of the current
                # config value (e.g. int('3')); ValueError means bad input.
                value = type(o['value'])(control.GetValue()) # Try and convert the value
            except ValueError as msg:
                self.displayError(section, option, str(msg)) # Whoops, something went wrong
                return False # Tells self.onOk not to close the window
            problem = None # Set up the problem variable.
            try:
                # A validator returns a truthy description of the problem,
                # or a falsy value when the input is acceptable.
                problem = o['validate'](value) # See if it passes the test
            except Exception as e:
                problem = str(e) # The lambda raised an exception.
            if problem:
                self.displayError(section, option, problem) # Validation failed.
                return False # Tells self.onOk not to close the window
            self.config.set(section, option, value) # All clear
    return True
def handler_view(self, request, resource_name, ids=None):
""" Handler for resources.
.. versionadded:: 0.5.7
Content-Type check
:return django.http.HttpResponse
"""
signal_request.send(sender=self, request=request)
time_start = time.time()
self.update_urls(request, resource_name=resource_name, ids=ids)
resource = self.resource_map[resource_name]
allowed_http_methods = resource.Meta.allowed_methods
if request.method not in allowed_http_methods:
response = HttpResponseNotAllowed(
permitted_methods=allowed_http_methods)
signal_response.send(
sender=self, request=request, response=response,
duration=time.time() - time_start)
return response
if resource.Meta.authenticators and not (
request.method == "GET" and
resource.Meta.disable_get_authentication):
user = resource.authenticate(request)
if user is None or not user.is_authenticated():
response = HttpResponse("Not Authenticated", status=401)
signal_response.send(
sender=self, request=request, response=response,
duration=time.time() - time_start)
return response
kwargs = dict(request=request)
if ids is not None:
kwargs['ids'] = ids.split(",")
try:
if request.method == "GET":
response = self.handler_view_get(resource, **kwargs)
elif request.method == "POST":
response = self.handler_view_post(resource, **kwargs)
elif request.method == "PUT":
response = self.handler_view_put(resource, **kwargs)
elif request.method == "DELETE":
response = self.handler_view_delete(resource, **kwargs)
except JSONAPIError as e:
response = HttpResponse(
json.dumps({"errors": [e.data]}, cls=DatetimeDecimalEncoder),
content_type=self.CONTENT_TYPE, status=e.status)
signal_response.send(sender=self, request=request, response=response,
duration=time.time() - time_start)
return response | Handler for resources.
.. versionadded:: 0.5.7
Content-Type check
:return django.http.HttpResponse | Below is the instruction that describes the task:
### Input:
Handler for resources.
.. versionadded:: 0.5.7
Content-Type check
:return django.http.HttpResponse
### Response:
def handler_view(self, request, resource_name, ids=None):
    """ Handler for resources.

    Dispatches the request to the per-method handler for the named
    resource, after checking allowed HTTP methods and authentication.
    Every exit path emits a signal_response with the request duration.

    .. versionadded:: 0.5.7
        Content-Type check
    :return django.http.HttpResponse
    """
    # Instrumentation: announce the request and start the duration clock.
    signal_request.send(sender=self, request=request)
    time_start = time.time()
    self.update_urls(request, resource_name=resource_name, ids=ids)
    resource = self.resource_map[resource_name]
    allowed_http_methods = resource.Meta.allowed_methods
    if request.method not in allowed_http_methods:
        # 405 with the Allow header populated from the resource's Meta.
        response = HttpResponseNotAllowed(
            permitted_methods=allowed_http_methods)
        signal_response.send(
            sender=self, request=request, response=response,
            duration=time.time() - time_start)
        return response
    # Authentication is skipped for GET when the resource explicitly
    # disables it; otherwise any configured authenticator applies.
    if resource.Meta.authenticators and not (
            request.method == "GET" and
            resource.Meta.disable_get_authentication):
        user = resource.authenticate(request)
        # NOTE(review): user.is_authenticated() is invoked as a method; on
        # Django >= 1.10 it is a property -- confirm the targeted version.
        if user is None or not user.is_authenticated():
            response = HttpResponse("Not Authenticated", status=401)
            signal_response.send(
                sender=self, request=request, response=response,
                duration=time.time() - time_start)
            return response
    kwargs = dict(request=request)
    if ids is not None:
        # ids arrives as a comma-separated path segment, e.g. "1,2,3".
        kwargs['ids'] = ids.split(",")
    try:
        if request.method == "GET":
            response = self.handler_view_get(resource, **kwargs)
        elif request.method == "POST":
            response = self.handler_view_post(resource, **kwargs)
        elif request.method == "PUT":
            response = self.handler_view_put(resource, **kwargs)
        elif request.method == "DELETE":
            response = self.handler_view_delete(resource, **kwargs)
    except JSONAPIError as e:
        # Domain errors are rendered as a JSON API error document with the
        # status carried by the exception.
        response = HttpResponse(
            json.dumps({"errors": [e.data]}, cls=DatetimeDecimalEncoder),
            content_type=self.CONTENT_TYPE, status=e.status)
    signal_response.send(sender=self, request=request, response=response,
                         duration=time.time() - time_start)
    return response
def _query_nsot(url, headers, device=None):
'''
if a device is given, query nsot for that specific device, otherwise return
all devices
:param url: str
:param headers: dict
:param device: None or str
:return:
'''
url = urlparse.urljoin(url, 'devices')
ret = {}
if not device:
query = salt.utils.http.query(url, header_dict=headers, decode=True)
else:
url = urlparse.urljoin(url, device)
query = salt.utils.http.query(url, header_dict=headers,
decode=True)
error = query.get('error')
if error:
log.error('can\'t get device(s) from nsot! reason: %s', error)
else:
ret = query['dict']
return ret | if a device is given, query nsot for that specific device, otherwise return
all devices
:param url: str
:param headers: dict
:param device: None or str
:return: | Below is the the instruction that describes the task:
### Input:
if a device is given, query nsot for that specific device, otherwise return
all devices
:param url: str
:param headers: dict
:param device: None or str
:return:
### Response:
def _query_nsot(url, headers, device=None):
'''
if a device is given, query nsot for that specific device, otherwise return
all devices
:param url: str
:param headers: dict
:param device: None or str
:return:
'''
url = urlparse.urljoin(url, 'devices')
ret = {}
if not device:
query = salt.utils.http.query(url, header_dict=headers, decode=True)
else:
url = urlparse.urljoin(url, device)
query = salt.utils.http.query(url, header_dict=headers,
decode=True)
error = query.get('error')
if error:
log.error('can\'t get device(s) from nsot! reason: %s', error)
else:
ret = query['dict']
return ret |
def trivialInput(symbol):
"""
Create a new L{IRichInput} implementation for the given input symbol.
This creates a new type object and is intended to be used at module scope
to define rich input types. Generally, only one use per symbol should be
required. For example::
Apple = trivialInput(Fruit.apple)
@param symbol: A symbol from some state machine's input alphabet.
@return: A new type object usable as a rich input for the given symbol.
@rtype: L{type}
"""
return implementer(IRichInput)(type(
symbol.name.title(), (FancyStrMixin, object), {
"symbol": _symbol(symbol),
})) | Create a new L{IRichInput} implementation for the given input symbol.
This creates a new type object and is intended to be used at module scope
to define rich input types. Generally, only one use per symbol should be
required. For example::
Apple = trivialInput(Fruit.apple)
@param symbol: A symbol from some state machine's input alphabet.
@return: A new type object usable as a rich input for the given symbol.
@rtype: L{type} | Below is the the instruction that describes the task:
### Input:
Create a new L{IRichInput} implementation for the given input symbol.
This creates a new type object and is intended to be used at module scope
to define rich input types. Generally, only one use per symbol should be
required. For example::
Apple = trivialInput(Fruit.apple)
@param symbol: A symbol from some state machine's input alphabet.
@return: A new type object usable as a rich input for the given symbol.
@rtype: L{type}
### Response:
def trivialInput(symbol):
"""
Create a new L{IRichInput} implementation for the given input symbol.
This creates a new type object and is intended to be used at module scope
to define rich input types. Generally, only one use per symbol should be
required. For example::
Apple = trivialInput(Fruit.apple)
@param symbol: A symbol from some state machine's input alphabet.
@return: A new type object usable as a rich input for the given symbol.
@rtype: L{type}
"""
return implementer(IRichInput)(type(
symbol.name.title(), (FancyStrMixin, object), {
"symbol": _symbol(symbol),
})) |
def foreach_loop(self, context):
"""Run step once for each item in foreach_items.
On each iteration, the invoked step can use context['i'] to get the
current iterator value.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate.
"""
logger.debug("starting")
# Loop decorators only evaluated once, not for every step repeat
# execution.
foreach = context.get_formatted_iterable(self.foreach_items)
foreach_length = len(foreach)
logger.info(f"foreach decorator will loop {foreach_length} times.")
for i in foreach:
logger.info(f"foreach: running step {i}")
# the iterator must be available to the step when it executes
context['i'] = i
# conditional operators apply to each iteration, so might be an
# iteration run, skips or swallows.
self.run_conditional_decorators(context)
logger.debug(f"foreach: done step {i}")
logger.debug(f"foreach decorator looped {foreach_length} times.")
logger.debug("done") | Run step once for each item in foreach_items.
On each iteration, the invoked step can use context['i'] to get the
current iterator value.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate. | Below is the the instruction that describes the task:
### Input:
Run step once for each item in foreach_items.
On each iteration, the invoked step can use context['i'] to get the
current iterator value.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate.
### Response:
def foreach_loop(self, context):
"""Run step once for each item in foreach_items.
On each iteration, the invoked step can use context['i'] to get the
current iterator value.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate.
"""
logger.debug("starting")
# Loop decorators only evaluated once, not for every step repeat
# execution.
foreach = context.get_formatted_iterable(self.foreach_items)
foreach_length = len(foreach)
logger.info(f"foreach decorator will loop {foreach_length} times.")
for i in foreach:
logger.info(f"foreach: running step {i}")
# the iterator must be available to the step when it executes
context['i'] = i
# conditional operators apply to each iteration, so might be an
# iteration run, skips or swallows.
self.run_conditional_decorators(context)
logger.debug(f"foreach: done step {i}")
logger.debug(f"foreach decorator looped {foreach_length} times.")
logger.debug("done") |
def mixer(servo1, servo2, mixtype=1, gain=0.5):
'''mix two servos'''
s1 = servo1 - 1500
s2 = servo2 - 1500
v1 = (s1-s2)*gain
v2 = (s1+s2)*gain
if mixtype == 2:
v2 = -v2
elif mixtype == 3:
v1 = -v1
elif mixtype == 4:
v1 = -v1
v2 = -v2
if v1 > 600:
v1 = 600
elif v1 < -600:
v1 = -600
if v2 > 600:
v2 = 600
elif v2 < -600:
v2 = -600
return (1500+v1,1500+v2) | mix two servos | Below is the the instruction that describes the task:
### Input:
mix two servos
### Response:
def mixer(servo1, servo2, mixtype=1, gain=0.5):
'''mix two servos'''
s1 = servo1 - 1500
s2 = servo2 - 1500
v1 = (s1-s2)*gain
v2 = (s1+s2)*gain
if mixtype == 2:
v2 = -v2
elif mixtype == 3:
v1 = -v1
elif mixtype == 4:
v1 = -v1
v2 = -v2
if v1 > 600:
v1 = 600
elif v1 < -600:
v1 = -600
if v2 > 600:
v2 = 600
elif v2 < -600:
v2 = -600
return (1500+v1,1500+v2) |
def lspearmanr(x,y):
"""
Calculates a Spearman rank-order correlation coefficient. Taken
from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.
Usage: lspearmanr(x,y) where x and y are equal-length lists
Returns: Spearman's r, two-tailed p-value
"""
TINY = 1e-30
if len(x) != len(y):
raise ValueError('Input values not paired in spearmanr. Aborting.')
n = len(x)
rankx = rankdata(x)
ranky = rankdata(y)
dsq = sumdiffsquared(rankx,ranky)
rs = 1 - 6*dsq / float(n*(n**2-1))
t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
df = n-2
probrs = betai(0.5*df,0.5,df/(df+t*t)) # t already a float
# probability values for rs are from part 2 of the spearman function in
# Numerical Recipies, p.510. They are close to tables, but not exact. (?)
return rs, probrs | Calculates a Spearman rank-order correlation coefficient. Taken
from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.
Usage: lspearmanr(x,y) where x and y are equal-length lists
Returns: Spearman's r, two-tailed p-value | Below is the the instruction that describes the task:
### Input:
Calculates a Spearman rank-order correlation coefficient. Taken
from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.
Usage: lspearmanr(x,y) where x and y are equal-length lists
Returns: Spearman's r, two-tailed p-value
### Response:
def lspearmanr(x,y):
"""
Calculates a Spearman rank-order correlation coefficient. Taken
from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.
Usage: lspearmanr(x,y) where x and y are equal-length lists
Returns: Spearman's r, two-tailed p-value
"""
TINY = 1e-30
if len(x) != len(y):
raise ValueError('Input values not paired in spearmanr. Aborting.')
n = len(x)
rankx = rankdata(x)
ranky = rankdata(y)
dsq = sumdiffsquared(rankx,ranky)
rs = 1 - 6*dsq / float(n*(n**2-1))
t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
df = n-2
probrs = betai(0.5*df,0.5,df/(df+t*t)) # t already a float
# probability values for rs are from part 2 of the spearman function in
# Numerical Recipies, p.510. They are close to tables, but not exact. (?)
return rs, probrs |
def dump(self):
"""Dump to file"""
# NO Dump file selected -> DO NOTHING
if self.running_config.output_file:
# Determinate file format
_, extension = op.splitext(self.running_config.output_file)
extension = extension.replace(".", "")
if extension not in self.ALLOWED_DUMP_FORMATS:
raise PCException(
f"Extension of dump file is not available. "
f"Allowed extensions are: "
f"{', '.join(self.ALLOWED_DUMP_FORMATS)}")
with open(self.running_config.output_file, "w") as f:
if extension == "csv":
csv_writer = csv.writer(f)
csv_writer.writerow(("# Name",
"CPE",
"CVE",
"Score",
"Summary"))
csv_writer.writerows(self._to_csv())
elif extension == "json":
json.dump(self.results,
f,
indent=4,
sort_keys=True)
elif extension == "raw":
f.write(self._to_table()) | Dump to file | Below is the the instruction that describes the task:
### Input:
Dump to file
### Response:
def dump(self):
"""Dump to file"""
# NO Dump file selected -> DO NOTHING
if self.running_config.output_file:
# Determinate file format
_, extension = op.splitext(self.running_config.output_file)
extension = extension.replace(".", "")
if extension not in self.ALLOWED_DUMP_FORMATS:
raise PCException(
f"Extension of dump file is not available. "
f"Allowed extensions are: "
f"{', '.join(self.ALLOWED_DUMP_FORMATS)}")
with open(self.running_config.output_file, "w") as f:
if extension == "csv":
csv_writer = csv.writer(f)
csv_writer.writerow(("# Name",
"CPE",
"CVE",
"Score",
"Summary"))
csv_writer.writerows(self._to_csv())
elif extension == "json":
json.dump(self.results,
f,
indent=4,
sort_keys=True)
elif extension == "raw":
f.write(self._to_table()) |
def from_record(self, record):
"""
Constructs and returns a sequenced item object, from given ORM object.
"""
kwargs = self.get_field_kwargs(record)
return self.sequenced_item_class(**kwargs) | Constructs and returns a sequenced item object, from given ORM object. | Below is the the instruction that describes the task:
### Input:
Constructs and returns a sequenced item object, from given ORM object.
### Response:
def from_record(self, record):
"""
Constructs and returns a sequenced item object, from given ORM object.
"""
kwargs = self.get_field_kwargs(record)
return self.sequenced_item_class(**kwargs) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.