code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def getRfree(self):
'''
Returns an array of size self.AgentCount with self.RfreeNow in every entry.
Parameters
----------
None
Returns
-------
RfreeNow : np.array
Array of size self.AgentCount with risk free interest rate for each agent.
'''
RfreeNow = self.RfreeNow*np.ones(self.AgentCount)
return RfreeNow | Returns an array of size self.AgentCount with self.RfreeNow in every entry.
Parameters
----------
None
Returns
-------
RfreeNow : np.array
Array of size self.AgentCount with risk free interest rate for each agent. | Below is the the instruction that describes the task:
### Input:
Returns an array of size self.AgentCount with self.RfreeNow in every entry.
Parameters
----------
None
Returns
-------
RfreeNow : np.array
Array of size self.AgentCount with risk free interest rate for each agent.
### Response:
def getRfree(self):
'''
Returns an array of size self.AgentCount with self.RfreeNow in every entry.
Parameters
----------
None
Returns
-------
RfreeNow : np.array
Array of size self.AgentCount with risk free interest rate for each agent.
'''
RfreeNow = self.RfreeNow*np.ones(self.AgentCount)
return RfreeNow |
def get_bin_query_session(self, proxy):
"""Gets the bin query session.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.resource.BinQuerySession) - a ``BinQuerySession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_bin_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_bin_query()`` is ``true``.*
"""
if not self.supports_bin_query():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.BinQuerySession(proxy=proxy, runtime=self._runtime) | Gets the bin query session.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.resource.BinQuerySession) - a ``BinQuerySession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_bin_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_bin_query()`` is ``true``.* | Below is the the instruction that describes the task:
### Input:
Gets the bin query session.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.resource.BinQuerySession) - a ``BinQuerySession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_bin_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_bin_query()`` is ``true``.*
### Response:
def get_bin_query_session(self, proxy):
"""Gets the bin query session.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.resource.BinQuerySession) - a ``BinQuerySession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_bin_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_bin_query()`` is ``true``.*
"""
if not self.supports_bin_query():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.BinQuerySession(proxy=proxy, runtime=self._runtime) |
def makeStatic():
""" Provide static access to underscore class
"""
p = lambda value: inspect.ismethod(value) or inspect.isfunction(value)
for eachMethod in inspect.getmembers(underscore,
predicate=p):
m = eachMethod[0]
if not hasattr(_, m):
def caller(a):
def execute(*args):
if len(args) == 1:
r = getattr(underscore(args[0]), a)()
elif len(args) > 1:
rargs = args[1:]
r = getattr(underscore(args[0]), a)(*rargs)
else:
r = getattr(underscore([]), a)()
return r
return execute
_.__setattr__(m, caller(m))
# put the class itself as a parameter so that we can use it on outside
_.__setattr__("underscore", underscore)
_.templateSettings = {} | Provide static access to underscore class | Below is the the instruction that describes the task:
### Input:
Provide static access to underscore class
### Response:
def makeStatic():
""" Provide static access to underscore class
"""
p = lambda value: inspect.ismethod(value) or inspect.isfunction(value)
for eachMethod in inspect.getmembers(underscore,
predicate=p):
m = eachMethod[0]
if not hasattr(_, m):
def caller(a):
def execute(*args):
if len(args) == 1:
r = getattr(underscore(args[0]), a)()
elif len(args) > 1:
rargs = args[1:]
r = getattr(underscore(args[0]), a)(*rargs)
else:
r = getattr(underscore([]), a)()
return r
return execute
_.__setattr__(m, caller(m))
# put the class itself as a parameter so that we can use it on outside
_.__setattr__("underscore", underscore)
_.templateSettings = {} |
def get_function_in_models(service, operation):
"""refers to definition of API in botocore, and autogenerates function
You can see example of elbv2 from link below.
https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json
"""
client = boto3.client(service)
aws_operation_name = to_upper_camel_case(operation)
op_model = client._service_model.operation_model(aws_operation_name)
inputs = op_model.input_shape.members
if not hasattr(op_model.output_shape, 'members'):
outputs = {}
else:
outputs = op_model.output_shape.members
input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND]
output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND]
if input_names:
body = 'def {}(self, {}):\n'.format(operation, ', '.join(input_names))
else:
body = 'def {}(self)\n'
body += ' # implement here\n'
body += ' return {}\n\n'.format(', '.join(output_names))
return body | refers to definition of API in botocore, and autogenerates function
You can see example of elbv2 from link below.
https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json | Below is the the instruction that describes the task:
### Input:
refers to definition of API in botocore, and autogenerates function
You can see example of elbv2 from link below.
https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json
### Response:
def get_function_in_models(service, operation):
"""refers to definition of API in botocore, and autogenerates function
You can see example of elbv2 from link below.
https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json
"""
client = boto3.client(service)
aws_operation_name = to_upper_camel_case(operation)
op_model = client._service_model.operation_model(aws_operation_name)
inputs = op_model.input_shape.members
if not hasattr(op_model.output_shape, 'members'):
outputs = {}
else:
outputs = op_model.output_shape.members
input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND]
output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND]
if input_names:
body = 'def {}(self, {}):\n'.format(operation, ', '.join(input_names))
else:
body = 'def {}(self)\n'
body += ' # implement here\n'
body += ' return {}\n\n'.format(', '.join(output_names))
return body |
def yield_pair_energies(self, index1, index2):
"""Yields pairs ((s(r_ij), v(bar{r}_ij))"""
d_1 = 1/self.distances[index1, index2]
if self.charges is not None:
c1 = self.charges[index1]
c2 = self.charges[index2]
yield c1*c2*d_1, 1
if self.dipoles is not None:
d_3 = d_1**3
d_5 = d_1**5
delta = self.deltas[index1, index2]
p1 = self.dipoles[index1]
p2 = self.dipoles[index2]
yield d_3*np.dot(p1, p2), 1
yield -3*d_5, np.dot(p1, delta)*np.dot(delta, p2)
if self.charges is not None:
yield c1*d_3, np.dot(p2, delta)
yield c2*d_3, np.dot(p1, -delta) | Yields pairs ((s(r_ij), v(bar{r}_ij)) | Below is the the instruction that describes the task:
### Input:
Yields pairs ((s(r_ij), v(bar{r}_ij))
### Response:
def yield_pair_energies(self, index1, index2):
"""Yields pairs ((s(r_ij), v(bar{r}_ij))"""
d_1 = 1/self.distances[index1, index2]
if self.charges is not None:
c1 = self.charges[index1]
c2 = self.charges[index2]
yield c1*c2*d_1, 1
if self.dipoles is not None:
d_3 = d_1**3
d_5 = d_1**5
delta = self.deltas[index1, index2]
p1 = self.dipoles[index1]
p2 = self.dipoles[index2]
yield d_3*np.dot(p1, p2), 1
yield -3*d_5, np.dot(p1, delta)*np.dot(delta, p2)
if self.charges is not None:
yield c1*d_3, np.dot(p2, delta)
yield c2*d_3, np.dot(p1, -delta) |
def attribute(self, attribute_id, action='GET', params=None):
"""
Gets the attribute from a Group/Indicator or Victim
Args:
action:
params:
attribute_id:
Returns: attribute json
"""
if params is None:
params = {}
if not self.can_update():
self._tcex.handle_error(910, [self.type])
if action == 'GET':
return self.tc_requests.get_attribute(
self.api_type,
self.api_sub_type,
self.unique_id,
attribute_id,
owner=self.owner,
params=params,
)
if action == 'DELETE':
return self.tc_requests.delete_attribute(
self.api_type, self.api_sub_type, self.unique_id, attribute_id, owner=self.owner
)
self._tcex.handle_error(925, ['action', 'attribute', 'action', 'action', action])
return None | Gets the attribute from a Group/Indicator or Victim
Args:
action:
params:
attribute_id:
Returns: attribute json | Below is the the instruction that describes the task:
### Input:
Gets the attribute from a Group/Indicator or Victim
Args:
action:
params:
attribute_id:
Returns: attribute json
### Response:
def attribute(self, attribute_id, action='GET', params=None):
"""
Gets the attribute from a Group/Indicator or Victim
Args:
action:
params:
attribute_id:
Returns: attribute json
"""
if params is None:
params = {}
if not self.can_update():
self._tcex.handle_error(910, [self.type])
if action == 'GET':
return self.tc_requests.get_attribute(
self.api_type,
self.api_sub_type,
self.unique_id,
attribute_id,
owner=self.owner,
params=params,
)
if action == 'DELETE':
return self.tc_requests.delete_attribute(
self.api_type, self.api_sub_type, self.unique_id, attribute_id, owner=self.owner
)
self._tcex.handle_error(925, ['action', 'attribute', 'action', 'action', action])
return None |
def upload_tree(self):
""" upload_tree: sends processed channel data to server to create tree
Args: None
Returns: link to uploadedchannel
"""
from datetime import datetime
start_time = datetime.now()
root, channel_id = self.add_channel()
self.node_count_dict = {"upload_count": 0, "total_count": self.channel.count()}
config.LOGGER.info("\tPreparing fields...")
self.truncate_fields(self.channel)
self.add_nodes(root, self.channel)
if self.check_failed(print_warning=False):
failed = self.failed_node_builds
self.failed_node_builds = {}
self.reattempt_failed(failed)
self.check_failed()
channel_id, channel_link = self.commit_channel(channel_id)
end_time = datetime.now()
config.LOGGER.info("Upload time: {time}s".format(time=(end_time - start_time).total_seconds()))
return channel_id, channel_link | upload_tree: sends processed channel data to server to create tree
Args: None
Returns: link to uploadedchannel | Below is the the instruction that describes the task:
### Input:
upload_tree: sends processed channel data to server to create tree
Args: None
Returns: link to uploadedchannel
### Response:
def upload_tree(self):
""" upload_tree: sends processed channel data to server to create tree
Args: None
Returns: link to uploadedchannel
"""
from datetime import datetime
start_time = datetime.now()
root, channel_id = self.add_channel()
self.node_count_dict = {"upload_count": 0, "total_count": self.channel.count()}
config.LOGGER.info("\tPreparing fields...")
self.truncate_fields(self.channel)
self.add_nodes(root, self.channel)
if self.check_failed(print_warning=False):
failed = self.failed_node_builds
self.failed_node_builds = {}
self.reattempt_failed(failed)
self.check_failed()
channel_id, channel_link = self.commit_channel(channel_id)
end_time = datetime.now()
config.LOGGER.info("Upload time: {time}s".format(time=(end_time - start_time).total_seconds()))
return channel_id, channel_link |
def get_auth_stdin(refresh_token_filename, manual_login=False):
"""Simple wrapper for :func:`get_auth` that prompts the user using stdin.
Args:
refresh_token_filename (str): Path to file where refresh token will be
cached.
manual_login (bool): If true, prompt user to log in through a browser
and enter authorization code manually. Defaults to false.
Raises:
GoogleAuthError: If authentication with Google fails.
"""
refresh_token_cache = RefreshTokenCache(refresh_token_filename)
return get_auth(
CredentialsPrompt(), refresh_token_cache, manual_login=manual_login
) | Simple wrapper for :func:`get_auth` that prompts the user using stdin.
Args:
refresh_token_filename (str): Path to file where refresh token will be
cached.
manual_login (bool): If true, prompt user to log in through a browser
and enter authorization code manually. Defaults to false.
Raises:
GoogleAuthError: If authentication with Google fails. | Below is the the instruction that describes the task:
### Input:
Simple wrapper for :func:`get_auth` that prompts the user using stdin.
Args:
refresh_token_filename (str): Path to file where refresh token will be
cached.
manual_login (bool): If true, prompt user to log in through a browser
and enter authorization code manually. Defaults to false.
Raises:
GoogleAuthError: If authentication with Google fails.
### Response:
def get_auth_stdin(refresh_token_filename, manual_login=False):
"""Simple wrapper for :func:`get_auth` that prompts the user using stdin.
Args:
refresh_token_filename (str): Path to file where refresh token will be
cached.
manual_login (bool): If true, prompt user to log in through a browser
and enter authorization code manually. Defaults to false.
Raises:
GoogleAuthError: If authentication with Google fails.
"""
refresh_token_cache = RefreshTokenCache(refresh_token_filename)
return get_auth(
CredentialsPrompt(), refresh_token_cache, manual_login=manual_login
) |
def foreach(layer, drop_factor=1.0):
"""Map a layer across list items"""
def foreach_fwd(docs, drop=0.0):
sents = []
lengths = []
for doc in docs:
doc_sents = [sent for sent in doc if len(sent)]
subset = [
s for s in doc_sents if numpy.random.random() >= drop * drop_factor
]
if subset:
sents.extend(subset)
lengths.append(len(subset))
else:
numpy.random.shuffle(doc_sents)
sents.append(doc_sents[0])
lengths.append(1)
flat, bp_flat = layer.begin_update(sents, drop=0.0)
output = layer.ops.unflatten(flat, lengths)
def foreach_bwd(d_output, sgd=None):
d_flat = layer.ops.flatten(d_output)
d_sents = bp_flat(d_flat, sgd=sgd)
if d_sents is None:
return d_sents
else:
return layer.ops.unflatten(d_sents, lengths)
return output, foreach_bwd
model = wrap(foreach_fwd, layer)
def _run_foreach_child_hooks(model, X, y):
for layer in model._layers:
for hook in layer.on_data_hooks:
hook(layer, X[0], y[0])
model.on_data_hooks = [_run_foreach_child_hooks]
return model | Map a layer across list items | Below is the the instruction that describes the task:
### Input:
Map a layer across list items
### Response:
def foreach(layer, drop_factor=1.0):
"""Map a layer across list items"""
def foreach_fwd(docs, drop=0.0):
sents = []
lengths = []
for doc in docs:
doc_sents = [sent for sent in doc if len(sent)]
subset = [
s for s in doc_sents if numpy.random.random() >= drop * drop_factor
]
if subset:
sents.extend(subset)
lengths.append(len(subset))
else:
numpy.random.shuffle(doc_sents)
sents.append(doc_sents[0])
lengths.append(1)
flat, bp_flat = layer.begin_update(sents, drop=0.0)
output = layer.ops.unflatten(flat, lengths)
def foreach_bwd(d_output, sgd=None):
d_flat = layer.ops.flatten(d_output)
d_sents = bp_flat(d_flat, sgd=sgd)
if d_sents is None:
return d_sents
else:
return layer.ops.unflatten(d_sents, lengths)
return output, foreach_bwd
model = wrap(foreach_fwd, layer)
def _run_foreach_child_hooks(model, X, y):
for layer in model._layers:
for hook in layer.on_data_hooks:
hook(layer, X[0], y[0])
model.on_data_hooks = [_run_foreach_child_hooks]
return model |
def save_video(video, save_path_template):
"""Save frames of the videos into files."""
try:
from PIL import Image # pylint: disable=g-import-not-at-top
except ImportError as e:
tf.logging.warning(
"Showing and saving an image requires PIL library to be "
"installed: %s", e)
raise NotImplementedError("Image display and save not implemented.")
for i, frame in enumerate(video):
save_path = save_path_template.format(i)
with tf.gfile.Open(save_path, "wb") as sp:
Image.fromarray(np.uint8(frame)).save(sp) | Save frames of the videos into files. | Below is the the instruction that describes the task:
### Input:
Save frames of the videos into files.
### Response:
def save_video(video, save_path_template):
"""Save frames of the videos into files."""
try:
from PIL import Image # pylint: disable=g-import-not-at-top
except ImportError as e:
tf.logging.warning(
"Showing and saving an image requires PIL library to be "
"installed: %s", e)
raise NotImplementedError("Image display and save not implemented.")
for i, frame in enumerate(video):
save_path = save_path_template.format(i)
with tf.gfile.Open(save_path, "wb") as sp:
Image.fromarray(np.uint8(frame)).save(sp) |
def FormatArtifacts(self, artifacts):
"""Formats artifacts to desired output format.
Args:
artifacts (list[ArtifactDefinition]): artifact definitions.
Returns:
str: formatted string of artifact definition.
"""
artifact_definitions = [artifact.AsDict() for artifact in artifacts]
json_data = json.dumps(artifact_definitions)
return json_data | Formats artifacts to desired output format.
Args:
artifacts (list[ArtifactDefinition]): artifact definitions.
Returns:
str: formatted string of artifact definition. | Below is the the instruction that describes the task:
### Input:
Formats artifacts to desired output format.
Args:
artifacts (list[ArtifactDefinition]): artifact definitions.
Returns:
str: formatted string of artifact definition.
### Response:
def FormatArtifacts(self, artifacts):
"""Formats artifacts to desired output format.
Args:
artifacts (list[ArtifactDefinition]): artifact definitions.
Returns:
str: formatted string of artifact definition.
"""
artifact_definitions = [artifact.AsDict() for artifact in artifacts]
json_data = json.dumps(artifact_definitions)
return json_data |
def search_filename(fname, lineno, local_first):
""" Search a filename into the list of the include path.
If local_first is true, it will try first in the current directory of
the file being analyzed.
"""
fname = api.utils.sanitize_filename(fname)
i_path = [CURRENT_DIR] + INCLUDEPATH if local_first else list(INCLUDEPATH)
i_path.extend(OPTIONS.include_path.value.split(':') if OPTIONS.include_path.value else [])
if os.path.isabs(fname):
if os.path.isfile(fname):
return fname
else:
for dir_ in i_path:
path = api.utils.sanitize_filename(os.path.join(dir_, fname))
if os.path.exists(path):
return path
error(lineno, "file '%s' not found" % fname)
return '' | Search a filename into the list of the include path.
If local_first is true, it will try first in the current directory of
the file being analyzed. | Below is the the instruction that describes the task:
### Input:
Search a filename into the list of the include path.
If local_first is true, it will try first in the current directory of
the file being analyzed.
### Response:
def search_filename(fname, lineno, local_first):
""" Search a filename into the list of the include path.
If local_first is true, it will try first in the current directory of
the file being analyzed.
"""
fname = api.utils.sanitize_filename(fname)
i_path = [CURRENT_DIR] + INCLUDEPATH if local_first else list(INCLUDEPATH)
i_path.extend(OPTIONS.include_path.value.split(':') if OPTIONS.include_path.value else [])
if os.path.isabs(fname):
if os.path.isfile(fname):
return fname
else:
for dir_ in i_path:
path = api.utils.sanitize_filename(os.path.join(dir_, fname))
if os.path.exists(path):
return path
error(lineno, "file '%s' not found" % fname)
return '' |
def read_raster_no_crs(input_file, indexes=None, gdal_opts=None):
"""
Wrapper function around rasterio.open().read().
Parameters
----------
input_file : str
Path to file
indexes : int or list
Band index or list of band indexes to be read.
Returns
-------
MaskedArray
Raises
------
FileNotFoundError if file cannot be found.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
with rasterio.Env(
**get_gdal_options(
gdal_opts, is_remote=path_is_remote(input_file, s3=True)
)
):
with rasterio.open(input_file, "r") as src:
return src.read(indexes=indexes, masked=True)
except RasterioIOError as e:
for i in ("does not exist in the file system", "No such file or directory"):
if i in str(e):
raise FileNotFoundError("%s not found" % input_file)
else:
raise | Wrapper function around rasterio.open().read().
Parameters
----------
input_file : str
Path to file
indexes : int or list
Band index or list of band indexes to be read.
Returns
-------
MaskedArray
Raises
------
FileNotFoundError if file cannot be found. | Below is the the instruction that describes the task:
### Input:
Wrapper function around rasterio.open().read().
Parameters
----------
input_file : str
Path to file
indexes : int or list
Band index or list of band indexes to be read.
Returns
-------
MaskedArray
Raises
------
FileNotFoundError if file cannot be found.
### Response:
def read_raster_no_crs(input_file, indexes=None, gdal_opts=None):
"""
Wrapper function around rasterio.open().read().
Parameters
----------
input_file : str
Path to file
indexes : int or list
Band index or list of band indexes to be read.
Returns
-------
MaskedArray
Raises
------
FileNotFoundError if file cannot be found.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
with rasterio.Env(
**get_gdal_options(
gdal_opts, is_remote=path_is_remote(input_file, s3=True)
)
):
with rasterio.open(input_file, "r") as src:
return src.read(indexes=indexes, masked=True)
except RasterioIOError as e:
for i in ("does not exist in the file system", "No such file or directory"):
if i in str(e):
raise FileNotFoundError("%s not found" % input_file)
else:
raise |
def solveAgent(agent,verbose):
'''
Solve the dynamic model for one agent type. This function iterates on "cycles"
of an agent's model either a given number of times or until solution convergence
if an infinite horizon model is used (with agent.cycles = 0).
Parameters
----------
agent : AgentType
The microeconomic AgentType whose dynamic problem is to be solved.
verbose : boolean
If True, solution progress is printed to screen (when cycles != 1).
Returns
-------
solution : [Solution]
A list of solutions to the one period problems that the agent will
encounter in his "lifetime". Returns in reverse chronological order.
'''
# Record the flow of time when the Agent began the process, and make sure time is flowing backwards
original_time_flow = agent.time_flow
agent.timeRev()
# Check to see whether this is an (in)finite horizon problem
cycles_left = agent.cycles
infinite_horizon = cycles_left == 0
# Initialize the solution, which includes the terminal solution if it's not a pseudo-terminal period
solution = []
if not agent.pseudo_terminal:
solution.append(deepcopy(agent.solution_terminal))
# Initialize the process, then loop over cycles
solution_last = agent.solution_terminal
go = True
completed_cycles = 0
max_cycles = 5000 # escape clause
if verbose:
t_last = clock()
while go:
# Solve a cycle of the model, recording it if horizon is finite
solution_cycle = solveOneCycle(agent,solution_last)
if not infinite_horizon:
solution += solution_cycle
# Check for termination: identical solutions across cycle iterations or run out of cycles
solution_now = solution_cycle[-1]
if infinite_horizon:
if completed_cycles > 0:
solution_distance = solution_now.distance(solution_last)
go = (solution_distance > agent.tolerance and completed_cycles < max_cycles)
else: # Assume solution does not converge after only one cycle
solution_distance = 100.0
go = True
else:
cycles_left += -1
go = cycles_left > 0
# Update the "last period solution"
solution_last = solution_now
completed_cycles += 1
# Display progress if requested
if verbose:
t_now = clock()
if infinite_horizon:
print('Finished cycle #' + str(completed_cycles) + ' in ' + str(t_now-t_last) +\
' seconds, solution distance = ' + str(solution_distance))
else:
print('Finished cycle #' + str(completed_cycles) + ' of ' + str(agent.cycles) +\
' in ' + str(t_now-t_last) + ' seconds.')
t_last = t_now
# Record the last cycle if horizon is infinite (solution is still empty!)
if infinite_horizon:
solution = solution_cycle # PseudoTerminal=False impossible for infinite horizon
# Restore the direction of time to its original orientation, then return the solution
if original_time_flow:
agent.timeFwd()
return solution | Solve the dynamic model for one agent type. This function iterates on "cycles"
of an agent's model either a given number of times or until solution convergence
if an infinite horizon model is used (with agent.cycles = 0).
Parameters
----------
agent : AgentType
The microeconomic AgentType whose dynamic problem is to be solved.
verbose : boolean
If True, solution progress is printed to screen (when cycles != 1).
Returns
-------
solution : [Solution]
A list of solutions to the one period problems that the agent will
encounter in his "lifetime". Returns in reverse chronological order. | Below is the the instruction that describes the task:
### Input:
Solve the dynamic model for one agent type. This function iterates on "cycles"
of an agent's model either a given number of times or until solution convergence
if an infinite horizon model is used (with agent.cycles = 0).
Parameters
----------
agent : AgentType
The microeconomic AgentType whose dynamic problem is to be solved.
verbose : boolean
If True, solution progress is printed to screen (when cycles != 1).
Returns
-------
solution : [Solution]
A list of solutions to the one period problems that the agent will
encounter in his "lifetime". Returns in reverse chronological order.
### Response:
def solveAgent(agent,verbose):
'''
Solve the dynamic model for one agent type. This function iterates on "cycles"
of an agent's model either a given number of times or until solution convergence
if an infinite horizon model is used (with agent.cycles = 0).
Parameters
----------
agent : AgentType
The microeconomic AgentType whose dynamic problem is to be solved.
verbose : boolean
If True, solution progress is printed to screen (when cycles != 1).
Returns
-------
solution : [Solution]
A list of solutions to the one period problems that the agent will
encounter in his "lifetime". Returns in reverse chronological order.
'''
# Record the flow of time when the Agent began the process, and make sure time is flowing backwards
original_time_flow = agent.time_flow
agent.timeRev()
# Check to see whether this is an (in)finite horizon problem
cycles_left = agent.cycles
infinite_horizon = cycles_left == 0
# Initialize the solution, which includes the terminal solution if it's not a pseudo-terminal period
solution = []
if not agent.pseudo_terminal:
solution.append(deepcopy(agent.solution_terminal))
# Initialize the process, then loop over cycles
solution_last = agent.solution_terminal
go = True
completed_cycles = 0
max_cycles = 5000 # escape clause
if verbose:
t_last = clock()
while go:
# Solve a cycle of the model, recording it if horizon is finite
solution_cycle = solveOneCycle(agent,solution_last)
if not infinite_horizon:
solution += solution_cycle
# Check for termination: identical solutions across cycle iterations or run out of cycles
solution_now = solution_cycle[-1]
if infinite_horizon:
if completed_cycles > 0:
solution_distance = solution_now.distance(solution_last)
go = (solution_distance > agent.tolerance and completed_cycles < max_cycles)
else: # Assume solution does not converge after only one cycle
solution_distance = 100.0
go = True
else:
cycles_left += -1
go = cycles_left > 0
# Update the "last period solution"
solution_last = solution_now
completed_cycles += 1
# Display progress if requested
if verbose:
t_now = clock()
if infinite_horizon:
print('Finished cycle #' + str(completed_cycles) + ' in ' + str(t_now-t_last) +\
' seconds, solution distance = ' + str(solution_distance))
else:
print('Finished cycle #' + str(completed_cycles) + ' of ' + str(agent.cycles) +\
' in ' + str(t_now-t_last) + ' seconds.')
t_last = t_now
# Record the last cycle if horizon is infinite (solution is still empty!)
if infinite_horizon:
solution = solution_cycle # PseudoTerminal=False impossible for infinite horizon
# Restore the direction of time to its original orientation, then return the solution
if original_time_flow:
agent.timeFwd()
return solution |
def write_xspf(f, tuples):
"""send me a list of (artist,title,mp3_url)"""
xml = XmlWriter(f, indentAmount=' ')
xml.prolog()
xml.start('playlist', { 'xmlns': 'http://xspf.org/ns/0/', 'version': '1' })
xml.start('trackList')
for tupe in tuples:
xml.start('track')
xml.elem('creator',tupe[0])
xml.elem('title',tupe[1])
xml.elem('location', tupe[2])
xml.end()
xml.end()
xml.end()
f.close() | send me a list of (artist,title,mp3_url) | Below is the the instruction that describes the task:
### Input:
send me a list of (artist,title,mp3_url)
### Response:
def write_xspf(f, tuples):
"""send me a list of (artist,title,mp3_url)"""
xml = XmlWriter(f, indentAmount=' ')
xml.prolog()
xml.start('playlist', { 'xmlns': 'http://xspf.org/ns/0/', 'version': '1' })
xml.start('trackList')
for tupe in tuples:
xml.start('track')
xml.elem('creator',tupe[0])
xml.elem('title',tupe[1])
xml.elem('location', tupe[2])
xml.end()
xml.end()
xml.end()
f.close() |
def update_positions(self, positions):
'''Update the sphere positions.
'''
sphs_verts = self.sphs_verts_radii.copy()
sphs_verts += positions.reshape(self.n_spheres, 1, 3)
self.tr.update_vertices(sphs_verts)
self.poslist = positions | Update the sphere positions. | Below is the the instruction that describes the task:
### Input:
Update the sphere positions.
### Response:
def update_positions(self, positions):
'''Update the sphere positions.
'''
sphs_verts = self.sphs_verts_radii.copy()
sphs_verts += positions.reshape(self.n_spheres, 1, 3)
self.tr.update_vertices(sphs_verts)
self.poslist = positions |
def si_parse(value):
'''
Parse a value expressed using SI prefix units to a floating point number.
Parameters
----------
value : str or unicode
Value expressed using SI prefix units (as returned by :func:`si_format`
function).
.. versionchanged:: 1.0
Use unicode string for SI unit to support micro (i.e., µ) character.
.. seealso::
`Issue #4`_.
.. _`Issue #4`: https://github.com/cfobel/si-prefix/issues/4
'''
CRE_10E_NUMBER = re.compile(r'^\s*(?P<integer>[\+\-]?\d+)?'
r'(?P<fraction>.\d+)?\s*([eE]\s*'
r'(?P<expof10>[\+\-]?\d+))?$')
CRE_SI_NUMBER = re.compile(r'^\s*(?P<number>(?P<integer>[\+\-]?\d+)?'
r'(?P<fraction>.\d+)?)\s*'
u'(?P<si_unit>[%s])?\s*$' % SI_PREFIX_UNITS)
match = CRE_10E_NUMBER.match(value)
if match:
# Can be parse using `float`.
assert(match.group('integer') is not None or
match.group('fraction') is not None)
return float(value)
match = CRE_SI_NUMBER.match(value)
assert(match.group('integer') is not None or
match.group('fraction') is not None)
d = match.groupdict()
si_unit = d['si_unit'] if d['si_unit'] else ' '
prefix_levels = (len(SI_PREFIX_UNITS) - 1) // 2
scale = 10 ** (3 * (SI_PREFIX_UNITS.index(si_unit) - prefix_levels))
return float(d['number']) * scale | Parse a value expressed using SI prefix units to a floating point number.
Parameters
----------
value : str or unicode
Value expressed using SI prefix units (as returned by :func:`si_format`
function).
.. versionchanged:: 1.0
Use unicode string for SI unit to support micro (i.e., µ) character.
.. seealso::
`Issue #4`_.
.. _`Issue #4`: https://github.com/cfobel/si-prefix/issues/4 | Below is the the instruction that describes the task:
### Input:
Parse a value expressed using SI prefix units to a floating point number.
Parameters
----------
value : str or unicode
Value expressed using SI prefix units (as returned by :func:`si_format`
function).
.. versionchanged:: 1.0
Use unicode string for SI unit to support micro (i.e., µ) character.
.. seealso::
`Issue #4`_.
.. _`Issue #4`: https://github.com/cfobel/si-prefix/issues/4
### Response:
def si_parse(value):
    '''
    Parse a value expressed using SI prefix units to a floating point number.

    Parameters
    ----------
    value : str or unicode
        Value expressed using SI prefix units (as returned by :func:`si_format`
        function).

    Returns
    -------
    float
        Numeric value, scaled by the power of ten implied by the trailing SI
        prefix unit (if any).

    .. versionchanged:: 1.0
        Use unicode string for SI unit to support micro (i.e., µ) character.
    .. seealso::
        `Issue #4`_.
    .. _`Issue #4`: https://github.com/cfobel/si-prefix/issues/4
    '''
    # BUGFIX: the fraction group previously used an unescaped `.`, which
    # matches *any* character (so e.g. "1a5" was captured as a "fraction" and
    # crashed inside float()).  Escape it so only a literal decimal point is
    # accepted.  Also use an explicit `\\s` in the non-raw unicode pattern to
    # avoid the invalid-escape-sequence deprecation warning.
    CRE_10E_NUMBER = re.compile(r'^\s*(?P<integer>[\+\-]?\d+)?'
                                r'(?P<fraction>\.\d+)?\s*([eE]\s*'
                                r'(?P<expof10>[\+\-]?\d+))?$')
    CRE_SI_NUMBER = re.compile(r'^\s*(?P<number>(?P<integer>[\+\-]?\d+)?'
                               r'(?P<fraction>\.\d+)?)\s*'
                               u'(?P<si_unit>[%s])?\\s*$' % SI_PREFIX_UNITS)
    match = CRE_10E_NUMBER.match(value)
    if match:
        # Plain or exponent notation (e.g. "1.5", "2e3") can be parsed by
        # `float` directly.
        assert(match.group('integer') is not None or
               match.group('fraction') is not None)
        return float(value)
    match = CRE_SI_NUMBER.match(value)
    assert(match.group('integer') is not None or
           match.group('fraction') is not None)
    d = match.groupdict()
    # A missing unit means no scaling; ' ' is the identity entry in the
    # SI_PREFIX_UNITS string.
    si_unit = d['si_unit'] if d['si_unit'] else ' '
    # Prefixes run smallest..largest with the identity unit in the middle;
    # each step away from the middle is a factor of 10**3.
    prefix_levels = (len(SI_PREFIX_UNITS) - 1) // 2
    scale = 10 ** (3 * (SI_PREFIX_UNITS.index(si_unit) - prefix_levels))
    return float(d['number']) * scale
def read_volume_attachment(self, name, **kwargs): # noqa: E501
"""read_volume_attachment # noqa: E501
read the specified VolumeAttachment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_volume_attachment(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the VolumeAttachment (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1VolumeAttachment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_volume_attachment_with_http_info(name, **kwargs) # noqa: E501
else:
(data) = self.read_volume_attachment_with_http_info(name, **kwargs) # noqa: E501
return data | read_volume_attachment # noqa: E501
read the specified VolumeAttachment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_volume_attachment(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the VolumeAttachment (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1VolumeAttachment
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
read_volume_attachment # noqa: E501
read the specified VolumeAttachment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_volume_attachment(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the VolumeAttachment (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1VolumeAttachment
If the method is called asynchronously,
returns the request thread.
### Response:
def read_volume_attachment(self, name, **kwargs):  # noqa: E501
    """read_volume_attachment  # noqa: E501

    Read the specified VolumeAttachment.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_volume_attachment(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the VolumeAttachment (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
    :param bool export: Should this value be exported. Export strips fields that a user can not specify.
    :return: V1VolumeAttachment
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always want just the response body,
    # never the full (data, status-code, headers) triple.
    kwargs['_return_http_data_only'] = True
    # Whether the call is synchronous (returns the data) or asynchronous
    # (returns the request thread), the *_with_http_info variant already
    # produces exactly the value this wrapper should hand back.
    return self.read_volume_attachment_with_http_info(name, **kwargs)  # noqa: E501
def bm3_k(p, v0, k0, k0p):
"""
calculate bulk modulus, wrapper for cal_k_bm3
cannot handle uncertainties
:param p: pressure
:param v0: volume at reference conditions
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at different conditions
:return: bulk modulus at high pressure
"""
return cal_k_bm3(p, [v0, k0, k0p]) | calculate bulk modulus, wrapper for cal_k_bm3
cannot handle uncertainties
:param p: pressure
:param v0: volume at reference conditions
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at different conditions
    :return: bulk modulus at high pressure | Below is the instruction that describes the task:
### Input:
calculate bulk modulus, wrapper for cal_k_bm3
cannot handle uncertainties
:param p: pressure
:param v0: volume at reference conditions
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at different conditions
:return: bulk modulus at high pressure
### Response:
def bm3_k(p, v0, k0, k0p):
    """
    Calculate bulk modulus at high pressure: a thin wrapper around
    :func:`cal_k_bm3` that packs the scalar EOS parameters into the list the
    underlying function expects.  Cannot handle uncertainties.

    :param p: pressure
    :param v0: volume at reference conditions
    :param k0: bulk modulus at reference conditions
    :param k0p: pressure derivative of bulk modulus at different conditions
    :return: bulk modulus at high pressure
    """
    eos_params = [v0, k0, k0p]
    return cal_k_bm3(p, eos_params)
def from_fp(self, file_pointer, comment_lead=['c']):
"""
Read a CNF+ formula from a file pointer. A file pointer should be
specified as an argument. The only default argument is
``comment_lead``, which can be used for parsing specific comment
lines.
:param file_pointer: a file pointer to read the formula from.
:param comment_lead: a list of characters leading comment lines
:type file_pointer: file pointer
:type comment_lead: list(str)
Usage example:
.. code-block:: python
>>> with open('some-file.cnf+', 'r') as fp:
... cnf1 = CNFPlus()
... cnf1.from_fp(fp)
>>>
>>> with open('another-file.cnf+', 'r') as fp:
... cnf2 = CNFPlus(from_fp=fp)
"""
self.nv = 0
self.clauses = []
self.atmosts = []
self.comments = []
comment_lead = tuple('p') + tuple(comment_lead)
for line in file_pointer:
line = line.strip()
if line:
if line[0] not in comment_lead:
if line[-1] == '0': # normal clause
cl = [int(l) for l in line.split()[:-1]]
self.nv = max([abs(l) for l in cl] + [self.nv])
self.clauses.append(cl)
else: # atmost/atleast constraint
items = [i for i in line.split()]
lits = [int(l) for l in items[:-2]]
rhs = int(items[-1])
self.nv = max([abs(l) for l in lits] + [self.nv])
if items[-2][0] == '>':
lits = list(map(lambda l: -l, lits))
rhs = len(lits) - rhs
self.atmosts.append([lits, rhs])
elif not line.startswith('p cnf+ '):
self.comments.append(line) | Read a CNF+ formula from a file pointer. A file pointer should be
specified as an argument. The only default argument is
``comment_lead``, which can be used for parsing specific comment
lines.
:param file_pointer: a file pointer to read the formula from.
:param comment_lead: a list of characters leading comment lines
:type file_pointer: file pointer
:type comment_lead: list(str)
Usage example:
.. code-block:: python
>>> with open('some-file.cnf+', 'r') as fp:
... cnf1 = CNFPlus()
... cnf1.from_fp(fp)
>>>
>>> with open('another-file.cnf+', 'r') as fp:
... cnf2 = CNFPlus(from_fp=fp) | Below is the the instruction that describes the task:
### Input:
Read a CNF+ formula from a file pointer. A file pointer should be
specified as an argument. The only default argument is
``comment_lead``, which can be used for parsing specific comment
lines.
:param file_pointer: a file pointer to read the formula from.
:param comment_lead: a list of characters leading comment lines
:type file_pointer: file pointer
:type comment_lead: list(str)
Usage example:
.. code-block:: python
>>> with open('some-file.cnf+', 'r') as fp:
... cnf1 = CNFPlus()
... cnf1.from_fp(fp)
>>>
>>> with open('another-file.cnf+', 'r') as fp:
... cnf2 = CNFPlus(from_fp=fp)
### Response:
def from_fp(self, file_pointer, comment_lead=['c']):
    """
    Read a CNF+ formula from a file pointer. A file pointer should be
    specified as an argument. The only default argument is
    ``comment_lead``, which can be used for parsing specific comment
    lines.

    :param file_pointer: a file pointer to read the formula from.
    :param comment_lead: a list of characters leading comment lines

    :type file_pointer: file pointer
    :type comment_lead: list(str)

    Usage example:

    .. code-block:: python

        >>> with open('some-file.cnf+', 'r') as fp:
        ...     cnf1 = CNFPlus()
        ...     cnf1.from_fp(fp)
        >>>
        >>> with open('another-file.cnf+', 'r') as fp:
        ...     cnf2 = CNFPlus(from_fp=fp)
    """
    self.nv = 0
    self.clauses = []
    self.atmosts = []
    self.comments = []
    # Lines starting with 'p' (the DIMACS preamble) are handled like comment
    # lines, except that the 'p cnf+ ' header itself is silently skipped.
    leaders = tuple('p') + tuple(comment_lead)
    for raw_line in file_pointer:
        stripped = raw_line.strip()
        if not stripped:
            continue
        if stripped[0] in leaders:
            if not stripped.startswith('p cnf+ '):
                self.comments.append(stripped)
            continue
        if stripped[-1] == '0':
            # Ordinary clause: integer literals terminated by a 0 marker.
            literals = [int(tok) for tok in stripped.split()[:-1]]
            self.nv = max([abs(lit) for lit in literals] + [self.nv])
            self.clauses.append(literals)
        else:
            # Cardinality constraint: "<lits> <=|>= <bound>".
            tokens = stripped.split()
            literals = [int(tok) for tok in tokens[:-2]]
            bound = int(tokens[-1])
            self.nv = max([abs(lit) for lit in literals] + [self.nv])
            if tokens[-2][0] == '>':
                # Normalize an atleast constraint into an atmost one by
                # negating the literals and complementing the bound.
                literals = [-lit for lit in literals]
                bound = len(literals) - bound
            self.atmosts.append([literals, bound])
def get_stackset_ready_accounts(credentials, account_ids, quiet=True):
"""
Verify which AWS accounts have been configured for CloudFormation stack set by attempting to assume the stack set execution role
:param credentials: AWS credentials to use when calling sts:assumerole
:param org_account_ids: List of AWS accounts to check for Stackset configuration
:return: List of account IDs in which assuming the stackset execution role worked
"""
api_client = connect_service('sts', credentials, silent=True)
configured_account_ids = []
for account_id in account_ids:
try:
role_arn = 'arn:aws:iam::%s:role/AWSCloudFormationStackSetExecutionRole' % account_id
api_client.assume_role(RoleArn=role_arn, RoleSessionName='opinel-get_stackset_ready_accounts')
configured_account_ids.append(account_id)
except Exception as e:
pass
if len(configured_account_ids) != len(account_ids) and not quiet:
printInfo('Only %d of these accounts have the necessary stack set execution role:' % len(configured_account_ids))
printDebug(str(configured_account_ids))
return configured_account_ids | Verify which AWS accounts have been configured for CloudFormation stack set by attempting to assume the stack set execution role
:param credentials: AWS credentials to use when calling sts:assumerole
    :param account_ids: List of AWS accounts to check for Stackset configuration
    :return: List of account IDs in which assuming the stackset execution role worked | Below is the instruction that describes the task:
### Input:
Verify which AWS accounts have been configured for CloudFormation stack set by attempting to assume the stack set execution role
:param credentials: AWS credentials to use when calling sts:assumerole
:param org_account_ids: List of AWS accounts to check for Stackset configuration
:return: List of account IDs in which assuming the stackset execution role worked
### Response:
def get_stackset_ready_accounts(credentials, account_ids, quiet=True):
    """
    Verify which AWS accounts have been configured for CloudFormation stack set
    by attempting to assume the stack set execution role in each of them.

    :param credentials: AWS credentials to use when calling sts:assumerole
    :param account_ids: List of AWS accounts to check for stack set configuration
    :param quiet: When True, suppress the summary message printed when some
                  accounts are not configured

    :return: List of account IDs in which assuming the stackset execution role worked
    """
    api_client = connect_service('sts', credentials, silent=True)
    configured_account_ids = []
    for account_id in account_ids:
        try:
            # AWSCloudFormationStackSetExecutionRole is the conventional role
            # name created when an account is prepared for stack set operations.
            role_arn = 'arn:aws:iam::%s:role/AWSCloudFormationStackSetExecutionRole' % account_id
            api_client.assume_role(RoleArn=role_arn, RoleSessionName='opinel-get_stackset_ready_accounts')
            configured_account_ids.append(account_id)
        except Exception as e:
            # Best-effort probe: a failed AssumeRole just means this account is
            # not (yet) configured for stack sets, so it is skipped silently.
            pass
    if len(configured_account_ids) != len(account_ids) and not quiet:
        printInfo('Only %d of these accounts have the necessary stack set execution role:' % len(configured_account_ids))
        printDebug(str(configured_account_ids))
    return configured_account_ids
return configured_account_ids |
def wait_for_provision_state(baremetal_client, node_uuid, provision_state,
loops=10, sleep=1):
"""Wait for a given Provisioning state in Ironic Discoverd
Updating the provisioning state is an async operation, we
need to wait for it to be completed.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param node_uuid: The Ironic node UUID
:type node_uuid: str
:param provision_state: The provisioning state name to wait for
:type provision_state: str
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
for _ in range(0, loops):
node = baremetal_client.node.get(node_uuid)
if node is None:
# The node can't be found in ironic, so we don't need to wait for
# the provision state
return True
if node.provision_state == provision_state:
return True
time.sleep(sleep)
return False | Wait for a given Provisioning state in Ironic Discoverd
Updating the provisioning state is an async operation, we
need to wait for it to be completed.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param node_uuid: The Ironic node UUID
:type node_uuid: str
:param provision_state: The provisioning state name to wait for
:type provision_state: str
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int | Below is the the instruction that describes the task:
### Input:
Wait for a given Provisioning state in Ironic Discoverd
Updating the provisioning state is an async operation, we
need to wait for it to be completed.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param node_uuid: The Ironic node UUID
:type node_uuid: str
:param provision_state: The provisioning state name to wait for
:type provision_state: str
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
### Response:
def wait_for_provision_state(baremetal_client, node_uuid, provision_state,
                             loops=10, sleep=1):
    """Wait for a node to reach a given provision state in Ironic.

    Updating the provisioning state is an async operation; poll until the
    node reports the expected state or the retry budget runs out.

    :param baremetal_client: Instance of Ironic client
    :type baremetal_client: ironicclient.v1.client.Client
    :param node_uuid: The Ironic node UUID
    :type node_uuid: str
    :param provision_state: The provisioning state name to wait for
    :type provision_state: str
    :param loops: How many times to loop
    :type loops: int
    :param sleep: How long to sleep between loops
    :type sleep: int
    """
    remaining = loops
    while remaining > 0:
        remaining -= 1
        node = baremetal_client.node.get(node_uuid)
        if node is None:
            # The node is gone from Ironic entirely, so there is no
            # provision state left to wait for.
            return True
        if node.provision_state == provision_state:
            return True
        time.sleep(sleep)
    return False
return False |
def _get_color(self, age):
"""Get the fill color depending on age.
Args:
age (int): The age of the branch/es
Returns:
tuple: (r, g, b)
"""
if age == self.tree.age:
return self.leaf_color
color = self.stem_color
tree = self.tree
if len(color) == 3:
return color
diff = [color[i+3]-color[i] for i in range(3)]
per_age = [diff[i]/(tree.age-1) for i in range(3)]
return tuple([int(color[i]+per_age[i]*age) for i in range(3)]) | Get the fill color depending on age.
Args:
age (int): The age of the branch/es
Returns:
tuple: (r, g, b) | Below is the the instruction that describes the task:
### Input:
Get the fill color depending on age.
Args:
age (int): The age of the branch/es
Returns:
tuple: (r, g, b)
### Response:
def _get_color(self, age):
"""Get the fill color depending on age.
Args:
age (int): The age of the branch/es
Returns:
tuple: (r, g, b)
"""
if age == self.tree.age:
return self.leaf_color
color = self.stem_color
tree = self.tree
if len(color) == 3:
return color
diff = [color[i+3]-color[i] for i in range(3)]
per_age = [diff[i]/(tree.age-1) for i in range(3)]
return tuple([int(color[i]+per_age[i]*age) for i in range(3)]) |
def replace(self, hour=None, minute=None, second=None, microsecond=None,
tzinfo=True):
"""Return a new time with new values for the specified fields."""
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
return time.__new__(type(self),
hour, minute, second, microsecond, tzinfo) | Return a new time with new values for the specified fields. | Below is the the instruction that describes the task:
### Input:
Return a new time with new values for the specified fields.
### Response:
def replace(self, hour=None, minute=None, second=None, microsecond=None,
            tzinfo=True):
    """Return a new time with new values for the specified fields."""
    # True is the sentinel for tzinfo (because None is a meaningful tzinfo
    # value); for the other fields, None means "keep the current value".
    new_hour = self.hour if hour is None else hour
    new_minute = self.minute if minute is None else minute
    new_second = self.second if second is None else second
    new_microsecond = self.microsecond if microsecond is None else microsecond
    new_tzinfo = self.tzinfo if tzinfo is True else tzinfo
    return time.__new__(type(self), new_hour, new_minute, new_second,
                        new_microsecond, new_tzinfo)
def deregister_image(self, image_id, delete_snapshot=False):
"""
Unregister an AMI.
:type image_id: string
:param image_id: the ID of the Image to unregister
:type delete_snapshot: bool
:param delete_snapshot: Set to True if we should delete the
snapshot associated with an EBS volume
mounted at /dev/sda1
:rtype: bool
:return: True if successful
"""
snapshot_id = None
if delete_snapshot:
image = self.get_image(image_id)
for key in image.block_device_mapping:
if key == "/dev/sda1":
snapshot_id = image.block_device_mapping[key].snapshot_id
break
result = self.get_status('DeregisterImage',
{'ImageId':image_id}, verb='POST')
if result and snapshot_id:
return result and self.delete_snapshot(snapshot_id)
return result | Unregister an AMI.
:type image_id: string
:param image_id: the ID of the Image to unregister
:type delete_snapshot: bool
:param delete_snapshot: Set to True if we should delete the
snapshot associated with an EBS volume
mounted at /dev/sda1
:rtype: bool
:return: True if successful | Below is the the instruction that describes the task:
### Input:
Unregister an AMI.
:type image_id: string
:param image_id: the ID of the Image to unregister
:type delete_snapshot: bool
:param delete_snapshot: Set to True if we should delete the
snapshot associated with an EBS volume
mounted at /dev/sda1
:rtype: bool
:return: True if successful
### Response:
def deregister_image(self, image_id, delete_snapshot=False):
    """
    Unregister an AMI.

    :type image_id: string
    :param image_id: the ID of the Image to unregister

    :type delete_snapshot: bool
    :param delete_snapshot: Set to True if we should delete the snapshot
                            associated with an EBS volume mounted at /dev/sda1

    :rtype: bool
    :return: True if successful
    """
    snapshot_id = None
    if delete_snapshot:
        # Look up the snapshot backing the root device *before* the image
        # (and its block device mapping) is deregistered.
        mapping = self.get_image(image_id).block_device_mapping
        if "/dev/sda1" in mapping:
            snapshot_id = mapping["/dev/sda1"].snapshot_id
    result = self.get_status('DeregisterImage',
                             {'ImageId':image_id}, verb='POST')
    if result and snapshot_id:
        # Only delete the snapshot once deregistration has succeeded.
        return result and self.delete_snapshot(snapshot_id)
    return result
return result |
def start_monitor(self, standalone=True):
"""
Run command in a loop and check exit status plus restart process when needed
"""
try:
self.start()
cmdline = shlex.split(self.config.process_to_monitor)
if standalone:
signal.signal(signal.SIGINT, self.shutdown)
self.process = subprocess.Popen(cmdline, stdin=PIPE, stdout=PIPE, stderr=PIPE)
while self.process and not self.finished:
self.process.wait()
if self._is_sigsegv(self.process.returncode):
if self.config.debug:
print("[\033[92mINFO\033[0m] Process crashed with \033[91mSIGSEGV\033[0m, waiting for testcase...")
while not self.got_testcase():
time.sleep(1)
self.save_testcase(self.testcase[-10:]) # just take last 10 testcases
if self.process:
self.process = subprocess.Popen(cmdline, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except OSError:
self.shutdown()
self.process = False
self.got_testcase = lambda: True
raise PJFProcessExecutionError("Binary <%s> does not exist" % cmdline[0])
except Exception as e:
raise PJFBaseException("Unknown error please send log to author") | Run command in a loop and check exit status plus restart process when needed | Below is the the instruction that describes the task:
### Input:
Run command in a loop and check exit status plus restart process when needed
### Response:
def start_monitor(self, standalone=True):
    """
    Run the monitored command in a loop, check its exit status, and restart
    the process when needed.

    :param standalone: when True, install this object's ``shutdown`` handler
        for SIGINT so Ctrl-C tears the monitor down cleanly.
    :raises PJFProcessExecutionError: if the configured binary does not exist.
    :raises PJFBaseException: on any other unexpected error.
    """
    try:
        self.start()
        # Split the configured command line shell-style into argv form.
        cmdline = shlex.split(self.config.process_to_monitor)
        if standalone:
            signal.signal(signal.SIGINT, self.shutdown)
        self.process = subprocess.Popen(cmdline, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        # Keep supervising until shutdown clears self.process / sets
        # self.finished.
        while self.process and not self.finished:
            self.process.wait()
            if self._is_sigsegv(self.process.returncode):
                if self.config.debug:
                    print("[\033[92mINFO\033[0m] Process crashed with \033[91mSIGSEGV\033[0m, waiting for testcase...")
                # Block until a testcase is available.
                # NOTE(review): self.testcase is presumably a list populated
                # elsewhere (e.g. by the fuzzing loop) — confirm.
                while not self.got_testcase():
                    time.sleep(1)
                self.save_testcase(self.testcase[-10:]) # just take last 10 testcases
            if self.process:
                # Restart the monitored binary after it exited/crashed.
                self.process = subprocess.Popen(cmdline, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    except OSError:
        # Popen raises OSError when the binary cannot be executed; make the
        # monitor state consistent before reporting the failure.
        self.shutdown()
        self.process = False
        self.got_testcase = lambda: True
        raise PJFProcessExecutionError("Binary <%s> does not exist" % cmdline[0])
    except Exception as e:
        # NOTE(review): this swallows the original exception (and its
        # traceback context) behind a generic error — consider chaining.
        raise PJFBaseException("Unknown error please send log to author")
def construct_rest_of_worlds_mapping(self, excluded, fp=None):
"""Construct topo mapping file for ``excluded``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``.
Topo mapping has the data format:
.. code-block:: python
{
'data': [
['location label', ['topo face integer ids']],
],
'metadata': {
'filename': 'name of face definitions file',
'field': 'field with uniquely identifies the fields in ``filename``',
'sha256': 'SHA 256 hash of ``filename``'
}
}
"""
metadata = {
'filename': 'faces.gpkg',
'field': 'id',
'sha256': sha256(self.faces_fp)
}
data = []
for key, locations in excluded.items():
for location in locations:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
{face for loc in locations for face in self.data[loc]}
)
data.append((key, sorted(included)))
obj = {'data': data, 'metadata': metadata}
if fp:
with open(fp, "w") as f:
json.dump(obj, f, indent=2)
else:
return obj | Construct topo mapping file for ``excluded``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``.
Topo mapping has the data format:
.. code-block:: python
{
'data': [
['location label', ['topo face integer ids']],
],
'metadata': {
'filename': 'name of face definitions file',
'field': 'field with uniquely identifies the fields in ``filename``',
'sha256': 'SHA 256 hash of ``filename``'
}
} | Below is the the instruction that describes the task:
### Input:
Construct topo mapping file for ``excluded``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``.
Topo mapping has the data format:
.. code-block:: python
{
'data': [
['location label', ['topo face integer ids']],
],
'metadata': {
'filename': 'name of face definitions file',
'field': 'field with uniquely identifies the fields in ``filename``',
'sha256': 'SHA 256 hash of ``filename``'
}
}
### Response:
def construct_rest_of_worlds_mapping(self, excluded, fp=None):
    """Construct topo mapping file for ``excluded``.

    ``excluded`` must be a **dictionary** of
    ``{"rest-of-world label": ["names", "of", "excluded", "locations"]}``.

    If ``fp`` is given, the mapping is written there as JSON (and nothing is
    returned); otherwise the mapping dict is returned.

    Topo mapping has the data format:

    .. code-block:: python

        {
            'data': [
                ['location label', ['topo face integer ids']],
            ],
            'metadata': {
                'filename': 'name of face definitions file',
                'field': 'field with uniquely identifies the fields in ``filename``',
                'sha256': 'SHA 256 hash of ``filename``'
            }
        }
    """
    # Fingerprint the face-definitions file so consumers can detect stale
    # mappings.
    metadata = {
        'filename': 'faces.gpkg',
        'field': 'id',
        'sha256': sha256(self.faces_fp)
    }
    data = []
    for key, locations in excluded.items():
        # NOTE(review): ``assert`` is stripped under ``python -O``; consider
        # raising ValueError for this input validation — confirm with callers.
        for location in locations:
            assert location in self.locations, "Can't find location {}".format(location)
        # A rest-of-world is every face *not* claimed by any excluded location.
        included = self.all_faces.difference(
            {face for loc in locations for face in self.data[loc]}
        )
        data.append((key, sorted(included)))
    obj = {'data': data, 'metadata': metadata}
    if fp:
        with open(fp, "w") as f:
            json.dump(obj, f, indent=2)
    else:
        return obj
def aggregate(self, query: Optional[dict] = None,
group: Optional[dict] = None,
order_by: Union[None, list, tuple] = None) -> list:
"""return aggregation result based on specified rulez query and group"""
raise NotImplementedError | return aggregation result based on specified rulez query and group | Below is the the instruction that describes the task:
### Input:
return aggregation result based on specified rulez query and group
### Response:
def aggregate(self, query: Optional[dict] = None,
              group: Optional[dict] = None,
              order_by: Union[None, list, tuple] = None) -> list:
    """Return aggregation result based on specified rulez query and group.

    :param query: optional rulez query used to filter the rows to aggregate
    :param group: optional grouping specification for the aggregation
    :param order_by: optional ordering of the aggregated result
    :raises NotImplementedError: always; concrete storage backends must
        override this method.
    """
    raise NotImplementedError
def dispatch(self, request, start_response):
"""Handles dispatch to apiserver handlers.
This typically ends up calling start_response and returning the entire
body of the response.
Args:
request: An ApiRequest, the request from the user.
start_response: A function with semantics defined in PEP-333.
Returns:
A string, the body of the response.
"""
# Check if this matches any of our special handlers.
dispatched_response = self.dispatch_non_api_requests(request,
start_response)
if dispatched_response is not None:
return dispatched_response
# Call the service.
try:
return self.call_backend(request, start_response)
except errors.RequestError as error:
return self._handle_request_error(request, error, start_response) | Handles dispatch to apiserver handlers.
This typically ends up calling start_response and returning the entire
body of the response.
Args:
request: An ApiRequest, the request from the user.
start_response: A function with semantics defined in PEP-333.
Returns:
A string, the body of the response. | Below is the the instruction that describes the task:
### Input:
Handles dispatch to apiserver handlers.
This typically ends up calling start_response and returning the entire
body of the response.
Args:
request: An ApiRequest, the request from the user.
start_response: A function with semantics defined in PEP-333.
Returns:
A string, the body of the response.
### Response:
def dispatch(self, request, start_response):
    """Handles dispatch to apiserver handlers.

    This typically ends up calling start_response and returning the entire
    body of the response.

    Args:
      request: An ApiRequest, the request from the user.
      start_response: A function with semantics defined in PEP-333.

    Returns:
      A string, the body of the response.
    """
    # Special (non-API) handlers get the first chance at the request.
    special_response = self.dispatch_non_api_requests(request, start_response)
    if special_response is not None:
        return special_response
    # Otherwise forward to the backend service, converting request errors
    # into proper error responses.
    try:
        return self.call_backend(request, start_response)
    except errors.RequestError as error:
        return self._handle_request_error(request, error, start_response)
def Remote(path=None, loader=Notebook, **globals):
"""A remote notebook finder. Place a `*` into a url
to generalize the finder. It returns a context manager
"""
class Remote(RemoteMixin, loader):
...
return Remote(path=path, **globals) | A remote notebook finder. Place a `*` into a url
to generalize the finder. It returns a context manager | Below is the the instruction that describes the task:
### Input:
A remote notebook finder. Place a `*` into a url
to generalize the finder. It returns a context manager
### Response:
def Remote(path=None, loader=Notebook, **global_values):
    """A remote notebook finder. Place a ``*`` into a url to generalize the
    finder. It returns a context manager.

    :param path: the (possibly wildcarded) url/path of the remote notebook(s).
    :param loader: the loader class that ``RemoteMixin`` is mixed into
        (``Notebook`` by default).
    :param global_values: extra keyword arguments forwarded to the loader
        constructor (presumably injected globals — confirm against the
        ``Notebook`` loader signature).
    """
    # Collector renamed from ``globals`` so the builtin ``globals()`` is not
    # shadowed inside this function; callers are unaffected (it is the
    # var-keyword parameter).
    class Remote(RemoteMixin, loader):
        ...
    return Remote(path=path, **global_values)
def update_task_descriptor_content(self, courseid, taskid, content, force_extension=None):
"""
Update the task descriptor with the dict in content
:param courseid: the course id of the course
:param taskid: the task id of the task
:param content: the content to put in the task file
:param force_extension: If None, save it the same format. Else, save with the given extension
:raise InvalidNameException, TaskNotFoundException, TaskUnreadableException
"""
if not id_checker(courseid):
raise InvalidNameException("Course with invalid name: " + courseid)
if not id_checker(taskid):
raise InvalidNameException("Task with invalid name: " + taskid)
if force_extension is None:
path_to_descriptor, descriptor_manager = self._get_task_descriptor_info(courseid, taskid)
elif force_extension in self.get_available_task_file_extensions():
path_to_descriptor = "task." + force_extension
descriptor_manager = self._task_file_managers[force_extension]
else:
raise TaskReaderNotFoundException()
try:
self.get_task_fs(courseid, taskid).put(path_to_descriptor, descriptor_manager.dump(content))
except:
raise TaskNotFoundException() | Update the task descriptor with the dict in content
:param courseid: the course id of the course
:param taskid: the task id of the task
:param content: the content to put in the task file
:param force_extension: If None, save it the same format. Else, save with the given extension
:raise InvalidNameException, TaskNotFoundException, TaskUnreadableException | Below is the the instruction that describes the task:
### Input:
Update the task descriptor with the dict in content
:param courseid: the course id of the course
:param taskid: the task id of the task
:param content: the content to put in the task file
:param force_extension: If None, save it the same format. Else, save with the given extension
:raise InvalidNameException, TaskNotFoundException, TaskUnreadableException
### Response:
def update_task_descriptor_content(self, courseid, taskid, content, force_extension=None):
    """
    Update the task descriptor with the dict in content.

    :param courseid: the course id of the course
    :param taskid: the task id of the task
    :param content: the content to put in the task file
    :param force_extension: If None, save it the same format. Else, save with the given extension
    :raise InvalidNameException, TaskNotFoundException, TaskReaderNotFoundException
    """
    if not id_checker(courseid):
        raise InvalidNameException("Course with invalid name: " + courseid)
    if not id_checker(taskid):
        raise InvalidNameException("Task with invalid name: " + taskid)
    if force_extension is None:
        # Keep the descriptor's current format.
        path_to_descriptor, descriptor_manager = self._get_task_descriptor_info(courseid, taskid)
    elif force_extension in self.get_available_task_file_extensions():
        path_to_descriptor = "task." + force_extension
        descriptor_manager = self._task_file_managers[force_extension]
    else:
        raise TaskReaderNotFoundException()
    try:
        self.get_task_fs(courseid, taskid).put(path_to_descriptor, descriptor_manager.dump(content))
    except Exception:
        # BUGFIX: was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; only genuine errors should be mapped to
        # TaskNotFoundException.
        raise TaskNotFoundException()
def _refine_enc(enc):
'''
Return the properly formatted ssh value for the authorized encryption key
type. ecdsa defaults to 256 bits, must give full ecdsa enc schema string
if using higher enc. If the type is not found, raise CommandExecutionError.
'''
rsa = ['r', 'rsa', 'ssh-rsa']
dss = ['d', 'dsa', 'dss', 'ssh-dss']
ecdsa = ['e', 'ecdsa', 'ecdsa-sha2-nistp521', 'ecdsa-sha2-nistp384',
'ecdsa-sha2-nistp256']
ed25519 = ['ed25519', 'ssh-ed25519']
if enc in rsa:
return 'ssh-rsa'
elif enc in dss:
return 'ssh-dss'
elif enc in ecdsa:
# ecdsa defaults to ecdsa-sha2-nistp256
# otherwise enc string is actual encoding string
if enc in ['e', 'ecdsa']:
return 'ecdsa-sha2-nistp256'
return enc
elif enc in ed25519:
return 'ssh-ed25519'
else:
raise CommandExecutionError(
'Incorrect encryption key type \'{0}\'.'.format(enc)
) | Return the properly formatted ssh value for the authorized encryption key
type. ecdsa defaults to 256 bits, must give full ecdsa enc schema string
if using higher enc. If the type is not found, raise CommandExecutionError. | Below is the the instruction that describes the task:
### Input:
Return the properly formatted ssh value for the authorized encryption key
type. ecdsa defaults to 256 bits, must give full ecdsa enc schema string
if using higher enc. If the type is not found, raise CommandExecutionError.
### Response:
def _refine_enc(enc):
'''
Return the properly formatted ssh value for the authorized encryption key
type. ecdsa defaults to 256 bits, must give full ecdsa enc schema string
if using higher enc. If the type is not found, raise CommandExecutionError.
'''
rsa = ['r', 'rsa', 'ssh-rsa']
dss = ['d', 'dsa', 'dss', 'ssh-dss']
ecdsa = ['e', 'ecdsa', 'ecdsa-sha2-nistp521', 'ecdsa-sha2-nistp384',
'ecdsa-sha2-nistp256']
ed25519 = ['ed25519', 'ssh-ed25519']
if enc in rsa:
return 'ssh-rsa'
elif enc in dss:
return 'ssh-dss'
elif enc in ecdsa:
# ecdsa defaults to ecdsa-sha2-nistp256
# otherwise enc string is actual encoding string
if enc in ['e', 'ecdsa']:
return 'ecdsa-sha2-nistp256'
return enc
elif enc in ed25519:
return 'ssh-ed25519'
else:
raise CommandExecutionError(
'Incorrect encryption key type \'{0}\'.'.format(enc)
) |
def rank(keys, axis=semantics.axis_default):
"""where each item is in the pecking order.
Parameters
----------
keys : indexable object
Returns
-------
ndarray, [keys.size], int
unique integers, ranking the sorting order
Notes
-----
we should have that index.sorted[index.rank] == keys
"""
index = as_index(keys, axis)
return index.rank | where each item is in the pecking order.
Parameters
----------
keys : indexable object
Returns
-------
ndarray, [keys.size], int
unique integers, ranking the sorting order
Notes
-----
we should have that index.sorted[index.rank] == keys | Below is the the instruction that describes the task:
### Input:
where each item is in the pecking order.
Parameters
----------
keys : indexable object
Returns
-------
ndarray, [keys.size], int
unique integers, ranking the sorting order
Notes
-----
we should have that index.sorted[index.rank] == keys
### Response:
def rank(keys, axis=semantics.axis_default):
"""where each item is in the pecking order.
Parameters
----------
keys : indexable object
Returns
-------
ndarray, [keys.size], int
unique integers, ranking the sorting order
Notes
-----
we should have that index.sorted[index.rank] == keys
"""
index = as_index(keys, axis)
return index.rank |
def get_args(node):
"""Return the arguments of a node in the event graph."""
arg_roles = {}
args = node.findall('arg') + \
[node.find('arg1'), node.find('arg2'), node.find('arg3')]
for arg in args:
if arg is not None:
id = arg.attrib.get('id')
if id is not None:
arg_roles[arg.attrib['role']] = (arg.attrib['id'], arg)
# Now look at possible inevent links
if node.find('features') is not None:
inevents = node.findall('features/inevent')
for inevent in inevents:
if 'id' in inevent.attrib:
arg_roles['inevent'] = (inevent.attrib['id'], inevent)
ptms = node.findall('features/ptm') + node.findall('features/no-ptm')
for ptm in ptms:
if 'id' in inevent.attrib:
arg_roles['ptm'] = (inevent.attrib['id'], ptm)
# And also look for assoc-with links
aw = node.find('assoc-with')
if aw is not None:
aw_id = aw.attrib['id']
arg_roles['assoc-with'] = (aw_id, aw)
return arg_roles | Return the arguments of a node in the event graph. | Below is the the instruction that describes the task:
### Input:
Return the arguments of a node in the event graph.
### Response:
def get_args(node):
"""Return the arguments of a node in the event graph."""
arg_roles = {}
args = node.findall('arg') + \
[node.find('arg1'), node.find('arg2'), node.find('arg3')]
for arg in args:
if arg is not None:
id = arg.attrib.get('id')
if id is not None:
arg_roles[arg.attrib['role']] = (arg.attrib['id'], arg)
# Now look at possible inevent links
if node.find('features') is not None:
inevents = node.findall('features/inevent')
for inevent in inevents:
if 'id' in inevent.attrib:
arg_roles['inevent'] = (inevent.attrib['id'], inevent)
ptms = node.findall('features/ptm') + node.findall('features/no-ptm')
for ptm in ptms:
if 'id' in inevent.attrib:
arg_roles['ptm'] = (inevent.attrib['id'], ptm)
# And also look for assoc-with links
aw = node.find('assoc-with')
if aw is not None:
aw_id = aw.attrib['id']
arg_roles['assoc-with'] = (aw_id, aw)
return arg_roles |
def register_email(request):
'''
Register new email.
'''
user = request.user
serializer = RegisterEmailSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
email = serializer.validated_data['email']
template_config = (
registration_settings.REGISTER_EMAIL_VERIFICATION_EMAIL_TEMPLATES)
if registration_settings.REGISTER_EMAIL_VERIFICATION_ENABLED:
signer = RegisterEmailSigner({
'user_id': user.pk,
'email': email,
}, request=request)
send_verification_notification(
user, signer, template_config, email=email)
else:
email_field = get_user_setting('EMAIL_FIELD')
setattr(user, email_field, email)
user.save()
return get_ok_response('Register email link email sent') | Register new email. | Below is the the instruction that describes the task:
### Input:
Register new email.
### Response:
def register_email(request):
'''
Register new email.
'''
user = request.user
serializer = RegisterEmailSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
email = serializer.validated_data['email']
template_config = (
registration_settings.REGISTER_EMAIL_VERIFICATION_EMAIL_TEMPLATES)
if registration_settings.REGISTER_EMAIL_VERIFICATION_ENABLED:
signer = RegisterEmailSigner({
'user_id': user.pk,
'email': email,
}, request=request)
send_verification_notification(
user, signer, template_config, email=email)
else:
email_field = get_user_setting('EMAIL_FIELD')
setattr(user, email_field, email)
user.save()
return get_ok_response('Register email link email sent') |
def get_html_tree(filename_url_or_filelike):
"""From some file path, input stream, or URL, construct and return
an HTML tree.
"""
try:
handler = (
HTTPSHandler
if filename_url_or_filelike.lower().startswith('https')
else HTTPHandler
)
cj = CookieJar()
opener = build_opener(handler)
opener.add_handler(HTTPCookieProcessor(cj))
resp = opener.open(filename_url_or_filelike)
except(AttributeError):
content = filename_url_or_filelike.read()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
except(ValueError):
content = filename_url_or_filelike
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
try:
content = resp.read()
finally:
resp.close()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html | From some file path, input stream, or URL, construct and return
an HTML tree. | Below is the the instruction that describes the task:
### Input:
From some file path, input stream, or URL, construct and return
an HTML tree.
### Response:
def get_html_tree(filename_url_or_filelike):
"""From some file path, input stream, or URL, construct and return
an HTML tree.
"""
try:
handler = (
HTTPSHandler
if filename_url_or_filelike.lower().startswith('https')
else HTTPHandler
)
cj = CookieJar()
opener = build_opener(handler)
opener.add_handler(HTTPCookieProcessor(cj))
resp = opener.open(filename_url_or_filelike)
except(AttributeError):
content = filename_url_or_filelike.read()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
except(ValueError):
content = filename_url_or_filelike
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
try:
content = resp.read()
finally:
resp.close()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html |
def _read_nowait(self, n: int) -> bytes:
""" Read not more than n bytes, or whole buffer is n == -1 """
chunks = []
while self._buffer:
chunk = self._read_nowait_chunk(n)
chunks.append(chunk)
if n != -1:
n -= len(chunk)
if n == 0:
break
return b''.join(chunks) if chunks else b'' | Read not more than n bytes, or whole buffer is n == -1 | Below is the the instruction that describes the task:
### Input:
Read not more than n bytes, or whole buffer is n == -1
### Response:
def _read_nowait(self, n: int) -> bytes:
""" Read not more than n bytes, or whole buffer is n == -1 """
chunks = []
while self._buffer:
chunk = self._read_nowait_chunk(n)
chunks.append(chunk)
if n != -1:
n -= len(chunk)
if n == 0:
break
return b''.join(chunks) if chunks else b'' |
def coupl_model8(self):
""" Variant of toggle switch.
"""
self.Coupl = 0.5*self.Adj_signed
# reduce the value of the coupling of the repressing genes
# otherwise completely unstable solutions are obtained
for x in np.nditer(self.Coupl,op_flags=['readwrite']):
if x < -1e-6:
x[...] = -0.2 | Variant of toggle switch. | Below is the the instruction that describes the task:
### Input:
Variant of toggle switch.
### Response:
def coupl_model8(self):
""" Variant of toggle switch.
"""
self.Coupl = 0.5*self.Adj_signed
# reduce the value of the coupling of the repressing genes
# otherwise completely unstable solutions are obtained
for x in np.nditer(self.Coupl,op_flags=['readwrite']):
if x < -1e-6:
x[...] = -0.2 |
def set_mute(mute_value):
"Browse for mute usages and set value"
all_mutes = ( \
(0x8, 0x9), # LED page
(0x1, 0xA7), # desktop page
(0xb, 0x2f),
)
all_target_usages = [hid.get_full_usage_id(u[0], u[1]) for u in all_mutes]
# usually you'll find and open the target device, here we'll browse for the
# current connected devices
all_devices = hid.find_all_hid_devices()
success = 0
if not all_devices:
print("Can't any HID device!")
else:
# search for our target usage
# target pageId, usageId
for device in all_devices:
try:
device.open()
# target 'to set' value could be in feature or output reports
for report in device.find_output_reports() + device.find_feature_reports():
for target_usage in all_target_usages:
if target_usage in report:
# set our value and send
report[target_usage] = value
report.send()
success += 1
finally:
device.close()
# fit to sys.exit() proper result values
print("{0} Mute usage(s) set\n".format(success))
if success:
return 0
return -1 | Browse for mute usages and set value | Below is the the instruction that describes the task:
### Input:
Browse for mute usages and set value
### Response:
def set_mute(mute_value):
"Browse for mute usages and set value"
all_mutes = ( \
(0x8, 0x9), # LED page
(0x1, 0xA7), # desktop page
(0xb, 0x2f),
)
all_target_usages = [hid.get_full_usage_id(u[0], u[1]) for u in all_mutes]
# usually you'll find and open the target device, here we'll browse for the
# current connected devices
all_devices = hid.find_all_hid_devices()
success = 0
if not all_devices:
print("Can't any HID device!")
else:
# search for our target usage
# target pageId, usageId
for device in all_devices:
try:
device.open()
# target 'to set' value could be in feature or output reports
for report in device.find_output_reports() + device.find_feature_reports():
for target_usage in all_target_usages:
if target_usage in report:
# set our value and send
report[target_usage] = value
report.send()
success += 1
finally:
device.close()
# fit to sys.exit() proper result values
print("{0} Mute usage(s) set\n".format(success))
if success:
return 0
return -1 |
def stack_template_key_name(blueprint):
"""Given a blueprint, produce an appropriate key name.
Args:
blueprint (:class:`stacker.blueprints.base.Blueprint`): The blueprint
object to create the key from.
Returns:
string: Key name resulting from blueprint.
"""
name = blueprint.name
return "stack_templates/%s/%s-%s.json" % (blueprint.context.get_fqn(name),
name,
blueprint.version) | Given a blueprint, produce an appropriate key name.
Args:
blueprint (:class:`stacker.blueprints.base.Blueprint`): The blueprint
object to create the key from.
Returns:
string: Key name resulting from blueprint. | Below is the the instruction that describes the task:
### Input:
Given a blueprint, produce an appropriate key name.
Args:
blueprint (:class:`stacker.blueprints.base.Blueprint`): The blueprint
object to create the key from.
Returns:
string: Key name resulting from blueprint.
### Response:
def stack_template_key_name(blueprint):
"""Given a blueprint, produce an appropriate key name.
Args:
blueprint (:class:`stacker.blueprints.base.Blueprint`): The blueprint
object to create the key from.
Returns:
string: Key name resulting from blueprint.
"""
name = blueprint.name
return "stack_templates/%s/%s-%s.json" % (blueprint.context.get_fqn(name),
name,
blueprint.version) |
def _add_two_way_unqualified_edge(self, u: BaseEntity, v: BaseEntity, relation: str) -> str:
"""Add an unqualified edge both ways."""
self.add_unqualified_edge(v, u, relation)
return self.add_unqualified_edge(u, v, relation) | Add an unqualified edge both ways. | Below is the the instruction that describes the task:
### Input:
Add an unqualified edge both ways.
### Response:
def _add_two_way_unqualified_edge(self, u: BaseEntity, v: BaseEntity, relation: str) -> str:
"""Add an unqualified edge both ways."""
self.add_unqualified_edge(v, u, relation)
return self.add_unqualified_edge(u, v, relation) |
def make_executable(query):
"""make_executable(query) -- give executable permissions to a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
os.system('chmod +x '+filename)
else:
print 'file not found' | make_executable(query) -- give executable permissions to a given file | Below is the the instruction that describes the task:
### Input:
make_executable(query) -- give executable permissions to a given file
### Response:
def make_executable(query):
"""make_executable(query) -- give executable permissions to a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
os.system('chmod +x '+filename)
else:
print 'file not found' |
def version(user=None, host=None, port=None, maintenance_db=None,
password=None, runas=None):
'''
Return the version of a Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.version
'''
query = 'SELECT setting FROM pg_catalog.pg_settings ' \
'WHERE name = \'server_version\''
cmd = _psql_cmd('-c', query,
'-t',
host=host,
user=user,
port=port,
maintenance_db=maintenance_db,
password=password)
ret = _run_psql(
cmd, runas=runas, password=password, host=host, port=port, user=user)
for line in salt.utils.itertools.split(ret['stdout'], '\n'):
# Just return the first line
return line | Return the version of a Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.version | Below is the the instruction that describes the task:
### Input:
Return the version of a Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.version
### Response:
def version(user=None, host=None, port=None, maintenance_db=None,
password=None, runas=None):
'''
Return the version of a Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.version
'''
query = 'SELECT setting FROM pg_catalog.pg_settings ' \
'WHERE name = \'server_version\''
cmd = _psql_cmd('-c', query,
'-t',
host=host,
user=user,
port=port,
maintenance_db=maintenance_db,
password=password)
ret = _run_psql(
cmd, runas=runas, password=password, host=host, port=port, user=user)
for line in salt.utils.itertools.split(ret['stdout'], '\n'):
# Just return the first line
return line |
def pruning(self, X, y, cost_mat):
""" Function that prune the decision tree.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
y_true : array indicator matrix
Ground truth (correct) labels.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example.
"""
self.tree_.tree_pruned = copy.deepcopy(self.tree_.tree)
if self.tree_.n_nodes > 0:
self._pruning(X, y, cost_mat)
nodes_pruned = self._nodes(self.tree_.tree_pruned)
self.tree_.n_nodes_pruned = len(nodes_pruned) | Function that prune the decision tree.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
y_true : array indicator matrix
Ground truth (correct) labels.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example. | Below is the the instruction that describes the task:
### Input:
Function that prune the decision tree.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
y_true : array indicator matrix
Ground truth (correct) labels.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example.
### Response:
def pruning(self, X, y, cost_mat):
""" Function that prune the decision tree.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
y_true : array indicator matrix
Ground truth (correct) labels.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example.
"""
self.tree_.tree_pruned = copy.deepcopy(self.tree_.tree)
if self.tree_.n_nodes > 0:
self._pruning(X, y, cost_mat)
nodes_pruned = self._nodes(self.tree_.tree_pruned)
self.tree_.n_nodes_pruned = len(nodes_pruned) |
def __analizar_evento(self, ret):
"Comprueba y extrae el wvento informativo si existen en la respuesta XML"
evt = ret.get('evento')
if evt:
self.Eventos = [evt]
self.Evento = "%(codigo)s: %(descripcion)s" % evt | Comprueba y extrae el wvento informativo si existen en la respuesta XML | Below is the the instruction that describes the task:
### Input:
Comprueba y extrae el wvento informativo si existen en la respuesta XML
### Response:
def __analizar_evento(self, ret):
"Comprueba y extrae el wvento informativo si existen en la respuesta XML"
evt = ret.get('evento')
if evt:
self.Eventos = [evt]
self.Evento = "%(codigo)s: %(descripcion)s" % evt |
def convert_scope_string_to_expression(scope):
'''**Description**
Internal function to convert a filter string to a filter object to be used with dashboards.
'''
#
# NOTE: The supported grammar is not perfectly aligned with the grammar supported by the Sysdig backend.
# Proper grammar implementation will happen soon.
# For practical purposes, the parsing will have equivalent results.
#
if scope is None or not scope:
return [True, []]
expressions = []
string_expressions = scope.strip(' \t\n\r').split(' and ')
expression_re = re.compile('^(?P<not>not )?(?P<operand>[^ ]+) (?P<operator>=|!=|in|contains|starts with) (?P<value>(:?"[^"]+"|\'[^\']+\'|\(.+\)|.+))$')
for string_expression in string_expressions:
matches = expression_re.match(string_expression)
if matches is None:
return [False, 'invalid scope format']
is_not_operator = matches.group('not') is not None
if matches.group('operator') == 'in':
list_value = matches.group('value').strip(' ()')
value_matches = re.findall('(:?\'[^\',]+\')|(:?"[^",]+")|(:?[,]+)', list_value)
if len(value_matches) == 0:
return [False, 'invalid scope value list format']
value_matches = map(lambda v: v[0] if v[0] else v[1], value_matches)
values = map(lambda v: v.strip(' "\''), value_matches)
else:
values = [matches.group('value').strip('"\'')]
operator_parse_dict = {
'in': 'in' if not is_not_operator else 'notIn',
'=': 'equals' if not is_not_operator else 'notEquals',
'!=': 'notEquals' if not is_not_operator else 'equals',
'contains': 'contains' if not is_not_operator else 'notContains',
'starts with': 'startsWith'
}
operator = operator_parse_dict.get(matches.group('operator'), None)
if operator is None:
return [False, 'invalid scope operator']
expressions.append({
'operand': matches.group('operand'),
'operator': operator,
'value': values
})
return [True, expressions] | **Description**
Internal function to convert a filter string to a filter object to be used with dashboards. | Below is the the instruction that describes the task:
### Input:
**Description**
Internal function to convert a filter string to a filter object to be used with dashboards.
### Response:
def convert_scope_string_to_expression(scope):
'''**Description**
Internal function to convert a filter string to a filter object to be used with dashboards.
'''
#
# NOTE: The supported grammar is not perfectly aligned with the grammar supported by the Sysdig backend.
# Proper grammar implementation will happen soon.
# For practical purposes, the parsing will have equivalent results.
#
if scope is None or not scope:
return [True, []]
expressions = []
string_expressions = scope.strip(' \t\n\r').split(' and ')
expression_re = re.compile('^(?P<not>not )?(?P<operand>[^ ]+) (?P<operator>=|!=|in|contains|starts with) (?P<value>(:?"[^"]+"|\'[^\']+\'|\(.+\)|.+))$')
for string_expression in string_expressions:
matches = expression_re.match(string_expression)
if matches is None:
return [False, 'invalid scope format']
is_not_operator = matches.group('not') is not None
if matches.group('operator') == 'in':
list_value = matches.group('value').strip(' ()')
value_matches = re.findall('(:?\'[^\',]+\')|(:?"[^",]+")|(:?[,]+)', list_value)
if len(value_matches) == 0:
return [False, 'invalid scope value list format']
value_matches = map(lambda v: v[0] if v[0] else v[1], value_matches)
values = map(lambda v: v.strip(' "\''), value_matches)
else:
values = [matches.group('value').strip('"\'')]
operator_parse_dict = {
'in': 'in' if not is_not_operator else 'notIn',
'=': 'equals' if not is_not_operator else 'notEquals',
'!=': 'notEquals' if not is_not_operator else 'equals',
'contains': 'contains' if not is_not_operator else 'notContains',
'starts with': 'startsWith'
}
operator = operator_parse_dict.get(matches.group('operator'), None)
if operator is None:
return [False, 'invalid scope operator']
expressions.append({
'operand': matches.group('operand'),
'operator': operator,
'value': values
})
return [True, expressions] |
def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):
'''
Determine whether or not the installed packages match what was requested in
the SLS file.
'''
ok = []
failed = []
if not new_caps:
new_caps = dict()
for pkgname, pkgver in desired.items():
# FreeBSD pkg supports `openjdk` and `java/openjdk7` package names.
# Homebrew for Mac OSX does something similar with tap names
# prefixing package names, separated with a slash.
has_origin = '/' in pkgname
if __grains__['os'] == 'FreeBSD' and has_origin:
cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname]
elif __grains__['os'] == 'MacOS' and has_origin:
cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1]))
elif __grains__['os'] == 'OpenBSD':
cver = new_pkgs.get(pkgname.split('%')[0])
elif __grains__['os_family'] == 'Debian':
cver = new_pkgs.get(pkgname.split('=')[0])
else:
cver = new_pkgs.get(pkgname)
if not cver and pkgname in new_caps:
cver = new_pkgs.get(new_caps.get(pkgname)[0])
if not cver:
failed.append(pkgname)
continue
elif pkgver == 'latest':
ok.append(pkgname)
continue
elif not __salt__['pkg_resource.version_clean'](pkgver):
ok.append(pkgname)
continue
elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]):
ok.append(pkgname)
continue
if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):
ok.append(pkgname)
else:
failed.append(pkgname)
return ok, failed | Determine whether or not the installed packages match what was requested in
the SLS file. | Below is the the instruction that describes the task:
### Input:
Determine whether or not the installed packages match what was requested in
the SLS file.
### Response:
def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):
'''
Determine whether or not the installed packages match what was requested in
the SLS file.
'''
ok = []
failed = []
if not new_caps:
new_caps = dict()
for pkgname, pkgver in desired.items():
# FreeBSD pkg supports `openjdk` and `java/openjdk7` package names.
# Homebrew for Mac OSX does something similar with tap names
# prefixing package names, separated with a slash.
has_origin = '/' in pkgname
if __grains__['os'] == 'FreeBSD' and has_origin:
cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname]
elif __grains__['os'] == 'MacOS' and has_origin:
cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1]))
elif __grains__['os'] == 'OpenBSD':
cver = new_pkgs.get(pkgname.split('%')[0])
elif __grains__['os_family'] == 'Debian':
cver = new_pkgs.get(pkgname.split('=')[0])
else:
cver = new_pkgs.get(pkgname)
if not cver and pkgname in new_caps:
cver = new_pkgs.get(new_caps.get(pkgname)[0])
if not cver:
failed.append(pkgname)
continue
elif pkgver == 'latest':
ok.append(pkgname)
continue
elif not __salt__['pkg_resource.version_clean'](pkgver):
ok.append(pkgname)
continue
elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]):
ok.append(pkgname)
continue
if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):
ok.append(pkgname)
else:
failed.append(pkgname)
return ok, failed |
def post(self, url, post_params=None):
"""
Make an HTTP POST request to the Parser API.
:param url: url to which to make the request
:param post_params: POST data to send along. Expected to be a dict.
"""
post_params['token'] = self.token
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return requests.post(url, data=params) | Make an HTTP POST request to the Parser API.
:param url: url to which to make the request
:param post_params: POST data to send along. Expected to be a dict. | Below is the the instruction that describes the task:
### Input:
Make an HTTP POST request to the Parser API.
:param url: url to which to make the request
:param post_params: POST data to send along. Expected to be a dict.
### Response:
def post(self, url, post_params=None):
"""
Make an HTTP POST request to the Parser API.
:param url: url to which to make the request
:param post_params: POST data to send along. Expected to be a dict.
"""
post_params['token'] = self.token
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return requests.post(url, data=params) |
def example_reading_spec(self):
"""Return a mix of env and video data fields and decoders."""
video_fields, video_decoders = (
video_utils.VideoProblem.example_reading_spec(self))
env_fields, env_decoders = env_problem.EnvProblem.example_reading_spec(self)
# Remove raw observations field since we want to capture them as videos.
env_fields.pop(env_problem.OBSERVATION_FIELD)
env_decoders.pop(env_problem.OBSERVATION_FIELD)
# Add frame number spec and decoder.
env_fields[_FRAME_NUMBER_FIELD] = tf.FixedLenFeature((1,), tf.int64)
env_decoders[
_FRAME_NUMBER_FIELD] = tf.contrib.slim.tfexample_decoder.Tensor(
_FRAME_NUMBER_FIELD)
# Add video fields and decoders
env_fields.update(video_fields)
env_decoders.update(video_decoders)
return env_fields, env_decoders | Return a mix of env and video data fields and decoders. | Below is the the instruction that describes the task:
### Input:
Return a mix of env and video data fields and decoders.
### Response:
def example_reading_spec(self):
"""Return a mix of env and video data fields and decoders."""
video_fields, video_decoders = (
video_utils.VideoProblem.example_reading_spec(self))
env_fields, env_decoders = env_problem.EnvProblem.example_reading_spec(self)
# Remove raw observations field since we want to capture them as videos.
env_fields.pop(env_problem.OBSERVATION_FIELD)
env_decoders.pop(env_problem.OBSERVATION_FIELD)
# Add frame number spec and decoder.
env_fields[_FRAME_NUMBER_FIELD] = tf.FixedLenFeature((1,), tf.int64)
env_decoders[
_FRAME_NUMBER_FIELD] = tf.contrib.slim.tfexample_decoder.Tensor(
_FRAME_NUMBER_FIELD)
# Add video fields and decoders
env_fields.update(video_fields)
env_decoders.update(video_decoders)
return env_fields, env_decoders |
def get_module_by_name(self, modName):
"""
@type modName: int
@param modName:
Name of the module to look for, as returned by L{Module.get_name}.
If two or more modules with the same name are loaded, only one
of the matching modules is returned.
You can also pass a full pathname to the DLL file.
This works correctly even if two modules with the same name
are loaded from different paths.
@rtype: L{Module}
@return: C{Module} object that best matches the given name.
Returns C{None} if no C{Module} can be found.
"""
# Convert modName to lowercase.
# This helps make case insensitive string comparisons.
modName = modName.lower()
# modName is an absolute pathname.
if PathOperations.path_is_absolute(modName):
for lib in self.iter_modules():
if modName == lib.get_filename().lower():
return lib
return None # Stop trying to match the name.
# Get all the module names.
# This prevents having to iterate through the module list
# more than once.
modDict = [ ( lib.get_name(), lib ) for lib in self.iter_modules() ]
modDict = dict(modDict)
# modName is a base filename.
if modName in modDict:
return modDict[modName]
# modName is a base filename without extension.
filepart, extpart = PathOperations.split_extension(modName)
if filepart and extpart:
if filepart in modDict:
return modDict[filepart]
# modName is a base address.
try:
baseAddress = HexInput.integer(modName)
except ValueError:
return None
if self.has_module(baseAddress):
return self.get_module(baseAddress)
# Module not found.
return None | @type modName: int
@param modName:
Name of the module to look for, as returned by L{Module.get_name}.
If two or more modules with the same name are loaded, only one
of the matching modules is returned.
You can also pass a full pathname to the DLL file.
This works correctly even if two modules with the same name
are loaded from different paths.
@rtype: L{Module}
@return: C{Module} object that best matches the given name.
Returns C{None} if no C{Module} can be found. | Below is the the instruction that describes the task:
### Input:
@type modName: int
@param modName:
Name of the module to look for, as returned by L{Module.get_name}.
If two or more modules with the same name are loaded, only one
of the matching modules is returned.
You can also pass a full pathname to the DLL file.
This works correctly even if two modules with the same name
are loaded from different paths.
@rtype: L{Module}
@return: C{Module} object that best matches the given name.
Returns C{None} if no C{Module} can be found.
### Response:
def get_module_by_name(self, modName):
    """
    @type  modName: str
    @param modName:
        Name of the module to look for, as returned by L{Module.get_name}.
        If two or more modules with the same name are loaded, only one
        of the matching modules is returned.
        You can also pass a full pathname to the DLL file.
        This works correctly even if two modules with the same name
        are loaded from different paths.
        A string containing the module's base address is also accepted
        (any format accepted by L{HexInput.integer} — see the last
        fallback below).
    @rtype:  L{Module}
    @return: C{Module} object that best matches the given name.
        Returns C{None} if no C{Module} can be found.
    """
    # Convert modName to lowercase.
    # This helps make case insensitive string comparisons.
    modName = modName.lower()
    # modName is an absolute pathname: match against full filenames only.
    if PathOperations.path_is_absolute(modName):
        for lib in self.iter_modules():
            if modName == lib.get_filename().lower():
                return lib
        return None  # Stop trying to match the name.
    # Get all the module names.
    # This prevents having to iterate through the module list
    # more than once.
    modDict = [ ( lib.get_name(), lib ) for lib in self.iter_modules() ]
    modDict = dict(modDict)
    # modName is a base filename.
    if modName in modDict:
        return modDict[modName]
    # modName is a base filename without extension.
    filepart, extpart = PathOperations.split_extension(modName)
    if filepart and extpart:
        if filepart in modDict:
            return modDict[filepart]
    # Last resort: modName is a base address encoded as a string.
    try:
        baseAddress = HexInput.integer(modName)
    except ValueError:
        # Not a parseable address either: give up.
        return None
    if self.has_module(baseAddress):
        return self.get_module(baseAddress)
    # Module not found.
    return None
def html(self, url, timeout=None):
"""High level method to get http request response in text.
smartly handle the encoding problem.
"""
response = self.get_response(url, timeout=timeout)
if response:
domain = self.get_domain(url)
if domain in self.domain_encoding_map: # domain have been visited
try: # apply extreme decoding
html = self.decoder.decode(response.content,
self.domain_encoding_map[domain])[0]
return html
except Exception as e:
print(e)
return None
else: # never visit this domain
try:
html, encoding = self.decoder.autodecode(response.content)
# save chardet analysis result
self.domain_encoding_map[domain] = encoding
return html
except Exception as e:
print(e)
return None
else:
return None | High level method to get http request response in text.
smartly handle the encoding problem. | Below is the instruction that describes the task:
### Input:
High level method to get http request response in text.
smartly handle the encoding problem.
### Response:
def html(self, url, timeout=None):
    """Fetch ``url`` and return the response body as decoded text.

    The character encoding is resolved per domain: the first successful
    chardet analysis for a domain is cached in ``self.domain_encoding_map``
    and reused on subsequent requests to the same domain.  Returns ``None``
    when the request or the decoding fails.
    """
    response = self.get_response(url, timeout=timeout)
    if not response:
        return None
    domain = self.get_domain(url)
    if domain in self.domain_encoding_map:
        # Domain seen before: reuse the cached encoding.
        try:
            return self.decoder.decode(
                response.content, self.domain_encoding_map[domain])[0]
        except Exception as exc:
            print(exc)
            return None
    # First visit to this domain: autodetect, then cache the result.
    try:
        text, detected_encoding = self.decoder.autodecode(response.content)
        self.domain_encoding_map[domain] = detected_encoding
        return text
    except Exception as exc:
        print(exc)
        return None
def array(self):
"""
The underlying array of shape (N, L, I)
"""
return numpy.array([self[sid].array for sid in sorted(self)]) | The underlying array of shape (N, L, I) | Below is the the instruction that describes the task:
### Input:
The underlying array of shape (N, L, I)
### Response:
def array(self):
    """Return the underlying array of shape (N, L, I).

    Rows are gathered in sorted-key order, one per entry in this container.
    """
    ordered_keys = sorted(self)
    return numpy.array([self[key].array for key in ordered_keys])
def create_bird_config_files(bird_configuration):
"""Create bird configuration files per IP version.
Creates bird configuration files if they don't exist. It also creates the
directories where we store the history of changes, if this functionality is
enabled.
Arguments:
bird_configuration (dict): A dictionary with settings for bird.
Returns:
None
Raises:
ValueError if we can't create bird configuration files and the
directory to store the history of changes in bird configuration file.
"""
for ip_version in bird_configuration:
# This creates the file if it doesn't exist.
config_file = bird_configuration[ip_version]['config_file']
try:
touch(config_file)
except OSError as exc:
raise ValueError("failed to create {f}:{e}"
.format(f=config_file, e=exc))
if bird_configuration[ip_version]['keep_changes']:
history_dir = os.path.join(os.path.dirname(config_file), 'history')
try:
os.mkdir(history_dir)
except FileExistsError:
pass
except OSError as exc:
raise ValueError("failed to make directory {d} for keeping a "
"history of changes for {b}:{e}"
.format(d=history_dir, b=config_file, e=exc))
else:
print("{d} is created".format(d=history_dir)) | Create bird configuration files per IP version.
Creates bird configuration files if they don't exist. It also creates the
directories where we store the history of changes, if this functionality is
enabled.
Arguments:
bird_configuration (dict): A dictionary with settings for bird.
Returns:
None
Raises:
ValueError if we can't create bird configuration files and the
directory to store the history of changes in bird configuration file. | Below is the instruction that describes the task:
### Input:
Create bird configuration files per IP version.
Creates bird configuration files if they don't exist. It also creates the
directories where we store the history of changes, if this functionality is
enabled.
Arguments:
bird_configuration (dict): A dictionary with settings for bird.
Returns:
None
Raises:
ValueError if we can't create bird configuration files and the
directory to store the history of changes in bird configuration file.
### Response:
def create_bird_config_files(bird_configuration):
    """Create bird configuration files per IP version.

    Creates bird configuration files if they don't exist.  It also creates
    the directories where we store the history of changes, if that
    functionality is enabled for the IP version.

    Arguments:
        bird_configuration (dict): A dictionary with settings for bird.

    Returns:
        None

    Raises:
        ValueError if we can't create bird configuration files or the
        directory to store the history of changes in bird configuration
        file.
    """
    for ip_version, settings in bird_configuration.items():
        config_file = settings['config_file']
        # touch() creates the file when it doesn't exist yet.
        try:
            touch(config_file)
        except OSError as exc:
            raise ValueError("failed to create {f}:{e}"
                             .format(f=config_file, e=exc))
        if not settings['keep_changes']:
            continue
        history_dir = os.path.join(os.path.dirname(config_file), 'history')
        try:
            os.mkdir(history_dir)
        except FileExistsError:
            # Directory already present from a previous run: nothing to do.
            pass
        except OSError as exc:
            raise ValueError("failed to make directory {d} for keeping a "
                             "history of changes for {b}:{e}"
                             .format(d=history_dir, b=config_file, e=exc))
        else:
            print("{d} is created".format(d=history_dir))
def _make_expanded_field_serializer(
self, name, nested_expand, nested_fields, nested_omit
):
"""
Returns an instance of the dynamically created nested serializer.
"""
field_options = self.expandable_fields[name]
serializer_class = field_options[0]
serializer_settings = copy.deepcopy(field_options[1])
if name in nested_expand:
serializer_settings["expand"] = nested_expand[name]
if name in nested_fields:
serializer_settings["fields"] = nested_fields[name]
if name in nested_omit:
serializer_settings["omit"] = nested_omit[name]
if serializer_settings.get("source") == name:
del serializer_settings["source"]
if type(serializer_class) == str:
serializer_class = self._import_serializer_class(serializer_class)
return serializer_class(**serializer_settings) | Returns an instance of the dynamically created nested serializer. | Below is the the instruction that describes the task:
### Input:
Returns an instance of the dynamically created nested serializer.
### Response:
def _make_expanded_field_serializer(
    self, name, nested_expand, nested_fields, nested_omit
):
    """
    Returns an instance of the dynamically created nested serializer.

    ``name`` must be a key of ``self.expandable_fields`` mapping to a
    ``(serializer_class, settings)`` pair.  The settings are deep-copied so
    the shared declaration is never mutated, then augmented with the nested
    ``expand`` / ``fields`` / ``omit`` options requested for this field.
    A serializer class given as a string is imported lazily via
    ``self._import_serializer_class``.
    """
    field_options = self.expandable_fields[name]
    serializer_class = field_options[0]
    # Deep copy so per-request tweaks never leak back into the class-level
    # expandable_fields declaration.
    serializer_settings = copy.deepcopy(field_options[1])
    if name in nested_expand:
        serializer_settings["expand"] = nested_expand[name]
    if name in nested_fields:
        serializer_settings["fields"] = nested_fields[name]
    if name in nested_omit:
        serializer_settings["omit"] = nested_omit[name]
    if serializer_settings.get("source") == name:
        # A source equal to the field name is redundant — drop it so the
        # nested serializer doesn't receive it.
        del serializer_settings["source"]
    # Use isinstance rather than a type() comparison; this also accepts
    # str subclasses (e.g. lazily-evaluated dotted paths).
    if isinstance(serializer_class, str):
        serializer_class = self._import_serializer_class(serializer_class)
    return serializer_class(**serializer_settings)
def restart_apps_or_services(app_or_service_names=None):
"""Restart any containers associated with Dusty, or associated with
the provided app_or_service_names."""
if app_or_service_names:
log_to_client("Restarting the following apps or services: {}".format(', '.join(app_or_service_names)))
else:
log_to_client("Restarting all active containers associated with Dusty")
if app_or_service_names:
specs = spec_assembler.get_assembled_specs()
specs_list = [specs['apps'][app_name] for app_name in app_or_service_names if app_name in specs['apps']]
repos = set()
for spec in specs_list:
if spec['repo']:
repos = repos.union(spec_assembler.get_same_container_repos_from_spec(spec))
nfs.update_nfs_with_repos(repos)
else:
nfs.update_nfs_with_repos(spec_assembler.get_all_repos(active_only=True, include_specs_repo=False))
compose.restart_running_services(app_or_service_names) | Restart any containers associated with Dusty, or associated with
the provided app_or_service_names. | Below is the the instruction that describes the task:
### Input:
Restart any containers associated with Dusty, or associated with
the provided app_or_service_names.
### Response:
def restart_apps_or_services(app_or_service_names=None):
    """Restart containers managed by Dusty.

    When ``app_or_service_names`` is given, only those apps or services are
    restarted (after refreshing the NFS exports for the repos their
    containers mount); otherwise every active Dusty container is restarted.
    """
    if app_or_service_names:
        log_to_client("Restarting the following apps or services: {}".format(', '.join(app_or_service_names)))
        specs = spec_assembler.get_assembled_specs()
        repos = set()
        for app_name in app_or_service_names:
            if app_name not in specs['apps']:
                continue
            spec = specs['apps'][app_name]
            if spec['repo']:
                repos.update(spec_assembler.get_same_container_repos_from_spec(spec))
        nfs.update_nfs_with_repos(repos)
    else:
        log_to_client("Restarting all active containers associated with Dusty")
        nfs.update_nfs_with_repos(spec_assembler.get_all_repos(active_only=True, include_specs_repo=False))
    compose.restart_running_services(app_or_service_names)
def console_output(msg, logging_msg=None):
"""Use instead of print, to clear the status information before printing"""
assert isinstance(msg, bytes)
assert isinstance(logging_msg, bytes) or logging_msg is None
from polysh import remote_dispatcher
remote_dispatcher.log(logging_msg or msg)
if remote_dispatcher.options.interactive:
from polysh.stdin import the_stdin_thread
the_stdin_thread.no_raw_input()
global last_status_length
if last_status_length:
safe_write('\r{}\r'.format(
last_status_length * ' ').encode())
last_status_length = 0
safe_write(msg) | Use instead of print, to clear the status information before printing | Below is the the instruction that describes the task:
### Input:
Use instead of print, to clear the status information before printing
### Response:
def console_output(msg, logging_msg=None):
    """Use instead of print, to clear the status information before printing"""
    assert isinstance(msg, bytes)
    assert logging_msg is None or isinstance(logging_msg, bytes)
    from polysh import remote_dispatcher
    remote_dispatcher.log(logging_msg or msg)
    if remote_dispatcher.options.interactive:
        from polysh.stdin import the_stdin_thread
        the_stdin_thread.no_raw_input()
        global last_status_length
        if last_status_length:
            # Blank out the previously written status line.
            blanks = last_status_length * ' '
            safe_write('\r{}\r'.format(blanks).encode())
            last_status_length = 0
    safe_write(msg)
def send_document(self, chat_id, data, reply_to_message_id=None, caption=None, reply_markup=None,
parse_mode=None, disable_notification=None, timeout=None):
"""
Use this method to send general files.
:param chat_id:
:param data:
:param reply_to_message_id:
:param reply_markup:
:param parse_mode:
:param disable_notification:
:return: API reply.
"""
return types.Message.de_json(
apihelper.send_data(self.token, chat_id, data, 'document', reply_to_message_id, reply_markup,
parse_mode, disable_notification, timeout, caption=caption)) | Use this method to send general files.
:param chat_id:
:param data:
:param reply_to_message_id:
:param reply_markup:
:param parse_mode:
:param disable_notification:
:return: API reply. | Below is the the instruction that describes the task:
### Input:
Use this method to send general files.
:param chat_id:
:param data:
:param reply_to_message_id:
:param reply_markup:
:param parse_mode:
:param disable_notification:
:return: API reply.
### Response:
def send_document(self, chat_id, data, reply_to_message_id=None, caption=None, reply_markup=None,
                  parse_mode=None, disable_notification=None, timeout=None):
    """
    Use this method to send general files.

    :param chat_id: target chat identifier
    :param data: the document to send
    :param reply_to_message_id: optional message to reply to
    :param caption: optional caption for the document
    :param reply_markup: optional reply markup object
    :param parse_mode: optional parse mode for the caption
    :param disable_notification: send silently when truthy
    :param timeout: optional request timeout
    :return: API reply.
    """
    raw_reply = apihelper.send_data(
        self.token, chat_id, data, 'document', reply_to_message_id,
        reply_markup, parse_mode, disable_notification, timeout,
        caption=caption)
    return types.Message.de_json(raw_reply)
def evaluate_dir(sample_dir):
"""Evaluate all recordings in `sample_dir`.
Parameters
----------
sample_dir : string
The path to a directory with *.inkml files.
Returns
-------
list of dictionaries
Each dictionary contains the keys 'filename' and 'results', where
'results' itself is a list of dictionaries. Each of the results has
the keys 'latex' and 'probability'
"""
results = []
if sample_dir[-1] == "/":
sample_dir = sample_dir[:-1]
for filename in glob.glob("%s/*.inkml" % sample_dir):
results.append(evaluate_inkml(filename))
return results | Evaluate all recordings in `sample_dir`.
Parameters
----------
sample_dir : string
The path to a directory with *.inkml files.
Returns
-------
list of dictionaries
Each dictionary contains the keys 'filename' and 'results', where
'results' itself is a list of dictionaries. Each of the results has
the keys 'latex' and 'probability' | Below is the the instruction that describes the task:
### Input:
Evaluate all recordings in `sample_dir`.
Parameters
----------
sample_dir : string
The path to a directory with *.inkml files.
Returns
-------
list of dictionaries
Each dictionary contains the keys 'filename' and 'results', where
'results' itself is a list of dictionaries. Each of the results has
the keys 'latex' and 'probability'
### Response:
def evaluate_dir(sample_dir):
    """Evaluate all recordings in `sample_dir`.

    Parameters
    ----------
    sample_dir : string
        The path to a directory with *.inkml files.  A single trailing
        slash is tolerated and ignored.

    Returns
    -------
    list of dictionaries
        Each dictionary contains the keys 'filename' and 'results', where
        'results' itself is a list of dictionaries. Each of the results has
        the keys 'latex' and 'probability'
    """
    # endswith() is safe on the empty string, unlike indexing sample_dir[-1]
    # (the original crashed with IndexError for "").
    if sample_dir.endswith("/"):
        sample_dir = sample_dir[:-1]
    return [evaluate_inkml(filename)
            for filename in glob.glob("%s/*.inkml" % sample_dir)]
def _sim_prediction(self, theta, theta_t, Y, scores, h, t_params, simulations):
""" Simulates a h-step ahead mean prediction
Parameters
----------
theta : np.array
The past predicted values
theta_t : np.array
The past local linear trend
Y : np.array
The past data
scores : np.array
The past scores
h : int
How many steps ahead for the prediction
t_params : np.array
A vector of (transformed) latent variables
simulations : int
How many simulations to perform
Returns
----------
Matrix of simulations
"""
model_scale, model_shape, model_skewness = self._get_scale_and_shape(t_params)
sim_vector = np.zeros([simulations,h])
for n in range(0,simulations):
Y_exp = Y.copy()
theta_exp = theta.copy()
theta_t_exp = theta_t.copy()
scores_exp = scores.copy()
#(TODO: vectorize the inner construction here)
for t in range(0,h):
new_value1 = theta_t_exp[-1] + theta_exp[-1] + t_params[0]*scores_exp[-1]
new_value2 = theta_t_exp[-1] + t_params[1]*scores_exp[-1]
if self.model_name2 == "Exponential":
rnd_value = self.family.draw_variable(1.0/self.link(new_value1),model_scale,model_shape,model_skewness,1)[0]
else:
rnd_value = self.family.draw_variable(self.link(new_value1),model_scale,model_shape,model_skewness,1)[0]
Y_exp = np.append(Y_exp,[rnd_value])
theta_exp = np.append(theta_exp,[new_value1]) # For indexing consistency
theta_t_exp = np.append(theta_t_exp,[new_value2])
scores_exp = np.append(scores_exp,scores[np.random.randint(scores.shape[0])]) # expectation of score is zero
sim_vector[n] = Y_exp[-h:]
return np.transpose(sim_vector) | Simulates a h-step ahead mean prediction
Parameters
----------
theta : np.array
The past predicted values
theta_t : np.array
The past local linear trend
Y : np.array
The past data
scores : np.array
The past scores
h : int
How many steps ahead for the prediction
t_params : np.array
A vector of (transformed) latent variables
simulations : int
How many simulations to perform
Returns
----------
Matrix of simulations | Below is the the instruction that describes the task:
### Input:
Simulates a h-step ahead mean prediction
Parameters
----------
theta : np.array
The past predicted values
theta_t : np.array
The past local linear trend
Y : np.array
The past data
scores : np.array
The past scores
h : int
How many steps ahead for the prediction
t_params : np.array
A vector of (transformed) latent variables
simulations : int
How many simulations to perform
Returns
----------
Matrix of simulations
### Response:
def _sim_prediction(self, theta, theta_t, Y, scores, h, t_params, simulations):
    """ Simulates a h-step ahead mean prediction

    Parameters
    ----------
    theta : np.array
        The past predicted values

    theta_t : np.array
        The past local linear trend

    Y : np.array
        The past data

    scores : np.array
        The past scores

    h : int
        How many steps ahead for the prediction

    t_params : np.array
        A vector of (transformed) latent variables

    simulations : int
        How many simulations to perform

    Returns
    ----------
    Matrix of simulations, shape (h, simulations)
    """
    model_scale, model_shape, model_skewness = self._get_scale_and_shape(t_params)
    # One simulated path per row; each path extends the history by h steps.
    sim_vector = np.zeros([simulations,h])
    for n in range(0,simulations):
        # Work on copies so every simulated path restarts from the same history.
        Y_exp = Y.copy()
        theta_exp = theta.copy()
        theta_t_exp = theta_t.copy()
        scores_exp = scores.copy()
        #(TODO: vectorize the inner construction here)
        for t in range(0,h):
            # Next level: previous trend + previous level + score-driven update.
            new_value1 = theta_t_exp[-1] + theta_exp[-1] + t_params[0]*scores_exp[-1]
            # Next trend: previous trend + its own score-driven update.
            new_value2 = theta_t_exp[-1] + t_params[1]*scores_exp[-1]
            if self.model_name2 == "Exponential":
                # The Exponential family is parameterised by the reciprocal
                # of the linked value here.
                rnd_value = self.family.draw_variable(1.0/self.link(new_value1),model_scale,model_shape,model_skewness,1)[0]
            else:
                rnd_value = self.family.draw_variable(self.link(new_value1),model_scale,model_shape,model_skewness,1)[0]
            Y_exp = np.append(Y_exp,[rnd_value])
            theta_exp = np.append(theta_exp,[new_value1]) # For indexing consistency
            theta_t_exp = np.append(theta_t_exp,[new_value2])
            # Bootstrap a historical score for the next step.
            scores_exp = np.append(scores_exp,scores[np.random.randint(scores.shape[0])]) # expectation of score is zero
        # Keep only the h newly simulated observations of this path.
        sim_vector[n] = Y_exp[-h:]
    return np.transpose(sim_vector)
def check_key(self, key):
"""Checks key and add key_prefix."""
return _check_key(key, allow_unicode_keys=self.allow_unicode_keys,
key_prefix=self.key_prefix) | Checks key and add key_prefix. | Below is the the instruction that describes the task:
### Input:
Checks key and add key_prefix.
### Response:
def check_key(self, key):
    """Validate ``key`` and apply the configured key prefix."""
    return _check_key(
        key,
        allow_unicode_keys=self.allow_unicode_keys,
        key_prefix=self.key_prefix,
    )
def cv_compute(self, b, A, B, C, mK, f, m1, m2):
'''
Compute the model (cross-validation step only) for chunk :py:obj:`b`.
'''
A = np.sum([l * a for l, a in zip(self.lam[b], A)
if l is not None], axis=0)
B = np.sum([l * b for l, b in zip(self.lam[b], B)
if l is not None], axis=0)
W = np.linalg.solve(mK + A + C, f)
if self.transit_model is None:
model = np.dot(B, W)
else:
w_pld = np.concatenate([l * np.dot(self.X(n, m2).T, W)
for n, l in enumerate(self.lam[b])
if l is not None])
model = np.dot(np.hstack(
[self.X(n, m1) for n, l in enumerate(self.lam[b])
if l is not None]), w_pld)
model -= np.nanmedian(model)
return model | Compute the model (cross-validation step only) for chunk :py:obj:`b`. | Below is the the instruction that describes the task:
### Input:
Compute the model (cross-validation step only) for chunk :py:obj:`b`.
### Response:
def cv_compute(self, b, A, B, C, mK, f, m1, m2):
    '''
    Compute the model (cross-validation step only) for chunk :py:obj:`b`.

    :py:obj:`b` indexes the per-chunk regularization parameters
    ``self.lam[b]``; ``A`` and ``B`` are sequences of per-order matrix
    pieces combined below, ``C`` and ``mK`` complete the linear system and
    ``f`` is its right-hand side.  ``m1``/``m2`` are passed through to
    ``self.X`` -- presumably index masks; confirm against the caller.
    '''
    # Collapse the per-order pieces, weighted by this chunk's lambdas;
    # orders whose lambda is None are excluded from the model.
    A = np.sum([l * a for l, a in zip(self.lam[b], A)
                if l is not None], axis=0)
    # NOTE(review): the comprehension variable ``b`` shadows the chunk
    # index, but only inside the comprehension scope; ``self.lam[b]`` is
    # evaluated in the enclosing scope before iteration starts, so the
    # subscript is safe.
    B = np.sum([l * b for l, b in zip(self.lam[b], B)
                if l is not None], axis=0)
    # Solve the regularized linear system for the weights.
    W = np.linalg.solve(mK + A + C, f)
    if self.transit_model is None:
        model = np.dot(B, W)
    else:
        # With an explicit transit model, rebuild the model from the
        # design matrices and the recovered per-order weights.
        w_pld = np.concatenate([l * np.dot(self.X(n, m2).T, W)
                                for n, l in enumerate(self.lam[b])
                                if l is not None])
        model = np.dot(np.hstack(
            [self.X(n, m1) for n, l in enumerate(self.lam[b])
             if l is not None]), w_pld)
        # Subtract the median (ignoring NaNs) to center the model.
        model -= np.nanmedian(model)
    return model
def approx_count_distinct(col, rsd=None):
"""Aggregate function: returns a new :class:`Column` for approximate distinct count of
column `col`.
:param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more
efficient to use :func:`countDistinct`
>>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect()
[Row(distinct_ages=2)]
"""
sc = SparkContext._active_spark_context
if rsd is None:
jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col))
else:
jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col), rsd)
return Column(jc) | Aggregate function: returns a new :class:`Column` for approximate distinct count of
column `col`.
:param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more
efficient to use :func:`countDistinct`
>>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect()
[Row(distinct_ages=2)] | Below is the the instruction that describes the task:
### Input:
Aggregate function: returns a new :class:`Column` for approximate distinct count of
column `col`.
:param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more
efficient to use :func:`countDistinct`
>>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect()
[Row(distinct_ages=2)]
### Response:
def approx_count_distinct(col, rsd=None):
    """Aggregate function: returns a new :class:`Column` for approximate distinct count of
    column `col`.

    :param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more
        efficient to use :func:`countDistinct`

    >>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect()
    [Row(distinct_ages=2)]
    """
    sc = SparkContext._active_spark_context
    # Only forward rsd to the JVM when the caller supplied one.
    jvm_args = [_to_java_column(col)]
    if rsd is not None:
        jvm_args.append(rsd)
    jc = sc._jvm.functions.approx_count_distinct(*jvm_args)
    return Column(jc)
def media(self):
"""
Combines media of both components and adds a small script that unchecks
the clear box, when a value in any wrapped input is modified.
"""
return self.widget.media + self.checkbox.media + Media(self.Media) | Combines media of both components and adds a small script that unchecks
the clear box, when a value in any wrapped input is modified. | Below is the the instruction that describes the task:
### Input:
Combines media of both components and adds a small script that unchecks
the clear box, when a value in any wrapped input is modified.
### Response:
def media(self):
    """
    Return the combined media of the wrapped widget and the checkbox, plus
    this widget's own ``Media`` declaration (a small script that unchecks
    the clear box when a value in any wrapped input is modified).
    """
    combined = self.widget.media + self.checkbox.media
    return combined + Media(self.Media)
def up(force=True, env=None, **kwargs):
"Starts a new experiment"
inventory = os.path.join(os.getcwd(), "hosts")
conf = Configuration.from_dictionnary(provider_conf)
provider = Enos_vagrant(conf)
roles, networks = provider.init()
check_networks(roles, networks)
env["roles"] = roles
env["networks"] = networks | Starts a new experiment | Below is the the instruction that describes the task:
### Input:
Starts a new experiment
### Response:
def up(force=True, env=None, **kwargs):
    """Starts a new experiment.

    Provisions the Vagrant boxes described by ``provider_conf``, sanity
    checks the provisioned networks, and stores the resulting roles and
    networks in ``env`` for subsequent tasks.
    """
    # The original computed an ``inventory`` path here that was never used;
    # it has been removed (pure os.getcwd/os.path.join, no side effects).
    conf = Configuration.from_dictionnary(provider_conf)
    provider = Enos_vagrant(conf)
    roles, networks = provider.init()
    check_networks(roles, networks)
    env["roles"] = roles
    env["networks"] = networks
def drop_indexes(self):
"""Delete all indexes for the database"""
LOG.warning("Dropping all indexe")
for collection_name in INDEXES:
LOG.warning("Dropping all indexes for collection name %s", collection_name)
self.db[collection_name].drop_indexes() | Delete all indexes for the database | Below is the the instruction that describes the task:
### Input:
Delete all indexes for the database
### Response:
def drop_indexes(self):
    """Delete all indexes for the database."""
    # Fixed log-message typo: "indexe" -> "indexes".
    LOG.warning("Dropping all indexes")
    for collection_name in INDEXES:
        LOG.warning("Dropping all indexes for collection name %s", collection_name)
        self.db[collection_name].drop_indexes()
def to_native_types(self, slicer=None, **kwargs):
"""
Format specified values of `self` and return them.
Parameters
----------
slicer : int, array-like
An indexer into `self` that specifies which values
are used in the formatting process.
kwargs : dict
Options for specifying how the values should be formatted.
These options include the following:
1) na_rep : str
The value that serves as a placeholder for NULL values
2) quoting : bool or None
Whether or not there are quoted values in `self`
3) date_format : str
The format used to represent date-like values
"""
values = self
if slicer is not None:
values = values[slicer]
return values._format_native_types(**kwargs) | Format specified values of `self` and return them.
Parameters
----------
slicer : int, array-like
An indexer into `self` that specifies which values
are used in the formatting process.
kwargs : dict
Options for specifying how the values should be formatted.
These options include the following:
1) na_rep : str
The value that serves as a placeholder for NULL values
2) quoting : bool or None
Whether or not there are quoted values in `self`
3) date_format : str
The format used to represent date-like values | Below is the the instruction that describes the task:
### Input:
Format specified values of `self` and return them.
Parameters
----------
slicer : int, array-like
An indexer into `self` that specifies which values
are used in the formatting process.
kwargs : dict
Options for specifying how the values should be formatted.
These options include the following:
1) na_rep : str
The value that serves as a placeholder for NULL values
2) quoting : bool or None
Whether or not there are quoted values in `self`
3) date_format : str
The format used to represent date-like values
### Response:
def to_native_types(self, slicer=None, **kwargs):
    """
    Format specified values of `self` and return them.

    Parameters
    ----------
    slicer : int, array-like
        An indexer into `self` that specifies which values
        are used in the formatting process.
    kwargs : dict
        Options for specifying how the values should be formatted.
        These options include the following:

        1) na_rep : str
            The value that serves as a placeholder for NULL values
        2) quoting : bool or None
            Whether or not there are quoted values in `self`
        3) date_format : str
            The format used to represent date-like values
    """
    values = self if slicer is None else self[slicer]
    return values._format_native_types(**kwargs)
def computeStatistics(tped, tfam, snps):
    """Computes the completion and concordance of each SNPs.

    :param tped: a representation of the ``tped``.
    :param tfam: a representation of the ``tfam``
    :param snps: the position of the duplicated markers in the ``tped``.

    :type tped: numpy.array
    :type tfam: list
    :type snps: dict

    :returns: a tuple containing the completion of duplicated markers
              (:py:class:`numpy.array`) as first element, and the concordance
              (:py:class:`dict`) of duplicated markers, as last element.

    A marker's completion is compute using this formula (where :math:`G_i` is
    the set of genotypes for the marker :math:`i`):

    .. math::
        Completion_i = \\frac{||g \\in G_i \\textrm{ where } g \\neq 0||}
                             {||G_i||}

    The pairwise concordance between duplicated markers is compute as follow
    (where :math:`G_i` and :math:`G_j` are the sets of genotypes for markers
    :math:`i` and :math:`j`, respectively):

    .. math::
        Concordance_{i,j} = \\frac{
            ||g \\in G_i \\cup G_j \\textrm{ where } g_i = g_j \\neq 0||
        }{
            ||g \\in G_i \\cup G_j \\textrm{ where } g \\neq 0||
        }

    Hence, we only computes the numerators and denominators of the completion
    and concordance, for future reference.

    .. note::
        When the genotypes are not comparable, the function tries to flip one
        of the genotype to see if it becomes comparable.
    """
    # NOTE(review): Python 2 only (xrange, dict.iteritems, dict.iterkeys).
    # Genotypes are space-separated allele strings, e.g. "A C"; "0 0"
    # denotes a missing genotype.
    # Completion accumulators: row 0 holds the numerators (non-missing
    # calls), row 1 the denominators (total calls), one column per marker.
    completion = np.array([[0 for i in xrange(len(tped))],
                           [0 for i in xrange(len(tped))]])
    # Concordance accumulators: per duplicated marker, a pair of
    # nbDup x nbDup matrices (numerators, denominators).
    concordance = {}
    for snpID in snps.keys():
        nbDup = len(snps[snpID])
        concordance[snpID] = [
            np.asmatrix(np.zeros((nbDup, nbDup), dtype=int)),
            np.asmatrix(np.zeros((nbDup, nbDup), dtype=int))
        ]
    # Sample indexes by sex code (tfam column index 4: 1=men, 2=women,
    # 0=unknown).
    menIndex = np.where(tfam[:, 4] == "1")
    womenIndex = np.where(tfam[:, 4] == "2")
    noSexIndex = np.where(tfam[:, 4] == "0")
    for snpID, indexes in snps.iteritems():
        nbDup = len(indexes)
        currGenotypes = tped[indexes, 4:]
        chromosome, position = snpID
        # if chromosome == "24":
        #     # Remove the heterozygous men
        #     menToRemove = getIndexOfHeteroMen(currGenotypes, menIndex)
        #     # Remove the women and the no sex
        #     currGenotypes = np.delete(currGenotypes,
        #                               np.hstack((womenIndex, noSexIndex,
        #                                          menToRemove)), 1)
        # elif chromosome == "23":
        #     # Remove the heterozygous men
        #     menToRemove = getIndexOfHeteroMen(currGenotypes, menIndex)
        #     # Remove the no sex
        #     currGenotypes = np.delete(currGenotypes,
        #                               np.hstack((noSexIndex, menToRemove)),
        #                               1)
        for i in xrange(nbDup):
            # Compute completion here (non-missing calls over total calls).
            completion[0][indexes[i]] = len(
                np.where(currGenotypes[i] != "0 0")[0]
            )
            completion[1][indexes[i]] = len(currGenotypes[i])
            for j in xrange(i+1, nbDup):
                # Compute concordance here
                # Removing samples with at least one null genotype
                nullGenotypeIndexes = np.where(
                    np.any(currGenotypes[[i, j]] == "0 0", 0)
                )
                subGenotypes = np.delete(
                    currGenotypes,
                    nullGenotypeIndexes,
                    1,
                )
                # Finding the errors in the subseted genotypes
                errorIndexes = np.where(subGenotypes[i] != subGenotypes[j])[0]
                nbDiff = len(errorIndexes)
                for k in errorIndexes:
                    # Getting the genotypes
                    genotype1 = set(subGenotypes[i, k].split(" "))
                    genotype2 = set(subGenotypes[j, k].split(" "))
                    # Checking for flips
                    if len(genotype1) == len(genotype2):
                        # Both have the same number of different alleles,
                        # so they might be flipped
                        genotype2 = flipGenotype(genotype2)
                        if genotype1 == genotype2:
                            # The genotypes are equivalent after the flip
                            nbDiff -= 1
                # Updating the concordance
                nbTot = len(subGenotypes[i])
                concordance[snpID][0][i, j] = nbTot - nbDiff
                concordance[snpID][0][j, i] = nbTot - nbDiff
                if nbTot == 0:
                    # We will have a division by 0...
                    nbTot = 1
                concordance[snpID][1][i, j] = nbTot
                concordance[snpID][1][j, i] = nbTot
    # A marker is always fully concordant with itself: set the diagonals.
    for snpID in concordance.iterkeys():
        for i in range(len(concordance[snpID][0])):
            concordance[snpID][0][i, i] = 1
            concordance[snpID][1][i, i] = 1
return completion, concordance | Computes the completion and concordance of each SNPs.
:param tped: a representation of the ``tped``.
:param tfam: a representation of the ``tfam``
:param snps: the position of the duplicated markers in the ``tped``.
:type tped: numpy.array
:type tfam: list
:type snps: dict
:returns: a tuple containing the completion of duplicated markers
(:py:class:`numpy.array`) as first element, and the concordance
(:py:class:`dict`) of duplicated markers, as last element.
A marker's completion is compute using this formula (where :math:`G_i` is
the set of genotypes for the marker :math:`i`):
.. math::
Completion_i = \\frac{||g \\in G_i \\textrm{ where } g \\neq 0||}
{||G_i||}
The pairwise concordance between duplicated markers is compute as follow
(where :math:`G_i` and :math:`G_j` are the sets of genotypes for markers
:math:`i` and :math:`j`, respectively):
.. math::
Concordance_{i,j} = \\frac{
||g \\in G_i \\cup G_j \\textrm{ where } g_i = g_j \\neq 0||
}{
||g \\in G_i \\cup G_j \\textrm{ where } g \\neq 0||
}
Hence, we only computes the numerators and denominators of the completion
and concordance, for future reference.
.. note::
When the genotypes are not comparable, the function tries to flip one
of the genotype to see if it becomes comparable. | Below is the the instruction that describes the task:
### Input:
Computes the completion and concordance of each SNPs.
:param tped: a representation of the ``tped``.
:param tfam: a representation of the ``tfam``
:param snps: the position of the duplicated markers in the ``tped``.
:type tped: numpy.array
:type tfam: list
:type snps: dict
:returns: a tuple containing the completion of duplicated markers
(:py:class:`numpy.array`) as first element, and the concordance
(:py:class:`dict`) of duplicated markers, as last element.
A marker's completion is compute using this formula (where :math:`G_i` is
the set of genotypes for the marker :math:`i`):
.. math::
Completion_i = \\frac{||g \\in G_i \\textrm{ where } g \\neq 0||}
{||G_i||}
The pairwise concordance between duplicated markers is compute as follow
(where :math:`G_i` and :math:`G_j` are the sets of genotypes for markers
:math:`i` and :math:`j`, respectively):
.. math::
Concordance_{i,j} = \\frac{
||g \\in G_i \\cup G_j \\textrm{ where } g_i = g_j \\neq 0||
}{
||g \\in G_i \\cup G_j \\textrm{ where } g \\neq 0||
}
Hence, we only computes the numerators and denominators of the completion
and concordance, for future reference.
.. note::
When the genotypes are not comparable, the function tries to flip one
of the genotype to see if it becomes comparable.
### Response:
def computeStatistics(tped, tfam, snps):
    """Computes the completion and concordance of each SNP.

    :param tped: a representation of the ``tped``.
    :param tfam: a representation of the ``tfam``
    :param snps: the position of the duplicated markers in the ``tped``.

    :type tped: numpy.array
    :type tfam: list
    :type snps: dict

    :returns: a tuple containing the completion of duplicated markers
        (:py:class:`numpy.array`) as first element, and the concordance
        (:py:class:`dict`) of duplicated markers, as last element.

    A marker's completion is computed using this formula (where :math:`G_i` is
    the set of genotypes for the marker :math:`i`):

    .. math::
        Completion_i = \\frac{||g \\in G_i \\textrm{ where } g \\neq 0||}
                             {||G_i||}

    The pairwise concordance between duplicated markers is computed as follows
    (where :math:`G_i` and :math:`G_j` are the sets of genotypes for markers
    :math:`i` and :math:`j`, respectively):

    .. math::
        Concordance_{i,j} = \\frac{
            ||g \\in G_i \\cup G_j \\textrm{ where } g_i = g_j \\neq 0||
        }{
            ||g \\in G_i \\cup G_j \\textrm{ where } g \\neq 0||
        }

    Hence, we only compute the numerators and denominators of the completion
    and concordance, for future reference.

    .. note::
        When the genotypes are not comparable, the function tries to flip one
        of the genotypes to see if it becomes comparable.
    """
    # NOTE: this module uses Python 2 idioms (xrange, iteritems, iterkeys).
    # completion[0][k]: count of non-missing genotypes of marker k (numerator);
    # completion[1][k]: total number of genotypes of marker k (denominator).
    completion = np.array([[0 for i in xrange(len(tped))],
                           [0 for i in xrange(len(tped))]])

    # concordance[snpID] = [numerator matrix, denominator matrix], each of
    # shape (nbDup, nbDup) for the nbDup duplicates of that marker.
    concordance = {}
    for snpID in snps.keys():
        nbDup = len(snps[snpID])
        concordance[snpID] = [
            np.asmatrix(np.zeros((nbDup, nbDup), dtype=int)),
            np.asmatrix(np.zeros((nbDup, nbDup), dtype=int))
        ]
    # Sample indexes split by sex code (5th tfam column: 1=male, 2=female,
    # 0=unknown). Currently only used by the commented-out chr23/24 logic.
    menIndex = np.where(tfam[:, 4] == "1")
    womenIndex = np.where(tfam[:, 4] == "2")
    noSexIndex = np.where(tfam[:, 4] == "0")
    for snpID, indexes in snps.iteritems():
        nbDup = len(indexes)
        # Genotype columns start at index 4 of the tped rows.
        currGenotypes = tped[indexes, 4:]
        chromosome, position = snpID
        # if chromosome == "24":
        #     # Remove the heterozygous men
        #     menToRemove = getIndexOfHeteroMen(currGenotypes, menIndex)
        #     # Remove the women and the no sex
        #     currGenotypes = np.delete(currGenotypes,
        #                               np.hstack((womenIndex, noSexIndex,
        #                                          menToRemove)), 1)
        # elif chromosome == "23":
        #     # Remove the heterozygous men
        #     menToRemove = getIndexOfHeteroMen(currGenotypes, menIndex)
        #     # Remove the no sex
        #     currGenotypes = np.delete(currGenotypes,
        #                               np.hstack((noSexIndex, menToRemove)),
        #                               1)
        for i in xrange(nbDup):
            # Completion: "0 0" encodes a missing genotype.
            completion[0][indexes[i]] = len(
                np.where(currGenotypes[i] != "0 0")[0]
            )
            completion[1][indexes[i]] = len(currGenotypes[i])
            for j in xrange(i+1, nbDup):
                # Concordance for the (i, j) pair of duplicates.
                # Drop samples where either duplicate has a null genotype.
                nullGenotypeIndexes = np.where(
                    np.any(currGenotypes[[i, j]] == "0 0", 0)
                )
                subGenotypes = np.delete(
                    currGenotypes,
                    nullGenotypeIndexes,
                    1,
                )
                # Samples where the two duplicates disagree.
                errorIndexes = np.where(subGenotypes[i] != subGenotypes[j])[0]
                nbDiff = len(errorIndexes)
                for k in errorIndexes:
                    # Genotypes as allele sets, e.g. "A G" -> {"A", "G"}.
                    genotype1 = set(subGenotypes[i, k].split(" "))
                    genotype2 = set(subGenotypes[j, k].split(" "))
                    # Checking for strand flips
                    if len(genotype1) == len(genotype2):
                        # Both have the same number of different alleles,
                        # so they might be flipped
                        genotype2 = flipGenotype(genotype2)
                        if genotype1 == genotype2:
                            # Equivalent after the flip: not a real mismatch.
                            nbDiff -= 1
                # Store numerator (agreements) and denominator (comparisons).
                nbTot = len(subGenotypes[i])
                concordance[snpID][0][i, j] = nbTot - nbDiff
                concordance[snpID][0][j, i] = nbTot - nbDiff
                if nbTot == 0:
                    # Avoid a later division by zero downstream.
                    nbTot = 1
                concordance[snpID][1][i, j] = nbTot
                concordance[snpID][1][j, i] = nbTot
    # A marker is always fully concordant with itself.
    for snpID in concordance.iterkeys():
        for i in range(len(concordance[snpID][0])):
            concordance[snpID][0][i, i] = 1
            concordance[snpID][1][i, i] = 1
    return completion, concordance
def send_message(self,body):
"""
Send a message to the room.
:Parameters:
- `body`: the message body.
:Types:
- `body`: `unicode`
"""
m=Message(to_jid=self.room_jid.bare(),stanza_type="groupchat",body=body)
self.manager.stream.send(m) | Send a message to the room.
:Parameters:
- `body`: the message body.
:Types:
- `body`: `unicode` | Below is the the instruction that describes the task:
### Input:
Send a message to the room.
:Parameters:
- `body`: the message body.
:Types:
- `body`: `unicode`
### Response:
def send_message(self, body):
    """
    Send a message to the room.

    :Parameters:
        - `body`: the message body.
    :Types:
        - `body`: `unicode`
    """
    # Group-chat stanzas are addressed to the room's bare JID.
    stanza = Message(
        to_jid=self.room_jid.bare(),
        stanza_type="groupchat",
        body=body,
    )
    self.manager.stream.send(stanza)
def parse_declaration_expressn_fncall(self, name, params, es):
"""
Parses out cromwell's built-in function calls.
Some of these are special
and need minor adjustments, for example length(), which is equivalent to
python's len() function. Or sub, which is equivalent to re.sub(), but
needs a rearrangement of input variables.
Known to be supported: sub, size, read_tsv, length, select_first.
:param name:
:param params:
:param es:
:return:
"""
# name of the function
if isinstance(name, wdl_parser.Terminal):
if name.str:
# use python's built-in for length()
if name.source_string == 'length':
es = es + 'len('
elif name.source_string == 'stdout':
return es + 'stdout'
else:
es = es + name.source_string + '('
else:
raise NotImplementedError
elif isinstance(name, wdl_parser.Ast):
raise NotImplementedError
elif isinstance(name, wdl_parser.AstList):
raise NotImplementedError
# use python's re.sub() for sub()
if name.source_string == 'sub':
es_params = self.parse_declaration_expressn_fncall_SUBparams(params)
else:
es_params = self.parse_declaration_expressn_fncall_normalparams(params)
if name.source_string == 'glob':
return es + es_params + ', tempDir)'
elif name.source_string == 'size':
return es + es_params + ', fileStore=fileStore)'
else:
return es + es_params + ')' | Parses out cromwell's built-in function calls.
Some of these are special
and need minor adjustments, for example length(), which is equivalent to
python's len() function. Or sub, which is equivalent to re.sub(), but
needs a rearrangement of input variables.
Known to be supported: sub, size, read_tsv, length, select_first.
:param name:
:param params:
:param es:
:return: | Below is the the instruction that describes the task:
### Input:
Parses out cromwell's built-in function calls.
Some of these are special
and need minor adjustments, for example length(), which is equivalent to
python's len() function. Or sub, which is equivalent to re.sub(), but
needs a rearrangement of input variables.
Known to be supported: sub, size, read_tsv, length, select_first.
:param name:
:param params:
:param es:
:return:
### Response:
def parse_declaration_expressn_fncall(self, name, params, es):
    """
    Parses out cromwell's built-in function calls.

    Some of these are special and need minor adjustments; for example
    length(), which is equivalent to python's len() function, or sub(),
    which is equivalent to re.sub() but needs a rearrangement of input
    variables.

    Known to be supported: sub, size, read_tsv, length, select_first.

    :param name: parser node naming the function (expected: a
        wdl_parser.Terminal; Ast/AstList are not implemented).
    :param params: parser node(s) for the call's arguments.
    :param es: the expression string built so far; the call is appended to it.
    :return: the expression string with the translated call appended.
    """
    # Emit the function name (translated where WDL and Python differ).
    if isinstance(name, wdl_parser.Terminal):
        if name.str:
            # use python's built-in for length()
            if name.source_string == 'length':
                es = es + 'len('
            elif name.source_string == 'stdout':
                # stdout takes no arguments; short-circuit without parentheses.
                return es + 'stdout'
            else:
                es = es + name.source_string + '('
        else:
            raise NotImplementedError
    elif isinstance(name, wdl_parser.Ast):
        raise NotImplementedError
    elif isinstance(name, wdl_parser.AstList):
        raise NotImplementedError
    # use python's re.sub() for sub(); its argument order must be rearranged.
    if name.source_string == 'sub':
        es_params = self.parse_declaration_expressn_fncall_SUBparams(params)
    else:
        es_params = self.parse_declaration_expressn_fncall_normalparams(params)
    # Some calls need extra runtime context appended as trailing arguments.
    if name.source_string == 'glob':
        return es + es_params + ', tempDir)'
    elif name.source_string == 'size':
        return es + es_params + ', fileStore=fileStore)'
    else:
        return es + es_params + ')'
def push(self, new_scope=None):
"""Create a new scope
:returns: TODO
"""
if new_scope is None:
new_scope = {
"types": {},
"vars": {}
}
self._curr_scope = new_scope
self._dlog("pushing new scope, scope level = {}".format(self.level()))
self._scope_stack.append(self._curr_scope) | Create a new scope
:returns: TODO | Below is the the instruction that describes the task:
### Input:
Create a new scope
:returns: TODO
### Response:
def push(self, new_scope=None):
    """Create a new scope and make it the current one.

    :param new_scope: an existing scope dict to push, or None to create
        a fresh empty scope with "types" and "vars" tables.
    :returns: TODO
    """
    scope = new_scope if new_scope is not None else {"types": {}, "vars": {}}
    self._curr_scope = scope
    self._dlog("pushing new scope, scope level = {}".format(self.level()))
    self._scope_stack.append(scope)
def loglike(self):
'''
The summed log-probability of all stochastic variables that depend on
self.stochastics, with self.stochastics removed.
'''
sum = logp_of_set(self.children)
if self.verbose > 2:
print_('\t' + self._id + ' Current log-likelihood ', sum)
return sum | The summed log-probability of all stochastic variables that depend on
self.stochastics, with self.stochastics removed. | Below is the the instruction that describes the task:
### Input:
The summed log-probability of all stochastic variables that depend on
self.stochastics, with self.stochastics removed.
### Response:
def loglike(self):
    '''
    The summed log-probability of all stochastic variables that depend on
    self.stochastics, with self.stochastics removed.
    '''
    # Renamed from "sum" to avoid shadowing the builtin.
    total = logp_of_set(self.children)
    if self.verbose > 2:
        print_('\t' + self._id + ' Current log-likelihood ', total)
    return total
def _update_search_index(*, instance, index, update_fields):
"""Process index / update search index update actions."""
if not _in_search_queryset(instance=instance, index=index):
logger.debug(
"Object (%r) is not in search queryset, ignoring update.", instance
)
return
try:
if update_fields:
pre_update.send(
sender=instance.__class__,
instance=instance,
index=index,
update_fields=update_fields,
)
if settings.auto_sync(instance):
instance.update_search_document(
index=index, update_fields=update_fields
)
else:
pre_index.send(sender=instance.__class__, instance=instance, index=index)
if settings.auto_sync(instance):
instance.index_search_document(index=index)
except Exception:
logger.exception("Error handling 'post_save' signal for %s", instance) | Process index / update search index update actions. | Below is the the instruction that describes the task:
### Input:
Process index / update search index update actions.
### Response:
def _update_search_index(*, instance, index, update_fields):
    """Process index / update search index update actions.

    When ``update_fields`` is truthy, a partial update is signalled
    (``pre_update``) and only the changed fields are synced; otherwise a
    full (re)index is signalled (``pre_index``) and performed. In both
    cases the actual sync only runs when ``settings.auto_sync`` allows it
    for this instance.
    """
    # Skip objects that the index's search queryset does not cover.
    if not _in_search_queryset(instance=instance, index=index):
        logger.debug(
            "Object (%r) is not in search queryset, ignoring update.", instance
        )
        return
    try:
        if update_fields:
            # Partial save: signal, then patch only the changed fields.
            pre_update.send(
                sender=instance.__class__,
                instance=instance,
                index=index,
                update_fields=update_fields,
            )
            if settings.auto_sync(instance):
                instance.update_search_document(
                    index=index, update_fields=update_fields
                )
        else:
            # Full save: signal, then (re)index the whole document.
            pre_index.send(sender=instance.__class__, instance=instance, index=index)
            if settings.auto_sync(instance):
                instance.index_search_document(index=index)
    except Exception:
        # Deliberately broad: a search-index failure must never break the
        # caller's save; the error is logged with traceback instead.
        logger.exception("Error handling 'post_save' signal for %s", instance)
def up(self, path, fileobject, upload_callback=None, resume_offset=None):
"Upload a fileobject to path, HTTP POST-ing to up.jottacloud.com, using the JottaCloud API"
"""
*** WHAT DID I DO?: created file
***
POST https://up.jottacloud.com/jfs/**USERNAME**/Jotta/Sync/testFolder/testFile.txt?cphash=d41d8cd98f00b204e9800998ecf8427e HTTP/1.1
User-Agent: Desktop_Jottacloud 3.0.22.203 Windows_8 6.2.9200 x86_64
Authorization: Basic ******************
X-JottaAPIVersion: 2.2
X-Jfs-DeviceName: **CENSORED**
JCreated: 2014-10-26T12:33:09Z+00:00
JModified: 2014-10-26T12:33:09Z+00:00
JMd5: d41d8cd98f00b204e9800998ecf8427e
JSize: 0
jx_csid: dOq1NCRer6uxuR/bFxihasj4QzBU3Tn7S2jVF1CE71YW1fGhxPFYYsw2T0XYjnJBtxKQzhWixmg+u5kp8bJtvMpIFHbhSDmPPSk+PVBf2UdFhXxli4YEII9a97eO4XBfn5QWAV1LJ2Z9l59jmnLkJQgfOyexkuQbxHdSLgQPXu8=
jx_lisence: M1v3p31oQf2OXvyAn2GvfS2I2oiMXrw+cofuMVHHI/2K+wlxhj22VkON6fN6fJMsGNcMzvcFYfmKPgL0Yf8TCO5A/6ULk6N8LctY3+fPegx+Jgbyc4hh0IXwnOdqa+UZ6Lg1ub4VXr5XnX3P3IxeVDg0VbcJnzv4TbFA+oMXmfM=
Content-Type: application/octet-stream
Content-Length: 0
Connection: Keep-Alive
Accept-Encoding: gzip
Accept-Language: nb-NO,en,*
Host: up.jottacloud.com
"""
url = path.replace('www.jottacloud.com', 'up.jottacloud.com')
# Calculate file length
fileobject.seek(0,2)
contentlen = fileobject.tell()
# Rewind read head to correct offset
# If we're resuming an incomplete upload, continue from that offset
try:
fileobject.seek(resume_offset)
except TypeError as e:
if resume_offset is None:
fileobject.seek(0)
except IOError as e:
log.exception(e)
log.warning('Could not seek to file offset %r, re-starting upload of %r from 0',
resume_offset,
url)
fileobject.seek(0)
# Calculate file md5 hash
md5hash = calculate_md5(fileobject)
log.debug('posting content (len %s, hash %s) to url %r', contentlen, md5hash, url)
try:
mtime = os.path.getmtime(fileobject.name)
timestamp = datetime.datetime.fromtimestamp(mtime).isoformat()
except Exception as e:
if hasattr(fileobject, 'name'):
log.exception('Problems getting mtime from fileobjet: %r', e)
timestamp = datetime.datetime.now().isoformat()
params = {'cphash': md5hash}
m = requests_toolbelt.MultipartEncoder({
'md5': ('', md5hash),
'modified': ('', timestamp),
'created': ('', timestamp),
'file': (os.path.basename(url), fileobject, 'application/octet-stream'),
})
headers = {'JMd5':md5hash,
'JCreated': timestamp,
'JModified': timestamp,
'X-Jfs-DeviceName': 'Jotta',
'JSize': str(contentlen), # headers have to be strings or bytes , cf #122
'jx_csid': '',
'jx_lisence': '',
'content-type': m.content_type,
}
fileobject.seek(0) # rewind read index for requests.post
files = {'md5': ('', md5hash),
'modified': ('', timestamp),
'created': ('', timestamp),
'file': (os.path.basename(url), fileobject, 'application/octet-stream')}
return self.post(url, None, files=files, params=params, extra_headers=headers, upload_callback=upload_callback) | Upload a fileobject to path, HTTP POST-ing to up.jottacloud.com, using the JottaCloud API | Below is the the instruction that describes the task:
### Input:
Upload a fileobject to path, HTTP POST-ing to up.jottacloud.com, using the JottaCloud API
### Response:
def up(self, path, fileobject, upload_callback=None, resume_offset=None):
    "Upload a fileobject to path, HTTP POST-ing to up.jottacloud.com, using the JottaCloud API"
    """
    *** WHAT DID I DO?: created file
    ***
    POST https://up.jottacloud.com/jfs/**USERNAME**/Jotta/Sync/testFolder/testFile.txt?cphash=d41d8cd98f00b204e9800998ecf8427e HTTP/1.1
    User-Agent: Desktop_Jottacloud 3.0.22.203 Windows_8 6.2.9200 x86_64
    Authorization: Basic ******************
    X-JottaAPIVersion: 2.2
    X-Jfs-DeviceName: **CENSORED**
    JCreated: 2014-10-26T12:33:09Z+00:00
    JModified: 2014-10-26T12:33:09Z+00:00
    JMd5: d41d8cd98f00b204e9800998ecf8427e
    JSize: 0
    jx_csid: dOq1NCRer6uxuR/bFxihasj4QzBU3Tn7S2jVF1CE71YW1fGhxPFYYsw2T0XYjnJBtxKQzhWixmg+u5kp8bJtvMpIFHbhSDmPPSk+PVBf2UdFhXxli4YEII9a97eO4XBfn5QWAV1LJ2Z9l59jmnLkJQgfOyexkuQbxHdSLgQPXu8=
    jx_lisence: M1v3p31oQf2OXvyAn2GvfS2I2oiMXrw+cofuMVHHI/2K+wlxhj22VkON6fN6fJMsGNcMzvcFYfmKPgL0Yf8TCO5A/6ULk6N8LctY3+fPegx+Jgbyc4hh0IXwnOdqa+UZ6Lg1ub4VXr5XnX3P3IxeVDg0VbcJnzv4TbFA+oMXmfM=
    Content-Type: application/octet-stream
    Content-Length: 0
    Connection: Keep-Alive
    Accept-Encoding: gzip
    Accept-Language: nb-NO,en,*
    Host: up.jottacloud.com
    """
    # Uploads go to the dedicated upload host, same path.
    url = path.replace('www.jottacloud.com', 'up.jottacloud.com')
    # Calculate file length by seeking to the end of the stream.
    fileobject.seek(0,2)
    contentlen = fileobject.tell()
    # Rewind read head to correct offset.
    # If we're resuming an incomplete upload, continue from that offset.
    # seek(None) raises TypeError, which is how the "no resume" default
    # falls through to seeking back to the start.
    try:
        fileobject.seek(resume_offset)
    except TypeError as e:
        if resume_offset is None:
            fileobject.seek(0)
    except IOError as e:
        # The offset was given but unusable: log and restart from zero.
        log.exception(e)
        log.warning('Could not seek to file offset %r, re-starting upload of %r from 0',
                    resume_offset,
                    url)
        fileobject.seek(0)
    # Calculate file md5 hash (consumes the stream from the current offset).
    md5hash = calculate_md5(fileobject)
    log.debug('posting content (len %s, hash %s) to url %r', contentlen, md5hash, url)
    # Use the file's mtime as the created/modified timestamp when available;
    # otherwise fall back to "now".
    try:
        mtime = os.path.getmtime(fileobject.name)
        timestamp = datetime.datetime.fromtimestamp(mtime).isoformat()
    except Exception as e:
        if hasattr(fileobject, 'name'):
            log.exception('Problems getting mtime from fileobjet: %r', e)
        timestamp = datetime.datetime.now().isoformat()
    params = {'cphash': md5hash}
    # NOTE(review): only m.content_type is used below; the encoder itself is
    # never posted (the plain `files` dict is) — presumably intentional, but
    # confirm before touching.
    m = requests_toolbelt.MultipartEncoder({
        'md5': ('', md5hash),
        'modified': ('', timestamp),
        'created': ('', timestamp),
        'file': (os.path.basename(url), fileobject, 'application/octet-stream'),
    })
    headers = {'JMd5':md5hash,
               'JCreated': timestamp,
               'JModified': timestamp,
               'X-Jfs-DeviceName': 'Jotta',
               'JSize': str(contentlen), # headers have to be strings or bytes , cf #122
               'jx_csid': '',
               'jx_lisence': '',
               'content-type': m.content_type,
               }
    fileobject.seek(0) # rewind read index for requests.post
    files = {'md5': ('', md5hash),
             'modified': ('', timestamp),
             'created': ('', timestamp),
             'file': (os.path.basename(url), fileobject, 'application/octet-stream')}
    return self.post(url, None, files=files, params=params, extra_headers=headers, upload_callback=upload_callback)
def _generate_examples(self, filepaths):
"""Generate CIFAR examples as dicts.
Shared across CIFAR-{10, 100}. Uses self._cifar_info as
configuration.
Args:
filepaths (list[str]): The files to use to generate the data.
Yields:
The cifar examples, as defined in the dataset info features.
"""
label_keys = self._cifar_info.label_keys
for path in filepaths:
for labels, np_image in _load_data(path, len(label_keys)):
row = dict(zip(label_keys, labels))
row["image"] = np_image
yield row | Generate CIFAR examples as dicts.
Shared across CIFAR-{10, 100}. Uses self._cifar_info as
configuration.
Args:
filepaths (list[str]): The files to use to generate the data.
Yields:
The cifar examples, as defined in the dataset info features. | Below is the the instruction that describes the task:
### Input:
Generate CIFAR examples as dicts.
Shared across CIFAR-{10, 100}. Uses self._cifar_info as
configuration.
Args:
filepaths (list[str]): The files to use to generate the data.
Yields:
The cifar examples, as defined in the dataset info features.
### Response:
def _generate_examples(self, filepaths):
    """Generate CIFAR examples as dicts.

    Shared across CIFAR-{10, 100}. Uses self._cifar_info as
    configuration.

    Args:
      filepaths (list[str]): The files to use to generate the data.

    Yields:
      The cifar examples, as defined in the dataset info features.
    """
    keys = self._cifar_info.label_keys
    for filepath in filepaths:
        for labels, np_image in _load_data(filepath, len(keys)):
            record = {key: label for key, label in zip(keys, labels)}
            record["image"] = np_image
            yield record
def get_variable_grammar(self):
"""
A method that returns variable grammar
"""
# Defining a expression for valid word
word_expr = Word(alphanums + '_' + '-')
word_expr2 = Word(initChars=printables, excludeChars=['{', '}', ',', ' '])
name_expr = Suppress('variable') + word_expr + Suppress('{')
state_expr = ZeroOrMore(word_expr2 + Optional(Suppress(",")))
# Defining a variable state expression
variable_state_expr = Suppress('type') + Suppress(word_expr) + Suppress('[') + Suppress(Word(nums)) + \
Suppress(']') + Suppress('{') + Group(state_expr) + Suppress('}') + Suppress(';')
# variable states is of the form type description [args] { val1, val2 }; (comma may or may not be present)
property_expr = Suppress('property') + CharsNotIn(';') + Suppress(';') # Creating a expr to find property
return name_expr, variable_state_expr, property_expr | A method that returns variable grammar | Below is the the instruction that describes the task:
### Input:
A method that returns variable grammar
### Response:
def get_variable_grammar(self):
    """
    A method that returns the pyparsing grammar for a variable block.
    """
    # Identifier-style words, and the words allowed as state values.
    identifier = Word(alphanums + '_' + '-')
    state_word = Word(initChars=printables, excludeChars=['{', '}', ',', ' '])

    # Header: variable <name> {
    name_expr = Suppress('variable') + identifier + Suppress('{')

    # State list: values separated by (optional) commas.
    states = ZeroOrMore(state_word + Optional(Suppress(",")))

    # Body: type <kind> [ <n> ] { val1, val2 };
    variable_state_expr = (
        Suppress('type') + Suppress(identifier) + Suppress('[') +
        Suppress(Word(nums)) + Suppress(']') + Suppress('{') +
        Group(states) + Suppress('}') + Suppress(';')
    )

    # Property line: property <anything up to the semicolon>;
    property_expr = Suppress('property') + CharsNotIn(';') + Suppress(';')
    return name_expr, variable_state_expr, property_expr
def y(self):
'''
np.array: The grid points in y.
'''
if None not in (self.y_min, self.y_max, self.y_step) and \
self.y_min != self.y_max:
y = np.arange(self.y_min, self.y_max-self.y_step*0.1, self.y_step)
else:
y = np.array([])
return y | np.array: The grid points in y. | Below is the the instruction that describes the task:
### Input:
np.array: The grid points in y.
### Response:
def y(self):
    '''
    np.array: The grid points in y.
    '''
    spec = (self.y_min, self.y_max, self.y_step)
    # An unset axis (any None) or a degenerate range yields no grid points.
    if None in spec or self.y_min == self.y_max:
        return np.array([])
    # Pull the stop value in by a tenth of a step so the end point stays
    # excluded even with floating-point round-off.
    stop = self.y_max - self.y_step*0.1
    return np.arange(self.y_min, stop, self.y_step)
def add_moveTo(self, x, y):
"""Return a newly created `a:moveTo` subtree with point *(x, y)*.
The new `a:moveTo` element is appended to this `a:path` element.
"""
moveTo = self._add_moveTo()
pt = moveTo._add_pt()
pt.x, pt.y = x, y
return moveTo | Return a newly created `a:moveTo` subtree with point *(x, y)*.
The new `a:moveTo` element is appended to this `a:path` element. | Below is the the instruction that describes the task:
### Input:
Return a newly created `a:moveTo` subtree with point *(x, y)*.
The new `a:moveTo` element is appended to this `a:path` element.
### Response:
def add_moveTo(self, x, y):
    """Return a newly created `a:moveTo` subtree with point *(x, y)*.

    The new `a:moveTo` element is appended to this `a:path` element.
    """
    move_to = self._add_moveTo()
    point = move_to._add_pt()
    point.x = x
    point.y = y
    return move_to
def bulk_add(self, item_id, ref_id=None, tags=None,
time=None, title=None, url=None):
"""
Add an item to list
See: https://getpocket.com/developer/docs/v3/modify
:param item_id: int
:param ref_id: tweet_id
:param tags: list of tags
:param time: time of action
:param title: given title
:param url: item url
:return: self for chaining
:rtype: Pocket
"""
self._add_action('add')
return self | Add an item to list
See: https://getpocket.com/developer/docs/v3/modify
:param item_id: int
:param ref_id: tweet_id
:param tags: list of tags
:param time: time of action
:param title: given title
:param url: item url
:return: self for chaining
:rtype: Pocket | Below is the the instruction that describes the task:
### Input:
Add an item to list
See: https://getpocket.com/developer/docs/v3/modify
:param item_id: int
:param ref_id: tweet_id
:param tags: list of tags
:param time: time of action
:param title: given title
:param url: item url
:return: self for chaining
:rtype: Pocket
### Response:
def bulk_add(self, item_id, ref_id=None, tags=None,
             time=None, title=None, url=None):
    """
    Add an item to list

    See: https://getpocket.com/developer/docs/v3/modify

    :param item_id: int
    :param ref_id: tweet_id
    :param tags: list of tags
    :param time: time of action
    :param title: given title
    :param url: item url
    :return: self for chaining
    :rtype: Pocket
    """
    # NOTE(review): none of the arguments are forwarded explicitly here —
    # presumably _add_action harvests the caller's locals to build the
    # action payload; confirm before relying on individual fields.
    self._add_action('add')
    return self
async def parse_release_results(soup):
"""
Parse Releases search pages.
:param soup: The BS4 class object
:return: A list of dictionaries containing a release dictionary. This is the same as the one returned in get_novel.
It contains a Date released, Platform, Ages group and Name.
"""
soup = list(soup.find_all('table', class_='stripe')[0].children)[1:]
releases = []
for item in soup:
child = list(item.children)
temp_rel = {'date': None, 'ages': None, 'platform': None, 'name': None}
temp_rel['date'] = child[0].string
temp_rel['ages'] = child[1].string
temp_rel['platform'] = child[2].abbr.get('title')
temp_rel['name'] = child[3].a.string
releases.append(temp_rel)
del temp_rel
return releases | Parse Releases search pages.
:param soup: The BS4 class object
:return: A list of dictionaries containing a release dictionary. This is the same as the one returned in get_novel.
It contains a Date released, Platform, Ages group and Name. | Below is the the instruction that describes the task:
### Input:
Parse Releases search pages.
:param soup: The BS4 class object
:return: A list of dictionaries containing a release dictionary. This is the same as the one returned in get_novel.
It contains a Date released, Platform, Ages group and Name.
### Response:
async def parse_release_results(soup):
    """
    Parse Releases search pages.

    :param soup: The BS4 class object
    :return: A list of dictionaries containing a release dictionary. This is the same as the one returned in get_novel.
        It contains a Date released, Platform, Ages group and Name.
    """
    # First child of the results table is the header row; skip it.
    rows = list(soup.find_all('table', class_='stripe')[0].children)[1:]
    releases = []
    for row in rows:
        cells = list(row.children)
        releases.append({
            'date': cells[0].string,
            'ages': cells[1].string,
            'platform': cells[2].abbr.get('title'),
            'name': cells[3].a.string,
        })
    return releases
def delete_agent_cloud(self, agent_cloud_id):
"""DeleteAgentCloud.
[Preview API]
:param int agent_cloud_id:
:rtype: :class:`<TaskAgentCloud> <azure.devops.v5_1.task-agent.models.TaskAgentCloud>`
"""
route_values = {}
if agent_cloud_id is not None:
route_values['agentCloudId'] = self._serialize.url('agent_cloud_id', agent_cloud_id, 'int')
response = self._send(http_method='DELETE',
location_id='bfa72b3d-0fc6-43fb-932b-a7f6559f93b9',
version='5.1-preview.1',
route_values=route_values)
return self._deserialize('TaskAgentCloud', response) | DeleteAgentCloud.
[Preview API]
:param int agent_cloud_id:
:rtype: :class:`<TaskAgentCloud> <azure.devops.v5_1.task-agent.models.TaskAgentCloud>` | Below is the the instruction that describes the task:
### Input:
DeleteAgentCloud.
[Preview API]
:param int agent_cloud_id:
:rtype: :class:`<TaskAgentCloud> <azure.devops.v5_1.task-agent.models.TaskAgentCloud>`
### Response:
def delete_agent_cloud(self, agent_cloud_id):
    """DeleteAgentCloud.

    [Preview API]

    :param int agent_cloud_id: identifier of the agent cloud to delete.
    :rtype: :class:`<TaskAgentCloud> <azure.devops.v5_1.task-agent.models.TaskAgentCloud>`
    """
    # Serialize the route value only when an id was actually given.
    route_values = {}
    if agent_cloud_id is not None:
        serialized = self._serialize.url('agent_cloud_id', agent_cloud_id, 'int')
        route_values['agentCloudId'] = serialized
    # Issue the DELETE and unwrap the response into a TaskAgentCloud model.
    response = self._send(http_method='DELETE',
                          location_id='bfa72b3d-0fc6-43fb-932b-a7f6559f93b9',
                          version='5.1-preview.1',
                          route_values=route_values)
    return self._deserialize('TaskAgentCloud', response)
def get_scrollbar_value_height(self):
"""Return the value span height of the scrollbar"""
vsb = self.editor.verticalScrollBar()
return vsb.maximum()-vsb.minimum()+vsb.pageStep() | Return the value span height of the scrollbar | Below is the the instruction that describes the task:
### Input:
Return the value span height of the scrollbar
### Response:
def get_scrollbar_value_height(self):
    """Return the value span height of the scrollbar"""
    scrollbar = self.editor.verticalScrollBar()
    span = scrollbar.maximum() - scrollbar.minimum()
    return span + scrollbar.pageStep()
def _is_valid_dataset(config_value):
'''Datasets must be of form "project.dataset" or "dataset"
'''
return re.match(
# regex matches: project.table -- OR -- table
r'^' + RE_PROJECT + r'\.' + RE_DS_TABLE + r'$|^' + RE_DS_TABLE + r'$',
config_value,
) | Datasets must be of form "project.dataset" or "dataset" | Below is the the instruction that describes the task:
### Input:
Datasets must be of form "project.dataset" or "dataset"
### Response:
def _is_valid_dataset(config_value):
    '''Datasets must be of form "project.dataset" or "dataset"
    '''
    # Accept either "<project>.<dataset>" or a bare "<dataset>" name;
    # returns the match object (truthy) or None.
    pattern = '^{proj}\\.{ds}$|^{ds}$'.format(proj=RE_PROJECT, ds=RE_DS_TABLE)
    return re.match(pattern, config_value)
def get_inspector():
"""Reuse inspector"""
global _INSPECTOR
if _INSPECTOR:
return _INSPECTOR
else:
bind = op.get_bind()
_INSPECTOR = sa.engine.reflection.Inspector.from_engine(bind)
return _INSPECTOR | Reuse inspector | Below is the the instruction that describes the task:
### Input:
Reuse inspector
### Response:
def get_inspector():
    """Reuse inspector"""
    # Lazily create the SQLAlchemy inspector once and cache it at module level.
    global _INSPECTOR
    if not _INSPECTOR:
        bind = op.get_bind()
        _INSPECTOR = sa.engine.reflection.Inspector.from_engine(bind)
    return _INSPECTOR
def autoconfig_url_from_preferences():
"""
Get the PAC ``AutoConfigURL`` value from the macOS System Preferences.
This setting is visible as the "URL" field in
System Preferences > Network > Advanced... > Proxies > Automatic Proxy Configuration.
:return: The value from the registry, or None if the value isn't configured or available.
Note that it may be local filesystem path instead of a URL.
:rtype: str|None
:raises NotDarwinError: If called on a non-macOS/OSX platform.
"""
if not ON_DARWIN:
raise NotDarwinError()
try:
config = SystemConfiguration.SCDynamicStoreCopyProxies(None)
except AttributeError:
return # Key or value not found.
if all(('ProxyAutoConfigEnable' in config,
'ProxyAutoConfigURLString' in config,
not config.get('ProxyAutoDiscoveryEnable', 0))):
# Only return a value if it is enabled, not empty, and auto discovery is disabled.
return str(config['ProxyAutoConfigURLString']) | Get the PAC ``AutoConfigURL`` value from the macOS System Preferences.
This setting is visible as the "URL" field in
System Preferences > Network > Advanced... > Proxies > Automatic Proxy Configuration.
:return: The value from the registry, or None if the value isn't configured or available.
Note that it may be local filesystem path instead of a URL.
:rtype: str|None
:raises NotDarwinError: If called on a non-macOS/OSX platform. | Below is the the instruction that describes the task:
### Input:
Get the PAC ``AutoConfigURL`` value from the macOS System Preferences.
This setting is visible as the "URL" field in
System Preferences > Network > Advanced... > Proxies > Automatic Proxy Configuration.
:return: The value from the registry, or None if the value isn't configured or available.
Note that it may be local filesystem path instead of a URL.
:rtype: str|None
:raises NotDarwinError: If called on a non-macOS/OSX platform.
### Response:
def autoconfig_url_from_preferences():
    """
    Get the PAC ``AutoConfigURL`` value from the macOS System Preferences.
    This setting is visible as the "URL" field in
    System Preferences > Network > Advanced... > Proxies > Automatic Proxy Configuration.

    :return: The value from the registry, or None if the value isn't configured or available.
        Note that it may be local filesystem path instead of a URL.
    :rtype: str|None
    :raises NotDarwinError: If called on a non-macOS/OSX platform.
    """
    if not ON_DARWIN:
        raise NotDarwinError()
    try:
        # Query the dynamic store for the current proxy configuration.
        config = SystemConfiguration.SCDynamicStoreCopyProxies(None)
    except AttributeError:
        return  # Key or value not found.
    # Only return a value when auto-config is enabled, a URL string is
    # present, and WPAD auto-discovery is NOT enabled.
    if all(('ProxyAutoConfigEnable' in config,
            'ProxyAutoConfigURLString' in config,
            not config.get('ProxyAutoDiscoveryEnable', 0))):
        # Only return a value if it is enabled, not empty, and auto discovery is disabled.
        return str(config['ProxyAutoConfigURLString'])
def Vector(self, off):
"""Vector retrieves the start of data of the vector whose offset is
stored at "off" in this object."""
N.enforce_number(off, N.UOffsetTFlags)
off += self.Pos
x = off + self.Get(N.UOffsetTFlags, off)
# data starts after metadata containing the vector length
x += N.UOffsetTFlags.bytewidth
return x | Vector retrieves the start of data of the vector whose offset is
stored at "off" in this object. | Below is the the instruction that describes the task:
### Input:
Vector retrieves the start of data of the vector whose offset is
stored at "off" in this object.
### Response:
def Vector(self, off):
"""Vector retrieves the start of data of the vector whose offset is
stored at "off" in this object."""
N.enforce_number(off, N.UOffsetTFlags)
off += self.Pos
x = off + self.Get(N.UOffsetTFlags, off)
# data starts after metadata containing the vector length
x += N.UOffsetTFlags.bytewidth
return x |
def message(self, message: Message, rule: Optional[int]) -> None:
"""
Add a message to the appropriate list of messages. If `rule` refers
to a valid id range for a go rule, the message is entered in a list
keyed by the full gorule-{id}. Otherwise, if `rule` is None, or
outside the id range, then we put this in the catch-all "other"
keyed list of messages.
"""
rule_id = self._rule_id(rule)
if rule_id not in self.messages:
self.messages[rule_id] = []
if len(self.messages[rule_id]) < self._rule_message_cap:
self.messages[rule_id].append(message) | Add a message to the appropriate list of messages. If `rule` refers
to a valid id range for a go rule, the message is entered in a list
keyed by the full gorule-{id}. Otherwise, if `rule` is None, or
outside the id range, then we put this in the catch-all "other"
keyed list of messages. | Below is the the instruction that describes the task:
### Input:
Add a message to the appropriate list of messages. If `rule` refers
to a valid id range for a go rule, the message is entered in a list
keyed by the full gorule-{id}. Otherwise, if `rule` is None, or
outside the id range, then we put this in the catch-all "other"
keyed list of messages.
### Response:
def message(self, message: Message, rule: Optional[int]) -> None:
"""
Add a message to the appropriate list of messages. If `rule` refers
to a valid id range for a go rule, the message is entered in a list
keyed by the full gorule-{id}. Otherwise, if `rule` is None, or
outside the id range, then we put this in the catch-all "other"
keyed list of messages.
"""
rule_id = self._rule_id(rule)
if rule_id not in self.messages:
self.messages[rule_id] = []
if len(self.messages[rule_id]) < self._rule_message_cap:
self.messages[rule_id].append(message) |
def iter_items(self, start_key=None, end_key=None, reverse=False):
"""Iterates over the (key, value) items of the associated tree,
in ascending order if reverse is True, iterate in descending order,
reverse defaults to False"""
# optimized iterator (reduced method calls) - faster on CPython but slower on pypy
if self.is_empty():
return []
if reverse:
return self._iter_items_backward(start_key, end_key)
else:
return self._iter_items_forward(start_key, end_key) | Iterates over the (key, value) items of the associated tree,
in ascending order if reverse is True, iterate in descending order,
reverse defaults to False | Below is the the instruction that describes the task:
### Input:
Iterates over the (key, value) items of the associated tree,
in ascending order if reverse is True, iterate in descending order,
reverse defaults to False
### Response:
def iter_items(self, start_key=None, end_key=None, reverse=False):
"""Iterates over the (key, value) items of the associated tree,
in ascending order if reverse is True, iterate in descending order,
reverse defaults to False"""
# optimized iterator (reduced method calls) - faster on CPython but slower on pypy
if self.is_empty():
return []
if reverse:
return self._iter_items_backward(start_key, end_key)
else:
return self._iter_items_forward(start_key, end_key) |
def as_int_array(self):
"""
Convert self into a regular ndarray of ints.
This is an O(1) operation. It does not copy the underlying data.
"""
return self.view(
type=ndarray,
dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize),
) | Convert self into a regular ndarray of ints.
This is an O(1) operation. It does not copy the underlying data. | Below is the the instruction that describes the task:
### Input:
Convert self into a regular ndarray of ints.
This is an O(1) operation. It does not copy the underlying data.
### Response:
def as_int_array(self):
"""
Convert self into a regular ndarray of ints.
This is an O(1) operation. It does not copy the underlying data.
"""
return self.view(
type=ndarray,
dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize),
) |
def parse_GFF_attribute_string(attrStr, extra_return_first_value=False):
"""Parses a GFF attribute string and returns it as a dictionary.
If 'extra_return_first_value' is set, a pair is returned: the dictionary
and the value of the first attribute. This might be useful if this is the
ID.
"""
if attrStr.endswith("\n"):
attrStr = attrStr[:-1]
d = {}
first_val = "_unnamed_"
for (i, attr) in itertools.izip(
itertools.count(),
_HTSeq.quotesafe_split(attrStr)):
if _re_attr_empty.match(attr):
continue
if attr.count('"') not in (0, 2):
raise ValueError(
"The attribute string seems to contain mismatched quotes.")
mo = _re_attr_main.match(attr)
if not mo:
raise ValueError("Failure parsing GFF attribute line")
val = mo.group(2)
if val.startswith('"') and val.endswith('"'):
val = val[1:-1]
d[intern(mo.group(1))] = intern(val)
if extra_return_first_value and i == 0:
first_val = val
if extra_return_first_value:
return (d, first_val)
else:
return d | Parses a GFF attribute string and returns it as a dictionary.
If 'extra_return_first_value' is set, a pair is returned: the dictionary
and the value of the first attribute. This might be useful if this is the
ID. | Below is the the instruction that describes the task:
### Input:
Parses a GFF attribute string and returns it as a dictionary.
If 'extra_return_first_value' is set, a pair is returned: the dictionary
and the value of the first attribute. This might be useful if this is the
ID.
### Response:
def parse_GFF_attribute_string(attrStr, extra_return_first_value=False):
"""Parses a GFF attribute string and returns it as a dictionary.
If 'extra_return_first_value' is set, a pair is returned: the dictionary
and the value of the first attribute. This might be useful if this is the
ID.
"""
if attrStr.endswith("\n"):
attrStr = attrStr[:-1]
d = {}
first_val = "_unnamed_"
for (i, attr) in itertools.izip(
itertools.count(),
_HTSeq.quotesafe_split(attrStr)):
if _re_attr_empty.match(attr):
continue
if attr.count('"') not in (0, 2):
raise ValueError(
"The attribute string seems to contain mismatched quotes.")
mo = _re_attr_main.match(attr)
if not mo:
raise ValueError("Failure parsing GFF attribute line")
val = mo.group(2)
if val.startswith('"') and val.endswith('"'):
val = val[1:-1]
d[intern(mo.group(1))] = intern(val)
if extra_return_first_value and i == 0:
first_val = val
if extra_return_first_value:
return (d, first_val)
else:
return d |
def hicpro_pairing_chart (self):
""" Generate Pairing chart """
# Specify the order of the different possible categories
keys = OrderedDict()
keys['Unique_paired_alignments'] = { 'color': '#005ce6', 'name': 'Uniquely Aligned' }
keys['Low_qual_pairs'] = { 'color': '#b97b35', 'name': 'Low Quality' }
keys['Pairs_with_singleton'] = { 'color': '#ff9933', 'name': 'Singleton' }
keys['Multiple_pairs_alignments'] = { 'color': '#e67300', 'name': 'Multi Aligned' }
keys['Unmapped_airs'] = { 'color': '#a9a2a2', 'name': 'Failed To Align' }
# Config for the plot
config = {
'id': 'hicpro_pairing_stats_plot',
'title': 'HiC-Pro: Pairing Statistics',
'ylab': '# Reads',
'cpswitch_counts_label': 'Number of Reads'
}
return bargraph.plot(self.hicpro_data, keys, config) | Generate Pairing chart | Below is the the instruction that describes the task:
### Input:
Generate Pairing chart
### Response:
def hicpro_pairing_chart (self):
""" Generate Pairing chart """
# Specify the order of the different possible categories
keys = OrderedDict()
keys['Unique_paired_alignments'] = { 'color': '#005ce6', 'name': 'Uniquely Aligned' }
keys['Low_qual_pairs'] = { 'color': '#b97b35', 'name': 'Low Quality' }
keys['Pairs_with_singleton'] = { 'color': '#ff9933', 'name': 'Singleton' }
keys['Multiple_pairs_alignments'] = { 'color': '#e67300', 'name': 'Multi Aligned' }
keys['Unmapped_airs'] = { 'color': '#a9a2a2', 'name': 'Failed To Align' }
# Config for the plot
config = {
'id': 'hicpro_pairing_stats_plot',
'title': 'HiC-Pro: Pairing Statistics',
'ylab': '# Reads',
'cpswitch_counts_label': 'Number of Reads'
}
return bargraph.plot(self.hicpro_data, keys, config) |
def read_very_lazy(self):
"""Return any data available in the cooked queue (very lazy).
Raise EOFError if connection closed and no data available.
Return '' if no cooked data available otherwise. Don't block.
"""
buf = self.cookedq.getvalue()
self.cookedq.seek(0)
self.cookedq.truncate()
if not buf and self.eof and not self.rawq:
raise EOFError('telnet connection closed')
return buf | Return any data available in the cooked queue (very lazy).
Raise EOFError if connection closed and no data available.
Return '' if no cooked data available otherwise. Don't block. | Below is the the instruction that describes the task:
### Input:
Return any data available in the cooked queue (very lazy).
Raise EOFError if connection closed and no data available.
Return '' if no cooked data available otherwise. Don't block.
### Response:
def read_very_lazy(self):
"""Return any data available in the cooked queue (very lazy).
Raise EOFError if connection closed and no data available.
Return '' if no cooked data available otherwise. Don't block.
"""
buf = self.cookedq.getvalue()
self.cookedq.seek(0)
self.cookedq.truncate()
if not buf and self.eof and not self.rawq:
raise EOFError('telnet connection closed')
return buf |
def lm_ffinal(freqs, damping_times, modes):
"""Return the maximum f_final of the modes given, with f_final the frequency
at which the amplitude falls to 1/1000 of the peak amplitude
"""
f_max = {}
for lmn in modes:
l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2])
for n in range(nmodes):
f_max['%d%d%d' %(l,m,n)] = qnm_freq_decay(freqs['%d%d%d' %(l,m,n)],
damping_times['%d%d%d' %(l,m,n)], 1./1000)
f_final = max(f_max.values())
if f_final > max_freq:
f_final = max_freq
return f_final | Return the maximum f_final of the modes given, with f_final the frequency
at which the amplitude falls to 1/1000 of the peak amplitude | Below is the the instruction that describes the task:
### Input:
Return the maximum f_final of the modes given, with f_final the frequency
at which the amplitude falls to 1/1000 of the peak amplitude
### Response:
def lm_ffinal(freqs, damping_times, modes):
"""Return the maximum f_final of the modes given, with f_final the frequency
at which the amplitude falls to 1/1000 of the peak amplitude
"""
f_max = {}
for lmn in modes:
l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2])
for n in range(nmodes):
f_max['%d%d%d' %(l,m,n)] = qnm_freq_decay(freqs['%d%d%d' %(l,m,n)],
damping_times['%d%d%d' %(l,m,n)], 1./1000)
f_final = max(f_max.values())
if f_final > max_freq:
f_final = max_freq
return f_final |
def bake(self):
"""
Bake an `ansible-lint` command so it's ready to execute and returns
None.
:return: None
"""
options = self.options
default_exclude_list = options.pop('default_exclude')
options_exclude_list = options.pop('exclude')
excludes = default_exclude_list + options_exclude_list
x_list = options.pop('x')
exclude_args = ['--exclude={}'.format(exclude) for exclude in excludes]
x_args = tuple(('-x', x) for x in x_list)
self._ansible_lint_command = sh.ansible_lint.bake(
options,
exclude_args,
sum(x_args, ()),
self._playbook,
_env=self.env,
_out=LOG.out,
_err=LOG.error) | Bake an `ansible-lint` command so it's ready to execute and returns
None.
:return: None | Below is the the instruction that describes the task:
### Input:
Bake an `ansible-lint` command so it's ready to execute and returns
None.
:return: None
### Response:
def bake(self):
"""
Bake an `ansible-lint` command so it's ready to execute and returns
None.
:return: None
"""
options = self.options
default_exclude_list = options.pop('default_exclude')
options_exclude_list = options.pop('exclude')
excludes = default_exclude_list + options_exclude_list
x_list = options.pop('x')
exclude_args = ['--exclude={}'.format(exclude) for exclude in excludes]
x_args = tuple(('-x', x) for x in x_list)
self._ansible_lint_command = sh.ansible_lint.bake(
options,
exclude_args,
sum(x_args, ()),
self._playbook,
_env=self.env,
_out=LOG.out,
_err=LOG.error) |
def fabs(x):
"""
Absolute value function
"""
if isinstance(x, UncertainFunction):
mcpts = np.fabs(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.fabs(x) | Absolute value function | Below is the the instruction that describes the task:
### Input:
Absolute value function
### Response:
def fabs(x):
"""
Absolute value function
"""
if isinstance(x, UncertainFunction):
mcpts = np.fabs(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.fabs(x) |
def h2o_median_absolute_error(y_actual, y_predicted):
"""
Median absolute error regression loss
:param y_actual: H2OFrame of actual response.
:param y_predicted: H2OFrame of predicted response.
:returns: median absolute error loss (best is 0.0)
"""
ModelBase._check_targets(y_actual, y_predicted)
return (y_predicted - y_actual).abs().median() | Median absolute error regression loss
:param y_actual: H2OFrame of actual response.
:param y_predicted: H2OFrame of predicted response.
:returns: median absolute error loss (best is 0.0) | Below is the the instruction that describes the task:
### Input:
Median absolute error regression loss
:param y_actual: H2OFrame of actual response.
:param y_predicted: H2OFrame of predicted response.
:returns: median absolute error loss (best is 0.0)
### Response:
def h2o_median_absolute_error(y_actual, y_predicted):
"""
Median absolute error regression loss
:param y_actual: H2OFrame of actual response.
:param y_predicted: H2OFrame of predicted response.
:returns: median absolute error loss (best is 0.0)
"""
ModelBase._check_targets(y_actual, y_predicted)
return (y_predicted - y_actual).abs().median() |
def process_payment(self):
'''
Proceed with payment using the payment method selected earlier.
:return: A response having processes the payment.
:rtype: requests.Response
'''
params = {
'__RequestVerificationToken': self.session.cookies,
'method': 'submit'
}
return self.__post('/PaymentOptions/Proceed', json=params) | Proceed with payment using the payment method selected earlier.
:return: A response having processes the payment.
:rtype: requests.Response | Below is the the instruction that describes the task:
### Input:
Proceed with payment using the payment method selected earlier.
:return: A response having processes the payment.
:rtype: requests.Response
### Response:
def process_payment(self):
'''
Proceed with payment using the payment method selected earlier.
:return: A response having processes the payment.
:rtype: requests.Response
'''
params = {
'__RequestVerificationToken': self.session.cookies,
'method': 'submit'
}
return self.__post('/PaymentOptions/Proceed', json=params) |
def draw(self):
'''
Draws samples from the `fake` distribution.
Returns:
`np.ndarray` of samples.
'''
observed_arr = self.noise_sampler.generate()
_ = self.__encoder_decoder_controller.encoder.inference(observed_arr)
arr = self.__encoder_decoder_controller.encoder.get_feature_points()
return arr | Draws samples from the `fake` distribution.
Returns:
`np.ndarray` of samples. | Below is the the instruction that describes the task:
### Input:
Draws samples from the `fake` distribution.
Returns:
`np.ndarray` of samples.
### Response:
def draw(self):
'''
Draws samples from the `fake` distribution.
Returns:
`np.ndarray` of samples.
'''
observed_arr = self.noise_sampler.generate()
_ = self.__encoder_decoder_controller.encoder.inference(observed_arr)
arr = self.__encoder_decoder_controller.encoder.get_feature_points()
return arr |
def set_default_unit_all(self, twig=None, unit=None, **kwargs):
"""
TODO: add documentation
"""
if twig is not None and unit is None:
# then try to support value as the first argument if no matches with twigs
if isinstance(unit, u.Unit) or not isinstance(twig, str):
unit = twig
twig = None
elif not len(self.filter(twig=twig, check_default=check_default, **kwargs)):
unit = twig
twig = None
for param in self.filter(twig=twig, **kwargs).to_list():
param.set_default_unit(unit) | TODO: add documentation | Below is the the instruction that describes the task:
### Input:
TODO: add documentation
### Response:
def set_default_unit_all(self, twig=None, unit=None, **kwargs):
"""
TODO: add documentation
"""
if twig is not None and unit is None:
# then try to support value as the first argument if no matches with twigs
if isinstance(unit, u.Unit) or not isinstance(twig, str):
unit = twig
twig = None
elif not len(self.filter(twig=twig, check_default=check_default, **kwargs)):
unit = twig
twig = None
for param in self.filter(twig=twig, **kwargs).to_list():
param.set_default_unit(unit) |
def on_step_end(self, step, logs={}):
""" Called at end of each step for each callback in callbackList"""
for callback in self.callbacks:
# Check if callback supports the more appropriate `on_step_end` callback.
# If not, fall back to `on_batch_end` to be compatible with built-in Keras callbacks.
if callable(getattr(callback, 'on_step_end', None)):
callback.on_step_end(step, logs=logs)
else:
callback.on_batch_end(step, logs=logs) | Called at end of each step for each callback in callbackList | Below is the the instruction that describes the task:
### Input:
Called at end of each step for each callback in callbackList
### Response:
def on_step_end(self, step, logs={}):
""" Called at end of each step for each callback in callbackList"""
for callback in self.callbacks:
# Check if callback supports the more appropriate `on_step_end` callback.
# If not, fall back to `on_batch_end` to be compatible with built-in Keras callbacks.
if callable(getattr(callback, 'on_step_end', None)):
callback.on_step_end(step, logs=logs)
else:
callback.on_batch_end(step, logs=logs) |
def run(self, calc_bleu=True, epoch=None, iteration=None, eval_path=None,
summary=False, reference_path=None):
"""
Runs translation on test dataset.
:param calc_bleu: if True compares results with reference and computes
BLEU score
:param epoch: index of the current epoch
:param iteration: index of the current iteration
:param eval_path: path to the file for saving results
:param summary: if True prints summary
:param reference_path: path to the file with reference translation
"""
if self.cuda:
test_bleu = torch.cuda.FloatTensor([0])
break_training = torch.cuda.LongTensor([0])
else:
test_bleu = torch.FloatTensor([0])
break_training = torch.LongTensor([0])
if eval_path is None:
eval_path = self.build_eval_path(epoch, iteration)
detok_eval_path = eval_path + '.detok'
with contextlib.suppress(FileNotFoundError):
os.remove(eval_path)
os.remove(detok_eval_path)
rank = get_rank()
logging.info(f'Running evaluation on test set')
self.model.eval()
torch.cuda.empty_cache()
output = self.evaluate(epoch, iteration, summary)
output = output[:len(self.loader.dataset)]
output = self.loader.dataset.unsort(output)
if rank == 0:
with open(eval_path, 'a') as eval_file:
eval_file.writelines(output)
if calc_bleu:
self.run_detokenizer(eval_path)
test_bleu[0] = self.run_sacrebleu(detok_eval_path, reference_path)
if summary:
logging.info(f'BLEU on test dataset: {test_bleu[0]:.2f}')
if self.target_bleu and test_bleu[0] >= self.target_bleu:
logging.info(f'Target accuracy reached')
break_training[0] = 1
barrier()
torch.cuda.empty_cache()
logging.info(f'Finished evaluation on test set')
if self.distributed:
dist.broadcast(break_training, 0)
dist.broadcast(test_bleu, 0)
return test_bleu[0].item(), break_training[0].item() | Runs translation on test dataset.
:param calc_bleu: if True compares results with reference and computes
BLEU score
:param epoch: index of the current epoch
:param iteration: index of the current iteration
:param eval_path: path to the file for saving results
:param summary: if True prints summary
:param reference_path: path to the file with reference translation | Below is the the instruction that describes the task:
### Input:
Runs translation on test dataset.
:param calc_bleu: if True compares results with reference and computes
BLEU score
:param epoch: index of the current epoch
:param iteration: index of the current iteration
:param eval_path: path to the file for saving results
:param summary: if True prints summary
:param reference_path: path to the file with reference translation
### Response:
def run(self, calc_bleu=True, epoch=None, iteration=None, eval_path=None,
summary=False, reference_path=None):
"""
Runs translation on test dataset.
:param calc_bleu: if True compares results with reference and computes
BLEU score
:param epoch: index of the current epoch
:param iteration: index of the current iteration
:param eval_path: path to the file for saving results
:param summary: if True prints summary
:param reference_path: path to the file with reference translation
"""
if self.cuda:
test_bleu = torch.cuda.FloatTensor([0])
break_training = torch.cuda.LongTensor([0])
else:
test_bleu = torch.FloatTensor([0])
break_training = torch.LongTensor([0])
if eval_path is None:
eval_path = self.build_eval_path(epoch, iteration)
detok_eval_path = eval_path + '.detok'
with contextlib.suppress(FileNotFoundError):
os.remove(eval_path)
os.remove(detok_eval_path)
rank = get_rank()
logging.info(f'Running evaluation on test set')
self.model.eval()
torch.cuda.empty_cache()
output = self.evaluate(epoch, iteration, summary)
output = output[:len(self.loader.dataset)]
output = self.loader.dataset.unsort(output)
if rank == 0:
with open(eval_path, 'a') as eval_file:
eval_file.writelines(output)
if calc_bleu:
self.run_detokenizer(eval_path)
test_bleu[0] = self.run_sacrebleu(detok_eval_path, reference_path)
if summary:
logging.info(f'BLEU on test dataset: {test_bleu[0]:.2f}')
if self.target_bleu and test_bleu[0] >= self.target_bleu:
logging.info(f'Target accuracy reached')
break_training[0] = 1
barrier()
torch.cuda.empty_cache()
logging.info(f'Finished evaluation on test set')
if self.distributed:
dist.broadcast(break_training, 0)
dist.broadcast(test_bleu, 0)
return test_bleu[0].item(), break_training[0].item() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.