code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
async def send_contact(self, chat_id: typing.Union[base.Integer, base.String],
phone_number: base.String, first_name: base.String,
last_name: typing.Union[base.String, None] = None,
vcard: typing.Union[base.String, None] = None,
disable_notification: typing.Union[base.Boolean, None] = None,
reply_to_message_id: typing.Union[base.Integer, None] = None,
reply_markup: typing.Union[types.InlineKeyboardMarkup,
types.ReplyKeyboardMarkup,
types.ReplyKeyboardRemove,
types.ForceReply, None] = None) -> types.Message:
"""
Use this method to send phone contacts.
Source: https://core.telegram.org/bots/api#sendcontact
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param phone_number: Contact's phone number
:type phone_number: :obj:`base.String`
:param first_name: Contact's first name
:type first_name: :obj:`base.String`
:param last_name: Contact's last name
:type last_name: :obj:`typing.Union[base.String, None]`
:param vcard: vcard
:type vcard: :obj:`typing.Union[base.String, None]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_to_message_id: If the message is a reply, ID of the original message
:type reply_to_message_id: :obj:`typing.Union[base.Integer, None]`
:param reply_markup: Additional interface options
:type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,
types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`
:return: On success, the sent Message is returned
:rtype: :obj:`types.Message`
"""
reply_markup = prepare_arg(reply_markup)
payload = generate_payload(**locals())
result = await self.request(api.Methods.SEND_CONTACT, payload)
return types.Message(**result) | Use this method to send phone contacts.
Source: https://core.telegram.org/bots/api#sendcontact
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param phone_number: Contact's phone number
:type phone_number: :obj:`base.String`
:param first_name: Contact's first name
:type first_name: :obj:`base.String`
:param last_name: Contact's last name
:type last_name: :obj:`typing.Union[base.String, None]`
:param vcard: vcard
:type vcard: :obj:`typing.Union[base.String, None]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_to_message_id: If the message is a reply, ID of the original message
:type reply_to_message_id: :obj:`typing.Union[base.Integer, None]`
:param reply_markup: Additional interface options
:type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,
types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`
:return: On success, the sent Message is returned
:rtype: :obj:`types.Message` | Below is the the instruction that describes the task:
### Input:
Use this method to send phone contacts.
Source: https://core.telegram.org/bots/api#sendcontact
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param phone_number: Contact's phone number
:type phone_number: :obj:`base.String`
:param first_name: Contact's first name
:type first_name: :obj:`base.String`
:param last_name: Contact's last name
:type last_name: :obj:`typing.Union[base.String, None]`
:param vcard: vcard
:type vcard: :obj:`typing.Union[base.String, None]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_to_message_id: If the message is a reply, ID of the original message
:type reply_to_message_id: :obj:`typing.Union[base.Integer, None]`
:param reply_markup: Additional interface options
:type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,
types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`
:return: On success, the sent Message is returned
:rtype: :obj:`types.Message`
### Response:
async def send_contact(self, chat_id: typing.Union[base.Integer, base.String],
phone_number: base.String, first_name: base.String,
last_name: typing.Union[base.String, None] = None,
vcard: typing.Union[base.String, None] = None,
disable_notification: typing.Union[base.Boolean, None] = None,
reply_to_message_id: typing.Union[base.Integer, None] = None,
reply_markup: typing.Union[types.InlineKeyboardMarkup,
types.ReplyKeyboardMarkup,
types.ReplyKeyboardRemove,
types.ForceReply, None] = None) -> types.Message:
"""
Use this method to send phone contacts.
Source: https://core.telegram.org/bots/api#sendcontact
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param phone_number: Contact's phone number
:type phone_number: :obj:`base.String`
:param first_name: Contact's first name
:type first_name: :obj:`base.String`
:param last_name: Contact's last name
:type last_name: :obj:`typing.Union[base.String, None]`
:param vcard: vcard
:type vcard: :obj:`typing.Union[base.String, None]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_to_message_id: If the message is a reply, ID of the original message
:type reply_to_message_id: :obj:`typing.Union[base.Integer, None]`
:param reply_markup: Additional interface options
:type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,
types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`
:return: On success, the sent Message is returned
:rtype: :obj:`types.Message`
"""
reply_markup = prepare_arg(reply_markup)
payload = generate_payload(**locals())
result = await self.request(api.Methods.SEND_CONTACT, payload)
return types.Message(**result) |
def gather_command_line_options(filter_disabled=None):
"""Get a sorted list of all CommandLineOption subclasses."""
if filter_disabled is None:
filter_disabled = not SETTINGS.COMMAND_LINE.SHOW_DISABLED_OPTIONS
options = [opt for opt in get_inheritors(CommandLineOption)
if not filter_disabled or opt._enabled]
return sorted(options, key=lambda opt: opt.__name__) | Get a sorted list of all CommandLineOption subclasses. | Below is the the instruction that describes the task:
### Input:
Get a sorted list of all CommandLineOption subclasses.
### Response:
def gather_command_line_options(filter_disabled=None):
"""Get a sorted list of all CommandLineOption subclasses."""
if filter_disabled is None:
filter_disabled = not SETTINGS.COMMAND_LINE.SHOW_DISABLED_OPTIONS
options = [opt for opt in get_inheritors(CommandLineOption)
if not filter_disabled or opt._enabled]
return sorted(options, key=lambda opt: opt.__name__) |
def mine_sub_trees(self, threshold):
"""
Generate subtrees and mine them for patterns.
"""
patterns = {}
mining_order = sorted(self.frequent.keys(),
key=lambda x: self.frequent[x])
# Get items in tree in reverse order of occurrences.
for item in mining_order:
suffixes = []
conditional_tree_input = []
node = self.headers[item]
# Follow node links to get a list of
# all occurrences of a certain item.
while node is not None:
suffixes.append(node)
node = node.link
# For each occurrence of the item,
# trace the path back to the root node.
for suffix in suffixes:
frequency = suffix.count
path = []
parent = suffix.parent
while parent.parent is not None:
path.append(parent.value)
parent = parent.parent
for i in range(frequency):
conditional_tree_input.append(path)
# Now we have the input for a subtree,
# so construct it and grab the patterns.
subtree = FPTree(conditional_tree_input, threshold,
item, self.frequent[item])
subtree_patterns = subtree.mine_patterns(threshold)
# Insert subtree patterns into main patterns dictionary.
for pattern in subtree_patterns.keys():
if pattern in patterns:
patterns[pattern] += subtree_patterns[pattern]
else:
patterns[pattern] = subtree_patterns[pattern]
return patterns | Generate subtrees and mine them for patterns. | Below is the the instruction that describes the task:
### Input:
Generate subtrees and mine them for patterns.
### Response:
def mine_sub_trees(self, threshold):
"""
Generate subtrees and mine them for patterns.
"""
patterns = {}
mining_order = sorted(self.frequent.keys(),
key=lambda x: self.frequent[x])
# Get items in tree in reverse order of occurrences.
for item in mining_order:
suffixes = []
conditional_tree_input = []
node = self.headers[item]
# Follow node links to get a list of
# all occurrences of a certain item.
while node is not None:
suffixes.append(node)
node = node.link
# For each occurrence of the item,
# trace the path back to the root node.
for suffix in suffixes:
frequency = suffix.count
path = []
parent = suffix.parent
while parent.parent is not None:
path.append(parent.value)
parent = parent.parent
for i in range(frequency):
conditional_tree_input.append(path)
# Now we have the input for a subtree,
# so construct it and grab the patterns.
subtree = FPTree(conditional_tree_input, threshold,
item, self.frequent[item])
subtree_patterns = subtree.mine_patterns(threshold)
# Insert subtree patterns into main patterns dictionary.
for pattern in subtree_patterns.keys():
if pattern in patterns:
patterns[pattern] += subtree_patterns[pattern]
else:
patterns[pattern] = subtree_patterns[pattern]
return patterns |
def _execute(self, request):
"""Run execute with retries and rate limiting.
Args:
request (object): The HttpRequest object to execute.
Returns:
dict: The response from the API.
"""
if self._rate_limiter:
# Since the ratelimiter library only exposes a context manager
# interface the code has to be duplicated to handle the case where
# no rate limiter is defined.
with self._rate_limiter:
return request.execute(http=self.http,
num_retries=self._num_retries)
return request.execute(http=self.http,
num_retries=self._num_retries) | Run execute with retries and rate limiting.
Args:
request (object): The HttpRequest object to execute.
Returns:
dict: The response from the API. | Below is the the instruction that describes the task:
### Input:
Run execute with retries and rate limiting.
Args:
request (object): The HttpRequest object to execute.
Returns:
dict: The response from the API.
### Response:
def _execute(self, request):
"""Run execute with retries and rate limiting.
Args:
request (object): The HttpRequest object to execute.
Returns:
dict: The response from the API.
"""
if self._rate_limiter:
# Since the ratelimiter library only exposes a context manager
# interface the code has to be duplicated to handle the case where
# no rate limiter is defined.
with self._rate_limiter:
return request.execute(http=self.http,
num_retries=self._num_retries)
return request.execute(http=self.http,
num_retries=self._num_retries) |
def compute_sector_exposures(positions, sectors, sector_dict=SECTORS):
"""
Returns arrays of long, short and gross sector exposures of an algorithm's
positions
Parameters
----------
positions : pd.DataFrame
Daily equity positions of algorithm, in dollars.
- See full explanation in compute_style_factor_exposures.
sectors : pd.DataFrame
Daily Morningstar sector code per asset
- See full explanation in create_risk_tear_sheet
sector_dict : dict or OrderedDict
Dictionary of all sectors
- Keys are sector codes (e.g. ints or strings) and values are sector
names (which must be strings)
- Defaults to Morningstar sectors
"""
sector_ids = sector_dict.keys()
long_exposures = []
short_exposures = []
gross_exposures = []
net_exposures = []
positions_wo_cash = positions.drop('cash', axis='columns')
long_exposure = positions_wo_cash[positions_wo_cash > 0] \
.sum(axis='columns')
short_exposure = positions_wo_cash[positions_wo_cash < 0] \
.abs().sum(axis='columns')
gross_exposure = positions_wo_cash.abs().sum(axis='columns')
for sector_id in sector_ids:
in_sector = positions_wo_cash[sectors == sector_id]
long_sector = in_sector[in_sector > 0] \
.sum(axis='columns').divide(long_exposure)
short_sector = in_sector[in_sector < 0] \
.sum(axis='columns').divide(short_exposure)
gross_sector = in_sector.abs().sum(axis='columns') \
.divide(gross_exposure)
net_sector = long_sector.subtract(short_sector)
long_exposures.append(long_sector)
short_exposures.append(short_sector)
gross_exposures.append(gross_sector)
net_exposures.append(net_sector)
return long_exposures, short_exposures, gross_exposures, net_exposures | Returns arrays of long, short and gross sector exposures of an algorithm's
positions
Parameters
----------
positions : pd.DataFrame
Daily equity positions of algorithm, in dollars.
- See full explanation in compute_style_factor_exposures.
sectors : pd.DataFrame
Daily Morningstar sector code per asset
- See full explanation in create_risk_tear_sheet
sector_dict : dict or OrderedDict
Dictionary of all sectors
- Keys are sector codes (e.g. ints or strings) and values are sector
names (which must be strings)
- Defaults to Morningstar sectors | Below is the the instruction that describes the task:
### Input:
Returns arrays of long, short and gross sector exposures of an algorithm's
positions
Parameters
----------
positions : pd.DataFrame
Daily equity positions of algorithm, in dollars.
- See full explanation in compute_style_factor_exposures.
sectors : pd.DataFrame
Daily Morningstar sector code per asset
- See full explanation in create_risk_tear_sheet
sector_dict : dict or OrderedDict
Dictionary of all sectors
- Keys are sector codes (e.g. ints or strings) and values are sector
names (which must be strings)
- Defaults to Morningstar sectors
### Response:
def compute_sector_exposures(positions, sectors, sector_dict=SECTORS):
"""
Returns arrays of long, short and gross sector exposures of an algorithm's
positions
Parameters
----------
positions : pd.DataFrame
Daily equity positions of algorithm, in dollars.
- See full explanation in compute_style_factor_exposures.
sectors : pd.DataFrame
Daily Morningstar sector code per asset
- See full explanation in create_risk_tear_sheet
sector_dict : dict or OrderedDict
Dictionary of all sectors
- Keys are sector codes (e.g. ints or strings) and values are sector
names (which must be strings)
- Defaults to Morningstar sectors
"""
sector_ids = sector_dict.keys()
long_exposures = []
short_exposures = []
gross_exposures = []
net_exposures = []
positions_wo_cash = positions.drop('cash', axis='columns')
long_exposure = positions_wo_cash[positions_wo_cash > 0] \
.sum(axis='columns')
short_exposure = positions_wo_cash[positions_wo_cash < 0] \
.abs().sum(axis='columns')
gross_exposure = positions_wo_cash.abs().sum(axis='columns')
for sector_id in sector_ids:
in_sector = positions_wo_cash[sectors == sector_id]
long_sector = in_sector[in_sector > 0] \
.sum(axis='columns').divide(long_exposure)
short_sector = in_sector[in_sector < 0] \
.sum(axis='columns').divide(short_exposure)
gross_sector = in_sector.abs().sum(axis='columns') \
.divide(gross_exposure)
net_sector = long_sector.subtract(short_sector)
long_exposures.append(long_sector)
short_exposures.append(short_sector)
gross_exposures.append(gross_sector)
net_exposures.append(net_sector)
return long_exposures, short_exposures, gross_exposures, net_exposures |
def set_default_init_cli_cmds(self):
"""
Default init commands are set --retcode true, echo off, set --vt100 off, set dut <dut name>
and set testcase <tc name>
:return: List of default cli initialization commands.
"""
init_cli_cmds = []
init_cli_cmds.append("set --retcode true")
init_cli_cmds.append("echo off")
init_cli_cmds.append("set --vt100 off")
#set dut name as variable
init_cli_cmds.append('set dut "'+self.name+'"')
init_cli_cmds.append(['set testcase "' + self.testcase + '"', True])
return init_cli_cmds | Default init commands are set --retcode true, echo off, set --vt100 off, set dut <dut name>
and set testcase <tc name>
:return: List of default cli initialization commands. | Below is the the instruction that describes the task:
### Input:
Default init commands are set --retcode true, echo off, set --vt100 off, set dut <dut name>
and set testcase <tc name>
:return: List of default cli initialization commands.
### Response:
def set_default_init_cli_cmds(self):
"""
Default init commands are set --retcode true, echo off, set --vt100 off, set dut <dut name>
and set testcase <tc name>
:return: List of default cli initialization commands.
"""
init_cli_cmds = []
init_cli_cmds.append("set --retcode true")
init_cli_cmds.append("echo off")
init_cli_cmds.append("set --vt100 off")
#set dut name as variable
init_cli_cmds.append('set dut "'+self.name+'"')
init_cli_cmds.append(['set testcase "' + self.testcase + '"', True])
return init_cli_cmds |
def _suicide_when_without_parent(self, parent_pid):
'''
Kill this process when the parent died.
'''
while True:
time.sleep(5)
try:
# Check pid alive
os.kill(parent_pid, 0)
except OSError:
# Forcibly exit
# Regular sys.exit raises an exception
self.stop()
log.warning('The parent is not alive, exiting.')
os._exit(999) | Kill this process when the parent died. | Below is the the instruction that describes the task:
### Input:
Kill this process when the parent died.
### Response:
def _suicide_when_without_parent(self, parent_pid):
'''
Kill this process when the parent died.
'''
while True:
time.sleep(5)
try:
# Check pid alive
os.kill(parent_pid, 0)
except OSError:
# Forcibly exit
# Regular sys.exit raises an exception
self.stop()
log.warning('The parent is not alive, exiting.')
os._exit(999) |
async def handle_post_request(self, environ):
"""Handle a long-polling POST request from the client."""
length = int(environ.get('CONTENT_LENGTH', '0'))
if length > self.server.max_http_buffer_size:
raise exceptions.ContentTooLongError()
else:
body = await environ['wsgi.input'].read(length)
p = payload.Payload(encoded_payload=body)
for pkt in p.packets:
await self.receive(pkt) | Handle a long-polling POST request from the client. | Below is the the instruction that describes the task:
### Input:
Handle a long-polling POST request from the client.
### Response:
async def handle_post_request(self, environ):
"""Handle a long-polling POST request from the client."""
length = int(environ.get('CONTENT_LENGTH', '0'))
if length > self.server.max_http_buffer_size:
raise exceptions.ContentTooLongError()
else:
body = await environ['wsgi.input'].read(length)
p = payload.Payload(encoded_payload=body)
for pkt in p.packets:
await self.receive(pkt) |
def first_location_of_maximum(x):
"""
Returns the first location of the maximum value of x.
The position is calculated relatively to the length of x.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float
"""
if not isinstance(x, (np.ndarray, pd.Series)):
x = np.asarray(x)
return np.argmax(x) / len(x) if len(x) > 0 else np.NaN | Returns the first location of the maximum value of x.
The position is calculated relatively to the length of x.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float | Below is the the instruction that describes the task:
### Input:
Returns the first location of the maximum value of x.
The position is calculated relatively to the length of x.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float
### Response:
def first_location_of_maximum(x):
"""
Returns the first location of the maximum value of x.
The position is calculated relatively to the length of x.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: float
"""
if not isinstance(x, (np.ndarray, pd.Series)):
x = np.asarray(x)
return np.argmax(x) / len(x) if len(x) > 0 else np.NaN |
def seed_url(self):
"""A URL that can be used to open the page.
The URL is formatted from :py:attr:`URL_TEMPLATE`, which is then
appended to :py:attr:`base_url` unless the template results in an
absolute URL.
:return: URL that can be used to open the page.
:rtype: str
"""
url = self.base_url
if self.URL_TEMPLATE is not None:
url = urlparse.urljoin(
self.base_url, self.URL_TEMPLATE.format(**self.url_kwargs)
)
if not url:
return None
url_parts = list(urlparse.urlparse(url))
query = urlparse.parse_qsl(url_parts[4])
for k, v in self.url_kwargs.items():
if v is None:
continue
if "{{{}}}".format(k) not in str(self.URL_TEMPLATE):
for i in iterable(v):
query.append((k, i))
url_parts[4] = urlencode(query)
return urlparse.urlunparse(url_parts) | A URL that can be used to open the page.
The URL is formatted from :py:attr:`URL_TEMPLATE`, which is then
appended to :py:attr:`base_url` unless the template results in an
absolute URL.
:return: URL that can be used to open the page.
:rtype: str | Below is the the instruction that describes the task:
### Input:
A URL that can be used to open the page.
The URL is formatted from :py:attr:`URL_TEMPLATE`, which is then
appended to :py:attr:`base_url` unless the template results in an
absolute URL.
:return: URL that can be used to open the page.
:rtype: str
### Response:
def seed_url(self):
"""A URL that can be used to open the page.
The URL is formatted from :py:attr:`URL_TEMPLATE`, which is then
appended to :py:attr:`base_url` unless the template results in an
absolute URL.
:return: URL that can be used to open the page.
:rtype: str
"""
url = self.base_url
if self.URL_TEMPLATE is not None:
url = urlparse.urljoin(
self.base_url, self.URL_TEMPLATE.format(**self.url_kwargs)
)
if not url:
return None
url_parts = list(urlparse.urlparse(url))
query = urlparse.parse_qsl(url_parts[4])
for k, v in self.url_kwargs.items():
if v is None:
continue
if "{{{}}}".format(k) not in str(self.URL_TEMPLATE):
for i in iterable(v):
query.append((k, i))
url_parts[4] = urlencode(query)
return urlparse.urlunparse(url_parts) |
def _find_references(model_name, references=None):
"""
Iterate over model references for `model_name`
and return a list of parent model specifications (including those of
`model_name`, ordered from parent to child).
"""
references = references or []
references.append(model_name)
ref = MODELS[model_name].get('reference')
if ref is not None:
_find_references(ref, references)
parent_models = [m for m in references]
parent_models.reverse()
return parent_models | Iterate over model references for `model_name`
and return a list of parent model specifications (including those of
`model_name`, ordered from parent to child). | Below is the the instruction that describes the task:
### Input:
Iterate over model references for `model_name`
and return a list of parent model specifications (including those of
`model_name`, ordered from parent to child).
### Response:
def _find_references(model_name, references=None):
"""
Iterate over model references for `model_name`
and return a list of parent model specifications (including those of
`model_name`, ordered from parent to child).
"""
references = references or []
references.append(model_name)
ref = MODELS[model_name].get('reference')
if ref is not None:
_find_references(ref, references)
parent_models = [m for m in references]
parent_models.reverse()
return parent_models |
def update_reserved_vlan_range(self, id_or_uri, vlan_pool, force=False):
"""
Updates the reserved vlan ID range for the fabric.
Note:
This method is only available on HPE Synergy.
Args:
id_or_uri: ID or URI of fabric.
vlan_pool (dict): vlan-pool data to update.
force: If set to true, the operation completes despite any problems with network connectivity or errors
on the resource itself. The default is false.
Returns:
dict: The fabric
"""
uri = self._client.build_uri(id_or_uri) + "/reserved-vlan-range"
return self._client.update(resource=vlan_pool, uri=uri, force=force, default_values=self.DEFAULT_VALUES) | Updates the reserved vlan ID range for the fabric.
Note:
This method is only available on HPE Synergy.
Args:
id_or_uri: ID or URI of fabric.
vlan_pool (dict): vlan-pool data to update.
force: If set to true, the operation completes despite any problems with network connectivity or errors
on the resource itself. The default is false.
Returns:
dict: The fabric | Below is the the instruction that describes the task:
### Input:
Updates the reserved vlan ID range for the fabric.
Note:
This method is only available on HPE Synergy.
Args:
id_or_uri: ID or URI of fabric.
vlan_pool (dict): vlan-pool data to update.
force: If set to true, the operation completes despite any problems with network connectivity or errors
on the resource itself. The default is false.
Returns:
dict: The fabric
### Response:
def update_reserved_vlan_range(self, id_or_uri, vlan_pool, force=False):
"""
Updates the reserved vlan ID range for the fabric.
Note:
This method is only available on HPE Synergy.
Args:
id_or_uri: ID or URI of fabric.
vlan_pool (dict): vlan-pool data to update.
force: If set to true, the operation completes despite any problems with network connectivity or errors
on the resource itself. The default is false.
Returns:
dict: The fabric
"""
uri = self._client.build_uri(id_or_uri) + "/reserved-vlan-range"
return self._client.update(resource=vlan_pool, uri=uri, force=force, default_values=self.DEFAULT_VALUES) |
def __add_prop(self, key, admin=False):
"""Add gettable and settable room config property during runtime"""
def getter(self):
return self.config[key]
def setter(self, val):
if admin and not self.admin:
raise RuntimeError(
f"You can't set the {key} key without mod privileges"
)
self.__set_config_value(self.config.get_real_key(key), val)
setattr(self.__class__, key, property(getter, setter)) | Add gettable and settable room config property during runtime | Below is the the instruction that describes the task:
### Input:
Add gettable and settable room config property during runtime
### Response:
def __add_prop(self, key, admin=False):
"""Add gettable and settable room config property during runtime"""
def getter(self):
return self.config[key]
def setter(self, val):
if admin and not self.admin:
raise RuntimeError(
f"You can't set the {key} key without mod privileges"
)
self.__set_config_value(self.config.get_real_key(key), val)
setattr(self.__class__, key, property(getter, setter)) |
def add_object(self,
name,
mesh,
transform=None):
"""
Add an object to the collision manager.
If an object with the given name is already in the manager,
replace it.
Parameters
----------
name : str
An identifier for the object
mesh : Trimesh object
The geometry of the collision object
transform : (4,4) float
Homogenous transform matrix for the object
"""
# if no transform passed, assume identity transform
if transform is None:
transform = np.eye(4)
transform = np.asanyarray(transform, dtype=np.float32)
if transform.shape != (4, 4):
raise ValueError('transform must be (4,4)!')
# create or recall from cache BVH
bvh = self._get_BVH(mesh)
# create the FCL transform from (4,4) matrix
t = fcl.Transform(transform[:3, :3], transform[:3, 3])
o = fcl.CollisionObject(bvh, t)
# Add collision object to set
if name in self._objs:
self._manager.unregisterObject(self._objs[name])
self._objs[name] = {'obj': o,
'geom': bvh}
# store the name of the geometry
self._names[id(bvh)] = name
self._manager.registerObject(o)
self._manager.update()
return o | Add an object to the collision manager.
If an object with the given name is already in the manager,
replace it.
Parameters
----------
name : str
An identifier for the object
mesh : Trimesh object
The geometry of the collision object
transform : (4,4) float
Homogenous transform matrix for the object | Below is the the instruction that describes the task:
### Input:
Add an object to the collision manager.
If an object with the given name is already in the manager,
replace it.
Parameters
----------
name : str
An identifier for the object
mesh : Trimesh object
The geometry of the collision object
transform : (4,4) float
Homogenous transform matrix for the object
### Response:
def add_object(self,
name,
mesh,
transform=None):
"""
Add an object to the collision manager.
If an object with the given name is already in the manager,
replace it.
Parameters
----------
name : str
An identifier for the object
mesh : Trimesh object
The geometry of the collision object
transform : (4,4) float
Homogenous transform matrix for the object
"""
# if no transform passed, assume identity transform
if transform is None:
transform = np.eye(4)
transform = np.asanyarray(transform, dtype=np.float32)
if transform.shape != (4, 4):
raise ValueError('transform must be (4,4)!')
# create or recall from cache BVH
bvh = self._get_BVH(mesh)
# create the FCL transform from (4,4) matrix
t = fcl.Transform(transform[:3, :3], transform[:3, 3])
o = fcl.CollisionObject(bvh, t)
# Add collision object to set
if name in self._objs:
self._manager.unregisterObject(self._objs[name])
self._objs[name] = {'obj': o,
'geom': bvh}
# store the name of the geometry
self._names[id(bvh)] = name
self._manager.registerObject(o)
self._manager.update()
return o |
def anchor(self, value):
"""
Setter for **self.__anchor** attribute.
:param value: Attribute value.
:type value: int
"""
if value is not None:
assert type(value) is int, "'{0}' attribute: '{1}' type is not 'int'!".format("anchor", value)
assert value in range(
0, 9), "'{0}' attribute: '{1}' need to be in '0' to '8' range!".format("anchor", value)
self.__anchor = value | Setter for **self.__anchor** attribute.
:param value: Attribute value.
:type value: int | Below is the the instruction that describes the task:
### Input:
Setter for **self.__anchor** attribute.
:param value: Attribute value.
:type value: int
### Response:
def anchor(self, value):
"""
Setter for **self.__anchor** attribute.
:param value: Attribute value.
:type value: int
"""
if value is not None:
assert type(value) is int, "'{0}' attribute: '{1}' type is not 'int'!".format("anchor", value)
assert value in range(
0, 9), "'{0}' attribute: '{1}' need to be in '0' to '8' range!".format("anchor", value)
self.__anchor = value |
def bcbio_variation_comparison(config_file, base_dir, data):
"""Run a variant comparison using the bcbio.variation toolkit, given an input configuration.
"""
tmp_dir = utils.safe_makedir(os.path.join(base_dir, "tmp"))
resources = config_utils.get_resources("bcbio_variation", data["config"])
jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx2g"])
cmd = ["bcbio-variation"] + jvm_opts + broad.get_default_jvm_opts(tmp_dir) + \
["variant-compare", config_file]
do.run(cmd, "Comparing variant calls using bcbio.variation", data) | Run a variant comparison using the bcbio.variation toolkit, given an input configuration. | Below is the the instruction that describes the task:
### Input:
Run a variant comparison using the bcbio.variation toolkit, given an input configuration.
### Response:
def bcbio_variation_comparison(config_file, base_dir, data):
"""Run a variant comparison using the bcbio.variation toolkit, given an input configuration.
"""
tmp_dir = utils.safe_makedir(os.path.join(base_dir, "tmp"))
resources = config_utils.get_resources("bcbio_variation", data["config"])
jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx2g"])
cmd = ["bcbio-variation"] + jvm_opts + broad.get_default_jvm_opts(tmp_dir) + \
["variant-compare", config_file]
do.run(cmd, "Comparing variant calls using bcbio.variation", data) |
def gap_sizes(self):
"""Return gap sizes per chromosome.
Returns
-------
gap_sizes : dict
a dictionary with chromosomes as key and the total number of
Ns as values
"""
if not self._gap_sizes:
gap_file = self.props["gaps"]["gaps"]
self._gap_sizes = {}
with open(gap_file) as f:
for line in f:
chrom, start, end = line.strip().split("\t")
start, end = int(start), int(end)
self._gap_sizes[chrom] = self._gap_sizes.get(chrom, 0) + end - start
return self._gap_sizes | Return gap sizes per chromosome.
Returns
-------
gap_sizes : dict
a dictionary with chromosomes as key and the total number of
Ns as values | Below is the instruction that describes the task:
### Input:
Return gap sizes per chromosome.
Returns
-------
gap_sizes : dict
a dictionary with chromosomes as key and the total number of
Ns as values
### Response:
def gap_sizes(self):
    """Return gap sizes per chromosome.

    The sizes are computed once from the gaps file and cached on the
    instance for subsequent calls.

    Returns
    -------
    gap_sizes : dict
        a dictionary with chromosomes as key and the total number of
        Ns as values
    """
    if not self._gap_sizes:
        self._gap_sizes = {}
        gap_path = self.props["gaps"]["gaps"]
        with open(gap_path) as handle:
            for record in handle:
                chrom, begin, stop = record.strip().split("\t")
                size = int(stop) - int(begin)
                self._gap_sizes[chrom] = self._gap_sizes.get(chrom, 0) + size
    return self._gap_sizes
def gapfill(
model, core, blocked, exclude, solver, epsilon=0.001, v_max=1000,
weights={}, implicit_sinks=True, allow_bounds_expansion=False):
"""Find a set of reactions to add such that no compounds are blocked.
Returns two iterators: first an iterator of reactions not in
core, that were added to resolve the model. Second, an
iterator of reactions in core that had flux bounds expanded (i.e.
irreversible reactions become reversible). Similarly to
GapFind, this method assumes, by default, implicit sinks for all compounds
in the model so the only factor that influences whether a compound
can be produced is the presence of the compounds needed to produce
it. This means that the resulting model will not necessarily be
flux consistent.
This method is implemented as a MILP-program. Therefore it may
not be efficient for larger models.
Args:
model: :class:`MetabolicModel` containing core reactions and reactions
that can be added for gap-filling.
core: The set of core (already present) reactions in the model.
blocked: The compounds to unblock.
exclude: Set of reactions in core to be excluded from gap-filling (e.g.
biomass reaction).
solver: MILP solver instance.
epsilon: Threshold amount of a compound produced for it to not be
considered blocked.
v_max: Maximum flux.
weights: Dictionary of weights for reactions. Weight is the penalty
score for adding the reaction (non-core reactions) or expanding the
flux bounds (all reactions).
implicit_sinks: Whether implicit sinks for all compounds are included
when gap-filling (traditional GapFill uses implicit sinks).
allow_bounds_expansion: Allow flux bounds to be expanded at the cost
of a penalty which can be specified using weights (traditional
GapFill does not allow this). This includes turning irreversible
reactions reversible.
"""
prob = solver.create_problem()
# Set integrality tolerance such that w constraints are correct
min_tol = prob.integrality_tolerance.min
int_tol = _find_integer_tolerance(epsilon, v_max, min_tol)
if int_tol < prob.integrality_tolerance.value:
prob.integrality_tolerance.value = int_tol
# Define flux variables
v = prob.namespace(model.reactions, lower=-v_max, upper=v_max)
# Add binary indicator variables
database_reactions = set(model.reactions).difference(core, exclude)
ym = prob.namespace(model.reactions, types=lp.VariableType.Binary)
yd = prob.namespace(database_reactions, types=lp.VariableType.Binary)
objective = ym.expr(
(rxnid, weights.get(rxnid, 1)) for rxnid in model.reactions)
objective += yd.expr(
(rxnid, weights.get(rxnid, 1)) for rxnid in database_reactions)
prob.set_objective(objective)
# Add constraints on all reactions
for reaction_id in model.reactions:
lower, upper = (float(x) for x in model.limits[reaction_id])
if reaction_id in exclude or not allow_bounds_expansion:
prob.add_linear_constraints(
upper >= v(reaction_id), v(reaction_id) >= lower)
else:
# Allow flux bounds to expand up to v_max with penalty
delta_lower = min(0, -v_max - lower)
delta_upper = max(0, v_max - upper)
prob.add_linear_constraints(
v(reaction_id) >= lower + ym(reaction_id) * delta_lower,
v(reaction_id) <= upper + ym(reaction_id) * delta_upper)
# Add constraints on database reactions
for reaction_id in database_reactions:
lower, upper = model.limits[reaction_id]
prob.add_linear_constraints(
v(reaction_id) >= yd(reaction_id) * -v_max,
v(reaction_id) <= yd(reaction_id) * v_max)
# Define constraints on production of blocked metabolites in reaction
w = prob.namespace(types=lp.VariableType.Binary)
binary_cons_lhs = {compound: 0 for compound in blocked}
for (compound, reaction_id), value in iteritems(model.matrix):
if reaction_id not in exclude and compound in blocked and value != 0:
w.define([(compound, reaction_id)])
w_var = w((compound, reaction_id))
dv = v(reaction_id) if value > 0 else -v(reaction_id)
prob.add_linear_constraints(
dv <= v_max * w_var,
dv >= epsilon + (-v_max - epsilon) * (1 - w_var))
binary_cons_lhs[compound] += w_var
for compound, lhs in iteritems(binary_cons_lhs):
prob.add_linear_constraints(lhs >= 1)
# Define mass balance constraints
massbalance_lhs = {compound: 0 for compound in model.compounds}
for (compound, reaction_id), value in iteritems(model.matrix):
if reaction_id not in exclude:
massbalance_lhs[compound] += v(reaction_id) * value
for compound, lhs in iteritems(massbalance_lhs):
if implicit_sinks:
# The constraint is merely >0 meaning that we have implicit sinks
# for all compounds.
prob.add_linear_constraints(lhs >= 0)
else:
prob.add_linear_constraints(lhs == 0)
# Solve
try:
prob.solve(lp.ObjectiveSense.Minimize)
except lp.SolverError as e:
raise_from(GapFillError('Failed to solve gapfill: {}'.format(e)), e)
def added_iter():
for reaction_id in database_reactions:
if yd.value(reaction_id) > 0.5:
yield reaction_id
def no_bounds_iter():
for reaction_id in model.reactions:
if ym.value(reaction_id) > 0.5:
yield reaction_id
return added_iter(), no_bounds_iter() | Find a set of reactions to add such that no compounds are blocked.
Returns two iterators: first an iterator of reactions not in
core, that were added to resolve the model. Second, an
iterator of reactions in core that had flux bounds expanded (i.e.
irreversible reactions become reversible). Similarly to
GapFind, this method assumes, by default, implicit sinks for all compounds
in the model so the only factor that influences whether a compound
can be produced is the presence of the compounds needed to produce
it. This means that the resulting model will not necessarily be
flux consistent.
This method is implemented as a MILP-program. Therefore it may
not be efficient for larger models.
Args:
model: :class:`MetabolicModel` containing core reactions and reactions
that can be added for gap-filling.
core: The set of core (already present) reactions in the model.
blocked: The compounds to unblock.
exclude: Set of reactions in core to be excluded from gap-filling (e.g.
biomass reaction).
solver: MILP solver instance.
epsilon: Threshold amount of a compound produced for it to not be
considered blocked.
v_max: Maximum flux.
weights: Dictionary of weights for reactions. Weight is the penalty
score for adding the reaction (non-core reactions) or expanding the
flux bounds (all reactions).
implicit_sinks: Whether implicit sinks for all compounds are included
when gap-filling (traditional GapFill uses implicit sinks).
allow_bounds_expansion: Allow flux bounds to be expanded at the cost
of a penalty which can be specified using weights (traditional
GapFill does not allow this). This includes turning irreversible
reactions reversible. | Below is the instruction that describes the task:
### Input:
Find a set of reactions to add such that no compounds are blocked.
Returns two iterators: first an iterator of reactions not in
core, that were added to resolve the model. Second, an
iterator of reactions in core that had flux bounds expanded (i.e.
irreversible reactions become reversible). Similarly to
GapFind, this method assumes, by default, implicit sinks for all compounds
in the model so the only factor that influences whether a compound
can be produced is the presence of the compounds needed to produce
it. This means that the resulting model will not necessarily be
flux consistent.
This method is implemented as a MILP-program. Therefore it may
not be efficient for larger models.
Args:
model: :class:`MetabolicModel` containing core reactions and reactions
that can be added for gap-filling.
core: The set of core (already present) reactions in the model.
blocked: The compounds to unblock.
exclude: Set of reactions in core to be excluded from gap-filling (e.g.
biomass reaction).
solver: MILP solver instance.
epsilon: Threshold amount of a compound produced for it to not be
considered blocked.
v_max: Maximum flux.
weights: Dictionary of weights for reactions. Weight is the penalty
score for adding the reaction (non-core reactions) or expanding the
flux bounds (all reactions).
implicit_sinks: Whether implicit sinks for all compounds are included
when gap-filling (traditional GapFill uses implicit sinks).
allow_bounds_expansion: Allow flux bounds to be expanded at the cost
of a penalty which can be specified using weights (traditional
GapFill does not allow this). This includes turning irreversible
reactions reversible.
### Response:
def gapfill(
        model, core, blocked, exclude, solver, epsilon=0.001, v_max=1000,
        weights=None, implicit_sinks=True, allow_bounds_expansion=False):
    """Find a set of reactions to add such that no compounds are blocked.

    Returns two iterators: first an iterator of reactions not in
    core, that were added to resolve the model. Second, an
    iterator of reactions in core that had flux bounds expanded (i.e.
    irreversible reactions become reversible). Similarly to
    GapFind, this method assumes, by default, implicit sinks for all compounds
    in the model so the only factor that influences whether a compound
    can be produced is the presence of the compounds needed to produce
    it. This means that the resulting model will not necessarily be
    flux consistent.

    This method is implemented as a MILP-program. Therefore it may
    not be efficient for larger models.

    Args:
        model: :class:`MetabolicModel` containing core reactions and reactions
            that can be added for gap-filling.
        core: The set of core (already present) reactions in the model.
        blocked: The compounds to unblock.
        exclude: Set of reactions in core to be excluded from gap-filling (e.g.
            biomass reaction).
        solver: MILP solver instance.
        epsilon: Threshold amount of a compound produced for it to not be
            considered blocked.
        v_max: Maximum flux.
        weights: Dictionary of weights for reactions. Weight is the penalty
            score for adding the reaction (non-core reactions) or expanding the
            flux bounds (all reactions). Defaults to weight 1 everywhere.
        implicit_sinks: Whether implicit sinks for all compounds are included
            when gap-filling (traditional GapFill uses implicit sinks).
        allow_bounds_expansion: Allow flux bounds to be expanded at the cost
            of a penalty which can be specified using weights (traditional
            GapFill does not allow this). This includes turning irreversible
            reactions reversible.
    """
    # Avoid the mutable-default-argument pitfall: a missing weights mapping
    # simply means every reaction has the default penalty of 1.
    if weights is None:
        weights = {}

    prob = solver.create_problem()

    # Set integrality tolerance such that w constraints are correct
    min_tol = prob.integrality_tolerance.min
    int_tol = _find_integer_tolerance(epsilon, v_max, min_tol)
    if int_tol < prob.integrality_tolerance.value:
        prob.integrality_tolerance.value = int_tol

    # Define flux variables
    v = prob.namespace(model.reactions, lower=-v_max, upper=v_max)

    # Add binary indicator variables: ym marks bound expansion of any
    # reaction, yd marks inclusion of a non-core (database) reaction.
    database_reactions = set(model.reactions).difference(core, exclude)
    ym = prob.namespace(model.reactions, types=lp.VariableType.Binary)
    yd = prob.namespace(database_reactions, types=lp.VariableType.Binary)

    # Minimize the weighted number of additions/expansions.
    objective = ym.expr(
        (rxnid, weights.get(rxnid, 1)) for rxnid in model.reactions)
    objective += yd.expr(
        (rxnid, weights.get(rxnid, 1)) for rxnid in database_reactions)
    prob.set_objective(objective)

    # Add constraints on all reactions
    for reaction_id in model.reactions:
        lower, upper = (float(x) for x in model.limits[reaction_id])
        if reaction_id in exclude or not allow_bounds_expansion:
            prob.add_linear_constraints(
                upper >= v(reaction_id), v(reaction_id) >= lower)
        else:
            # Allow flux bounds to expand up to v_max with penalty
            delta_lower = min(0, -v_max - lower)
            delta_upper = max(0, v_max - upper)
            prob.add_linear_constraints(
                v(reaction_id) >= lower + ym(reaction_id) * delta_lower,
                v(reaction_id) <= upper + ym(reaction_id) * delta_upper)

    # Add constraints on database reactions: only carry flux when included.
    for reaction_id in database_reactions:
        lower, upper = model.limits[reaction_id]
        prob.add_linear_constraints(
            v(reaction_id) >= yd(reaction_id) * -v_max,
            v(reaction_id) <= yd(reaction_id) * v_max)

    # Define constraints on production of blocked metabolites in reaction:
    # w[(compound, reaction)] = 1 iff the reaction produces at least epsilon
    # of the compound.
    w = prob.namespace(types=lp.VariableType.Binary)
    binary_cons_lhs = {compound: 0 for compound in blocked}
    for (compound, reaction_id), value in iteritems(model.matrix):
        if reaction_id not in exclude and compound in blocked and value != 0:
            w.define([(compound, reaction_id)])
            w_var = w((compound, reaction_id))
            dv = v(reaction_id) if value > 0 else -v(reaction_id)
            prob.add_linear_constraints(
                dv <= v_max * w_var,
                dv >= epsilon + (-v_max - epsilon) * (1 - w_var))
            binary_cons_lhs[compound] += w_var

    # Every blocked compound must be produced by at least one reaction.
    for compound, lhs in iteritems(binary_cons_lhs):
        prob.add_linear_constraints(lhs >= 1)

    # Define mass balance constraints
    massbalance_lhs = {compound: 0 for compound in model.compounds}
    for (compound, reaction_id), value in iteritems(model.matrix):
        if reaction_id not in exclude:
            massbalance_lhs[compound] += v(reaction_id) * value
    for compound, lhs in iteritems(massbalance_lhs):
        if implicit_sinks:
            # The constraint is merely >0 meaning that we have implicit sinks
            # for all compounds.
            prob.add_linear_constraints(lhs >= 0)
        else:
            prob.add_linear_constraints(lhs == 0)

    # Solve
    try:
        prob.solve(lp.ObjectiveSense.Minimize)
    except lp.SolverError as e:
        raise_from(GapFillError('Failed to solve gapfill: {}'.format(e)), e)

    def added_iter():
        # Non-core reactions whose inclusion indicator was selected.
        for reaction_id in database_reactions:
            if yd.value(reaction_id) > 0.5:
                yield reaction_id

    def no_bounds_iter():
        # Reactions whose flux bounds were expanded by the solver.
        for reaction_id in model.reactions:
            if ym.value(reaction_id) > 0.5:
                yield reaction_id

    return added_iter(), no_bounds_iter()
def _match_cubes(ccube_clean, ccube_dirty,
bexpcube_clean, bexpcube_dirty,
hpx_order):
""" Match the HEALPIX scheme and order of all the input cubes
return a dictionary of cubes with the same HEALPIX scheme and order
"""
if hpx_order == ccube_clean.hpx.order:
ccube_clean_at_order = ccube_clean
else:
ccube_clean_at_order = ccube_clean.ud_grade(hpx_order, preserve_counts=True)
if hpx_order == ccube_dirty.hpx.order:
ccube_dirty_at_order = ccube_dirty
else:
ccube_dirty_at_order = ccube_dirty.ud_grade(hpx_order, preserve_counts=True)
if hpx_order == bexpcube_clean.hpx.order:
bexpcube_clean_at_order = bexpcube_clean
else:
bexpcube_clean_at_order = bexpcube_clean.ud_grade(hpx_order, preserve_counts=True)
if hpx_order == bexpcube_dirty.hpx.order:
bexpcube_dirty_at_order = bexpcube_dirty
else:
bexpcube_dirty_at_order = bexpcube_dirty.ud_grade(hpx_order, preserve_counts=True)
if ccube_dirty_at_order.hpx.nest != ccube_clean.hpx.nest:
ccube_dirty_at_order = ccube_dirty_at_order.swap_scheme()
if bexpcube_clean_at_order.hpx.nest != ccube_clean.hpx.nest:
bexpcube_clean_at_order = bexpcube_clean_at_order.swap_scheme()
if bexpcube_dirty_at_order.hpx.nest != ccube_clean.hpx.nest:
bexpcube_dirty_at_order = bexpcube_dirty_at_order.swap_scheme()
ret_dict = dict(ccube_clean=ccube_clean_at_order,
ccube_dirty=ccube_dirty_at_order,
bexpcube_clean=bexpcube_clean_at_order,
bexpcube_dirty=bexpcube_dirty_at_order)
return ret_dict | Match the HEALPIX scheme and order of all the input cubes
return a dictionary of cubes with the same HEALPIX scheme and order | Below is the instruction that describes the task:
### Input:
Match the HEALPIX scheme and order of all the input cubes
return a dictionary of cubes with the same HEALPIX scheme and order
### Response:
def _match_cubes(ccube_clean, ccube_dirty,
bexpcube_clean, bexpcube_dirty,
hpx_order):
""" Match the HEALPIX scheme and order of all the input cubes
return a dictionary of cubes with the same HEALPIX scheme and order
"""
if hpx_order == ccube_clean.hpx.order:
ccube_clean_at_order = ccube_clean
else:
ccube_clean_at_order = ccube_clean.ud_grade(hpx_order, preserve_counts=True)
if hpx_order == ccube_dirty.hpx.order:
ccube_dirty_at_order = ccube_dirty
else:
ccube_dirty_at_order = ccube_dirty.ud_grade(hpx_order, preserve_counts=True)
if hpx_order == bexpcube_clean.hpx.order:
bexpcube_clean_at_order = bexpcube_clean
else:
bexpcube_clean_at_order = bexpcube_clean.ud_grade(hpx_order, preserve_counts=True)
if hpx_order == bexpcube_dirty.hpx.order:
bexpcube_dirty_at_order = bexpcube_dirty
else:
bexpcube_dirty_at_order = bexpcube_dirty.ud_grade(hpx_order, preserve_counts=True)
if ccube_dirty_at_order.hpx.nest != ccube_clean.hpx.nest:
ccube_dirty_at_order = ccube_dirty_at_order.swap_scheme()
if bexpcube_clean_at_order.hpx.nest != ccube_clean.hpx.nest:
bexpcube_clean_at_order = bexpcube_clean_at_order.swap_scheme()
if bexpcube_dirty_at_order.hpx.nest != ccube_clean.hpx.nest:
bexpcube_dirty_at_order = bexpcube_dirty_at_order.swap_scheme()
ret_dict = dict(ccube_clean=ccube_clean_at_order,
ccube_dirty=ccube_dirty_at_order,
bexpcube_clean=bexpcube_clean_at_order,
bexpcube_dirty=bexpcube_dirty_at_order)
return ret_dict |
def application(environ, start_response):
"""
The main WSGI application. Dispatch the current request to
the functions from above and store the regular expression
captures in the WSGI environment as `myapp.url_args` so that
the functions from above can access the url placeholders.
If nothing matches, call the `not_found` function.
:param environ: The HTTP application environment
:param start_response: The application to run when the handling of the
request is done
:return: The response as a list of lines
"""
path = environ.get("PATH_INFO", "").lstrip("/")
if path == "metadata":
return metadata(environ, start_response)
kaka = environ.get("HTTP_COOKIE", None)
logger.info("<application> PATH: %s", path)
if kaka:
logger.info("= KAKA =")
user, authn_ref = info_from_cookie(kaka)
if authn_ref:
environ["idp.authn"] = AUTHN_BROKER[authn_ref]
else:
try:
query = parse_qs(environ["QUERY_STRING"])
logger.debug("QUERY: %s", query)
user = IDP.cache.uid2user[query["id"][0]]
except KeyError:
user = None
url_patterns = AUTHN_URLS
if not user:
logger.info("-- No USER --")
# insert NON_AUTHN_URLS first in case there is no user
url_patterns = NON_AUTHN_URLS + url_patterns
for regex, callback in url_patterns:
match = re.search(regex, path)
if match is not None:
try:
environ["myapp.url_args"] = match.groups()[0]
except IndexError:
environ["myapp.url_args"] = path
logger.debug("Callback: %s", callback)
if isinstance(callback, tuple):
cls = callback[0](environ, start_response, user)
func = getattr(cls, callback[1])
return func()
return callback(environ, start_response, user)
if re.search(r"static/.*", path) is not None:
return staticfile(environ, start_response)
return not_found(environ, start_response) | The main WSGI application. Dispatch the current request to
the functions from above and store the regular expression
captures in the WSGI environment as `myapp.url_args` so that
the functions from above can access the url placeholders.
If nothing matches, call the `not_found` function.
:param environ: The HTTP application environment
:param start_response: The application to run when the handling of the
request is done
:return: The response as a list of lines | Below is the instruction that describes the task:
### Input:
The main WSGI application. Dispatch the current request to
the functions from above and store the regular expression
captures in the WSGI environment as `myapp.url_args` so that
the functions from above can access the url placeholders.
If nothing matches, call the `not_found` function.
:param environ: The HTTP application environment
:param start_response: The application to run when the handling of the
request is done
:return: The response as a list of lines
### Response:
def application(environ, start_response):
    """
    The main WSGI application. Dispatch the current request to
    the functions from above and store the regular expression
    captures in the WSGI environment as `myapp.url_args` so that
    the functions from above can access the url placeholders.
    If nothing matches, call the `not_found` function.

    :param environ: The HTTP application environment
    :param start_response: The application to run when the handling of the
        request is done
    :return: The response as a list of lines
    """
    path = environ.get("PATH_INFO", "").lstrip("/")
    # The metadata endpoint is served unconditionally, before any auth.
    if path == "metadata":
        return metadata(environ, start_response)
    kaka = environ.get("HTTP_COOKIE", None)
    logger.info("<application> PATH: %s", path)
    if kaka:
        # Cookie present: recover the user and the authentication reference,
        # and expose the matching authentication-broker entry to handlers.
        logger.info("= KAKA =")
        user, authn_ref = info_from_cookie(kaka)
        if authn_ref:
            environ["idp.authn"] = AUTHN_BROKER[authn_ref]
    else:
        # No cookie: fall back to looking the user up from the "id" query
        # parameter; any missing key means no known user.
        try:
            query = parse_qs(environ["QUERY_STRING"])
            logger.debug("QUERY: %s", query)
            user = IDP.cache.uid2user[query["id"][0]]
        except KeyError:
            user = None
    url_patterns = AUTHN_URLS
    if not user:
        logger.info("-- No USER --")
        # insert NON_AUTHN_URLS first in case there is no user
        url_patterns = NON_AUTHN_URLS + url_patterns
    for regex, callback in url_patterns:
        match = re.search(regex, path)
        if match is not None:
            try:
                # Store the first capture group (url placeholder) for the
                # handler; fall back to the whole path when there is none.
                environ["myapp.url_args"] = match.groups()[0]
            except IndexError:
                environ["myapp.url_args"] = path
            logger.debug("Callback: %s", callback)
            if isinstance(callback, tuple):
                # (class, method-name) tuple: instantiate the handler class,
                # then invoke the named method on the instance.
                cls = callback[0](environ, start_response, user)
                func = getattr(cls, callback[1])
                return func()
            return callback(environ, start_response, user)
    # Nothing matched: serve static assets, otherwise report 404.
    if re.search(r"static/.*", path) is not None:
        return staticfile(environ, start_response)
    return not_found(environ, start_response)
def choose_parsimonious_states(tree, ps_feature, out_feature):
"""
Converts the content of the get_personalized_feature_name(feature, PARS_STATES) node feature to the predicted states
and stores them in the `feature` feature to each node.
The get_personalized_feature_name(feature, PARS_STATES) is deleted.
:param feature: str, character for which the parsimonious states are reconstructed
:param tree: ete3.Tree, the tree of interest
:return: int, number of ancestral scenarios selected,
calculated by multiplying the number of selected states for all nodes.
Also adds parsimonious states as the `feature` feature to each node
"""
num_scenarios = 1
unresolved_nodes = 0
num_states = 0
for node in tree.traverse():
states = getattr(node, ps_feature)
node.add_feature(out_feature, states)
n = len(states)
num_scenarios *= n
unresolved_nodes += 1 if n > 1 else 0
num_states += n
return num_scenarios, unresolved_nodes, num_states | Converts the content of the get_personalized_feature_name(feature, PARS_STATES) node feature to the predicted states
and stores them in the `feature` feature to each node.
The get_personalized_feature_name(feature, PARS_STATES) is deleted.
:param feature: str, character for which the parsimonious states are reconstructed
:param tree: ete3.Tree, the tree of interest
:return: int, number of ancestral scenarios selected,
calculated by multiplying the number of selected states for all nodes.
Also adds parsimonious states as the `feature` feature to each node | Below is the instruction that describes the task:
### Input:
Converts the content of the get_personalized_feature_name(feature, PARS_STATES) node feature to the predicted states
and stores them in the `feature` feature to each node.
The get_personalized_feature_name(feature, PARS_STATES) is deleted.
:param feature: str, character for which the parsimonious states are reconstructed
:param tree: ete3.Tree, the tree of interest
:return: int, number of ancestral scenarios selected,
calculated by multiplying the number of selected states for all nodes.
Also adds parsimonious states as the `feature` feature to each node
### Response:
def choose_parsimonious_states(tree, ps_feature, out_feature):
    """
    Copies the parsimonious states stored in the ``ps_feature`` node
    attribute into the ``out_feature`` attribute of every node, and
    summarises how ambiguous the reconstruction is.

    :param tree: ete3.Tree, the tree of interest
    :param ps_feature: str, node feature holding the parsimonious state sets
    :param out_feature: str, node feature to store the selected states in
    :return: tuple of (number of ancestral scenarios, i.e. the product over
        all nodes of the number of selected states; number of nodes with more
        than one selected state; total number of selected states)
    """
    num_scenarios = 1
    unresolved_nodes = 0
    num_states = 0
    for node in tree.traverse():
        states = getattr(node, ps_feature)
        node.add_feature(out_feature, states)
        state_count = len(states)
        num_scenarios *= state_count
        num_states += state_count
        if state_count > 1:
            unresolved_nodes += 1
    return num_scenarios, unresolved_nodes, num_states
def shutdownFileStore(workflowDir, workflowID):
"""
Run the deferred functions from any prematurely terminated jobs still lingering on the system
and carry out any necessary filestore-specific cleanup.
This is a destructive operation and it is important to ensure that there are no other running
processes on the system that are modifying or using the file store for this workflow.
This is the intended to be the last call to the file store in a Toil run, called by the
batch system cleanup function upon batch system shutdown.
:param str workflowDir: The path to the cache directory
:param str workflowID: The workflow ID for this invocation of the workflow
"""
cacheDir = os.path.join(workflowDir, cacheDirName(workflowID))
if os.path.exists(cacheDir):
# The presence of the cacheDir suggests this was a cached run. We don't need the cache lock
# for any of this since this is the final cleanup of a job and there should be no other
# conflicting processes using the cache.
CachingFileStore.shutdown(cacheDir)
else:
# This absence of cacheDir suggests otherwise.
NonCachingFileStore.shutdown(workflowDir) | Run the deferred functions from any prematurely terminated jobs still lingering on the system
and carry out any necessary filestore-specific cleanup.
This is a destructive operation and it is important to ensure that there are no other running
processes on the system that are modifying or using the file store for this workflow.
This is the intended to be the last call to the file store in a Toil run, called by the
batch system cleanup function upon batch system shutdown.
:param str workflowDir: The path to the cache directory
:param str workflowID: The workflow ID for this invocation of the workflow | Below is the instruction that describes the task:
### Input:
Run the deferred functions from any prematurely terminated jobs still lingering on the system
and carry out any necessary filestore-specific cleanup.
This is a destructive operation and it is important to ensure that there are no other running
processes on the system that are modifying or using the file store for this workflow.
This is the intended to be the last call to the file store in a Toil run, called by the
batch system cleanup function upon batch system shutdown.
:param str workflowDir: The path to the cache directory
:param str workflowID: The workflow ID for this invocation of the workflow
### Response:
def shutdownFileStore(workflowDir, workflowID):
    """
    Run the deferred functions from any prematurely terminated jobs still
    lingering on the system and carry out any necessary filestore-specific
    cleanup.

    This is a destructive operation: no other running process may be using or
    modifying this workflow's file store while it runs. It is intended to be
    the last call to the file store in a Toil run, made by the batch system
    cleanup function upon batch system shutdown.

    :param str workflowDir: The path to the cache directory
    :param str workflowID: The workflow ID for this invocation of the workflow
    """
    cacheDir = os.path.join(workflowDir, cacheDirName(workflowID))
    if not os.path.exists(cacheDir):
        # No cache directory, so this workflow ran without caching.
        NonCachingFileStore.shutdown(workflowDir)
    else:
        # A cache directory exists, so this was a cached run. No cache lock is
        # needed: this is the final cleanup of a job and no other conflicting
        # process should be using the cache.
        CachingFileStore.shutdown(cacheDir)
def _update_belief(self, belief_prop, clique, clique_potential, message=None):
"""
Method for updating the belief.
Parameters:
----------
belief_prop: Belief Propagation
Belief Propagation which needs to be updated.
in_clique: clique
The factor which needs to be updated corresponding to the input clique.
out_clique_potential: factor
Multiplying factor which will be multiplied to the factor corresponding to the clique.
"""
old_factor = belief_prop.junction_tree.get_factors(clique)
belief_prop.junction_tree.remove_factors(old_factor)
if message:
if message.scope() and clique_potential.scope():
new_factor = old_factor * message
new_factor = new_factor / clique_potential
else:
new_factor = old_factor
else:
new_factor = old_factor * clique_potential
belief_prop.junction_tree.add_factors(new_factor)
belief_prop.calibrate() | Method for updating the belief.
Parameters:
----------
belief_prop: Belief Propagation
Belief Propagation which needs to be updated.
in_clique: clique
The factor which needs to be updated corresponding to the input clique.
out_clique_potential: factor
Multiplying factor which will be multiplied to the factor corresponding to the clique. | Below is the instruction that describes the task:
### Input:
Method for updating the belief.
Parameters:
----------
belief_prop: Belief Propagation
Belief Propagation which needs to be updated.
in_clique: clique
The factor which needs to be updated corresponding to the input clique.
out_clique_potential: factor
Multiplying factor which will be multiplied to the factor corresponding to the clique.
### Response:
def _update_belief(self, belief_prop, clique, clique_potential, message=None):
"""
Method for updating the belief.
Parameters:
----------
belief_prop: Belief Propagation
Belief Propagation which needs to be updated.
in_clique: clique
The factor which needs to be updated corresponding to the input clique.
out_clique_potential: factor
Multiplying factor which will be multiplied to the factor corresponding to the clique.
"""
old_factor = belief_prop.junction_tree.get_factors(clique)
belief_prop.junction_tree.remove_factors(old_factor)
if message:
if message.scope() and clique_potential.scope():
new_factor = old_factor * message
new_factor = new_factor / clique_potential
else:
new_factor = old_factor
else:
new_factor = old_factor * clique_potential
belief_prop.junction_tree.add_factors(new_factor)
belief_prop.calibrate() |
def tag_audio_file(audio_file, tracklisting):
"""
Adds tracklisting as list to lyrics tag of audio file if not present.
Returns True if successful or not needed, False if tagging fails.
"""
try:
save_tag_to_audio_file(audio_file, tracklisting)
# TODO: is IOError required now or would the mediafile exception cover it?
except (IOError, mediafile.UnreadableFileError):
print("Unable to save tag to file:", audio_file)
audio_tagging_successful = False
except TagNotNeededError:
audio_tagging_successful = True
else:
audio_tagging_successful = True
return audio_tagging_successful | Adds tracklisting as list to lyrics tag of audio file if not present.
Returns True if successful or not needed, False if tagging fails. | Below is the instruction that describes the task:
### Input:
Adds tracklisting as list to lyrics tag of audio file if not present.
Returns True if successful or not needed, False if tagging fails.
### Response:
def tag_audio_file(audio_file, tracklisting):
"""
Adds tracklisting as list to lyrics tag of audio file if not present.
Returns True if successful or not needed, False if tagging fails.
"""
try:
save_tag_to_audio_file(audio_file, tracklisting)
# TODO: is IOError required now or would the mediafile exception cover it?
except (IOError, mediafile.UnreadableFileError):
print("Unable to save tag to file:", audio_file)
audio_tagging_successful = False
except TagNotNeededError:
audio_tagging_successful = True
else:
audio_tagging_successful = True
return audio_tagging_successful |
def isochone_ratio(e, rd, r_hyp):
"""
Get the isochone ratio as described in Spudich et al. (2013) PEER
report, page 88.
:param e:
a float defining the E-path length, which is the distance from
Pd(direction) point to hypocentre. In km.
:param rd:
float, distance from the site to the direct point.
:param r_hyp:
float, the hypocentre distance.
:returns:
c_prime, a float defining the isochone ratio
"""
if e == 0.:
c_prime = 0.8
elif e > 0.:
c_prime = 1. / ((1. / 0.8) - ((r_hyp - rd) / e))
return c_prime | Get the isochone ratio as described in Spudich et al. (2013) PEER
report, page 88.
:param e:
a float defining the E-path length, which is the distance from
Pd(direction) point to hypocentre. In km.
:param rd:
float, distance from the site to the direct point.
:param r_hyp:
float, the hypocentre distance.
:returns:
c_prime, a float defining the isochone ratio | Below is the the instruction that describes the task:
### Input:
Get the isochone ratio as described in Spudich et al. (2013) PEER
report, page 88.
:param e:
a float defining the E-path length, which is the distance from
Pd(direction) point to hypocentre. In km.
:param rd:
float, distance from the site to the direct point.
:param r_hyp:
float, the hypocentre distance.
:returns:
c_prime, a float defining the isochone ratio
### Response:
def isochone_ratio(e, rd, r_hyp):
"""
Get the isochone ratio as described in Spudich et al. (2013) PEER
report, page 88.
:param e:
a float defining the E-path length, which is the distance from
Pd(direction) point to hypocentre. In km.
:param rd:
float, distance from the site to the direct point.
:param r_hyp:
float, the hypocentre distance.
:returns:
c_prime, a float defining the isochone ratio
"""
if e == 0.:
c_prime = 0.8
elif e > 0.:
c_prime = 1. / ((1. / 0.8) - ((r_hyp - rd) / e))
return c_prime |
def sentiment(self):
"""
Returns average sentiment of document. Must have sentiment enabled in XML output.
:getter: returns average sentiment of the document
:type: float
"""
if self._sentiment is None:
results = self._xml.xpath('/root/document/sentences')
self._sentiment = float(results[0].get("averageSentiment", 0)) if len(results) > 0 else None
return self._sentiment | Returns average sentiment of document. Must have sentiment enabled in XML output.
:getter: returns average sentiment of the document
:type: float | Below is the the instruction that describes the task:
### Input:
Returns average sentiment of document. Must have sentiment enabled in XML output.
:getter: returns average sentiment of the document
:type: float
### Response:
def sentiment(self):
"""
Returns average sentiment of document. Must have sentiment enabled in XML output.
:getter: returns average sentiment of the document
:type: float
"""
if self._sentiment is None:
results = self._xml.xpath('/root/document/sentences')
self._sentiment = float(results[0].get("averageSentiment", 0)) if len(results) > 0 else None
return self._sentiment |
def _build(self, inputs, is_training):
"""Connects the module to some inputs.
Args:
inputs: Tensor, final dimension must be equal to embedding_dim. All other
leading dimensions will be flattened and treated as a large batch.
is_training: boolean, whether this connection is to training data. When
this is set to False, the internal moving average statistics will not be
updated.
Returns:
dict containing the following keys and values:
quantize: Tensor containing the quantized version of the input.
loss: Tensor containing the loss to optimize.
perplexity: Tensor containing the perplexity of the encodings.
encodings: Tensor containing the discrete encodings, ie which element
of the quantized space each input element was mapped to.
encoding_indices: Tensor containing the discrete encoding indices, ie
which element of the quantized space each input element was mapped to.
"""
# Ensure that the weights are read fresh for each timestep, which otherwise
# would not be guaranteed in an RNN setup. Note that this relies on inputs
# having a data dependency with the output of the previous timestep - if
# this is not the case, there is no way to serialize the order of weight
# updates within the module, so explicit external dependencies must be used.
with tf.control_dependencies([inputs]):
w = self._w.read_value()
input_shape = tf.shape(inputs)
with tf.control_dependencies([
tf.Assert(tf.equal(input_shape[-1], self._embedding_dim),
[input_shape])]):
flat_inputs = tf.reshape(inputs, [-1, self._embedding_dim])
distances = (tf.reduce_sum(flat_inputs**2, 1, keepdims=True)
- 2 * tf.matmul(flat_inputs, w)
+ tf.reduce_sum(w ** 2, 0, keepdims=True))
encoding_indices = tf.argmax(- distances, 1)
encodings = tf.one_hot(encoding_indices, self._num_embeddings)
encoding_indices = tf.reshape(encoding_indices, tf.shape(inputs)[:-1])
quantized = self.quantize(encoding_indices)
e_latent_loss = tf.reduce_mean((tf.stop_gradient(quantized) - inputs) ** 2)
if is_training:
updated_ema_cluster_size = moving_averages.assign_moving_average(
self._ema_cluster_size, tf.reduce_sum(encodings, 0), self._decay)
dw = tf.matmul(flat_inputs, encodings, transpose_a=True)
updated_ema_w = moving_averages.assign_moving_average(self._ema_w, dw,
self._decay)
n = tf.reduce_sum(updated_ema_cluster_size)
updated_ema_cluster_size = (
(updated_ema_cluster_size + self._epsilon)
/ (n + self._num_embeddings * self._epsilon) * n)
normalised_updated_ema_w = (
updated_ema_w / tf.reshape(updated_ema_cluster_size, [1, -1]))
with tf.control_dependencies([e_latent_loss]):
update_w = tf.assign(self._w, normalised_updated_ema_w)
with tf.control_dependencies([update_w]):
loss = self._commitment_cost * e_latent_loss
else:
loss = self._commitment_cost * e_latent_loss
quantized = inputs + tf.stop_gradient(quantized - inputs)
avg_probs = tf.reduce_mean(encodings, 0)
perplexity = tf.exp(- tf.reduce_sum(avg_probs * tf.log(avg_probs + 1e-10)))
return {'quantize': quantized,
'loss': loss,
'perplexity': perplexity,
'encodings': encodings,
'encoding_indices': encoding_indices,} | Connects the module to some inputs.
Args:
inputs: Tensor, final dimension must be equal to embedding_dim. All other
leading dimensions will be flattened and treated as a large batch.
is_training: boolean, whether this connection is to training data. When
this is set to False, the internal moving average statistics will not be
updated.
Returns:
dict containing the following keys and values:
quantize: Tensor containing the quantized version of the input.
loss: Tensor containing the loss to optimize.
perplexity: Tensor containing the perplexity of the encodings.
encodings: Tensor containing the discrete encodings, ie which element
of the quantized space each input element was mapped to.
encoding_indices: Tensor containing the discrete encoding indices, ie
which element of the quantized space each input element was mapped to. | Below is the the instruction that describes the task:
### Input:
Connects the module to some inputs.
Args:
inputs: Tensor, final dimension must be equal to embedding_dim. All other
leading dimensions will be flattened and treated as a large batch.
is_training: boolean, whether this connection is to training data. When
this is set to False, the internal moving average statistics will not be
updated.
Returns:
dict containing the following keys and values:
quantize: Tensor containing the quantized version of the input.
loss: Tensor containing the loss to optimize.
perplexity: Tensor containing the perplexity of the encodings.
encodings: Tensor containing the discrete encodings, ie which element
of the quantized space each input element was mapped to.
encoding_indices: Tensor containing the discrete encoding indices, ie
which element of the quantized space each input element was mapped to.
### Response:
def _build(self, inputs, is_training):
"""Connects the module to some inputs.
Args:
inputs: Tensor, final dimension must be equal to embedding_dim. All other
leading dimensions will be flattened and treated as a large batch.
is_training: boolean, whether this connection is to training data. When
this is set to False, the internal moving average statistics will not be
updated.
Returns:
dict containing the following keys and values:
quantize: Tensor containing the quantized version of the input.
loss: Tensor containing the loss to optimize.
perplexity: Tensor containing the perplexity of the encodings.
encodings: Tensor containing the discrete encodings, ie which element
of the quantized space each input element was mapped to.
encoding_indices: Tensor containing the discrete encoding indices, ie
which element of the quantized space each input element was mapped to.
"""
# Ensure that the weights are read fresh for each timestep, which otherwise
# would not be guaranteed in an RNN setup. Note that this relies on inputs
# having a data dependency with the output of the previous timestep - if
# this is not the case, there is no way to serialize the order of weight
# updates within the module, so explicit external dependencies must be used.
with tf.control_dependencies([inputs]):
w = self._w.read_value()
input_shape = tf.shape(inputs)
with tf.control_dependencies([
tf.Assert(tf.equal(input_shape[-1], self._embedding_dim),
[input_shape])]):
flat_inputs = tf.reshape(inputs, [-1, self._embedding_dim])
distances = (tf.reduce_sum(flat_inputs**2, 1, keepdims=True)
- 2 * tf.matmul(flat_inputs, w)
+ tf.reduce_sum(w ** 2, 0, keepdims=True))
encoding_indices = tf.argmax(- distances, 1)
encodings = tf.one_hot(encoding_indices, self._num_embeddings)
encoding_indices = tf.reshape(encoding_indices, tf.shape(inputs)[:-1])
quantized = self.quantize(encoding_indices)
e_latent_loss = tf.reduce_mean((tf.stop_gradient(quantized) - inputs) ** 2)
if is_training:
updated_ema_cluster_size = moving_averages.assign_moving_average(
self._ema_cluster_size, tf.reduce_sum(encodings, 0), self._decay)
dw = tf.matmul(flat_inputs, encodings, transpose_a=True)
updated_ema_w = moving_averages.assign_moving_average(self._ema_w, dw,
self._decay)
n = tf.reduce_sum(updated_ema_cluster_size)
updated_ema_cluster_size = (
(updated_ema_cluster_size + self._epsilon)
/ (n + self._num_embeddings * self._epsilon) * n)
normalised_updated_ema_w = (
updated_ema_w / tf.reshape(updated_ema_cluster_size, [1, -1]))
with tf.control_dependencies([e_latent_loss]):
update_w = tf.assign(self._w, normalised_updated_ema_w)
with tf.control_dependencies([update_w]):
loss = self._commitment_cost * e_latent_loss
else:
loss = self._commitment_cost * e_latent_loss
quantized = inputs + tf.stop_gradient(quantized - inputs)
avg_probs = tf.reduce_mean(encodings, 0)
perplexity = tf.exp(- tf.reduce_sum(avg_probs * tf.log(avg_probs + 1e-10)))
return {'quantize': quantized,
'loss': loss,
'perplexity': perplexity,
'encodings': encodings,
'encoding_indices': encoding_indices,} |
def filter_unbound_ports(query):
"""Filter ports not bound to a host or network"""
# hack for pep8 E711: comparison to None should be
# 'if cond is not None'
none = None
port_model = models_v2.Port
binding_level_model = ml2_models.PortBindingLevel
query = (query
.join_if_necessary(port_model)
.join_if_necessary(binding_level_model)
.filter(
binding_level_model.host != '',
port_model.device_id != none,
port_model.network_id != none))
return query | Filter ports not bound to a host or network | Below is the the instruction that describes the task:
### Input:
Filter ports not bound to a host or network
### Response:
def filter_unbound_ports(query):
"""Filter ports not bound to a host or network"""
# hack for pep8 E711: comparison to None should be
# 'if cond is not None'
none = None
port_model = models_v2.Port
binding_level_model = ml2_models.PortBindingLevel
query = (query
.join_if_necessary(port_model)
.join_if_necessary(binding_level_model)
.filter(
binding_level_model.host != '',
port_model.device_id != none,
port_model.network_id != none))
return query |
def downsample_bottleneck(x, output_channels, dim='2d', stride=1, scope='h'):
"""Downsamples 'x' by `stride` using a 1x1 convolution filter.
Args:
x: input tensor of size [N, H, W, C]
output_channels: Desired number of output channels.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
stride: What stride to use. Usually 1 or 2.
scope: Optional variable scope.
Returns:
A downsampled tensor of size [N, H/2, W/2, output_channels] if stride
is 2, else returns a tensor of size [N, H, W, output_channels] if
stride is 1.
"""
conv = CONFIG[dim]['conv']
with tf.variable_scope(scope):
x = conv(x, output_channels, 1, strides=stride, padding='SAME',
activation=None)
return x | Downsamples 'x' by `stride` using a 1x1 convolution filter.
Args:
x: input tensor of size [N, H, W, C]
output_channels: Desired number of output channels.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
stride: What stride to use. Usually 1 or 2.
scope: Optional variable scope.
Returns:
A downsampled tensor of size [N, H/2, W/2, output_channels] if stride
is 2, else returns a tensor of size [N, H, W, output_channels] if
stride is 1. | Below is the the instruction that describes the task:
### Input:
Downsamples 'x' by `stride` using a 1x1 convolution filter.
Args:
x: input tensor of size [N, H, W, C]
output_channels: Desired number of output channels.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
stride: What stride to use. Usually 1 or 2.
scope: Optional variable scope.
Returns:
A downsampled tensor of size [N, H/2, W/2, output_channels] if stride
is 2, else returns a tensor of size [N, H, W, output_channels] if
stride is 1.
### Response:
def downsample_bottleneck(x, output_channels, dim='2d', stride=1, scope='h'):
"""Downsamples 'x' by `stride` using a 1x1 convolution filter.
Args:
x: input tensor of size [N, H, W, C]
output_channels: Desired number of output channels.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
stride: What stride to use. Usually 1 or 2.
scope: Optional variable scope.
Returns:
A downsampled tensor of size [N, H/2, W/2, output_channels] if stride
is 2, else returns a tensor of size [N, H, W, output_channels] if
stride is 1.
"""
conv = CONFIG[dim]['conv']
with tf.variable_scope(scope):
x = conv(x, output_channels, 1, strides=stride, padding='SAME',
activation=None)
return x |
def __make_security_group_api_request(server_context, api, user_ids, group_id, container_path):
"""
Execute a request against the LabKey Security Controller Group Membership apis
:param server_context: A LabKey server context. See utils.create_server_context.
:param api: Action to execute
:param user_ids: user ids to apply action to
:param group_id: group id to apply action to
:param container_path: Additional container context path
:return: Request json object
"""
url = server_context.build_url(security_controller, api, container_path)
# if user_ids is only a single scalar make it an array
if not hasattr(user_ids, "__iter__"):
user_ids = [user_ids]
return server_context.make_request(url, {
'groupId': group_id,
'principalIds': user_ids
}) | Execute a request against the LabKey Security Controller Group Membership apis
:param server_context: A LabKey server context. See utils.create_server_context.
:param api: Action to execute
:param user_ids: user ids to apply action to
:param group_id: group id to apply action to
:param container_path: Additional container context path
:return: Request json object | Below is the the instruction that describes the task:
### Input:
Execute a request against the LabKey Security Controller Group Membership apis
:param server_context: A LabKey server context. See utils.create_server_context.
:param api: Action to execute
:param user_ids: user ids to apply action to
:param group_id: group id to apply action to
:param container_path: Additional container context path
:return: Request json object
### Response:
def __make_security_group_api_request(server_context, api, user_ids, group_id, container_path):
"""
Execute a request against the LabKey Security Controller Group Membership apis
:param server_context: A LabKey server context. See utils.create_server_context.
:param api: Action to execute
:param user_ids: user ids to apply action to
:param group_id: group id to apply action to
:param container_path: Additional container context path
:return: Request json object
"""
url = server_context.build_url(security_controller, api, container_path)
# if user_ids is only a single scalar make it an array
if not hasattr(user_ids, "__iter__"):
user_ids = [user_ids]
return server_context.make_request(url, {
'groupId': group_id,
'principalIds': user_ids
}) |
def p_sens_all_paren(self, p):
'senslist : AT LPAREN TIMES RPAREN'
p[0] = SensList(
(Sens(None, 'all', lineno=p.lineno(1)),), lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | senslist : AT LPAREN TIMES RPAREN | Below is the the instruction that describes the task:
### Input:
senslist : AT LPAREN TIMES RPAREN
### Response:
def p_sens_all_paren(self, p):
'senslist : AT LPAREN TIMES RPAREN'
p[0] = SensList(
(Sens(None, 'all', lineno=p.lineno(1)),), lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) |
def task_id_str(task_family, params):
"""
Returns a canonical string used to identify a particular task
:param task_family: The task family (class name) of the task
:param params: a dict mapping parameter names to their serialized values
:return: A unique, shortened identifier corresponding to the family and params
"""
# task_id is a concatenation of task family, the first values of the first 3 parameters
# sorted by parameter name and a md5hash of the family/parameters as a cananocalised json.
param_str = json.dumps(params, separators=(',', ':'), sort_keys=True)
param_hash = hashlib.md5(param_str.encode('utf-8')).hexdigest()
param_summary = '_'.join(p[:TASK_ID_TRUNCATE_PARAMS]
for p in (params[p] for p in sorted(params)[:TASK_ID_INCLUDE_PARAMS]))
param_summary = TASK_ID_INVALID_CHAR_REGEX.sub('_', param_summary)
return '{}_{}_{}'.format(task_family, param_summary, param_hash[:TASK_ID_TRUNCATE_HASH]) | Returns a canonical string used to identify a particular task
:param task_family: The task family (class name) of the task
:param params: a dict mapping parameter names to their serialized values
:return: A unique, shortened identifier corresponding to the family and params | Below is the the instruction that describes the task:
### Input:
Returns a canonical string used to identify a particular task
:param task_family: The task family (class name) of the task
:param params: a dict mapping parameter names to their serialized values
:return: A unique, shortened identifier corresponding to the family and params
### Response:
def task_id_str(task_family, params):
"""
Returns a canonical string used to identify a particular task
:param task_family: The task family (class name) of the task
:param params: a dict mapping parameter names to their serialized values
:return: A unique, shortened identifier corresponding to the family and params
"""
# task_id is a concatenation of task family, the first values of the first 3 parameters
# sorted by parameter name and a md5hash of the family/parameters as a cananocalised json.
param_str = json.dumps(params, separators=(',', ':'), sort_keys=True)
param_hash = hashlib.md5(param_str.encode('utf-8')).hexdigest()
param_summary = '_'.join(p[:TASK_ID_TRUNCATE_PARAMS]
for p in (params[p] for p in sorted(params)[:TASK_ID_INCLUDE_PARAMS]))
param_summary = TASK_ID_INVALID_CHAR_REGEX.sub('_', param_summary)
return '{}_{}_{}'.format(task_family, param_summary, param_hash[:TASK_ID_TRUNCATE_HASH]) |
def CharacterData(self, data):
'''
Expat character data event handler
'''
if data.strip():
data = data.encode()
if not self.data:
self.data = data
else:
self.data += data | Expat character data event handler | Below is the the instruction that describes the task:
### Input:
Expat character data event handler
### Response:
def CharacterData(self, data):
'''
Expat character data event handler
'''
if data.strip():
data = data.encode()
if not self.data:
self.data = data
else:
self.data += data |
def _handle_exception(self, exc):
"""Common exception handling behavior across all exceptions.
.. note:: This for internal use and should not be extended or used
directly.
"""
exc_info = sys.exc_info()
self.logger.exception(
'%s while processing message #%s',
exc.__class__.__name__, self._message.delivery_tag,
exc_info=exc_info)
self._measurement.set_tag('exception', exc.__class__.__name__)
if hasattr(exc, 'metric') and exc.metric:
self._measurement.set_tag('error', exc.metric)
self._process.send_exception_to_sentry(exc_info)
self._maybe_clear_confirmation_futures()
if not self._finished:
self.on_finish(exc)
self._finished = True | Common exception handling behavior across all exceptions.
.. note:: This for internal use and should not be extended or used
directly. | Below is the the instruction that describes the task:
### Input:
Common exception handling behavior across all exceptions.
.. note:: This for internal use and should not be extended or used
directly.
### Response:
def _handle_exception(self, exc):
"""Common exception handling behavior across all exceptions.
.. note:: This for internal use and should not be extended or used
directly.
"""
exc_info = sys.exc_info()
self.logger.exception(
'%s while processing message #%s',
exc.__class__.__name__, self._message.delivery_tag,
exc_info=exc_info)
self._measurement.set_tag('exception', exc.__class__.__name__)
if hasattr(exc, 'metric') and exc.metric:
self._measurement.set_tag('error', exc.metric)
self._process.send_exception_to_sentry(exc_info)
self._maybe_clear_confirmation_futures()
if not self._finished:
self.on_finish(exc)
self._finished = True |
def _updateVariantAnnotationSets(self, variantFile, dataUrl):
"""
Updates the variant annotation set associated with this variant using
information in the specified pysam variantFile.
"""
# TODO check the consistency of this between VCF files.
if not self.isAnnotated():
annotationType = None
for record in variantFile.header.records:
if record.type == "GENERIC":
if record.key == "SnpEffVersion":
annotationType = ANNOTATIONS_SNPEFF
elif record.key == "VEP":
version = record.value.split()[0]
# TODO we need _much_ more sophisticated processing
# of VEP versions here. When do they become
# incompatible?
if version == "v82":
annotationType = ANNOTATIONS_VEP_V82
elif version == "v77":
annotationType = ANNOTATIONS_VEP_V77
else:
# TODO raise a proper typed exception there with
# the file name as an argument.
raise ValueError(
"Unsupported VEP version {} in '{}'".format(
version, dataUrl))
if annotationType is None:
infoKeys = variantFile.header.info.keys()
if 'CSQ' in infoKeys or 'ANN' in infoKeys:
# TODO likewise, we want a properly typed exception that
# we can throw back to the repo manager UI and display
# as an import error.
raise ValueError(
"Unsupported annotations in '{}'".format(dataUrl))
if annotationType is not None:
vas = HtslibVariantAnnotationSet(self, self.getLocalId())
vas.populateFromFile(variantFile, annotationType)
self.addVariantAnnotationSet(vas) | Updates the variant annotation set associated with this variant using
information in the specified pysam variantFile. | Below is the the instruction that describes the task:
### Input:
Updates the variant annotation set associated with this variant using
information in the specified pysam variantFile.
### Response:
def _updateVariantAnnotationSets(self, variantFile, dataUrl):
"""
Updates the variant annotation set associated with this variant using
information in the specified pysam variantFile.
"""
# TODO check the consistency of this between VCF files.
if not self.isAnnotated():
annotationType = None
for record in variantFile.header.records:
if record.type == "GENERIC":
if record.key == "SnpEffVersion":
annotationType = ANNOTATIONS_SNPEFF
elif record.key == "VEP":
version = record.value.split()[0]
# TODO we need _much_ more sophisticated processing
# of VEP versions here. When do they become
# incompatible?
if version == "v82":
annotationType = ANNOTATIONS_VEP_V82
elif version == "v77":
annotationType = ANNOTATIONS_VEP_V77
else:
# TODO raise a proper typed exception there with
# the file name as an argument.
raise ValueError(
"Unsupported VEP version {} in '{}'".format(
version, dataUrl))
if annotationType is None:
infoKeys = variantFile.header.info.keys()
if 'CSQ' in infoKeys or 'ANN' in infoKeys:
# TODO likewise, we want a properly typed exception that
# we can throw back to the repo manager UI and display
# as an import error.
raise ValueError(
"Unsupported annotations in '{}'".format(dataUrl))
if annotationType is not None:
vas = HtslibVariantAnnotationSet(self, self.getLocalId())
vas.populateFromFile(variantFile, annotationType)
self.addVariantAnnotationSet(vas) |
def get_server_url(self):
"""Return the configured server url
:returns: server url
"""
server_host = self.driver_wrapper.config.get('Server', 'host')
server_port = self.driver_wrapper.config.get('Server', 'port')
server_username = self.driver_wrapper.config.get_optional('Server', 'username')
server_password = self.driver_wrapper.config.get_optional('Server', 'password')
server_auth = '{}:{}@'.format(server_username, server_password) if server_username and server_password else ''
server_url = 'http://{}{}:{}'.format(server_auth, server_host, server_port)
return server_url | Return the configured server url
:returns: server url | Below is the the instruction that describes the task:
### Input:
Return the configured server url
:returns: server url
### Response:
def get_server_url(self):
"""Return the configured server url
:returns: server url
"""
server_host = self.driver_wrapper.config.get('Server', 'host')
server_port = self.driver_wrapper.config.get('Server', 'port')
server_username = self.driver_wrapper.config.get_optional('Server', 'username')
server_password = self.driver_wrapper.config.get_optional('Server', 'password')
server_auth = '{}:{}@'.format(server_username, server_password) if server_username and server_password else ''
server_url = 'http://{}{}:{}'.format(server_auth, server_host, server_port)
return server_url |
def combine_with(self, additional_constraints):
"""Combines two sets of constraints into a coherent single set."""
x = additional_constraints
if not isinstance(additional_constraints, AffineWarpConstraints):
x = AffineWarpConstraints(additional_constraints)
new_constraints = []
for left, right in zip(self._constraints, x.constraints):
new_constraints.append([self._combine(x, y) for x, y in zip(left, right)])
return AffineWarpConstraints(new_constraints) | Combines two sets of constraints into a coherent single set. | Below is the the instruction that describes the task:
### Input:
Combines two sets of constraints into a coherent single set.
### Response:
def combine_with(self, additional_constraints):
"""Combines two sets of constraints into a coherent single set."""
x = additional_constraints
if not isinstance(additional_constraints, AffineWarpConstraints):
x = AffineWarpConstraints(additional_constraints)
new_constraints = []
for left, right in zip(self._constraints, x.constraints):
new_constraints.append([self._combine(x, y) for x, y in zip(left, right)])
return AffineWarpConstraints(new_constraints) |
def backbone(self):
"""Returns a new `Residue` containing only the backbone atoms.
Returns
-------
bb_monomer : Residue
`Residue` containing only the backbone atoms of the original
`Monomer`.
Raises
------
IndexError
Raise if the `atoms` dict does not contain the backbone
atoms (N, CA, C, O).
"""
try:
backbone = OrderedDict([('N', self.atoms['N']),
('CA', self.atoms['CA']),
('C', self.atoms['C']),
('O', self.atoms['O'])])
except KeyError:
missing_atoms = filter(lambda x: x not in self.atoms.keys(),
('N', 'CA', 'C', 'O')
)
raise KeyError('Error in residue {} {} {}, missing ({}) atoms. '
'`atoms` must be an `OrderedDict` with coordinates '
'defined for the backbone (N, CA, C, O) atoms.'
.format(self.ampal_parent.id, self.mol_code,
self.id, ', '.join(missing_atoms)))
bb_monomer = Residue(backbone, self.mol_code, monomer_id=self.id,
insertion_code=self.insertion_code,
is_hetero=self.is_hetero)
return bb_monomer | Returns a new `Residue` containing only the backbone atoms.
Returns
-------
bb_monomer : Residue
`Residue` containing only the backbone atoms of the original
`Monomer`.
Raises
------
IndexError
Raise if the `atoms` dict does not contain the backbone
atoms (N, CA, C, O). | Below is the the instruction that describes the task:
### Input:
Returns a new `Residue` containing only the backbone atoms.
Returns
-------
bb_monomer : Residue
`Residue` containing only the backbone atoms of the original
`Monomer`.
Raises
------
IndexError
Raise if the `atoms` dict does not contain the backbone
atoms (N, CA, C, O).
### Response:
def backbone(self):
"""Returns a new `Residue` containing only the backbone atoms.
Returns
-------
bb_monomer : Residue
`Residue` containing only the backbone atoms of the original
`Monomer`.
Raises
------
IndexError
Raise if the `atoms` dict does not contain the backbone
atoms (N, CA, C, O).
"""
try:
backbone = OrderedDict([('N', self.atoms['N']),
('CA', self.atoms['CA']),
('C', self.atoms['C']),
('O', self.atoms['O'])])
except KeyError:
missing_atoms = filter(lambda x: x not in self.atoms.keys(),
('N', 'CA', 'C', 'O')
)
raise KeyError('Error in residue {} {} {}, missing ({}) atoms. '
'`atoms` must be an `OrderedDict` with coordinates '
'defined for the backbone (N, CA, C, O) atoms.'
.format(self.ampal_parent.id, self.mol_code,
self.id, ', '.join(missing_atoms)))
bb_monomer = Residue(backbone, self.mol_code, monomer_id=self.id,
insertion_code=self.insertion_code,
is_hetero=self.is_hetero)
return bb_monomer |
def inner(self, x1, x2):
"""Calculate the constant-weighted inner product of two elements.
Parameters
----------
x1, x2 : `ProductSpaceElement`
Elements whose inner product is calculated.
Returns
-------
inner : float or complex
The inner product of the two provided elements.
"""
if self.exponent != 2.0:
raise NotImplementedError('no inner product defined for '
'exponent != 2 (got {})'
''.format(self.exponent))
inners = np.fromiter(
(x1i.inner(x2i) for x1i, x2i in zip(x1, x2)),
dtype=x1[0].space.dtype, count=len(x1))
inner = self.const * np.sum(inners)
return x1.space.field.element(inner) | Calculate the constant-weighted inner product of two elements.
Parameters
----------
x1, x2 : `ProductSpaceElement`
Elements whose inner product is calculated.
Returns
-------
inner : float or complex
The inner product of the two provided elements. | Below is the the instruction that describes the task:
### Input:
Calculate the constant-weighted inner product of two elements.
Parameters
----------
x1, x2 : `ProductSpaceElement`
Elements whose inner product is calculated.
Returns
-------
inner : float or complex
The inner product of the two provided elements.
### Response:
def inner(self, x1, x2):
"""Calculate the constant-weighted inner product of two elements.
Parameters
----------
x1, x2 : `ProductSpaceElement`
Elements whose inner product is calculated.
Returns
-------
inner : float or complex
The inner product of the two provided elements.
"""
if self.exponent != 2.0:
raise NotImplementedError('no inner product defined for '
'exponent != 2 (got {})'
''.format(self.exponent))
inners = np.fromiter(
(x1i.inner(x2i) for x1i, x2i in zip(x1, x2)),
dtype=x1[0].space.dtype, count=len(x1))
inner = self.const * np.sum(inners)
return x1.space.field.element(inner) |
def get_decision_tree(self, agent_id, timestamp=None, version=DEFAULT_DECISION_TREE_VERSION):
    """Get decision tree.

    :param str agent_id: the id of the agent to get the tree. It
        must be an str containing only characters in "a-zA-Z0-9_-" and
        must be between 1 and 36 characters.
    :param int timestamp: Optional. The decision tree is computed
        at this timestamp.
    :default timestamp: None, means that we get the tree computed
        with all its context history.
    :param version: version of the tree to get.
    :type version: str or int.
    :default version: default version of the tree.

    :return: decision tree.
    :rtype: dict.

    :raises CraftAiLongRequestTimeOutError: if the API doesn't get
        the tree in the time given by the configuration.
    """
    # Raises an error when agent_id is invalid
    self._check_agent_id(agent_id)

    timeout = self._config["decisionTreeRetrievalTimeout"]
    if timeout is False:
        # Retries are disabled: a single attempt, no client-side deadline.
        return self._get_decision_tree(agent_id, timestamp, version)

    deadline = current_time_ms() + timeout
    while True:
        if current_time_ms() > deadline:
            # Client side timeout
            raise CraftAiLongRequestTimeOutError()
        try:
            return self._get_decision_tree(agent_id, timestamp, version)
        except CraftAiLongRequestTimeOutError:
            # Server-side long-request timeout: retry until our deadline.
            continue
:param str agent_id: the id of the agent to get the tree. It
must be an str containing only characters in "a-zA-Z0-9_-" and
must be between 1 and 36 characters.
:param int timestamp: Optional. The decision tree is computed
at this timestamp.
:default timestamp: None, means that we get the tree computed
with all its context history.
:param version: version of the tree to get.
:type version: str or int.
:default version: default version of the tree.
:return: decision tree.
:rtype: dict.
:raises CraftAiLongRequestTimeOutError: if the API doesn't get
the tree in the time given by the configuration. | Below is the the instruction that describes the task:
### Input:
Get decision tree.
:param str agent_id: the id of the agent to get the tree. It
must be an str containing only characters in "a-zA-Z0-9_-" and
must be between 1 and 36 characters.
:param int timestamp: Optional. The decision tree is comptuted
at this timestamp.
:default timestamp: None, means that we get the tree computed
with all its context history.
:param version: version of the tree to get.
:type version: str or int.
:default version: default version of the tree.
:return: decision tree.
:rtype: dict.
:raises CraftAiLongRequestTimeOutError: if the API doesn't get
the tree in the time given by the configuration.
### Response:
def get_decision_tree(self, agent_id, timestamp=None, version=DEFAULT_DECISION_TREE_VERSION):
"""Get decision tree.
:param str agent_id: the id of the agent to get the tree. It
must be an str containing only characters in "a-zA-Z0-9_-" and
must be between 1 and 36 characters.
:param int timestamp: Optional. The decision tree is comptuted
at this timestamp.
:default timestamp: None, means that we get the tree computed
with all its context history.
:param version: version of the tree to get.
:type version: str or int.
:default version: default version of the tree.
:return: decision tree.
:rtype: dict.
:raises CraftAiLongRequestTimeOutError: if the API doesn't get
the tree in the time given by the configuration.
"""
# Raises an error when agent_id is invalid
self._check_agent_id(agent_id)
if self._config["decisionTreeRetrievalTimeout"] is False:
# Don't retry
return self._get_decision_tree(agent_id, timestamp, version)
start = current_time_ms()
while True:
now = current_time_ms()
if now - start > self._config["decisionTreeRetrievalTimeout"]:
# Client side timeout
raise CraftAiLongRequestTimeOutError()
try:
return self._get_decision_tree(agent_id, timestamp, version)
except CraftAiLongRequestTimeOutError:
# Do nothing and continue.
continue |
def documentation(self):
    """Return the documentation, from the documentation.md file, with template substitutions.

    The result is a scalar term, which has .text() and .html methods to do
    metadata substitution using Jinja.
    """
    parts = []
    record = self.build_source_files.documentation.record_content
    if record:
        parts.append(record)
    # Append one markdown section per non-empty metadata documentation entry.
    for title, body in self.metadata.documentation.items():
        if body:
            parts.append('\n### {}\n{}'.format(title.title(), body))
    return self.metadata.scalar_term(''.join(parts))
### Input:
Return the documentation, from the documentation.md file, with template substitutions
### Response:
def documentation(self):
"""Return the documentation, from the documentation.md file, with template substitutions"""
# Return the documentation as a scalar term, which has .text() and .html methods to do
# metadata substitution using Jinja
s = ''
rc = self.build_source_files.documentation.record_content
if rc:
s += rc
for k, v in self.metadata.documentation.items():
if v:
s += '\n### {}\n{}'.format(k.title(), v)
return self.metadata.scalar_term(s) |
def decr(self, conn, key, decrement=1):
    """Decrement the data for some item in-place.

    The data for the item is treated as the decimal representation of
    a 64-bit unsigned integer.

    :param key: ``bytes``, is the key of the item the client wishes
        to change
    :param decrement: ``int``, is the amount by which the client
        wants to decrease the item.
    :return: ``int`` new value of the item's data,
        after the decrement, or ``None`` to indicate the item with
        this value was not found
    """
    # Reject malformed keys before touching the connection.
    assert self._validate_key(key)
    new_value = yield from self._incr_decr(conn, b'decr', key, decrement)
    return new_value
decrementing it. The data for the item is treated as decimal
representation of a 64-bit unsigned integer.
:param key: ``bytes``, is the key of the item the client wishes
to change
:param decrement: ``int``, is the amount by which the client
wants to decrease the item.
:return: ``int`` new value of the item's data,
after the increment or ``None`` to indicate the item with
this value was not found | Below is the the instruction that describes the task:
### Input:
Command is used to change data for some item in-place,
decrementing it. The data for the item is treated as decimal
representation of a 64-bit unsigned integer.
:param key: ``bytes``, is the key of the item the client wishes
to change
:param decrement: ``int``, is the amount by which the client
wants to decrease the item.
:return: ``int`` new value of the item's data,
after the increment or ``None`` to indicate the item with
this value was not found
### Response:
def decr(self, conn, key, decrement=1):
"""Command is used to change data for some item in-place,
decrementing it. The data for the item is treated as decimal
representation of a 64-bit unsigned integer.
:param key: ``bytes``, is the key of the item the client wishes
to change
:param decrement: ``int``, is the amount by which the client
wants to decrease the item.
:return: ``int`` new value of the item's data,
after the increment or ``None`` to indicate the item with
this value was not found
"""
assert self._validate_key(key)
resp = yield from self._incr_decr(
conn, b'decr', key, decrement)
return resp |
def to_dict(self, nested=False):
    """Return dict object with model's data.

    :param nested: flag to return nested relationships' data if true
    :type: bool
    :return: dict
    """
    # Plain column values first.
    data = {column: getattr(self, column) for column in self.columns}
    if nested:
        # Serialize related objects (single instances and collections).
        for relation in self.relations:
            related = getattr(self, relation)
            if isinstance(related, SerializeMixin):
                data[relation] = related.to_dict()
            elif isinstance(related, Iterable):
                data[relation] = [item.to_dict() for item in related]
    return data
:param nested: flag to return nested relationships' data if true
:type: bool
:return: dict | Below is the the instruction that describes the task:
### Input:
Return dict object with model's data.
:param nested: flag to return nested relationships' data if true
:type: bool
:return: dict
### Response:
def to_dict(self, nested=False):
"""Return dict object with model's data.
:param nested: flag to return nested relationships' data if true
:type: bool
:return: dict
"""
result = dict()
for key in self.columns:
result[key] = getattr(self, key)
if nested:
for key in self.relations:
obj = getattr(self, key)
if isinstance(obj, SerializeMixin):
result[key] = obj.to_dict()
elif isinstance(obj, Iterable):
result[key] = [o.to_dict() for o in obj]
return result |
def _calculate_gain(self, cost_base, y_true, X, cost_mat, split):
""" Private function to calculate the gain in cost of using split in the
current node.
Parameters
----------
cost_base : float
Cost of the naive prediction
y_true : array indicator matrix
Ground truth (correct) labels.
X : array-like of shape = [n_samples, n_features]
The input samples.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example.
split : tuple of len = 2
split[0] = feature to split = j
split[1] = where to split = l
Returns
-------
tuple(gain : float, left node prediction : int)
"""
# Check if cost_base == 0, then no gain is possible
#TODO: This must be check in _best_split
if cost_base == 0.0:
return 0.0, int(np.sign(y_true.mean() - 0.5) == 1) # In case cost_b==0 and pi_1!=(0,1)
j, l = split
filter_Xl = (X[:, j] <= l)
filter_Xr = ~filter_Xl
n_samples, n_features = X.shape
# Check if one of the leafs is empty
#TODO: This must be check in _best_split
if np.nonzero(filter_Xl)[0].shape[0] in [0, n_samples]: # One leaft is empty
return 0.0, 0.0
# Split X in Xl and Xr according to rule split
Xl_cost, Xl_pred, _ = self._node_cost(y_true[filter_Xl], cost_mat[filter_Xl, :])
Xr_cost, _, _ = self._node_cost(y_true[filter_Xr], cost_mat[filter_Xr, :])
if self.criterion_weight:
n_samples_Xl = np.nonzero(filter_Xl)[0].shape[0]
Xl_w = n_samples_Xl * 1.0 / n_samples
Xr_w = 1 - Xl_w
gain = round((cost_base - (Xl_w * Xl_cost + Xr_w * Xr_cost)) / cost_base, 6)
else:
gain = round((cost_base - (Xl_cost + Xr_cost)) / cost_base, 6)
return gain, Xl_pred | Private function to calculate the gain in cost of using split in the
current node.
Parameters
----------
cost_base : float
Cost of the naive prediction
y_true : array indicator matrix
Ground truth (correct) labels.
X : array-like of shape = [n_samples, n_features]
The input samples.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example.
split : tuple of len = 2
split[0] = feature to split = j
split[1] = where to split = l
Returns
-------
tuple(gain : float, left node prediction : int) | Below is the the instruction that describes the task:
### Input:
Private function to calculate the gain in cost of using split in the
current node.
Parameters
----------
cost_base : float
Cost of the naive prediction
y_true : array indicator matrix
Ground truth (correct) labels.
X : array-like of shape = [n_samples, n_features]
The input samples.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example.
split : tuple of len = 2
split[0] = feature to split = j
split[1] = where to split = l
Returns
-------
tuple(gain : float, left node prediction : int)
### Response:
def _calculate_gain(self, cost_base, y_true, X, cost_mat, split):
""" Private function to calculate the gain in cost of using split in the
current node.
Parameters
----------
cost_base : float
Cost of the naive prediction
y_true : array indicator matrix
Ground truth (correct) labels.
X : array-like of shape = [n_samples, n_features]
The input samples.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example.
split : tuple of len = 2
split[0] = feature to split = j
split[1] = where to split = l
Returns
-------
tuple(gain : float, left node prediction : int)
"""
# Check if cost_base == 0, then no gain is possible
#TODO: This must be check in _best_split
if cost_base == 0.0:
return 0.0, int(np.sign(y_true.mean() - 0.5) == 1) # In case cost_b==0 and pi_1!=(0,1)
j, l = split
filter_Xl = (X[:, j] <= l)
filter_Xr = ~filter_Xl
n_samples, n_features = X.shape
# Check if one of the leafs is empty
#TODO: This must be check in _best_split
if np.nonzero(filter_Xl)[0].shape[0] in [0, n_samples]: # One leaft is empty
return 0.0, 0.0
# Split X in Xl and Xr according to rule split
Xl_cost, Xl_pred, _ = self._node_cost(y_true[filter_Xl], cost_mat[filter_Xl, :])
Xr_cost, _, _ = self._node_cost(y_true[filter_Xr], cost_mat[filter_Xr, :])
if self.criterion_weight:
n_samples_Xl = np.nonzero(filter_Xl)[0].shape[0]
Xl_w = n_samples_Xl * 1.0 / n_samples
Xr_w = 1 - Xl_w
gain = round((cost_base - (Xl_w * Xl_cost + Xr_w * Xr_cost)) / cost_base, 6)
else:
gain = round((cost_base - (Xl_cost + Xr_cost)) / cost_base, 6)
return gain, Xl_pred |
def host(self):
    # pylint: disable=too-many-branches
    """Process a passive check result for a host and its services.

    Builds the external commands corresponding to the host and services
    information provided in the JSON body of a PATCH/POST request, and
    forwards them to the Arbiter daemon.

    Expected JSON body: a ``name`` key (targeted host), an optional
    ``livestate`` (dict or list of dicts with at least ``state``), and
    an optional ``services`` list where each entry has a ``name`` and
    an optional ``livestate`` of the same shape.

    :return: dict with ``_status``, ``_result`` and ``_issues`` keys
    """
    logger.debug("Host status...")
    if cherrypy.request.method not in ["PATCH", "POST"]:
        cherrypy.response.status = 405
        return {'_status': 'ERR',
                '_error': 'You must only PATCH or POST on this endpoint.'}

    # Update an host
    # ---
    if not cherrypy.request.json:
        return {'_status': 'ERR',
                '_error': 'You must send parameters on this endpoint.'}

    host_name = None
    if cherrypy.request.json.get('name', None) is not None:
        host_name = cherrypy.request.json.get('name', None)

    if not host_name:
        return {'_status': 'ERR',
                '_error': 'Missing targeted host name.'}

    # Get provided data
    # ---
    logger.debug("Posted data: %s", cherrypy.request.json)

    # Check if the host exist in Alignak
    # ---
    # todo: Not mandatory but it would be clean...

    # Prepare response
    # ---
    ws_result = {'_status': 'OK',
                 '_result': ['%s is alive :)' % host_name],
                 '_issues': []}

    # Manage the host livestate
    # ---
    # Alert on unordered livestate if several information exist
    now = int(time.time())
    livestate = cherrypy.request.json.get('livestate', None)
    if not livestate:
        # No livestate provided: default to a simple UP state.
        livestate = {'state': "UP"}
    # Normalize to a list so single and multiple livestates share one path.
    if not isinstance(livestate, list):
        livestate = [livestate]

    # First pass: fill in missing states and detect unordered timestamps.
    last_ts = 0
    for ls in livestate:
        if ls.get('state', None) is None:
            ws_result['_issues'].append("Missing state for the host '%s' livestate, "
                                        "assuming host is UP!" % host_name)
            ls['state'] = 'UP'

        # Tag our own timestamp
        ls['_ws_timestamp'] = now
        try:
            # 'ABC' default deliberately triggers ValueError when no
            # timestamp was provided, skipping the ordering check.
            timestamp = int(ls.get('timestamp', 'ABC'))
            if timestamp < last_ts:
                logger.info("Got unordered timestamp for the host '%s'. "
                            "The Alignak scheduler may not handle the check result!",
                            host_name)
            last_ts = timestamp
        except ValueError:
            pass

    # Second pass: validate each state and raise the external commands.
    for ls in livestate:
        state = ls.get('state').upper()
        if state not in ['UP', 'DOWN', 'UNREACHABLE']:
            ws_result['_issues'].append("Host state should be UP, DOWN or UNREACHABLE"
                                        ", and not '%s'." % (state))
        else:
            # Create an host live state command
            command = self._build_host_livestate(host_name, ls)
            ws_result['_result'].append("Raised: %s" % command)
            # Notify the external command to our Arbiter daemon
            self.app.add(ExternalCommand(command))

    services = cherrypy.request.json.get('services', None)
    if not services:
        return ws_result

    # Same two-pass treatment for each declared service.
    for service in services:
        service_name = service.get('name', None)
        if service_name is None:
            ws_result['_issues'].append("A service does not have a 'name' property")
            continue

        livestate = service.get('livestate', None)
        if not livestate:
            # No livestate provided: default to a simple OK state.
            livestate = {'state': "OK"}
        if not isinstance(livestate, list):
            livestate = [livestate]

        last_ts = 0
        for ls in livestate:
            if ls.get('state', None) is None:
                ws_result['_issues'].append("Missing state for the service %s/%s livestate, "
                                            "assuming service is OK!"
                                            % (host_name, service_name))
                ls['state'] = 'OK'

            # Tag our own timestamp
            ls['_ws_timestamp'] = now
            try:
                timestamp = int(ls.get('timestamp', 'ABC'))
                if timestamp < last_ts:
                    logger.info("Got unordered timestamp for the service: %s/%s. "
                                "The Alignak scheduler may not handle the check result!",
                                host_name, service_name)
                last_ts = timestamp
            except ValueError:
                pass

        for ls in livestate:
            state = ls.get('state').upper()
            if state not in ['OK', 'WARNING', 'CRITICAL', 'UNKNOWN', 'UNREACHABLE']:
                ws_result['_issues'].append("Service %s/%s state must be OK, WARNING, "
                                            "CRITICAL, UNKNOWN or UNREACHABLE, and not %s."
                                            % (host_name, service_name, state))
            else:
                # Create a service live state command
                command = self._build_service_livestate(host_name, service_name, ls)
                ws_result['_result'].append("Raised: %s" % command)
                # Notify the external command to our Arbiter daemon
                self.app.add(ExternalCommand(command))

    return ws_result
This function builds the external commands corresponding to the host and services
provided information
:param host_name: host name
:param data: dictionary of the host properties to be modified
:return: command line | Below is the the instruction that describes the task:
### Input:
Get a passive checks for an host and its services
This function builds the external commands corresponding to the host and services
provided information
:param host_name: host name
:param data: dictionary of the host properties to be modified
:return: command line
### Response:
def host(self):
# pylint: disable=too-many-branches
"""Get a passive checks for an host and its services
This function builds the external commands corresponding to the host and services
provided information
:param host_name: host name
:param data: dictionary of the host properties to be modified
:return: command line
"""
logger.debug("Host status...")
if cherrypy.request.method not in ["PATCH", "POST"]:
cherrypy.response.status = 405
return {'_status': 'ERR',
'_error': 'You must only PATCH or POST on this endpoint.'}
# Update an host
# ---
if not cherrypy.request.json:
return {'_status': 'ERR',
'_error': 'You must send parameters on this endpoint.'}
host_name = None
if cherrypy.request.json.get('name', None) is not None:
host_name = cherrypy.request.json.get('name', None)
if not host_name:
return {'_status': 'ERR',
'_error': 'Missing targeted host name.'}
# Get provided data
# ---
logger.debug("Posted data: %s", cherrypy.request.json)
# Check if the host exist in Alignak
# ---
# todo: Not mandatory but it would be clean...
# Prepare response
# ---
ws_result = {'_status': 'OK',
'_result': ['%s is alive :)' % host_name],
'_issues': []}
# Manage the host livestate
# ---
# Alert on unordered livestate if several information exist
now = int(time.time())
livestate = cherrypy.request.json.get('livestate', None)
if not livestate:
# Create an host live state command
livestate = {'state': "UP"}
if not isinstance(livestate, list):
livestate = [livestate]
last_ts = 0
for ls in livestate:
if ls.get('state', None) is None:
ws_result['_issues'].append("Missing state for the host '%s' livestate, "
"assuming host is UP!" % host_name)
ls['state'] = 'UP'
# Tag our own timestamp
ls['_ws_timestamp'] = now
try:
timestamp = int(ls.get('timestamp', 'ABC'))
if timestamp < last_ts:
logger.info("Got unordered timestamp for the host '%s'. "
"The Alignak scheduler may not handle the check result!",
host_name)
last_ts = timestamp
except ValueError:
pass
for ls in livestate:
state = ls.get('state').upper()
if state not in ['UP', 'DOWN', 'UNREACHABLE']:
ws_result['_issues'].append("Host state should be UP, DOWN or UNREACHABLE"
", and not '%s'." % (state))
else:
# Create an host live state command
command = self._build_host_livestate(host_name, ls)
ws_result['_result'].append("Raised: %s" % command)
# Notify the external command to our Arbiter daemon
self.app.add(ExternalCommand(command))
services = cherrypy.request.json.get('services', None)
if not services:
return ws_result
for service in services:
service_name = service.get('name', None)
if service_name is None:
ws_result['_issues'].append("A service does not have a 'name' property")
continue
livestate = service.get('livestate', None)
if not livestate:
# Create a service live state command
livestate = {'state': "OK"}
if not isinstance(livestate, list):
livestate = [livestate]
last_ts = 0
for ls in livestate:
if ls.get('state', None) is None:
ws_result['_issues'].append("Missing state for the service %s/%s livestate, "
"assuming service is OK!"
% (host_name, service_name))
ls['state'] = 'OK'
# Tag our own timestamp
ls['_ws_timestamp'] = now
try:
timestamp = int(ls.get('timestamp', 'ABC'))
if timestamp < last_ts:
logger.info("Got unordered timestamp for the service: %s/%s. "
"The Alignak scheduler may not handle the check result!",
host_name, service_name)
last_ts = timestamp
except ValueError:
pass
for ls in livestate:
state = ls.get('state').upper()
if state not in ['OK', 'WARNING', 'CRITICAL', 'UNKNOWN', 'UNREACHABLE']:
ws_result['_issues'].append("Service %s/%s state must be OK, WARNING, "
"CRITICAL, UNKNOWN or UNREACHABLE, and not %s."
% (host_name, service_name, state))
else:
# Create a service live state command
command = self._build_service_livestate(host_name, service_name, ls)
ws_result['_result'].append("Raised: %s" % command)
# Notify the external command to our Arbiter daemon
self.app.add(ExternalCommand(command))
return ws_result |
def sample_slice(args):
    """
    Return a new live point proposed by a series of random slices
    away from an existing live point. Standard "Gibbs-like" implementation
    where a single multivariate "slice" is a combination of `ndim` univariate
    slices through each axis.

    Parameters
    ----------
    u : `~numpy.ndarray` with shape (npdim,)
        Position of the initial sample. **This is a copy of an existing live
        point.**

    loglstar : float
        Ln(likelihood) bound.

    axes : `~numpy.ndarray` with shape (ndim, ndim)
        Axes used to propose new points. For slices new positions are
        proposed along the orthogonal basis defined by :data:`axes`.

    scale : float
        Value used to scale the provided axes.

    prior_transform : function
        Function transforming a sample from the unit cube to the parameter
        space of interest according to the prior.

    loglikelihood : function
        Function returning ln(likelihood) given parameters as a 1-d `~numpy`
        array of length `ndim`.

    kwargs : dict
        A dictionary of additional method-specific parameters.

    Returns
    -------
    u : `~numpy.ndarray` with shape (npdim,)
        Position of the final proposed point within the unit cube.

    v : `~numpy.ndarray` with shape (ndim,)
        Position of the final proposed point in the target parameter space.

    logl : float
        Ln(likelihood) of the final proposed point.

    nc : int
        Number of function calls used to generate the sample.

    blob : dict
        Collection of ancillary quantities used to tune :data:`scale`.

    """

    # Unzipping.
    (u, loglstar, axes, scale,
     prior_transform, loglikelihood, kwargs) = args
    rstate = np.random

    # Periodicity.
    nonperiodic = kwargs.get('nonperiodic', None)

    # Setup.
    n = len(u)
    slices = kwargs.get('slices', 5)  # number of slices
    nc = 0
    nexpand = 0
    ncontract = 0
    fscale = []

    # Modifying axes and computing lengths.
    axes = scale * axes.T  # scale based on past tuning
    axlens = [linalg.norm(axis) for axis in axes]

    # Slice sampling loop.
    for it in range(slices):

        # Shuffle axis update order.
        idxs = np.arange(n)
        rstate.shuffle(idxs)

        # Slice sample along a random direction.
        for idx in idxs:

            # Select axis.
            axis = axes[idx]
            axlen = axlens[idx]

            # Define starting "window".
            r = rstate.rand()  # initial scale/offset
            u_l = u - r * axis  # left bound
            if unitcheck(u_l, nonperiodic):
                v_l = prior_transform(np.array(u_l))
                logl_l = loglikelihood(np.array(v_l))
            else:
                # Out of the unit cube: treat as zero likelihood.
                logl_l = -np.inf
            nc += 1
            nexpand += 1
            u_r = u + (1 - r) * axis  # right bound
            if unitcheck(u_r, nonperiodic):
                v_r = prior_transform(np.array(u_r))
                logl_r = loglikelihood(np.array(v_r))
            else:
                logl_r = -np.inf
            nc += 1
            nexpand += 1

            # "Stepping out" the left and right bounds.
            while logl_l >= loglstar:
                u_l -= axis
                if unitcheck(u_l, nonperiodic):
                    v_l = prior_transform(np.array(u_l))
                    logl_l = loglikelihood(np.array(v_l))
                else:
                    logl_l = -np.inf
                nc += 1
                nexpand += 1
            while logl_r >= loglstar:
                u_r += axis
                if unitcheck(u_r, nonperiodic):
                    v_r = prior_transform(np.array(u_r))
                    logl_r = loglikelihood(np.array(v_r))
                else:
                    logl_r = -np.inf
                nc += 1
                nexpand += 1

            # Sample within limits. If the sample is not valid, shrink
            # the limits until we hit the `loglstar` bound.
            while True:
                u_hat = u_r - u_l
                u_prop = u_l + rstate.rand() * u_hat  # scale from left
                if unitcheck(u_prop, nonperiodic):
                    v_prop = prior_transform(np.array(u_prop))
                    logl_prop = loglikelihood(np.array(v_prop))
                else:
                    logl_prop = -np.inf
                nc += 1
                ncontract += 1

                # If we succeed, move to the new position.
                if logl_prop >= loglstar:
                    window = linalg.norm(u_hat)  # length of window
                    fscale.append(window / axlen)
                    u = u_prop
                    break
                # If we fail, check if the new point is to the left/right of
                # our original point along our proposal axis and update
                # the bounds accordingly.
                else:
                    s = np.dot(u_prop - u, u_hat)  # check sign (+/-)
                    if s < 0:  # left
                        u_l = u_prop
                    elif s > 0:  # right
                        u_r = u_prop
                    else:
                        # s == 0 means the interval collapsed onto the
                        # starting point: the sampler cannot make progress.
                        raise RuntimeError("Slice sampler has failed to find "
                                           "a valid point. Some useful "
                                           "output quantities:\n"
                                           "u: {0}\n"
                                           "u_left: {1}\n"
                                           "u_right: {2}\n"
                                           "u_hat: {3}\n"
                                           "u_prop: {4}\n"
                                           "loglstar: {5}\n"
                                           "logl_prop: {6}\n"
                                           "axes: {7}\n"
                                           "axlens: {8}\n"
                                           "s: {9}."
                                           .format(u, u_l, u_r, u_hat, u_prop,
                                                   loglstar, logl_prop,
                                                   axes, axlens, s))

    # NOTE(review): np.mean(fscale) warns/returns NaN if the contraction
    # loop never appended — cannot happen here since every slice ends with
    # a successful proposal, but worth keeping in mind if the loop changes.
    blob = {'fscale': np.mean(fscale),
            'nexpand': nexpand, 'ncontract': ncontract}

    return u_prop, v_prop, logl_prop, nc, blob
away from an existing live point. Standard "Gibbs-like" implementation where
a single multivariate "slice" is a combination of `ndim` univariate slices
through each axis.
Parameters
----------
u : `~numpy.ndarray` with shape (npdim,)
Position of the initial sample. **This is a copy of an existing live
point.**
loglstar : float
Ln(likelihood) bound.
axes : `~numpy.ndarray` with shape (ndim, ndim)
Axes used to propose new points. For slices new positions are
proposed along the arthogonal basis defined by :data:`axes`.
scale : float
Value used to scale the provided axes.
prior_transform : function
Function transforming a sample from the a unit cube to the parameter
space of interest according to the prior.
loglikelihood : function
Function returning ln(likelihood) given parameters as a 1-d `~numpy`
array of length `ndim`.
kwargs : dict
A dictionary of additional method-specific parameters.
Returns
-------
u : `~numpy.ndarray` with shape (npdim,)
Position of the final proposed point within the unit cube.
v : `~numpy.ndarray` with shape (ndim,)
Position of the final proposed point in the target parameter space.
logl : float
Ln(likelihood) of the final proposed point.
nc : int
Number of function calls used to generate the sample.
blob : dict
Collection of ancillary quantities used to tune :data:`scale`. | Below is the the instruction that describes the task:
### Input:
Return a new live point proposed by a series of random slices
away from an existing live point. Standard "Gibs-like" implementation where
a single multivariate "slice" is a combination of `ndim` univariate slices
through each axis.
Parameters
----------
u : `~numpy.ndarray` with shape (npdim,)
Position of the initial sample. **This is a copy of an existing live
point.**
loglstar : float
Ln(likelihood) bound.
axes : `~numpy.ndarray` with shape (ndim, ndim)
Axes used to propose new points. For slices new positions are
proposed along the arthogonal basis defined by :data:`axes`.
scale : float
Value used to scale the provided axes.
prior_transform : function
Function transforming a sample from the a unit cube to the parameter
space of interest according to the prior.
loglikelihood : function
Function returning ln(likelihood) given parameters as a 1-d `~numpy`
array of length `ndim`.
kwargs : dict
A dictionary of additional method-specific parameters.
Returns
-------
u : `~numpy.ndarray` with shape (npdim,)
Position of the final proposed point within the unit cube.
v : `~numpy.ndarray` with shape (ndim,)
Position of the final proposed point in the target parameter space.
logl : float
Ln(likelihood) of the final proposed point.
nc : int
Number of function calls used to generate the sample.
blob : dict
Collection of ancillary quantities used to tune :data:`scale`.
### Response:
def sample_slice(args):
"""
Return a new live point proposed by a series of random slices
away from an existing live point. Standard "Gibs-like" implementation where
a single multivariate "slice" is a combination of `ndim` univariate slices
through each axis.
Parameters
----------
u : `~numpy.ndarray` with shape (npdim,)
Position of the initial sample. **This is a copy of an existing live
point.**
loglstar : float
Ln(likelihood) bound.
axes : `~numpy.ndarray` with shape (ndim, ndim)
Axes used to propose new points. For slices new positions are
proposed along the arthogonal basis defined by :data:`axes`.
scale : float
Value used to scale the provided axes.
prior_transform : function
Function transforming a sample from the a unit cube to the parameter
space of interest according to the prior.
loglikelihood : function
Function returning ln(likelihood) given parameters as a 1-d `~numpy`
array of length `ndim`.
kwargs : dict
A dictionary of additional method-specific parameters.
Returns
-------
u : `~numpy.ndarray` with shape (npdim,)
Position of the final proposed point within the unit cube.
v : `~numpy.ndarray` with shape (ndim,)
Position of the final proposed point in the target parameter space.
logl : float
Ln(likelihood) of the final proposed point.
nc : int
Number of function calls used to generate the sample.
blob : dict
Collection of ancillary quantities used to tune :data:`scale`.
"""
# Unzipping.
(u, loglstar, axes, scale,
prior_transform, loglikelihood, kwargs) = args
rstate = np.random
# Periodicity.
nonperiodic = kwargs.get('nonperiodic', None)
# Setup.
n = len(u)
slices = kwargs.get('slices', 5) # number of slices
nc = 0
nexpand = 0
ncontract = 0
fscale = []
# Modifying axes and computing lengths.
axes = scale * axes.T # scale based on past tuning
axlens = [linalg.norm(axis) for axis in axes]
# Slice sampling loop.
for it in range(slices):
# Shuffle axis update order.
idxs = np.arange(n)
rstate.shuffle(idxs)
# Slice sample along a random direction.
for idx in idxs:
# Select axis.
axis = axes[idx]
axlen = axlens[idx]
# Define starting "window".
r = rstate.rand() # initial scale/offset
u_l = u - r * axis # left bound
if unitcheck(u_l, nonperiodic):
v_l = prior_transform(np.array(u_l))
logl_l = loglikelihood(np.array(v_l))
else:
logl_l = -np.inf
nc += 1
nexpand += 1
u_r = u + (1 - r) * axis # right bound
if unitcheck(u_r, nonperiodic):
v_r = prior_transform(np.array(u_r))
logl_r = loglikelihood(np.array(v_r))
else:
logl_r = -np.inf
nc += 1
nexpand += 1
# "Stepping out" the left and right bounds.
while logl_l >= loglstar:
u_l -= axis
if unitcheck(u_l, nonperiodic):
v_l = prior_transform(np.array(u_l))
logl_l = loglikelihood(np.array(v_l))
else:
logl_l = -np.inf
nc += 1
nexpand += 1
while logl_r >= loglstar:
u_r += axis
if unitcheck(u_r, nonperiodic):
v_r = prior_transform(np.array(u_r))
logl_r = loglikelihood(np.array(v_r))
else:
logl_r = -np.inf
nc += 1
nexpand += 1
# Sample within limits. If the sample is not valid, shrink
# the limits until we hit the `loglstar` bound.
while True:
u_hat = u_r - u_l
u_prop = u_l + rstate.rand() * u_hat # scale from left
if unitcheck(u_prop, nonperiodic):
v_prop = prior_transform(np.array(u_prop))
logl_prop = loglikelihood(np.array(v_prop))
else:
logl_prop = -np.inf
nc += 1
ncontract += 1
# If we succeed, move to the new position.
if logl_prop >= loglstar:
window = linalg.norm(u_hat) # length of window
fscale.append(window / axlen)
u = u_prop
break
# If we fail, check if the new point is to the left/right of
# our original point along our proposal axis and update
# the bounds accordingly.
else:
s = np.dot(u_prop - u, u_hat) # check sign (+/-)
if s < 0: # left
u_l = u_prop
elif s > 0: # right
u_r = u_prop
else:
raise RuntimeError("Slice sampler has failed to find "
"a valid point. Some useful "
"output quantities:\n"
"u: {0}\n"
"u_left: {1}\n"
"u_right: {2}\n"
"u_hat: {3}\n"
"u_prop: {4}\n"
"loglstar: {5}\n"
"logl_prop: {6}\n"
"axes: {7}\n"
"axlens: {8}\n"
"s: {9}."
.format(u, u_l, u_r, u_hat, u_prop,
loglstar, logl_prop,
axes, axlens, s))
blob = {'fscale': np.mean(fscale),
'nexpand': nexpand, 'ncontract': ncontract}
return u_prop, v_prop, logl_prop, nc, blob |
def disconnect(self):
"""Disconnect from the Graphite server if connected."""
if self.sock is not None:
try:
self.sock.close()
except socket.error:
pass
finally:
self.sock = None | Disconnect from the Graphite server if connected. | Below is the instruction that describes the task:
### Input:
Disconnect from the Graphite server if connected.
### Response:
def disconnect(self):
"""Disconnect from the Graphite server if connected."""
if self.sock is not None:
try:
self.sock.close()
except socket.error:
pass
finally:
self.sock = None |
def standard_input():
"""Generator that yields lines from standard input."""
with click.get_text_stream("stdin") as stdin:
while stdin.readable():
line = stdin.readline()
if line:
yield line.strip().encode("utf-8") | Generator that yields lines from standard input. | Below is the instruction that describes the task:
### Input:
Generator that yields lines from standard input.
### Response:
def standard_input():
"""Generator that yields lines from standard input."""
with click.get_text_stream("stdin") as stdin:
while stdin.readable():
line = stdin.readline()
if line:
yield line.strip().encode("utf-8") |
def make_random_histogram(length=100, num_bins=10):
"Returns a sequence of histogram density values that sum to 1.0"
hist, bin_edges = np.histogram(np.random.random(length),
bins=num_bins, density=True)
# to ensure they sum to 1.0
hist = hist / sum(hist)
if len(hist) < 2:
raise ValueError('Invalid histogram')
return hist | Returns a sequence of histogram density values that sum to 1.0 | Below is the instruction that describes the task:
### Input:
Returns a sequence of histogram density values that sum to 1.0
### Response:
def make_random_histogram(length=100, num_bins=10):
"Returns a sequence of histogram density values that sum to 1.0"
hist, bin_edges = np.histogram(np.random.random(length),
bins=num_bins, density=True)
# to ensure they sum to 1.0
hist = hist / sum(hist)
if len(hist) < 2:
raise ValueError('Invalid histogram')
return hist |
def _config_parser_constrained(self, read_only):
""":return: Config Parser constrained to our submodule in read or write mode"""
try:
pc = self.parent_commit
except ValueError:
pc = None
# end handle empty parent repository
parser = self._config_parser(self.repo, pc, read_only)
parser.set_submodule(self)
return SectionConstraint(parser, sm_section(self.name)) | :return: Config Parser constrained to our submodule in read or write mode | Below is the instruction that describes the task:
### Input:
:return: Config Parser constrained to our submodule in read or write mode
### Response:
def _config_parser_constrained(self, read_only):
""":return: Config Parser constrained to our submodule in read or write mode"""
try:
pc = self.parent_commit
except ValueError:
pc = None
# end handle empty parent repository
parser = self._config_parser(self.repo, pc, read_only)
parser.set_submodule(self)
return SectionConstraint(parser, sm_section(self.name)) |
def argument(self, argument_dest, arg_type=None, **kwargs):
""" Register an argument for the given command scope using a knack.arguments.CLIArgumentType
:param argument_dest: The destination argument to add this argument type to
:type argument_dest: str
:param arg_type: Predefined CLIArgumentType definition to register, as modified by any provided kwargs.
:type arg_type: knack.arguments.CLIArgumentType
:param kwargs: Possible values: `options_list`, `validator`, `completer`, `nargs`, `action`, `const`, `default`,
`type`, `choices`, `required`, `help`, `metavar`. See /docs/arguments.md.
"""
self._check_stale()
if not self._applicable():
return
deprecate_action = self._handle_deprecations(argument_dest, **kwargs)
if deprecate_action:
kwargs['action'] = deprecate_action
self.command_loader.argument_registry.register_cli_argument(self.command_scope,
argument_dest,
arg_type,
**kwargs) | Register an argument for the given command scope using a knack.arguments.CLIArgumentType
:param argument_dest: The destination argument to add this argument type to
:type argument_dest: str
:param arg_type: Predefined CLIArgumentType definition to register, as modified by any provided kwargs.
:type arg_type: knack.arguments.CLIArgumentType
:param kwargs: Possible values: `options_list`, `validator`, `completer`, `nargs`, `action`, `const`, `default`,
`type`, `choices`, `required`, `help`, `metavar`. See /docs/arguments.md. | Below is the instruction that describes the task:
### Input:
Register an argument for the given command scope using a knack.arguments.CLIArgumentType
:param argument_dest: The destination argument to add this argument type to
:type argument_dest: str
:param arg_type: Predefined CLIArgumentType definition to register, as modified by any provided kwargs.
:type arg_type: knack.arguments.CLIArgumentType
:param kwargs: Possible values: `options_list`, `validator`, `completer`, `nargs`, `action`, `const`, `default`,
`type`, `choices`, `required`, `help`, `metavar`. See /docs/arguments.md.
### Response:
def argument(self, argument_dest, arg_type=None, **kwargs):
""" Register an argument for the given command scope using a knack.arguments.CLIArgumentType
:param argument_dest: The destination argument to add this argument type to
:type argument_dest: str
:param arg_type: Predefined CLIArgumentType definition to register, as modified by any provided kwargs.
:type arg_type: knack.arguments.CLIArgumentType
:param kwargs: Possible values: `options_list`, `validator`, `completer`, `nargs`, `action`, `const`, `default`,
`type`, `choices`, `required`, `help`, `metavar`. See /docs/arguments.md.
"""
self._check_stale()
if not self._applicable():
return
deprecate_action = self._handle_deprecations(argument_dest, **kwargs)
if deprecate_action:
kwargs['action'] = deprecate_action
self.command_loader.argument_registry.register_cli_argument(self.command_scope,
argument_dest,
arg_type,
**kwargs) |
def link(self, content, link, title=''):
""" Emit a link, potentially remapped based on our embed or static rules """
link = links.resolve(link, self._search_path,
self._config.get('absolute'))
return '{}{}</a>'.format(
utils.make_tag('a', {
'href': link,
'title': title if title else None
}),
content) | Emit a link, potentially remapped based on our embed or static rules | Below is the instruction that describes the task:
### Input:
Emit a link, potentially remapped based on our embed or static rules
### Response:
def link(self, content, link, title=''):
""" Emit a link, potentially remapped based on our embed or static rules """
link = links.resolve(link, self._search_path,
self._config.get('absolute'))
return '{}{}</a>'.format(
utils.make_tag('a', {
'href': link,
'title': title if title else None
}),
content) |
def post_user_login(sender, request, user, **kwargs):
"""
Create a profile for the user, when missing.
Make sure that all neccessary user groups exist and have the right permissions.
We need that automatism for people not calling the configure tool,
admin rights for admins after the first login, and similar cases.
"""
logger.debug("Running post-processing for user login.")
# Users created by social login or admins have no profile.
# We fix that during their first login.
try:
with transaction.atomic():
profile, created = UserProfile.objects.get_or_create(user=user)
if created:
logger.info("Created missing profile for user " + str(user.pk))
except Exception as e:
logger.error("Error while creating user profile: " + str(e))
check_permission_system() | Create a profile for the user, when missing.
Make sure that all neccessary user groups exist and have the right permissions.
We need that automatism for people not calling the configure tool,
admin rights for admins after the first login, and similar cases. | Below is the instruction that describes the task:
### Input:
Create a profile for the user, when missing.
Make sure that all neccessary user groups exist and have the right permissions.
We need that automatism for people not calling the configure tool,
admin rights for admins after the first login, and similar cases.
### Response:
def post_user_login(sender, request, user, **kwargs):
"""
Create a profile for the user, when missing.
Make sure that all neccessary user groups exist and have the right permissions.
We need that automatism for people not calling the configure tool,
admin rights for admins after the first login, and similar cases.
"""
logger.debug("Running post-processing for user login.")
# Users created by social login or admins have no profile.
# We fix that during their first login.
try:
with transaction.atomic():
profile, created = UserProfile.objects.get_or_create(user=user)
if created:
logger.info("Created missing profile for user " + str(user.pk))
except Exception as e:
logger.error("Error while creating user profile: " + str(e))
check_permission_system() |
def _append_plain_text(self, text, before_prompt=False):
""" Appends plain text, processing ANSI codes if enabled.
"""
self._append_custom(self._insert_plain_text, text, before_prompt) | Appends plain text, processing ANSI codes if enabled. | Below is the instruction that describes the task:
### Input:
Appends plain text, processing ANSI codes if enabled.
### Response:
def _append_plain_text(self, text, before_prompt=False):
""" Appends plain text, processing ANSI codes if enabled.
"""
self._append_custom(self._insert_plain_text, text, before_prompt) |
def _pad_arrays(t, arrays, indices, span, period):
"""Internal routine to pad arrays for periodic models."""
N = len(t)
if indices is None:
indices = np.arange(N)
pad_left = max(0, 0 - np.min(indices - span // 2))
pad_right = max(0, np.max(indices + span - span // 2) - (N - 1))
if pad_left + pad_right > 0:
Nright, pad_right = divmod(pad_right, N)
Nleft, pad_left = divmod(pad_left, N)
t = np.concatenate([t[N - pad_left:] - (Nleft + 1) * period]
+ [t + i * period
for i in range(-Nleft, Nright + 1)]
+ [t[:pad_right] + (Nright + 1) * period])
arrays = [np.concatenate([a[N - pad_left:]]
+ (Nleft + Nright + 1) * [a]
+ [a[:pad_right]])
for a in arrays]
pad_left = pad_left % N
Nright = pad_right / N
pad_right = pad_right % N
return (t, arrays, slice(pad_left + Nleft * N,
pad_left + (Nleft + 1) * N))
else:
return (t, arrays, slice(None)) | Internal routine to pad arrays for periodic models. | Below is the instruction that describes the task:
### Input:
Internal routine to pad arrays for periodic models.
### Response:
def _pad_arrays(t, arrays, indices, span, period):
"""Internal routine to pad arrays for periodic models."""
N = len(t)
if indices is None:
indices = np.arange(N)
pad_left = max(0, 0 - np.min(indices - span // 2))
pad_right = max(0, np.max(indices + span - span // 2) - (N - 1))
if pad_left + pad_right > 0:
Nright, pad_right = divmod(pad_right, N)
Nleft, pad_left = divmod(pad_left, N)
t = np.concatenate([t[N - pad_left:] - (Nleft + 1) * period]
+ [t + i * period
for i in range(-Nleft, Nright + 1)]
+ [t[:pad_right] + (Nright + 1) * period])
arrays = [np.concatenate([a[N - pad_left:]]
+ (Nleft + Nright + 1) * [a]
+ [a[:pad_right]])
for a in arrays]
pad_left = pad_left % N
Nright = pad_right / N
pad_right = pad_right % N
return (t, arrays, slice(pad_left + Nleft * N,
pad_left + (Nleft + 1) * N))
else:
return (t, arrays, slice(None)) |
def multiply(self, number):
"""Return a Vector as the product of the vector and a real number."""
return self.from_list([x * number for x in self.to_list()]) | Return a Vector as the product of the vector and a real number. | Below is the instruction that describes the task:
### Input:
Return a Vector as the product of the vector and a real number.
### Response:
def multiply(self, number):
"""Return a Vector as the product of the vector and a real number."""
return self.from_list([x * number for x in self.to_list()]) |
def environmentvip_step(self, finality='', client='', environmentp44=''):
"""
List finality, client or environment vip list.
Param finality: finality of environment(optional)
Param client: client of environment(optional)
Param environmentp44: environmentp44(optional)
Return finality list: when request has no finality and client.
Return client list: when request has only finality.
Return list environment vip: when request has finality and client.
Return environment vip: when request has finality, client and environmentvip
"""
uri = 'api/v3/environment-vip/step/?finality=%s&client=%s&environmentp44=%s' % (
finality, client, environmentp44)
return super(ApiEnvironmentVip, self).get(
uri) | List finality, client or environment vip list.
Param finality: finality of environment(optional)
Param client: client of environment(optional)
Param environmentp44: environmentp44(optional)
Return finality list: when request has no finality and client.
Return client list: when request has only finality.
Return list environment vip: when request has finality and client.
Return environment vip: when request has finality, client and environmentvip | Below is the instruction that describes the task:
### Input:
List finality, client or environment vip list.
Param finality: finality of environment(optional)
Param client: client of environment(optional)
Param environmentp44: environmentp44(optional)
Return finality list: when request has no finality and client.
Return client list: when request has only finality.
Return list environment vip: when request has finality and client.
Return environment vip: when request has finality, client and environmentvip
### Response:
def environmentvip_step(self, finality='', client='', environmentp44=''):
"""
List finality, client or environment vip list.
Param finality: finality of environment(optional)
Param client: client of environment(optional)
Param environmentp44: environmentp44(optional)
Return finality list: when request has no finality and client.
Return client list: when request has only finality.
Return list environment vip: when request has finality and client.
Return environment vip: when request has finality, client and environmentvip
"""
uri = 'api/v3/environment-vip/step/?finality=%s&client=%s&environmentp44=%s' % (
finality, client, environmentp44)
return super(ApiEnvironmentVip, self).get(
uri) |
def valid_station(station: str):
"""
Checks the validity of a station ident
This function doesn't return anything. It merely raises a BadStation error if needed
"""
station = station.strip()
if len(station) != 4:
raise BadStation('ICAO station idents must be four characters long')
uses_na_format(station) | Checks the validity of a station ident
This function doesn't return anything. It merely raises a BadStation error if needed | Below is the instruction that describes the task:
### Input:
Checks the validity of a station ident
This function doesn't return anything. It merely raises a BadStation error if needed
### Response:
def valid_station(station: str):
"""
Checks the validity of a station ident
This function doesn't return anything. It merely raises a BadStation error if needed
"""
station = station.strip()
if len(station) != 4:
raise BadStation('ICAO station idents must be four characters long')
uses_na_format(station) |
def _setbin_unsafe(self, binstring):
"""Same as _setbin_safe, but input isn't sanity checked. binstring mustn't start with '0b'."""
length = len(binstring)
# pad with zeros up to byte boundary if needed
boundary = ((length + 7) // 8) * 8
padded_binstring = binstring + '0' * (boundary - length)\
if len(binstring) < boundary else binstring
try:
bytelist = [int(padded_binstring[x:x + 8], 2)
for x in xrange(0, len(padded_binstring), 8)]
except ValueError:
raise CreationError("Invalid character in bin initialiser {0}.", binstring)
self._setbytes_unsafe(bytearray(bytelist), length, 0) | Same as _setbin_safe, but input isn't sanity checked. binstring mustn't start with '0b'. | Below is the instruction that describes the task:
### Input:
Same as _setbin_safe, but input isn't sanity checked. binstring mustn't start with '0b'.
### Response:
def _setbin_unsafe(self, binstring):
"""Same as _setbin_safe, but input isn't sanity checked. binstring mustn't start with '0b'."""
length = len(binstring)
# pad with zeros up to byte boundary if needed
boundary = ((length + 7) // 8) * 8
padded_binstring = binstring + '0' * (boundary - length)\
if len(binstring) < boundary else binstring
try:
bytelist = [int(padded_binstring[x:x + 8], 2)
for x in xrange(0, len(padded_binstring), 8)]
except ValueError:
raise CreationError("Invalid character in bin initialiser {0}.", binstring)
self._setbytes_unsafe(bytearray(bytelist), length, 0) |
def refresh(self):
"""Refresh the server and it's child objects.
This method removes all the cache information in the server
and it's child objects, and fetches the information again from
the server using hpssacli/ssacli command.
:raises: HPSSAOperationError, if hpssacli/ssacli operation failed.
"""
config = self._get_all_details()
raid_info = _convert_to_dict(config)
self.controllers = []
for key, value in raid_info.items():
self.controllers.append(Controller(key, value, self))
self.last_updated = time.time() | Refresh the server and it's child objects.
This method removes all the cache information in the server
and it's child objects, and fetches the information again from
the server using hpssacli/ssacli command.
:raises: HPSSAOperationError, if hpssacli/ssacli operation failed. | Below is the instruction that describes the task:
### Input:
Refresh the server and it's child objects.
This method removes all the cache information in the server
and it's child objects, and fetches the information again from
the server using hpssacli/ssacli command.
:raises: HPSSAOperationError, if hpssacli/ssacli operation failed.
### Response:
def refresh(self):
"""Refresh the server and it's child objects.
This method removes all the cache information in the server
and it's child objects, and fetches the information again from
the server using hpssacli/ssacli command.
:raises: HPSSAOperationError, if hpssacli/ssacli operation failed.
"""
config = self._get_all_details()
raid_info = _convert_to_dict(config)
self.controllers = []
for key, value in raid_info.items():
self.controllers.append(Controller(key, value, self))
self.last_updated = time.time() |
def write(self, filename=None):
"""Save exif data to file."""
if filename is None:
filename = self._filename
exif_bytes = piexif.dump(self._ef)
with open(self._filename, "rb") as fin:
img = fin.read()
try:
piexif.insert(exif_bytes, img, filename)
except IOError:
type, value, traceback = sys.exc_info()
print >> sys.stderr, "Error saving file:", value | Save exif data to file. | Below is the instruction that describes the task:
### Input:
Save exif data to file.
### Response:
def write(self, filename=None):
"""Save exif data to file."""
if filename is None:
filename = self._filename
exif_bytes = piexif.dump(self._ef)
with open(self._filename, "rb") as fin:
img = fin.read()
try:
piexif.insert(exif_bytes, img, filename)
except IOError:
type, value, traceback = sys.exc_info()
print >> sys.stderr, "Error saving file:", value |
def wstatus_to_str(status):
"""
Parse and format a :func:`os.waitpid` exit status.
"""
if os.WIFEXITED(status):
return 'exited with return code %d' % (os.WEXITSTATUS(status),)
if os.WIFSIGNALED(status):
n = os.WTERMSIG(status)
return 'exited due to signal %d (%s)' % (n, SIGNAL_BY_NUM.get(n))
if os.WIFSTOPPED(status):
n = os.WSTOPSIG(status)
return 'stopped due to signal %d (%s)' % (n, SIGNAL_BY_NUM.get(n))
return 'unknown wait status (%d)' % (status,) | Parse and format a :func:`os.waitpid` exit status. | Below is the instruction that describes the task:
### Input:
Parse and format a :func:`os.waitpid` exit status.
### Response:
def wstatus_to_str(status):
"""
Parse and format a :func:`os.waitpid` exit status.
"""
if os.WIFEXITED(status):
return 'exited with return code %d' % (os.WEXITSTATUS(status),)
if os.WIFSIGNALED(status):
n = os.WTERMSIG(status)
return 'exited due to signal %d (%s)' % (n, SIGNAL_BY_NUM.get(n))
if os.WIFSTOPPED(status):
n = os.WSTOPSIG(status)
return 'stopped due to signal %d (%s)' % (n, SIGNAL_BY_NUM.get(n))
return 'unknown wait status (%d)' % (status,) |
def metadata_and_language_from_option_line(self, line):
"""Parse code options on the given line. When a start of a code cell
is found, self.metadata is set to a dictionary."""
if self.start_code_re.match(line):
self.language, self.metadata = self.options_to_metadata(line[line.find('%%') + 2:])
elif self.alternative_start_code_re.match(line):
self.metadata = {} | Parse code options on the given line. When a start of a code cell
is found, self.metadata is set to a dictionary. | Below is the instruction that describes the task:
### Input:
Parse code options on the given line. When a start of a code cell
is found, self.metadata is set to a dictionary.
### Response:
def metadata_and_language_from_option_line(self, line):
"""Parse code options on the given line. When a start of a code cell
is found, self.metadata is set to a dictionary."""
if self.start_code_re.match(line):
self.language, self.metadata = self.options_to_metadata(line[line.find('%%') + 2:])
elif self.alternative_start_code_re.match(line):
self.metadata = {} |
def overlay(self, matchers, force=False):
"""
Given a list of matchers create overlays based on them. Normally I
will remember what overlays were run this way and will avoid
re-running them but you can `force` me to. This is the
recommended way of running overlays.c
"""
for m in matchers:
if m in self._ran_matchers:
continue
self._ran_matchers.append(m)
self.overlays += list(m.offset_overlays(self))
self.overlays.sort(key=lambda o: o.start, reverse=True) | Given a list of matchers create overlays based on them. Normally I
will remember what overlays were run this way and will avoid
re-running them but you can `force` me to. This is the
recommended way of running overlays.c | Below is the instruction that describes the task:
### Input:
Given a list of matchers create overlays based on them. Normally I
will remember what overlays were run this way and will avoid
re-running them but you can `force` me to. This is the
recommended way of running overlays.c
### Response:
def overlay(self, matchers, force=False):
"""
Given a list of matchers create overlays based on them. Normally I
will remember what overlays were run this way and will avoid
re-running them but you can `force` me to. This is the
recommended way of running overlays.c
"""
for m in matchers:
if m in self._ran_matchers:
continue
self._ran_matchers.append(m)
self.overlays += list(m.offset_overlays(self))
self.overlays.sort(key=lambda o: o.start, reverse=True) |
def action_notify(self, action):
"""
Notify all subscribers of an action status change.
action -- the action whose status changed
"""
message = json.dumps({
'messageType': 'actionStatus',
'data': action.as_action_description(),
})
for subscriber in list(self.subscribers):
try:
subscriber.write_message(message)
except tornado.websocket.WebSocketClosedError:
pass | Notify all subscribers of an action status change.
action -- the action whose status changed | Below is the instruction that describes the task:
### Input:
Notify all subscribers of an action status change.
action -- the action whose status changed
### Response:
def action_notify(self, action):
"""
Notify all subscribers of an action status change.
action -- the action whose status changed
"""
message = json.dumps({
'messageType': 'actionStatus',
'data': action.as_action_description(),
})
for subscriber in list(self.subscribers):
try:
subscriber.write_message(message)
except tornado.websocket.WebSocketClosedError:
pass |
def sky(input=None,outExt=None,configObj=None, group=None, editpars=False, **inputDict):
"""
Perform sky subtraction on input list of images
Parameters
----------
input : str or list of str
a python list of image filenames, or just a single filename
configObj : configObject
an instance of configObject
inputDict : dict, optional
an optional list of parameters specified by the user
outExt : str
The extension of the output image. If the output already exists
then the input image is overwritten
Notes
-----
These are parameters that the configObj should contain by default,
they can be altered on the fly using the inputDict
Parameters that should be in configobj:
========== ===================================================================
Name Definition
========== ===================================================================
skymethod 'Sky computation method'
skysub 'Perform sky subtraction?'
skywidth 'Bin width of histogram for sampling sky statistics (in sigma)'
skystat 'Sky correction statistics parameter'
skylower 'Lower limit of usable data for sky (always in electrons)'
skyupper 'Upper limit of usable data for sky (always in electrons)'
skyclip 'Number of clipping iterations'
skylsigma 'Lower side clipping factor (in sigma)'
skyusigma 'Upper side clipping factor (in sigma)'
skymask_cat 'Catalog file listing image masks'
use_static 'Use static mask for skymatch computations?'
sky_bits 'Integer mask bit values considered good pixels in DQ array'
skyfile 'Name of file with user-computed sky values'
skyuser 'KEYWORD indicating a sky subtraction value if done by user'
in_memory 'Optimize for speed or for memory use'
========== ===================================================================
The output from sky subtraction is a copy of the original input file
where all the science data extensions have been sky subtracted.
"""
if input is not None:
inputDict['input']=input
inputDict['output']=None
inputDict['updatewcs']=False
inputDict['group']=group
else:
print("Please supply an input image", file=sys.stderr)
raise ValueError
configObj = util.getDefaultConfigObj(__taskname__,configObj,inputDict,loadOnly=(not editpars))
if configObj is None:
return
if not editpars:
run(configObj,outExt=outExt) | Perform sky subtraction on input list of images
Parameters
----------
input : str or list of str
a python list of image filenames, or just a single filename
configObj : configObject
an instance of configObject
inputDict : dict, optional
an optional list of parameters specified by the user
outExt : str
The extension of the output image. If the output already exists
then the input image is overwritten
Notes
-----
These are parameters that the configObj should contain by default,
they can be altered on the fly using the inputDict
Parameters that should be in configobj:
========== ===================================================================
Name Definition
========== ===================================================================
skymethod 'Sky computation method'
skysub 'Perform sky subtraction?'
skywidth 'Bin width of histogram for sampling sky statistics (in sigma)'
skystat 'Sky correction statistics parameter'
skylower 'Lower limit of usable data for sky (always in electrons)'
skyupper 'Upper limit of usable data for sky (always in electrons)'
skyclip 'Number of clipping iterations'
skylsigma 'Lower side clipping factor (in sigma)'
skyusigma 'Upper side clipping factor (in sigma)'
skymask_cat 'Catalog file listing image masks'
use_static 'Use static mask for skymatch computations?'
sky_bits 'Integer mask bit values considered good pixels in DQ array'
skyfile 'Name of file with user-computed sky values'
skyuser 'KEYWORD indicating a sky subtraction value if done by user'
in_memory 'Optimize for speed or for memory use'
========== ===================================================================
The output from sky subtraction is a copy of the original input file
where all the science data extensions have been sky subtracted. | Below is the instruction that describes the task:
### Input:
Perform sky subtraction on input list of images
Parameters
----------
input : str or list of str
a python list of image filenames, or just a single filename
configObj : configObject
an instance of configObject
inputDict : dict, optional
an optional list of parameters specified by the user
outExt : str
The extension of the output image. If the output already exists
then the input image is overwritten
Notes
-----
These are parameters that the configObj should contain by default,
they can be altered on the fly using the inputDict
Parameters that should be in configobj:
========== ===================================================================
Name Definition
========== ===================================================================
skymethod 'Sky computation method'
skysub 'Perform sky subtraction?'
skywidth 'Bin width of histogram for sampling sky statistics (in sigma)'
skystat 'Sky correction statistics parameter'
skylower 'Lower limit of usable data for sky (always in electrons)'
skyupper 'Upper limit of usable data for sky (always in electrons)'
skyclip 'Number of clipping iterations'
skylsigma 'Lower side clipping factor (in sigma)'
skyusigma 'Upper side clipping factor (in sigma)'
skymask_cat 'Catalog file listing image masks'
use_static 'Use static mask for skymatch computations?'
sky_bits 'Integer mask bit values considered good pixels in DQ array'
skyfile 'Name of file with user-computed sky values'
skyuser 'KEYWORD indicating a sky subtraction value if done by user'
in_memory 'Optimize for speed or for memory use'
========== ===================================================================
The output from sky subtraction is a copy of the original input file
where all the science data extensions have been sky subtracted.
### Response:
def sky(input=None,outExt=None,configObj=None, group=None, editpars=False, **inputDict):
"""
Perform sky subtraction on input list of images
Parameters
----------
input : str or list of str
a python list of image filenames, or just a single filename
configObj : configObject
an instance of configObject
inputDict : dict, optional
an optional list of parameters specified by the user
outExt : str
The extension of the output image. If the output already exists
then the input image is overwritten
Notes
-----
These are parameters that the configObj should contain by default,
they can be altered on the fly using the inputDict
Parameters that should be in configobj:
========== ===================================================================
Name Definition
========== ===================================================================
skymethod 'Sky computation method'
skysub 'Perform sky subtraction?'
skywidth 'Bin width of histogram for sampling sky statistics (in sigma)'
skystat 'Sky correction statistics parameter'
skylower 'Lower limit of usable data for sky (always in electrons)'
skyupper 'Upper limit of usable data for sky (always in electrons)'
skyclip 'Number of clipping iterations'
skylsigma 'Lower side clipping factor (in sigma)'
skyusigma 'Upper side clipping factor (in sigma)'
skymask_cat 'Catalog file listing image masks'
use_static 'Use static mask for skymatch computations?'
sky_bits 'Integer mask bit values considered good pixels in DQ array'
skyfile 'Name of file with user-computed sky values'
skyuser 'KEYWORD indicating a sky subtraction value if done by user'
in_memory 'Optimize for speed or for memory use'
========== ===================================================================
The output from sky subtraction is a copy of the original input file
where all the science data extensions have been sky subtracted.
"""
if input is not None:
inputDict['input']=input
inputDict['output']=None
inputDict['updatewcs']=False
inputDict['group']=group
else:
print("Please supply an input image", file=sys.stderr)
raise ValueError
configObj = util.getDefaultConfigObj(__taskname__,configObj,inputDict,loadOnly=(not editpars))
if configObj is None:
return
if not editpars:
run(configObj,outExt=outExt) |
def main(self):
"""
python -m utool SetupRepo.main --modname=sklearn --repo=scikit-learn --codedir=~/code -w
python -m utool SetupRepo.main --repo=ubelt --codedir=~/code --modname=ubelt -w
Example:
>>> # DISABLE_DOCTEST
>>> # SCRIPT
>>> from utool.util_project import * # NOQA
>>> SetupRepo().main()
"""
self.regencmd = self.regenfmt.format(cmd='main', **self.__dict__)
import utool as ut
self.ensure_text(
fname=join(self.modname, '__main__.py'),
chmod='+x',
text=ut.codeblock(
r'''
# STARTBLOCK
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Initially Generated By:
{regencmd}
"""
from __future__ import absolute_import, division, print_function, unicode_literals
def {modname}_main():
ignore_prefix = []
ignore_suffix = []
import utool as ut
ut.main_function_tester('{modname}', ignore_prefix, ignore_suffix)
if __name__ == '__main__':
"""
Usage:
python -m {modname} <funcname>
"""
print('Running {modname} main')
{modname}_main()
# ENDBLOCK
'''
)
) | python -m utool SetupRepo.main --modname=sklearn --repo=scikit-learn --codedir=~/code -w
python -m utool SetupRepo.main --repo=ubelt --codedir=~/code --modname=ubelt -w
Example:
>>> # DISABLE_DOCTEST
>>> # SCRIPT
>>> from utool.util_project import * # NOQA
>>> SetupRepo().main() | Below is the the instruction that describes the task:
### Input:
python -m utool SetupRepo.main --modname=sklearn --repo=scikit-learn --codedir=~/code -w
python -m utool SetupRepo.main --repo=ubelt --codedir=~/code --modname=ubelt -w
Example:
>>> # DISABLE_DOCTEST
>>> # SCRIPT
>>> from utool.util_project import * # NOQA
>>> SetupRepo().main()
### Response:
def main(self):
    """
    Generate an executable ``__main__.py`` entry point inside the module
    package, so the repo can be run with ``python -m <modname>``.

    python -m utool SetupRepo.main --modname=sklearn --repo=scikit-learn --codedir=~/code -w
    python -m utool SetupRepo.main --repo=ubelt --codedir=~/code --modname=ubelt -w
    Example:
        >>> # DISABLE_DOCTEST
        >>> # SCRIPT
        >>> from utool.util_project import * # NOQA
        >>> SetupRepo().main()
    """
    # Record the command line that can regenerate this scaffolding; it is
    # substituted into the {regencmd} placeholder of the template below.
    self.regencmd = self.regenfmt.format(cmd='main', **self.__dict__)
    import utool as ut
    # Write <modname>/__main__.py and mark it executable.  The {modname}
    # and {regencmd} placeholders in the raw template are presumably
    # filled in by ensure_text (format-style) -- confirm against its impl.
    self.ensure_text(
        fname=join(self.modname, '__main__.py'),
        chmod='+x',
        text=ut.codeblock(
            r'''
            # STARTBLOCK
            #!/usr/bin/env python
            # -*- coding: utf-8 -*-
            """
            Initially Generated By:
                {regencmd}
            """
            from __future__ import absolute_import, division, print_function, unicode_literals
            def {modname}_main():
                ignore_prefix = []
                ignore_suffix = []
                import utool as ut
                ut.main_function_tester('{modname}', ignore_prefix, ignore_suffix)
            if __name__ == '__main__':
                """
                Usage:
                    python -m {modname} <funcname>
                """
                print('Running {modname} main')
                {modname}_main()
            # ENDBLOCK
            '''
        )
    )
def bbox_to_slices(bbox):
r"""
Given a tuple containing bounding box coordinates, return a tuple of slice
objects.
A bounding box in the form of a straight list is returned by several
functions in skimage, but these cannot be used to direct index into an
image. This function returns a tuples of slices can be, such as:
``im[bbox_to_slices([xmin, ymin, xmax, ymax])]``.
Parameters
----------
bbox : tuple of ints
The bounding box indices in the form (``xmin``, ``ymin``, ``zmin``,
``xmax``, ``ymax``, ``zmax``). For a 2D image, simply omit the
``zmin`` and ``zmax`` entries.
Returns
-------
slices : tuple
A tuple of slice objects that can be used to directly index into a
larger image.
"""
if len(bbox) == 4:
ret = (slice(bbox[0], bbox[2]),
slice(bbox[1], bbox[3]))
else:
ret = (slice(bbox[0], bbox[3]),
slice(bbox[1], bbox[4]),
slice(bbox[2], bbox[5]))
return ret | r"""
Given a tuple containing bounding box coordinates, return a tuple of slice
objects.
A bounding box in the form of a straight list is returned by several
functions in skimage, but these cannot be used to direct index into an
image. This function returns a tuples of slices can be, such as:
``im[bbox_to_slices([xmin, ymin, xmax, ymax])]``.
Parameters
----------
bbox : tuple of ints
The bounding box indices in the form (``xmin``, ``ymin``, ``zmin``,
``xmax``, ``ymax``, ``zmax``). For a 2D image, simply omit the
``zmin`` and ``zmax`` entries.
Returns
-------
slices : tuple
A tuple of slice objects that can be used to directly index into a
larger image. | Below is the the instruction that describes the task:
### Input:
r"""
Given a tuple containing bounding box coordinates, return a tuple of slice
objects.
A bounding box in the form of a straight list is returned by several
functions in skimage, but these cannot be used to direct index into an
image. This function returns a tuples of slices can be, such as:
``im[bbox_to_slices([xmin, ymin, xmax, ymax])]``.
Parameters
----------
bbox : tuple of ints
The bounding box indices in the form (``xmin``, ``ymin``, ``zmin``,
``xmax``, ``ymax``, ``zmax``). For a 2D image, simply omit the
``zmin`` and ``zmax`` entries.
Returns
-------
slices : tuple
A tuple of slice objects that can be used to directly index into a
larger image.
### Response:
def bbox_to_slices(bbox):
    r"""
    Convert a flat bounding-box tuple into a tuple of ``slice`` objects.

    Several skimage functions return bounding boxes as a flat sequence,
    which cannot be used to index directly into an image.  The slices
    returned here can: ``im[bbox_to_slices([xmin, ymin, xmax, ymax])]``.

    Parameters
    ----------
    bbox : tuple of ints
        Bounding-box indices as (``xmin``, ``ymin``, ``zmin``, ``xmax``,
        ``ymax``, ``zmax``).  For a 2D image, omit the ``zmin`` and
        ``zmax`` entries.

    Returns
    -------
    slices : tuple
        Slice objects that can be used to index directly into a larger
        image.
    """
    # A 2D box is laid out (xmin, ymin, xmax, ymax) ...
    if len(bbox) == 4:
        return (slice(bbox[0], bbox[2]),
                slice(bbox[1], bbox[3]))
    # ... and a 3D box (xmin, ymin, zmin, xmax, ymax, zmax).
    return (slice(bbox[0], bbox[3]),
            slice(bbox[1], bbox[4]),
            slice(bbox[2], bbox[5]))
def handle_privmsg(self, params):
"""
Handle sending a private message to a user or channel.
"""
target, sep, msg = params.partition(' ')
if not msg:
raise IRCError.from_name(
'needmoreparams',
'PRIVMSG :Not enough parameters')
message = ':%s PRIVMSG %s %s' % (self.client_ident(), target, msg)
if target.startswith('#') or target.startswith('$'):
# Message to channel. Check if the channel exists.
channel = self.server.channels.get(target)
if not channel:
raise IRCError.from_name('nosuchnick', 'PRIVMSG :%s' % target)
if channel.name not in self.channels:
# The user isn't in the channel.
raise IRCError.from_name(
'cannotsendtochan',
'%s :Cannot send to channel' % channel.name)
self._send_to_others(message, channel)
else:
# Message to user
client = self.server.clients.get(target, None)
if not client:
raise IRCError.from_name('nosuchnick', 'PRIVMSG :%s' % target)
client.send_queue.append(message) | Handle sending a private message to a user or channel. | Below is the the instruction that describes the task:
### Input:
Handle sending a private message to a user or channel.
### Response:
def handle_privmsg(self, params):
    """
    Handle sending a private message to a user or channel.
    """
    target, _, msg = params.partition(' ')
    # A PRIVMSG with no body is a protocol error.
    if not msg:
        raise IRCError.from_name(
            'needmoreparams',
            'PRIVMSG :Not enough parameters')
    message = ':%s PRIVMSG %s %s' % (self.client_ident(), target, msg)
    if target[:1] in ('#', '$'):
        # Channel message: the channel must exist and the sender must
        # have joined it before broadcasting to the other members.
        channel = self.server.channels.get(target)
        if not channel:
            raise IRCError.from_name('nosuchnick', 'PRIVMSG :%s' % target)
        if channel.name not in self.channels:
            raise IRCError.from_name(
                'cannotsendtochan',
                '%s :Cannot send to channel' % channel.name)
        self._send_to_others(message, channel)
        return
    # Direct message: queue it on the recipient's connection.
    client = self.server.clients.get(target, None)
    if not client:
        raise IRCError.from_name('nosuchnick', 'PRIVMSG :%s' % target)
    client.send_queue.append(message)
def nsmap(self):
"""
Returns the current namespace mapping as a dictionary
there are several problems with the map and we try to guess a few
things here:
1) a URI can be mapped by many prefixes, so it is to decide which one to take
2) a prefix might map to an empty string (some packers)
3) uri+prefix mappings might be included several times
4) prefix might be empty
"""
NSMAP = dict()
# solve 3) by using a set
for k, v in set(self.namespaces):
s_prefix = self.sb[k]
s_uri = self.sb[v]
# Solve 2) & 4) by not including
if s_uri != "" and s_prefix != "":
# solve 1) by using the last one in the list
NSMAP[s_prefix] = s_uri
return NSMAP | Returns the current namespace mapping as a dictionary
there are several problems with the map and we try to guess a few
things here:
1) a URI can be mapped by many prefixes, so it is to decide which one to take
2) a prefix might map to an empty string (some packers)
3) uri+prefix mappings might be included several times
        4) prefix might be empty | Below is the instruction that describes the task:
### Input:
Returns the current namespace mapping as a dictionary
there are several problems with the map and we try to guess a few
things here:
1) a URI can be mapped by many prefixes, so it is to decide which one to take
2) a prefix might map to an empty string (some packers)
3) uri+prefix mappings might be included several times
4) prefix might be empty
### Response:
def nsmap(self):
    """
    Return the current namespace mapping as a dictionary.

    The raw mapping list has several quirks that are worked around here:
    1) a URI can be mapped by many prefixes, so one has to be chosen
    2) a prefix might map to an empty string (some packers)
    3) uri+prefix mappings might be included several times
    4) prefix might be empty
    """
    # Deduplicate repeated pairs (3) via a set, drop entries whose prefix
    # or URI resolves to the empty string (2, 4), and let the last pair
    # seen win when a prefix maps to several URIs (1).
    return {
        self.sb[prefix_idx]: self.sb[uri_idx]
        for prefix_idx, uri_idx in set(self.namespaces)
        if self.sb[prefix_idx] != "" and self.sb[uri_idx] != ""
    }
def __output_thread(self):
"Output thread"
while self.alive:
instructions = self.__get_instructions()
            self.__process_instructions(instructions) | Output thread | Below is the instruction that describes the task:
### Input:
Output thread
### Response:
def __output_thread(self):
    """Drain and process instructions for as long as the owner is alive."""
    while self.alive:
        # Fetch the next batch and hand it straight to the processor.
        self.__process_instructions(self.__get_instructions())
def return_hdr(self):
"""Return the header for further use.
Returns
-------
subj_id : str
subject identification code
start_time : datetime
start time of the dataset
s_freq : float
sampling frequency
chan_name : list of str
list of all the channels
n_samples : int
number of samples in the dataset
orig : dict
additional information taken directly from the header
"""
orig = {}
for xml_file in self.filename.glob('*.xml'):
if xml_file.stem[0] != '.':
orig[xml_file.stem] = parse_xml(str(xml_file))
signals = sorted(self.filename.glob('signal*.bin'))
for signal in signals:
block_hdr, i_data = read_all_block_hdr(signal)
self._signal.append(signal)
self._block_hdr.append(block_hdr)
self._i_data.append(i_data)
n_samples = asarray([x['n_samples'][0] for x in block_hdr], 'q')
self._n_samples.append(n_samples)
try:
subj_id = orig['subject'][0][0]['name']
except KeyError:
subj_id = ''
try:
start_time = datetime.strptime(orig['info'][0]['recordTime'][:26],
'%Y-%m-%dT%H:%M:%S.%f')
except KeyError:
start_time = DEFAULT_DATETIME
self.start_time = start_time
videos = (list(self.filename.glob('*.mp4')) + # as described in specs
list(self.filename.glob('*.mov'))) # actual example
videos = [x for x in videos if x.stem[0] != '.'] # remove hidden files
if len(videos) > 1:
lg.warning('More than one video present: ' + ', '.join(videos))
self._videos = videos
# it only works if they have all the same sampling frequency
s_freq = [x[0]['freq'][0] for x in self._block_hdr]
assert all([x == s_freq[0] for x in s_freq])
SIGNAL = 0
s_freq = self._block_hdr[SIGNAL][0]['freq'][0]
n_samples = sum(self._n_samples[SIGNAL])
chan_name, self._nchan_signal1 = _read_chan_name(orig)
self._orig = orig
return subj_id, start_time, s_freq, chan_name, n_samples, orig | Return the header for further use.
Returns
-------
subj_id : str
subject identification code
start_time : datetime
start time of the dataset
s_freq : float
sampling frequency
chan_name : list of str
list of all the channels
n_samples : int
number of samples in the dataset
orig : dict
additional information taken directly from the header | Below is the the instruction that describes the task:
### Input:
Return the header for further use.
Returns
-------
subj_id : str
subject identification code
start_time : datetime
start time of the dataset
s_freq : float
sampling frequency
chan_name : list of str
list of all the channels
n_samples : int
number of samples in the dataset
orig : dict
additional information taken directly from the header
### Response:
def return_hdr(self):
    """Return the header for further use.
    Returns
    -------
    subj_id : str
        subject identification code
    start_time : datetime
        start time of the dataset
    s_freq : float
        sampling frequency
    chan_name : list of str
        list of all the channels
    n_samples : int
        number of samples in the dataset
    orig : dict
        additional information taken directly from the header
    """
    # Parse every non-hidden XML metadata file in the recording folder.
    orig = {}
    for xml_file in self.filename.glob('*.xml'):
        if xml_file.stem[0] != '.':
            orig[xml_file.stem] = parse_xml(str(xml_file))
    # Index the binary signal files and cache their block headers.
    signals = sorted(self.filename.glob('signal*.bin'))
    for signal in signals:
        block_hdr, i_data = read_all_block_hdr(signal)
        self._signal.append(signal)
        self._block_hdr.append(block_hdr)
        self._i_data.append(i_data)
        n_samples = asarray([x['n_samples'][0] for x in block_hdr], 'q')
        self._n_samples.append(n_samples)
    try:
        subj_id = orig['subject'][0][0]['name']
    except KeyError:
        subj_id = ''
    try:
        # Truncate to 26 chars: keeps microseconds, drops any tz suffix.
        start_time = datetime.strptime(orig['info'][0]['recordTime'][:26],
                                       '%Y-%m-%dT%H:%M:%S.%f')
    except KeyError:
        start_time = DEFAULT_DATETIME
    self.start_time = start_time
    videos = (list(self.filename.glob('*.mp4')) +  # as described in specs
              list(self.filename.glob('*.mov')))  # actual example
    videos = [x for x in videos if x.stem[0] != '.']  # remove hidden files
    if len(videos) > 1:
        # BUG FIX: ``videos`` holds Path objects; ``str.join`` requires
        # strings, so joining them directly raised TypeError whenever this
        # warning fired.  Convert each path to str first.
        lg.warning('More than one video present: '
                   + ', '.join(str(x) for x in videos))
    self._videos = videos
    # it only works if they have all the same sampling frequency
    s_freq = [x[0]['freq'][0] for x in self._block_hdr]
    assert all([x == s_freq[0] for x in s_freq])
    SIGNAL = 0
    s_freq = self._block_hdr[SIGNAL][0]['freq'][0]
    n_samples = sum(self._n_samples[SIGNAL])
    chan_name, self._nchan_signal1 = _read_chan_name(orig)
    self._orig = orig
    return subj_id, start_time, s_freq, chan_name, n_samples, orig
def wait_for_task_property(service, task, prop, timeout_sec=120):
"""Waits for a task to have the specified property"""
    return time_wait(lambda: task_property_present_predicate(service, task, prop), timeout_seconds=timeout_sec) | Waits for a task to have the specified property | Below is the instruction that describes the task:
### Input:
Waits for a task to have the specified property
### Response:
def wait_for_task_property(service, task, prop, timeout_sec=120):
    """Block until the given task exposes the specified property."""
    def _property_present():
        # Re-evaluated by time_wait on every poll.
        return task_property_present_predicate(service, task, prop)
    return time_wait(_property_present, timeout_seconds=timeout_sec)
def updateArgs(self, namespace, updates):
"""Set multiple key/value pairs in one call
@param updates: The values to set
@type updates: {unicode:unicode}
"""
namespace = self._fixNS(namespace)
for k, v in updates.iteritems():
self.setArg(namespace, k, v) | Set multiple key/value pairs in one call
@param updates: The values to set
@type updates: {unicode:unicode} | Below is the the instruction that describes the task:
### Input:
Set multiple key/value pairs in one call
@param updates: The values to set
@type updates: {unicode:unicode}
### Response:
def updateArgs(self, namespace, updates):
    """Set multiple key/value pairs in one call
    @param updates: The values to set
    @type updates: {unicode:unicode}
    """
    namespace = self._fixNS(namespace)
    # ``items()`` instead of the Python-2-only ``iteritems()`` keeps this
    # working on both Python 2 and Python 3.
    for k, v in updates.items():
        self.setArg(namespace, k, v)
def decode(self, encoded, parentFieldName=""):
"""See the function description in base.py"""
if parentFieldName != "":
fieldName = "%s.%s" % (parentFieldName, self.name)
else:
fieldName = self.name
return ({fieldName: ([[0, 0]], "input")}, [fieldName]) | See the function description in base.py | Below is the the instruction that describes the task:
### Input:
See the function description in base.py
### Response:
def decode(self, encoded, parentFieldName=""):
    """See the function description in base.py"""
    # Qualify the field name with its parent when one was supplied.
    fieldName = (self.name if parentFieldName == ""
                 else "%s.%s" % (parentFieldName, self.name))
    return ({fieldName: ([[0, 0]], "input")}, [fieldName])
def delete_source_map(srcmap_file, names, logger=None):
"""Delete a map from a binned analysis source map file if it exists.
Parameters
----------
srcmap_file : str
Path to the source map file.
names : list
List of HDU keys of source maps to be deleted.
"""
with fits.open(srcmap_file) as hdulist:
hdunames = [hdu.name.upper() for hdu in hdulist]
if not isinstance(names, list):
names = [names]
for name in names:
if not name.upper() in hdunames:
continue
del hdulist[name.upper()]
hdulist.writeto(srcmap_file, overwrite=True) | Delete a map from a binned analysis source map file if it exists.
Parameters
----------
srcmap_file : str
Path to the source map file.
names : list
List of HDU keys of source maps to be deleted. | Below is the the instruction that describes the task:
### Input:
Delete a map from a binned analysis source map file if it exists.
Parameters
----------
srcmap_file : str
Path to the source map file.
names : list
List of HDU keys of source maps to be deleted.
### Response:
def delete_source_map(srcmap_file, names, logger=None):
    """Delete a map from a binned analysis source map file if it exists.
    Parameters
    ----------
    srcmap_file : str
        Path to the source map file.
    names : list
        List of HDU keys of source maps to be deleted.
    logger : optional
        Unused; kept for interface compatibility.
    """
    # Accept a single name as well as a list of names; normalize before
    # touching the file.
    if not isinstance(names, list):
        names = [names]
    with fits.open(srcmap_file) as hdulist:
        # Set of existing HDU names for O(1) membership tests.
        hdunames = {hdu.name.upper() for hdu in hdulist}
        for name in names:
            if name.upper() not in hdunames:
                continue
            del hdulist[name.upper()]
        hdulist.writeto(srcmap_file, overwrite=True)
def get_msd_plot(self, plt=None, mode="specie"):
"""
Get the plot of the smoothed msd vs time graph. Useful for
checking convergence. This can be written to an image file.
Args:
plt: A plot object. Defaults to None, which means one will be
generated.
mode (str): Determines type of msd plot. By "species", "sites",
or direction (default). If mode = "mscd", the smoothed mscd vs.
time will be plotted.
"""
from pymatgen.util.plotting import pretty_plot
plt = pretty_plot(12, 8, plt=plt)
if np.max(self.dt) > 100000:
plot_dt = self.dt / 1000
unit = 'ps'
else:
plot_dt = self.dt
unit = 'fs'
if mode == "species":
for sp in sorted(self.structure.composition.keys()):
indices = [i for i, site in enumerate(self.structure) if
site.specie == sp]
sd = np.average(self.sq_disp_ions[indices, :], axis=0)
plt.plot(plot_dt, sd, label=sp.__str__())
plt.legend(loc=2, prop={"size": 20})
elif mode == "sites":
for i, site in enumerate(self.structure):
sd = self.sq_disp_ions[i, :]
plt.plot(plot_dt, sd, label="%s - %d" % (
site.specie.__str__(), i))
plt.legend(loc=2, prop={"size": 20})
elif mode == "mscd":
plt.plot(plot_dt, self.mscd, 'r')
plt.legend(["Overall"], loc=2, prop={"size": 20})
else:
# Handle default / invalid mode case
plt.plot(plot_dt, self.msd, 'k')
plt.plot(plot_dt, self.msd_components[:, 0], 'r')
plt.plot(plot_dt, self.msd_components[:, 1], 'g')
plt.plot(plot_dt, self.msd_components[:, 2], 'b')
plt.legend(["Overall", "a", "b", "c"], loc=2, prop={"size": 20})
plt.xlabel("Timestep ({})".format(unit))
if mode == "mscd":
plt.ylabel("MSCD ($\\AA^2$)")
else:
plt.ylabel("MSD ($\\AA^2$)")
plt.tight_layout()
return plt | Get the plot of the smoothed msd vs time graph. Useful for
checking convergence. This can be written to an image file.
Args:
plt: A plot object. Defaults to None, which means one will be
generated.
mode (str): Determines type of msd plot. By "species", "sites",
or direction (default). If mode = "mscd", the smoothed mscd vs.
time will be plotted. | Below is the the instruction that describes the task:
### Input:
Get the plot of the smoothed msd vs time graph. Useful for
checking convergence. This can be written to an image file.
Args:
plt: A plot object. Defaults to None, which means one will be
generated.
mode (str): Determines type of msd plot. By "species", "sites",
or direction (default). If mode = "mscd", the smoothed mscd vs.
time will be plotted.
### Response:
def get_msd_plot(self, plt=None, mode="specie"):
    """
    Get the plot of the smoothed msd vs time graph. Useful for
    checking convergence. This can be written to an image file.
    Args:
        plt: A plot object. Defaults to None, which means one will be
            generated.
        mode (str): Determines type of msd plot. By "species", "sites",
            or direction (default). If mode = "mscd", the smoothed mscd vs.
            time will be plotted.
    NOTE(review): the default value "specie" matches none of the branches
    below (they test for "species", "sites" and "mscd"), so the default
    falls through to the per-direction plot -- confirm this is intended.
    """
    from pymatgen.util.plotting import pretty_plot
    plt = pretty_plot(12, 8, plt=plt)
    # Switch the x-axis to picoseconds for long trajectories, keep
    # femtoseconds otherwise.
    if np.max(self.dt) > 100000:
        plot_dt = self.dt / 1000
        unit = 'ps'
    else:
        plot_dt = self.dt
        unit = 'fs'
    if mode == "species":
        # One curve per element: average squared displacement over all
        # sites of that species.
        for sp in sorted(self.structure.composition.keys()):
            indices = [i for i, site in enumerate(self.structure) if
                       site.specie == sp]
            sd = np.average(self.sq_disp_ions[indices, :], axis=0)
            plt.plot(plot_dt, sd, label=sp.__str__())
        plt.legend(loc=2, prop={"size": 20})
    elif mode == "sites":
        # One curve per site, labelled "<species> - <site index>".
        for i, site in enumerate(self.structure):
            sd = self.sq_disp_ions[i, :]
            plt.plot(plot_dt, sd, label="%s - %d" % (
                site.specie.__str__(), i))
        plt.legend(loc=2, prop={"size": 20})
    elif mode == "mscd":
        plt.plot(plot_dt, self.mscd, 'r')
        plt.legend(["Overall"], loc=2, prop={"size": 20})
    else:
        # Handle default / invalid mode case: overall MSD plus its three
        # lattice-direction components.
        plt.plot(plot_dt, self.msd, 'k')
        plt.plot(plot_dt, self.msd_components[:, 0], 'r')
        plt.plot(plot_dt, self.msd_components[:, 1], 'g')
        plt.plot(plot_dt, self.msd_components[:, 2], 'b')
        plt.legend(["Overall", "a", "b", "c"], loc=2, prop={"size": 20})
    plt.xlabel("Timestep ({})".format(unit))
    if mode == "mscd":
        plt.ylabel("MSCD ($\\AA^2$)")
    else:
        plt.ylabel("MSD ($\\AA^2$)")
    plt.tight_layout()
    return plt
def eventize(self, granularity):
""" This splits the JSON information found at self.events into the
several events. For this there are three different levels of time
consuming actions: 1-soft, 2-medium and 3-hard.
Level 1 provides events about emails
Level 2 not implemented
Level 3 not implemented
:param granularity: Levels of time consuming actions to calculate events
:type granularity: integer
:returns: Pandas dataframe with splitted events.
:rtype: pandas.DataFrame
"""
email = {}
# First level granularity
email[Email.EMAIL_ID] = []
email[Email.EMAIL_EVENT] = []
email[Email.EMAIL_DATE] = []
email[Email.EMAIL_OWNER] = []
email[Email.EMAIL_SUBJECT] = []
email[Email.EMAIL_BODY] = []
email[Email.EMAIL_ORIGIN] = []
events = pandas.DataFrame()
for item in self.items:
origin = item["origin"]
email_data = item["data"]
if granularity == 1:
# Changeset submission date: filling a new event
email[Email.EMAIL_ID].append(email_data["Message-ID"])
email[Email.EMAIL_EVENT].append(Email.EVENT_OPEN)
try:
email[Email.EMAIL_DATE].append(str_to_datetime(email_data["Date"], ignoretz=True))
except KeyError:
email[Email.EMAIL_DATE].append(str_to_datetime("1970-01-01"))
email[Email.EMAIL_OWNER].append(email_data["From"])
email[Email.EMAIL_SUBJECT].append(email_data["Subject"])
try:
email[Email.EMAIL_BODY].append(email_data["body"]["plain"])
except KeyError:
email[Email.EMAIL_BODY].append("None")
email[Email.EMAIL_ORIGIN].append(origin)
if granularity == 2:
# TDB
pass
if granularity == 3:
# TDB
pass
# Done in this way to have an order (and not a direct cast)
events[Email.EMAIL_ID] = email[Email.EMAIL_ID]
events[Email.EMAIL_EVENT] = email[Email.EMAIL_EVENT]
events[Email.EMAIL_DATE] = email[Email.EMAIL_DATE]
events[Email.EMAIL_OWNER] = email[Email.EMAIL_OWNER]
events[Email.EMAIL_SUBJECT] = email[Email.EMAIL_SUBJECT]
events[Email.EMAIL_BODY] = email[Email.EMAIL_BODY]
events[Email.EMAIL_ORIGIN] = email[Email.EMAIL_ORIGIN]
return events | This splits the JSON information found at self.events into the
several events. For this there are three different levels of time
consuming actions: 1-soft, 2-medium and 3-hard.
Level 1 provides events about emails
Level 2 not implemented
Level 3 not implemented
:param granularity: Levels of time consuming actions to calculate events
:type granularity: integer
:returns: Pandas dataframe with splitted events.
:rtype: pandas.DataFrame | Below is the the instruction that describes the task:
### Input:
This splits the JSON information found at self.events into the
several events. For this there are three different levels of time
consuming actions: 1-soft, 2-medium and 3-hard.
Level 1 provides events about emails
Level 2 not implemented
Level 3 not implemented
:param granularity: Levels of time consuming actions to calculate events
:type granularity: integer
:returns: Pandas dataframe with splitted events.
:rtype: pandas.DataFrame
### Response:
def eventize(self, granularity):
    """ This splits the JSON information found at self.events into the
    several events. For this there are three different levels of time
    consuming actions: 1-soft, 2-medium and 3-hard.
    Level 1 provides events about emails
    Level 2 not implemented
    Level 3 not implemented
    :param granularity: Levels of time consuming actions to calculate events
    :type granularity: integer
    :returns: Pandas dataframe with splitted events.
    :rtype: pandas.DataFrame
    """
    # Accumulate one list of values per output column, keyed by the
    # Email.* column-name constants.
    email = {}
    # First level granularity
    email[Email.EMAIL_ID] = []
    email[Email.EMAIL_EVENT] = []
    email[Email.EMAIL_DATE] = []
    email[Email.EMAIL_OWNER] = []
    email[Email.EMAIL_SUBJECT] = []
    email[Email.EMAIL_BODY] = []
    email[Email.EMAIL_ORIGIN] = []
    events = pandas.DataFrame()
    for item in self.items:
        origin = item["origin"]
        email_data = item["data"]
        if granularity == 1:
            # Changeset submission date: filling a new event
            email[Email.EMAIL_ID].append(email_data["Message-ID"])
            email[Email.EMAIL_EVENT].append(Email.EVENT_OPEN)
            try:
                email[Email.EMAIL_DATE].append(str_to_datetime(email_data["Date"], ignoretz=True))
            except KeyError:
                # Missing Date header: fall back to the Unix epoch.
                email[Email.EMAIL_DATE].append(str_to_datetime("1970-01-01"))
            email[Email.EMAIL_OWNER].append(email_data["From"])
            email[Email.EMAIL_SUBJECT].append(email_data["Subject"])
            try:
                email[Email.EMAIL_BODY].append(email_data["body"]["plain"])
            except KeyError:
                # No plain-text body available; store a placeholder.
                email[Email.EMAIL_BODY].append("None")
            email[Email.EMAIL_ORIGIN].append(origin)
        if granularity == 2:
            # TBD
            pass
        if granularity == 3:
            # TBD
            pass
    # Done in this way to have an order (and not a direct cast)
    events[Email.EMAIL_ID] = email[Email.EMAIL_ID]
    events[Email.EMAIL_EVENT] = email[Email.EMAIL_EVENT]
    events[Email.EMAIL_DATE] = email[Email.EMAIL_DATE]
    events[Email.EMAIL_OWNER] = email[Email.EMAIL_OWNER]
    events[Email.EMAIL_SUBJECT] = email[Email.EMAIL_SUBJECT]
    events[Email.EMAIL_BODY] = email[Email.EMAIL_BODY]
    events[Email.EMAIL_ORIGIN] = email[Email.EMAIL_ORIGIN]
    return events
def get_outcome(self, outcome):
"""
Returns the details of the outcome with the given id.
:calls: `GET /api/v1/outcomes/:id \
<https://canvas.instructure.com/doc/api/outcomes.html#method.outcomes_api.show>`_
:param outcome: The outcome object or ID to return.
:type outcome: :class:`canvasapi.outcome.Outcome` or int
:returns: An Outcome object.
:rtype: :class:`canvasapi.outcome.Outcome`
"""
from canvasapi.outcome import Outcome
outcome_id = obj_or_id(outcome, "outcome", (Outcome,))
response = self.__requester.request(
'GET',
'outcomes/{}'.format(outcome_id)
)
return Outcome(self.__requester, response.json()) | Returns the details of the outcome with the given id.
:calls: `GET /api/v1/outcomes/:id \
<https://canvas.instructure.com/doc/api/outcomes.html#method.outcomes_api.show>`_
:param outcome: The outcome object or ID to return.
:type outcome: :class:`canvasapi.outcome.Outcome` or int
:returns: An Outcome object.
:rtype: :class:`canvasapi.outcome.Outcome` | Below is the the instruction that describes the task:
### Input:
Returns the details of the outcome with the given id.
:calls: `GET /api/v1/outcomes/:id \
<https://canvas.instructure.com/doc/api/outcomes.html#method.outcomes_api.show>`_
:param outcome: The outcome object or ID to return.
:type outcome: :class:`canvasapi.outcome.Outcome` or int
:returns: An Outcome object.
:rtype: :class:`canvasapi.outcome.Outcome`
### Response:
def get_outcome(self, outcome):
    """
    Return the details of the outcome with the given id.
    :calls: `GET /api/v1/outcomes/:id \
    <https://canvas.instructure.com/doc/api/outcomes.html#method.outcomes_api.show>`_
    :param outcome: The outcome object or ID to return.
    :type outcome: :class:`canvasapi.outcome.Outcome` or int
    :returns: An Outcome object.
    :rtype: :class:`canvasapi.outcome.Outcome`
    """
    from canvasapi.outcome import Outcome

    # Accept either an Outcome instance or a bare numeric id.
    outcome_id = obj_or_id(outcome, "outcome", (Outcome,))
    response = self.__requester.request('GET', 'outcomes/{}'.format(outcome_id))
    return Outcome(self.__requester, response.json())
def data_json(self, pretty=False):
"""Returns the data as a valid JSON string."""
if pretty:
return json.dumps(self.data, sort_keys=True, indent=4, separators=(',', ': '))
else:
return json.dumps(self.data) | Returns the data as a valid JSON string. | Below is the the instruction that describes the task:
### Input:
Returns the data as a valid JSON string.
### Response:
def data_json(self, pretty=False):
    """Serialize ``self.data`` to a JSON string.

    When *pretty* is true the output is key-sorted and indented for human
    consumption; otherwise the compact default encoding is used.
    """
    if not pretty:
        return json.dumps(self.data)
    return json.dumps(self.data, sort_keys=True, indent=4,
                      separators=(',', ': '))
def header_encode(self, string):
"""Header-encode a string by converting it first to bytes.
The type of encoding (base64 or quoted-printable) will be based on
this charset's `header_encoding`.
:param string: A unicode string for the header. It must be possible
to encode this string to bytes using the character set's
output codec.
:return: The encoded string, with RFC 2047 chrome.
"""
codec = self.output_codec or 'us-ascii'
header_bytes = _encode(string, codec)
# 7bit/8bit encodings return the string unchanged (modulo conversions)
encoder_module = self._get_encoder(header_bytes)
if encoder_module is None:
return string
return encoder_module.header_encode(header_bytes, codec) | Header-encode a string by converting it first to bytes.
The type of encoding (base64 or quoted-printable) will be based on
this charset's `header_encoding`.
:param string: A unicode string for the header. It must be possible
to encode this string to bytes using the character set's
output codec.
:return: The encoded string, with RFC 2047 chrome. | Below is the the instruction that describes the task:
### Input:
Header-encode a string by converting it first to bytes.
The type of encoding (base64 or quoted-printable) will be based on
this charset's `header_encoding`.
:param string: A unicode string for the header. It must be possible
to encode this string to bytes using the character set's
output codec.
:return: The encoded string, with RFC 2047 chrome.
### Response:
def header_encode(self, string):
    """Header-encode a string by converting it first to bytes.
    The type of encoding (base64 or quoted-printable) will be based on
    this charset's `header_encoding`.
    :param string: A unicode string for the header. It must be possible
        to encode this string to bytes using the character set's
        output codec.
    :return: The encoded string, with RFC 2047 chrome.
    """
    codec = self.output_codec or 'us-ascii'
    header_bytes = _encode(string, codec)
    encoder_module = self._get_encoder(header_bytes)
    # 7bit/8bit charsets have no encoder module and pass the string
    # through unchanged (modulo conversions).
    if encoder_module is None:
        return string
    return encoder_module.header_encode(header_bytes, codec)
def get_objective_objective_bank_session(self):
"""Gets the session for retrieving objective to objective bank mappings.
return: (osid.learning.ObjectiveObjectiveBankSession) - an
``ObjectiveObjectiveBankSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_objective_objective_bank()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_objective_bank()`` is ``true``.*
"""
if not self.supports_objective_objective_bank():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.ObjectiveObjectiveBankSession(runtime=self._runtime) | Gets the session for retrieving objective to objective bank mappings.
return: (osid.learning.ObjectiveObjectiveBankSession) - an
``ObjectiveObjectiveBankSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_objective_objective_bank()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_objective_bank()`` is ``true``.* | Below is the the instruction that describes the task:
### Input:
Gets the session for retrieving objective to objective bank mappings.
return: (osid.learning.ObjectiveObjectiveBankSession) - an
``ObjectiveObjectiveBankSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_objective_objective_bank()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_objective_bank()`` is ``true``.*
### Response:
def get_objective_objective_bank_session(self):
    """Gets the session for retrieving objective to objective bank mappings.

    return: (osid.learning.ObjectiveObjectiveBankSession) - an
            ``ObjectiveObjectiveBankSession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_objective_objective_bank()``
            is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_objective_objective_bank()`` is ``true``.*
    """
    if self.supports_objective_objective_bank():
        # pylint: disable=no-member
        return sessions.ObjectiveObjectiveBankSession(runtime=self._runtime)
    raise errors.Unimplemented()
def buildSubscriptionList(self):
"""
Hits Google Reader for a users's alphabetically ordered list of feeds.
Returns true if succesful.
"""
self._clearLists()
unreadById = {}
if not self.userId:
self.getUserInfo()
unreadJson = self.httpGet(ReaderUrl.UNREAD_COUNT_URL, { 'output': 'json', })
unreadCounts = json.loads(unreadJson, strict=False)['unreadcounts']
for unread in unreadCounts:
unreadById[unread['id']] = unread['count']
feedsJson = self.httpGet(ReaderUrl.SUBSCRIPTION_LIST_URL, { 'output': 'json', })
subscriptions = json.loads(feedsJson, strict=False)['subscriptions']
for sub in subscriptions:
categories = []
if 'categories' in sub:
for hCategory in sub['categories']:
cId = hCategory['id']
if not cId in self.categoriesById:
category = Category(self, hCategory['label'], cId)
self._addCategory(category)
categories.append(self.categoriesById[cId])
try:
feed = self.getFeed(sub['id'])
if not feed:
raise
if not feed.title:
feed.title = sub['title']
for category in categories:
feed.addCategory(category)
feed.unread = unreadById.get(sub['id'], 0)
except:
feed = Feed(self,
sub['title'],
sub['id'],
sub.get('htmlUrl', None),
unreadById.get(sub['id'], 0),
categories)
if not categories:
self.orphanFeeds.append(feed)
self._addFeed(feed)
specialUnreads = [id for id in unreadById
if id.find('user/%s/state/com.google/' % self.userId) != -1]
for type in self.specialFeeds:
feed = self.specialFeeds[type]
feed.unread = 0
for id in specialUnreads:
if id.endswith('/%s' % type):
feed.unread = unreadById.get(id, 0)
break
return True | Hits Google Reader for a users's alphabetically ordered list of feeds.
Returns true if succesful. | Below is the the instruction that describes the task:
### Input:
Hits Google Reader for a users's alphabetically ordered list of feeds.
Returns true if succesful.
### Response:
def buildSubscriptionList(self):
    """
    Hits Google Reader for a users's alphabetically ordered list of feeds.
    Returns true if succesful.
    """
    # Rebuild from a clean slate so repeated calls do not duplicate entries.
    self._clearLists()
    unreadById = {}
    if not self.userId:
        self.getUserInfo()
    # Map feed/category id -> unread count for lookup below.
    unreadJson = self.httpGet(ReaderUrl.UNREAD_COUNT_URL, { 'output': 'json', })
    unreadCounts = json.loads(unreadJson, strict=False)['unreadcounts']
    for unread in unreadCounts:
        unreadById[unread['id']] = unread['count']
    feedsJson = self.httpGet(ReaderUrl.SUBSCRIPTION_LIST_URL, { 'output': 'json', })
    subscriptions = json.loads(feedsJson, strict=False)['subscriptions']
    for sub in subscriptions:
        # Register any categories (folders) this subscription belongs to.
        categories = []
        if 'categories' in sub:
            for hCategory in sub['categories']:
                cId = hCategory['id']
                if not cId in self.categoriesById:
                    category = Category(self, hCategory['label'], cId)
                    self._addCategory(category)
                categories.append(self.categoriesById[cId])
        try:
            # Reuse an existing Feed object when this id is already known.
            feed = self.getFeed(sub['id'])
            if not feed:
                # Bare ``raise`` with no active exception raises RuntimeError,
                # which the bare ``except`` below catches -- used here as a
                # jump to the "construct a fresh Feed" branch.
                raise
            if not feed.title:
                feed.title = sub['title']
            for category in categories:
                feed.addCategory(category)
            feed.unread = unreadById.get(sub['id'], 0)
        except:
            feed = Feed(self,
                        sub['title'],
                        sub['id'],
                        sub.get('htmlUrl', None),
                        unreadById.get(sub['id'], 0),
                        categories)
        if not categories:
            # Feeds without a folder are tracked separately.
            self.orphanFeeds.append(feed)
        self._addFeed(feed)
    # Special feeds (starred, broadcast, ...) live under the user's state path.
    specialUnreads = [id for id in unreadById
                      if id.find('user/%s/state/com.google/' % self.userId) != -1]
    for type in self.specialFeeds:
        feed = self.specialFeeds[type]
        feed.unread = 0
        for id in specialUnreads:
            if id.endswith('/%s' % type):
                feed.unread = unreadById.get(id, 0)
                break
    return True
def main():
'''Main routine.'''
# Load Azure app defaults
try:
with open('azurermconfig.json') as config_file:
config_data = json.load(config_file)
except FileNotFoundError:
sys.exit("Error: Expecting azurermconfig.json in current folder")
tenant_id = config_data['tenantId']
app_id = config_data['appId']
app_secret = config_data['appSecret']
subscription_id = config_data['subscriptionId']
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
# list locations
locations = azurerm.list_locations(access_token, subscription_id)
for location in locations['value']:
print(location['name']
+ ', Display Name: ' + location['displayName']
+ ', Coords: ' + location['latitude']
+ ', ' + location['longitude']) | Main routine. | Below is the instruction that describes the task:
### Input:
Main routine.
### Response:
def main():
    '''Main routine: authenticate against Azure and print every region.'''
    # Load Azure application credentials from the local config file.
    try:
        with open('azurermconfig.json') as config_file:
            config = json.load(config_file)
    except FileNotFoundError:
        sys.exit("Error: Expecting azurermconfig.json in current folder")

    access_token = azurerm.get_access_token(
        config['tenantId'], config['appId'], config['appSecret'])

    # Print every available location with its display name and coordinates.
    locations = azurerm.list_locations(access_token, config['subscriptionId'])
    for location in locations['value']:
        line = (location['name']
                + ', Display Name: ' + location['displayName']
                + ', Coords: ' + location['latitude']
                + ', ' + location['longitude'])
        print(line)
def scalar(value):
"""
Take return a value[0] if `value` is a list of length 1
"""
if isinstance(value, (list, tuple)) and len(value) == 1:
return value[0]
return value | Take return a value[0] if `value` is a list of length 1 | Below is the instruction that describes the task:
### Input:
Take return a value[0] if `value` is a list of length 1
### Response:
def scalar(value):
    """
    Unwrap single-element lists/tuples: return ``value[0]`` when ``value``
    is a list or tuple of length 1, otherwise return ``value`` unchanged.
    """
    is_sequence = isinstance(value, (list, tuple))
    if not is_sequence or len(value) != 1:
        return value
    return value[0]
def save(self, replace=True):
"""
Saves this item to SDB.
:param bool replace: If ``True``, delete any attributes on the remote
SDB item that have a ``None`` value on this object.
"""
self.domain.put_attributes(self.name, self, replace)
# Delete any attributes set to "None"
if replace:
del_attrs = []
for name in self:
if self[name] == None:
del_attrs.append(name)
if len(del_attrs) > 0:
self.domain.delete_attributes(self.name, del_attrs) | Saves this item to SDB.
:param bool replace: If ``True``, delete any attributes on the remote
SDB item that have a ``None`` value on this object. | Below is the the instruction that describes the task:
### Input:
Saves this item to SDB.
:param bool replace: If ``True``, delete any attributes on the remote
SDB item that have a ``None`` value on this object.
### Response:
def save(self, replace=True):
    """
    Saves this item to SDB.

    :param bool replace: If ``True``, delete any attributes on the remote
        SDB item that have a ``None`` value on this object.
    """
    self.domain.put_attributes(self.name, self, replace)
    if replace:
        # Collect attributes set to None locally and remove them remotely.
        # Use identity comparison: ``== None`` invokes __eq__ and can
        # misfire on attribute values with custom equality semantics.
        del_attrs = [name for name in self if self[name] is None]
        if del_attrs:
            self.domain.delete_attributes(self.name, del_attrs)
def shouldContinue(self):
""" Decide whether to start generating features or return early.
Returns a boolean: True to proceed, False to skip.
Sublcasses may override this to skip generation based on the presence
or lack of other required pieces of font data.
"""
if not self.context.todo:
self.log.debug("No features to be generated; skipped")
return False
return True | Decide whether to start generating features or return early.
Returns a boolean: True to proceed, False to skip.
Sublcasses may override this to skip generation based on the presence
or lack of other required pieces of font data. | Below is the the instruction that describes the task:
### Input:
Decide whether to start generating features or return early.
Returns a boolean: True to proceed, False to skip.
Sublcasses may override this to skip generation based on the presence
or lack of other required pieces of font data.
### Response:
def shouldContinue(self):
    """Decide whether to start generating features or return early.

    Returns a boolean: True to proceed, False to skip.
    Sublcasses may override this to skip generation based on the presence
    or lack of other required pieces of font data.
    """
    if self.context.todo:
        return True
    self.log.debug("No features to be generated; skipped")
    return False
def md5_checksum(file_path, chunk_bytes=4194304):
"""Return the MD5 checksum (hex digest) of the file"""
with open(file_path, "rb") as infile:
checksum = hashlib.md5()
while 1:
data = infile.read(chunk_bytes)
if not data:
break
checksum.update(data)
return checksum.hexdigest() | Return the MD5 checksum (hex digest) of the file | Below is the the instruction that describes the task:
### Input:
Return the MD5 checksum (hex digest) of the file
### Response:
def md5_checksum(file_path, chunk_bytes=4194304):
    """Return the MD5 checksum (hex digest) of the file, read in chunks."""
    digest = hashlib.md5()
    with open(file_path, "rb") as handle:
        # iter() with a sentinel stops cleanly at EOF (empty bytes).
        for chunk in iter(lambda: handle.read(chunk_bytes), b""):
            digest.update(chunk)
    return digest.hexdigest()
def on_press(self, window, key, scancode, action, mods):
"""
Key handler for key presses.
"""
# controls for moving position
if key == glfw.KEY_W:
self.pos[0] -= self._pos_step # dec x
elif key == glfw.KEY_S:
self.pos[0] += self._pos_step # inc x
elif key == glfw.KEY_A:
self.pos[1] -= self._pos_step # dec y
elif key == glfw.KEY_D:
self.pos[1] += self._pos_step # inc y
elif key == glfw.KEY_F:
self.pos[2] -= self._pos_step # dec z
elif key == glfw.KEY_R:
self.pos[2] += self._pos_step # inc z
# controls for moving orientation
elif key == glfw.KEY_Z:
drot = rotation_matrix(angle=0.1, direction=[1., 0., 0.])[:3, :3]
self.rotation = self.rotation.dot(drot) # rotates x
elif key == glfw.KEY_X:
drot = rotation_matrix(angle=-0.1, direction=[1., 0., 0.])[:3, :3]
self.rotation = self.rotation.dot(drot) # rotates x
elif key == glfw.KEY_T:
drot = rotation_matrix(angle=0.1, direction=[0., 1., 0.])[:3, :3]
self.rotation = self.rotation.dot(drot) # rotates y
elif key == glfw.KEY_G:
drot = rotation_matrix(angle=-0.1, direction=[0., 1., 0.])[:3, :3]
self.rotation = self.rotation.dot(drot) # rotates y
elif key == glfw.KEY_C:
drot = rotation_matrix(angle=0.1, direction=[0., 0., 1.])[:3, :3]
self.rotation = self.rotation.dot(drot) # rotates z
elif key == glfw.KEY_V:
drot = rotation_matrix(angle=-0.1, direction=[0., 0., 1.])[:3, :3]
self.rotation = self.rotation.dot(drot) | Key handler for key presses. | Below is the the instruction that describes the task:
### Input:
Key handler for key presses.
### Response:
def on_press(self, window, key, scancode, action, mods):
    """
    Key handler for key presses.

    WASD/FR translate the position; ZX/TG/CV rotate about x/y/z.
    """
    # Translation keys: axis index into self.pos and the step sign.
    translations = {
        glfw.KEY_W: (0, -1), glfw.KEY_S: (0, +1),
        glfw.KEY_A: (1, -1), glfw.KEY_D: (1, +1),
        glfw.KEY_F: (2, -1), glfw.KEY_R: (2, +1),
    }
    # Rotation keys: signed angle and axis direction.
    rotations = {
        glfw.KEY_Z: (0.1, [1., 0., 0.]), glfw.KEY_X: (-0.1, [1., 0., 0.]),
        glfw.KEY_T: (0.1, [0., 1., 0.]), glfw.KEY_G: (-0.1, [0., 1., 0.]),
        glfw.KEY_C: (0.1, [0., 0., 1.]), glfw.KEY_V: (-0.1, [0., 0., 1.]),
    }
    if key in translations:
        axis, sign = translations[key]
        self.pos[axis] += sign * self._pos_step
    elif key in rotations:
        angle, direction = rotations[key]
        drot = rotation_matrix(angle=angle, direction=direction)[:3, :3]
        self.rotation = self.rotation.dot(drot)
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a Systemd journal file-like object.
Args:
parser_mediator (ParserMediator): parser mediator.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the header cannot be parsed.
"""
file_header_map = self._GetDataTypeMap('systemd_journal_file_header')
try:
file_header, _ = self._ReadStructureFromFileObject(
file_object, 0, file_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile(
'Unable to parse file header with error: {0!s}'.format(
exception))
if file_header.signature != self._FILE_SIGNATURE:
raise errors.UnableToParseFile('Invalid file signature.')
if file_header.header_size not in self._SUPPORTED_FILE_HEADER_SIZES:
raise errors.UnableToParseFile(
'Unsupported file header size: {0:d}.'.format(
file_header.header_size))
data_hash_table_end_offset = (
file_header.data_hash_table_offset +
file_header.data_hash_table_size)
field_hash_table_end_offset = (
file_header.field_hash_table_offset +
file_header.field_hash_table_size)
self._maximum_journal_file_offset = max(
data_hash_table_end_offset, field_hash_table_end_offset)
entry_object_offsets = self._ParseEntryObjectOffsets(
file_object, file_header.entry_array_offset)
for entry_object_offset in entry_object_offsets:
if entry_object_offset == 0:
continue
try:
fields = self._ParseJournalEntry(file_object, entry_object_offset)
except errors.ParseError as exception:
parser_mediator.ProduceExtractionWarning((
'Unable to parse journal entry at offset: 0x{0:08x} with '
'error: {1!s}').format(entry_object_offset, exception))
return
event_data = SystemdJournalEventData()
event_data.body = fields.get('MESSAGE', None)
event_data.hostname = fields.get('_HOSTNAME', None)
event_data.reporter = fields.get('SYSLOG_IDENTIFIER', None)
if event_data.reporter and event_data.reporter != 'kernel':
event_data.pid = fields.get('_PID', fields.get('SYSLOG_PID', None))
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=fields['real_time'])
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data) | Parses a Systemd journal file-like object.
Args:
parser_mediator (ParserMediator): parser mediator.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the header cannot be parsed. | Below is the the instruction that describes the task:
### Input:
Parses a Systemd journal file-like object.
Args:
parser_mediator (ParserMediator): parser mediator.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the header cannot be parsed.
### Response:
def ParseFileObject(self, parser_mediator, file_object):
    """Parses a Systemd journal file-like object.

    Args:
      parser_mediator (ParserMediator): parser mediator.
      file_object (dfvfs.FileIO): a file-like object.

    Raises:
      UnableToParseFile: when the header cannot be parsed.
    """
    file_header_map = self._GetDataTypeMap('systemd_journal_file_header')
    try:
        file_header, _ = self._ReadStructureFromFileObject(
            file_object, 0, file_header_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.UnableToParseFile(
            'Unable to parse file header with error: {0!s}'.format(
                exception))
    if file_header.signature != self._FILE_SIGNATURE:
        raise errors.UnableToParseFile('Invalid file signature.')
    if file_header.header_size not in self._SUPPORTED_FILE_HEADER_SIZES:
        raise errors.UnableToParseFile(
            'Unsupported file header size: {0:d}.'.format(
                file_header.header_size))
    # The hash tables mark the end of the file metadata region; their furthest
    # end is recorded as the lower bound for valid entry object offsets.
    data_hash_table_end_offset = (
        file_header.data_hash_table_offset +
        file_header.data_hash_table_size)
    field_hash_table_end_offset = (
        file_header.field_hash_table_offset +
        file_header.field_hash_table_size)
    self._maximum_journal_file_offset = max(
        data_hash_table_end_offset, field_hash_table_end_offset)
    entry_object_offsets = self._ParseEntryObjectOffsets(
        file_object, file_header.entry_array_offset)
    for entry_object_offset in entry_object_offsets:
        if entry_object_offset == 0:
            # Unused slot in the entry array.
            continue
        try:
            fields = self._ParseJournalEntry(file_object, entry_object_offset)
        except errors.ParseError as exception:
            parser_mediator.ProduceExtractionWarning((
                'Unable to parse journal entry at offset: 0x{0:08x} with '
                'error: {1!s}').format(entry_object_offset, exception))
            # NOTE(review): one bad entry aborts all remaining entries --
            # presumably deliberate since entries are chained; confirm.
            return
        event_data = SystemdJournalEventData()
        event_data.body = fields.get('MESSAGE', None)
        event_data.hostname = fields.get('_HOSTNAME', None)
        event_data.reporter = fields.get('SYSLOG_IDENTIFIER', None)
        # Kernel messages carry no meaningful process identifier.
        if event_data.reporter and event_data.reporter != 'kernel':
            event_data.pid = fields.get('_PID', fields.get('SYSLOG_PID', None))
        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
            timestamp=fields['real_time'])
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_WRITTEN)
        parser_mediator.ProduceEventWithEventData(event, event_data)
def best_item_from_list(item,options,fuzzy=90,fname_match=True,fuzzy_fragment=None,guess=False):
'''Returns just the best item, or ``None``'''
match = best_match_from_list(item,options,fuzzy,fname_match,fuzzy_fragment,guess)
if match:
return match[0]
return None | Returns just the best item, or ``None`` | Below is the the instruction that describes the task:
### Input:
Returns just the best item, or ``None``
### Response:
def best_item_from_list(item,options,fuzzy=90,fname_match=True,fuzzy_fragment=None,guess=False):
    '''Returns just the best item, or ``None``'''
    found = best_match_from_list(item, options, fuzzy, fname_match,
                                 fuzzy_fragment, guess)
    return found[0] if found else None
def debug(self):
'''Retrieve the debug information from the charmstore.'''
url = '{}/debug/status'.format(self.url)
data = self._get(url)
return data.json() | Retrieve the debug information from the charmstore. | Below is the the instruction that describes the task:
### Input:
Retrieve the debug information from the charmstore.
### Response:
def debug(self):
    '''Retrieve the debug information from the charmstore.'''
    response = self._get('{}/debug/status'.format(self.url))
    return response.json()
def query_int_attribute(self, target, display_mask, attr):
"""Return the value of an integer attribute"""
reply = NVCtrlQueryAttributeReplyRequest(display=self.display,
opcode=self.display.get_extension_major(extname),
target_id=target.id(),
target_type=target.type(),
display_mask=display_mask,
attr=attr)
if not reply._data.get('flags'):
return None
return int(reply._data.get('value')) | Return the value of an integer attribute | Below is the the instruction that describes the task:
### Input:
Return the value of an integer attribute
### Response:
def query_int_attribute(self, target, display_mask, attr):
    """Return the value of an integer attribute, or None if the
    reply flags indicate the attribute is unavailable."""
    request = NVCtrlQueryAttributeReplyRequest(
        display=self.display,
        opcode=self.display.get_extension_major(extname),
        target_id=target.id(),
        target_type=target.type(),
        display_mask=display_mask,
        attr=attr)
    flags = request._data.get('flags')
    return int(request._data.get('value')) if flags else None
def predict_encoding(file_path, n_lines=20):
'''Get file encoding of a text file'''
import chardet
# Open the file as binary data
with open(file_path, 'rb') as f:
# Join binary lines for specified number of lines
rawdata = b''.join([f.readline() for _ in range(n_lines)])
return chardet.detect(rawdata)['encoding'] | Get file encoding of a text file | Below is the the instruction that describes the task:
### Input:
Get file encoding of a text file
### Response:
def predict_encoding(file_path, n_lines=20):
    '''Get file encoding of a text file by sniffing its first *n_lines* lines.'''
    import chardet
    # Read raw bytes; decoding would defeat the purpose of detection.
    with open(file_path, 'rb') as handle:
        sample = b''.join(handle.readline() for _ in range(n_lines))
    return chardet.detect(sample)['encoding']
def normalize_layout(l):
"""Make sure all the spots in a layout are where you can click.
Returns a copy of the layout with all spot coordinates are
normalized to within (0.0, 0.98).
"""
xs = []
ys = []
ks = []
for (k, (x, y)) in l.items():
xs.append(x)
ys.append(y)
ks.append(k)
minx = np.min(xs)
maxx = np.max(xs)
try:
xco = 0.98 / (maxx - minx)
xnorm = np.multiply(np.subtract(xs, [minx] * len(xs)), xco)
except ZeroDivisionError:
xnorm = np.array([0.5] * len(xs))
miny = np.min(ys)
maxy = np.max(ys)
try:
yco = 0.98 / (maxy - miny)
ynorm = np.multiply(np.subtract(ys, [miny] * len(ys)), yco)
except ZeroDivisionError:
ynorm = np.array([0.5] * len(ys))
return dict(zip(ks, zip(map(float, xnorm), map(float, ynorm)))) | Make sure all the spots in a layout are where you can click.
Returns a copy of the layout with all spot coordinates are
normalized to within (0.0, 0.98). | Below is the the instruction that describes the task:
### Input:
Make sure all the spots in a layout are where you can click.
Returns a copy of the layout with all spot coordinates are
normalized to within (0.0, 0.98).
### Response:
def normalize_layout(l):
    """Make sure all the spots in a layout are where you can click.

    Returns a copy of the layout with all spot coordinates
    normalized to within (0.0, 0.98).

    :param l: mapping of key -> (x, y) coordinate pair
    :return: dict mapping the same keys to normalized float (x, y) tuples
    """
    ks = list(l)
    xs = np.asarray([l[k][0] for k in ks], dtype=float)
    ys = np.asarray([l[k][1] for k in ks], dtype=float)

    def _normalize(vals):
        # Scale values into [0.0, 0.98].
        # BUGFIX: the original caught ZeroDivisionError, but dividing by a
        # numpy float zero returns inf (with a RuntimeWarning) instead of
        # raising, so degenerate (all-equal) axes produced NaN coordinates.
        # Test the span explicitly instead.
        lo = vals.min()
        span = vals.max() - lo
        if span == 0:
            # Every spot shares this coordinate; center them all.
            return np.full(len(vals), 0.5)
        return (vals - lo) * (0.98 / span)

    xnorm = _normalize(xs)
    ynorm = _normalize(ys)
    return dict(zip(ks, zip(map(float, xnorm), map(float, ynorm))))
def create_assignment( # pylint: disable=too-many-arguments
self,
name,
short_name,
weight,
max_points,
due_date_str,
gradebook_id='',
**kwargs
):
"""Create a new assignment.
Create a new assignment. By default, assignments are created
under the `Uncategorized` category.
Args:
name (str): descriptive assignment name,
i.e. ``new NUMERIC SIMPLE ASSIGNMENT``
short_name (str): short name of assignment, one word of
no more than 5 characters, i.e. ``SAnew``
weight (str): floating point value for weight, i.e. ``1.0``
max_points (str): floating point value for maximum point
total, i.e. ``100.0``
due_date_str (str): due date as string in ``mm-dd-yyyy``
format, i.e. ``08-21-2011``
gradebook_id (str): unique identifier for gradebook, i.e. ``2314``
kwargs (dict): dictionary containing additional parameters,
i.e. ``graderVisible``, ``totalAverage``, and ``categoryId``.
For example:
.. code-block:: python
{
u'graderVisible': True,
u'totalAverage': None
u'categoryId': 1007964,
}
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
dict: dictionary containing ``data``, ``status`` and ``message``
for example:
.. code-block:: python
{
u'data':
{
u'assignmentId': 18490492,
u'categoryId': 1293820,
u'description': u'',
u'dueDate': 1312171200000,
u'dueDateString': u'08-01-2011',
u'gradebookId': 1293808,
u'graderVisible': False,
u'gradingSchemeId': 18490493,
u'gradingSchemeType': u'NUMERIC',
u'isComposite': False,
u'isHomework': False,
u'maxPointsTotal': 100.0,
u'name': u'new NUMERIC SIMPLE ASSIGNMENT',
u'numStudentGradesToBeApproved': 0,
u'numStudentsToBeGraded': 614,
u'shortName': u'SAnew',
u'userDeleted': False,
u'weight': 1.0
},
u'message': u'assignment is created successfully',
u'status': 1
}
"""
data = {
'name': name,
'shortName': short_name,
'weight': weight,
'graderVisible': False,
'gradingSchemeType': 'NUMERIC',
'gradebookId': gradebook_id or self.gradebook_id,
'maxPointsTotal': max_points,
'dueDateString': due_date_str
}
data.update(kwargs)
log.info("Creating assignment %s", name)
response = self.post('assignment', data)
log.debug('Received response data: %s', response)
return response | Create a new assignment.
Create a new assignment. By default, assignments are created
under the `Uncategorized` category.
Args:
name (str): descriptive assignment name,
i.e. ``new NUMERIC SIMPLE ASSIGNMENT``
short_name (str): short name of assignment, one word of
no more than 5 characters, i.e. ``SAnew``
weight (str): floating point value for weight, i.e. ``1.0``
max_points (str): floating point value for maximum point
total, i.e. ``100.0``
due_date_str (str): due date as string in ``mm-dd-yyyy``
format, i.e. ``08-21-2011``
gradebook_id (str): unique identifier for gradebook, i.e. ``2314``
kwargs (dict): dictionary containing additional parameters,
i.e. ``graderVisible``, ``totalAverage``, and ``categoryId``.
For example:
.. code-block:: python
{
u'graderVisible': True,
u'totalAverage': None
u'categoryId': 1007964,
}
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
dict: dictionary containing ``data``, ``status`` and ``message``
for example:
.. code-block:: python
{
u'data':
{
u'assignmentId': 18490492,
u'categoryId': 1293820,
u'description': u'',
u'dueDate': 1312171200000,
u'dueDateString': u'08-01-2011',
u'gradebookId': 1293808,
u'graderVisible': False,
u'gradingSchemeId': 18490493,
u'gradingSchemeType': u'NUMERIC',
u'isComposite': False,
u'isHomework': False,
u'maxPointsTotal': 100.0,
u'name': u'new NUMERIC SIMPLE ASSIGNMENT',
u'numStudentGradesToBeApproved': 0,
u'numStudentsToBeGraded': 614,
u'shortName': u'SAnew',
u'userDeleted': False,
u'weight': 1.0
},
u'message': u'assignment is created successfully',
u'status': 1
} | Below is the the instruction that describes the task:
### Input:
Create a new assignment.
Create a new assignment. By default, assignments are created
under the `Uncategorized` category.
Args:
name (str): descriptive assignment name,
i.e. ``new NUMERIC SIMPLE ASSIGNMENT``
short_name (str): short name of assignment, one word of
no more than 5 characters, i.e. ``SAnew``
weight (str): floating point value for weight, i.e. ``1.0``
max_points (str): floating point value for maximum point
total, i.e. ``100.0``
due_date_str (str): due date as string in ``mm-dd-yyyy``
format, i.e. ``08-21-2011``
gradebook_id (str): unique identifier for gradebook, i.e. ``2314``
kwargs (dict): dictionary containing additional parameters,
i.e. ``graderVisible``, ``totalAverage``, and ``categoryId``.
For example:
.. code-block:: python
{
u'graderVisible': True,
u'totalAverage': None
u'categoryId': 1007964,
}
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
dict: dictionary containing ``data``, ``status`` and ``message``
for example:
.. code-block:: python
{
u'data':
{
u'assignmentId': 18490492,
u'categoryId': 1293820,
u'description': u'',
u'dueDate': 1312171200000,
u'dueDateString': u'08-01-2011',
u'gradebookId': 1293808,
u'graderVisible': False,
u'gradingSchemeId': 18490493,
u'gradingSchemeType': u'NUMERIC',
u'isComposite': False,
u'isHomework': False,
u'maxPointsTotal': 100.0,
u'name': u'new NUMERIC SIMPLE ASSIGNMENT',
u'numStudentGradesToBeApproved': 0,
u'numStudentsToBeGraded': 614,
u'shortName': u'SAnew',
u'userDeleted': False,
u'weight': 1.0
},
u'message': u'assignment is created successfully',
u'status': 1
}
### Response:
def create_assignment(  # pylint: disable=too-many-arguments
        self,
        name,
        short_name,
        weight,
        max_points,
        due_date_str,
        gradebook_id='',
        **kwargs
):
    """Create a new assignment in the gradebook.

    Assignments are created under the ``Uncategorized`` category unless a
    ``categoryId`` is supplied via ``kwargs``.

    Args:
        name (str): descriptive assignment name,
            i.e. ``new NUMERIC SIMPLE ASSIGNMENT``
        short_name (str): short name of assignment, one word of no more
            than 5 characters, i.e. ``SAnew``
        weight (str): floating point value for weight, i.e. ``1.0``
        max_points (str): floating point value for maximum point total,
            i.e. ``100.0``
        due_date_str (str): due date as string in ``mm-dd-yyyy`` format,
            i.e. ``08-21-2011``
        gradebook_id (str): unique identifier for gradebook; falls back to
            this gradebook's own id when empty
        kwargs (dict): additional parameters such as ``graderVisible``,
            ``totalAverage`` and ``categoryId``; these override the defaults

    Raises:
        requests.RequestException: Exception connection error
        ValueError: Unable to decode response content

    Returns:
        dict: dictionary containing ``data``, ``status`` and ``message``
        describing the created assignment
    """
    payload = {
        'name': name,
        'shortName': short_name,
        'weight': weight,
        'graderVisible': False,
        'gradingSchemeType': 'NUMERIC',
        'gradebookId': gradebook_id or self.gradebook_id,
        'maxPointsTotal': max_points,
        'dueDateString': due_date_str
    }
    # Caller-supplied options win over the defaults above.
    payload.update(kwargs)
    log.info("Creating assignment %s", name)
    response = self.post('assignment', payload)
    log.debug('Received response data: %s', response)
    return response
def respond(request, code):
"""
Responds to the request with the given response code.
If ``next`` is in the form, it will redirect instead.
"""
redirect = request.GET.get('next', request.POST.get('next'))
if redirect:
return HttpResponseRedirect(redirect)
return type('Response%d' % code, (HttpResponse, ), {'status_code': code})() | Responds to the request with the given response code.
If ``next`` is in the form, it will redirect instead. | Below is the the instruction that describes the task:
### Input:
Responds to the request with the given response code.
If ``next`` is in the form, it will redirect instead.
### Response:
def respond(request, code):
    """
    Responds to the request with the given response code.
    If ``next`` is in the form, it will redirect instead.
    """
    target = request.GET.get('next', request.POST.get('next'))
    if target:
        return HttpResponseRedirect(target)
    # Build a one-off HttpResponse subclass carrying the requested code.
    response_cls = type('Response%d' % code, (HttpResponse, ),
                        {'status_code': code})
    return response_cls()
def delete_target_group(name, region=None, key=None, keyid=None, profile=None):
    '''
    Delete a target group.

    name
        (string) - The Amazon Resource Name (ARN) of the resource.

    returns
        (bool) - True on success, False on failure.

    CLI example:

    .. code-block:: bash

        check-target:
          boto_elb2.delete_targets_group:
            - name: myALB
            - protocol: https
            - port: 443
            - vpc_id: myVPC
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
    exists = __salt__['boto_elbv2.target_group_exists'](name, region, key,
                                                        keyid, profile)
    if not exists:
        # Nothing to do: absence is the desired end state.
        ret['result'] = True
        ret['comment'] = 'Target Group {0} does not exists'.format(name)
        return ret
    if __opts__['test']:
        # Dry-run mode: report the pending change without performing it.
        ret['comment'] = 'Target Group {0} will be deleted'.format(name)
        return ret
    deleted = __salt__['boto_elbv2.delete_target_group'](name,
                                                         region=region,
                                                         key=key,
                                                         keyid=keyid,
                                                         profile=profile)
    if not deleted:
        ret['result'] = False
        ret['comment'] = 'Target Group {0} deletion failed'.format(name)
    else:
        ret['result'] = True
        ret['changes']['target_group'] = name
        ret['comment'] = 'Target Group {0} deleted'.format(name)
    return ret
name
(string) - The Amazon Resource Name (ARN) of the resource.
returns
(bool) - True on success, False on failure.
CLI example:
.. code-block:: bash
check-target:
boto_elb2.delete_targets_group:
- name: myALB
- protocol: https
- port: 443
- vpc_id: myVPC | Below is the instruction that describes the task:
### Input:
Delete target group.
name
(string) - The Amazon Resource Name (ARN) of the resource.
returns
(bool) - True on success, False on failure.
CLI example:
.. code-block:: bash
check-target:
boto_elb2.delete_targets_group:
- name: myALB
- protocol: https
- port: 443
- vpc_id: myVPC
### Response:
def delete_target_group(name, region=None, key=None, keyid=None, profile=None):
    '''
    Delete a target group.

    name
        (string) - The Amazon Resource Name (ARN) of the resource.

    returns
        (bool) - True on success, False on failure.

    CLI example:

    .. code-block:: bash

        check-target:
          boto_elb2.delete_targets_group:
            - name: myALB
            - protocol: https
            - port: 443
            - vpc_id: myVPC
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
    exists = __salt__['boto_elbv2.target_group_exists'](name, region, key,
                                                        keyid, profile)
    if not exists:
        # Nothing to do: absence is the desired end state.
        ret['result'] = True
        ret['comment'] = 'Target Group {0} does not exists'.format(name)
        return ret
    if __opts__['test']:
        # Dry-run mode: report the pending change without performing it.
        ret['comment'] = 'Target Group {0} will be deleted'.format(name)
        return ret
    deleted = __salt__['boto_elbv2.delete_target_group'](name,
                                                         region=region,
                                                         key=key,
                                                         keyid=keyid,
                                                         profile=profile)
    if not deleted:
        ret['result'] = False
        ret['comment'] = 'Target Group {0} deletion failed'.format(name)
    else:
        ret['result'] = True
        ret['changes']['target_group'] = name
        ret['comment'] = 'Target Group {0} deleted'.format(name)
    return ret
def time_slides_vacuum(time_slides, verbose = False):
    """
    Find redundant time slides in a dictionary mapping time slide IDs
    to instrument-->offset vectors, for example as returned by the
    as_dict() method of the TimeSlideTable class in
    pycbc_glue.ligolw.lsctables or by the load_time_slides() function
    in this module.  Two time slides are equivalent when their relative
    offsets (the offset vector's .deltas) coincide.  The return value
    maps each redundant ID to the ID of an equivalent time slide to
    keep; it can be used to delete duplicates from a time_slide table
    and then, via the applyKeyMapping() method of
    pycbc_glue.ligolw.table.Table instances, to update cross references
    (for example in the coinc_event table).

    Example:

    >>> slides = {"time_slide_id:0": {"H1": 0, "H2": 0},
    "time_slide_id:1": {"H1": 10, "H2": 10}, "time_slide_id:2": {"H1":
    0, "H2": 10}}
    >>> time_slides_vacuum(slides)
    {'time_slide_id:1': 'time_slide_id:0'}

    indicating that time_slide_id:1 is equivalent to time_slide_id:0
    and may be deleted, with references to it replaced by references to
    time_slide_id:0.
    """
    # Reduce each offset vector to its relative offsets; equivalence is
    # tested on these deltas, not on the absolute offsets.
    time_slides = dict((slide_id, offsets.deltas)
                       for slide_id, offsets in time_slides.items())
    progressbar = ProgressBar(max = len(time_slides)) if verbose else None
    # old ID --> surviving ID
    mapping = {}
    # Repeatedly take an arbitrary remaining entry and absorb every
    # other entry with matching deltas into it.
    while time_slides:
        keep_id, keep_deltas = time_slides.popitem()
        duplicate_ids = [other_id
                         for other_id, other_deltas in time_slides.items()
                         if other_deltas == keep_deltas]
        for other_id in duplicate_ids:
            mapping[other_id] = keep_id
            time_slides.pop(other_id)
        if progressbar is not None:
            progressbar.update(progressbar.max - len(time_slides))
    del progressbar
    return mapping
mappings, for example as returned by the as_dict() method of the
TimeSlideTable class in pycbc_glue.ligolw.lsctables or by the
load_time_slides() function in this module, construct and return a
mapping indicating time slide equivalences. This can be used to
delete redundant time slides from a time slide table, and then also
used via the applyKeyMapping() method of pycbc_glue.ligolw.table.Table
instances to update cross references (for example in the
coinc_event table).
Example:
>>> slides = {"time_slide_id:0": {"H1": 0, "H2": 0},
"time_slide_id:1": {"H1": 10, "H2": 10}, "time_slide_id:2": {"H1":
0, "H2": 10}}
>>> time_slides_vacuum(slides)
{'time_slide_id:1': 'time_slide_id:0'}
indicating that time_slide_id:1 describes a time slide that is
equivalent to time_slide_id:0. The calling code could use this
information to delete time_slide_id:1 from the time_slide table,
and replace references to that ID in other tables with references
to time_slide_id:0. | Below is the instruction that describes the task:
### Input:
Given a dictionary mapping time slide IDs to instrument-->offset
mappings, for example as returned by the as_dict() method of the
TimeSlideTable class in pycbc_glue.ligolw.lsctables or by the
load_time_slides() function in this module, construct and return a
mapping indicating time slide equivalences. This can be used to
delete redundant time slides from a time slide table, and then also
used via the applyKeyMapping() method of pycbc_glue.ligolw.table.Table
instances to update cross references (for example in the
coinc_event table).
Example:
>>> slides = {"time_slide_id:0": {"H1": 0, "H2": 0},
"time_slide_id:1": {"H1": 10, "H2": 10}, "time_slide_id:2": {"H1":
0, "H2": 10}}
>>> time_slides_vacuum(slides)
{'time_slide_id:1': 'time_slide_id:0'}
indicating that time_slide_id:1 describes a time slide that is
equivalent to time_slide_id:0. The calling code could use this
information to delete time_slide_id:1 from the time_slide table,
and replace references to that ID in other tables with references
to time_slide_id:0.
### Response:
def time_slides_vacuum(time_slides, verbose = False):
    """
    Find redundant time slides in a dictionary mapping time slide IDs
    to instrument-->offset vectors, for example as returned by the
    as_dict() method of the TimeSlideTable class in
    pycbc_glue.ligolw.lsctables or by the load_time_slides() function
    in this module.  Two time slides are equivalent when their relative
    offsets (the offset vector's .deltas) coincide.  The return value
    maps each redundant ID to the ID of an equivalent time slide to
    keep; it can be used to delete duplicates from a time_slide table
    and then, via the applyKeyMapping() method of
    pycbc_glue.ligolw.table.Table instances, to update cross references
    (for example in the coinc_event table).

    Example:

    >>> slides = {"time_slide_id:0": {"H1": 0, "H2": 0},
    "time_slide_id:1": {"H1": 10, "H2": 10}, "time_slide_id:2": {"H1":
    0, "H2": 10}}
    >>> time_slides_vacuum(slides)
    {'time_slide_id:1': 'time_slide_id:0'}

    indicating that time_slide_id:1 is equivalent to time_slide_id:0
    and may be deleted, with references to it replaced by references to
    time_slide_id:0.
    """
    # Reduce each offset vector to its relative offsets; equivalence is
    # tested on these deltas, not on the absolute offsets.
    time_slides = dict((slide_id, offsets.deltas)
                       for slide_id, offsets in time_slides.items())
    progressbar = ProgressBar(max = len(time_slides)) if verbose else None
    # old ID --> surviving ID
    mapping = {}
    # Repeatedly take an arbitrary remaining entry and absorb every
    # other entry with matching deltas into it.
    while time_slides:
        keep_id, keep_deltas = time_slides.popitem()
        duplicate_ids = [other_id
                         for other_id, other_deltas in time_slides.items()
                         if other_deltas == keep_deltas]
        for other_id in duplicate_ids:
            mapping[other_id] = keep_id
            time_slides.pop(other_id)
        if progressbar is not None:
            progressbar.update(progressbar.max - len(time_slides))
    del progressbar
    return mapping
def storm_relative_helicity(u, v, heights, depth, bottom=0 * units.m,
                            storm_u=0 * units('m/s'), storm_v=0 * units('m/s')):
    # Partially adapted from similar SharpPy code
    r"""Calculate storm relative helicity.

    Calculates storm relative helicity following [Markowski2010] 230-231.

    .. math:: \int\limits_0^d (\bar v - c) \cdot \bar\omega_{h} \,dz

    This is applied to the data from a hodograph with the following summation:

    .. math:: \sum_{n = 1}^{N-1} [(u_{n+1} - c_{x})(v_{n} - c_{y}) -
              (u_{n} - c_{x})(v_{n+1} - c_{y})]

    Parameters
    ----------
    u : array-like
        u component winds
    v : array-like
        v component winds
    heights : array-like
        atmospheric heights, will be converted to AGL
    depth : number
        depth of the layer
    bottom : number
        height of layer bottom AGL (default is surface)
    storm_u : number
        u component of storm motion (default is 0 m/s)
    storm_v : number
        v component of storm motion (default is 0 m/s)

    Returns
    -------
    `pint.Quantity, pint.Quantity, pint.Quantity`
        positive, negative, total storm-relative helicity
    """
    # Restrict the profile to the requested AGL layer.
    _, u, v = get_layer_heights(heights, depth, u, v, with_agl=True, bottom=bottom)

    # Winds relative to the storm motion vector.
    sr_u = u - storm_u
    sr_v = v - storm_v

    # Discrete form of the SRH integral: cross products of consecutive
    # storm-relative wind vectors around the hodograph.
    layer_terms = sr_u[1:] * sr_v[:-1] - sr_u[:-1] * sr_v[1:]

    # sum() on a masked array with a non-default mask can return a masked
    # value rather than 0 (see numpy/numpy#11736), so substitute zero
    # explicitly in that case.
    zero = 0.0 * units('meter**2 / second**2')
    positive_srh = layer_terms[layer_terms.magnitude > 0.].sum()
    if np.ma.is_masked(positive_srh):
        positive_srh = zero
    negative_srh = layer_terms[layer_terms.magnitude < 0.].sum()
    if np.ma.is_masked(negative_srh):
        negative_srh = zero

    out_units = 'meter ** 2 / second ** 2'
    return (positive_srh.to(out_units),
            negative_srh.to(out_units),
            (positive_srh + negative_srh).to(out_units))
Calculates storm relatively helicity following [Markowski2010] 230-231.
.. math:: \int\limits_0^d (\bar v - c) \cdot \bar\omega_{h} \,dz
This is applied to the data from a hodograph with the following summation:
.. math:: \sum_{n = 1}^{N-1} [(u_{n+1} - c_{x})(v_{n} - c_{y}) -
(u_{n} - c_{x})(v_{n+1} - c_{y})]
Parameters
----------
u : array-like
u component winds
v : array-like
v component winds
heights : array-like
atmospheric heights, will be converted to AGL
depth : number
depth of the layer
bottom : number
height of layer bottom AGL (default is surface)
storm_u : number
u component of storm motion (default is 0 m/s)
storm_v : number
v component of storm motion (default is 0 m/s)
Returns
-------
`pint.Quantity, pint.Quantity, pint.Quantity`
positive, negative, total storm-relative helicity | Below is the instruction that describes the task:
### Input:
r"""Calculate storm relative helicity.
Calculates storm relatively helicity following [Markowski2010] 230-231.
.. math:: \int\limits_0^d (\bar v - c) \cdot \bar\omega_{h} \,dz
This is applied to the data from a hodograph with the following summation:
.. math:: \sum_{n = 1}^{N-1} [(u_{n+1} - c_{x})(v_{n} - c_{y}) -
(u_{n} - c_{x})(v_{n+1} - c_{y})]
Parameters
----------
u : array-like
u component winds
v : array-like
v component winds
heights : array-like
atmospheric heights, will be converted to AGL
depth : number
depth of the layer
bottom : number
height of layer bottom AGL (default is surface)
storm_u : number
u component of storm motion (default is 0 m/s)
storm_v : number
v component of storm motion (default is 0 m/s)
Returns
-------
`pint.Quantity, pint.Quantity, pint.Quantity`
positive, negative, total storm-relative helicity
### Response:
def storm_relative_helicity(u, v, heights, depth, bottom=0 * units.m,
                            storm_u=0 * units('m/s'), storm_v=0 * units('m/s')):
    # Partially adapted from similar SharpPy code
    r"""Calculate storm relative helicity.

    Calculates storm relative helicity following [Markowski2010] 230-231.

    .. math:: \int\limits_0^d (\bar v - c) \cdot \bar\omega_{h} \,dz

    This is applied to the data from a hodograph with the following summation:

    .. math:: \sum_{n = 1}^{N-1} [(u_{n+1} - c_{x})(v_{n} - c_{y}) -
              (u_{n} - c_{x})(v_{n+1} - c_{y})]

    Parameters
    ----------
    u : array-like
        u component winds
    v : array-like
        v component winds
    heights : array-like
        atmospheric heights, will be converted to AGL
    depth : number
        depth of the layer
    bottom : number
        height of layer bottom AGL (default is surface)
    storm_u : number
        u component of storm motion (default is 0 m/s)
    storm_v : number
        v component of storm motion (default is 0 m/s)

    Returns
    -------
    `pint.Quantity, pint.Quantity, pint.Quantity`
        positive, negative, total storm-relative helicity
    """
    # Restrict the profile to the requested AGL layer.
    _, u, v = get_layer_heights(heights, depth, u, v, with_agl=True, bottom=bottom)

    # Winds relative to the storm motion vector.
    sr_u = u - storm_u
    sr_v = v - storm_v

    # Discrete form of the SRH integral: cross products of consecutive
    # storm-relative wind vectors around the hodograph.
    layer_terms = sr_u[1:] * sr_v[:-1] - sr_u[:-1] * sr_v[1:]

    # sum() on a masked array with a non-default mask can return a masked
    # value rather than 0 (see numpy/numpy#11736), so substitute zero
    # explicitly in that case.
    zero = 0.0 * units('meter**2 / second**2')
    positive_srh = layer_terms[layer_terms.magnitude > 0.].sum()
    if np.ma.is_masked(positive_srh):
        positive_srh = zero
    negative_srh = layer_terms[layer_terms.magnitude < 0.].sum()
    if np.ma.is_masked(negative_srh):
        negative_srh = zero

    out_units = 'meter ** 2 / second ** 2'
    return (positive_srh.to(out_units),
            negative_srh.to(out_units),
            (positive_srh + negative_srh).to(out_units))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.