code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def on_status(self, status):
"""Print out some tweets"""
self.out.write(json.dumps(status))
self.out.write(os.linesep)
self.received += 1
return not self.terminate | Print out some tweets | Below is the instruction that describes the task:
### Input:
Print out some tweets
### Response:
def on_status(self, status):
"""Print out some tweets"""
self.out.write(json.dumps(status))
self.out.write(os.linesep)
self.received += 1
return not self.terminate |
def delete_store_credit_by_id(cls, store_credit_id, **kwargs):
"""Delete StoreCredit
Delete an instance of StoreCredit by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_store_credit_by_id(store_credit_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_id: ID of storeCredit to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_store_credit_by_id_with_http_info(store_credit_id, **kwargs)
else:
(data) = cls._delete_store_credit_by_id_with_http_info(store_credit_id, **kwargs)
return data | Delete StoreCredit
Delete an instance of StoreCredit by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_store_credit_by_id(store_credit_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_id: ID of storeCredit to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Delete StoreCredit
Delete an instance of StoreCredit by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_store_credit_by_id(store_credit_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_id: ID of storeCredit to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
### Response:
def delete_store_credit_by_id(cls, store_credit_id, **kwargs):
"""Delete StoreCredit
Delete an instance of StoreCredit by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_store_credit_by_id(store_credit_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_id: ID of storeCredit to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_store_credit_by_id_with_http_info(store_credit_id, **kwargs)
else:
(data) = cls._delete_store_credit_by_id_with_http_info(store_credit_id, **kwargs)
return data |
def _generate_index(root, folder, paths,
bots_index=False, bots_index_paths=()):
"""Generates the index file for the specified folder"""
# Determine the namespaces listed here (as sub folders)
# and the files (.html files) that we should link to
namespaces = []
files = []
INDEX = 'index.html'
BOT_INDEX = 'botindex.html'
for item in (bots_index_paths or folder.iterdir()):
if item.is_dir():
namespaces.append(item)
elif item.name not in (INDEX, BOT_INDEX):
files.append(item)
# Now that everything is setup, write the index.html file
filename = folder / (BOT_INDEX if bots_index else INDEX)
with DocsWriter(root, filename, _get_path_for_type) as docs:
# Title should be the current folder name
docs.write_head(str(folder).replace(os.path.sep, '/').title(),
css_path=paths['css'],
default_css=paths['default_css'])
docs.set_menu_separator(paths['arrow'])
_build_menu(docs)
docs.write_title(str(filename.parent.relative_to(root))
.replace(os.path.sep, '/').title())
if bots_index:
docs.write_text('These are the methods that you may be able to '
'use as a bot. Click <a href="{}">here</a> to '
'view them all.'.format(INDEX))
else:
docs.write_text('Click <a href="{}">here</a> to view the methods '
'that you can use as a bot.'.format(BOT_INDEX))
if namespaces:
docs.write_title('Namespaces', level=3)
docs.begin_table(4)
namespaces.sort()
for namespace in namespaces:
# For every namespace, also write the index of it
namespace_paths = []
if bots_index:
for item in bots_index_paths:
if item.parent == namespace:
namespace_paths.append(item)
_generate_index(root, namespace, paths,
bots_index, namespace_paths)
docs.add_row(
namespace.stem.title(),
link=namespace / (BOT_INDEX if bots_index else INDEX))
docs.end_table()
docs.write_title('Available items')
docs.begin_table(2)
files = [(f, _find_title(f)) for f in files]
files.sort(key=lambda t: t[1])
for file, title in files:
docs.add_row(title, link=file)
docs.end_table()
docs.end_body() | Generates the index file for the specified folder | Below is the instruction that describes the task:
### Input:
Generates the index file for the specified folder
### Response:
def _generate_index(root, folder, paths,
bots_index=False, bots_index_paths=()):
"""Generates the index file for the specified folder"""
# Determine the namespaces listed here (as sub folders)
# and the files (.html files) that we should link to
namespaces = []
files = []
INDEX = 'index.html'
BOT_INDEX = 'botindex.html'
for item in (bots_index_paths or folder.iterdir()):
if item.is_dir():
namespaces.append(item)
elif item.name not in (INDEX, BOT_INDEX):
files.append(item)
# Now that everything is setup, write the index.html file
filename = folder / (BOT_INDEX if bots_index else INDEX)
with DocsWriter(root, filename, _get_path_for_type) as docs:
# Title should be the current folder name
docs.write_head(str(folder).replace(os.path.sep, '/').title(),
css_path=paths['css'],
default_css=paths['default_css'])
docs.set_menu_separator(paths['arrow'])
_build_menu(docs)
docs.write_title(str(filename.parent.relative_to(root))
.replace(os.path.sep, '/').title())
if bots_index:
docs.write_text('These are the methods that you may be able to '
'use as a bot. Click <a href="{}">here</a> to '
'view them all.'.format(INDEX))
else:
docs.write_text('Click <a href="{}">here</a> to view the methods '
'that you can use as a bot.'.format(BOT_INDEX))
if namespaces:
docs.write_title('Namespaces', level=3)
docs.begin_table(4)
namespaces.sort()
for namespace in namespaces:
# For every namespace, also write the index of it
namespace_paths = []
if bots_index:
for item in bots_index_paths:
if item.parent == namespace:
namespace_paths.append(item)
_generate_index(root, namespace, paths,
bots_index, namespace_paths)
docs.add_row(
namespace.stem.title(),
link=namespace / (BOT_INDEX if bots_index else INDEX))
docs.end_table()
docs.write_title('Available items')
docs.begin_table(2)
files = [(f, _find_title(f)) for f in files]
files.sort(key=lambda t: t[1])
for file, title in files:
docs.add_row(title, link=file)
docs.end_table()
docs.end_body() |
def meminfo():
"""Get the amount of memory and swap, Mebibytes"""
f = open("/proc/meminfo")
hwinfo = {}
for line in f.readlines():
meml = line.split()
if (meml[0] == "MemTotal:"):
mem = int(meml[1])
hwinfo["Mem_MiB"] = mem/1024
elif (meml[0] == "SwapTotal:"):
swap = int(meml[1])
hwinfo["Swap_MiB"] = swap/1024
f.close()
return hwinfo | Get the amount of memory and swap, Mebibytes | Below is the instruction that describes the task:
### Input:
Get the amount of memory and swap, Mebibytes
### Response:
def meminfo():
"""Get the amount of memory and swap, Mebibytes"""
f = open("/proc/meminfo")
hwinfo = {}
for line in f.readlines():
meml = line.split()
if (meml[0] == "MemTotal:"):
mem = int(meml[1])
hwinfo["Mem_MiB"] = mem/1024
elif (meml[0] == "SwapTotal:"):
swap = int(meml[1])
hwinfo["Swap_MiB"] = swap/1024
f.close()
return hwinfo |
def before_render(self):
"""Before template render hook
"""
# Render the Add button if the user has the AddClient permission
if check_permission(AddMethod, self.context):
self.context_actions[_("Add")] = {
"url": "createObject?type_name=Method",
"icon": "++resource++bika.lims.images/add.png"
}
# Don't allow any context actions on the Methods folder
self.request.set("disable_border", 1) | Before template render hook | Below is the instruction that describes the task:
### Input:
Before template render hook
### Response:
def before_render(self):
"""Before template render hook
"""
# Render the Add button if the user has the AddClient permission
if check_permission(AddMethod, self.context):
self.context_actions[_("Add")] = {
"url": "createObject?type_name=Method",
"icon": "++resource++bika.lims.images/add.png"
}
# Don't allow any context actions on the Methods folder
self.request.set("disable_border", 1) |
def binpath(*paths):
'''Like os.path.join but acts relative to this packages bin path.'''
package_root = os.path.dirname(__file__)
return os.path.normpath(os.path.join(package_root, 'bin', *paths)) | Like os.path.join but acts relative to this packages bin path. | Below is the instruction that describes the task:
### Input:
Like os.path.join but acts relative to this packages bin path.
### Response:
def binpath(*paths):
'''Like os.path.join but acts relative to this packages bin path.'''
package_root = os.path.dirname(__file__)
return os.path.normpath(os.path.join(package_root, 'bin', *paths)) |
def up_down_capture(returns, factor_returns, **kwargs):
"""
Computes the ratio of up_capture to down_capture.
Parameters
----------
returns : pd.Series or np.ndarray
Returns of the strategy, noncumulative.
- See full explanation in :func:`~empyrical.stats.cum_returns`.
factor_returns : pd.Series or np.ndarray
Noncumulative returns of the factor to which beta is
computed. Usually a benchmark such as the market.
- This is in the same style as returns.
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Value ignored if `annualization` parameter is specified.
Defaults are::
'monthly':12
'weekly': 52
'daily': 252
Returns
-------
up_down_capture : float
the updown capture ratio
"""
return (up_capture(returns, factor_returns, **kwargs) /
down_capture(returns, factor_returns, **kwargs)) | Computes the ratio of up_capture to down_capture.
Parameters
----------
returns : pd.Series or np.ndarray
Returns of the strategy, noncumulative.
- See full explanation in :func:`~empyrical.stats.cum_returns`.
factor_returns : pd.Series or np.ndarray
Noncumulative returns of the factor to which beta is
computed. Usually a benchmark such as the market.
- This is in the same style as returns.
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Value ignored if `annualization` parameter is specified.
Defaults are::
'monthly':12
'weekly': 52
'daily': 252
Returns
-------
up_down_capture : float
the updown capture ratio | Below is the instruction that describes the task:
### Input:
Computes the ratio of up_capture to down_capture.
Parameters
----------
returns : pd.Series or np.ndarray
Returns of the strategy, noncumulative.
- See full explanation in :func:`~empyrical.stats.cum_returns`.
factor_returns : pd.Series or np.ndarray
Noncumulative returns of the factor to which beta is
computed. Usually a benchmark such as the market.
- This is in the same style as returns.
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Value ignored if `annualization` parameter is specified.
Defaults are::
'monthly':12
'weekly': 52
'daily': 252
Returns
-------
up_down_capture : float
the updown capture ratio
### Response:
def up_down_capture(returns, factor_returns, **kwargs):
"""
Computes the ratio of up_capture to down_capture.
Parameters
----------
returns : pd.Series or np.ndarray
Returns of the strategy, noncumulative.
- See full explanation in :func:`~empyrical.stats.cum_returns`.
factor_returns : pd.Series or np.ndarray
Noncumulative returns of the factor to which beta is
computed. Usually a benchmark such as the market.
- This is in the same style as returns.
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Value ignored if `annualization` parameter is specified.
Defaults are::
'monthly':12
'weekly': 52
'daily': 252
Returns
-------
up_down_capture : float
the updown capture ratio
"""
return (up_capture(returns, factor_returns, **kwargs) /
down_capture(returns, factor_returns, **kwargs)) |
def hasoriginal(self,allowempty=False):
"""Does the correction record the old annotations prior to correction?"""
for e in self.select(Original,None,False, False):
if not allowempty and len(e) == 0: continue
return True
return False | Does the correction record the old annotations prior to correction? | Below is the instruction that describes the task:
### Input:
Does the correction record the old annotations prior to correction?
### Response:
def hasoriginal(self,allowempty=False):
"""Does the correction record the old annotations prior to correction?"""
for e in self.select(Original,None,False, False):
if not allowempty and len(e) == 0: continue
return True
return False |
def handle_molecular_activity_default(_: str, __: int, tokens: ParseResults) -> ParseResults:
"""Handle a BEL 2.0 style molecular activity with BEL default names."""
upgraded = language.activity_labels[tokens[0]]
tokens[NAMESPACE] = BEL_DEFAULT_NAMESPACE
tokens[NAME] = upgraded
return tokens | Handle a BEL 2.0 style molecular activity with BEL default names. | Below is the instruction that describes the task:
### Input:
Handle a BEL 2.0 style molecular activity with BEL default names.
### Response:
def handle_molecular_activity_default(_: str, __: int, tokens: ParseResults) -> ParseResults:
"""Handle a BEL 2.0 style molecular activity with BEL default names."""
upgraded = language.activity_labels[tokens[0]]
tokens[NAMESPACE] = BEL_DEFAULT_NAMESPACE
tokens[NAME] = upgraded
return tokens |
def get_data_dict_from_config(self, config_dict):
"""Return a dictionary of data inferred from config_dict."""
return {
key: self.parsedpage.get_filtered_values_by_selector(
item_dict['selector'],
item_dict.get('regex_filter', None),
item_dict.get('regex_group', 1)
)
for key, item_dict in config_dict.iteritems()
if item_dict.get('selector', None) is not None
} | Return a dictionary of data inferred from config_dict. | Below is the instruction that describes the task:
### Input:
Return a dictionary of data inferred from config_dict.
### Response:
def get_data_dict_from_config(self, config_dict):
"""Return a dictionary of data inferred from config_dict."""
return {
key: self.parsedpage.get_filtered_values_by_selector(
item_dict['selector'],
item_dict.get('regex_filter', None),
item_dict.get('regex_group', 1)
)
for key, item_dict in config_dict.iteritems()
if item_dict.get('selector', None) is not None
} |
def _get_matplot_dict(self, option, prop, defdict):
"""Returns a copy of the settings dictionary for the specified option in
curargs with update values where the value is replaced by the key from
the relevant default dictionary.
:arg option: the key in self.curargs to update.
:arg defdict: the default dictionary whose keys should be used when values match.
"""
cargs = self.curargs[option]
result = cargs.copy()
for varname in cargs:
if prop in cargs[varname]:
name = cargs[varname][prop]
for key, val in list(defdict.items()):
if val == name:
cargs[varname][prop] = key
break
return result | Returns a copy of the settings dictionary for the specified option in
curargs with update values where the value is replaced by the key from
the relevant default dictionary.
:arg option: the key in self.curargs to update.
:arg defdict: the default dictionary whose keys should be used when values match. | Below is the instruction that describes the task:
### Input:
Returns a copy of the settings dictionary for the specified option in
curargs with update values where the value is replaced by the key from
the relevant default dictionary.
:arg option: the key in self.curargs to update.
:arg defdict: the default dictionary whose keys should be used when values match.
### Response:
def _get_matplot_dict(self, option, prop, defdict):
"""Returns a copy of the settings dictionary for the specified option in
curargs with update values where the value is replaced by the key from
the relevant default dictionary.
:arg option: the key in self.curargs to update.
:arg defdict: the default dictionary whose keys should be used when values match.
"""
cargs = self.curargs[option]
result = cargs.copy()
for varname in cargs:
if prop in cargs[varname]:
name = cargs[varname][prop]
for key, val in list(defdict.items()):
if val == name:
cargs[varname][prop] = key
break
return result |
def tokenize(self, text, context=0, skip_style_tags=False):
"""Build a list of tokens from a string of wikicode and return it."""
split = self.regex.split(text)
self._text = [segment for segment in split if segment]
self._head = self._global = self._depth = 0
self._bad_routes = set()
self._skip_style_tags = skip_style_tags
try:
tokens = self._parse(context)
except BadRoute: # pragma: no cover (untestable/exceptional case)
raise ParserError("Python tokenizer exited with BadRoute")
if self._stacks: # pragma: no cover (untestable/exceptional case)
err = "Python tokenizer exited with non-empty token stack"
raise ParserError(err)
return tokens | Build a list of tokens from a string of wikicode and return it. | Below is the instruction that describes the task:
### Input:
Build a list of tokens from a string of wikicode and return it.
### Response:
def tokenize(self, text, context=0, skip_style_tags=False):
"""Build a list of tokens from a string of wikicode and return it."""
split = self.regex.split(text)
self._text = [segment for segment in split if segment]
self._head = self._global = self._depth = 0
self._bad_routes = set()
self._skip_style_tags = skip_style_tags
try:
tokens = self._parse(context)
except BadRoute: # pragma: no cover (untestable/exceptional case)
raise ParserError("Python tokenizer exited with BadRoute")
if self._stacks: # pragma: no cover (untestable/exceptional case)
err = "Python tokenizer exited with non-empty token stack"
raise ParserError(err)
return tokens |
def get_player(self, guild_id: int) -> Player:
"""
Gets a Player object from a guild ID.
Parameters
----------
guild_id : int
Discord guild ID.
Returns
-------
Player
Raises
------
KeyError
If that guild does not have a Player, e.g. is not connected to any
voice channel.
"""
if guild_id in self._player_dict:
return self._player_dict[guild_id]
raise KeyError("No such player for that guild.") | Gets a Player object from a guild ID.
Parameters
----------
guild_id : int
Discord guild ID.
Returns
-------
Player
Raises
------
KeyError
If that guild does not have a Player, e.g. is not connected to any
voice channel. | Below is the instruction that describes the task:
### Input:
Gets a Player object from a guild ID.
Parameters
----------
guild_id : int
Discord guild ID.
Returns
-------
Player
Raises
------
KeyError
If that guild does not have a Player, e.g. is not connected to any
voice channel.
### Response:
def get_player(self, guild_id: int) -> Player:
"""
Gets a Player object from a guild ID.
Parameters
----------
guild_id : int
Discord guild ID.
Returns
-------
Player
Raises
------
KeyError
If that guild does not have a Player, e.g. is not connected to any
voice channel.
"""
if guild_id in self._player_dict:
return self._player_dict[guild_id]
raise KeyError("No such player for that guild.") |
def assert_between(lower_bound, upper_bound, expr, msg_fmt="{msg}"):
"""Fail if an expression is not between certain bounds (inclusive).
>>> assert_between(5, 15, 5)
>>> assert_between(5, 15, 15)
>>> assert_between(5, 15, 4.9)
Traceback (most recent call last):
...
AssertionError: 4.9 is not between 5 and 15
The following msg_fmt arguments are supported:
* msg - the default error message
* lower - lower bound
* upper - upper bound
* expr - tested expression
"""
if not lower_bound <= expr <= upper_bound:
msg = "{!r} is not between {} and {}".format(
expr, lower_bound, upper_bound
)
fail(
msg_fmt.format(
msg=msg, lower=lower_bound, upper=upper_bound, expr=expr
)
) | Fail if an expression is not between certain bounds (inclusive).
>>> assert_between(5, 15, 5)
>>> assert_between(5, 15, 15)
>>> assert_between(5, 15, 4.9)
Traceback (most recent call last):
...
AssertionError: 4.9 is not between 5 and 15
The following msg_fmt arguments are supported:
* msg - the default error message
* lower - lower bound
* upper - upper bound
* expr - tested expression | Below is the instruction that describes the task:
### Input:
Fail if an expression is not between certain bounds (inclusive).
>>> assert_between(5, 15, 5)
>>> assert_between(5, 15, 15)
>>> assert_between(5, 15, 4.9)
Traceback (most recent call last):
...
AssertionError: 4.9 is not between 5 and 15
The following msg_fmt arguments are supported:
* msg - the default error message
* lower - lower bound
* upper - upper bound
* expr - tested expression
### Response:
def assert_between(lower_bound, upper_bound, expr, msg_fmt="{msg}"):
"""Fail if an expression is not between certain bounds (inclusive).
>>> assert_between(5, 15, 5)
>>> assert_between(5, 15, 15)
>>> assert_between(5, 15, 4.9)
Traceback (most recent call last):
...
AssertionError: 4.9 is not between 5 and 15
The following msg_fmt arguments are supported:
* msg - the default error message
* lower - lower bound
* upper - upper bound
* expr - tested expression
"""
if not lower_bound <= expr <= upper_bound:
msg = "{!r} is not between {} and {}".format(
expr, lower_bound, upper_bound
)
fail(
msg_fmt.format(
msg=msg, lower=lower_bound, upper=upper_bound, expr=expr
)
) |
def _UpdateYear(self, mediator, month):
"""Updates the year to use for events, based on last observed month.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
month (int): month observed by the parser, where January is 1.
"""
if not self._year_use:
self._year_use = mediator.GetEstimatedYear()
if not self._maximum_year:
self._maximum_year = mediator.GetLatestYear()
if not self._last_month:
self._last_month = month
return
# Some syslog daemons allow out-of-order sequences, so allow some leeway
# to not cause Apr->May->Apr to cause the year to increment.
# See http://bugzilla.adiscon.com/show_bug.cgi?id=527
if self._last_month > (month + 1):
if self._year_use != self._maximum_year:
self._year_use += 1
self._last_month = month | Updates the year to use for events, based on last observed month.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
month (int): month observed by the parser, where January is 1. | Below is the instruction that describes the task:
### Input:
Updates the year to use for events, based on last observed month.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
month (int): month observed by the parser, where January is 1.
### Response:
def _UpdateYear(self, mediator, month):
"""Updates the year to use for events, based on last observed month.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
month (int): month observed by the parser, where January is 1.
"""
if not self._year_use:
self._year_use = mediator.GetEstimatedYear()
if not self._maximum_year:
self._maximum_year = mediator.GetLatestYear()
if not self._last_month:
self._last_month = month
return
# Some syslog daemons allow out-of-order sequences, so allow some leeway
# to not cause Apr->May->Apr to cause the year to increment.
# See http://bugzilla.adiscon.com/show_bug.cgi?id=527
if self._last_month > (month + 1):
if self._year_use != self._maximum_year:
self._year_use += 1
self._last_month = month |
def create_collection(cls, href, collection=None, props=None):
"""Create a collection.
If the collection already exists and neither ``collection`` nor
``props`` are set, this method shouldn't do anything. Otherwise the
existing collection must be replaced.
``collection`` is a list of vobject components.
``props`` are metadata values for the collection.
``props["tag"]`` is the type of collection (VCALENDAR or
VADDRESSBOOK). If the key ``tag`` is missing, it is guessed from the
collection.
"""
# Path should already be sanitized
attributes = _get_attributes_from_path(href)
if len(attributes) <= 1:
raise PrincipalNotAllowedError
# Try to infer tag
if not props:
props = {}
if not props.get("tag") and collection:
props["tag"] = collection[0].name
# Try first getting the collection if exists, or create a new one otherwise.
try:
self = cls(href, principal=False, tag=props.get("tag"))
except api.exceptions.DoesNotExist:
user_path = posixpath.join('/', cls.user)
collection_name = hashlib.sha256(str(time.time()).encode()).hexdigest()
sane_path = posixpath.join(user_path, collection_name)
if props.get("tag") == "VCALENDAR":
inst = api.Calendar.create(cls.etesync, collection_name, None)
elif props.get("tag") == "VADDRESSBOOK":
inst = api.AddressBook.create(cls.etesync, collection_name, None)
else:
raise RuntimeError("Bad tag.")
inst.save()
self = cls(sane_path, principal=False)
self.set_meta(props)
if collection:
if props.get("tag") == "VCALENDAR":
collection, = collection
items = []
for content in ("vevent", "vtodo", "vjournal"):
items.extend(
getattr(collection, "%s_list" % content, []))
items_by_uid = groupby(sorted(items, key=get_uid), get_uid)
vobject_items = {}
for uid, items in items_by_uid:
new_collection = vobject.iCalendar()
for item in items:
new_collection.add(item)
href = self._find_available_file_name(
vobject_items.get)
vobject_items[href] = new_collection
self.upload_all_nonatomic(vobject_items)
elif props.get("tag") == "VADDRESSBOOK":
vobject_items = {}
for card in collection:
href = self._find_available_file_name(
vobject_items.get)
vobject_items[href] = card
self.upload_all_nonatomic(vobject_items)
return self | Create a collection.
If the collection already exists and neither ``collection`` nor
``props`` are set, this method shouldn't do anything. Otherwise the
existing collection must be replaced.
``collection`` is a list of vobject components.
``props`` are metadata values for the collection.
``props["tag"]`` is the type of collection (VCALENDAR or
VADDRESSBOOK). If the key ``tag`` is missing, it is guessed from the
collection. | Below is the instruction that describes the task:
### Input:
Create a collection.
If the collection already exists and neither ``collection`` nor
``props`` are set, this method shouldn't do anything. Otherwise the
existing collection must be replaced.
``collection`` is a list of vobject components.
``props`` are metadata values for the collection.
``props["tag"]`` is the type of collection (VCALENDAR or
VADDRESSBOOK). If the key ``tag`` is missing, it is guessed from the
collection.
### Response:
def create_collection(cls, href, collection=None, props=None):
"""Create a collection.
If the collection already exists and neither ``collection`` nor
``props`` are set, this method shouldn't do anything. Otherwise the
existing collection must be replaced.
``collection`` is a list of vobject components.
``props`` are metadata values for the collection.
``props["tag"]`` is the type of collection (VCALENDAR or
VADDRESSBOOK). If the key ``tag`` is missing, it is guessed from the
collection.
"""
# Path should already be sanitized
attributes = _get_attributes_from_path(href)
if len(attributes) <= 1:
raise PrincipalNotAllowedError
# Try to infer tag
if not props:
props = {}
if not props.get("tag") and collection:
props["tag"] = collection[0].name
# Try first getting the collection if exists, or create a new one otherwise.
try:
self = cls(href, principal=False, tag=props.get("tag"))
except api.exceptions.DoesNotExist:
user_path = posixpath.join('/', cls.user)
collection_name = hashlib.sha256(str(time.time()).encode()).hexdigest()
sane_path = posixpath.join(user_path, collection_name)
if props.get("tag") == "VCALENDAR":
inst = api.Calendar.create(cls.etesync, collection_name, None)
elif props.get("tag") == "VADDRESSBOOK":
inst = api.AddressBook.create(cls.etesync, collection_name, None)
else:
raise RuntimeError("Bad tag.")
inst.save()
self = cls(sane_path, principal=False)
self.set_meta(props)
if collection:
if props.get("tag") == "VCALENDAR":
collection, = collection
items = []
for content in ("vevent", "vtodo", "vjournal"):
items.extend(
getattr(collection, "%s_list" % content, []))
items_by_uid = groupby(sorted(items, key=get_uid), get_uid)
vobject_items = {}
for uid, items in items_by_uid:
new_collection = vobject.iCalendar()
for item in items:
new_collection.add(item)
href = self._find_available_file_name(
vobject_items.get)
vobject_items[href] = new_collection
self.upload_all_nonatomic(vobject_items)
elif props.get("tag") == "VADDRESSBOOK":
vobject_items = {}
for card in collection:
href = self._find_available_file_name(
vobject_items.get)
vobject_items[href] = card
self.upload_all_nonatomic(vobject_items)
return self |
def tree(node, formatter=None, prefix=None, postfix=None, _depth=1):
"""Print a tree.
Sometimes it's useful to print datastructures as a tree. This function prints
out a pretty tree with root `node`. A tree is represented as a :class:`dict`,
whose keys are node names and values are :class:`dict` objects for sub-trees
and :class:`None` for terminals.
:param dict node: The root of the tree to print.
:param callable formatter: A callable that takes a single argument, the key,
that formats the key in the tree.
:param callable prefix: A callable that takes a single argument, the key,
that adds any additional text before the formatted key.
:param callable postfix: A callable that takes a single argument, the key,
that adds any additional text after the formatted key.
"""
current = 0
length = len(node.keys())
tee_joint = '\xe2\x94\x9c\xe2\x94\x80\xe2\x94\x80'
elbow_joint = '\xe2\x94\x94\xe2\x94\x80\xe2\x94\x80'
for key, value in node.iteritems():
current += 1
k = formatter(key) if formatter else key
pre = prefix(key) if prefix else ''
post = postfix(key) if postfix else ''
space = elbow_joint if current == length else tee_joint
yield ' {space} {prefix}{key}{postfix}'.format(space=space, key=k, prefix=pre, postfix=post)
if value:
for e in tree(value, formatter=formatter, prefix=prefix, postfix=postfix, _depth=_depth + 1):
yield (' | ' if current != length else ' ') + e | Print a tree.
Sometimes it's useful to print datastructures as a tree. This function prints
out a pretty tree with root `node`. A tree is represented as a :class:`dict`,
whose keys are node names and values are :class:`dict` objects for sub-trees
and :class:`None` for terminals.
:param dict node: The root of the tree to print.
:param callable formatter: A callable that takes a single argument, the key,
that formats the key in the tree.
:param callable prefix: A callable that takes a single argument, the key,
that adds any additional text before the formatted key.
:param callable postfix: A callable that takes a single argument, the key,
that adds any additional text after the formatted key. | Below is the instruction that describes the task:
### Input:
Print a tree.
Sometimes it's useful to print datastructures as a tree. This function prints
out a pretty tree with root `node`. A tree is represented as a :class:`dict`,
whose keys are node names and values are :class:`dict` objects for sub-trees
and :class:`None` for terminals.
:param dict node: The root of the tree to print.
:param callable formatter: A callable that takes a single argument, the key,
that formats the key in the tree.
:param callable prefix: A callable that takes a single argument, the key,
that adds any additional text before the formatted key.
:param callable postfix: A callable that takes a single argument, the key,
that adds any additional text after the formatted key.
### Response:
def tree(node, formatter=None, prefix=None, postfix=None, _depth=1):
    """Print a tree.

    Sometimes it's useful to print datastructures as a tree. This function prints
    out a pretty tree with root `node`. A tree is represented as a :class:`dict`,
    whose keys are node names and values are :class:`dict` objects for sub-trees
    and :class:`None` for terminals.

    :param dict node: The root of the tree to print.
    :param callable formatter: A callable that takes a single argument, the key,
        that formats the key in the tree.
    :param callable prefix: A callable that takes a single argument, the key,
        that adds any additional text before the formatted key.
    :param callable postfix: A callable that takes a single argument, the key,
        that adds any additional text after the formatted key.
    :returns: A generator of rendered tree lines, one per node.
    """
    current = 0
    length = len(node)  # child count at this level; the last child gets an elbow
    # Escape sequences are the UTF-8 byte values of the box-drawing joints
    # ('├──' and '└──'); kept verbatim for output compatibility.
    tee_joint = '\xe2\x94\x9c\xe2\x94\x80\xe2\x94\x80'
    elbow_joint = '\xe2\x94\x94\xe2\x94\x80\xe2\x94\x80'
    # .items() instead of the Python-2-only .iteritems() so the function
    # also works on Python 3 (on Python 2 it merely builds a small list).
    for key, value in node.items():
        current += 1
        k = formatter(key) if formatter else key
        pre = prefix(key) if prefix else ''
        post = postfix(key) if postfix else ''
        space = elbow_joint if current == length else tee_joint
        yield ' {space} {prefix}{key}{postfix}'.format(space=space, key=k, prefix=pre, postfix=post)
        if value:
            # Recurse into the sub-tree; continue the vertical rail unless
            # this branch was the last child at this level.
            for e in tree(value, formatter=formatter, prefix=prefix, postfix=postfix, _depth=_depth + 1):
                yield (' | ' if current != length else ' ') + e
def onExpandKeyEvent(self, keyEvent):
    """Handle one of the expand-selection key events.

    Re-dispatches ``keyEvent`` to the editor with the Alt and Shift
    modifiers stripped so the widget performs the plain cursor movement,
    while the stored selection anchor (``self._start``) is preserved.
    """
    # Lazily record the anchor of the expanding selection as
    # (line, visible column) the first time such a key is seen.
    if self._start is None:
        currentBlockText = self._qpart.textCursor().block().text()
        line = self._qpart.cursorPosition[0]
        visibleColumn = self._realToVisibleColumn(currentBlockText, self._qpart.cursorPosition[1])
        self._start = (line, visibleColumn)
    # Build a copy of the event with Alt/Shift masked out.
    modifiersWithoutAltShift = keyEvent.modifiers() & ( ~ (Qt.AltModifier | Qt.ShiftModifier))
    newEvent = QKeyEvent(keyEvent.type(),
                         keyEvent.key(),
                         modifiersWithoutAltShift,
                         keyEvent.text(),
                         keyEvent.isAutoRepeat(),
                         keyEvent.count())
    # Temporarily mute the reset slots: the synthetic key press moves the
    # cursor, which would otherwise clear the selection anchor.
    self._qpart.cursorPositionChanged.disconnect(self._reset)
    self._qpart.selectionChanged.disconnect(self._reset)
    super(self._qpart.__class__, self._qpart).keyPressEvent(newEvent)
    self._qpart.cursorPositionChanged.connect(self._reset)
    self._qpart.selectionChanged.connect(self._reset) | One of expand selection key events | Below is the the instruction that describes the task:
### Input:
One of expand selection key events
### Response:
def onExpandKeyEvent(self, keyEvent):
    """One of expand selection key events"""
    # Record the selection anchor as (line, visible column) on first use.
    if self._start is None:
        block_text = self._qpart.textCursor().block().text()
        row = self._qpart.cursorPosition[0]
        col = self._realToVisibleColumn(block_text, self._qpart.cursorPosition[1])
        self._start = (row, col)
    # Re-dispatch the event without Alt/Shift so the editor performs the
    # plain cursor movement for this key.
    stripped_modifiers = keyEvent.modifiers() & ~(Qt.AltModifier | Qt.ShiftModifier)
    plain_event = QKeyEvent(keyEvent.type(), keyEvent.key(), stripped_modifiers,
                            keyEvent.text(), keyEvent.isAutoRepeat(),
                            keyEvent.count())
    # Mute the reset slots around the synthetic key press: the resulting
    # cursor move must not clear the stored anchor.
    self._qpart.cursorPositionChanged.disconnect(self._reset)
    self._qpart.selectionChanged.disconnect(self._reset)
    super(self._qpart.__class__, self._qpart).keyPressEvent(plain_event)
    self._qpart.cursorPositionChanged.connect(self._reset)
    self._qpart.selectionChanged.connect(self._reset)
def copy(src_parent, src_idx, dest_parent, dest_idx):
    """Copy an item.

    Reads the child at ``src_idx`` of ``src_parent`` (via ``get_child``)
    and stores it at ``dest_idx`` in ``dest_parent``.
    """
    if isinstance(dest_parent, list):
        # Indices may arrive as strings; list subscripts must be ints.
        dest_idx = int(dest_idx)
    dest_parent[dest_idx] = get_child(src_parent, src_idx) | Copy an item. | Below is the the instruction that describes the task:
### Input:
Copy an item.
### Response:
def copy(src_parent, src_idx, dest_parent, dest_idx):
    """Copy an item."""
    # List destinations need an integer subscript; mappings take the key as-is.
    key = int(dest_idx) if isinstance(dest_parent, list) else dest_idx
    dest_parent[key] = get_child(src_parent, src_idx)
def middleware(self, *args, **kwargs):
    """Decorate and register middleware

    :param args: captures all of the positional arguments passed in
    :type args: tuple(Any)
    :param kwargs: captures the keyword arguments passed in
    :type kwargs: dict(Any)
    :return: The middleware function to use as the decorator
    :rtype: fn
    """
    # Registration defaults expected by the plugin machinery.
    kwargs.setdefault('priority', 5)
    kwargs.setdefault('relative', None)
    kwargs.setdefault('attach_to', None)
    kwargs['with_context'] = True  # This is the whole point of this plugin
    plugin = self.plugin
    reg = self.reg
    # Bare usage (@ctx.middleware): the middleware function itself is the
    # single positional argument -- register it immediately.
    if len(args) == 1 and callable(args[0]):
        middle_f = args[0]
        return plugin._add_new_middleware(reg, middle_f, **kwargs)
    # Parameterized usage (@ctx.middleware(...)): return a decorator that
    # registers the function with the captured arguments.
    def wrapper(middle_f):
        nonlocal plugin, reg
        nonlocal args, kwargs
        return plugin._add_new_middleware(reg, middle_f, *args, **kwargs)
    return wrapper | Decorate and register middleware
:param args: captures all of the positional arguments passed in
:type args: tuple(Any)
:param kwargs: captures the keyword arguments passed in
:type kwargs: dict(Any)
:return: The middleware function to use as the decorator
:rtype: fn | Below is the instruction that describes the task:
### Input:
Decorate and register middleware
:param args: captures all of the positional arguments passed in
:type args: tuple(Any)
:param kwargs: captures the keyword arguments passed in
:type kwargs: dict(Any)
:return: The middleware function to use as the decorator
:rtype: fn
### Response:
def middleware(self, *args, **kwargs):
    """Decorate and register middleware

    :param args: captures all of the positional arguments passed in
    :type args: tuple(Any)
    :param kwargs: captures the keyword arguments passed in
    :type kwargs: dict(Any)
    :return: The middleware function to use as the decorator
    :rtype: fn
    """
    # Fill in the registration defaults the plugin machinery expects.
    for option, default in (('priority', 5), ('relative', None),
                            ('attach_to', None)):
        kwargs.setdefault(option, default)
    kwargs['with_context'] = True  # This is the whole point of this plugin
    plugin = self.plugin
    reg = self.reg
    # Bare usage: @ctx.middleware -- register the function right away.
    if len(args) == 1 and callable(args[0]):
        return plugin._add_new_middleware(reg, args[0], **kwargs)

    # Parameterized usage: @ctx.middleware(...) -- hand back a decorator.
    def wrapper(middle_f):
        return plugin._add_new_middleware(reg, middle_f, *args, **kwargs)
    return wrapper
def _search_folder_for_item_or_folder(name, folder_id):
    """
    Find an item or folder matching the name. A folder will be found first if
    both are present.
    :param name: The name of the resource
    :type name: string
    :param folder_id: The folder to search within
    :type folder_id: int | long
    :returns: A tuple indicating whether the resource is an item an the id of
    said resource. i.e. (True, item_id) or (False, folder_id). Note that in
    the event that we do not find a result return (False, -1)
    :rtype: (bool, int | long)
    """
    # Refresh the session token before hitting the server.
    session.token = verify_credentials()
    children = session.communicator.folder_children(session.token, folder_id)
    # Folders take precedence over items when both share the name.
    for folder in children['folders']:
        if folder['name'] == name:
            return False, folder['folder_id']  # Found a folder
    for item in children['items']:
        if item['name'] == name:
            return True, item['item_id']  # Found an item
    return False, -1 | Find an item or folder matching the name. A folder will be found first if
both are present.
:param name: The name of the resource
:type name: string
:param folder_id: The folder to search within
:type folder_id: int | long
:returns: A tuple indicating whether the resource is an item an the id of
said resource. i.e. (True, item_id) or (False, folder_id). Note that in
the event that we do not find a result return (False, -1)
:rtype: (bool, int | long) | Below is the instruction that describes the task:
### Input:
Find an item or folder matching the name. A folder will be found first if
both are present.
:param name: The name of the resource
:type name: string
:param folder_id: The folder to search within
:type folder_id: int | long
:returns: A tuple indicating whether the resource is an item an the id of
said resource. i.e. (True, item_id) or (False, folder_id). Note that in
the event that we do not find a result return (False, -1)
:rtype: (bool, int | long)
### Response:
def _search_folder_for_item_or_folder(name, folder_id):
    """
    Find an item or folder matching the name. A folder will be found first if
    both are present.
    :param name: The name of the resource
    :type name: string
    :param folder_id: The folder to search within
    :type folder_id: int | long
    :returns: A tuple indicating whether the resource is an item an the id of
    said resource. i.e. (True, item_id) or (False, folder_id). Note that in
    the event that we do not find a result return (False, -1)
    :rtype: (bool, int | long)
    """
    session.token = verify_credentials()
    listing = session.communicator.folder_children(session.token, folder_id)
    # Folders win over items when both carry the requested name.
    for match in (f for f in listing['folders'] if f['name'] == name):
        return False, match['folder_id']
    for entry in listing['items']:
        if entry['name'] == name:
            return True, entry['item_id']
    # Nothing matched.
    return False, -1
def run(self):
    """Starts Pyro naming server with command line arguments
    (see pyro documentation)"""
    # Copy the stored arguments into a fresh list before handing them
    # to Pyro's entry point.
    args = []
    for arg in self.args:
        args.append(arg)
    Pyro.naming.main(args) | Starts Pyro naming server with command line arguments
(see pyro documentation) | Below is the instruction that describes the task:
### Input:
Starts Pyro naming server with command line arguments
(see pyro documentation)
### Response:
def run(self):
    """Starts Pyro naming server with command line arguments
    (see pyro documentation)"""
    # list() replaces the original element-by-element append loop; it
    # still hands Pyro a fresh, mutable copy of the stored arguments.
    Pyro.naming.main(list(self.args))
def query(self, sql_query, columns = None, parameters = None):
    """
    If columns evaluates to true, sql_query must contain
    "${columns}" (only once) and this substring will be replaced
    by an SQL representation of the list of columns, correctly
    escaped for the backend.
    The resulting string is passed to the DBAPI2.0 .execute()
    method of the underlying DBAPI2.0 cursor, alongside with
    parameters.
    If ''not columns'', the sql_query string will be passed
    unchanged to the DBAPI2.0 .execute() method.
    """
    tmp_query = self.__preparequery(sql_query, columns)
    # qmark placeholders are the native style here; rewrite them for
    # drivers that use (py)format-style parameter markers.
    if self.__methods[METHOD_MODULE].paramstyle in ('format', 'pyformat'):
        tmp_query = qmark_to_format(tmp_query)
    # NOTE(review): the following triple-quoted block is dead code (a
    # no-op string statement) kept for reference -- it interpolated
    # parameters directly for the 'qmark' paramstyle.
    """
    if self.__methods[METHOD_MODULE].paramstyle == "qmark":
        if parameters is not None:
            tmp_query = tmp_query % parameters
            parameters = None
    """
    try:
        if parameters is None:
            self.__dbapi2_cursor.execute(tmp_query)
        else:
            self.__dbapi2_cursor.execute(tmp_query, parameters)
    # NOTE(review): 'except Exception, e' is Python-2-only syntax and 'e'
    # is never used.
    except Exception, e:
        # try to reconnect
        self.__connection.reconnect(tmp_query, self.__log_reconnect)
        self.__dbapi2_cursor = self.__connection._get_raw_cursor()
        # Retry the statement once on the fresh cursor.
        if parameters is None:
            self.__dbapi2_cursor.execute(tmp_query)
        else:
            self.__dbapi2_cursor.execute(tmp_query, parameters) | If columns evaluates to true, sql_query must contain
"${columns}" (only once) and this substring will be replaced
by an SQL representation of the list of columns, correctly
escaped for the backend.
The resulting string is passed to the DBAPI2.0 .execute()
method of the underlying DBAPI2.0 cursor, alongside with
parameters.
If ''not columns'', the sql_query string will be passed
unchanged to the DBAPI2.0 .execute() method. | Below is the instruction that describes the task:
### Input:
If columns evaluates to true, sql_query must contain
"${columns}" (only once) and this substring will be replaced
by an SQL representation of the list of columns, correctly
escaped for the backend.
The resulting string is passed to the DBAPI2.0 .execute()
method of the underlying DBAPI2.0 cursor, alongside with
parameters.
If ''not columns'', the sql_query string will be passed
unchanged to the DBAPI2.0 .execute() method.
### Response:
def query(self, sql_query, columns=None, parameters=None):
    """
    If columns evaluates to true, sql_query must contain
    "${columns}" (only once) and this substring will be replaced
    by an SQL representation of the list of columns, correctly
    escaped for the backend.
    The resulting string is passed to the DBAPI2.0 .execute()
    method of the underlying DBAPI2.0 cursor, alongside with
    parameters.
    If ''not columns'', the sql_query string will be passed
    unchanged to the DBAPI2.0 .execute() method.
    """
    tmp_query = self.__preparequery(sql_query, columns)
    # qmark placeholders are the native style here; rewrite them for
    # drivers that use (py)format-style parameter markers.
    if self.__methods[METHOD_MODULE].paramstyle in ('format', 'pyformat'):
        tmp_query = qmark_to_format(tmp_query)
    # Historical dead code removed here: a no-op string literal that
    # interpolated parameters for the 'qmark' paramstyle, which DBAPI
    # drivers already handle natively.
    try:
        if parameters is None:
            self.__dbapi2_cursor.execute(tmp_query)
        else:
            self.__dbapi2_cursor.execute(tmp_query, parameters)
    except Exception:
        # 'except Exception, e' was Python-2-only syntax and the bound
        # exception was unused; the bare form works on Python 2 and 3.
        # Best effort: reconnect once and retry the statement.
        self.__connection.reconnect(tmp_query, self.__log_reconnect)
        self.__dbapi2_cursor = self.__connection._get_raw_cursor()
        if parameters is None:
            self.__dbapi2_cursor.execute(tmp_query)
        else:
            self.__dbapi2_cursor.execute(tmp_query, parameters)
def _is_molecule_linear(self, mol):
    """
    Is the molecule a linear one
    Args:
        mol: The molecule. OpenBabel OBMol object.
    Returns:
        Boolean value.
    """
    # Fewer than three atoms is always linear (a point or a bond).
    if mol.NumAtoms() < 3:
        return True
    # OpenBabel atom indices are 1-based.
    a1 = mol.GetAtom(1)
    a2 = mol.GetAtom(2)
    for i in range(3, mol.NumAtoms()+1):
        angle = float(mol.GetAtom(i).GetAngle(a2, a1))
        # Fold the angle into [0, 90]: sign and supplement are equivalent
        # for the purpose of a deviation-from-line test.
        if angle < 0.0:
            angle = -angle
        if angle > 90.0:
            angle = 180.0 - angle
        # Any atom deviating more than the tolerance breaks linearity.
        if angle > self._angle_tolerance:
            return False
    return True | Is the molecule a linear one
Args:
mol: The molecule. OpenBabel OBMol object.
Returns:
Boolean value. | Below is the instruction that describes the task:
### Input:
Is the molecule a linear one
Args:
mol: The molecule. OpenBabel OBMol object.
Returns:
Boolean value.
### Response:
def _is_molecule_linear(self, mol):
"""
Is the molecule a linear one
Args:
mol: The molecule. OpenBabel OBMol object.
Returns:
Boolean value.
"""
if mol.NumAtoms() < 3:
return True
a1 = mol.GetAtom(1)
a2 = mol.GetAtom(2)
for i in range(3, mol.NumAtoms()+1):
angle = float(mol.GetAtom(i).GetAngle(a2, a1))
if angle < 0.0:
angle = -angle
if angle > 90.0:
angle = 180.0 - angle
if angle > self._angle_tolerance:
return False
return True |
def set_right_table(self, table):
    """
    Sets the right table for this join clause and try to automatically set the condition
    if one isn't specified
    """
    self.right_table = table
    # Without a left side there is nothing to infer a prefix from yet.
    if self.left_table is None:
        return
    # find table prefix
    # Prefix inference only applies when both sides wrap Django models.
    if type(self.left_table) is ModelTable and type(self.right_table) is ModelTable:
        # loop through fields to find the field for this model
        # check if this join type is for a related field
        for field in self.get_all_related_objects(self.left_table):
            # Newer Django exposes 'related_model'; fall back to 'model'.
            related_model = field.model
            if hasattr(field, 'related_model'):
                related_model = field.related_model
            if related_model == self.right_table.model:
                if self.right_table.field_prefix is None:
                    self.right_table.field_prefix = field.get_accessor_name()
                    # Trim Django's default reverse-accessor '_set' suffix.
                    if len(self.right_table.field_prefix) > 4 and self.right_table.field_prefix[-4:] == '_set':
                        self.right_table.field_prefix = self.right_table.field_prefix[:-4]
                return
        # check if this join type is for a foreign key
        for field in self.left_table.model._meta.fields:
            if (
                field.get_internal_type() == 'OneToOneField' or
                field.get_internal_type() == 'ForeignKey'
            ):
                if field.remote_field.model == self.right_table.model:
                    if self.right_table.field_prefix is None:
                        self.right_table.field_prefix = field.name
                    return | Sets the right table for this join clause and try to automatically set the condition
if one isn't specified | Below is the instruction that describes the task:
### Input:
Sets the right table for this join clause and try to automatically set the condition
if one isn't specified
### Response:
def set_right_table(self, table):
    """
    Sets the right table for this join clause and try to automatically set the condition
    if one isn't specified
    """
    self.right_table = table
    if self.left_table is None:
        return
    # Prefix inference only applies when both sides wrap Django models.
    if not (type(self.left_table) is ModelTable and
            type(self.right_table) is ModelTable):
        return
    # First look for a reverse relation whose target is the right model.
    for relation in self.get_all_related_objects(self.left_table):
        # Newer Django exposes 'related_model'; fall back to 'model'.
        target = relation.model
        if hasattr(relation, 'related_model'):
            target = relation.related_model
        if target == self.right_table.model:
            if self.right_table.field_prefix is None:
                accessor = relation.get_accessor_name()
                # Strip Django's default '_set' reverse-accessor suffix.
                if len(accessor) > 4 and accessor[-4:] == '_set':
                    accessor = accessor[:-4]
                self.right_table.field_prefix = accessor
            return
    # Otherwise look for a forward FK / one-to-one from left to right.
    for model_field in self.left_table.model._meta.fields:
        if model_field.get_internal_type() not in ('OneToOneField', 'ForeignKey'):
            continue
        if model_field.remote_field.model == self.right_table.model:
            if self.right_table.field_prefix is None:
                self.right_table.field_prefix = model_field.name
            return
def pipe(self, transformer):
    """Pipe this stream to another.

    Creates a downstream Stream, wires *transformer* between the two, and
    schedules a transform task for every file already buffered.
    Returns the new downstream stream, or None if already piped.
    """
    # Guard: a stream can only be piped once.
    if self.next:
        return
    stream = Stream()
    self.next = stream
    stream.prev = self
    self.transformer = transformer
    transformer.stream = self
    transformer.piped()
    # Kick off a transform task for each buffered file.
    for file in self.files:
        future = asyncio.ensure_future(self.transformer.transform(file))
        future.add_done_callback(self.handle_transform)
    self.onpiped.set_result(None)
    self.flush_if_ended()
    return stream | Pipe this stream to another. | Below is the the instruction that describes the task:
### Input:
Pipe this stream to another.
### Response:
def pipe(self, transformer):
    """Pipe this stream to another."""
    # A stream can only be piped once; subsequent calls return None.
    if self.next:
        return
    downstream = Stream()
    self.next = downstream
    downstream.prev = self
    self.transformer = transformer
    transformer.stream = self
    transformer.piped()
    # Schedule a transform task for every file already buffered.
    for pending in self.files:
        task = asyncio.ensure_future(self.transformer.transform(pending))
        task.add_done_callback(self.handle_transform)
    self.onpiped.set_result(None)
    self.flush_if_ended()
    return downstream
def set_prompt(self, prompt_command="", position=0):
    """ writes the prompt line """
    # Keep the rendered text around for later inspection.
    self.description_docs = u'{}'.format(prompt_command)
    # Replace the buffer contents and place the cursor, then repaint.
    self.cli.current_buffer.reset(
        initial_document=Document(
            self.description_docs,
            cursor_position=position))
    self.cli.request_redraw() | writes the prompt line | Below is the the instruction that describes the task:
### Input:
writes the prompt line
### Response:
def set_prompt(self, prompt_command="", position=0):
    """ writes the prompt line """
    self.description_docs = u'{}'.format(prompt_command)
    # Swap in a fresh document with the cursor at the requested offset,
    # then ask the CLI to repaint.
    doc = Document(self.description_docs, cursor_position=position)
    self.cli.current_buffer.reset(initial_document=doc)
    self.cli.request_redraw()
def month_crumb(date):
    """
    Crumb for a month.

    Builds a breadcrumb whose label is the capitalized month name and
    whose URL points at the month archive for *date*.
    """
    year = date.strftime('%Y')
    month = date.strftime('%m')
    # 'F' renders the full localized month name.
    month_text = DateFormat(date).format('F').capitalize()
    return Crumb(month_text, reverse('zinnia:entry_archive_month',
                                     args=[year, month])) | Crumb for a month. | Below is the the instruction that describes the task:
### Input:
Crumb for a month.
### Response:
def month_crumb(date):
    """
    Crumb for a month.
    """
    # URL for the month archive, keyed by zero-padded year and month.
    url = reverse('zinnia:entry_archive_month',
                  args=[date.strftime('%Y'), date.strftime('%m')])
    # 'F' renders the full localized month name.
    label = DateFormat(date).format('F').capitalize()
    return Crumb(label, url)
def decode(self, s, triples=False):
    """
    Deserialize PENMAN-notation string *s* into its Graph object.
    Args:
        s: a string containing a single PENMAN-serialized graph
        triples: if True, treat *s* as a conjunction of logical triples
    Returns:
        the Graph object described by *s*
    Example:
        >>> codec = PENMANCodec()
        >>> codec.decode('(b / bark :ARG1 (d / dog))')
        <Graph object (top=b) at ...>
        >>> codec.decode(
        ...     'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
        ...     triples=True
        ... )
        <Graph object (top=b) at ...>
    """
    try:
        # Two entry grammars: triple conjunctions vs. PENMAN node trees.
        if triples:
            span, data = self._decode_triple_conjunction(s)
        else:
            span, data = self._decode_penman_node(s)
    except IndexError:
        # The recursive parsers index past the end on truncated input.
        raise DecodeError(
            'Unexpected end of string.', string=s, pos=len(s)
        )
    top, nodes, edges = data
    return self.triples_to_graph(nodes + edges, top=top) | Deserialize PENMAN-notation string *s* into its Graph object.
Args:
s: a string containing a single PENMAN-serialized graph
triples: if True, treat *s* as a conjunction of logical triples
Returns:
the Graph object described by *s*
Example:
>>> codec = PENMANCodec()
>>> codec.decode('(b / bark :ARG1 (d / dog))')
<Graph object (top=b) at ...>
>>> codec.decode(
... 'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
... triples=True
... )
<Graph object (top=b) at ...> | Below is the instruction that describes the task:
### Input:
Deserialize PENMAN-notation string *s* into its Graph object.
Args:
s: a string containing a single PENMAN-serialized graph
triples: if True, treat *s* as a conjunction of logical triples
Returns:
the Graph object described by *s*
Example:
>>> codec = PENMANCodec()
>>> codec.decode('(b / bark :ARG1 (d / dog))')
<Graph object (top=b) at ...>
>>> codec.decode(
... 'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
... triples=True
... )
<Graph object (top=b) at ...>
### Response:
def decode(self, s, triples=False):
    """
    Deserialize PENMAN-notation string *s* into its Graph object.
    Args:
        s: a string containing a single PENMAN-serialized graph
        triples: if True, treat *s* as a conjunction of logical triples
    Returns:
        the Graph object described by *s*
    Example:
        >>> codec = PENMANCodec()
        >>> codec.decode('(b / bark :ARG1 (d / dog))')
        <Graph object (top=b) at ...>
        >>> codec.decode(
        ...     'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
        ...     triples=True
        ... )
        <Graph object (top=b) at ...>
    """
    # Pick the grammar: triple conjunctions vs. PENMAN node trees.
    parse = (self._decode_triple_conjunction if triples
             else self._decode_penman_node)
    try:
        span, data = parse(s)
    except IndexError:
        # The recursive parsers index past the end on truncated input.
        raise DecodeError(
            'Unexpected end of string.', string=s, pos=len(s)
        )
    top, nodes, edges = data
    return self.triples_to_graph(nodes + edges, top=top)
def normalize_array(a,direction="column"):
    """
    Normalizes an array to sum to one, either column wise,
    or row wise or the full array.
    *Directions*
    Column-wise - 0 default
    Row-wise - 1 default
    All - 2 default
    """
    # Work on a copy; the input array is never mutated.
    b = a.copy()
    if(direction == "column"):
        # Divide each column by its sum; nan_to_num maps 0/0 -> 0.
        sums = np.sum(b,0)
        return np.nan_to_num(b/sums)
    elif(direction == "row"):
        # Transpose so broadcasting divides each row by its own sum.
        sums =np.sum(b,1)
        return np.nan_to_num((b.transpose() / sums).transpose())
    elif(direction == "all"):
        sums = np.sum(b)
        return np.nan_to_num(b / sums)
    else:
        # NOTE(review): Python-2-only print statement; unknown directions
        # fall through and return the unnormalized copy.
        print "Error non existing normalization"
        return b | Normalizes an array to sum to one, either column wise,
or row wise or the full array.
*Directions*
Column-wise - 0 default
Row-wise - 1 default
All - 2 default | Below is the instruction that describes the task:
### Input:
Normalizes an array to sum to one, either column wise,
or row wise or the full array.
*Directions*
Column-wise - 0 default
Row-wise - 1 default
All - 2 default
### Response:
def normalize_array(a, direction="column"):
    """
    Normalizes an array to sum to one, either column wise,
    or row wise or the full array.

    *Directions*
    "column" - each column sums to one (default)
    "row" - each row sums to one
    "all" - the whole array sums to one

    Unknown directions print an error and return an unnormalized copy.
    Zero sums produce 0 entries (nan_to_num maps 0/0 -> 0).
    """
    # Work on a copy; the input array is never mutated.
    b = a.copy()
    if direction == "column":
        return np.nan_to_num(b / np.sum(b, 0))
    elif direction == "row":
        # Transpose so broadcasting divides each row by its own sum.
        return np.nan_to_num((b.transpose() / np.sum(b, 1)).transpose())
    elif direction == "all":
        return np.nan_to_num(b / np.sum(b))
    else:
        # print() works on both Python 2 and 3; the original used the
        # Python-2-only print statement, which is a syntax error on 3.
        print("Error non existing normalization")
        return b
def isel(self, indexers=None, drop=False, **indexers_kwargs):
    """Returns a new dataset with each array indexed along the specified
    dimension(s).
    This method selects values from each array using its `__getitem__`
    method, except this method does not require knowing the order of
    each array's dimensions.
    Parameters
    ----------
    indexers : dict, optional
        A dict with keys matching dimensions and values given
        by integers, slice objects or arrays.
        indexer can be a integer, slice, array-like or DataArray.
        If DataArrays are passed as indexers, xarray-style indexing will be
        carried out. See :ref:`indexing` for the details.
        One of indexers or indexers_kwargs must be provided.
    drop : bool, optional
        If ``drop=True``, drop coordinates variables indexed by integers
        instead of making them scalar.
    **indexers_kwarg : {dim: indexer, ...}, optional
        The keyword arguments form of ``indexers``.
        One of indexers or indexers_kwargs must be provided.
    Returns
    -------
    obj : Dataset
        A new Dataset with the same contents as this dataset, except each
        array and dimension is indexed by the appropriate indexers.
        If indexer DataArrays have coordinates that do not conflict with
        this object, then these coordinates will be attached.
        In general, each array's data will be a view of the array's data
        in this dataset, unless vectorized indexing was triggered by using
        an array indexer, in which case the data will be a copy.
    See Also
    --------
    Dataset.sel
    DataArray.isel
    """
    # Merge dict-form and kwargs-form indexers and validate them.
    indexers = either_dict_or_kwargs(indexers, indexers_kwargs, 'isel')
    indexers_list = self._validate_indexers(indexers)
    variables = OrderedDict()
    indexes = OrderedDict()
    for name, var in self.variables.items():
        # Only indexers over dimensions this variable actually has apply.
        var_indexers = {k: v for k, v in indexers_list if k in var.dims}
        if drop and name in var_indexers:
            continue  # drop this variable
        if name in self.indexes:
            # Index-backed variables must update their pandas index too.
            new_var, new_index = isel_variable_and_index(
                name, var, self.indexes[name], var_indexers)
            if new_index is not None:
                indexes[name] = new_index
        else:
            new_var = var.isel(indexers=var_indexers)
        variables[name] = new_var
    coord_names = set(variables).intersection(self._coord_names)
    selected = self._replace_with_new_dims(
        variables, coord_names, indexes)
    # Extract coordinates from indexers
    coord_vars, new_indexes = (
        selected._get_indexers_coords_and_indexes(indexers))
    variables.update(coord_vars)
    indexes.update(new_indexes)
    # Recompute coordinate names to include the attached indexer coords.
    coord_names = (set(variables)
                   .intersection(self._coord_names)
                   .union(coord_vars))
    return self._replace_with_new_dims(
        variables, coord_names, indexes=indexes) | Returns a new dataset with each array indexed along the specified
dimension(s).
This method selects values from each array using its `__getitem__`
method, except this method does not require knowing the order of
each array's dimensions.
Parameters
----------
indexers : dict, optional
A dict with keys matching dimensions and values given
by integers, slice objects or arrays.
indexer can be a integer, slice, array-like or DataArray.
If DataArrays are passed as indexers, xarray-style indexing will be
carried out. See :ref:`indexing` for the details.
One of indexers or indexers_kwargs must be provided.
drop : bool, optional
If ``drop=True``, drop coordinates variables indexed by integers
instead of making them scalar.
**indexers_kwarg : {dim: indexer, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
array and dimension is indexed by the appropriate indexers.
If indexer DataArrays have coordinates that do not conflict with
this object, then these coordinates will be attached.
In general, each array's data will be a view of the array's data
in this dataset, unless vectorized indexing was triggered by using
an array indexer, in which case the data will be a copy.
See Also
--------
Dataset.sel
DataArray.isel | Below is the instruction that describes the task:
### Input:
Returns a new dataset with each array indexed along the specified
dimension(s).
This method selects values from each array using its `__getitem__`
method, except this method does not require knowing the order of
each array's dimensions.
Parameters
----------
indexers : dict, optional
A dict with keys matching dimensions and values given
by integers, slice objects or arrays.
indexer can be a integer, slice, array-like or DataArray.
If DataArrays are passed as indexers, xarray-style indexing will be
carried out. See :ref:`indexing` for the details.
One of indexers or indexers_kwargs must be provided.
drop : bool, optional
If ``drop=True``, drop coordinates variables indexed by integers
instead of making them scalar.
**indexers_kwarg : {dim: indexer, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
array and dimension is indexed by the appropriate indexers.
If indexer DataArrays have coordinates that do not conflict with
this object, then these coordinates will be attached.
In general, each array's data will be a view of the array's data
in this dataset, unless vectorized indexing was triggered by using
an array indexer, in which case the data will be a copy.
See Also
--------
Dataset.sel
DataArray.isel
### Response:
def isel(self, indexers=None, drop=False, **indexers_kwargs):
    """Returns a new dataset with each array indexed along the specified
    dimension(s).
    This method selects values from each array using its `__getitem__`
    method, except this method does not require knowing the order of
    each array's dimensions.
    Parameters
    ----------
    indexers : dict, optional
        A dict with keys matching dimensions and values given
        by integers, slice objects or arrays.
        indexer can be a integer, slice, array-like or DataArray.
        If DataArrays are passed as indexers, xarray-style indexing will be
        carried out. See :ref:`indexing` for the details.
        One of indexers or indexers_kwargs must be provided.
    drop : bool, optional
        If ``drop=True``, drop coordinates variables indexed by integers
        instead of making them scalar.
    **indexers_kwarg : {dim: indexer, ...}, optional
        The keyword arguments form of ``indexers``.
        One of indexers or indexers_kwargs must be provided.
    Returns
    -------
    obj : Dataset
        A new Dataset with the same contents as this dataset, except each
        array and dimension is indexed by the appropriate indexers.
        If indexer DataArrays have coordinates that do not conflict with
        this object, then these coordinates will be attached.
        In general, each array's data will be a view of the array's data
        in this dataset, unless vectorized indexing was triggered by using
        an array indexer, in which case the data will be a copy.
    See Also
    --------
    Dataset.sel
    DataArray.isel
    """
    # Normalize the two calling conventions into a single dict.
    indexers = either_dict_or_kwargs(indexers, indexers_kwargs, 'isel')
    indexers_list = self._validate_indexers(indexers)
    variables = OrderedDict()
    indexes = OrderedDict()
    for name, var in self.variables.items():
        # Restrict the indexers to dimensions present on this variable.
        var_indexers = {k: v for k, v in indexers_list if k in var.dims}
        if drop and name in var_indexers:
            continue  # drop this variable
        if name in self.indexes:
            # Variables backed by an index also need their index sliced.
            new_var, new_index = isel_variable_and_index(
                name, var, self.indexes[name], var_indexers)
            if new_index is not None:
                indexes[name] = new_index
        else:
            new_var = var.isel(indexers=var_indexers)
        variables[name] = new_var
    coord_names = set(variables).intersection(self._coord_names)
    selected = self._replace_with_new_dims(
        variables, coord_names, indexes)
    # Extract coordinates from indexers
    coord_vars, new_indexes = (
        selected._get_indexers_coords_and_indexes(indexers))
    variables.update(coord_vars)
    indexes.update(new_indexes)
    # Indexer DataArrays may contribute new coordinates; fold them in.
    coord_names = (set(variables)
                   .intersection(self._coord_names)
                   .union(coord_vars))
    return self._replace_with_new_dims(
        variables, coord_names, indexes=indexes)
def get(self, params=None):
    """Send a GET request and return the JSON decoded result.
    Args:
        params (dict, optional): Mapping of parameters to send in request.
    Returns:
        mixed: JSON decoded response data.
    """
    # Thin wrapper: delegates to the shared _call helper with the
    # configured endpoint.
    return self._call('get', url=self.endpoint, params=params) | Send a POST request and return the JSON decoded result.
Args:
params (dict, optional): Mapping of parameters to send in request.
Returns:
mixed: JSON decoded response data. | Below is the instruction that describes the task:
### Input:
Send a POST request and return the JSON decoded result.
Args:
params (dict, optional): Mapping of parameters to send in request.
Returns:
mixed: JSON decoded response data.
### Response:
def get(self, params=None):
    """Send a GET request and return the JSON decoded result.
    Args:
        params (dict, optional): Mapping of parameters to send in request.
    Returns:
        mixed: JSON decoded response data.
    """
    # Delegates to the shared _call helper using the configured endpoint.
    return self._call('get', url=self.endpoint, params=params)
async def _get_response(self, message):
    """
    Get response running the view with await syntax if it is a
    coroutine function, otherwise just run it the normal way.
    """
    # Resolve which view handles this message; falsy means "no handler"
    # and the method returns None.
    view = self.discovery_view(message)
    if not view:
        return
    # Await coroutine views; call plain views synchronously.
    if inspect.iscoroutinefunction(view):
        response = await view(message)
    else:
        response = view(message)
    return self.prepare_response(response, message) | Get response running the view with await syntax if it is a
coroutine function, otherwise just run it the normal way. | Below is the instruction that describes the task:
### Input:
Get response running the view with await syntax if it is a
coroutine function, otherwise just run it the normal way.
### Response:
async def _get_response(self, message):
"""
Get response running the view with await syntax if it is a
coroutine function, otherwise just run it the normal way.
"""
view = self.discovery_view(message)
if not view:
return
if inspect.iscoroutinefunction(view):
response = await view(message)
else:
response = view(message)
return self.prepare_response(response, message) |
def _process_figure(value, fmt):
    """Processes the figure. Returns a dict containing figure properties.

    value: pandoc AST content for the figure; value[0]['c'] holds the Image
        element whose attributes and caption are rewritten in place.
    fmt: pandoc output format name (e.g. 'latex', 'beamer', 'html', 'html5').

    The returned dict has the keys 'is_unnumbered', 'is_unreferenceable',
    'is_tagged' and 'attrs'.
    """
    # pylint: disable=global-statement
    global Nreferences            # Global references counter
    global has_unnumbered_figures # Flags unnumbered figures were found
    global cursec                 # Current section
    # Parse the image
    attrs, caption = value[0]['c'][:2]
    # Initialize the return value
    fig = {'is_unnumbered': False,
           'is_unreferenceable': False,
           'is_tagged': False,
           'attrs': attrs}
    # Bail out if the label does not conform
    if not LABEL_PATTERN.match(attrs[0]):
        has_unnumbered_figures = True
        fig['is_unnumbered'] = True
        fig['is_unreferenceable'] = True
        return fig
    # Process unreferenceable figures
    if attrs[0] == 'fig:': # Make up a unique description
        attrs[0] = attrs[0] + str(uuid.uuid4())
        fig['is_unreferenceable'] = True
        unreferenceable.append(attrs[0])
    # For html, hard-code in the section numbers as tags
    kvs = PandocAttributes(attrs, 'pandoc').kvs
    # NOTE(review): assumes a 'secno' key is present in kvs whenever
    # numbersections is set — confirm an upstream filter injects it.
    if numbersections and fmt in ['html', 'html5'] and 'tag' not in kvs:
        if kvs['secno'] != cursec:
            # Entered a new section: restart the per-section counter.
            cursec = kvs['secno']
            Nreferences = 1
        kvs['tag'] = cursec + '.' + str(Nreferences)
        Nreferences += 1
    # Save to the global references tracker
    fig['is_tagged'] = 'tag' in kvs
    if fig['is_tagged']:
        # Remove any surrounding quotes
        if kvs['tag'][0] == '"' and kvs['tag'][-1] == '"':
            kvs['tag'] = kvs['tag'].strip('"')
        elif kvs['tag'][0] == "'" and kvs['tag'][-1] == "'":
            kvs['tag'] = kvs['tag'].strip("'")
        references[attrs[0]] = kvs['tag']
    else:
        Nreferences += 1
        references[attrs[0]] = Nreferences
    # Adjust caption depending on the output format
    if fmt in ['latex', 'beamer']: # Append a \label if this is referenceable
        if not fig['is_unreferenceable']:
            value[0]['c'][1] += [RawInline('tex', r'\label{%s}'%attrs[0])]
    else: # Hard-code in the caption name and number/tag
        if isinstance(references[attrs[0]], int): # Numbered reference
            value[0]['c'][1] = [RawInline('html', r'<span>'),
                                Str(captionname), Space(),
                                Str('%d:'%references[attrs[0]]),
                                RawInline('html', r'</span>')] \
                if fmt in ['html', 'html5'] else \
                [Str(captionname), Space(), Str('%d:'%references[attrs[0]])]
            value[0]['c'][1] += [Space()] + list(caption)
        else: # Tagged reference
            assert isinstance(references[attrs[0]], STRTYPES)
            text = references[attrs[0]]
            if text.startswith('$') and text.endswith('$'): # Math
                math = text.replace(' ', r'\ ')[1:-1]
                els = [Math({"t":"InlineMath", "c":[]}, math), Str(':')]
            else: # Text
                els = [Str(text+':')]
            value[0]['c'][1] = \
                [RawInline('html', r'<span>'), Str(captionname), Space()] + \
                els + [RawInline('html', r'</span>')] \
                if fmt in ['html', 'html5'] else \
                [Str(captionname), Space()] + els
            value[0]['c'][1] += [Space()] + list(caption)
    return fig | Processes the figure. Returns a dict containing figure properties. | Below is the the instruction that describes the task:
### Input:
Processes the figure. Returns a dict containing figure properties.
### Response:
def _process_figure(value, fmt):
"""Processes the figure. Returns a dict containing figure properties."""
# pylint: disable=global-statement
global Nreferences # Global references counter
global has_unnumbered_figures # Flags unnumbered figures were found
global cursec # Current section
# Parse the image
attrs, caption = value[0]['c'][:2]
# Initialize the return value
fig = {'is_unnumbered': False,
'is_unreferenceable': False,
'is_tagged': False,
'attrs': attrs}
# Bail out if the label does not conform
if not LABEL_PATTERN.match(attrs[0]):
has_unnumbered_figures = True
fig['is_unnumbered'] = True
fig['is_unreferenceable'] = True
return fig
# Process unreferenceable figures
if attrs[0] == 'fig:': # Make up a unique description
attrs[0] = attrs[0] + str(uuid.uuid4())
fig['is_unreferenceable'] = True
unreferenceable.append(attrs[0])
# For html, hard-code in the section numbers as tags
kvs = PandocAttributes(attrs, 'pandoc').kvs
if numbersections and fmt in ['html', 'html5'] and 'tag' not in kvs:
if kvs['secno'] != cursec:
cursec = kvs['secno']
Nreferences = 1
kvs['tag'] = cursec + '.' + str(Nreferences)
Nreferences += 1
# Save to the global references tracker
fig['is_tagged'] = 'tag' in kvs
if fig['is_tagged']:
# Remove any surrounding quotes
if kvs['tag'][0] == '"' and kvs['tag'][-1] == '"':
kvs['tag'] = kvs['tag'].strip('"')
elif kvs['tag'][0] == "'" and kvs['tag'][-1] == "'":
kvs['tag'] = kvs['tag'].strip("'")
references[attrs[0]] = kvs['tag']
else:
Nreferences += 1
references[attrs[0]] = Nreferences
# Adjust caption depending on the output format
if fmt in ['latex', 'beamer']: # Append a \label if this is referenceable
if not fig['is_unreferenceable']:
value[0]['c'][1] += [RawInline('tex', r'\label{%s}'%attrs[0])]
else: # Hard-code in the caption name and number/tag
if isinstance(references[attrs[0]], int): # Numbered reference
value[0]['c'][1] = [RawInline('html', r'<span>'),
Str(captionname), Space(),
Str('%d:'%references[attrs[0]]),
RawInline('html', r'</span>')] \
if fmt in ['html', 'html5'] else \
[Str(captionname), Space(), Str('%d:'%references[attrs[0]])]
value[0]['c'][1] += [Space()] + list(caption)
else: # Tagged reference
assert isinstance(references[attrs[0]], STRTYPES)
text = references[attrs[0]]
if text.startswith('$') and text.endswith('$'): # Math
math = text.replace(' ', r'\ ')[1:-1]
els = [Math({"t":"InlineMath", "c":[]}, math), Str(':')]
else: # Text
els = [Str(text+':')]
value[0]['c'][1] = \
[RawInline('html', r'<span>'), Str(captionname), Space()] + \
els + [RawInline('html', r'</span>')] \
if fmt in ['html', 'html5'] else \
[Str(captionname), Space()] + els
value[0]['c'][1] += [Space()] + list(caption)
return fig |
def create_vlan(self, id_vlan):
    """ Set column 'ativada = 1'.
    :param id_vlan: VLAN identifier.
    :return: None
    """
    # Build the payload shape expected by the API: {'vlan': {'vlan_id': ...}}.
    vlan_map = dict()
    vlan_map['vlan_id'] = id_vlan
    # Activate the VLAN via a PUT to 'vlan/create/'.
    code, xml = self.submit({'vlan': vlan_map}, 'PUT', 'vlan/create/')
    # NOTE(review): despite ':return: None' above, this returns the parsed
    # result of self.response(code, xml) — confirm and fix the docstring.
    return self.response(code, xml) | Set column 'ativada = 1'.
:param id_vlan: VLAN identifier.
:return: None | Below is the instruction that describes the task:
### Input:
Set column 'ativada = 1'.
:param id_vlan: VLAN identifier.
:return: None
### Response:
def create_vlan(self, id_vlan):
""" Set column 'ativada = 1'.
:param id_vlan: VLAN identifier.
:return: None
"""
vlan_map = dict()
vlan_map['vlan_id'] = id_vlan
code, xml = self.submit({'vlan': vlan_map}, 'PUT', 'vlan/create/')
return self.response(code, xml) |
def minimize(grad_and_hessian_loss_fn,
x_start,
tolerance,
l1_regularizer,
l2_regularizer=None,
maximum_iterations=1,
maximum_full_sweeps_per_iteration=1,
learning_rate=None,
name=None):
"""Minimize using Hessian-informed proximal gradient descent.
This function solves the regularized minimization problem
```none
argmin{ Loss(x)
+ l1_regularizer * ||x||_1
+ l2_regularizer * ||x||_2**2
: x in R^n }
```
where `Loss` is a convex C^2 function (typically, `Loss` is the negative log
likelihood of a model and `x` is a vector of model coefficients). The `Loss`
function does not need to be supplied directly, but this optimizer does need a
way to compute the gradient and Hessian of the Loss function at a given value
of `x`. The gradient and Hessian are often computationally expensive, and
this optimizer calls them relatively few times compared with other algorithms.
Args:
grad_and_hessian_loss_fn: callable that takes as input a (batch of) `Tensor`
of the same shape and dtype as `x_start` and returns the triple
`(gradient_unregularized_loss, hessian_unregularized_loss_outer,
hessian_unregularized_loss_middle)` as defined in the argument spec of
`minimize_one_step`.
x_start: (Batch of) vector-shaped, `float` `Tensor` representing the initial
value of the argument to the `Loss` function.
tolerance: scalar, `float` `Tensor` representing the tolerance for each
optimization step; see the `tolerance` argument of
`minimize_one_step`.
l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1
regularization term (see equation above).
l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2
regularization term (see equation above).
Default value: `None` (i.e., no L2 regularization).
maximum_iterations: Python integer specifying the maximum number of
iterations of the outer loop of the optimizer. After this many iterations
of the outer loop, the algorithm will terminate even if the return value
`optimal_x` has not converged.
Default value: `1`.
maximum_full_sweeps_per_iteration: Python integer specifying the maximum
number of sweeps allowed in each iteration of the outer loop of the
optimizer. Passed as the `maximum_full_sweeps` argument to
`minimize_one_step`.
Default value: `1`.
learning_rate: scalar, `float` `Tensor` representing a multiplicative factor
used to dampen the proximal gradient descent steps.
Default value: `None` (i.e., factor is conceptually `1`).
name: Python string representing the name of the TensorFlow operation.
The default name is `"minimize"`.
Returns:
x: `Tensor` of the same shape and dtype as `x_start`, representing the
(batches of) computed values of `x` which minimizes `Loss(x)`.
is_converged: scalar, `bool` `Tensor` indicating whether the minimization
procedure converged within the specified number of iterations across all
batches. Here convergence means that an iteration of the inner loop
(`minimize_one_step`) returns `True` for its `is_converged` output value.
iter: scalar, `int` `Tensor` indicating the actual number of iterations of
the outer loop of the optimizer completed (i.e., number of calls to
`minimize_one_step` before achieving convergence).
#### References
[1]: Jerome Friedman, Trevor Hastie and Rob Tibshirani. Regularization Paths
for Generalized Linear Models via Coordinate Descent. _Journal of
Statistical Software_, 33(1), 2010.
https://www.jstatsoft.org/article/view/v033i01/v33i01.pdf
[2]: Guo-Xun Yuan, Chia-Hua Ho and Chih-Jen Lin. An Improved GLMNET for
L1-regularized Logistic Regression. _Journal of Machine Learning
Research_, 13, 2012.
http://www.jmlr.org/papers/volume13/yuan12a/yuan12a.pdf
"""
graph_deps = [
x_start,
l1_regularizer,
l2_regularizer,
maximum_iterations,
maximum_full_sweeps_per_iteration,
tolerance,
learning_rate,
],
with tf.compat.v1.name_scope(name, 'minimize', graph_deps):
def _loop_cond(x_start, converged, iter_):
del x_start
return tf.logical_and(iter_ < maximum_iterations,
tf.logical_not(converged))
def _loop_body(x_start, converged, iter_): # pylint: disable=missing-docstring
g, h_outer, h_middle = grad_and_hessian_loss_fn(x_start)
x_start, converged, _ = minimize_one_step(
gradient_unregularized_loss=g,
hessian_unregularized_loss_outer=h_outer,
hessian_unregularized_loss_middle=h_middle,
x_start=x_start,
l1_regularizer=l1_regularizer,
l2_regularizer=l2_regularizer,
maximum_full_sweeps=maximum_full_sweeps_per_iteration,
tolerance=tolerance,
learning_rate=learning_rate)
return x_start, converged, iter_ + 1
return tf.while_loop(
cond=_loop_cond,
body=_loop_body,
loop_vars=[
x_start,
tf.zeros([], np.bool, name='converged'),
tf.zeros([], np.int32, name='iter'),
]) | Minimize using Hessian-informed proximal gradient descent.
This function solves the regularized minimization problem
```none
argmin{ Loss(x)
+ l1_regularizer * ||x||_1
+ l2_regularizer * ||x||_2**2
: x in R^n }
```
where `Loss` is a convex C^2 function (typically, `Loss` is the negative log
likelihood of a model and `x` is a vector of model coefficients). The `Loss`
function does not need to be supplied directly, but this optimizer does need a
way to compute the gradient and Hessian of the Loss function at a given value
of `x`. The gradient and Hessian are often computationally expensive, and
this optimizer calls them relatively few times compared with other algorithms.
Args:
grad_and_hessian_loss_fn: callable that takes as input a (batch of) `Tensor`
of the same shape and dtype as `x_start` and returns the triple
`(gradient_unregularized_loss, hessian_unregularized_loss_outer,
hessian_unregularized_loss_middle)` as defined in the argument spec of
`minimize_one_step`.
x_start: (Batch of) vector-shaped, `float` `Tensor` representing the initial
value of the argument to the `Loss` function.
tolerance: scalar, `float` `Tensor` representing the tolerance for each
optimization step; see the `tolerance` argument of
`minimize_one_step`.
l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1
regularization term (see equation above).
l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2
regularization term (see equation above).
Default value: `None` (i.e., no L2 regularization).
maximum_iterations: Python integer specifying the maximum number of
iterations of the outer loop of the optimizer. After this many iterations
of the outer loop, the algorithm will terminate even if the return value
`optimal_x` has not converged.
Default value: `1`.
maximum_full_sweeps_per_iteration: Python integer specifying the maximum
number of sweeps allowed in each iteration of the outer loop of the
optimizer. Passed as the `maximum_full_sweeps` argument to
`minimize_one_step`.
Default value: `1`.
learning_rate: scalar, `float` `Tensor` representing a multiplicative factor
used to dampen the proximal gradient descent steps.
Default value: `None` (i.e., factor is conceptually `1`).
name: Python string representing the name of the TensorFlow operation.
The default name is `"minimize"`.
Returns:
x: `Tensor` of the same shape and dtype as `x_start`, representing the
(batches of) computed values of `x` which minimizes `Loss(x)`.
is_converged: scalar, `bool` `Tensor` indicating whether the minimization
procedure converged within the specified number of iterations across all
batches. Here convergence means that an iteration of the inner loop
(`minimize_one_step`) returns `True` for its `is_converged` output value.
iter: scalar, `int` `Tensor` indicating the actual number of iterations of
the outer loop of the optimizer completed (i.e., number of calls to
`minimize_one_step` before achieving convergence).
#### References
[1]: Jerome Friedman, Trevor Hastie and Rob Tibshirani. Regularization Paths
for Generalized Linear Models via Coordinate Descent. _Journal of
Statistical Software_, 33(1), 2010.
https://www.jstatsoft.org/article/view/v033i01/v33i01.pdf
[2]: Guo-Xun Yuan, Chia-Hua Ho and Chih-Jen Lin. An Improved GLMNET for
L1-regularized Logistic Regression. _Journal of Machine Learning
Research_, 13, 2012.
http://www.jmlr.org/papers/volume13/yuan12a/yuan12a.pdf | Below is the instruction that describes the task:
### Input:
Minimize using Hessian-informed proximal gradient descent.
This function solves the regularized minimization problem
```none
argmin{ Loss(x)
+ l1_regularizer * ||x||_1
+ l2_regularizer * ||x||_2**2
: x in R^n }
```
where `Loss` is a convex C^2 function (typically, `Loss` is the negative log
likelihood of a model and `x` is a vector of model coefficients). The `Loss`
function does not need to be supplied directly, but this optimizer does need a
way to compute the gradient and Hessian of the Loss function at a given value
of `x`. The gradient and Hessian are often computationally expensive, and
this optimizer calls them relatively few times compared with other algorithms.
Args:
grad_and_hessian_loss_fn: callable that takes as input a (batch of) `Tensor`
of the same shape and dtype as `x_start` and returns the triple
`(gradient_unregularized_loss, hessian_unregularized_loss_outer,
hessian_unregularized_loss_middle)` as defined in the argument spec of
`minimize_one_step`.
x_start: (Batch of) vector-shaped, `float` `Tensor` representing the initial
value of the argument to the `Loss` function.
tolerance: scalar, `float` `Tensor` representing the tolerance for each
optimization step; see the `tolerance` argument of
`minimize_one_step`.
l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1
regularization term (see equation above).
l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2
regularization term (see equation above).
Default value: `None` (i.e., no L2 regularization).
maximum_iterations: Python integer specifying the maximum number of
iterations of the outer loop of the optimizer. After this many iterations
of the outer loop, the algorithm will terminate even if the return value
`optimal_x` has not converged.
Default value: `1`.
maximum_full_sweeps_per_iteration: Python integer specifying the maximum
number of sweeps allowed in each iteration of the outer loop of the
optimizer. Passed as the `maximum_full_sweeps` argument to
`minimize_one_step`.
Default value: `1`.
learning_rate: scalar, `float` `Tensor` representing a multiplicative factor
used to dampen the proximal gradient descent steps.
Default value: `None` (i.e., factor is conceptually `1`).
name: Python string representing the name of the TensorFlow operation.
The default name is `"minimize"`.
Returns:
x: `Tensor` of the same shape and dtype as `x_start`, representing the
(batches of) computed values of `x` which minimizes `Loss(x)`.
is_converged: scalar, `bool` `Tensor` indicating whether the minimization
procedure converged within the specified number of iterations across all
batches. Here convergence means that an iteration of the inner loop
(`minimize_one_step`) returns `True` for its `is_converged` output value.
iter: scalar, `int` `Tensor` indicating the actual number of iterations of
the outer loop of the optimizer completed (i.e., number of calls to
`minimize_one_step` before achieving convergence).
#### References
[1]: Jerome Friedman, Trevor Hastie and Rob Tibshirani. Regularization Paths
for Generalized Linear Models via Coordinate Descent. _Journal of
Statistical Software_, 33(1), 2010.
https://www.jstatsoft.org/article/view/v033i01/v33i01.pdf
[2]: Guo-Xun Yuan, Chia-Hua Ho and Chih-Jen Lin. An Improved GLMNET for
L1-regularized Logistic Regression. _Journal of Machine Learning
Research_, 13, 2012.
http://www.jmlr.org/papers/volume13/yuan12a/yuan12a.pdf
### Response:
def minimize(grad_and_hessian_loss_fn,
x_start,
tolerance,
l1_regularizer,
l2_regularizer=None,
maximum_iterations=1,
maximum_full_sweeps_per_iteration=1,
learning_rate=None,
name=None):
"""Minimize using Hessian-informed proximal gradient descent.
This function solves the regularized minimization problem
```none
argmin{ Loss(x)
+ l1_regularizer * ||x||_1
+ l2_regularizer * ||x||_2**2
: x in R^n }
```
where `Loss` is a convex C^2 function (typically, `Loss` is the negative log
likelihood of a model and `x` is a vector of model coefficients). The `Loss`
function does not need to be supplied directly, but this optimizer does need a
way to compute the gradient and Hessian of the Loss function at a given value
of `x`. The gradient and Hessian are often computationally expensive, and
this optimizer calls them relatively few times compared with other algorithms.
Args:
grad_and_hessian_loss_fn: callable that takes as input a (batch of) `Tensor`
of the same shape and dtype as `x_start` and returns the triple
`(gradient_unregularized_loss, hessian_unregularized_loss_outer,
hessian_unregularized_loss_middle)` as defined in the argument spec of
`minimize_one_step`.
x_start: (Batch of) vector-shaped, `float` `Tensor` representing the initial
value of the argument to the `Loss` function.
tolerance: scalar, `float` `Tensor` representing the tolerance for each
optimization step; see the `tolerance` argument of
`minimize_one_step`.
l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1
regularization term (see equation above).
l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2
regularization term (see equation above).
Default value: `None` (i.e., no L2 regularization).
maximum_iterations: Python integer specifying the maximum number of
iterations of the outer loop of the optimizer. After this many iterations
of the outer loop, the algorithm will terminate even if the return value
`optimal_x` has not converged.
Default value: `1`.
maximum_full_sweeps_per_iteration: Python integer specifying the maximum
number of sweeps allowed in each iteration of the outer loop of the
optimizer. Passed as the `maximum_full_sweeps` argument to
`minimize_one_step`.
Default value: `1`.
learning_rate: scalar, `float` `Tensor` representing a multiplicative factor
used to dampen the proximal gradient descent steps.
Default value: `None` (i.e., factor is conceptually `1`).
name: Python string representing the name of the TensorFlow operation.
The default name is `"minimize"`.
Returns:
x: `Tensor` of the same shape and dtype as `x_start`, representing the
(batches of) computed values of `x` which minimizes `Loss(x)`.
is_converged: scalar, `bool` `Tensor` indicating whether the minimization
procedure converged within the specified number of iterations across all
batches. Here convergence means that an iteration of the inner loop
(`minimize_one_step`) returns `True` for its `is_converged` output value.
iter: scalar, `int` `Tensor` indicating the actual number of iterations of
the outer loop of the optimizer completed (i.e., number of calls to
`minimize_one_step` before achieving convergence).
#### References
[1]: Jerome Friedman, Trevor Hastie and Rob Tibshirani. Regularization Paths
for Generalized Linear Models via Coordinate Descent. _Journal of
Statistical Software_, 33(1), 2010.
https://www.jstatsoft.org/article/view/v033i01/v33i01.pdf
[2]: Guo-Xun Yuan, Chia-Hua Ho and Chih-Jen Lin. An Improved GLMNET for
L1-regularized Logistic Regression. _Journal of Machine Learning
Research_, 13, 2012.
http://www.jmlr.org/papers/volume13/yuan12a/yuan12a.pdf
"""
graph_deps = [
x_start,
l1_regularizer,
l2_regularizer,
maximum_iterations,
maximum_full_sweeps_per_iteration,
tolerance,
learning_rate,
],
with tf.compat.v1.name_scope(name, 'minimize', graph_deps):
def _loop_cond(x_start, converged, iter_):
del x_start
return tf.logical_and(iter_ < maximum_iterations,
tf.logical_not(converged))
def _loop_body(x_start, converged, iter_): # pylint: disable=missing-docstring
g, h_outer, h_middle = grad_and_hessian_loss_fn(x_start)
x_start, converged, _ = minimize_one_step(
gradient_unregularized_loss=g,
hessian_unregularized_loss_outer=h_outer,
hessian_unregularized_loss_middle=h_middle,
x_start=x_start,
l1_regularizer=l1_regularizer,
l2_regularizer=l2_regularizer,
maximum_full_sweeps=maximum_full_sweeps_per_iteration,
tolerance=tolerance,
learning_rate=learning_rate)
return x_start, converged, iter_ + 1
return tf.while_loop(
cond=_loop_cond,
body=_loop_body,
loop_vars=[
x_start,
tf.zeros([], np.bool, name='converged'),
tf.zeros([], np.int32, name='iter'),
]) |
def unused_by(self, bundle):
    """
    Indicates that this reference is not being used anymore by the given
    bundle.

    This method should only be used by the framework.

    :param bundle: A bundle that used this reference
    """
    # The reference's own bundle is never tracked as a user.
    if bundle is None or bundle is self.__bundle:
        # Ignore
        return
    with self.__usage_lock:
        try:
            # dec() falsy here means this bundle no longer holds any usage
            # (presumably its usage counter reached zero — TODO confirm).
            if not self.__using_bundles[bundle].dec():
                # This bundle has cleaned up all of its usages of this
                # reference
                del self.__using_bundles[bundle]
        except KeyError:
            # The bundle was not registered as a user: nothing to do
            pass | Indicates that this reference is not being used anymore by the given
bundle.
This method should only be used by the framework.
:param bundle: A bundle that used this reference | Below is the instruction that describes the task:
### Input:
Indicates that this reference is not being used anymore by the given
bundle.
This method should only be used by the framework.
:param bundle: A bundle that used this reference
### Response:
def unused_by(self, bundle):
"""
Indicates that this reference is not being used anymore by the given
bundle.
This method should only be used by the framework.
:param bundle: A bundle that used this reference
"""
if bundle is None or bundle is self.__bundle:
# Ignore
return
with self.__usage_lock:
try:
if not self.__using_bundles[bundle].dec():
# This bundle has cleaner all of its usages of this
# reference
del self.__using_bundles[bundle]
except KeyError:
# Ignore error
pass |
def shoot(hdf5_file_name, minPts, sample_ID = 0, random_state = None, verbose = True):
"""Perform DBSCAN clustering with parameters 'minPts' and 'eps'
(as determined by a prior call to 'load' from this module).
If multiple subsamples of the dataset were provided in a preliminary call to 'load',
'sample_ID' specifies which one of those subsamples is to undergo DBSCAN clustering.
Parameters
----------
hdf5_file_name : file object or string
The handle or name of an HDF5 file where any array needed for DBSCAN and too large to fit into memory
is to be stored. Procedure 'shoot' relies on arrays stored in this data structure by a previous
call to 'load' (see corresponding documentation)
sample_ID : int, optional (default = 0)
Identifies the particular set of selected data-points on which to perform DBSCAN.
If not subsamples were provided in the call to 'load', the whole dataset will be subjected to DBSCAN clustering.
minPts : int
The number of points within a 'eps'-radius hypershpere for this region to qualify as dense.
random_state: np.RandomState, optional (default = None)
The generator used to reorder the samples. If None at input, will be set to np.random.
verbose : Boolean, optional (default = True)
Whether to display messages concerning the status of the computations and the time it took to complete
each major stage of the algorithm.
Returns
-------
core_samples : array of shape (n_core_samples, )
Indices of the core samples.
labels : array of shape (N_samples, )
Holds the cluster labels of each sample. The points considered as noise have entries -1.
The points not initially selected for clustering (i.e. not listed in 'subsampled_indices', if the latter
has been provided in the call to 'load' from this module) are labelled -2.
References
----------
Ester, M., H. P. Kriegel, J. Sander and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
fileh = tables.open_file(hdf5_file_name, mode = 'r+')
neighborhoods_indices = fileh.root.DBSCAN_group.neighborhoods_indices
neighborhoods_indptr = fileh.root.DBSCAN_group.neighborhoods_indptr[:]
neighbors_counts = fileh.root.DBSCAN_group.neighbors_counts[sample_ID]
subsampled_indices = fileh.root.DBSCAN_group.subsamples_matrix[sample_ID]
N_samples = neighborhoods_indptr.size - 1
N_runs, N_subsamples = fileh.root.DBSCAN_group.subsamples_matrix.shape
if not isinstance(sample_ID, int):
raise ValueError("\nERROR: DBSCAN_multiplex @ shoot:\n"
"'sample_ID' must be an integer identifying the set of subsampled indices "
"on which to perform DBSCAN clustering\n")
if (sample_ID < 0) or (sample_ID >= N_runs):
raise ValueError("\nERROR: DBSCAN_multiplex @ shoot:\n"
"'sample_ID' must belong to the interval [0; {}].\n".format(N_runs - 1))
# points that have not been sampled are labelled with -2
labels = np.full(N_samples, -2, dtype = int)
# among the points selected for clustering,
# all are initally characterized as noise
labels[subsampled_indices] = - 1
random_state = check_random_state(random_state)
core_samples = np.flatnonzero(neighbors_counts >= minPts)
index_order = np.take(core_samples, random_state.permutation(core_samples.size))
cluster_ID = 0
# Look at all the selected samples, see if they qualify as core samples
# Create a new cluster from those core samples
for index in index_order:
if labels[index] not in {-1, -2}:
continue
labels[index] = cluster_ID
candidates = [index]
while len(candidates) > 0:
candidate_neighbors = np.zeros(0, dtype = np.int32)
for k in candidates:
candidate_neighbors = np.append(candidate_neighbors,
neighborhoods_indices[neighborhoods_indptr[k]: neighborhoods_indptr[k+1]])
candidate_neighbors = np.unique(candidate_neighbors)
candidate_neighbors = np.intersect1d(candidate_neighbors, subsampled_indices, assume_unique = True)
not_noise_anymore = np.compress(np.take(labels, candidate_neighbors) == -1, candidate_neighbors)
labels[not_noise_anymore] = cluster_ID
# Eliminate as potential candidates the points that have already
# been used to expand the current cluster by a trail
# of density-reachable points
candidates = np.intersect1d(not_noise_anymore, core_samples, assume_unique = True)
cluster_ID += 1
# Done with building this cluster.
# "cluster_ID" is now labelling the next cluster.
fileh.close()
gc.collect()
return core_samples, labels | Perform DBSCAN clustering with parameters 'minPts' and 'eps'
(as determined by a prior call to 'load' from this module).
If multiple subsamples of the dataset were provided in a preliminary call to 'load',
'sample_ID' specifies which one of those subsamples is to undergo DBSCAN clustering.
Parameters
----------
hdf5_file_name : file object or string
The handle or name of an HDF5 file where any array needed for DBSCAN and too large to fit into memory
is to be stored. Procedure 'shoot' relies on arrays stored in this data structure by a previous
call to 'load' (see corresponding documentation)
sample_ID : int, optional (default = 0)
Identifies the particular set of selected data-points on which to perform DBSCAN.
If not subsamples were provided in the call to 'load', the whole dataset will be subjected to DBSCAN clustering.
minPts : int
The number of points within a 'eps'-radius hypershpere for this region to qualify as dense.
random_state: np.RandomState, optional (default = None)
The generator used to reorder the samples. If None at input, will be set to np.random.
verbose : Boolean, optional (default = True)
Whether to display messages concerning the status of the computations and the time it took to complete
each major stage of the algorithm.
Returns
-------
core_samples : array of shape (n_core_samples, )
Indices of the core samples.
labels : array of shape (N_samples, )
Holds the cluster labels of each sample. The points considered as noise have entries -1.
The points not initially selected for clustering (i.e. not listed in 'subsampled_indices', if the latter
has been provided in the call to 'load' from this module) are labelled -2.
References
----------
Ester, M., H. P. Kriegel, J. Sander and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996 | Below is the instruction that describes the task:
### Input:
Perform DBSCAN clustering with parameters 'minPts' and 'eps'
(as determined by a prior call to 'load' from this module).
If multiple subsamples of the dataset were provided in a preliminary call to 'load',
'sample_ID' specifies which one of those subsamples is to undergo DBSCAN clustering.
Parameters
----------
hdf5_file_name : file object or string
The handle or name of an HDF5 file where any array needed for DBSCAN and too large to fit into memory
is to be stored. Procedure 'shoot' relies on arrays stored in this data structure by a previous
call to 'load' (see corresponding documentation)
sample_ID : int, optional (default = 0)
Identifies the particular set of selected data-points on which to perform DBSCAN.
If not subsamples were provided in the call to 'load', the whole dataset will be subjected to DBSCAN clustering.
minPts : int
The number of points within a 'eps'-radius hypershpere for this region to qualify as dense.
random_state: np.RandomState, optional (default = None)
The generator used to reorder the samples. If None at input, will be set to np.random.
verbose : Boolean, optional (default = True)
Whether to display messages concerning the status of the computations and the time it took to complete
each major stage of the algorithm.
Returns
-------
core_samples : array of shape (n_core_samples, )
Indices of the core samples.
labels : array of shape (N_samples, )
Holds the cluster labels of each sample. The points considered as noise have entries -1.
The points not initially selected for clustering (i.e. not listed in 'subsampled_indices', if the latter
has been provided in the call to 'load' from this module) are labelled -2.
References
----------
Ester, M., H. P. Kriegel, J. Sander and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
### Response:
def shoot(hdf5_file_name, minPts, sample_ID = 0, random_state = None, verbose = True):
    """Perform DBSCAN clustering with parameters 'minPts' and 'eps'
    (as determined by a prior call to 'load' from this module).
    If multiple subsamples of the dataset were provided in a preliminary call to 'load',
    'sample_ID' specifies which one of those subsamples is to undergo DBSCAN clustering.
    Parameters
    ----------
    hdf5_file_name : file object or string
        The handle or name of an HDF5 file where any array needed for DBSCAN and too large to fit into memory
        is to be stored. Procedure 'shoot' relies on arrays stored in this data structure by a previous
        call to 'load' (see corresponding documentation)
    sample_ID : int, optional (default = 0)
        Identifies the particular set of selected data-points on which to perform DBSCAN.
        If no subsamples were provided in the call to 'load', the whole dataset will be subjected to DBSCAN clustering.
    minPts : int
        The number of points within an 'eps'-radius hypersphere for this region to qualify as dense.
    random_state: np.RandomState, optional (default = None)
        The generator used to reorder the samples. If None at input, will be set to np.random.
    verbose : Boolean, optional (default = True)
        Whether to display messages concerning the status of the computations and the time it took to complete
        each major stage of the algorithm.
    Returns
    -------
    core_samples : array of shape (n_core_samples, )
        Indices of the core samples.
    labels : array of shape (N_samples, )
        Holds the cluster labels of each sample. The points considered as noise have entries -1.
        The points not initially selected for clustering (i.e. not listed in 'subsampled_indices', if the latter
        has been provided in the call to 'load' from this module) are labelled -2.
    References
    ----------
    Ester, M., H. P. Kriegel, J. Sander and X. Xu, "A Density-Based
    Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
    In: Proceedings of the 2nd International Conference on Knowledge Discovery
    and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
    """
    # Fetch the precomputed neighborhood structure written by a prior call to
    # 'load' (CSR-like layout: flat 'indices' array plus per-sample 'indptr').
    fileh = tables.open_file(hdf5_file_name, mode = 'r+')
    neighborhoods_indices = fileh.root.DBSCAN_group.neighborhoods_indices
    neighborhoods_indptr = fileh.root.DBSCAN_group.neighborhoods_indptr[:]
    neighbors_counts = fileh.root.DBSCAN_group.neighbors_counts[sample_ID]
    subsampled_indices = fileh.root.DBSCAN_group.subsamples_matrix[sample_ID]
    N_samples = neighborhoods_indptr.size - 1
    N_runs, N_subsamples = fileh.root.DBSCAN_group.subsamples_matrix.shape
    # NOTE(review): 'sample_ID' is validated only after it has already been
    # used for indexing above, and 'verbose' and 'N_subsamples' are unused
    # in this function.
    if not isinstance(sample_ID, int):
        raise ValueError("\nERROR: DBSCAN_multiplex @ shoot:\n"
                         "'sample_ID' must be an integer identifying the set of subsampled indices "
                         "on which to perform DBSCAN clustering\n")
    if (sample_ID < 0) or (sample_ID >= N_runs):
        raise ValueError("\nERROR: DBSCAN_multiplex @ shoot:\n"
                         "'sample_ID' must belong to the interval [0; {}].\n".format(N_runs - 1))
    # points that have not been sampled are labelled with -2
    labels = np.full(N_samples, -2, dtype = int)
    # among the points selected for clustering,
    # all are initially characterized as noise
    labels[subsampled_indices] = - 1
    random_state = check_random_state(random_state)
    # Core samples have at least 'minPts' neighbors within the 'eps' radius
    # (the counts were precomputed by 'load').
    core_samples = np.flatnonzero(neighbors_counts >= minPts)
    index_order = np.take(core_samples, random_state.permutation(core_samples.size))
    cluster_ID = 0
    # Look at all the selected samples, see if they qualify as core samples
    # Create a new cluster from those core samples
    for index in index_order:
        if labels[index] not in {-1, -2}:
            continue
        labels[index] = cluster_ID
        candidates = [index]
        # Breadth-first expansion of the cluster through density-reachable
        # points: gather the neighbors of all current candidates at once.
        while len(candidates) > 0:
            candidate_neighbors = np.zeros(0, dtype = np.int32)
            for k in candidates:
                candidate_neighbors = np.append(candidate_neighbors,
                    neighborhoods_indices[neighborhoods_indptr[k]: neighborhoods_indptr[k+1]])
            candidate_neighbors = np.unique(candidate_neighbors)
            # Restrict the expansion to the points actually selected for
            # this clustering run.
            candidate_neighbors = np.intersect1d(candidate_neighbors, subsampled_indices, assume_unique = True)
            not_noise_anymore = np.compress(np.take(labels, candidate_neighbors) == -1, candidate_neighbors)
            labels[not_noise_anymore] = cluster_ID
            # Eliminate as potential candidates the points that have already
            # been used to expand the current cluster by a trail
            # of density-reachable points
            candidates = np.intersect1d(not_noise_anymore, core_samples, assume_unique = True)
        cluster_ID += 1
        # Done with building this cluster.
        # "cluster_ID" is now labelling the next cluster.
    fileh.close()
    gc.collect()
    return core_samples, labels
def to_dense(self):
    """
    Convert to dense DataFrame
    Returns
    -------
    df : DataFrame
    """
    dense_columns = {}
    for label, column in self.items():
        dense_columns[label] = column.to_dense()
    return DataFrame(dense_columns, index=self.index, columns=self.columns)
Returns
-------
df : DataFrame | Below is the the instruction that describes the task:
### Input:
Convert to dense DataFrame
Returns
-------
df : DataFrame
### Response:
def to_dense(self):
"""
Convert to dense DataFrame
Returns
-------
df : DataFrame
"""
data = {k: v.to_dense() for k, v in self.items()}
return DataFrame(data, index=self.index, columns=self.columns) |
def with_name(self, name):
    """Return a new URL with name (last part of path) replaced.
    Query and fragment parts are cleaned up.
    Name is encoded if needed.
    """
    # N.B. DOES cleanup query/fragment
    # Validate before quoting: the name must be a plain, slash-free string.
    if not isinstance(name, str):
        raise TypeError("Invalid name type")
    if "/" in name:
        raise ValueError("Slash in name is not allowed")
    name = self._PATH_QUOTER(name)
    # "." and ".." are path navigation markers, not valid file names.
    if name in (".", ".."):
        raise ValueError(". and .. values are forbidden")
    parts = list(self.raw_parts)
    if self.is_absolute():
        if len(parts) == 1:
            # Path is just "/": the new name becomes the only segment.
            parts.append(name)
        else:
            parts[-1] = name
            parts[0] = ""  # replace leading '/'
    else:
        parts[-1] = name
        if parts[0] == "/":
            parts[0] = ""  # replace leading '/'
    # Rebuild the path from its segments; query and fragment are
    # intentionally dropped per the documented contract.
    return URL(
        self._val._replace(path="/".join(parts), query="", fragment=""),
        encoded=True,
    )
Query and fragment parts are cleaned up.
Name is encoded if needed. | Below is the the instruction that describes the task:
### Input:
Return a new URL with name (last part of path) replaced.
Query and fragment parts are cleaned up.
Name is encoded if needed.
### Response:
def with_name(self, name):
"""Return a new URL with name (last part of path) replaced.
Query and fragment parts are cleaned up.
Name is encoded if needed.
"""
# N.B. DOES cleanup query/fragment
if not isinstance(name, str):
raise TypeError("Invalid name type")
if "/" in name:
raise ValueError("Slash in name is not allowed")
name = self._PATH_QUOTER(name)
if name in (".", ".."):
raise ValueError(". and .. values are forbidden")
parts = list(self.raw_parts)
if self.is_absolute():
if len(parts) == 1:
parts.append(name)
else:
parts[-1] = name
parts[0] = "" # replace leading '/'
else:
parts[-1] = name
if parts[0] == "/":
parts[0] = "" # replace leading '/'
return URL(
self._val._replace(path="/".join(parts), query="", fragment=""),
encoded=True,
) |
def merge_items(items):
    # type: (List[AbstractType]) -> List[AbstractType]
    """Merge union items that can be merged.

    Consumes 'items' (the list is emptied in place) and returns a new
    list in which every mergeable pair has been collapsed via
    'merged_type'.  Items that merge with nothing keep their original
    relative order.
    """
    result = []
    while items:
        item = items.pop()
        # Find the first remaining item that merges with 'item'.  Track the
        # index explicitly instead of relying on the loop variable leaking
        # out of the 'for' (the original did 'del items[i]' after the loop).
        merge_index = None
        merged = None
        for i, other in enumerate(items):
            merged = merged_type(item, other)
            if merged:
                merge_index = i
                break
        if merged:
            # Replace the partner with the merged item; it is re-examined on
            # a later iteration, so merges can cascade.
            del items[merge_index]
            items.append(merged)
        else:
            result.append(item)
    return list(reversed(result))
### Input:
Merge union items that can be merged.
### Response:
def merge_items(items):
# type: (List[AbstractType]) -> List[AbstractType]
"""Merge union items that can be merged."""
result = []
while items:
item = items.pop()
merged = None
for i, other in enumerate(items):
merged = merged_type(item, other)
if merged:
break
if merged:
del items[i]
items.append(merged)
else:
result.append(item)
return list(reversed(result)) |
def write_to(self, group, append=False):
    """Write stored features to a given group.

    Parameters
    ----------
    group : HDF5-like group
        Target group; must already contain a resizable dataset named
        ``self.name``, which is resized along axis 0 before assignment.
    append : bool, optional (default False)
        If True, grow the dataset and write after the existing rows;
        otherwise resize the dataset to exactly the stored frames and
        overwrite it.
    """
    if self.sparsetodense:
        # Densify any sparse chunks so they can be concatenated and
        # stored in the (dense) target dataset.
        self.data = [x.todense() if sp.issparse(x) else x
                     for x in self.data]
    nframes = sum(d.shape[0] for d in self.data)
    dim = self._group_dim(group)
    feats = np.concatenate(self.data, axis=0)
    if append:
        nframes_group = group[self.name].shape[0]
        group[self.name].resize(nframes_group + nframes, axis=0)
        if dim == 1:
            group[self.name][nframes_group:] = feats
        else:
            group[self.name][nframes_group:, :] = feats
    else:
        group[self.name].resize(nframes, axis=0)
        # Fix: the original assigned 'feats if dim == 1 else feats' -- a
        # redundant conditional whose branches were identical; both reduce
        # to plain 'feats'.
        group[self.name][...] = feats
### Input:
Write stored features to a given group
### Response:
def write_to(self, group, append=False):
"""Write stored features to a given group"""
if self.sparsetodense:
self.data = [x.todense() if sp.issparse(x) else x
for x in self.data]
nframes = sum([d.shape[0] for d in self.data])
dim = self._group_dim(group)
feats = np.concatenate(self.data, axis=0)
if append:
nframes_group = group[self.name].shape[0]
group[self.name].resize(nframes_group + nframes, axis=0)
if dim == 1:
group[self.name][nframes_group:] = feats
else:
group[self.name][nframes_group:, :] = feats
else:
group[self.name].resize(nframes, axis=0)
group[self.name][...] = feats if dim == 1 else feats |
def set_schedule(self, data):
    """Sets the schedule for the given day. """
    payload = Schedule.build(data)
    self._conn.make_request(PROP_WRITE_HANDLE, payload)
### Input:
Sets the schedule for the given day.
### Response:
def set_schedule(self, data):
"""Sets the schedule for the given day. """
value = Schedule.build(data)
self._conn.make_request(PROP_WRITE_HANDLE, value) |
def on_redraw_timer(self, event):
    '''the redraw timer ensures we show new map tiles as they
    are downloaded'''
    state = self.state
    # Drain every pending message from the worker queue; each message type
    # updates a different aspect of the display.
    while state.in_queue.qsize():
        obj = state.in_queue.get()
        if isinstance(obj, MPImageData):
            # New image payload: rebuild the wx image and flag a redraw.
            img = wx.EmptyImage(obj.width, obj.height)
            img.SetData(obj.data)
            self.img = img
            self.need_redraw = True
            if state.auto_size:
                # Resize the frame so its client area matches the new
                # image, preserving the window decoration size
                # (border/title bar = total minus client).
                client_area = state.frame.GetClientSize()
                total_area = state.frame.GetSize()
                bx = max(total_area.x - client_area.x,0)
                by = max(total_area.y - client_area.y,0)
                state.frame.SetSize(wx.Size(obj.width+bx, obj.height+by))
        if isinstance(obj, MPImageTitle):
            state.frame.SetTitle(obj.title)
        if isinstance(obj, MPImageMenu):
            self.set_menu(obj.menu)
        if isinstance(obj, MPImagePopupMenu):
            self.set_popup_menu(obj.menu)
        if isinstance(obj, MPImageBrightness):
            state.brightness = obj.brightness
            self.need_redraw = True
        if isinstance(obj, MPImageFullSize):
            self.full_size()
        if isinstance(obj, MPImageFitToWindow):
            self.fit_to_window()
    # Redraw once after the queue has been drained, not per message.
    if self.need_redraw:
        self.redraw()
are downloaded | Below is the the instruction that describes the task:
### Input:
the redraw timer ensures we show new map tiles as they
are downloaded
### Response:
def on_redraw_timer(self, event):
'''the redraw timer ensures we show new map tiles as they
are downloaded'''
state = self.state
while state.in_queue.qsize():
obj = state.in_queue.get()
if isinstance(obj, MPImageData):
img = wx.EmptyImage(obj.width, obj.height)
img.SetData(obj.data)
self.img = img
self.need_redraw = True
if state.auto_size:
client_area = state.frame.GetClientSize()
total_area = state.frame.GetSize()
bx = max(total_area.x - client_area.x,0)
by = max(total_area.y - client_area.y,0)
state.frame.SetSize(wx.Size(obj.width+bx, obj.height+by))
if isinstance(obj, MPImageTitle):
state.frame.SetTitle(obj.title)
if isinstance(obj, MPImageMenu):
self.set_menu(obj.menu)
if isinstance(obj, MPImagePopupMenu):
self.set_popup_menu(obj.menu)
if isinstance(obj, MPImageBrightness):
state.brightness = obj.brightness
self.need_redraw = True
if isinstance(obj, MPImageFullSize):
self.full_size()
if isinstance(obj, MPImageFitToWindow):
self.fit_to_window()
if self.need_redraw:
self.redraw() |
def get_wrap_size_limit(self, output_size, conf_req=True, qop_req=C.GSS_C_QOP_DEFAULT):
    """
    Calculates the maximum size of message that can be fed to :meth:`wrap` so that the size of
    the resulting wrapped token (message plus wrapping overhead) is no more than a given
    maximum output size.
    :param output_size: The maximum output size (in bytes) of a wrapped token
    :type output_size: int
    :param conf_req: Whether to calculate the wrapping overhead for confidentiality protection
        (if True) or just integrity protection (if False).
    :type conf_req: bool
    :returns: The maximum input size (in bytes) of message that can be passed to :meth:`wrap`
    :rtype: int
    """
    # Out-parameters for the C call: mechanism-specific status code and
    # the computed maximum input size.
    minor_status = ffi.new('OM_uint32[1]')
    max_input_size = ffi.new('OM_uint32[1]')
    retval = C.gss_wrap_size_limit(
        minor_status,
        self._ctx[0],
        ffi.cast('int', conf_req),
        ffi.cast('gss_qop_t', qop_req),
        ffi.cast('OM_uint32', output_size),
        max_input_size
    )
    # A non-zero major status signals a GSSAPI-level failure; translate it
    # into a Python exception, using the mechanism-specific minor status
    # (and mech type, when known) for a more precise error.
    if GSS_ERROR(retval):
        if minor_status[0] and self.mech_type:
            raise _exception_for_status(retval, minor_status[0], self.mech_type)
        else:
            raise _exception_for_status(retval, minor_status[0])
    return max_input_size[0]
the resulting wrapped token (message plus wrapping overhead) is no more than a given
maximum output size.
:param output_size: The maximum output size (in bytes) of a wrapped token
:type output_size: int
:param conf_req: Whether to calculate the wrapping overhead for confidentiality protection
(if True) or just integrity protection (if False).
:type conf_req: bool
:returns: The maximum input size (in bytes) of message that can be passed to :meth:`wrap`
:rtype: int | Below is the the instruction that describes the task:
### Input:
Calculates the maximum size of message that can be fed to :meth:`wrap` so that the size of
the resulting wrapped token (message plus wrapping overhead) is no more than a given
maximum output size.
:param output_size: The maximum output size (in bytes) of a wrapped token
:type output_size: int
:param conf_req: Whether to calculate the wrapping overhead for confidentiality protection
(if True) or just integrity protection (if False).
:type conf_req: bool
:returns: The maximum input size (in bytes) of message that can be passed to :meth:`wrap`
:rtype: int
### Response:
def get_wrap_size_limit(self, output_size, conf_req=True, qop_req=C.GSS_C_QOP_DEFAULT):
"""
Calculates the maximum size of message that can be fed to :meth:`wrap` so that the size of
the resulting wrapped token (message plus wrapping overhead) is no more than a given
maximum output size.
:param output_size: The maximum output size (in bytes) of a wrapped token
:type output_size: int
:param conf_req: Whether to calculate the wrapping overhead for confidentiality protection
(if True) or just integrity protection (if False).
:type conf_req: bool
:returns: The maximum input size (in bytes) of message that can be passed to :meth:`wrap`
:rtype: int
"""
minor_status = ffi.new('OM_uint32[1]')
max_input_size = ffi.new('OM_uint32[1]')
retval = C.gss_wrap_size_limit(
minor_status,
self._ctx[0],
ffi.cast('int', conf_req),
ffi.cast('gss_qop_t', qop_req),
ffi.cast('OM_uint32', output_size),
max_input_size
)
if GSS_ERROR(retval):
if minor_status[0] and self.mech_type:
raise _exception_for_status(retval, minor_status[0], self.mech_type)
else:
raise _exception_for_status(retval, minor_status[0])
return max_input_size[0] |
def _create_evidence(self, edge_id):
    """Build Evidence objects for one edge/Statement of the NDEx network.

    Returns one Evidence per PMID attached to the edge, or a single
    Evidence without a PMID when the edge carries none.

    Parameters
    ----------
    edge_id : int
        ID of the edge in the underlying NDEx network.
    """
    attributes = self._edge_attributes.get(edge_id)
    pmid_list = attributes.get('pmids') if attributes else None
    network_id = self._network_info['externalId']
    if not pmid_list:
        return [Evidence(source_api='ndex',
                         source_id=network_id,
                         annotations={'edge_id': edge_id})]
    return [Evidence(source_api='ndex',
                     source_id=network_id,
                     pmid=pmid,
                     annotations={'edge_id': edge_id})
            for pmid in pmid_list]
Parameters
----------
edge_id : int
ID of the edge in the underlying NDEx network. | Below is the the instruction that describes the task:
### Input:
Create Evidence object for a specific edge/Statement in the network.
Parameters
----------
edge_id : int
ID of the edge in the underlying NDEx network.
### Response:
def _create_evidence(self, edge_id):
"""Create Evidence object for a specific edge/Statement in the network.
Parameters
----------
edge_id : int
ID of the edge in the underlying NDEx network.
"""
pmids = None
edge_attr = self._edge_attributes.get(edge_id)
if edge_attr:
pmids = edge_attr.get('pmids')
if not pmids:
return [Evidence(source_api='ndex',
source_id=self._network_info['externalId'],
annotations={'edge_id': edge_id})]
else:
evidence = []
for pmid in pmids:
evidence.append(
Evidence(source_api='ndex',
source_id=self._network_info['externalId'],
pmid=pmid,
annotations={'edge_id': edge_id}))
return evidence |
def choose_args(metadata, config):
    """
    Choose database connection arguments.
    """
    connect_args = choose_connect_args(metadata, config)
    return {
        "connect_args": connect_args,
        "echo": config.echo,
        "max_overflow": config.max_overflow,
        "pool_size": config.pool_size,
        "pool_timeout": config.pool_timeout,
    }
### Input:
Choose database connection arguments.
### Response:
def choose_args(metadata, config):
"""
Choose database connection arguments.
"""
return dict(
connect_args=choose_connect_args(metadata, config),
echo=config.echo,
max_overflow=config.max_overflow,
pool_size=config.pool_size,
pool_timeout=config.pool_timeout,
) |
def __setkey(key):
    """
    Set up the key schedule from the encryption key.
    """
    global C, D, KS, E
    # Number of left rotations applied to C and D in each of the 16 rounds
    # of the DES key schedule.
    shifts = (1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1)
    # First, generate C and D by permuting the key. The lower order bit of each
    # 8-bit char is not used, so C and D are only 28 bits apiece.
    # (PC1_C / PC1_D hold 1-based bit positions, hence the '- 1'.)
    for i in range(28):
        C[i] = key[PC1_C[i] - 1]
        D[i] = key[PC1_D[i] - 1]
    for i in range(16):
        # rotate C and D left by shifts[i] positions, one bit at a time
        for k in range(shifts[i]):
            temp = C[0]
            for j in range(27):
                C[j] = C[j + 1]
            C[27] = temp
            temp = D[0]
            for j in range(27):
                D[j] = D[j + 1]
            D[27] = temp
        # get Ki. Note C and D are concatenated: the PC-2 permutation picks
        # 24 bits from C and 24 bits from D for the round key KS[i].
        for j in range(24):
            KS[i][j] = C[PC2_C[j] - 1]
            KS[i][j + 24] = D[PC2_D[j] - 28 - 1]
    # load E with the initial E bit selections
    for i in range(48):
        E[i] = e2[i]
### Input:
Set up the key schedule from the encryption key.
### Response:
def __setkey(key):
"""
Set up the key schedule from the encryption key.
"""
global C, D, KS, E
shifts = (1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1)
# First, generate C and D by permuting the key. The lower order bit of each
# 8-bit char is not used, so C and D are only 28 bits apiece.
for i in range(28):
C[i] = key[PC1_C[i] - 1]
D[i] = key[PC1_D[i] - 1]
for i in range(16):
# rotate
for k in range(shifts[i]):
temp = C[0]
for j in range(27):
C[j] = C[j + 1]
C[27] = temp
temp = D[0]
for j in range(27):
D[j] = D[j + 1]
D[27] = temp
# get Ki. Note C and D are concatenated
for j in range(24):
KS[i][j] = C[PC2_C[j] - 1]
KS[i][j + 24] = D[PC2_D[j] - 28 - 1]
# load E with the initial E bit selections
for i in range(48):
E[i] = e2[i] |
def recurrence_coefficients(n, alpha, standardization="normal", symbolic=False):
    """Recurrence coefficients for generalized Laguerre polynomials.
    vals_k = vals_{k-1} * (t*a_k - b_k) - vals{k-2} * c_k

    Parameters
    ----------
    n : int
        Number of recurrence coefficients to generate.
    alpha : scalar
        Generalization parameter of the Laguerre polynomials.
    standardization : {"monic", "classical", "normal"}
        Which normalization the returned coefficients correspond to.
    symbolic : bool, optional (default False)
        If True, compute exact symbolic coefficients with sympy;
        otherwise compute floating-point values with numpy/scipy.

    Returns
    -------
    p0, a, b, c
        Leading value and the three coefficient arrays of length n
        (c[0] is NaN except for the monic standardization).

    Raises
    ------
    ValueError
        If 'standardization' is not one of the three supported names.
    """
    # Pick symbolic or numeric backends once, up front.
    S = sympy.S if symbolic else lambda x: x
    sqrt = sympy.sqrt if symbolic else numpy.sqrt
    gamma = sympy.gamma if symbolic else scipy.special.gamma
    if standardization == "monic":
        p0 = 1
        a = n * [1]
        b = [2 * k + 1 + alpha for k in range(n)]
        c = [k * (k + alpha) for k in range(n)]
        c[0] = gamma(alpha + 1)
    elif standardization == "classical":
        p0 = 1
        a = [-S(1) / (k + 1) for k in range(n)]
        b = [-S(2 * k + 1 + alpha) / (k + 1) for k in range(n)]
        c = [S(k + alpha) / (k + 1) for k in range(n)]
        c[0] = numpy.nan
    elif standardization == "normal":
        p0 = 1 / sqrt(gamma(alpha + 1))
        a = [-1 / sqrt((k + 1) * (k + 1 + alpha)) for k in range(n)]
        b = [-(2 * k + 1 + alpha) / sqrt((k + 1) * (k + 1 + alpha)) for k in range(n)]
        c = [sqrt(k * S(k + alpha) / ((k + 1) * (k + 1 + alpha))) for k in range(n)]
        c[0] = numpy.nan
    else:
        # Input validation: raise a real exception instead of the original
        # 'assert', which is silently stripped under 'python -O'.
        raise ValueError(
            "Unknown Laguerre standardization '{}'.".format(standardization)
        )
    return p0, numpy.array(a), numpy.array(b), numpy.array(c)
vals_k = vals_{k-1} * (t*a_k - b_k) - vals{k-2} * c_k | Below is the the instruction that describes the task:
### Input:
Recurrence coefficients for generalized Laguerre polynomials.
vals_k = vals_{k-1} * (t*a_k - b_k) - vals{k-2} * c_k
### Response:
def recurrence_coefficients(n, alpha, standardization="normal", symbolic=False):
"""Recurrence coefficients for generalized Laguerre polynomials.
vals_k = vals_{k-1} * (t*a_k - b_k) - vals{k-2} * c_k
"""
S = sympy.S if symbolic else lambda x: x
sqrt = sympy.sqrt if symbolic else numpy.sqrt
gamma = sympy.gamma if symbolic else scipy.special.gamma
if standardization == "monic":
p0 = 1
a = n * [1]
b = [2 * k + 1 + alpha for k in range(n)]
c = [k * (k + alpha) for k in range(n)]
c[0] = gamma(alpha + 1)
elif standardization == "classical":
p0 = 1
a = [-S(1) / (k + 1) for k in range(n)]
b = [-S(2 * k + 1 + alpha) / (k + 1) for k in range(n)]
c = [S(k + alpha) / (k + 1) for k in range(n)]
c[0] = numpy.nan
else:
assert (
standardization == "normal"
), "Unknown Laguerre standardization '{}'.".format(
standardization
)
p0 = 1 / sqrt(gamma(alpha + 1))
a = [-1 / sqrt((k + 1) * (k + 1 + alpha)) for k in range(n)]
b = [-(2 * k + 1 + alpha) / sqrt((k + 1) * (k + 1 + alpha)) for k in range(n)]
c = [sqrt(k * S(k + alpha) / ((k + 1) * (k + 1 + alpha))) for k in range(n)]
c[0] = numpy.nan
return p0, numpy.array(a), numpy.array(b), numpy.array(c) |
def methodcaller(name, *args):
    """
    Upstream bug in python:
    https://bugs.python.org/issue26822
    """
    bound = operator.methodcaller(name, *args)

    def _call(obj, **kwargs):
        # Deliberately swallow **kwargs -- see the linked CPython issue.
        return bound(obj)

    return _call
https://bugs.python.org/issue26822 | Below is the the instruction that describes the task:
### Input:
Upstream bug in python:
https://bugs.python.org/issue26822
### Response:
def methodcaller(name, *args):
"""
Upstream bug in python:
https://bugs.python.org/issue26822
"""
func = operator.methodcaller(name, *args)
return lambda obj, **kwargs: func(obj) |
def preserve_context(f):
    """
    Package up the given function with the current Eliot context, and then
    restore context and call given function when the resulting callable is
    run. This allows continuing the action context within a different thread.
    The result should only be used once, since it relies on
    L{Action.serialize_task_id} whose results should only be deserialized
    once.
    @param f: A callable.
    @return: One-time use callable that calls given function in context of
        a child of current Eliot action.
    """
    action = current_action()
    if action is None:
        # No Eliot action is active -- nothing to preserve, return f as-is.
        return f
    task_id = action.serialize_task_id()
    # A lock doubles as a thread-safe one-shot flag: the first call acquires
    # it (non-blocking) and succeeds; every later call fails to acquire.
    called = threading.Lock()
    def restore_eliot_context(*args, **kwargs):
        # Make sure the function has not already been called:
        if not called.acquire(False):
            raise TooManyCalls(f)
        with Action.continue_task(task_id=task_id):
            return f(*args, **kwargs)
    return restore_eliot_context
restore context and call given function when the resulting callable is
run. This allows continuing the action context within a different thread.
The result should only be used once, since it relies on
L{Action.serialize_task_id} whose results should only be deserialized
once.
@param f: A callable.
@return: One-time use callable that calls given function in context of
a child of current Eliot action. | Below is the the instruction that describes the task:
### Input:
Package up the given function with the current Eliot context, and then
restore context and call given function when the resulting callable is
run. This allows continuing the action context within a different thread.
The result should only be used once, since it relies on
L{Action.serialize_task_id} whose results should only be deserialized
once.
@param f: A callable.
@return: One-time use callable that calls given function in context of
a child of current Eliot action.
### Response:
def preserve_context(f):
"""
Package up the given function with the current Eliot context, and then
restore context and call given function when the resulting callable is
run. This allows continuing the action context within a different thread.
The result should only be used once, since it relies on
L{Action.serialize_task_id} whose results should only be deserialized
once.
@param f: A callable.
@return: One-time use callable that calls given function in context of
a child of current Eliot action.
"""
action = current_action()
if action is None:
return f
task_id = action.serialize_task_id()
called = threading.Lock()
def restore_eliot_context(*args, **kwargs):
# Make sure the function has not already been called:
if not called.acquire(False):
raise TooManyCalls(f)
with Action.continue_task(task_id=task_id):
return f(*args, **kwargs)
return restore_eliot_context |
def angularjs(parser, token):
    """
    Conditionally switch between AngularJS and Django variable expansion for ``{{`` and ``}}``
    keeping Django's expansion for ``{%`` and ``%}``
    Usage::
        {% angularjs 1 %} or simply {% angularjs %}
        {% process variables through the AngularJS template engine %}
        {% endangularjs %}
        {% angularjs 0 %}
        {% process variables through the Django template engine %}
        {% endangularjs %}
    Instead of 0 and 1, it is possible to use a context variable.
    """
    bits = token.contents.split()
    # Default to AngularJS rendering ('1') when no argument is given.
    if len(bits) < 2:
        bits.append('1')
    values = [parser.compile_filter(bit) for bit in bits[1:]]
    django_nodelist = parser.parse(('endangularjs',))
    angular_nodelist = NodeList()
    for node in django_nodelist:
        # convert all occurrences of VariableNode into a TextNode using the
        # AngularJS double curly bracket notation
        if isinstance(node, VariableNode):
            # convert Django's array notation into JS array notation
            # (e.g. 'items.0.name' becomes 'items[0].name')
            tokens = node.filter_expression.token.split('.')
            token = tokens[0]
            for part in tokens[1:]:
                if part.isdigit():
                    token += '[%s]' % part
                else:
                    token += '.%s' % part
            node = TextNode('{{ %s }}' % token)
        angular_nodelist.append(node)
    # Consume the closing {% endangularjs %} token.
    parser.delete_first_token()
    return AngularJsNode(django_nodelist, angular_nodelist, values[0])
keeping Django's expansion for ``{%`` and ``%}``
Usage::
{% angularjs 1 %} or simply {% angularjs %}
{% process variables through the AngularJS template engine %}
{% endangularjs %}
{% angularjs 0 %}
{% process variables through the Django template engine %}
{% endangularjs %}
Instead of 0 and 1, it is possible to use a context variable. | Below is the the instruction that describes the task:
### Input:
Conditionally switch between AngularJS and Django variable expansion for ``{{`` and ``}}``
keeping Django's expansion for ``{%`` and ``%}``
Usage::
{% angularjs 1 %} or simply {% angularjs %}
{% process variables through the AngularJS template engine %}
{% endangularjs %}
{% angularjs 0 %}
{% process variables through the Django template engine %}
{% endangularjs %}
Instead of 0 and 1, it is possible to use a context variable.
### Response:
def angularjs(parser, token):
"""
Conditionally switch between AngularJS and Django variable expansion for ``{{`` and ``}}``
keeping Django's expansion for ``{%`` and ``%}``
Usage::
{% angularjs 1 %} or simply {% angularjs %}
{% process variables through the AngularJS template engine %}
{% endangularjs %}
{% angularjs 0 %}
{% process variables through the Django template engine %}
{% endangularjs %}
Instead of 0 and 1, it is possible to use a context variable.
"""
bits = token.contents.split()
if len(bits) < 2:
bits.append('1')
values = [parser.compile_filter(bit) for bit in bits[1:]]
django_nodelist = parser.parse(('endangularjs',))
angular_nodelist = NodeList()
for node in django_nodelist:
# convert all occurrences of VariableNode into a TextNode using the
# AngularJS double curly bracket notation
if isinstance(node, VariableNode):
# convert Django's array notation into JS array notation
tokens = node.filter_expression.token.split('.')
token = tokens[0]
for part in tokens[1:]:
if part.isdigit():
token += '[%s]' % part
else:
token += '.%s' % part
node = TextNode('{{ %s }}' % token)
angular_nodelist.append(node)
parser.delete_first_token()
return AngularJsNode(django_nodelist, angular_nodelist, values[0]) |
def write_locked(*args, **kwargs):
    """Acquires & releases a write lock around call into decorated method.
    NOTE(harlowja): if no attribute name is provided then by default the
    attribute named '_lock' is looked for (this attribute is expected to be
    a :py:class:`.ReaderWriterLock` object) in the instance object this
    decorator is attached to.
    """
    def decorator(f):
        # Name of the instance attribute holding the lock ('_lock' default).
        attr_name = kwargs.get('lock', '_lock')
        @six.wraps(f)
        def wrapper(self, *args, **kwargs):
            rw_lock = getattr(self, attr_name)
            with rw_lock.write_lock():
                return f(self, *args, **kwargs)
        return wrapper
    # This is needed to handle when the decorator has args or the decorator
    # doesn't have args, python is rather weird here...
    if kwargs or not args:
        # Called as '@write_locked(...)' -- return the decorator itself.
        return decorator
    else:
        if len(args) == 1:
            # Called bare as '@write_locked' -- args[0] is the function.
            return decorator(args[0])
        else:
            return decorator
NOTE(harlowja): if no attribute name is provided then by default the
attribute named '_lock' is looked for (this attribute is expected to be
a :py:class:`.ReaderWriterLock` object) in the instance object this
decorator is attached to. | Below is the the instruction that describes the task:
### Input:
Acquires & releases a write lock around call into decorated method.
NOTE(harlowja): if no attribute name is provided then by default the
attribute named '_lock' is looked for (this attribute is expected to be
a :py:class:`.ReaderWriterLock` object) in the instance object this
decorator is attached to.
### Response:
def write_locked(*args, **kwargs):
"""Acquires & releases a write lock around call into decorated method.
NOTE(harlowja): if no attribute name is provided then by default the
attribute named '_lock' is looked for (this attribute is expected to be
a :py:class:`.ReaderWriterLock` object) in the instance object this
decorator is attached to.
"""
def decorator(f):
attr_name = kwargs.get('lock', '_lock')
@six.wraps(f)
def wrapper(self, *args, **kwargs):
rw_lock = getattr(self, attr_name)
with rw_lock.write_lock():
return f(self, *args, **kwargs)
return wrapper
# This is needed to handle when the decorator has args or the decorator
# doesn't have args, python is rather weird here...
if kwargs or not args:
return decorator
else:
if len(args) == 1:
return decorator(args[0])
else:
return decorator |
def get_rel_path_fragment(self, doc_id):
    """Return the path of document `doc_id` relative to the repository root.

    Useful because, given the remote, it lets callers reconstruct the
    document's full path.
    """
    with self._index_lock:
        record = self._doc_index[doc_id]
    full_path = record[-1]
    prefix_len = len(self.path) + 1
    return full_path[prefix_len:]
repo to the doc file. This is useful because
(if you know the remote), it lets you construct the full path. | Below is the the instruction that describes the task:
### Input:
For `doc_id` returns the path from the
repo to the doc file. This is useful because
(if you know the remote), it lets you construct the full path.
### Response:
def get_rel_path_fragment(self, doc_id):
"""For `doc_id` returns the path from the
repo to the doc file. This is useful because
(if you know the remote), it lets you construct the full path.
"""
with self._index_lock:
r = self._doc_index[doc_id]
fp = r[-1]
return fp[(len(self.path) + 1):] |
def _mk_imp_override(srcname, replacement):
"""
Create a simple one for one replacement for a module
:param srcname: The source module name to replace
:param replacement: The object which should act as the replacement
"""
class DummyImporter(object):
def find_module(self, fullname, path):
if fullname == srcname:
return self
return None
def load_module(self, fullname):
if fullname != srcname:
raise ImportError
sys.modules[fullname] = replacement
return replacement
obj = DummyImporter()
sys.meta_path.append(obj) | Create a simple one for one replacement for a module
:param srcname: The source module name to replace
:param replacement: The object which should act as the replacement | Below is the the instruction that describes the task:
### Input:
Create a simple one for one replacement for a module
:param srcname: The source module name to replace
:param replacement: The object which should act as the replacement
### Response:
def _mk_imp_override(srcname, replacement):
"""
Create a simple one for one replacement for a module
:param srcname: The source module name to replace
:param replacement: The object which should act as the replacement
"""
class DummyImporter(object):
def find_module(self, fullname, path):
if fullname == srcname:
return self
return None
def load_module(self, fullname):
if fullname != srcname:
raise ImportError
sys.modules[fullname] = replacement
return replacement
obj = DummyImporter()
sys.meta_path.append(obj) |
def to_bytearray(self):
"""
Convert this frame into its bytearray representation.
:return: bytearray representation of this frame.
"""
header = bytearray(struct.pack(
'>HHHBB',
self.transaction_id,
self.protocol_id,
self.length,
self.unit_id,
self.fcode
))
return header + self.data | Convert this frame into its bytearray representation.
:return: bytearray representation of this frame. | Below is the the instruction that describes the task:
### Input:
Convert this frame into its bytearray representation.
:return: bytearray representation of this frame.
### Response:
def to_bytearray(self):
"""
Convert this frame into its bytearray representation.
:return: bytearray representation of this frame.
"""
header = bytearray(struct.pack(
'>HHHBB',
self.transaction_id,
self.protocol_id,
self.length,
self.unit_id,
self.fcode
))
return header + self.data |
def import_string(import_name, silent=False):
"""Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If `silent` is True the return value will be `None` if the import fails.
:param import_name: the dotted name for the object to import.
:param silent: if set to `True` import errors are ignored and
`None` is returned instead.
:return: imported object
"""
# force the import name to automatically convert to strings
# __import__ is not able to handle unicode strings in the fromlist
# if the module is a package
import_name = str(import_name).replace(':', '.')
try:
try:
__import__(import_name)
except ImportError:
if '.' not in import_name:
raise
else:
return sys.modules[import_name]
module_name, obj_name = import_name.rsplit('.', 1)
try:
module = __import__(module_name, None, None, [obj_name])
except ImportError:
# support importing modules not yet set up by the parent module
# (or package for that matter)
module = import_string(module_name)
try:
return getattr(module, obj_name)
except AttributeError as e:
raise ImportError(e)
except ImportError as e:
if not silent:
raise e | Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If `silent` is True the return value will be `None` if the import fails.
:param import_name: the dotted name for the object to import.
:param silent: if set to `True` import errors are ignored and
`None` is returned instead.
:return: imported object | Below is the the instruction that describes the task:
### Input:
Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If `silent` is True the return value will be `None` if the import fails.
:param import_name: the dotted name for the object to import.
:param silent: if set to `True` import errors are ignored and
`None` is returned instead.
:return: imported object
### Response:
def import_string(import_name, silent=False):
"""Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If `silent` is True the return value will be `None` if the import fails.
:param import_name: the dotted name for the object to import.
:param silent: if set to `True` import errors are ignored and
`None` is returned instead.
:return: imported object
"""
# force the import name to automatically convert to strings
# __import__ is not able to handle unicode strings in the fromlist
# if the module is a package
import_name = str(import_name).replace(':', '.')
try:
try:
__import__(import_name)
except ImportError:
if '.' not in import_name:
raise
else:
return sys.modules[import_name]
module_name, obj_name = import_name.rsplit('.', 1)
try:
module = __import__(module_name, None, None, [obj_name])
except ImportError:
# support importing modules not yet set up by the parent module
# (or package for that matter)
module = import_string(module_name)
try:
return getattr(module, obj_name)
except AttributeError as e:
raise ImportError(e)
except ImportError as e:
if not silent:
raise e |
def geos2cf(area):
"""Return the cf grid mapping for the geos projection."""
proj_dict = area.proj_dict
args = dict(perspective_point_height=proj_dict.get('h'),
latitude_of_projection_origin=proj_dict.get('lat_0'),
longitude_of_projection_origin=proj_dict.get('lon_0'),
grid_mapping_name='geostationary',
semi_major_axis=proj_dict.get('a'),
semi_minor_axis=proj_dict.get('b'),
sweep_axis=proj_dict.get('sweep'),
)
return args | Return the cf grid mapping for the geos projection. | Below is the the instruction that describes the task:
### Input:
Return the cf grid mapping for the geos projection.
### Response:
def geos2cf(area):
"""Return the cf grid mapping for the geos projection."""
proj_dict = area.proj_dict
args = dict(perspective_point_height=proj_dict.get('h'),
latitude_of_projection_origin=proj_dict.get('lat_0'),
longitude_of_projection_origin=proj_dict.get('lon_0'),
grid_mapping_name='geostationary',
semi_major_axis=proj_dict.get('a'),
semi_minor_axis=proj_dict.get('b'),
sweep_axis=proj_dict.get('sweep'),
)
return args |
def updateReplicationMetadata(
self, pid, replicaMetadata, serialVersion, vendorSpecific=None
):
"""See Also: updateReplicationMetadataResponse()
Args:
pid:
replicaMetadata:
serialVersion:
vendorSpecific:
Returns:
"""
response = self.updateReplicationMetadataResponse(
pid, replicaMetadata, serialVersion, vendorSpecific
)
return self._read_boolean_response(response) | See Also: updateReplicationMetadataResponse()
Args:
pid:
replicaMetadata:
serialVersion:
vendorSpecific:
Returns: | Below is the the instruction that describes the task:
### Input:
See Also: updateReplicationMetadataResponse()
Args:
pid:
replicaMetadata:
serialVersion:
vendorSpecific:
Returns:
### Response:
def updateReplicationMetadata(
self, pid, replicaMetadata, serialVersion, vendorSpecific=None
):
"""See Also: updateReplicationMetadataResponse()
Args:
pid:
replicaMetadata:
serialVersion:
vendorSpecific:
Returns:
"""
response = self.updateReplicationMetadataResponse(
pid, replicaMetadata, serialVersion, vendorSpecific
)
return self._read_boolean_response(response) |
def get_taskruns(project_id, limit=100, offset=0, last_id=None):
"""Return a list of task runs for a given project ID.
:param project_id: PYBOSSA Project ID
:type project_id: integer
:param limit: Number of returned items, default 100
:type limit: integer
:param offset: Offset for the query, default 0
:type offset: integer
:param last_id: id of the last taskrun, used for pagination. If provided, offset is ignored
:type last_id: integer
:rtype: list
:returns: A list of task runs for the given project ID
"""
if last_id is not None:
params = dict(limit=limit, last_id=last_id)
else:
params = dict(limit=limit, offset=offset)
print(OFFSET_WARNING)
params['project_id'] = project_id
try:
res = _pybossa_req('get', 'taskrun',
params=params)
if type(res).__name__ == 'list':
return [TaskRun(taskrun) for taskrun in res]
else:
raise TypeError
except:
raise | Return a list of task runs for a given project ID.
:param project_id: PYBOSSA Project ID
:type project_id: integer
:param limit: Number of returned items, default 100
:type limit: integer
:param offset: Offset for the query, default 0
:type offset: integer
:param last_id: id of the last taskrun, used for pagination. If provided, offset is ignored
:type last_id: integer
:rtype: list
:returns: A list of task runs for the given project ID | Below is the the instruction that describes the task:
### Input:
Return a list of task runs for a given project ID.
:param project_id: PYBOSSA Project ID
:type project_id: integer
:param limit: Number of returned items, default 100
:type limit: integer
:param offset: Offset for the query, default 0
:type offset: integer
:param last_id: id of the last taskrun, used for pagination. If provided, offset is ignored
:type last_id: integer
:rtype: list
:returns: A list of task runs for the given project ID
### Response:
def get_taskruns(project_id, limit=100, offset=0, last_id=None):
"""Return a list of task runs for a given project ID.
:param project_id: PYBOSSA Project ID
:type project_id: integer
:param limit: Number of returned items, default 100
:type limit: integer
:param offset: Offset for the query, default 0
:type offset: integer
:param last_id: id of the last taskrun, used for pagination. If provided, offset is ignored
:type last_id: integer
:rtype: list
:returns: A list of task runs for the given project ID
"""
if last_id is not None:
params = dict(limit=limit, last_id=last_id)
else:
params = dict(limit=limit, offset=offset)
print(OFFSET_WARNING)
params['project_id'] = project_id
try:
res = _pybossa_req('get', 'taskrun',
params=params)
if type(res).__name__ == 'list':
return [TaskRun(taskrun) for taskrun in res]
else:
raise TypeError
except:
raise |
def get_suggestions_with_size(object, size):
""" Gets a list with a certain size of suggestions for an object """
content_type = ContentType.objects.get_for_model(type(object))
try:
return ObjectViewDictionary.objects.filter(
current_object_id=object.id,
current_content_type=content_type).extra(
order_by=['-visits'])[:size]
except:
return ObjectViewDictionary.objects.filter(
current_object_id=object.id,
current_content_type=content_type).extra(order_by=['-visits']) | Gets a list with a certain size of suggestions for an object | Below is the the instruction that describes the task:
### Input:
Gets a list with a certain size of suggestions for an object
### Response:
def get_suggestions_with_size(object, size):
""" Gets a list with a certain size of suggestions for an object """
content_type = ContentType.objects.get_for_model(type(object))
try:
return ObjectViewDictionary.objects.filter(
current_object_id=object.id,
current_content_type=content_type).extra(
order_by=['-visits'])[:size]
except:
return ObjectViewDictionary.objects.filter(
current_object_id=object.id,
current_content_type=content_type).extra(order_by=['-visits']) |
def buy(self, quantity, **kwargs):
""" Shortcut for ``instrument.order("BUY", ...)`` and accepts all of its
`optional parameters <#qtpylib.instrument.Instrument.order>`_
:Parameters:
quantity : int
Order quantity
"""
self.parent.order("BUY", self, quantity=quantity, **kwargs) | Shortcut for ``instrument.order("BUY", ...)`` and accepts all of its
`optional parameters <#qtpylib.instrument.Instrument.order>`_
:Parameters:
quantity : int
Order quantity | Below is the the instruction that describes the task:
### Input:
Shortcut for ``instrument.order("BUY", ...)`` and accepts all of its
`optional parameters <#qtpylib.instrument.Instrument.order>`_
:Parameters:
quantity : int
Order quantity
### Response:
def buy(self, quantity, **kwargs):
""" Shortcut for ``instrument.order("BUY", ...)`` and accepts all of its
`optional parameters <#qtpylib.instrument.Instrument.order>`_
:Parameters:
quantity : int
Order quantity
"""
self.parent.order("BUY", self, quantity=quantity, **kwargs) |
def get_structure_with_only_magnetic_atoms(self, make_primitive=True):
"""
Returns a Structure with only magnetic atoms present.
:return: Structure
"""
sites = [site for site in self.structure if abs(site.properties["magmom"]) > 0]
structure = Structure.from_sites(sites)
if make_primitive:
structure = structure.get_primitive_structure(use_site_props=True)
return structure | Returns a Structure with only magnetic atoms present.
:return: Structure | Below is the the instruction that describes the task:
### Input:
Returns a Structure with only magnetic atoms present.
:return: Structure
### Response:
def get_structure_with_only_magnetic_atoms(self, make_primitive=True):
"""
Returns a Structure with only magnetic atoms present.
:return: Structure
"""
sites = [site for site in self.structure if abs(site.properties["magmom"]) > 0]
structure = Structure.from_sites(sites)
if make_primitive:
structure = structure.get_primitive_structure(use_site_props=True)
return structure |
def _get_motor_parameters(json_file):
"""Returns a dictionary with joints as keys, and a description (dict) of each joint as value"""
with open(json_file) as motor_fd:
global_config = json.load(motor_fd)
motors = global_config["motors"]
# Returned dict
motor_config = {}
# Add motor to the config
for motor in motors:
motor_config[motor] = motors[motor]
return motor_config | Returns a dictionary with joints as keys, and a description (dict) of each joint as value | Below is the the instruction that describes the task:
### Input:
Returns a dictionary with joints as keys, and a description (dict) of each joint as value
### Response:
def _get_motor_parameters(json_file):
"""Returns a dictionary with joints as keys, and a description (dict) of each joint as value"""
with open(json_file) as motor_fd:
global_config = json.load(motor_fd)
motors = global_config["motors"]
# Returned dict
motor_config = {}
# Add motor to the config
for motor in motors:
motor_config[motor] = motors[motor]
return motor_config |
def market_open(self, session, mins) -> Session:
"""
Time intervals for market open
Args:
session: [allday, day, am, pm, night]
mins: mintues after open
Returns:
Session of start_time and end_time
"""
if session not in self.exch: return SessNA
start_time = self.exch[session][0]
return Session(start_time, shift_time(start_time, int(mins))) | Time intervals for market open
Args:
session: [allday, day, am, pm, night]
mins: mintues after open
Returns:
Session of start_time and end_time | Below is the the instruction that describes the task:
### Input:
Time intervals for market open
Args:
session: [allday, day, am, pm, night]
mins: mintues after open
Returns:
Session of start_time and end_time
### Response:
def market_open(self, session, mins) -> Session:
"""
Time intervals for market open
Args:
session: [allday, day, am, pm, night]
mins: mintues after open
Returns:
Session of start_time and end_time
"""
if session not in self.exch: return SessNA
start_time = self.exch[session][0]
return Session(start_time, shift_time(start_time, int(mins))) |
def compute_capture(args):
x, y, w, h, params = args
"""Callable function for the multiprocessing pool."""
return x, y, mandelbrot_capture(x, y, w, h, params) | Callable function for the multiprocessing pool. | Below is the the instruction that describes the task:
### Input:
Callable function for the multiprocessing pool.
### Response:
def compute_capture(args):
x, y, w, h, params = args
"""Callable function for the multiprocessing pool."""
return x, y, mandelbrot_capture(x, y, w, h, params) |
def check_genes(self):
""" Assert that every DNA choice is represented by exactly one gene. """
gene_dna_set = set([g.dna for g in self.genes])
assert gene_dna_set == self.dna_choices_set | Assert that every DNA choice is represented by exactly one gene. | Below is the the instruction that describes the task:
### Input:
Assert that every DNA choice is represented by exactly one gene.
### Response:
def check_genes(self):
""" Assert that every DNA choice is represented by exactly one gene. """
gene_dna_set = set([g.dna for g in self.genes])
assert gene_dna_set == self.dna_choices_set |
def _fetch_metric(self, metric_name):
"""Fetch all the values of a named metric, and add them to _data
"""
request = {
'Namespace': self.CLOUDWATCH_NAMESPACE,
'MetricName': metric_name,
'Dimensions': [
{
'Name': 'TrainingJobName',
'Value': self.name
}
],
'StartTime': self._time_interval['start_time'],
'EndTime': self._time_interval['end_time'],
'Period': self._period,
'Statistics': ['Average'],
}
raw_cwm_data = self._cloudwatch.get_metric_statistics(**request)['Datapoints']
if len(raw_cwm_data) == 0:
logging.warning("Warning: No metrics called %s found" % metric_name)
return
# Process data: normalize to starting time, and sort.
base_time = min(raw_cwm_data, key=lambda pt: pt['Timestamp'])['Timestamp']
all_xy = []
for pt in raw_cwm_data:
y = pt['Average']
x = (pt['Timestamp'] - base_time).total_seconds()
all_xy.append([x, y])
all_xy = sorted(all_xy, key=lambda x: x[0])
# Store everything in _data to make a dataframe from
for elapsed_seconds, value in all_xy:
self._add_single_metric(elapsed_seconds, metric_name, value) | Fetch all the values of a named metric, and add them to _data | Below is the the instruction that describes the task:
### Input:
Fetch all the values of a named metric, and add them to _data
### Response:
def _fetch_metric(self, metric_name):
"""Fetch all the values of a named metric, and add them to _data
"""
request = {
'Namespace': self.CLOUDWATCH_NAMESPACE,
'MetricName': metric_name,
'Dimensions': [
{
'Name': 'TrainingJobName',
'Value': self.name
}
],
'StartTime': self._time_interval['start_time'],
'EndTime': self._time_interval['end_time'],
'Period': self._period,
'Statistics': ['Average'],
}
raw_cwm_data = self._cloudwatch.get_metric_statistics(**request)['Datapoints']
if len(raw_cwm_data) == 0:
logging.warning("Warning: No metrics called %s found" % metric_name)
return
# Process data: normalize to starting time, and sort.
base_time = min(raw_cwm_data, key=lambda pt: pt['Timestamp'])['Timestamp']
all_xy = []
for pt in raw_cwm_data:
y = pt['Average']
x = (pt['Timestamp'] - base_time).total_seconds()
all_xy.append([x, y])
all_xy = sorted(all_xy, key=lambda x: x[0])
# Store everything in _data to make a dataframe from
for elapsed_seconds, value in all_xy:
self._add_single_metric(elapsed_seconds, metric_name, value) |
def conversion_handler(self, name):
u"""
Возвращает обработчик конвертации с указанным именем
:param name: Имя обработчика
:return: callable
"""
try:
handler = self.conversion_table[name]
except KeyError:
raise KeyError((
u'Конвертирующий тип с именем {} отсутствует '
u'в таблице соответствия!'
).format(name))
return handler | u"""
Возвращает обработчик конвертации с указанным именем
:param name: Имя обработчика
:return: callable | Below is the the instruction that describes the task:
### Input:
u"""
Возвращает обработчик конвертации с указанным именем
:param name: Имя обработчика
:return: callable
### Response:
def conversion_handler(self, name):
u"""
Возвращает обработчик конвертации с указанным именем
:param name: Имя обработчика
:return: callable
"""
try:
handler = self.conversion_table[name]
except KeyError:
raise KeyError((
u'Конвертирующий тип с именем {} отсутствует '
u'в таблице соответствия!'
).format(name))
return handler |
def new_key(self, key_name=None, key_type=Key.KEY_REGULAR_FILE):
"""
Creates a new key
:type key_name: string
:param key_name: The name of the key to create
:rtype: :class:`boto.file.key.Key`
:returns: An instance of the newly created key object
"""
if key_name == '-':
return Key(self.name, '-', key_type=Key.KEY_STREAM_WRITABLE)
else:
dir_name = os.path.dirname(key_name)
if dir_name and not os.path.exists(dir_name):
os.makedirs(dir_name)
fp = open(key_name, 'wb')
return Key(self.name, key_name, fp) | Creates a new key
:type key_name: string
:param key_name: The name of the key to create
:rtype: :class:`boto.file.key.Key`
:returns: An instance of the newly created key object | Below is the the instruction that describes the task:
### Input:
Creates a new key
:type key_name: string
:param key_name: The name of the key to create
:rtype: :class:`boto.file.key.Key`
:returns: An instance of the newly created key object
### Response:
def new_key(self, key_name=None, key_type=Key.KEY_REGULAR_FILE):
"""
Creates a new key
:type key_name: string
:param key_name: The name of the key to create
:rtype: :class:`boto.file.key.Key`
:returns: An instance of the newly created key object
"""
if key_name == '-':
return Key(self.name, '-', key_type=Key.KEY_STREAM_WRITABLE)
else:
dir_name = os.path.dirname(key_name)
if dir_name and not os.path.exists(dir_name):
os.makedirs(dir_name)
fp = open(key_name, 'wb')
return Key(self.name, key_name, fp) |
def taper(self):
"""Taper the spectrum by adding zero throughput to each end.
This is similar to :meth:`TabularSourceSpectrum.taper`.
There is no check to see if the spectrum is already tapered.
Hence, calling this on a tapered spectrum will result in
multiple zero-throughput entries at both ends.
The wavelengths to use for the new first and last points are
calculated by using the same ratio as for the two interior points
used at each end.
Returns
-------
OutElement : `TabularSpectralElement`
Tapered spectrum.
"""
OutElement = TabularSpectralElement()
wcopy = N.zeros(self._wavetable.size + 2, dtype=N.float64)
fcopy = N.zeros(self._throughputtable.size + 2, dtype=N.float64)
wcopy[1:-1] = self._wavetable
fcopy[1:-1] = self._throughputtable
fcopy[0] = 0.0
fcopy[-1] = 0.0
# The wavelengths to use for the first and last points are
# calculated by using the same ratio as for the 2 interior points
wcopy[0] = wcopy[1]*wcopy[1]/wcopy[2]
wcopy[-1] = wcopy[-2]*wcopy[-2]/wcopy[-3]
OutElement._wavetable = wcopy
OutElement._throughputtable = fcopy
return OutElement | Taper the spectrum by adding zero throughput to each end.
This is similar to :meth:`TabularSourceSpectrum.taper`.
There is no check to see if the spectrum is already tapered.
Hence, calling this on a tapered spectrum will result in
multiple zero-throughput entries at both ends.
The wavelengths to use for the new first and last points are
calculated by using the same ratio as for the two interior points
used at each end.
Returns
-------
OutElement : `TabularSpectralElement`
Tapered spectrum. | Below is the the instruction that describes the task:
### Input:
Taper the spectrum by adding zero throughput to each end.
This is similar to :meth:`TabularSourceSpectrum.taper`.
There is no check to see if the spectrum is already tapered.
Hence, calling this on a tapered spectrum will result in
multiple zero-throughput entries at both ends.
The wavelengths to use for the new first and last points are
calculated by using the same ratio as for the two interior points
used at each end.
Returns
-------
OutElement : `TabularSpectralElement`
Tapered spectrum.
### Response:
def taper(self):
"""Taper the spectrum by adding zero throughput to each end.
This is similar to :meth:`TabularSourceSpectrum.taper`.
There is no check to see if the spectrum is already tapered.
Hence, calling this on a tapered spectrum will result in
multiple zero-throughput entries at both ends.
The wavelengths to use for the new first and last points are
calculated by using the same ratio as for the two interior points
used at each end.
Returns
-------
OutElement : `TabularSpectralElement`
Tapered spectrum.
"""
OutElement = TabularSpectralElement()
wcopy = N.zeros(self._wavetable.size + 2, dtype=N.float64)
fcopy = N.zeros(self._throughputtable.size + 2, dtype=N.float64)
wcopy[1:-1] = self._wavetable
fcopy[1:-1] = self._throughputtable
fcopy[0] = 0.0
fcopy[-1] = 0.0
# The wavelengths to use for the first and last points are
# calculated by using the same ratio as for the 2 interior points
wcopy[0] = wcopy[1]*wcopy[1]/wcopy[2]
wcopy[-1] = wcopy[-2]*wcopy[-2]/wcopy[-3]
OutElement._wavetable = wcopy
OutElement._throughputtable = fcopy
return OutElement |
def compile_model(self, target_instance_family, input_shape, output_path, framework=None, framework_version=None,
compile_max_run=5 * 60, tags=None, **kwargs):
"""Compile a Neo model using the input model.
Args:
target_instance_family (str): Identifies the device that you want to run your model after compilation, for
example: ml_c5. Allowed strings are: ml_c5, ml_m5, ml_c4, ml_m4, jetsontx1, jetsontx2, ml_p2, ml_p3,
deeplens, rasp3b
input_shape (dict): Specifies the name and shape of the expected inputs for your trained model in json
dictionary form, for example: {'data':[1,3,1024,1024]}, or {'var1': [1,1,28,28], 'var2':[1,1,28,28]}
output_path (str): Specifies where to store the compiled model
framework (str): The framework that is used to train the original model. Allowed values: 'mxnet',
'tensorflow', 'pytorch', 'onnx', 'xgboost'
framework_version (str): The version of the framework
compile_max_run (int): Timeout in seconds for compilation (default: 3 * 60).
After this amount of time Amazon SageMaker Neo terminates the compilation job regardless of its
current status.
tags (list[dict]): List of tags for labeling a compilation job. For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
**kwargs: Passed to invocation of ``create_model()``. Implementations may customize
``create_model()`` to accept ``**kwargs`` to customize model creation during deploy.
For more, see the implementation docs.
Returns:
sagemaker.model.Model: A SageMaker ``Model`` object. See :func:`~sagemaker.model.Model` for full details.
"""
if target_instance_family not in NEO_ALLOWED_TARGET_INSTANCE_FAMILY:
raise ValueError("Please use valid target_instance_family,"
"allowed values: {}".format(NEO_ALLOWED_TARGET_INSTANCE_FAMILY))
if framework and framework not in NEO_ALLOWED_FRAMEWORKS:
raise ValueError("Please use valid framework, allowed values: {}".format(NEO_ALLOWED_FRAMEWORKS))
if (framework is None) != (framework_version is None):
raise ValueError("You should provide framework and framework_version at the same time.")
model = self.create_model(**kwargs)
self._compiled_models[target_instance_family] = model.compile(target_instance_family,
input_shape,
output_path,
self.role,
tags,
self._compilation_job_name(),
compile_max_run,
framework=framework,
framework_version=framework_version)
return self._compiled_models[target_instance_family] | Compile a Neo model using the input model.
Args:
target_instance_family (str): Identifies the device that you want to run your model after compilation, for
example: ml_c5. Allowed strings are: ml_c5, ml_m5, ml_c4, ml_m4, jetsontx1, jetsontx2, ml_p2, ml_p3,
deeplens, rasp3b
input_shape (dict): Specifies the name and shape of the expected inputs for your trained model in json
dictionary form, for example: {'data':[1,3,1024,1024]}, or {'var1': [1,1,28,28], 'var2':[1,1,28,28]}
output_path (str): Specifies where to store the compiled model
framework (str): The framework that is used to train the original model. Allowed values: 'mxnet',
'tensorflow', 'pytorch', 'onnx', 'xgboost'
framework_version (str): The version of the framework
compile_max_run (int): Timeout in seconds for compilation (default: 3 * 60).
After this amount of time Amazon SageMaker Neo terminates the compilation job regardless of its
current status.
tags (list[dict]): List of tags for labeling a compilation job. For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
**kwargs: Passed to invocation of ``create_model()``. Implementations may customize
``create_model()`` to accept ``**kwargs`` to customize model creation during deploy.
For more, see the implementation docs.
Returns:
sagemaker.model.Model: A SageMaker ``Model`` object. See :func:`~sagemaker.model.Model` for full details. | Below is the the instruction that describes the task:
### Input:
Compile a Neo model using the input model.
Args:
target_instance_family (str): Identifies the device that you want to run your model after compilation, for
example: ml_c5. Allowed strings are: ml_c5, ml_m5, ml_c4, ml_m4, jetsontx1, jetsontx2, ml_p2, ml_p3,
deeplens, rasp3b
input_shape (dict): Specifies the name and shape of the expected inputs for your trained model in json
dictionary form, for example: {'data':[1,3,1024,1024]}, or {'var1': [1,1,28,28], 'var2':[1,1,28,28]}
output_path (str): Specifies where to store the compiled model
framework (str): The framework that is used to train the original model. Allowed values: 'mxnet',
'tensorflow', 'pytorch', 'onnx', 'xgboost'
framework_version (str): The version of the framework
compile_max_run (int): Timeout in seconds for compilation (default: 3 * 60).
After this amount of time Amazon SageMaker Neo terminates the compilation job regardless of its
current status.
tags (list[dict]): List of tags for labeling a compilation job. For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
**kwargs: Passed to invocation of ``create_model()``. Implementations may customize
``create_model()`` to accept ``**kwargs`` to customize model creation during deploy.
For more, see the implementation docs.
Returns:
sagemaker.model.Model: A SageMaker ``Model`` object. See :func:`~sagemaker.model.Model` for full details.
### Response:
def compile_model(self, target_instance_family, input_shape, output_path, framework=None, framework_version=None,
compile_max_run=5 * 60, tags=None, **kwargs):
"""Compile a Neo model using the input model.
Args:
target_instance_family (str): Identifies the device that you want to run your model after compilation, for
example: ml_c5. Allowed strings are: ml_c5, ml_m5, ml_c4, ml_m4, jetsontx1, jetsontx2, ml_p2, ml_p3,
deeplens, rasp3b
input_shape (dict): Specifies the name and shape of the expected inputs for your trained model in json
dictionary form, for example: {'data':[1,3,1024,1024]}, or {'var1': [1,1,28,28], 'var2':[1,1,28,28]}
output_path (str): Specifies where to store the compiled model
framework (str): The framework that is used to train the original model. Allowed values: 'mxnet',
'tensorflow', 'pytorch', 'onnx', 'xgboost'
framework_version (str): The version of the framework
compile_max_run (int): Timeout in seconds for compilation (default: 3 * 60).
After this amount of time Amazon SageMaker Neo terminates the compilation job regardless of its
current status.
tags (list[dict]): List of tags for labeling a compilation job. For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
**kwargs: Passed to invocation of ``create_model()``. Implementations may customize
``create_model()`` to accept ``**kwargs`` to customize model creation during deploy.
For more, see the implementation docs.
Returns:
sagemaker.model.Model: A SageMaker ``Model`` object. See :func:`~sagemaker.model.Model` for full details.
"""
if target_instance_family not in NEO_ALLOWED_TARGET_INSTANCE_FAMILY:
raise ValueError("Please use valid target_instance_family,"
"allowed values: {}".format(NEO_ALLOWED_TARGET_INSTANCE_FAMILY))
if framework and framework not in NEO_ALLOWED_FRAMEWORKS:
raise ValueError("Please use valid framework, allowed values: {}".format(NEO_ALLOWED_FRAMEWORKS))
if (framework is None) != (framework_version is None):
raise ValueError("You should provide framework and framework_version at the same time.")
model = self.create_model(**kwargs)
self._compiled_models[target_instance_family] = model.compile(target_instance_family,
input_shape,
output_path,
self.role,
tags,
self._compilation_job_name(),
compile_max_run,
framework=framework,
framework_version=framework_version)
return self._compiled_models[target_instance_family] |
def add_items(self, data_dict, root=None, target=None):
    """Recursively populate a tree control from a nested dictionary.

    Nested dicts become branch nodes; every other value becomes a
    ``"key : value"`` leaf. Keys are inserted in sorted order.

    :param data_dict: dict of tree data
    :param root: treeitemid of tree root
    :param target: treectrl obj
    """
    if root is None:
        print("Warning: TreeCtrl root must be given.")
        return
    if target is None:
        print("Warning: TreeCtrl target must be given.")
        return
    for key in sorted(data_dict):
        value = data_dict[key]
        if isinstance(value, dict):
            # Branch node: create it, then descend into the sub-dict.
            branch = target.AppendItem(root, key)
            self.add_items(value, branch, target)
        else:
            # Leaf node: render the pair as "key : value".
            target.AppendItem(root, ' : '.join((key, str(value))))
:param data_dict: dict of tree data
:param root: treeitemid of tree root
:param target: treectrl obj | Below is the the instruction that describes the task:
### Input:
add items for tree
:param data_dict: dict of tree data
:param root: treeitemid of tree root
:param target: treectrl obj
### Response:
def add_items(self, data_dict, root=None, target=None):
""" add items for tree
:param data_dict: dict of tree data
:param root: treeitemid of tree root
:param target: treectrl obj
"""
if root is None:
print("Warning: TreeCtrl root must be given.")
return
if target is None:
print("Warning: TreeCtrl target must be given.")
return
for k in sorted(data_dict):
if isinstance(data_dict[k], dict):
k_item_root = target.AppendItem(root, k)
self.add_items(data_dict[k], k_item_root, target)
else:
item_val = ' : '.join((k, str(data_dict[k])))
target.AppendItem(root, item_val) |
def addcomment(self, order_increment_id,
               status, comment=None, notify=False):
    """
    Add comment to order or change its state

    :param order_increment_id: Order ID
    :param status: new order status
    :param comment: optional comment text; defaults to an empty string
    :param notify: whether to notify the customer
    :return: True if the API call succeeded
    TODO: Identify possible values for status
    """
    # The remote API expects a string, so a missing comment becomes "".
    text = "" if comment is None else comment
    result = self.call(
        'sales_order.addComment',
        [order_increment_id, status, text, notify]
    )
    return bool(result)
:param order_increment_id: Order ID
TODO: Identify possible values for status | Below is the the instruction that describes the task:
### Input:
Add comment to order or change its state
:param order_increment_id: Order ID
TODO: Identify possible values for status
### Response:
def addcomment(self, order_increment_id,
status, comment=None, notify=False):
"""
Add comment to order or change its state
:param order_increment_id: Order ID
TODO: Identify possible values for status
"""
if comment is None:
comment = ""
return bool(self.call(
'sales_order.addComment',
[order_increment_id, status, comment, notify]
)
) |
def is_stream_handler(self, request):
    """Tell whether the handler resolved for ``request`` streams its response.

    For class-based views the check is delegated to the method matching
    the request's HTTP verb.

    :param request: Request object
    :return: bool
    """
    handler = self.get(request)[0]
    verb = request.method.lower()
    view_cls = getattr(handler, 'view_class', None)
    if view_cls is not None and hasattr(view_cls, verb):
        handler = getattr(view_cls, verb)
    return hasattr(handler, 'is_stream')
:param request: Request object
:return: bool | Below is the the instruction that describes the task:
### Input:
Handler for request is stream or not.
:param request: Request object
:return: bool
### Response:
def is_stream_handler(self, request):
""" Handler for request is stream or not.
:param request: Request object
:return: bool
"""
handler = self.get(request)[0]
if (hasattr(handler, 'view_class') and
hasattr(handler.view_class, request.method.lower())):
handler = getattr(handler.view_class, request.method.lower())
return hasattr(handler, 'is_stream') |
def get_proxy_ticket(self, pgt):
    """Return a proxy ticket for the given proxy granting ticket.

    Raises CASError on any HTTP failure, on a CAS authentication
    failure, or when the reply carries neither a single ticket nor a
    single failure element.
    """
    response = requests.get(self.get_proxy_url(pgt))
    if response.status_code == 200:
        from lxml import etree
        ns = {"cas": "http://www.yale.edu/tp/cas"}
        root = etree.fromstring(response.content)
        tickets = root.xpath("//cas:proxyTicket", namespaces=ns)
        if len(tickets) == 1:
            return tickets[0].text
        errors = root.xpath("//cas:authenticationFailure", namespaces=ns)
        if len(errors) == 1:
            raise CASError(errors[0].attrib['code'], errors[0].text)
    # Reached on non-200 replies, and on 200 replies whose body did not
    # contain exactly one ticket or one failure element.
    raise CASError("Bad http code %s" % response.status_code)
### Input:
Returns proxy ticket given the proxy granting ticket
### Response:
def get_proxy_ticket(self, pgt):
"""Returns proxy ticket given the proxy granting ticket"""
response = requests.get(self.get_proxy_url(pgt))
if response.status_code == 200:
from lxml import etree
root = etree.fromstring(response.content)
tickets = root.xpath(
"//cas:proxyTicket",
namespaces={"cas": "http://www.yale.edu/tp/cas"}
)
if len(tickets) == 1:
return tickets[0].text
errors = root.xpath(
"//cas:authenticationFailure",
namespaces={"cas": "http://www.yale.edu/tp/cas"}
)
if len(errors) == 1:
raise CASError(errors[0].attrib['code'], errors[0].text)
raise CASError("Bad http code %s" % response.status_code) |
def build_url(self):
    """Build appropiate encoded URL.
    This implies the same way of searching a torrent as in the page itself.
    """
    # Percent-encode the search string exactly as the site would.
    base = requests.utils.requote_uri(
        self.torrent_page + self.string_search)
    # Some trackers need a trailing path segment on the search URL.
    suffix = {'1337x': '/1/', 'limetorrents': '/'}.get(self.page, '')
    return base + suffix
This implies the same way of searching a torrent as in the page itself. | Below is the the instruction that describes the task:
### Input:
Build appropiate encoded URL.
This implies the same way of searching a torrent as in the page itself.
### Response:
def build_url(self):
"""Build appropiate encoded URL.
This implies the same way of searching a torrent as in the page itself.
"""
url = requests.utils.requote_uri(
self.torrent_page + self.string_search)
if self.page == '1337x':
return(url + '/1/')
elif self.page == 'limetorrents':
return(url + '/')
else:
return(url) |
def default_profiler(f, _type, _value):
    ''' inspects an input frame and pretty prints the following:
    <src-path>:<src-line> -> <function-name>
    <source-code>
    <local-variables>
    ----------------------------------------
    '''
    try:
        profile_print(
            '\n'.join([
                get_frame_src(f),
                get_locals(f),
                '----------------------------------------'
            ])
        )
    except Exception:
        # Profiling is best-effort: never let a formatting failure break
        # the traced program.  The original bare ``except:`` also swallowed
        # KeyboardInterrupt/SystemExit, so catch only Exception.
        pass
<src-path>:<src-line> -> <function-name>
<source-code>
<local-variables>
---------------------------------------- | Below is the the instruction that describes the task:
### Input:
inspects an input frame and pretty prints the following:
<src-path>:<src-line> -> <function-name>
<source-code>
<local-variables>
----------------------------------------
### Response:
def default_profiler(f, _type, _value):
''' inspects an input frame and pretty prints the following:
<src-path>:<src-line> -> <function-name>
<source-code>
<local-variables>
----------------------------------------
'''
try:
profile_print(
'\n'.join([
get_frame_src(f),
get_locals(f),
'----------------------------------------'
])
)
except:
pass |
def forward(self, inputs, begin_state):  # pylint: disable=arguments-differ
    """Run the language model over a batch of token indices.

    Parameters
    -----------
    inputs : NDArray
        input tensor with shape `(sequence_length, batch_size)`
        when `layout` is "TNC".
    begin_state : list
        initial recurrent state tensors with length equal to num_layers*2.
        For each layer the two initial states have shape
        `(batch_size, num_hidden)` and `(batch_size, num_projection)`

    Returns
    --------
    out : NDArray
        output tensor with shape `(sequence_length, batch_size, vocab_size)`
        when `layout` is "TNC".
    out_states : list
        final recurrent state tensors with length equal to num_layers*2.
        For each layer the two states have shape
        `(batch_size, num_hidden)` and `(batch_size, num_projection)`
    """
    seq_len, batch_size = inputs.shape[0], inputs.shape[1]
    embedded = self.embedding(inputs)
    encoded, out_states = self.encoder.unroll(seq_len, embedded, begin_state,
                                              layout='TNC',
                                              merge_outputs=True)
    # Collapse the time and batch axes so the dense decoder sees a 2-D
    # input, then restore the time-major layout for the caller.
    flat = encoded.reshape((-1, self._projection_size))
    out = self.decoder(flat).reshape((seq_len, batch_size, -1))
    return out, out_states
Parameters
-----------
inputs : NDArray
input tensor with shape `(sequence_length, batch_size)`
when `layout` is "TNC".
begin_state : list
initial recurrent state tensor with length equals to num_layers*2.
For each layer the two initial states have shape `(batch_size, num_hidden)`
and `(batch_size, num_projection)`
Returns
--------
out : NDArray
output tensor with shape `(sequence_length, batch_size, vocab_size)`
when `layout` is "TNC".
out_states : list
output recurrent state tensor with length equals to num_layers*2.
For each layer the two initial states have shape `(batch_size, num_hidden)`
and `(batch_size, num_projection)` | Below is the the instruction that describes the task:
### Input:
Implement forward computation.
Parameters
-----------
inputs : NDArray
input tensor with shape `(sequence_length, batch_size)`
when `layout` is "TNC".
begin_state : list
initial recurrent state tensor with length equals to num_layers*2.
For each layer the two initial states have shape `(batch_size, num_hidden)`
and `(batch_size, num_projection)`
Returns
--------
out : NDArray
output tensor with shape `(sequence_length, batch_size, vocab_size)`
when `layout` is "TNC".
out_states : list
output recurrent state tensor with length equals to num_layers*2.
For each layer the two initial states have shape `(batch_size, num_hidden)`
and `(batch_size, num_projection)`
### Response:
def forward(self, inputs, begin_state): # pylint: disable=arguments-differ
"""Implement forward computation.
Parameters
-----------
inputs : NDArray
input tensor with shape `(sequence_length, batch_size)`
when `layout` is "TNC".
begin_state : list
initial recurrent state tensor with length equals to num_layers*2.
For each layer the two initial states have shape `(batch_size, num_hidden)`
and `(batch_size, num_projection)`
Returns
--------
out : NDArray
output tensor with shape `(sequence_length, batch_size, vocab_size)`
when `layout` is "TNC".
out_states : list
output recurrent state tensor with length equals to num_layers*2.
For each layer the two initial states have shape `(batch_size, num_hidden)`
and `(batch_size, num_projection)`
"""
encoded = self.embedding(inputs)
length = inputs.shape[0]
batch_size = inputs.shape[1]
encoded, state = self.encoder.unroll(length, encoded, begin_state,
layout='TNC', merge_outputs=True)
encoded = encoded.reshape((-1, self._projection_size))
out = self.decoder(encoded)
out = out.reshape((length, batch_size, -1))
return out, state |
def is_dev_version(cls):
    """
    Check if the current branch is `dev`.
    """
    # `git branch` marks the currently checked-out branch with a leading
    # asterisk; look for "dev" on that line only.
    # NOTE(review): the substring test also matches names such as
    # "develop" -- confirm this is intended.
    output = Command("git branch").execute()
    return any(
        line.startswith("*") and "dev" in line
        for line in output.split("\n")
    )
### Input:
Check if the current branch is `dev`.
### Response:
def is_dev_version(cls):
"""
Check if the current branch is `dev`.
"""
# We initiate the command we have to run in order to
# get the branch we are currently working with.
command = "git branch"
# We execute and get the command output.
command_result = Command(command).execute()
for branch in command_result.split("\n"):
# We loop through each line of the command output.
if branch.startswith("*") and "dev" in branch:
# The current branch is `dev`.
# We return True.
return True
# The current branch is not `dev`.
# We return False.
return False |
def make_filenames(self, **kwargs):
    """ Make a dictionary of filenames for various types

    Each entry maps a file-type key to the name produced by the
    corresponding builder method, with ``kwargs`` forwarded verbatim.
    """
    builders = {'ft1file': self.ft1file,
                'ltcube': self.ltcube,
                'ccube': self.ccube,
                'bexpcube': self.bexpcube,
                'srcmaps': self.srcmaps,
                'mcube': self.mcube}
    return {key: build(**kwargs) for key, build in builders.items()}
### Input:
Make a dictionary of filenames for various types
### Response:
def make_filenames(self, **kwargs):
""" Make a dictionary of filenames for various types
"""
out_dict = dict(ft1file=self.ft1file(**kwargs),
ltcube=self.ltcube(**kwargs),
ccube=self.ccube(**kwargs),
bexpcube=self.bexpcube(**kwargs),
srcmaps=self.srcmaps(**kwargs),
mcube=self.mcube(**kwargs))
return out_dict |
def get_balance_on(self, on_date: datetime) -> Decimal:
    """ Returns the balance on (and including) a certain date """
    assert isinstance(on_date, datetime)
    # Sum every split quantity up to the date, signed per account type.
    sign = self.account.sign
    return sum(
        (split.quantity * sign for split in self.get_splits_up_to(on_date)),
        Decimal(0),
    )
### Input:
Returns the balance on (and including) a certain date
### Response:
def get_balance_on(self, on_date: datetime) -> Decimal:
""" Returns the balance on (and including) a certain date """
assert isinstance(on_date, datetime)
total = Decimal(0)
splits = self.get_splits_up_to(on_date)
for split in splits:
total += split.quantity * self.account.sign
return total |
def random(self, numnodes = 10, degree_range = (2, 4), length_range = (1, 10),
           density = None, edge_format = None, node_format = None,
           Euclidean = False, seedInput = 0, add_labels = True,
           parallel_allowed = False, node_selection = 'closest',
           scale = 10, scale_cost = 5):
    '''
    API:
        random(self, numnodes = 10, degree_range = (2, 4),
               length_range = (1, 10), density = None, edge_format = None,
               node_format = None, Euclidean = False, seedInput = 0,
               add_labels = True, parallel_allowed = False,
               node_selection = 'closest', scale = 10, scale_cost = 5)
    Description:
        Populates graph with random edges and nodes.
    Input:
        numnodes: Number of nodes to add.
        degree_range: A tuple that has lower and upper bounds of degree for
        a node.
        length_range: A tuple that has lower and upper bounds for 'cost'
        attribute of edges.
        density: Density of edges, ie. 0.5 indicates a node will
        approximately have edge to half of the other nodes.
        edge_format: Dictionary that specifies attribute values for edges.
        node_format: Dictionary that specifies attribute values for nodes.
        Euclidean: Creates an Euclidean graph (Euclidean distance between
        nodes) if True.
        seedInput: Seed that will be used for random number generation.
        add_labels: If True, each edge cost is also stored as a 'label'
        attribute so it is shown when the graph is rendered.
        parallel_allowed: If True, both (m, n) and (n, m) edges may exist.
        node_selection: 'random' or 'closest'; how neighbors are chosen in
        the Euclidean degree-based mode.
        scale: Side length of the square from which Euclidean coordinates
        are drawn.
        scale_cost: Multiplier applied to Euclidean distances when they are
        used as edge costs.
    Pre:
        It is recommended to call this method on empty Graph objects.
    Post:
        Graph will be populated by nodes and edges.
    '''
    random.seed(seedInput)
    if edge_format is None:
        edge_format = {'fontsize': 10,
                       'fontcolor': 'blue'}
    if node_format is None:
        node_format = {'height': 0.5,
                       'width': 0.5,
                       'fixedsize': 'true',
                       'fontsize': 10,
                       'fontcolor': 'red',
                       'shape': 'circle',
                       }
    if Euclidean == False:
        for m in range(numnodes):
            self.add_node(m, **node_format)
        if degree_range is not None and density is None:
            for m in range(numnodes):
                degree = random.randint(degree_range[0], degree_range[1])
                i = 0
                while i < degree:
                    # NOTE(review): node 0 can never be drawn as a neighbor
                    # here -- confirm whether the lower bound should be 0.
                    n = random.randint(1, numnodes - 1)
                    if (((m, n) not in self.edge_attr and m != n) and
                        (parallel_allowed or (n, m) not in self.edge_attr)):
                        if length_range is not None:
                            length = random.randint(length_range[0],
                                                    length_range[1])
                            self.add_edge(m, n, cost = length, **edge_format)
                            if add_labels:
                                self.set_edge_attr(m, n, 'label', str(length))
                        else:
                            self.add_edge(m, n, **edge_format)
                        i += 1
        elif density is not None:
            for m in range(numnodes):
                # Directed graphs consider all ordered pairs; undirected
                # graphs only pairs n < m to avoid duplicates.
                if self.graph_type == DIRECTED_GRAPH:
                    numnodes2 = numnodes
                else:
                    numnodes2 = m
                for n in range(numnodes2):
                    if ((parallel_allowed or (n, m) not in self.edge_attr)
                        and m != n):
                        if random.random() < density:
                            if length_range is not None:
                                length = random.randint(length_range[0],
                                                        length_range[1])
                                self.add_edge(m, n, cost = length,
                                              **edge_format)
                                if add_labels:
                                    self.set_edge_attr(m, n, 'label',
                                                       str(length))
                            else:
                                self.add_edge(m, n, **edge_format)
        else:
            print("Must set either degree range or density")
    else:
        for m in range(numnodes):
            # Assign random coordinates inside a scale x scale square.
            x = random.random() * scale
            y = random.random() * scale
            self.add_node(m, locationx = x, locationy = y,
                          pos = '"' + str(x) + "," + str(y) + '!"',
                          **node_format)
        if degree_range is not None and density is None:
            for m in range(numnodes):
                degree = random.randint(degree_range[0], degree_range[1])
                i = 0
                # Fix: the original 'random' branch referenced ``n`` before
                # assigning it (NameError on first use) and collected draws
                # into an unused ``neighbors`` list.  Draw the candidate
                # neighbor first, then test and connect it.  String
                # comparisons now use ``==`` instead of identity ``is``.
                if node_selection == 'random':
                    while i < degree:
                        n = random.randint(0, numnodes - 1)
                        length = round(
                            (((self.get_node(n).get_attr('locationx') -
                               self.get_node(m).get_attr('locationx')) ** 2 +
                              (self.get_node(n).get_attr('locationy') -
                               self.get_node(m).get_attr('locationy')) ** 2)
                             ** 0.5) * scale_cost, 0)
                        if (((m, n) not in self.edge_attr and m != n) and
                            (parallel_allowed or
                             (n, m) not in self.edge_attr)):
                            self.add_edge(m, n, cost = int(length),
                                          **edge_format)
                            if add_labels:
                                self.set_edge_attr(m, n, 'label',
                                                   str(int(length)))
                            i += 1
                elif node_selection == 'closest':
                    # Connect m to its `degree` nearest distinct neighbors.
                    lengths = []
                    for n in range(numnodes):
                        lengths.append(
                            (n, round(
                                (((self.get_node(n).get_attr('locationx') -
                                   self.get_node(m).get_attr('locationx'))
                                  ** 2 +
                                  (self.get_node(n).get_attr('locationy') -
                                   self.get_node(m).get_attr('locationy'))
                                  ** 2) ** 0.5) * scale_cost, 0)))
                    lengths.sort(key = lambda l: l[1])
                    # degree + 1 candidates: the closest one is m itself.
                    for i in range(degree + 1):
                        if not (lengths[i][0] == m or
                                self.check_edge(m, lengths[i][0])):
                            self.add_edge(m, lengths[i][0],
                                          cost = int(lengths[i][1]),
                                          **edge_format)
                            if add_labels:
                                self.set_edge_attr(m, lengths[i][0], 'label',
                                                   str(int(lengths[i][1])))
                else:
                    print("Unknown node selection rule...exiting")
                    return
        elif density is not None:
            for m in range(numnodes):
                if self.graph_type == DIRECTED_GRAPH:
                    numnodes2 = numnodes
                else:
                    numnodes2 = m
                for n in range(numnodes2):
                    if ((parallel_allowed or (n, m) not in self.edge_attr)
                        and m != n):
                        if random.random() < density:
                            if length_range is None:
                                # Use the rounded Euclidean distance as the
                                # edge cost.
                                length = round(
                                    (((self.get_node(n).get_attr('locationx') -
                                       self.get_node(m).get_attr('locationx'))
                                      ** 2 +
                                      (self.get_node(n).get_attr('locationy') -
                                       self.get_node(m).get_attr('locationy'))
                                      ** 2) ** 0.5), 0)
                                self.add_edge(m, n, cost = int(length),
                                              **edge_format)
                                if add_labels:
                                    self.set_edge_attr(m, n, 'label',
                                                       str(int(length)))
                            else:
                                self.add_edge(m, n, **edge_format)
        else:
            print("Must set either degree range or density")
random(self, numnodes = 10, degree_range = None, length_range = None,
density = None, edge_format = None, node_format = None,
Euclidean = False, seedInput = 0)
Description:
Populates graph with random edges and nodes.
Input:
numnodes: Number of nodes to add.
degree_range: A tuple that has lower and upper bounds of degree for
a node.
length_range: A tuple that has lower and upper bounds for 'cost'
attribute of edges.
density: Density of edges, ie. 0.5 indicates a node will
approximately have edge to half of the other nodes.
edge_format: Dictionary that specifies attribute values for edges.
node_format: Dictionary that specifies attribute values for nodes.
Euclidean: Creates an Euclidean graph (Euclidean distance between
nodes) if True.
seedInput: Seed that will be used for random number generation.
Pre:
It is recommended to call this method on empty Graph objects.
Post:
Graph will be populated by nodes and edges. | Below is the the instruction that describes the task:
### Input:
API:
random(self, numnodes = 10, degree_range = None, length_range = None,
density = None, edge_format = None, node_format = None,
Euclidean = False, seedInput = 0)
Description:
Populates graph with random edges and nodes.
Input:
numnodes: Number of nodes to add.
degree_range: A tuple that has lower and upper bounds of degree for
a node.
length_range: A tuple that has lower and upper bounds for 'cost'
attribute of edges.
density: Density of edges, ie. 0.5 indicates a node will
approximately have edge to half of the other nodes.
edge_format: Dictionary that specifies attribute values for edges.
node_format: Dictionary that specifies attribute values for nodes.
Euclidean: Creates an Euclidean graph (Euclidean distance between
nodes) if True.
seedInput: Seed that will be used for random number generation.
Pre:
It is recommended to call this method on empty Graph objects.
Post:
Graph will be populated by nodes and edges.
### Response:
def random(self, numnodes = 10, degree_range = (2, 4), length_range = (1, 10),
density = None, edge_format = None, node_format = None,
Euclidean = False, seedInput = 0, add_labels = True,
parallel_allowed = False, node_selection = 'closest',
scale = 10, scale_cost = 5):
'''
API:
random(self, numnodes = 10, degree_range = None, length_range = None,
density = None, edge_format = None, node_format = None,
Euclidean = False, seedInput = 0)
Description:
Populates graph with random edges and nodes.
Input:
numnodes: Number of nodes to add.
degree_range: A tuple that has lower and upper bounds of degree for
a node.
length_range: A tuple that has lower and upper bounds for 'cost'
attribute of edges.
density: Density of edges, ie. 0.5 indicates a node will
approximately have edge to half of the other nodes.
edge_format: Dictionary that specifies attribute values for edges.
node_format: Dictionary that specifies attribute values for nodes.
Euclidean: Creates an Euclidean graph (Euclidean distance between
nodes) if True.
seedInput: Seed that will be used for random number generation.
Pre:
It is recommended to call this method on empty Graph objects.
Post:
Graph will be populated by nodes and edges.
'''
random.seed(seedInput)
if edge_format == None:
edge_format = {'fontsize':10,
'fontcolor':'blue'}
if node_format == None:
node_format = {'height':0.5,
'width':0.5,
'fixedsize':'true',
'fontsize':10,
'fontcolor':'red',
'shape':'circle',
}
if Euclidean == False:
for m in range(numnodes):
self.add_node(m, **node_format)
if degree_range is not None and density is None:
for m in range(numnodes):
degree = random.randint(degree_range[0], degree_range[1])
i = 0
while i < degree:
n = random.randint(1, numnodes-1)
if (((m,n) not in self.edge_attr and m != n) and
(parallel_allowed or (n, m) not in self.edge_attr)):
if length_range is not None:
length = random.randint(length_range[0],
length_range[1])
self.add_edge(m, n, cost = length, **edge_format)
if add_labels:
self.set_edge_attr(m, n, 'label', str(length))
else:
self.add_edge(m, n, **edge_format)
i += 1
elif density != None:
for m in range(numnodes):
if self.graph_type == DIRECTED_GRAPH:
numnodes2 = numnodes
else:
numnodes2 = m
for n in range(numnodes2):
if ((parallel_allowed or (n, m) not in self.edge_attr)
and m != n):
if random.random() < density:
if length_range is not None:
length = random.randint(length_range[0],
length_range[1])
self.add_edge(m, n, cost = length,
**edge_format)
if add_labels:
self.set_edge_attr(m, n, 'label', str(length))
else:
self.add_edge(m, n, **edge_format)
else:
print("Must set either degree range or density")
else:
for m in range(numnodes):
''' Assigns random coordinates (between 1 and 20) to the nodes
'''
x = random.random()*scale
y = random.random()*scale
self.add_node(m, locationx = x, locationy = y,
pos = '"'+str(x) + "," + str(y)+'!"',
**node_format)
if degree_range is not None and density is None:
for m in range(numnodes):
degree = random.randint(degree_range[0], degree_range[1])
i = 0
neighbors = []
if node_selection is 'random':
while i < degree:
length = round((((self.get_node(n).get_attr('locationx') -
self.get_node(m).get_attr('locationx')) ** 2 +
(self.get_node(n).get_attr('locationy') -
self.get_node(m).get_attr('locationy')) ** 2) ** 0.5)*scale_cost,
0)
if (((m,n) not in self.edge_attr and m != n) and
(parallel_allowed or (n, m) not in self.edge_attr)):
neighbors.append(random.randint(0, numnodes-1))
self.add_edge(m, n, cost = int(length), **edge_format)
if add_labels:
self.set_edge_attr(m, n, 'label', str(int(length)))
i += 1
elif node_selection is 'closest':
lengths = []
for n in range(numnodes):
lengths.append((n, round((((self.get_node(n).get_attr('locationx') -
self.get_node(m).get_attr('locationx')) ** 2 +
(self.get_node(n).get_attr('locationy') -
self.get_node(m).get_attr('locationy')) ** 2) ** 0.5)*scale_cost,
0)))
lengths.sort(key = lambda l : l[1])
for i in range(degree+1):
if not (lengths[i][0] == m or self.check_edge(m, lengths[i][0])):
self.add_edge(m, lengths[i][0], cost = int(lengths[i][1]), **edge_format)
if add_labels:
self.set_edge_attr(m, lengths[i][0], 'label', str(int(lengths[i][1])))
else:
print("Unknown node selection rule...exiting")
return
elif density != None:
for m in range(numnodes):
if self.graph_type == DIRECTED_GRAPH:
numnodes2 = numnodes
else:
numnodes2 = m
for n in range(numnodes2):
if ((parallel_allowed or (n, m) not in self.edge_attr)
and m != n):
if random.random() < density:
if length_range is None:
''' calculates the euclidean norm and round it
to an integer '''
length = round((((self.get_node(n).get_attr('locationx') -
self.get_node(m).get_attr('locationx')) ** 2 +
(self.get_node(n).get_attr('locationy') -
self.get_node(m).get_attr('locationy')) ** 2) ** 0.5), 0)
self.add_edge(m, n, cost = int(length), **edge_format)
if add_labels:
self.set_edge_attr(m, n, 'label', str(int(length)))
else:
self.add_edge(m, n, **edge_format)
else:
print("Must set either degree range or density") |
def _decode_label(label):
"""Convert a list label into a tuple. Works recursively on nested lists."""
if isinstance(label, list):
return tuple(_decode_label(v) for v in label)
return label | Convert a list label into a tuple. Works recursively on nested lists. | Below is the the instruction that describes the task:
### Input:
Convert a list label into a tuple. Works recursively on nested lists.
### Response:
def _decode_label(label):
"""Convert a list label into a tuple. Works recursively on nested lists."""
if isinstance(label, list):
return tuple(_decode_label(v) for v in label)
return label |
def _find_games(self, date, end_date):
"""
Retrieve all major games played on a given day.
Builds a URL based on the requested date and downloads the HTML
contents before parsing any and all games played during that day. Any
games that are found are added to the boxscores dictionary with
high-level game information such as the home and away team names and a
link to the boxscore page.
Parameters
----------
date : datetime object
The date to search for any matches. The month, day, and year are
required for the search, but time is not factored into the search.
end_date : datetime object (optional)
Optionally specify an end date to iterate until. All boxscores
starting from the date specified in the 'date' parameter up to and
including the boxscores specified in the 'end_date' parameter will
be pulled. If left empty, or if 'end_date' is prior to 'date', only
the games from the day specified in the 'date' parameter will be
saved.
"""
# Set the end date to the start date if the end date is before the
# start date.
if not end_date or date > end_date:
end_date = date
date_step = date
while date_step <= end_date:
url = self._create_url(date_step)
page = self._get_requested_page(url)
games = page('table[class="teams"]').items()
boxscores = self._extract_game_info(games)
timestamp = '%s-%s-%s' % (date_step.month, date_step.day,
date_step.year)
self._boxscores[timestamp] = boxscores
date_step += timedelta(days=1) | Retrieve all major games played on a given day.
Builds a URL based on the requested date and downloads the HTML
contents before parsing any and all games played during that day. Any
games that are found are added to the boxscores dictionary with
high-level game information such as the home and away team names and a
link to the boxscore page.
Parameters
----------
date : datetime object
The date to search for any matches. The month, day, and year are
required for the search, but time is not factored into the search.
end_date : datetime object (optional)
Optionally specify an end date to iterate until. All boxscores
starting from the date specified in the 'date' parameter up to and
including the boxscores specified in the 'end_date' parameter will
be pulled. If left empty, or if 'end_date' is prior to 'date', only
the games from the day specified in the 'date' parameter will be
saved. | Below is the the instruction that describes the task:
### Input:
Retrieve all major games played on a given day.
Builds a URL based on the requested date and downloads the HTML
contents before parsing any and all games played during that day. Any
games that are found are added to the boxscores dictionary with
high-level game information such as the home and away team names and a
link to the boxscore page.
Parameters
----------
date : datetime object
The date to search for any matches. The month, day, and year are
required for the search, but time is not factored into the search.
end_date : datetime object (optional)
Optionally specify an end date to iterate until. All boxscores
starting from the date specified in the 'date' parameter up to and
including the boxscores specified in the 'end_date' parameter will
be pulled. If left empty, or if 'end_date' is prior to 'date', only
the games from the day specified in the 'date' parameter will be
saved.
### Response:
def _find_games(self, date, end_date):
"""
Retrieve all major games played on a given day.
Builds a URL based on the requested date and downloads the HTML
contents before parsing any and all games played during that day. Any
games that are found are added to the boxscores dictionary with
high-level game information such as the home and away team names and a
link to the boxscore page.
Parameters
----------
date : datetime object
The date to search for any matches. The month, day, and year are
required for the search, but time is not factored into the search.
end_date : datetime object (optional)
Optionally specify an end date to iterate until. All boxscores
starting from the date specified in the 'date' parameter up to and
including the boxscores specified in the 'end_date' parameter will
be pulled. If left empty, or if 'end_date' is prior to 'date', only
the games from the day specified in the 'date' parameter will be
saved.
"""
# Set the end date to the start date if the end date is before the
# start date.
if not end_date or date > end_date:
end_date = date
date_step = date
while date_step <= end_date:
url = self._create_url(date_step)
page = self._get_requested_page(url)
games = page('table[class="teams"]').items()
boxscores = self._extract_game_info(games)
timestamp = '%s-%s-%s' % (date_step.month, date_step.day,
date_step.year)
self._boxscores[timestamp] = boxscores
date_step += timedelta(days=1) |
def clean(ctx):
    """Clean previously built package artifacts."""
    ctx.run("python setup.py clean")
    dist_dir = ROOT.joinpath("dist")
    build_dir = ROOT.joinpath("build")
    print(f"[clean] Removing {dist_dir} and {build_dir}")
    # Remove each artifact directory if a previous build left it behind.
    for path in (dist_dir, build_dir):
        if path.exists():
            shutil.rmtree(str(path))
### Input:
Clean previously built package artifacts.
### Response:
def clean(ctx):
"""Clean previously built package artifacts.
"""
ctx.run(f"python setup.py clean")
dist = ROOT.joinpath("dist")
build = ROOT.joinpath("build")
print(f"[clean] Removing {dist} and {build}")
if dist.exists():
shutil.rmtree(str(dist))
if build.exists():
shutil.rmtree(str(build)) |
def remove_vcf_info(keyword, variant_line=None, variant_dict=None):
    """Remove the information of a info field of a vcf variant line or a
    variant dict.

    Arguments:
        variant_line (str): A vcf formatted variant line
        variant_dict (dict): A variant dictionary
        keyword (str): The info field key

    Returns:
        variant_line (str): A annotated variant line
    """
    logger.debug("Removing variant information {0}".format(keyword))

    def strip_keyword(info_string):
        # Keep every ';'-separated INFO entry whose key is not `keyword`.
        kept = [entry for entry in info_string.split(';')
                if entry.split('=')[0] != keyword]
        return ';'.join(kept)

    fixed_variant = None
    if variant_line:
        logger.debug("Removing information from a variant line")
        columns = variant_line.rstrip('\n').split('\t')
        # Column 8 (index 7) is INFO; '.' means "no info" and is kept as-is.
        if columns[7] != '.':
            columns[7] = strip_keyword(columns[7])
        fixed_variant = '\t'.join(columns)
    elif variant_dict:
        logger.debug("Removing information to a variant dict")
        old_info = variant_dict['INFO']
        if old_info != '.':
            variant_dict['INFO'] = strip_keyword(old_info)
        fixed_variant = variant_dict
    return fixed_variant
variant dict.
Arguments:
variant_line (str): A vcf formatted variant line
variant_dict (dict): A variant dictionary
keyword (str): The info field key
Returns:
        variant_line (str): A annotated variant line | Below is the instruction that describes the task:
### Input:
Remove the information of a info field of a vcf variant line or a
variant dict.
Arguments:
variant_line (str): A vcf formatted variant line
variant_dict (dict): A variant dictionary
keyword (str): The info field key
Returns:
variant_line (str): A annotated variant line
### Response:
def remove_vcf_info(keyword, variant_line=None, variant_dict=None):
"""Remove the information of a info field of a vcf variant line or a
variant dict.
Arguments:
variant_line (str): A vcf formatted variant line
variant_dict (dict): A variant dictionary
keyword (str): The info field key
Returns:
variant_line (str): A annotated variant line
"""
logger.debug("Removing variant information {0}".format(keyword))
fixed_variant = None
def get_new_info_string(info_string, keyword):
"""Return a info string without keyword info"""
new_info_list = []
splitted_info_string = info_string.split(';')
for info in splitted_info_string:
splitted_info_entry = info.split('=')
if splitted_info_entry[0] != keyword:
new_info_list.append(info)
new_info_string = ';'.join(new_info_list)
return new_info_string
if variant_line:
logger.debug("Removing information from a variant line")
splitted_variant = variant_line.rstrip('\n').split('\t')
old_info = splitted_variant[7]
if old_info == '.':
new_info_string = '.'
else:
new_info_string = get_new_info_string(old_info, keyword)
splitted_variant[7] = new_info_string
fixed_variant = '\t'.join(splitted_variant)
elif variant_dict:
logger.debug("Removing information to a variant dict")
old_info = variant_dict['INFO']
if old_info == '.':
variant_dict['INFO'] = old_info
else:
new_info_string = get_new_info_string(old_info, keyword)
variant_dict['INFO'] = new_info_string
fixed_variant = variant_dict
return fixed_variant |
def get_swimlane(self):
"""Return list of record ids"""
value = super(ReferenceField, self).get_swimlane()
if value:
ids = list(value.keys())
if self.multiselect:
return ids
return ids[0]
        return None | Return list of record ids | Below is the instruction that describes the task:
### Input:
Return list of record ids
### Response:
def get_swimlane(self):
"""Return list of record ids"""
value = super(ReferenceField, self).get_swimlane()
if value:
ids = list(value.keys())
if self.multiselect:
return ids
return ids[0]
return None |
def index_of_reports(self, report, account_id):
"""
Index of Reports.
Shows all reports that have been run for the account of a specific type.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""ID"""
path["account_id"] = account_id
# REQUIRED - PATH - report
"""ID"""
path["report"] = report
self.logger.debug("GET /api/v1/accounts/{account_id}/reports/{report} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/accounts/{account_id}/reports/{report}".format(**path), data=data, params=params, all_pages=True) | Index of Reports.
        Shows all reports that have been run for the account of a specific type. | Below is the instruction that describes the task:
### Input:
Index of Reports.
Shows all reports that have been run for the account of a specific type.
### Response:
def index_of_reports(self, report, account_id):
"""
Index of Reports.
Shows all reports that have been run for the account of a specific type.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""ID"""
path["account_id"] = account_id
# REQUIRED - PATH - report
"""ID"""
path["report"] = report
self.logger.debug("GET /api/v1/accounts/{account_id}/reports/{report} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/accounts/{account_id}/reports/{report}".format(**path), data=data, params=params, all_pages=True) |
def _instantiate_session(self, method_name, proxy=None, *args, **kwargs):
"""Instantiates a provider session"""
if 'manager' in kwargs:
session_class = getattr(kwargs['manager'], method_name)
del kwargs['manager']
else:
session_class = getattr(self._provider_manager, method_name)
if proxy is None:
try:
return session_class(bank_id=self._catalog_id, *args, **kwargs)
except AttributeError:
return session_class(*args, **kwargs)
else:
try:
return session_class(bank_id=self._catalog_id, proxy=proxy, *args, **kwargs)
except AttributeError:
                return session_class(proxy=proxy, *args, **kwargs) | Instantiates a provider session | Below is the instruction that describes the task:
### Input:
Instantiates a provider session
### Response:
def _instantiate_session(self, method_name, proxy=None, *args, **kwargs):
"""Instantiates a provider session"""
if 'manager' in kwargs:
session_class = getattr(kwargs['manager'], method_name)
del kwargs['manager']
else:
session_class = getattr(self._provider_manager, method_name)
if proxy is None:
try:
return session_class(bank_id=self._catalog_id, *args, **kwargs)
except AttributeError:
return session_class(*args, **kwargs)
else:
try:
return session_class(bank_id=self._catalog_id, proxy=proxy, *args, **kwargs)
except AttributeError:
return session_class(proxy=proxy, *args, **kwargs) |
def get_src_model(self, name, paramsonly=False, reoptimize=False,
npts=None, **kwargs):
"""Compose a dictionary for a source with the current best-fit
parameters.
Parameters
----------
name : str
paramsonly : bool
Skip computing TS and likelihood profile.
reoptimize : bool
Re-fit background parameters in likelihood scan.
npts : int
Number of points for likelihood scan.
Returns
-------
src_dict : dict
"""
self.logger.debug('Generating source dict for ' + name)
optimizer = kwargs.get('optimizer', self.config['optimizer'])
if npts is None:
npts = self.config['gtlike']['llscan_npts']
name = self.get_source_name(name)
source = self.like[name].src
spectrum = source.spectrum()
normPar = self.like.normPar(name)
src_dict = defaults.make_default_dict(defaults.source_flux_output)
src_dict.update({'name': name,
'pivot_energy': 1000.,
'ts': np.nan,
'loglike': np.nan,
'npred': 0.0,
'npred_wt': 0.0,
'loglike_scan': np.nan * np.ones(npts),
'dloglike_scan': np.nan * np.ones(npts),
'eflux_scan': np.nan * np.ones(npts),
'flux_scan': np.nan * np.ones(npts),
'norm_scan': np.nan * np.ones(npts),
})
src_dict.update(gtutils.gtlike_spectrum_to_vectors(spectrum))
src_dict['spectral_pars'] = gtutils.get_function_pars_dict(spectrum)
# Get Counts Spectrum
src_dict['model_counts'] = self.model_counts_spectrum(
name, summed=True)
src_dict['model_counts_wt'] = self.model_counts_spectrum(
name, summed=True, weighted=True)
# Get NPred
src_dict['npred'] = self.like.NpredValue(str(name))
# EAC, we need this b/c older version of the ST don't have the right signature
try:
src_dict['npred_wt'] = self.like.NpredValue(str(name), True)
except (TypeError, NotImplementedError):
src_dict['npred_wt'] = src_dict['npred']
# Get the Model Fluxes
try:
thesrc = self.like[name]
src_dict['flux'] = self.like.flux(name, self.energies[0],
self.energies[-1])
src_dict['flux100'] = self.like.flux(name, 100., 10 ** 5.5)
src_dict['flux1000'] = self.like.flux(name, 1000., 10 ** 5.5)
src_dict['flux10000'] = self.like.flux(name, 10000., 10 ** 5.5)
src_dict['eflux'] = self.like.energyFlux(name,
self.energies[0],
self.energies[-1])
src_dict['eflux100'] = self.like.energyFlux(name, 100.,
10 ** 5.5)
src_dict['eflux1000'] = self.like.energyFlux(name, 1000.,
10 ** 5.5)
src_dict['eflux10000'] = self.like.energyFlux(name, 10000.,
10 ** 5.5)
src_dict['dnde'] = self.like[name].spectrum()(
pyLike.dArg(src_dict['pivot_energy']))
src_dict['dnde100'] = self.like[name].spectrum()(
pyLike.dArg(100.))
src_dict['dnde1000'] = self.like[name].spectrum()(
pyLike.dArg(1000.))
src_dict['dnde10000'] = self.like[name].spectrum()(
pyLike.dArg(10000.))
if normPar.getValue() == 0:
normPar.setValue(1.0)
dnde_index = -get_spectral_index(self.like[name],
src_dict['pivot_energy'])
dnde100_index = -get_spectral_index(self.like[name],
100.)
dnde1000_index = -get_spectral_index(self.like[name],
1000.)
dnde10000_index = -get_spectral_index(self.like[name],
10000.)
normPar.setValue(0.0)
else:
dnde_index = -get_spectral_index(self.like[name],
src_dict['pivot_energy'])
dnde100_index = -get_spectral_index(self.like[name],
100.)
dnde1000_index = -get_spectral_index(self.like[name],
1000.)
dnde10000_index = -get_spectral_index(self.like[name],
10000.)
src_dict['dnde_index'] = dnde_index
src_dict['dnde100_index'] = dnde100_index
src_dict['dnde1000_index'] = dnde1000_index
src_dict['dnde10000_index'] = dnde10000_index
except Exception:
self.logger.error('Failed to update source parameters.',
exc_info=True)
# Only compute TS, errors, and ULs if the source was free in
# the fit
if not self.get_free_source_params(name) or paramsonly:
return src_dict
emax = 10 ** 5.5
try:
src_dict['flux_err'] = self.like.fluxError(name,
self.energies[0],
self.energies[-1])
src_dict['flux100_err'] = self.like.fluxError(name, 100., emax)
src_dict['flux1000_err'] = self.like.fluxError(name, 1000., emax)
src_dict['flux10000_err'] = self.like.fluxError(name, 10000., emax)
src_dict['eflux_err'] = \
self.like.energyFluxError(name, self.energies[0],
self.energies[-1])
src_dict['eflux100_err'] = self.like.energyFluxError(name, 100.,
emax)
src_dict['eflux1000_err'] = self.like.energyFluxError(name, 1000.,
emax)
src_dict['eflux10000_err'] = self.like.energyFluxError(name, 10000.,
emax)
except Exception:
pass
# self.logger.error('Failed to update source parameters.',
# exc_info=True)
lnlp = self.profile_norm(name, savestate=True,
reoptimize=reoptimize, npts=npts,
optimizer=optimizer)
src_dict['loglike_scan'] = lnlp['loglike']
src_dict['dloglike_scan'] = lnlp['dloglike']
src_dict['eflux_scan'] = lnlp['eflux']
src_dict['flux_scan'] = lnlp['flux']
src_dict['norm_scan'] = lnlp['xvals']
src_dict['loglike'] = np.max(lnlp['loglike'])
flux_ul_data = utils.get_parameter_limits(
lnlp['flux'], lnlp['dloglike'])
eflux_ul_data = utils.get_parameter_limits(
lnlp['eflux'], lnlp['dloglike'])
if normPar.getValue() == 0:
normPar.setValue(1.0)
flux = self.like.flux(name, self.energies[0], self.energies[-1])
flux100 = self.like.flux(name, 100., emax)
flux1000 = self.like.flux(name, 1000., emax)
flux10000 = self.like.flux(name, 10000., emax)
eflux = self.like.energyFlux(name, self.energies[0],
self.energies[-1])
eflux100 = self.like.energyFlux(name, 100., emax)
eflux1000 = self.like.energyFlux(name, 1000., emax)
eflux10000 = self.like.energyFlux(name, 10000., emax)
flux100_ratio = flux100 / flux
flux1000_ratio = flux1000 / flux
flux10000_ratio = flux10000 / flux
eflux100_ratio = eflux100 / eflux
eflux1000_ratio = eflux1000 / eflux
eflux10000_ratio = eflux10000 / eflux
normPar.setValue(0.0)
else:
flux100_ratio = src_dict['flux100'] / src_dict['flux']
flux1000_ratio = src_dict['flux1000'] / src_dict['flux']
flux10000_ratio = src_dict['flux10000'] / src_dict['flux']
eflux100_ratio = src_dict['eflux100'] / src_dict['eflux']
eflux1000_ratio = src_dict['eflux1000'] / src_dict['eflux']
eflux10000_ratio = src_dict['eflux10000'] / src_dict['eflux']
src_dict['flux_ul95'] = flux_ul_data['ul']
src_dict['flux100_ul95'] = flux_ul_data['ul'] * flux100_ratio
src_dict['flux1000_ul95'] = flux_ul_data['ul'] * flux1000_ratio
src_dict['flux10000_ul95'] = flux_ul_data['ul'] * flux10000_ratio
src_dict['eflux_ul95'] = eflux_ul_data['ul']
src_dict['eflux100_ul95'] = eflux_ul_data['ul'] * eflux100_ratio
src_dict['eflux1000_ul95'] = eflux_ul_data['ul'] * eflux1000_ratio
src_dict['eflux10000_ul95'] = eflux_ul_data['ul'] * eflux10000_ratio
# Extract covariance matrix
fd = None
try:
fd = FluxDensity.FluxDensity(self.like, name)
src_dict['covar'] = fd.covar
except RuntimeError:
pass
# if ex.message == 'Covariance matrix has not been
# computed.':
# Extract bowtie
if fd and len(src_dict['covar']) and src_dict['covar'].ndim >= 1:
loge = np.linspace(self.log_energies[0],
self.log_energies[-1], 50)
src_dict['model_flux'] = self.bowtie(name, fd=fd, loge=loge)
src_dict['dnde100_err'] = fd.error(100.)
src_dict['dnde1000_err'] = fd.error(1000.)
src_dict['dnde10000_err'] = fd.error(10000.)
src_dict['pivot_energy'] = src_dict['model_flux']['pivot_energy']
e0 = src_dict['pivot_energy']
src_dict['dnde'] = self.like[name].spectrum()(pyLike.dArg(e0))
src_dict['dnde_err'] = fd.error(e0)
if not reoptimize:
src_dict['ts'] = self.like.Ts2(name, reoptimize=reoptimize)
else:
src_dict['ts'] = -2.0 * lnlp['dloglike'][0]
return src_dict | Compose a dictionary for a source with the current best-fit
parameters.
Parameters
----------
name : str
paramsonly : bool
Skip computing TS and likelihood profile.
reoptimize : bool
Re-fit background parameters in likelihood scan.
npts : int
Number of points for likelihood scan.
Returns
-------
    src_dict : dict | Below is the instruction that describes the task:
### Input:
Compose a dictionary for a source with the current best-fit
parameters.
Parameters
----------
name : str
paramsonly : bool
Skip computing TS and likelihood profile.
reoptimize : bool
Re-fit background parameters in likelihood scan.
npts : int
Number of points for likelihood scan.
Returns
-------
src_dict : dict
### Response:
def get_src_model(self, name, paramsonly=False, reoptimize=False,
npts=None, **kwargs):
"""Compose a dictionary for a source with the current best-fit
parameters.
Parameters
----------
name : str
paramsonly : bool
Skip computing TS and likelihood profile.
reoptimize : bool
Re-fit background parameters in likelihood scan.
npts : int
Number of points for likelihood scan.
Returns
-------
src_dict : dict
"""
self.logger.debug('Generating source dict for ' + name)
optimizer = kwargs.get('optimizer', self.config['optimizer'])
if npts is None:
npts = self.config['gtlike']['llscan_npts']
name = self.get_source_name(name)
source = self.like[name].src
spectrum = source.spectrum()
normPar = self.like.normPar(name)
src_dict = defaults.make_default_dict(defaults.source_flux_output)
src_dict.update({'name': name,
'pivot_energy': 1000.,
'ts': np.nan,
'loglike': np.nan,
'npred': 0.0,
'npred_wt': 0.0,
'loglike_scan': np.nan * np.ones(npts),
'dloglike_scan': np.nan * np.ones(npts),
'eflux_scan': np.nan * np.ones(npts),
'flux_scan': np.nan * np.ones(npts),
'norm_scan': np.nan * np.ones(npts),
})
src_dict.update(gtutils.gtlike_spectrum_to_vectors(spectrum))
src_dict['spectral_pars'] = gtutils.get_function_pars_dict(spectrum)
# Get Counts Spectrum
src_dict['model_counts'] = self.model_counts_spectrum(
name, summed=True)
src_dict['model_counts_wt'] = self.model_counts_spectrum(
name, summed=True, weighted=True)
# Get NPred
src_dict['npred'] = self.like.NpredValue(str(name))
# EAC, we need this b/c older version of the ST don't have the right signature
try:
src_dict['npred_wt'] = self.like.NpredValue(str(name), True)
except (TypeError, NotImplementedError):
src_dict['npred_wt'] = src_dict['npred']
# Get the Model Fluxes
try:
thesrc = self.like[name]
src_dict['flux'] = self.like.flux(name, self.energies[0],
self.energies[-1])
src_dict['flux100'] = self.like.flux(name, 100., 10 ** 5.5)
src_dict['flux1000'] = self.like.flux(name, 1000., 10 ** 5.5)
src_dict['flux10000'] = self.like.flux(name, 10000., 10 ** 5.5)
src_dict['eflux'] = self.like.energyFlux(name,
self.energies[0],
self.energies[-1])
src_dict['eflux100'] = self.like.energyFlux(name, 100.,
10 ** 5.5)
src_dict['eflux1000'] = self.like.energyFlux(name, 1000.,
10 ** 5.5)
src_dict['eflux10000'] = self.like.energyFlux(name, 10000.,
10 ** 5.5)
src_dict['dnde'] = self.like[name].spectrum()(
pyLike.dArg(src_dict['pivot_energy']))
src_dict['dnde100'] = self.like[name].spectrum()(
pyLike.dArg(100.))
src_dict['dnde1000'] = self.like[name].spectrum()(
pyLike.dArg(1000.))
src_dict['dnde10000'] = self.like[name].spectrum()(
pyLike.dArg(10000.))
if normPar.getValue() == 0:
normPar.setValue(1.0)
dnde_index = -get_spectral_index(self.like[name],
src_dict['pivot_energy'])
dnde100_index = -get_spectral_index(self.like[name],
100.)
dnde1000_index = -get_spectral_index(self.like[name],
1000.)
dnde10000_index = -get_spectral_index(self.like[name],
10000.)
normPar.setValue(0.0)
else:
dnde_index = -get_spectral_index(self.like[name],
src_dict['pivot_energy'])
dnde100_index = -get_spectral_index(self.like[name],
100.)
dnde1000_index = -get_spectral_index(self.like[name],
1000.)
dnde10000_index = -get_spectral_index(self.like[name],
10000.)
src_dict['dnde_index'] = dnde_index
src_dict['dnde100_index'] = dnde100_index
src_dict['dnde1000_index'] = dnde1000_index
src_dict['dnde10000_index'] = dnde10000_index
except Exception:
self.logger.error('Failed to update source parameters.',
exc_info=True)
# Only compute TS, errors, and ULs if the source was free in
# the fit
if not self.get_free_source_params(name) or paramsonly:
return src_dict
emax = 10 ** 5.5
try:
src_dict['flux_err'] = self.like.fluxError(name,
self.energies[0],
self.energies[-1])
src_dict['flux100_err'] = self.like.fluxError(name, 100., emax)
src_dict['flux1000_err'] = self.like.fluxError(name, 1000., emax)
src_dict['flux10000_err'] = self.like.fluxError(name, 10000., emax)
src_dict['eflux_err'] = \
self.like.energyFluxError(name, self.energies[0],
self.energies[-1])
src_dict['eflux100_err'] = self.like.energyFluxError(name, 100.,
emax)
src_dict['eflux1000_err'] = self.like.energyFluxError(name, 1000.,
emax)
src_dict['eflux10000_err'] = self.like.energyFluxError(name, 10000.,
emax)
except Exception:
pass
# self.logger.error('Failed to update source parameters.',
# exc_info=True)
lnlp = self.profile_norm(name, savestate=True,
reoptimize=reoptimize, npts=npts,
optimizer=optimizer)
src_dict['loglike_scan'] = lnlp['loglike']
src_dict['dloglike_scan'] = lnlp['dloglike']
src_dict['eflux_scan'] = lnlp['eflux']
src_dict['flux_scan'] = lnlp['flux']
src_dict['norm_scan'] = lnlp['xvals']
src_dict['loglike'] = np.max(lnlp['loglike'])
flux_ul_data = utils.get_parameter_limits(
lnlp['flux'], lnlp['dloglike'])
eflux_ul_data = utils.get_parameter_limits(
lnlp['eflux'], lnlp['dloglike'])
if normPar.getValue() == 0:
normPar.setValue(1.0)
flux = self.like.flux(name, self.energies[0], self.energies[-1])
flux100 = self.like.flux(name, 100., emax)
flux1000 = self.like.flux(name, 1000., emax)
flux10000 = self.like.flux(name, 10000., emax)
eflux = self.like.energyFlux(name, self.energies[0],
self.energies[-1])
eflux100 = self.like.energyFlux(name, 100., emax)
eflux1000 = self.like.energyFlux(name, 1000., emax)
eflux10000 = self.like.energyFlux(name, 10000., emax)
flux100_ratio = flux100 / flux
flux1000_ratio = flux1000 / flux
flux10000_ratio = flux10000 / flux
eflux100_ratio = eflux100 / eflux
eflux1000_ratio = eflux1000 / eflux
eflux10000_ratio = eflux10000 / eflux
normPar.setValue(0.0)
else:
flux100_ratio = src_dict['flux100'] / src_dict['flux']
flux1000_ratio = src_dict['flux1000'] / src_dict['flux']
flux10000_ratio = src_dict['flux10000'] / src_dict['flux']
eflux100_ratio = src_dict['eflux100'] / src_dict['eflux']
eflux1000_ratio = src_dict['eflux1000'] / src_dict['eflux']
eflux10000_ratio = src_dict['eflux10000'] / src_dict['eflux']
src_dict['flux_ul95'] = flux_ul_data['ul']
src_dict['flux100_ul95'] = flux_ul_data['ul'] * flux100_ratio
src_dict['flux1000_ul95'] = flux_ul_data['ul'] * flux1000_ratio
src_dict['flux10000_ul95'] = flux_ul_data['ul'] * flux10000_ratio
src_dict['eflux_ul95'] = eflux_ul_data['ul']
src_dict['eflux100_ul95'] = eflux_ul_data['ul'] * eflux100_ratio
src_dict['eflux1000_ul95'] = eflux_ul_data['ul'] * eflux1000_ratio
src_dict['eflux10000_ul95'] = eflux_ul_data['ul'] * eflux10000_ratio
# Extract covariance matrix
fd = None
try:
fd = FluxDensity.FluxDensity(self.like, name)
src_dict['covar'] = fd.covar
except RuntimeError:
pass
# if ex.message == 'Covariance matrix has not been
# computed.':
# Extract bowtie
if fd and len(src_dict['covar']) and src_dict['covar'].ndim >= 1:
loge = np.linspace(self.log_energies[0],
self.log_energies[-1], 50)
src_dict['model_flux'] = self.bowtie(name, fd=fd, loge=loge)
src_dict['dnde100_err'] = fd.error(100.)
src_dict['dnde1000_err'] = fd.error(1000.)
src_dict['dnde10000_err'] = fd.error(10000.)
src_dict['pivot_energy'] = src_dict['model_flux']['pivot_energy']
e0 = src_dict['pivot_energy']
src_dict['dnde'] = self.like[name].spectrum()(pyLike.dArg(e0))
src_dict['dnde_err'] = fd.error(e0)
if not reoptimize:
src_dict['ts'] = self.like.Ts2(name, reoptimize=reoptimize)
else:
src_dict['ts'] = -2.0 * lnlp['dloglike'][0]
return src_dict |
def eq_or_parent(self, other):
"""Check whether ``other`` is an ancestor.
Returns:
(bool) True IFF ``other`` is an ancestor or equal to ``self``,
else False.
"""
return self.parts[: len(other.parts)] == other.parts[: len(self.parts)] | Check whether ``other`` is an ancestor.
Returns:
(bool) True IFF ``other`` is an ancestor or equal to ``self``,
        else False. | Below is the instruction that describes the task:
### Input:
Check whether ``other`` is an ancestor.
Returns:
(bool) True IFF ``other`` is an ancestor or equal to ``self``,
else False.
### Response:
def eq_or_parent(self, other):
"""Check whether ``other`` is an ancestor.
Returns:
(bool) True IFF ``other`` is an ancestor or equal to ``self``,
else False.
"""
return self.parts[: len(other.parts)] == other.parts[: len(self.parts)] |
def query(self, query, time_precision='s', chunked=False):
"""Query data from the influxdb v0.8 database.
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param chunked: [Optional, default=False] True if the data shall be
retrieved in chunks, False otherwise.
"""
return self._query(query, time_precision=time_precision,
chunked=chunked) | Query data from the influxdb v0.8 database.
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param chunked: [Optional, default=False] True if the data shall be
        retrieved in chunks, False otherwise. | Below is the instruction that describes the task:
### Input:
Query data from the influxdb v0.8 database.
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param chunked: [Optional, default=False] True if the data shall be
retrieved in chunks, False otherwise.
### Response:
def query(self, query, time_precision='s', chunked=False):
"""Query data from the influxdb v0.8 database.
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param chunked: [Optional, default=False] True if the data shall be
retrieved in chunks, False otherwise.
"""
return self._query(query, time_precision=time_precision,
chunked=chunked) |
def incremental_neighbor_graph(X, precomputed=False, k=None, epsilon=None,
weighting='none'):
'''See neighbor_graph.'''
assert ((k is not None) or (epsilon is not None)
), "Must provide `k` or `epsilon`"
assert (_issequence(k) ^ _issequence(epsilon)
), "Exactly one of `k` or `epsilon` must be a sequence."
assert weighting in ('binary','none'), "Invalid weighting param: " + weighting
is_weighted = weighting == 'none'
if precomputed:
D = X
else:
D = pairwise_distances(X, metric='euclidean')
# pre-sort for efficiency
order = np.argsort(D)[:,1:]
if k is None:
k = D.shape[0]
# generate the sequence of graphs
# TODO: convert the core of these loops to Cython for speed
W = np.zeros_like(D)
I = np.arange(D.shape[0])
if _issequence(k):
# varied k, fixed epsilon
if epsilon is not None:
D[D > epsilon] = 0
old_k = 0
for new_k in k:
idx = order[:, old_k:new_k]
dist = D[I, idx.T]
W[I, idx.T] = dist if is_weighted else 1
yield Graph.from_adj_matrix(W)
old_k = new_k
else:
# varied epsilon, fixed k
idx = order[:,:k]
dist = D[I, idx.T].T
old_i = np.zeros(D.shape[0], dtype=int)
for eps in epsilon:
for i, row in enumerate(dist):
oi = old_i[i]
ni = oi + np.searchsorted(row[oi:], eps)
rr = row[oi:ni]
W[i, idx[i,oi:ni]] = rr if is_weighted else 1
old_i[i] = ni
      yield Graph.from_adj_matrix(W) | See neighbor_graph. | Below is the instruction that describes the task:
### Input:
See neighbor_graph.
### Response:
def incremental_neighbor_graph(X, precomputed=False, k=None, epsilon=None,
weighting='none'):
'''See neighbor_graph.'''
assert ((k is not None) or (epsilon is not None)
), "Must provide `k` or `epsilon`"
assert (_issequence(k) ^ _issequence(epsilon)
), "Exactly one of `k` or `epsilon` must be a sequence."
assert weighting in ('binary','none'), "Invalid weighting param: " + weighting
is_weighted = weighting == 'none'
if precomputed:
D = X
else:
D = pairwise_distances(X, metric='euclidean')
# pre-sort for efficiency
order = np.argsort(D)[:,1:]
if k is None:
k = D.shape[0]
# generate the sequence of graphs
# TODO: convert the core of these loops to Cython for speed
W = np.zeros_like(D)
I = np.arange(D.shape[0])
if _issequence(k):
# varied k, fixed epsilon
if epsilon is not None:
D[D > epsilon] = 0
old_k = 0
for new_k in k:
idx = order[:, old_k:new_k]
dist = D[I, idx.T]
W[I, idx.T] = dist if is_weighted else 1
yield Graph.from_adj_matrix(W)
old_k = new_k
else:
# varied epsilon, fixed k
idx = order[:,:k]
dist = D[I, idx.T].T
old_i = np.zeros(D.shape[0], dtype=int)
for eps in epsilon:
for i, row in enumerate(dist):
oi = old_i[i]
ni = oi + np.searchsorted(row[oi:], eps)
rr = row[oi:ni]
W[i, idx[i,oi:ni]] = rr if is_weighted else 1
old_i[i] = ni
yield Graph.from_adj_matrix(W) |
def get(self, section, key):
''' Get the value of a key in the given section. It will automatically
translate the paramter type if the parameter has a type specified
with the description.
@param section: the section where the key can be found.
@param key: the key the value is stored under.
@return the value as a string or the specified type.
@exception: if the type is specified and the value could not be
translated to the given type.
'''
section = section.lower()
key = key.lower()
descr, value_type, default = self.get_description(section, key)
value = ConfigParser.get(self, section, key)
if value_type == bool:
if PY2:
if value.lower() not in self._boolean_states:
raise AppConfigValueException('Not a boolean: {0}'.
format(value))
return self._boolean_states[value.lower()]
else:
try:
return self._convert_to_boolean(value)
except ValueError:
raise AppConfigValueException('Not a boolean: {0}'.
format(value))
return value_type(value) | Get the value of a key in the given section. It will automatically
translate the paramter type if the parameter has a type specified
with the description.
@param section: the section where the key can be found.
@param key: the key the value is stored under.
@return the value as a string or the specified type.
@exception: if the type is specified and the value could not be
        translated to the given type. | Below is the instruction that describes the task:
### Input:
Get the value of a key in the given section. It will automatically
translate the paramter type if the parameter has a type specified
with the description.
@param section: the section where the key can be found.
@param key: the key the value is stored under.
@return the value as a string or the specified type.
@exception: if the type is specified and the value could not be
translated to the given type.
### Response:
def get(self, section, key):
''' Get the value of a key in the given section. It will automatically
translate the paramter type if the parameter has a type specified
with the description.
@param section: the section where the key can be found.
@param key: the key the value is stored under.
@return the value as a string or the specified type.
@exception: if the type is specified and the value could not be
translated to the given type.
'''
section = section.lower()
key = key.lower()
descr, value_type, default = self.get_description(section, key)
value = ConfigParser.get(self, section, key)
if value_type == bool:
if PY2:
if value.lower() not in self._boolean_states:
raise AppConfigValueException('Not a boolean: {0}'.
format(value))
return self._boolean_states[value.lower()]
else:
try:
return self._convert_to_boolean(value)
except ValueError:
raise AppConfigValueException('Not a boolean: {0}'.
format(value))
return value_type(value) |
def getStates(self):
'''
Gets simulated consumers pLvl and mNrm for this period, but with the alteration that these
represent perceived rather than actual values. Also calculates mLvlTrue, the true level of
market resources that the individual has on hand.
Parameters
----------
None
Returns
-------
None
'''
# Update consumers' perception of their permanent income level
pLvlPrev = self.pLvlNow
self.pLvlNow = pLvlPrev*self.PermShkNow # Perceived permanent income level (only correct if macro state is observed this period)
self.PlvlAggNow *= self.PermShkAggNow # Updated aggregate permanent productivity level
self.pLvlTrue = self.pLvlNow*self.pLvlErrNow
# Calculate what the consumers perceive their normalized market resources to be
RfreeNow = self.getRfree()
bLvlNow = RfreeNow*self.aLvlNow # This is the true level
yLvlNow = self.pLvlTrue*self.TranShkNow # This is true income level
mLvlTrueNow = bLvlNow + yLvlNow # This is true market resource level
mNrmPcvdNow = mLvlTrueNow/self.pLvlNow # This is perceived normalized resources
self.mNrmNow = mNrmPcvdNow
self.mLvlTrueNow = mLvlTrueNow
self.yLvlNow = yLvlNow | Gets simulated consumers pLvl and mNrm for this period, but with the alteration that these
represent perceived rather than actual values. Also calculates mLvlTrue, the true level of
market resources that the individual has on hand.
Parameters
----------
None
Returns
-------
        None | Below is the instruction that describes the task:
### Input:
Gets simulated consumers pLvl and mNrm for this period, but with the alteration that these
represent perceived rather than actual values. Also calculates mLvlTrue, the true level of
market resources that the individual has on hand.
Parameters
----------
None
Returns
-------
None
### Response:
def getStates(self):
'''
Gets simulated consumers pLvl and mNrm for this period, but with the alteration that these
represent perceived rather than actual values. Also calculates mLvlTrue, the true level of
market resources that the individual has on hand.
Parameters
----------
None
Returns
-------
None
'''
# Update consumers' perception of their permanent income level
pLvlPrev = self.pLvlNow
self.pLvlNow = pLvlPrev*self.PermShkNow # Perceived permanent income level (only correct if macro state is observed this period)
self.PlvlAggNow *= self.PermShkAggNow # Updated aggregate permanent productivity level
self.pLvlTrue = self.pLvlNow*self.pLvlErrNow
# Calculate what the consumers perceive their normalized market resources to be
RfreeNow = self.getRfree()
bLvlNow = RfreeNow*self.aLvlNow # This is the true level
yLvlNow = self.pLvlTrue*self.TranShkNow # This is true income level
mLvlTrueNow = bLvlNow + yLvlNow # This is true market resource level
mNrmPcvdNow = mLvlTrueNow/self.pLvlNow # This is perceived normalized resources
self.mNrmNow = mNrmPcvdNow
self.mLvlTrueNow = mLvlTrueNow
self.yLvlNow = yLvlNow |
def _std_tuple_of(var=None, std=None, interval=None):
"""
Convienence function for plotting. Given one of var, standard
deviation, or interval, return the std. Any of the three can be an
iterable list.
Examples
--------
>>>_std_tuple_of(var=[1, 3, 9])
(1, 2, 3)
"""
if std is not None:
if np.isscalar(std):
std = (std,)
return std
if interval is not None:
if np.isscalar(interval):
interval = (interval,)
return norm.interval(interval)[1]
if var is None:
raise ValueError("no inputs were provided")
if np.isscalar(var):
var = (var,)
return np.sqrt(var) | Convenience function for plotting. Given one of var, standard
deviation, or interval, return the std. Any of the three can be an
iterable list.
Examples
--------
>>>_std_tuple_of(var=[1, 3, 9])
(1, 2, 3) | Below is the instruction that describes the task:
### Input:
Convenience function for plotting. Given one of var, standard
deviation, or interval, return the std. Any of the three can be an
iterable list.
Examples
--------
>>>_std_tuple_of(var=[1, 3, 9])
(1, 2, 3)
### Response:
def _std_tuple_of(var=None, std=None, interval=None):
    """
    Convenience function for plotting. Given one of var, standard
    deviation, or interval, return the std. Any of the three can be an
    iterable list.

    Examples
    --------
    >>> _std_tuple_of(var=[1, 4, 9])
    (1, 2, 3)

    """
    # Priority order: std beats interval beats var, matching the original.
    if std is not None:
        if np.isscalar(std):
            std = (std,)
        return std

    if interval is not None:
        if np.isscalar(interval):
            interval = (interval,)
        # Convert confidence levels (e.g. 0.6827) into the corresponding
        # number of standard deviations of a unit normal; [1] is the upper
        # bound of the symmetric interval.
        return norm.interval(interval)[1]

    if var is None:
        raise ValueError("no inputs were provided")

    if np.isscalar(var):
        var = (var,)
    return np.sqrt(var)
def get_messages(self,
statuses=DEFAULT_MESSAGE_STATUSES,
order="sent_at desc",
offset=None,
count=None,
content=False):
"""Returns a list of messages your account sent.
Messages are sorted by ``order``, starting at an optional integer ``offset``, and optionally limited to the first ``count`` items (in sorted order).
Returned data includes various statistics about each message, e.g., ``total_opens``, ``open_rate``, ``total_clicks``, ``unsubs``, ``soft_bounces``. If ``content=True``, the returned data will also include HTML content of each message.
"""
req_data = [ { "status": statuses }, order, fmt_paging(offset, count) ]
service = "query:Message.stats"
if content: service += ", Message.content"
return self.request(service, req_data) | Returns a list of messages your account sent.
Messages are sorted by ``order``, starting at an optional integer ``offset``, and optionally limited to the first ``count`` items (in sorted order).
Returned data includes various statistics about each message, e.g., ``total_opens``, ``open_rate``, ``total_clicks``, ``unsubs``, ``soft_bounces``. If ``content=True``, the returned data will also include HTML content of each message. | Below is the instruction that describes the task:
### Input:
Returns a list of messages your account sent.
Messages are sorted by ``order``, starting at an optional integer ``offset``, and optionally limited to the first ``count`` items (in sorted order).
Returned data includes various statistics about each message, e.g., ``total_opens``, ``open_rate``, ``total_clicks``, ``unsubs``, ``soft_bounces``. If ``content=True``, the returned data will also include HTML content of each message.
### Response:
def get_messages(self,
                 statuses=DEFAULT_MESSAGE_STATUSES,
                 order="sent_at desc",
                 offset=None,
                 count=None,
                 content=False):
    """Returns a list of messages your account sent.

    Messages are sorted by ``order``, optionally starting at integer
    ``offset`` and limited to the first ``count`` items in sorted order.
    Each returned entry carries message statistics (e.g. ``total_opens``,
    ``open_rate``, ``total_clicks``, ``unsubs``, ``soft_bounces``); when
    ``content=True`` the HTML content of each message is included as well.
    """
    # Optionally widen the query to pull message bodies alongside the stats.
    extra = ", Message.content" if content else ""
    service = "query:Message.stats" + extra
    request_payload = [{"status": statuses}, order, fmt_paging(offset, count)]
    return self.request(service, request_payload)
def _normalize_datatype(datatype_instance):
"""
Translates a user specified datatype to an instance of the ones defined above.
Valid data types are passed through, and the following type specifications
are translated to the proper instances:
str, "String" -> String()
int, "Int64" -> Int64()
float, "Double" -> Double()
If a data type is not recognized, then an error is raised.
"""
global _simple_type_remap
if datatype_instance in _simple_type_remap:
return _simple_type_remap[datatype_instance]
# Now set the protobuf from this interface.
if isinstance(datatype_instance, (Int64, Double, String, Array)):
return datatype_instance
elif isinstance(datatype_instance, Dictionary):
kt = datatype_instance.key_type
if isinstance(kt, (Int64, String)):
return datatype_instance
raise ValueError("Datatype instance not recognized.") | Translates a user specified datatype to an instance of the ones defined above.
Valid data types are passed through, and the following type specifications
are translated to the proper instances:
str, "String" -> String()
int, "Int64" -> Int64()
float, "Double" -> Double()
If a data type is not recognized, then an error is raised. | Below is the instruction that describes the task:
### Input:
Translates a user specified datatype to an instance of the ones defined above.
Valid data types are passed through, and the following type specifications
are translated to the proper instances:
str, "String" -> String()
int, "Int64" -> Int64()
float, "Double" -> Double()
If a data type is not recognized, then an error is raised.
### Response:
def _normalize_datatype(datatype_instance):
    """
    Translates a user specified datatype to an instance of the ones defined above.

    Valid data types are passed through, and the following type specifications
    are translated to the proper instances:

        str, "String" -> String()
        int, "Int64" -> Int64()
        float, "Double" -> Double()

    If a data type is not recognized, then an error is raised.
    """
    # NOTE: the module-level _simple_type_remap is only read here, so no
    # ``global`` declaration is needed (the original one was redundant).
    # Shorthand specifications (e.g. str, int, "String") map directly to
    # canonical instances.
    if datatype_instance in _simple_type_remap:
        return _simple_type_remap[datatype_instance]

    # Already-canonical simple types pass through unchanged.
    if isinstance(datatype_instance, (Int64, Double, String, Array)):
        return datatype_instance

    # Dictionaries are valid only when keyed by Int64 or String.
    if isinstance(datatype_instance, Dictionary):
        kt = datatype_instance.key_type
        if isinstance(kt, (Int64, String)):
            return datatype_instance

    raise ValueError("Datatype instance not recognized.")
def add_error(self, property_name, message):
"""Add an error for the given property."""
if property_name not in self.errors:
self.errors[property_name] = []
self.errors[property_name].append(message) | Add an error for the given property. | Below is the instruction that describes the task:
### Input:
Add an error for the given property.
### Response:
def add_error(self, property_name, message):
    """Add an error for the given property.

    Parameters
    ----------
    property_name : str
        Name of the property the error applies to; used as the key into
        ``self.errors``.
    message : str
        Human-readable description of the error to record.
    """
    # setdefault creates the message list on first use, replacing the
    # manual membership check + empty-list assignment of the original.
    self.errors.setdefault(property_name, []).append(message)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.