code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def get_slice_end(self):
"""
返回queryset切片的尾巴
"""
value = None
if self.easyui_page:
value = self.easyui_page * self.easyui_rows
    return value | 返回queryset切片的尾巴 | Below is the instruction that describes the task:
### Input:
返回queryset切片的尾巴
### Response:
def get_slice_end(self):
    """Return the exclusive end index for slicing the queryset.

    The index is page number times rows per page; ``None`` is returned
    when no page is set (missing, ``None`` or ``0``), which leaves the
    slice unbounded.
    """
    if not self.easyui_page:
        return None
    return self.easyui_page * self.easyui_rows
def _tree_create_leaf(self, name, trajectory, hdf5_group):
""" Creates a new pypet leaf instance.
Returns the leaf and if it is an explored parameter the length of the range.
"""
class_name = self._all_get_from_attrs(hdf5_group, HDF5StorageService.CLASS_NAME)
# Create the instance with the appropriate constructor
class_constructor = trajectory._create_class(class_name)
instance = trajectory._construct_instance(class_constructor, name)
return instance | Creates a new pypet leaf instance.
Returns the leaf and if it is an explored parameter the length of the range. | Below is the instruction that describes the task:
### Input:
Creates a new pypet leaf instance.
Returns the leaf and if it is an explored parameter the length of the range.
### Response:
def _tree_create_leaf(self, name, trajectory, hdf5_group):
    """Instantiate a fresh pypet leaf node named *name*.

    The concrete leaf class is read from the HDF5 group's stored
    class-name attribute and the instance is built through the
    trajectory's class factory.
    """
    stored_class_name = self._all_get_from_attrs(hdf5_group, HDF5StorageService.CLASS_NAME)
    # Resolve the class object via the trajectory, then construct the leaf.
    leaf_constructor = trajectory._create_class(stored_class_name)
    return trajectory._construct_instance(leaf_constructor, name)
def _initfile(path, data="dict"):
"""Initialize an empty JSON file."""
data = {} if data.lower() == "dict" else []
# The file will need to be created if it doesn't exist
if not os.path.exists(path): # The file doesn't exist
# Raise exception if the directory that should contain the file doesn't
# exist
dirname = os.path.dirname(path)
if dirname and not os.path.exists(dirname):
raise IOError(
("Could not initialize empty JSON file in non-existant "
"directory '{}'").format(os.path.dirname(path))
)
# Write an empty file there
with open(path, "w") as f:
json.dump(data, f)
return True
elif os.path.getsize(path) == 0: # The file is empty
with open(path, "w") as f:
json.dump(data, f)
else: # The file exists and contains content
        return False | Initialize an empty JSON file. | Below is the instruction that describes the task:
### Input:
Initialize an empty JSON file.
### Response:
def _initfile(path, data="dict"):
"""Initialize an empty JSON file."""
data = {} if data.lower() == "dict" else []
# The file will need to be created if it doesn't exist
if not os.path.exists(path): # The file doesn't exist
# Raise exception if the directory that should contain the file doesn't
# exist
dirname = os.path.dirname(path)
if dirname and not os.path.exists(dirname):
raise IOError(
("Could not initialize empty JSON file in non-existant "
"directory '{}'").format(os.path.dirname(path))
)
# Write an empty file there
with open(path, "w") as f:
json.dump(data, f)
return True
elif os.path.getsize(path) == 0: # The file is empty
with open(path, "w") as f:
json.dump(data, f)
else: # The file exists and contains content
return False |
def renderHTTP(self, context):
"""
Check to see if the wrapped resource wants to be rendered over HTTPS
and generate a redirect if this is so, if HTTPS is available, and if
the request is not already over HTTPS.
"""
if getattr(self.wrappedResource, 'needsSecure', False):
request = IRequest(context)
url = self.urlGenerator.encryptedRoot()
if url is not None:
for seg in request.prepath:
url = url.child(seg)
return url
return self.wrappedResource.renderHTTP(context) | Check to see if the wrapped resource wants to be rendered over HTTPS
and generate a redirect if this is so, if HTTPS is available, and if
the request is not already over HTTPS. | Below is the instruction that describes the task:
### Input:
Check to see if the wrapped resource wants to be rendered over HTTPS
and generate a redirect if this is so, if HTTPS is available, and if
the request is not already over HTTPS.
### Response:
def renderHTTP(self, context):
    """
    Render the wrapped resource, first redirecting to HTTPS when the
    resource asks for it via a truthy ``needsSecure`` attribute, HTTPS is
    available (an encrypted root URL exists), and the request is not
    already secure.
    """
    wants_https = getattr(self.wrappedResource, 'needsSecure', False)
    if wants_https:
        request = IRequest(context)
        secure_url = self.urlGenerator.encryptedRoot()
        if secure_url is not None:
            # Rebuild the requested path underneath the encrypted root.
            for segment in request.prepath:
                secure_url = secure_url.child(segment)
            return secure_url
    # No redirect needed (or possible): render normally.
    return self.wrappedResource.renderHTTP(context)
def persist(self, name, project=None, drop_model=False, **kwargs):
"""
Persist the execution into a new model.
:param name: model name
:param project: name of the project
:param drop_model: drop model before creation
"""
return super(ODPSModelExpr, self).persist(name, project=project, drop_model=drop_model, **kwargs) | Persist the execution into a new model.
:param name: model name
:param project: name of the project
:param drop_model: drop model before creation | Below is the instruction that describes the task:
### Input:
Persist the execution into a new model.
:param name: model name
:param project: name of the project
:param drop_model: drop model before creation
### Response:
def persist(self, name, project=None, drop_model=False, **kwargs):
    """
    Persist the execution into a new model.

    :param name: model name
    :param project: name of the project
    :param drop_model: drop model before creation
    """
    # Delegate to the base expression class; any extra persistence options
    # travel through unchanged via kwargs.
    parent = super(ODPSModelExpr, self)
    return parent.persist(name, project=project, drop_model=drop_model,
                          **kwargs)
def parse_creation_info(self, ci_term):
"""
Parse creators, created and comment.
"""
for _s, _p, o in self.graph.triples((ci_term, self.spdx_namespace['creator'], None)):
try:
ent = self.builder.create_entity(self.doc, six.text_type(o))
self.builder.add_creator(self.doc, ent)
except SPDXValueError:
self.value_error('CREATOR_VALUE', o)
for _s, _p, o in self.graph.triples((ci_term, self.spdx_namespace['created'], None)):
try:
self.builder.set_created_date(self.doc, six.text_type(o))
except SPDXValueError:
self.value_error('CREATED_VALUE', o)
except CardinalityError:
self.more_than_one_error('created')
break
for _s, _p, o in self.graph.triples((ci_term, RDFS.comment, None)):
try:
self.builder.set_creation_comment(self.doc, six.text_type(o))
except CardinalityError:
self.more_than_one_error('CreationInfo comment')
break
for _s, _p, o in self.graph.triples((ci_term, self.spdx_namespace['licenseListVersion'], None)):
try:
self.builder.set_lics_list_ver(self.doc, six.text_type(o))
except CardinalityError:
self.more_than_one_error('licenseListVersion')
break
except SPDXValueError:
            self.value_error('LL_VALUE', o) | Parse creators, created and comment. | Below is the instruction that describes the task:
### Input:
Parse creators, created and comment.
### Response:
def parse_creation_info(self, ci_term):
    """
    Parse creators, created and comment.

    Walks the RDF triples attached to *ci_term* (the CreationInfo node)
    and forwards each recognized property to the document builder.
    Violations are logged via ``value_error`` / ``more_than_one_error``
    instead of being raised.
    """
    # A document may have several creators; convert each into an entity.
    for _s, _p, o in self.graph.triples((ci_term, self.spdx_namespace['creator'], None)):
        try:
            ent = self.builder.create_entity(self.doc, six.text_type(o))
            self.builder.add_creator(self.doc, ent)
        except SPDXValueError:
            self.value_error('CREATOR_VALUE', o)
    # 'created' is allowed at most once; stop scanning after the builder
    # reports a duplicate.
    for _s, _p, o in self.graph.triples((ci_term, self.spdx_namespace['created'], None)):
        try:
            self.builder.set_created_date(self.doc, six.text_type(o))
        except SPDXValueError:
            self.value_error('CREATED_VALUE', o)
        except CardinalityError:
            self.more_than_one_error('created')
            break
    # Optional single comment on the creation info.
    for _s, _p, o in self.graph.triples((ci_term, RDFS.comment, None)):
        try:
            self.builder.set_creation_comment(self.doc, six.text_type(o))
        except CardinalityError:
            self.more_than_one_error('CreationInfo comment')
            break
    # Optional single license-list version.
    for _s, _p, o in self.graph.triples((ci_term, self.spdx_namespace['licenseListVersion'], None)):
        try:
            self.builder.set_lics_list_ver(self.doc, six.text_type(o))
        except CardinalityError:
            self.more_than_one_error('licenseListVersion')
            break
        except SPDXValueError:
            self.value_error('LL_VALUE', o)
def visitNodeConstraintValueSet(self, ctx: ShExDocParser.NodeConstraintValueSetContext):
""" nodeConstraint: valueSet xsFacet* #nodeConstraintValueSet """
self.nodeconstraint.values = []
        self.visitChildren(ctx) | nodeConstraint: valueSet xsFacet* #nodeConstraintValueSet | Below is the instruction that describes the task:
### Input:
nodeConstraint: valueSet xsFacet* #nodeConstraintValueSet
### Response:
def visitNodeConstraintValueSet(self, ctx: ShExDocParser.NodeConstraintValueSetContext):
    """ nodeConstraint: valueSet xsFacet* #nodeConstraintValueSet

    Starts the node constraint off with an empty value list, then lets
    the child visitors populate it with the value-set entries.
    """
    self.nodeconstraint.values = list()
    self.visitChildren(ctx)
def try_write(wd_item, record_id, record_prop, login, edit_summary='', write=True):
"""
Write a PBB_core item. Log if item was created, updated, or skipped.
Catch and log all errors.
:param wd_item: A wikidata item that will be written
:type wd_item: PBB_Core.WDItemEngine
:param record_id: An external identifier, to be used for logging
:type record_id: str
:param record_prop: Property of the external identifier
:type record_prop: str
:param login: PBB_core login instance
:type login: PBB_login.WDLogin
:param edit_summary: passed directly to wd_item.write
:type edit_summary: str
:param write: If `False`, do not actually perform write. Action will be logged as if write had occured
:type write: bool
:return: True if write did not throw an exception, returns the exception otherwise
"""
if wd_item.require_write:
if wd_item.create_new_item:
msg = "CREATE"
else:
msg = "UPDATE"
else:
msg = "SKIP"
try:
if write:
wd_item.write(login=login, edit_summary=edit_summary)
wdi_core.WDItemEngine.log("INFO", format_msg(record_id, record_prop, wd_item.wd_item_id, msg) + ";" + str(
wd_item.lastrevid))
except wdi_core.WDApiError as e:
print(e)
wdi_core.WDItemEngine.log("ERROR",
format_msg(record_id, record_prop, wd_item.wd_item_id, json.dumps(e.wd_error_msg),
type(e)))
return e
except Exception as e:
print(e)
wdi_core.WDItemEngine.log("ERROR", format_msg(record_id, record_prop, wd_item.wd_item_id, str(e), type(e)))
return e
return True | Write a PBB_core item. Log if item was created, updated, or skipped.
Catch and log all errors.
:param wd_item: A wikidata item that will be written
:type wd_item: PBB_Core.WDItemEngine
:param record_id: An external identifier, to be used for logging
:type record_id: str
:param record_prop: Property of the external identifier
:type record_prop: str
:param login: PBB_core login instance
:type login: PBB_login.WDLogin
:param edit_summary: passed directly to wd_item.write
:type edit_summary: str
:param write: If `False`, do not actually perform write. Action will be logged as if write had occured
:type write: bool
:return: True if write did not throw an exception, returns the exception otherwise | Below is the instruction that describes the task:
### Input:
Write a PBB_core item. Log if item was created, updated, or skipped.
Catch and log all errors.
:param wd_item: A wikidata item that will be written
:type wd_item: PBB_Core.WDItemEngine
:param record_id: An external identifier, to be used for logging
:type record_id: str
:param record_prop: Property of the external identifier
:type record_prop: str
:param login: PBB_core login instance
:type login: PBB_login.WDLogin
:param edit_summary: passed directly to wd_item.write
:type edit_summary: str
:param write: If `False`, do not actually perform write. Action will be logged as if write had occured
:type write: bool
:return: True if write did not throw an exception, returns the exception otherwise
### Response:
def try_write(wd_item, record_id, record_prop, login, edit_summary='', write=True):
    """
    Write a PBB_core item. Log if item was created, updated, or skipped.
    Catch and log all errors.

    :param wd_item: A wikidata item that will be written
    :type wd_item: PBB_Core.WDItemEngine
    :param record_id: An external identifier, to be used for logging
    :type record_id: str
    :param record_prop: Property of the external identifier
    :type record_prop: str
    :param login: PBB_core login instance
    :type login: PBB_login.WDLogin
    :param edit_summary: passed directly to wd_item.write
    :type edit_summary: str
    :param write: If `False`, do not actually perform write. Action will be logged as if write had occurred
    :type write: bool
    :return: True if write did not throw an exception, returns the exception otherwise
    """
    # Classify the pending action purely for the log message.
    if wd_item.require_write:
        if wd_item.create_new_item:
            msg = "CREATE"
        else:
            msg = "UPDATE"
    else:
        msg = "SKIP"
    try:
        if write:
            wd_item.write(login=login, edit_summary=edit_summary)
        # Success: record the action taken plus the resulting revision id.
        wdi_core.WDItemEngine.log("INFO", format_msg(record_id, record_prop, wd_item.wd_item_id, msg) + ";" + str(
            wd_item.lastrevid))
    except wdi_core.WDApiError as e:
        # API-level failure: log the structured error payload; the exception
        # is handed back to the caller instead of being re-raised.
        print(e)
        wdi_core.WDItemEngine.log("ERROR",
                                  format_msg(record_id, record_prop, wd_item.wd_item_id, json.dumps(e.wd_error_msg),
                                             type(e)))
        return e
    except Exception as e:
        # Any other failure: log its string form; again returned, not raised.
        print(e)
        wdi_core.WDItemEngine.log("ERROR", format_msg(record_id, record_prop, wd_item.wd_item_id, str(e), type(e)))
        return e
    return True
def add_psk(self, **kwargs):
"""Add"""
api = self._get_api(bootstrap.PreSharedKeysApi)
item = PreSharedKey._create_request_map(kwargs)
item = models.PreSharedKey(**item)
api.upload_pre_shared_key(item)
    return PreSharedKey(item) | Add | Below is the instruction that describes the task:
### Input:
Add
### Response:
def add_psk(self, **kwargs):
    """Register a new pre-shared key built from the given keyword fields.

    Returns the uploaded key wrapped as a :class:`PreSharedKey`.
    """
    api = self._get_api(bootstrap.PreSharedKeysApi)
    # Translate the user-facing field names into the request payload model.
    payload = PreSharedKey._create_request_map(kwargs)
    psk = models.PreSharedKey(**payload)
    api.upload_pre_shared_key(psk)
    return PreSharedKey(psk)
def timedelta_isoformat(td: datetime.timedelta) -> str:
"""
ISO 8601 encoding for timedeltas.
"""
minutes, seconds = divmod(td.seconds, 60)
hours, minutes = divmod(minutes, 60)
    return f'P{td.days}DT{hours:d}H{minutes:d}M{seconds:d}.{td.microseconds:06d}S' | ISO 8601 encoding for timedeltas. | Below is the instruction that describes the task:
### Input:
ISO 8601 encoding for timedeltas.
### Response:
def timedelta_isoformat(td: datetime.timedelta) -> str:
    """Serialize *td* as an ISO 8601 duration string (PnDTnHnMn.nnnnnnS).

    Relies on timedelta's normalized days/seconds/microseconds attributes.
    """
    hrs, remainder = divmod(td.seconds, 3600)
    mins, secs = divmod(remainder, 60)
    return f'P{td.days}DT{hrs:d}H{mins:d}M{secs:d}.{td.microseconds:06d}S'
def _wrap_deprecated_function(func, message):
""" Wrap the deprecated function to print out deprecation warnings"""
def _(col):
warnings.warn(message, DeprecationWarning)
return func(col)
    return functools.wraps(func)(_) | Wrap the deprecated function to print out deprecation warnings | Below is the instruction that describes the task:
### Input:
Wrap the deprecated function to print out deprecation warnings
### Response:
def _wrap_deprecated_function(func, message):
""" Wrap the deprecated function to print out deprecation warnings"""
def _(col):
warnings.warn(message, DeprecationWarning)
return func(col)
return functools.wraps(func)(_) |
def update_countries_geo_zone_by_id(cls, countries_geo_zone_id, countries_geo_zone, **kwargs):
"""Update CountriesGeoZone
Update attributes of CountriesGeoZone
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_countries_geo_zone_by_id(countries_geo_zone_id, countries_geo_zone, async=True)
>>> result = thread.get()
:param async bool
:param str countries_geo_zone_id: ID of countriesGeoZone to update. (required)
:param CountriesGeoZone countries_geo_zone: Attributes of countriesGeoZone to update. (required)
:return: CountriesGeoZone
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_countries_geo_zone_by_id_with_http_info(countries_geo_zone_id, countries_geo_zone, **kwargs)
else:
(data) = cls._update_countries_geo_zone_by_id_with_http_info(countries_geo_zone_id, countries_geo_zone, **kwargs)
return data | Update CountriesGeoZone
Update attributes of CountriesGeoZone
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_countries_geo_zone_by_id(countries_geo_zone_id, countries_geo_zone, async=True)
>>> result = thread.get()
:param async bool
:param str countries_geo_zone_id: ID of countriesGeoZone to update. (required)
:param CountriesGeoZone countries_geo_zone: Attributes of countriesGeoZone to update. (required)
:return: CountriesGeoZone
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Update CountriesGeoZone
Update attributes of CountriesGeoZone
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_countries_geo_zone_by_id(countries_geo_zone_id, countries_geo_zone, async=True)
>>> result = thread.get()
:param async bool
:param str countries_geo_zone_id: ID of countriesGeoZone to update. (required)
:param CountriesGeoZone countries_geo_zone: Attributes of countriesGeoZone to update. (required)
:return: CountriesGeoZone
If the method is called asynchronously,
returns the request thread.
### Response:
def update_countries_geo_zone_by_id(cls, countries_geo_zone_id, countries_geo_zone, **kwargs):
    """Update CountriesGeoZone

    Update attributes of CountriesGeoZone.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.update_countries_geo_zone_by_id(countries_geo_zone_id, countries_geo_zone, async=True)
    >>> result = thread.get()

    :param async bool
    :param str countries_geo_zone_id: ID of countriesGeoZone to update. (required)
    :param CountriesGeoZone countries_geo_zone: Attributes of countriesGeoZone to update. (required)
    :return: CountriesGeoZone
             If the method is called asynchronously,
             returns the request thread.
    """
    # Both the async and the sync path delegate to the same low-level
    # helper; with _return_http_data_only set, the sync call already yields
    # the deserialized data, and the async call yields the request thread.
    kwargs['_return_http_data_only'] = True
    return cls._update_countries_geo_zone_by_id_with_http_info(
        countries_geo_zone_id, countries_geo_zone, **kwargs)
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(reversed_chunks[-1][:space_left])
reversed_chunks[-1] = reversed_chunks[-1][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop()) | _handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line. | Below is the instruction that describes the task:
### Input:
_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
### Response:
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(reversed_chunks[-1][:space_left])
reversed_chunks[-1] = reversed_chunks[-1][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop()) |
def process_pfa_results(network, pypsa, timesteps):
    r"""
    Assign values from PyPSA to
    :meth:`results <edisgo.grid.network.Network.results>`

    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    pypsa : :pypsa:`pypsa.Network<network>`
        The PyPSA `Network container
        <https://www.pypsa.org/doc/components.html#network>`_
    timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
        Time steps for which latest power flow analysis was conducted for and
        for which to retrieve pypsa results.

    Notes
    -----
    P and Q (and respectively later S) are returned from the line ending/
    transformer side with highest apparent power S, exemplary written as

    .. math::
        S_{max} = max(\sqrt{P0^2 + Q0^2}, \sqrt{P1^2 + Q1^2})

        P = P0P1(S_{max})

        Q = Q0Q1(S_{max})

    See Also
    --------
    :class:`~.grid.network.Results`
        Understand how results of power flow analysis are structured in eDisGo.
    """
    # get the absolute losses in the system
    # subtracting total generation (including slack) from total load
    # NOTE(review): the 1e3 factor presumably converts PyPSA's MW/Mvar to
    # kW/kvar — confirm the unit convention used by Results.
    grid_losses = {'p': 1e3 * (pypsa.generators_t['p'].sum(axis=1) -
                               pypsa.loads_t['p'].sum(axis=1)),
                   'q': 1e3 * (pypsa.generators_t['q'].sum(axis=1) -
                               pypsa.loads_t['q'].sum(axis=1))}
    network.results.grid_losses = pd.DataFrame(grid_losses).loc[timesteps, :]
    # get slack results (power exchanged with the overlying HV grid)
    grid_exchanges = {'p': 1e3 * (pypsa.generators_t['p']['Generator_slack']),
                      'q': 1e3 * (pypsa.generators_t['q']['Generator_slack'])}
    network.results.hv_mv_exchanges = pd.DataFrame(grid_exchanges).loc[timesteps, :]
    # get p and q of lines, LV transformers and MV Station (slack generator)
    # in absolute values; suffix 0/1 denotes the two component endings
    q0 = pd.concat(
        [np.abs(pypsa.lines_t['q0']),
         np.abs(pypsa.transformers_t['q0']),
         np.abs(pypsa.generators_t['q']['Generator_slack'].rename(
             repr(network.mv_grid.station)))], axis=1).loc[timesteps, :]
    q1 = pd.concat(
        [np.abs(pypsa.lines_t['q1']),
         np.abs(pypsa.transformers_t['q1']),
         np.abs(pypsa.generators_t['q']['Generator_slack'].rename(
             repr(network.mv_grid.station)))], axis=1).loc[timesteps, :]
    p0 = pd.concat(
        [np.abs(pypsa.lines_t['p0']),
         np.abs(pypsa.transformers_t['p0']),
         np.abs(pypsa.generators_t['p']['Generator_slack'].rename(
             repr(network.mv_grid.station)))], axis=1).loc[timesteps, :]
    p1 = pd.concat(
        [np.abs(pypsa.lines_t['p1']),
         np.abs(pypsa.transformers_t['p1']),
         np.abs(pypsa.generators_t['p']['Generator_slack'].rename(
             repr(network.mv_grid.station)))], axis=1).loc[timesteps, :]
    # determine apparent power per ending of lines/transformers
    s0 = np.hypot(p0, q0)
    s1 = np.hypot(p1, q1)
    # choose p and q from line ending with max(s0,s1)
    network.results.pfa_p = p0.where(s0 > s1, p1) * 1e3
    network.results.pfa_q = q0.where(s0 > s1, q1) * 1e3
    # Get voltage levels at each line as the average of the buses at both
    # of its ends (bus0/bus1)
    lines_bus0 = pypsa.lines['bus0'].to_dict()
    bus0_v_mag_pu = pypsa.buses_t['v_mag_pu'].T.loc[
        list(lines_bus0.values()), :].copy()
    bus0_v_mag_pu.index = list(lines_bus0.keys())
    lines_bus1 = pypsa.lines['bus1'].to_dict()
    bus1_v_mag_pu = pypsa.buses_t['v_mag_pu'].T.loc[
        list(lines_bus1.values()), :].copy()
    bus1_v_mag_pu.index = list(lines_bus1.keys())
    line_voltage_avg = 0.5 * (bus0_v_mag_pu.loc[:, timesteps] +
                              bus1_v_mag_pu.loc[:, timesteps])
    # Line currents from apparent power and the averaged per-unit voltage.
    # NOTE(review): the placement of the sqrt(3) factor depends on the unit
    # conventions of s_res() and v_nom — confirm against Results docs.
    network.results._i_res = \
        network.results.s_res()[pypsa.lines_t['q0'].columns].truediv(
            pypsa.lines['v_nom'] * line_voltage_avg.T,
            axis='columns') * sqrt(3)
    # process results at nodes: build mappings from PyPSA bus names back to
    # the repr() of eDisGo components so voltage columns can be renamed
    generators_names = [repr(g) for g in network.mv_grid.generators]
    generators_mapping = {v: k for k, v in
                          pypsa.generators.loc[generators_names][
                              'bus'].to_dict().items()}
    storages_names = [repr(g) for g in
                      network.mv_grid.graph.nodes_by_attribute('storage')]
    storages_mapping = {v: k for k, v in
                        pypsa.storage_units.loc[storages_names][
                            'bus'].to_dict().items()}
    branch_t_names = [repr(bt) for bt in
                      network.mv_grid.graph.nodes_by_attribute('branch_tee')]
    # Bus names for these components follow the 'Bus_<repr>' pattern.
    branch_t_mapping = {'_'.join(['Bus', v]): v for v in branch_t_names}
    mv_station_names = [repr(m) for m in
                        network.mv_grid.graph.nodes_by_attribute('mv_station')]
    mv_station_mapping_sec = {'_'.join(['Bus', v]): v for v in
                              mv_station_names}
    mv_switch_disconnector_names = [repr(sd) for sd in
                                    network.mv_grid.graph.nodes_by_attribute(
                                        'mv_disconnecting_point')]
    mv_switch_disconnector_mapping = {'_'.join(['Bus', v]): v for v in
                                      mv_switch_disconnector_names}
    # LV stations have one bus per side: primary ('mv') and secondary ('lv').
    lv_station_mapping_pri = {
        '_'.join(['Bus', l.__repr__('mv')]): repr(l)
        for l in network.mv_grid.graph.nodes_by_attribute('lv_station')}
    lv_station_mapping_sec = {
        '_'.join(['Bus', l.__repr__('lv')]): repr(l)
        for l in network.mv_grid.graph.nodes_by_attribute('lv_station')}
    loads_names = [repr(lo) for lo in
                   network.mv_grid.graph.nodes_by_attribute('load')]
    loads_mapping = {v: k for k, v in
                     pypsa.loads.loc[loads_names][
                         'bus'].to_dict().items()}
    # Collect the corresponding component names across all LV grids.
    lv_generators_names = []
    lv_storages_names = []
    lv_branch_t_names = []
    lv_loads_names = []
    for lv_grid in network.mv_grid.lv_grids:
        lv_generators_names.extend([repr(g) for g in
                                    lv_grid.graph.nodes_by_attribute(
                                        'generator')])
        lv_storages_names.extend([repr(g) for g in
                                  lv_grid.graph.nodes_by_attribute(
                                      'storage')])
        lv_branch_t_names.extend([repr(bt) for bt in
                                  lv_grid.graph.nodes_by_attribute('branch_tee')])
        lv_loads_names.extend([repr(lo) for lo in
                               lv_grid.graph.nodes_by_attribute('load')])
    lv_generators_mapping = {v: k for k, v in
                             pypsa.generators.loc[lv_generators_names][
                                 'bus'].to_dict().items()}
    lv_storages_mapping = {v: k for k, v in
                           pypsa.storage_units.loc[lv_storages_names][
                               'bus'].to_dict().items()}
    lv_branch_t_mapping = {'_'.join(['Bus', v]): v for v in lv_branch_t_names}
    lv_loads_mapping = {v: k for k, v in pypsa.loads.loc[lv_loads_names][
        'bus'].to_dict().items()}
    # Merge all partial mappings; later entries win on key collisions.
    names_mapping = {
        **generators_mapping,
        **storages_mapping,
        **branch_t_mapping,
        **mv_station_mapping_sec,
        **lv_station_mapping_pri,
        **lv_station_mapping_sec,
        **mv_switch_disconnector_mapping,
        **loads_mapping,
        **lv_generators_mapping,
        **lv_storages_mapping,
        **lv_loads_mapping,
        **lv_branch_t_mapping
    }
    # write voltage levels obtained from power flow to results object,
    # split into an MV and an LV part with component-repr column labels
    pfa_v_mag_pu_mv = (pypsa.buses_t['v_mag_pu'][
        list(generators_mapping) +
        list(storages_mapping) +
        list(branch_t_mapping) +
        list(mv_station_mapping_sec) +
        list(mv_switch_disconnector_mapping) +
        list(lv_station_mapping_pri) +
        list(loads_mapping)]).rename(columns=names_mapping)
    pfa_v_mag_pu_lv = (pypsa.buses_t['v_mag_pu'][
        list(lv_station_mapping_sec) +
        list(lv_generators_mapping) +
        list(lv_storages_mapping) +
        list(lv_branch_t_mapping) +
        list(lv_loads_mapping)]).rename(columns=names_mapping)
    network.results.pfa_v_mag_pu = pd.concat(
        {'mv': pfa_v_mag_pu_mv.loc[timesteps, :],
         'lv': pfa_v_mag_pu_lv.loc[timesteps, :]}, axis=1)
:meth:`results <edisgo.grid.network.Network.results>`
Parameters
----------
network : Network
The eDisGo grid topology model overall container
pypsa : :pypsa:`pypsa.Network<network>`
The PyPSA `Network container
<https://www.pypsa.org/doc/components.html#network>`_
timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
Time steps for which latest power flow analysis was conducted for and
for which to retrieve pypsa results.
Notes
-----
P and Q (and respectively later S) are returned from the line ending/
transformer side with highest apparent power S, exemplary written as
.. math::
S_{max} = max(\sqrt{P0^2 + Q0^2}, \sqrt{P1^2 + Q1^2})
P = P0P1(S_{max})
Q = Q0Q1(S_{max})
See Also
--------
:class:`~.grid.network.Results`
Understand how results of power flow analysis are structured in eDisGo. | Below is the instruction that describes the task:
### Input:
Assign values from PyPSA to
:meth:`results <edisgo.grid.network.Network.results>`
Parameters
----------
network : Network
The eDisGo grid topology model overall container
pypsa : :pypsa:`pypsa.Network<network>`
The PyPSA `Network container
<https://www.pypsa.org/doc/components.html#network>`_
timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
Time steps for which latest power flow analysis was conducted for and
for which to retrieve pypsa results.
Notes
-----
P and Q (and respectively later S) are returned from the line ending/
transformer side with highest apparent power S, exemplary written as
.. math::
S_{max} = max(\sqrt{P0^2 + Q0^2}, \sqrt{P1^2 + Q1^2})
P = P0P1(S_{max})
Q = Q0Q1(S_{max})
See Also
--------
:class:`~.grid.network.Results`
Understand how results of power flow analysis are structured in eDisGo.
### Response:
def process_pfa_results(network, pypsa, timesteps):
"""
Assing values from PyPSA to
:meth:`results <edisgo.grid.network.Network.results>`
Parameters
----------
network : Network
The eDisGo grid topology model overall container
pypsa : :pypsa:`pypsa.Network<network>`
The PyPSA `Network container
<https://www.pypsa.org/doc/components.html#network>`_
timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
Time steps for which latest power flow analysis was conducted for and
for which to retrieve pypsa results.
Notes
-----
P and Q (and respectively later S) are returned from the line ending/
transformer side with highest apparent power S, exemplary written as
.. math::
S_{max} = max(\sqrt{P0^2 + Q0^2}, \sqrt{P1^2 + Q1^2})
P = P0P1(S_{max})
Q = Q0Q1(S_{max})
See Also
--------
:class:`~.grid.network.Results`
Understand how results of power flow analysis are structured in eDisGo.
"""
# get the absolute losses in the system
# subtracting total generation (including slack) from total load
grid_losses = {'p': 1e3 * (pypsa.generators_t['p'].sum(axis=1) -
pypsa.loads_t['p'].sum(axis=1)),
'q': 1e3 * (pypsa.generators_t['q'].sum(axis=1) -
pypsa.loads_t['q'].sum(axis=1))}
network.results.grid_losses = pd.DataFrame(grid_losses).loc[timesteps, :]
# get slack results
grid_exchanges = {'p': 1e3 * (pypsa.generators_t['p']['Generator_slack']),
'q': 1e3 * (pypsa.generators_t['q']['Generator_slack'])}
network.results.hv_mv_exchanges = pd.DataFrame(grid_exchanges).loc[timesteps, :]
# get p and q of lines, LV transformers and MV Station (slack generator)
# in absolute values
q0 = pd.concat(
[np.abs(pypsa.lines_t['q0']),
np.abs(pypsa.transformers_t['q0']),
np.abs(pypsa.generators_t['q']['Generator_slack'].rename(
repr(network.mv_grid.station)))], axis=1).loc[timesteps, :]
q1 = pd.concat(
[np.abs(pypsa.lines_t['q1']),
np.abs(pypsa.transformers_t['q1']),
np.abs(pypsa.generators_t['q']['Generator_slack'].rename(
repr(network.mv_grid.station)))], axis=1).loc[timesteps, :]
p0 = pd.concat(
[np.abs(pypsa.lines_t['p0']),
np.abs(pypsa.transformers_t['p0']),
np.abs(pypsa.generators_t['p']['Generator_slack'].rename(
repr(network.mv_grid.station)))], axis=1).loc[timesteps, :]
p1 = pd.concat(
[np.abs(pypsa.lines_t['p1']),
np.abs(pypsa.transformers_t['p1']),
np.abs(pypsa.generators_t['p']['Generator_slack'].rename(
repr(network.mv_grid.station)))], axis=1).loc[timesteps, :]
# determine apparent power and line endings/transformers' side
s0 = np.hypot(p0, q0)
s1 = np.hypot(p1, q1)
# choose p and q from line ending with max(s0,s1)
network.results.pfa_p = p0.where(s0 > s1, p1) * 1e3
network.results.pfa_q = q0.where(s0 > s1, q1) * 1e3
lines_bus0 = pypsa.lines['bus0'].to_dict()
bus0_v_mag_pu = pypsa.buses_t['v_mag_pu'].T.loc[
list(lines_bus0.values()), :].copy()
bus0_v_mag_pu.index = list(lines_bus0.keys())
lines_bus1 = pypsa.lines['bus1'].to_dict()
bus1_v_mag_pu = pypsa.buses_t['v_mag_pu'].T.loc[
list(lines_bus1.values()), :].copy()
bus1_v_mag_pu.index = list(lines_bus1.keys())
line_voltage_avg = 0.5 * (bus0_v_mag_pu.loc[:, timesteps] +
bus1_v_mag_pu.loc[:, timesteps])
# Get voltage levels at line (avg. of buses at both sides)
network.results._i_res = \
network.results.s_res()[pypsa.lines_t['q0'].columns].truediv(
pypsa.lines['v_nom'] * line_voltage_avg.T,
axis='columns') * sqrt(3)
# process results at nodes
generators_names = [repr(g) for g in network.mv_grid.generators]
generators_mapping = {v: k for k, v in
pypsa.generators.loc[generators_names][
'bus'].to_dict().items()}
storages_names = [repr(g) for g in
network.mv_grid.graph.nodes_by_attribute('storage')]
storages_mapping = {v: k for k, v in
pypsa.storage_units.loc[storages_names][
'bus'].to_dict().items()}
branch_t_names = [repr(bt) for bt in
network.mv_grid.graph.nodes_by_attribute('branch_tee')]
branch_t_mapping = {'_'.join(['Bus', v]): v for v in branch_t_names}
mv_station_names = [repr(m) for m in
network.mv_grid.graph.nodes_by_attribute('mv_station')]
mv_station_mapping_sec = {'_'.join(['Bus', v]): v for v in
mv_station_names}
mv_switch_disconnector_names = [repr(sd) for sd in
network.mv_grid.graph.nodes_by_attribute(
'mv_disconnecting_point')]
mv_switch_disconnector_mapping = {'_'.join(['Bus', v]): v for v in
mv_switch_disconnector_names}
lv_station_mapping_pri = {
'_'.join(['Bus', l.__repr__('mv')]): repr(l)
for l in network.mv_grid.graph.nodes_by_attribute('lv_station')}
lv_station_mapping_sec = {
'_'.join(['Bus', l.__repr__('lv')]): repr(l)
for l in network.mv_grid.graph.nodes_by_attribute('lv_station')}
loads_names = [repr(lo) for lo in
network.mv_grid.graph.nodes_by_attribute('load')]
loads_mapping = {v: k for k, v in
pypsa.loads.loc[loads_names][
'bus'].to_dict().items()}
lv_generators_names = []
lv_storages_names = []
lv_branch_t_names = []
lv_loads_names = []
for lv_grid in network.mv_grid.lv_grids:
lv_generators_names.extend([repr(g) for g in
lv_grid.graph.nodes_by_attribute(
'generator')])
lv_storages_names.extend([repr(g) for g in
lv_grid.graph.nodes_by_attribute(
'storage')])
lv_branch_t_names.extend([repr(bt) for bt in
lv_grid.graph.nodes_by_attribute('branch_tee')])
lv_loads_names.extend([repr(lo) for lo in
lv_grid.graph.nodes_by_attribute('load')])
lv_generators_mapping = {v: k for k, v in
pypsa.generators.loc[lv_generators_names][
'bus'].to_dict().items()}
lv_storages_mapping = {v: k for k, v in
pypsa.storage_units.loc[lv_storages_names][
'bus'].to_dict().items()}
lv_branch_t_mapping = {'_'.join(['Bus', v]): v for v in lv_branch_t_names}
lv_loads_mapping = {v: k for k, v in pypsa.loads.loc[lv_loads_names][
'bus'].to_dict().items()}
names_mapping = {
**generators_mapping,
**storages_mapping,
**branch_t_mapping,
**mv_station_mapping_sec,
**lv_station_mapping_pri,
**lv_station_mapping_sec,
**mv_switch_disconnector_mapping,
**loads_mapping,
**lv_generators_mapping,
**lv_storages_mapping,
**lv_loads_mapping,
**lv_branch_t_mapping
}
# write voltage levels obtained from power flow to results object
pfa_v_mag_pu_mv = (pypsa.buses_t['v_mag_pu'][
list(generators_mapping) +
list(storages_mapping) +
list(branch_t_mapping) +
list(mv_station_mapping_sec) +
list(mv_switch_disconnector_mapping) +
list(lv_station_mapping_pri) +
list(loads_mapping)]).rename(columns=names_mapping)
pfa_v_mag_pu_lv = (pypsa.buses_t['v_mag_pu'][
list(lv_station_mapping_sec) +
list(lv_generators_mapping) +
list(lv_storages_mapping) +
list(lv_branch_t_mapping) +
list(lv_loads_mapping)]).rename(columns=names_mapping)
network.results.pfa_v_mag_pu = pd.concat(
{'mv': pfa_v_mag_pu_mv.loc[timesteps, :],
'lv': pfa_v_mag_pu_lv.loc[timesteps, :]}, axis=1) |
def playlist_category(self):
"""doc: http://open.youku.com/docs/doc?id=94
"""
url = 'https://openapi.youku.com/v2/schemas/playlist/category.json'
r = requests.get(url)
check_error(r)
return r.json() | doc: http://open.youku.com/docs/doc?id=94 | Below is the the instruction that describes the task:
### Input:
doc: http://open.youku.com/docs/doc?id=94
### Response:
def playlist_category(self):
"""doc: http://open.youku.com/docs/doc?id=94
"""
url = 'https://openapi.youku.com/v2/schemas/playlist/category.json'
r = requests.get(url)
check_error(r)
return r.json() |
def cli(env, identifier, sortby, cpu, domain, hostname, memory, tag, columns):
"""List guests which are in a dedicated host server."""
mgr = SoftLayer.DedicatedHostManager(env.client)
guests = mgr.list_guests(host_id=identifier,
cpus=cpu,
hostname=hostname,
domain=domain,
memory=memory,
tags=tag,
mask=columns.mask())
table = formatting.Table(columns.columns)
table.sortby = sortby
for guest in guests:
table.add_row([value or formatting.blank()
for value in columns.row(guest)])
env.fout(table) | List guests which are in a dedicated host server. | Below is the the instruction that describes the task:
### Input:
List guests which are in a dedicated host server.
### Response:
def cli(env, identifier, sortby, cpu, domain, hostname, memory, tag, columns):
"""List guests which are in a dedicated host server."""
mgr = SoftLayer.DedicatedHostManager(env.client)
guests = mgr.list_guests(host_id=identifier,
cpus=cpu,
hostname=hostname,
domain=domain,
memory=memory,
tags=tag,
mask=columns.mask())
table = formatting.Table(columns.columns)
table.sortby = sortby
for guest in guests:
table.add_row([value or formatting.blank()
for value in columns.row(guest)])
env.fout(table) |
def version():
"""Return version string."""
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)),
'pep8radius',
'main.py')) as input_file:
for line in input_file:
if line.startswith('__version__'):
return parse(line).body[0].value.s | Return version string. | Below is the the instruction that describes the task:
### Input:
Return version string.
### Response:
def version():
"""Return version string."""
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)),
'pep8radius',
'main.py')) as input_file:
for line in input_file:
if line.startswith('__version__'):
return parse(line).body[0].value.s |
def _resolve_requirements_chain(requirements):
'''
Return an array of requirements file paths that can be used to complete
the no_chown==False && user != None conundrum
'''
chain = []
if isinstance(requirements, six.string_types):
requirements = [requirements]
for req_file in requirements:
chain.append(req_file)
chain.extend(_resolve_requirements_chain(_find_req(req_file)))
return chain | Return an array of requirements file paths that can be used to complete
the no_chown==False && user != None conundrum | Below is the the instruction that describes the task:
### Input:
Return an array of requirements file paths that can be used to complete
the no_chown==False && user != None conundrum
### Response:
def _resolve_requirements_chain(requirements):
'''
Return an array of requirements file paths that can be used to complete
the no_chown==False && user != None conundrum
'''
chain = []
if isinstance(requirements, six.string_types):
requirements = [requirements]
for req_file in requirements:
chain.append(req_file)
chain.extend(_resolve_requirements_chain(_find_req(req_file)))
return chain |
def apply_environ(self):
"""Apply changes to target environ.
"""
if self.manager is None:
raise RezSystemError("You must call 'set_manager' on a Python rex "
"interpreter before using it.")
self.target_environ.update(self.manager.environ) | Apply changes to target environ. | Below is the the instruction that describes the task:
### Input:
Apply changes to target environ.
### Response:
def apply_environ(self):
"""Apply changes to target environ.
"""
if self.manager is None:
raise RezSystemError("You must call 'set_manager' on a Python rex "
"interpreter before using it.")
self.target_environ.update(self.manager.environ) |
def file_dict(*packages, **kwargs):
'''
.. versionchanged: 2016.3.0
List the files that belong to a package.
CLI Examples:
.. code-block:: bash
salt '*' pkg.file_dict nginx
salt '*' pkg.file_dict nginx varnish
'''
errors = []
files = {}
for package in packages:
cmd = ['pkg_info', '-qL', package]
ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace')
files[package] = []
for line in ret['stderr'].splitlines():
errors.append(line)
for line in ret['stdout'].splitlines():
if line.startswith('/'):
files[package].append(line)
else:
continue # unexpected string
ret = {'errors': errors, 'files': files}
for field in list(ret):
if not ret[field] or ret[field] == '':
del ret[field]
return ret | .. versionchanged: 2016.3.0
List the files that belong to a package.
CLI Examples:
.. code-block:: bash
salt '*' pkg.file_dict nginx
salt '*' pkg.file_dict nginx varnish | Below is the the instruction that describes the task:
### Input:
.. versionchanged: 2016.3.0
List the files that belong to a package.
CLI Examples:
.. code-block:: bash
salt '*' pkg.file_dict nginx
salt '*' pkg.file_dict nginx varnish
### Response:
def file_dict(*packages, **kwargs):
'''
.. versionchanged: 2016.3.0
List the files that belong to a package.
CLI Examples:
.. code-block:: bash
salt '*' pkg.file_dict nginx
salt '*' pkg.file_dict nginx varnish
'''
errors = []
files = {}
for package in packages:
cmd = ['pkg_info', '-qL', package]
ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace')
files[package] = []
for line in ret['stderr'].splitlines():
errors.append(line)
for line in ret['stdout'].splitlines():
if line.startswith('/'):
files[package].append(line)
else:
continue # unexpected string
ret = {'errors': errors, 'files': files}
for field in list(ret):
if not ret[field] or ret[field] == '':
del ret[field]
return ret |
def x11(self, data: ['SASdata', str] = None,
arima: str = None,
by: [str, list] = None,
id: [str, list] = None,
macurves: str = None,
monthly: str = None,
output: [str, bool, 'SASdata'] = None,
pdweights: str = None,
quarterly: str = None,
sspan: str = None,
tables: str = None,
var: str = None,
procopts: [str, list] = None,
stmtpassthrough: [str, list] = None,
**kwargs: dict) -> 'SASresults':
"""
Python method to call the X11 procedure
Documentation link:
https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=etsug&docsetTarget=etsug_x11_syntax.htm&locale=en
Either the MONTHLY or QUARTERLY statement must be specified, depending on the type of time series data you have.
The PDWEIGHTS and MACURVES statements can be used only with the MONTHLY statement. The TABLES statement controls
the printing of tables, while the OUTPUT statement controls the creation of the OUT= data set.
:param data: SASdata object or string. This parameter is required.
:parm arima: The arima variable can only be a string type.
:parm by: The by variable can be a string or list type.
:parm id: The id variable can be a string or list type.
:parm macurves: The macurves variable can only be a string type.
:parm monthly: The monthly variable can only be a string type.
:parm output: The output variable can be a string, boolean or SASdata type. The member name for a boolean is "_output".
:parm pdweights: The pdweights variable can only be a string type.
:parm quarterly: The quarterly variable can only be a string type.
:parm sspan: The sspan variable can only be a string type.
:parm tables: The tables variable can only be a string type.
:parm var: The var variable can only be a string type.
:parm procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:parm stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object
""" | Python method to call the X11 procedure
Documentation link:
https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=etsug&docsetTarget=etsug_x11_syntax.htm&locale=en
Either the MONTHLY or QUARTERLY statement must be specified, depending on the type of time series data you have.
The PDWEIGHTS and MACURVES statements can be used only with the MONTHLY statement. The TABLES statement controls
the printing of tables, while the OUTPUT statement controls the creation of the OUT= data set.
:param data: SASdata object or string. This parameter is required.
:parm arima: The arima variable can only be a string type.
:parm by: The by variable can be a string or list type.
:parm id: The id variable can be a string or list type.
:parm macurves: The macurves variable can only be a string type.
:parm monthly: The monthly variable can only be a string type.
:parm output: The output variable can be a string, boolean or SASdata type. The member name for a boolean is "_output".
:parm pdweights: The pdweights variable can only be a string type.
:parm quarterly: The quarterly variable can only be a string type.
:parm sspan: The sspan variable can only be a string type.
:parm tables: The tables variable can only be a string type.
:parm var: The var variable can only be a string type.
:parm procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:parm stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object | Below is the the instruction that describes the task:
### Input:
Python method to call the X11 procedure
Documentation link:
https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=etsug&docsetTarget=etsug_x11_syntax.htm&locale=en
Either the MONTHLY or QUARTERLY statement must be specified, depending on the type of time series data you have.
The PDWEIGHTS and MACURVES statements can be used only with the MONTHLY statement. The TABLES statement controls
the printing of tables, while the OUTPUT statement controls the creation of the OUT= data set.
:param data: SASdata object or string. This parameter is required.
:parm arima: The arima variable can only be a string type.
:parm by: The by variable can be a string or list type.
:parm id: The id variable can be a string or list type.
:parm macurves: The macurves variable can only be a string type.
:parm monthly: The monthly variable can only be a string type.
:parm output: The output variable can be a string, boolean or SASdata type. The member name for a boolean is "_output".
:parm pdweights: The pdweights variable can only be a string type.
:parm quarterly: The quarterly variable can only be a string type.
:parm sspan: The sspan variable can only be a string type.
:parm tables: The tables variable can only be a string type.
:parm var: The var variable can only be a string type.
:parm procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:parm stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object
### Response:
def x11(self, data: ['SASdata', str] = None,
arima: str = None,
by: [str, list] = None,
id: [str, list] = None,
macurves: str = None,
monthly: str = None,
output: [str, bool, 'SASdata'] = None,
pdweights: str = None,
quarterly: str = None,
sspan: str = None,
tables: str = None,
var: str = None,
procopts: [str, list] = None,
stmtpassthrough: [str, list] = None,
**kwargs: dict) -> 'SASresults':
"""
Python method to call the X11 procedure
Documentation link:
https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=etsug&docsetTarget=etsug_x11_syntax.htm&locale=en
Either the MONTHLY or QUARTERLY statement must be specified, depending on the type of time series data you have.
The PDWEIGHTS and MACURVES statements can be used only with the MONTHLY statement. The TABLES statement controls
the printing of tables, while the OUTPUT statement controls the creation of the OUT= data set.
:param data: SASdata object or string. This parameter is required.
:parm arima: The arima variable can only be a string type.
:parm by: The by variable can be a string or list type.
:parm id: The id variable can be a string or list type.
:parm macurves: The macurves variable can only be a string type.
:parm monthly: The monthly variable can only be a string type.
:parm output: The output variable can be a string, boolean or SASdata type. The member name for a boolean is "_output".
:parm pdweights: The pdweights variable can only be a string type.
:parm quarterly: The quarterly variable can only be a string type.
:parm sspan: The sspan variable can only be a string type.
:parm tables: The tables variable can only be a string type.
:parm var: The var variable can only be a string type.
:parm procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:parm stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object
""" |
def get_friends(self):
"""Get user's friends."""
for k, v in iter(self.user_data.language_data.items()):
data = []
for friend in v['points_ranking_data']:
temp = {'username': friend['username'],
'id': friend['id'],
'points': friend['points_data']['total'],
'languages': [i['language_string'] for i in
friend['points_data']['languages']]}
data.append(temp)
return data | Get user's friends. | Below is the the instruction that describes the task:
### Input:
Get user's friends.
### Response:
def get_friends(self):
"""Get user's friends."""
for k, v in iter(self.user_data.language_data.items()):
data = []
for friend in v['points_ranking_data']:
temp = {'username': friend['username'],
'id': friend['id'],
'points': friend['points_data']['total'],
'languages': [i['language_string'] for i in
friend['points_data']['languages']]}
data.append(temp)
return data |
def _distance_covariance_sqr_naive(x, y, exponent=1):
"""
Naive biased estimator for distance covariance.
Computes the unbiased estimator for distance covariance between two
matrices, using an :math:`O(N^2)` algorithm.
"""
a = _distance_matrix(x, exponent=exponent)
b = _distance_matrix(y, exponent=exponent)
return mean_product(a, b) | Naive biased estimator for distance covariance.
Computes the unbiased estimator for distance covariance between two
matrices, using an :math:`O(N^2)` algorithm. | Below is the the instruction that describes the task:
### Input:
Naive biased estimator for distance covariance.
Computes the unbiased estimator for distance covariance between two
matrices, using an :math:`O(N^2)` algorithm.
### Response:
def _distance_covariance_sqr_naive(x, y, exponent=1):
"""
Naive biased estimator for distance covariance.
Computes the unbiased estimator for distance covariance between two
matrices, using an :math:`O(N^2)` algorithm.
"""
a = _distance_matrix(x, exponent=exponent)
b = _distance_matrix(y, exponent=exponent)
return mean_product(a, b) |
def cgi_method_is_post(environ: Dict[str, str]) -> bool:
"""
Determines if the CGI method was ``POST``, given the CGI environment.
"""
method = environ.get("REQUEST_METHOD", None)
if not method:
return False
return method.upper() == "POST" | Determines if the CGI method was ``POST``, given the CGI environment. | Below is the the instruction that describes the task:
### Input:
Determines if the CGI method was ``POST``, given the CGI environment.
### Response:
def cgi_method_is_post(environ: Dict[str, str]) -> bool:
"""
Determines if the CGI method was ``POST``, given the CGI environment.
"""
method = environ.get("REQUEST_METHOD", None)
if not method:
return False
return method.upper() == "POST" |
def _role_present(ret, IdentityPoolId, AuthenticatedRole, UnauthenticatedRole, conn_params):
'''
Helper function to set the Roles to the identity pool
'''
r = __salt__['boto_cognitoidentity.get_identity_pool_roles'](IdentityPoolName='',
IdentityPoolId=IdentityPoolId,
**conn_params)
if r.get('error'):
ret['result'] = False
failure_comment = ('Failed to get existing identity pool roles: '
'{0}'.format(r['error'].get('message', r['error'])))
ret['comment'] = '{0}\n{1}'.format(ret['comment'], failure_comment)
return
existing_identity_pool_role = r.get('identity_pool_roles')[0].get('Roles', {})
r = __salt__['boto_cognitoidentity.set_identity_pool_roles'](IdentityPoolId=IdentityPoolId,
AuthenticatedRole=AuthenticatedRole,
UnauthenticatedRole=UnauthenticatedRole,
**conn_params)
if not r.get('set'):
ret['result'] = False
failure_comment = ('Failed to set roles: '
'{0}'.format(r['error'].get('message', r['error'])))
ret['comment'] = '{0}\n{1}'.format(ret['comment'], failure_comment)
return
updated_identity_pool_role = r.get('roles')
if existing_identity_pool_role != updated_identity_pool_role:
if not ret['changes']:
ret['changes']['old'] = dict()
ret['changes']['new'] = dict()
ret['changes']['old']['Roles'] = existing_identity_pool_role
ret['changes']['new']['Roles'] = r.get('roles')
ret['comment'] = ('{0}\n{1}'.format(ret['comment'], 'identity pool roles updated.'))
else:
ret['comment'] = ('{0}\n{1}'.format(ret['comment'], 'identity pool roles is already current.'))
return | Helper function to set the Roles to the identity pool | Below is the the instruction that describes the task:
### Input:
Helper function to set the Roles to the identity pool
### Response:
def _role_present(ret, IdentityPoolId, AuthenticatedRole, UnauthenticatedRole, conn_params):
'''
Helper function to set the Roles to the identity pool
'''
r = __salt__['boto_cognitoidentity.get_identity_pool_roles'](IdentityPoolName='',
IdentityPoolId=IdentityPoolId,
**conn_params)
if r.get('error'):
ret['result'] = False
failure_comment = ('Failed to get existing identity pool roles: '
'{0}'.format(r['error'].get('message', r['error'])))
ret['comment'] = '{0}\n{1}'.format(ret['comment'], failure_comment)
return
existing_identity_pool_role = r.get('identity_pool_roles')[0].get('Roles', {})
r = __salt__['boto_cognitoidentity.set_identity_pool_roles'](IdentityPoolId=IdentityPoolId,
AuthenticatedRole=AuthenticatedRole,
UnauthenticatedRole=UnauthenticatedRole,
**conn_params)
if not r.get('set'):
ret['result'] = False
failure_comment = ('Failed to set roles: '
'{0}'.format(r['error'].get('message', r['error'])))
ret['comment'] = '{0}\n{1}'.format(ret['comment'], failure_comment)
return
updated_identity_pool_role = r.get('roles')
if existing_identity_pool_role != updated_identity_pool_role:
if not ret['changes']:
ret['changes']['old'] = dict()
ret['changes']['new'] = dict()
ret['changes']['old']['Roles'] = existing_identity_pool_role
ret['changes']['new']['Roles'] = r.get('roles')
ret['comment'] = ('{0}\n{1}'.format(ret['comment'], 'identity pool roles updated.'))
else:
ret['comment'] = ('{0}\n{1}'.format(ret['comment'], 'identity pool roles is already current.'))
return |
def add(self, word):
"""Add a word to the dictionary"""
if not word or word.strip() == '':
return
self.words[word]=word | Add a word to the dictionary | Below is the the instruction that describes the task:
### Input:
Add a word to the dictionary
### Response:
def add(self, word):
"""Add a word to the dictionary"""
if not word or word.strip() == '':
return
self.words[word]=word |
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key)) | Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0 | Below is the the instruction that describes the task:
### Input:
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
### Response:
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key)) |
def assign_method(stochastic, scale=None, verbose=-1):
"""
Returns a step method instance to handle a
variable. If several methods have the same competence,
it picks one arbitrarily (using set.pop()).
"""
# Retrieve set of best candidates
best_candidates = pick_best_methods(stochastic)
# Randomly grab and appropriate method
method = best_candidates.pop()
failure_header = """Failed attempting to automatically assign step method class %s
to stochastic variable %s. Try setting %s's competence method to return 0
and manually assigning it when appropriate. See the user guide.
Error message: """ % (method.__name__, stochastic.__name__, method.__name__)
try:
if scale:
out = method(stochastic, scale=scale, verbose=verbose)
else:
out = method(stochastic, verbose=verbose)
except:
a, b, c = sys.exc_info()
try:
args = list(b.args)
except AttributeError:
args = []
args.append(failure_header)
b.args = args
six.reraise(a, b, c)
return out | Returns a step method instance to handle a
variable. If several methods have the same competence,
it picks one arbitrarily (using set.pop()). | Below is the the instruction that describes the task:
### Input:
Returns a step method instance to handle a
variable. If several methods have the same competence,
it picks one arbitrarily (using set.pop()).
### Response:
def assign_method(stochastic, scale=None, verbose=-1):
"""
Returns a step method instance to handle a
variable. If several methods have the same competence,
it picks one arbitrarily (using set.pop()).
"""
# Retrieve set of best candidates
best_candidates = pick_best_methods(stochastic)
# Randomly grab and appropriate method
method = best_candidates.pop()
failure_header = """Failed attempting to automatically assign step method class %s
to stochastic variable %s. Try setting %s's competence method to return 0
and manually assigning it when appropriate. See the user guide.
Error message: """ % (method.__name__, stochastic.__name__, method.__name__)
try:
if scale:
out = method(stochastic, scale=scale, verbose=verbose)
else:
out = method(stochastic, verbose=verbose)
except:
a, b, c = sys.exc_info()
try:
args = list(b.args)
except AttributeError:
args = []
args.append(failure_header)
b.args = args
six.reraise(a, b, c)
return out |
def set_url(self, url=None):
"""Sets the url.
:param url: the new copyright
:type url: ``string``
:raise: ``InvalidArgument`` -- ``url`` is invalid
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
:raise: ``NullArgument`` -- ``url`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
if url is None:
raise NullArgument()
metadata = Metadata(**settings.METADATA['url'])
if metadata.is_read_only():
raise NoAccess()
if self._is_valid_input(url, metadata, array=False):
self._my_map['url'] = url
else:
raise InvalidArgument() | Sets the url.
:param url: the new copyright
:type url: ``string``
:raise: ``InvalidArgument`` -- ``url`` is invalid
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
:raise: ``NullArgument`` -- ``url`` is ``null``
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Sets the url.
:param url: the new copyright
:type url: ``string``
:raise: ``InvalidArgument`` -- ``url`` is invalid
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
:raise: ``NullArgument`` -- ``url`` is ``null``
*compliance: mandatory -- This method must be implemented.*
### Response:
def set_url(self, url=None):
"""Sets the url.
:param url: the new copyright
:type url: ``string``
:raise: ``InvalidArgument`` -- ``url`` is invalid
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
:raise: ``NullArgument`` -- ``url`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
if url is None:
raise NullArgument()
metadata = Metadata(**settings.METADATA['url'])
if metadata.is_read_only():
raise NoAccess()
if self._is_valid_input(url, metadata, array=False):
self._my_map['url'] = url
else:
raise InvalidArgument() |
def set_long_features(self, features, columns_to_set=[], partition=2):
""" Sets features of double the duration
Example: Setting 14 day RSIs to longer will create add a
feature column of a 28 day RSIs.
Args:
features: Pandas DataFrame instance with columns as numpy.float32 features.
columns_to_set: List of strings of feature names to make longer
partition: Int of how many dates to take into consideration
when evaluating technical analysis indicators.
Returns:
Pandas DataFrame instance with columns as numpy.float32 features.
"""
# Create long features DataFrame
features_long = self.set_features(partition=2 * partition)
# Remove features not specified by args.long
unwanted_features = [f for f in features.columns if f not in columns_to_set]
features_long = features_long.drop(unwanted_features, axis=1)
# Prefix long columns with 'long_' to fix naming conflicts
features_long.columns = ['long_{0}'.format(f) for f in features_long.columns]
# Merge the two DataFrames
skip = partition
return pd.concat([features[skip:].reset_index(drop=True),
features_long],
axis=1) | Sets features of double the duration
Example: Setting 14 day RSIs to longer will create add a
feature column of a 28 day RSIs.
Args:
features: Pandas DataFrame instance with columns as numpy.float32 features.
columns_to_set: List of strings of feature names to make longer
partition: Int of how many dates to take into consideration
when evaluating technical analysis indicators.
Returns:
Pandas DataFrame instance with columns as numpy.float32 features. | Below is the the instruction that describes the task:
### Input:
Sets features of double the duration
Example: Setting 14 day RSIs to longer will create add a
feature column of a 28 day RSIs.
Args:
features: Pandas DataFrame instance with columns as numpy.float32 features.
columns_to_set: List of strings of feature names to make longer
partition: Int of how many dates to take into consideration
when evaluating technical analysis indicators.
Returns:
Pandas DataFrame instance with columns as numpy.float32 features.
### Response:
def set_long_features(self, features, columns_to_set=[], partition=2):
""" Sets features of double the duration
Example: Setting 14 day RSIs to longer will create add a
feature column of a 28 day RSIs.
Args:
features: Pandas DataFrame instance with columns as numpy.float32 features.
columns_to_set: List of strings of feature names to make longer
partition: Int of how many dates to take into consideration
when evaluating technical analysis indicators.
Returns:
Pandas DataFrame instance with columns as numpy.float32 features.
"""
# Create long features DataFrame
features_long = self.set_features(partition=2 * partition)
# Remove features not specified by args.long
unwanted_features = [f for f in features.columns if f not in columns_to_set]
features_long = features_long.drop(unwanted_features, axis=1)
# Prefix long columns with 'long_' to fix naming conflicts
features_long.columns = ['long_{0}'.format(f) for f in features_long.columns]
# Merge the two DataFrames
skip = partition
return pd.concat([features[skip:].reset_index(drop=True),
features_long],
axis=1) |
def select_by_value(self, value):
"""Selects a DropDownItem by means of the contained text-
Args:
value (str): Textual content of the DropDownItem that have to be selected.
"""
self._selected_key = None
self._selected_item = None
for k in self.children:
item = self.children[k]
if item.get_text() == value:
item.attributes['selected'] = 'selected'
self._selected_key = k
self._selected_item = item
else:
if 'selected' in item.attributes:
del item.attributes['selected'] | Selects a DropDownItem by means of the contained text-
Args:
value (str): Textual content of the DropDownItem that have to be selected. | Below is the the instruction that describes the task:
### Input:
Selects a DropDownItem by means of the contained text-
Args:
value (str): Textual content of the DropDownItem that have to be selected.
### Response:
def select_by_value(self, value):
"""Selects a DropDownItem by means of the contained text-
Args:
value (str): Textual content of the DropDownItem that have to be selected.
"""
self._selected_key = None
self._selected_item = None
for k in self.children:
item = self.children[k]
if item.get_text() == value:
item.attributes['selected'] = 'selected'
self._selected_key = k
self._selected_item = item
else:
if 'selected' in item.attributes:
del item.attributes['selected'] |
def run_op(self, op, sched):
"""
Handle the operation:
* if coro is in STATE_RUNNING, send or throw the given op
* if coro is in STATE_NEED_INIT, call the init function and if it
doesn't return a generator, set STATE_COMPLETED and set the result
to whatever the function returned.
* if StopIteration is raised, set STATE_COMPLETED and return self.
* if any other exception is raised, set STATE_FAILED, handle error
or send it to the caller, return self
Return self is used as a optimization. Coroutine is also a Operation
which handles it's own completion (resuming the caller and the waiters).
"""
if op is self:
import warnings
warnings.warn("Running coro %s with itself. Something is fishy."%op)
assert self.state < self.STATE_COMPLETED, \
"%s called with %s op %r, coroutine state (%s) should be less than %s!" % (
self, isinstance(op, CoroutineException) and op or
(hasattr(op, 'state') and {0:'RUNNING', 1:'FINALIZED', 2:'ERRORED'}[op.state] or 'NOP'), op,
self._state_names[self.state],
self._state_names[self.STATE_COMPLETED]
)
#~ assert self.state < self.STATE_COMPLETED, \
#~ "%s called with:%s, last one:%s, expected state less than %s!" % (
#~ self,
#~ op,
#~ isinstance(self.lastop, CoroutineException) and ''.join(traceback.format_exception(*self.lastop.message)) or self.lastop,
#~ self._state_names[self.STATE_COMPLETED]
#~ )
#~ self.lastop = op
if self.debug:
print
if isinstance(op, CoroutineException):
print 'Running %r with exception:' % self,
if len(op.args) == 3:
print '[[['
traceback.print_exception(*op.args)
print ']]]'
else:
print op.args
else:
print 'Running %r with: %r' % (self, op)
global ident
ident = self
try:
if self.state == self.STATE_RUNNING:
if self.debug:
traceback.print_stack(self.coro.gi_frame)
if isinstance(op, CoroutineException):
rop = self.coro.throw(*op.args)
else:
rop = self.coro.send(op and op.finalize(sched))
elif self.state == self.STATE_NEED_INIT:
assert op is None
self.coro = self.coro(*self.f_args, **self.f_kws)
del self.f_args
del self.f_kws
if self._valid_gen(self.coro):
self.state = self.STATE_RUNNING
rop = None
else:
self.state = self.STATE_COMPLETED
self.result = self.coro
self.coro = None
rop = self
else:
return None
except StopIteration, e:
self.state = self.STATE_COMPLETED
self.result = e.args and e.args[0]
if hasattr(self.coro, 'close'):
self.coro.close()
rop = self
except (KeyboardInterrupt, GeneratorExit, SystemExit):
raise
except:
self.state = self.STATE_FAILED
self.result = None
self.exception = sys.exc_info()
if hasattr(self.coro, 'close'):
self.coro.close()
if not self.caller:
self.handle_error(op)
rop = self
sys.exc_clear()
finally:
ident = None
if self.debug:
print "Yields %s." % rop
return rop | Handle the operation:
* if coro is in STATE_RUNNING, send or throw the given op
* if coro is in STATE_NEED_INIT, call the init function and if it
doesn't return a generator, set STATE_COMPLETED and set the result
to whatever the function returned.
* if StopIteration is raised, set STATE_COMPLETED and return self.
* if any other exception is raised, set STATE_FAILED, handle error
or send it to the caller, return self
Return self is used as a optimization. Coroutine is also a Operation
which handles it's own completion (resuming the caller and the waiters). | Below is the the instruction that describes the task:
### Input:
Handle the operation:
* if coro is in STATE_RUNNING, send or throw the given op
* if coro is in STATE_NEED_INIT, call the init function and if it
doesn't return a generator, set STATE_COMPLETED and set the result
to whatever the function returned.
* if StopIteration is raised, set STATE_COMPLETED and return self.
* if any other exception is raised, set STATE_FAILED, handle error
or send it to the caller, return self
Return self is used as a optimization. Coroutine is also a Operation
which handles it's own completion (resuming the caller and the waiters).
### Response:
def run_op(self, op, sched):
"""
Handle the operation:
* if coro is in STATE_RUNNING, send or throw the given op
* if coro is in STATE_NEED_INIT, call the init function and if it
doesn't return a generator, set STATE_COMPLETED and set the result
to whatever the function returned.
* if StopIteration is raised, set STATE_COMPLETED and return self.
* if any other exception is raised, set STATE_FAILED, handle error
or send it to the caller, return self
Return self is used as a optimization. Coroutine is also a Operation
which handles it's own completion (resuming the caller and the waiters).
"""
if op is self:
import warnings
warnings.warn("Running coro %s with itself. Something is fishy."%op)
assert self.state < self.STATE_COMPLETED, \
"%s called with %s op %r, coroutine state (%s) should be less than %s!" % (
self, isinstance(op, CoroutineException) and op or
(hasattr(op, 'state') and {0:'RUNNING', 1:'FINALIZED', 2:'ERRORED'}[op.state] or 'NOP'), op,
self._state_names[self.state],
self._state_names[self.STATE_COMPLETED]
)
#~ assert self.state < self.STATE_COMPLETED, \
#~ "%s called with:%s, last one:%s, expected state less than %s!" % (
#~ self,
#~ op,
#~ isinstance(self.lastop, CoroutineException) and ''.join(traceback.format_exception(*self.lastop.message)) or self.lastop,
#~ self._state_names[self.STATE_COMPLETED]
#~ )
#~ self.lastop = op
if self.debug:
print
if isinstance(op, CoroutineException):
print 'Running %r with exception:' % self,
if len(op.args) == 3:
print '[[['
traceback.print_exception(*op.args)
print ']]]'
else:
print op.args
else:
print 'Running %r with: %r' % (self, op)
global ident
ident = self
try:
if self.state == self.STATE_RUNNING:
if self.debug:
traceback.print_stack(self.coro.gi_frame)
if isinstance(op, CoroutineException):
rop = self.coro.throw(*op.args)
else:
rop = self.coro.send(op and op.finalize(sched))
elif self.state == self.STATE_NEED_INIT:
assert op is None
self.coro = self.coro(*self.f_args, **self.f_kws)
del self.f_args
del self.f_kws
if self._valid_gen(self.coro):
self.state = self.STATE_RUNNING
rop = None
else:
self.state = self.STATE_COMPLETED
self.result = self.coro
self.coro = None
rop = self
else:
return None
except StopIteration, e:
self.state = self.STATE_COMPLETED
self.result = e.args and e.args[0]
if hasattr(self.coro, 'close'):
self.coro.close()
rop = self
except (KeyboardInterrupt, GeneratorExit, SystemExit):
raise
except:
self.state = self.STATE_FAILED
self.result = None
self.exception = sys.exc_info()
if hasattr(self.coro, 'close'):
self.coro.close()
if not self.caller:
self.handle_error(op)
rop = self
sys.exc_clear()
finally:
ident = None
if self.debug:
print "Yields %s." % rop
return rop |
def subtract_days(self, days: int) -> datetime:
""" Subtracts dates from the given value """
self.value = self.value - relativedelta(days=days)
return self.value | Subtracts dates from the given value | Below is the the instruction that describes the task:
### Input:
Subtracts dates from the given value
### Response:
def subtract_days(self, days: int) -> datetime:
""" Subtracts dates from the given value """
self.value = self.value - relativedelta(days=days)
return self.value |
def unpack_close(self):
"""
Unpack a close message into a status code and a reason. If no payload
is given, the code is None and the reason is an empty string.
"""
if self.payload:
code = struct.unpack('!H', str(self.payload[:2]))[0]
reason = str(self.payload[2:])
else:
code = None
reason = ''
return code, reason | Unpack a close message into a status code and a reason. If no payload
is given, the code is None and the reason is an empty string. | Below is the the instruction that describes the task:
### Input:
Unpack a close message into a status code and a reason. If no payload
is given, the code is None and the reason is an empty string.
### Response:
def unpack_close(self):
"""
Unpack a close message into a status code and a reason. If no payload
is given, the code is None and the reason is an empty string.
"""
if self.payload:
code = struct.unpack('!H', str(self.payload[:2]))[0]
reason = str(self.payload[2:])
else:
code = None
reason = ''
return code, reason |
def get_commits(self, repository_id, search_criteria, project=None, skip=None, top=None):
"""GetCommits.
Retrieve git commits for a project
:param str repository_id: The id or friendly name of the repository. To use the friendly name, projectId must also be specified.
:param :class:`<GitQueryCommitsCriteria> <azure.devops.v5_0.git.models.GitQueryCommitsCriteria>` search_criteria:
:param str project: Project ID or project name
:param int skip:
:param int top:
:rtype: [GitCommitRef]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if repository_id is not None:
route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
query_parameters = {}
if search_criteria is not None:
if search_criteria.ids is not None:
query_parameters['searchCriteria.ids'] = search_criteria.ids
if search_criteria.from_date is not None:
query_parameters['searchCriteria.fromDate'] = search_criteria.from_date
if search_criteria.to_date is not None:
query_parameters['searchCriteria.toDate'] = search_criteria.to_date
if search_criteria.item_version is not None:
if search_criteria.item_version.version_type is not None:
query_parameters['searchCriteria.itemVersion.versionType'] = search_criteria.item_version.version_type
if search_criteria.item_version.version is not None:
query_parameters['searchCriteria.itemVersion.version'] = search_criteria.item_version.version
if search_criteria.item_version.version_options is not None:
query_parameters['searchCriteria.itemVersion.versionOptions'] = search_criteria.item_version.version_options
if search_criteria.compare_version is not None:
if search_criteria.compare_version.version_type is not None:
query_parameters['searchCriteria.compareVersion.versionType'] = search_criteria.compare_version.version_type
if search_criteria.compare_version.version is not None:
query_parameters['searchCriteria.compareVersion.version'] = search_criteria.compare_version.version
if search_criteria.compare_version.version_options is not None:
query_parameters['searchCriteria.compareVersion.versionOptions'] = search_criteria.compare_version.version_options
if search_criteria.from_commit_id is not None:
query_parameters['searchCriteria.fromCommitId'] = search_criteria.from_commit_id
if search_criteria.to_commit_id is not None:
query_parameters['searchCriteria.toCommitId'] = search_criteria.to_commit_id
if search_criteria.user is not None:
query_parameters['searchCriteria.user'] = search_criteria.user
if search_criteria.author is not None:
query_parameters['searchCriteria.author'] = search_criteria.author
if search_criteria.item_path is not None:
query_parameters['searchCriteria.itemPath'] = search_criteria.item_path
if search_criteria.exclude_deletes is not None:
query_parameters['searchCriteria.excludeDeletes'] = search_criteria.exclude_deletes
if search_criteria.skip is not None:
query_parameters['searchCriteria.$skip'] = search_criteria.skip
if search_criteria.top is not None:
query_parameters['searchCriteria.$top'] = search_criteria.top
if search_criteria.include_links is not None:
query_parameters['searchCriteria.includeLinks'] = search_criteria.include_links
if search_criteria.include_work_items is not None:
query_parameters['searchCriteria.includeWorkItems'] = search_criteria.include_work_items
if search_criteria.include_user_image_url is not None:
query_parameters['searchCriteria.includeUserImageUrl'] = search_criteria.include_user_image_url
if search_criteria.include_push_data is not None:
query_parameters['searchCriteria.includePushData'] = search_criteria.include_push_data
if search_criteria.history_mode is not None:
query_parameters['searchCriteria.historyMode'] = search_criteria.history_mode
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
response = self._send(http_method='GET',
location_id='c2570c3b-5b3f-41b8-98bf-5407bfde8d58',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[GitCommitRef]', self._unwrap_collection(response)) | GetCommits.
Retrieve git commits for a project
:param str repository_id: The id or friendly name of the repository. To use the friendly name, projectId must also be specified.
:param :class:`<GitQueryCommitsCriteria> <azure.devops.v5_0.git.models.GitQueryCommitsCriteria>` search_criteria:
:param str project: Project ID or project name
:param int skip:
:param int top:
:rtype: [GitCommitRef] | Below is the the instruction that describes the task:
### Input:
GetCommits.
Retrieve git commits for a project
:param str repository_id: The id or friendly name of the repository. To use the friendly name, projectId must also be specified.
:param :class:`<GitQueryCommitsCriteria> <azure.devops.v5_0.git.models.GitQueryCommitsCriteria>` search_criteria:
:param str project: Project ID or project name
:param int skip:
:param int top:
:rtype: [GitCommitRef]
### Response:
def get_commits(self, repository_id, search_criteria, project=None, skip=None, top=None):
"""GetCommits.
Retrieve git commits for a project
:param str repository_id: The id or friendly name of the repository. To use the friendly name, projectId must also be specified.
:param :class:`<GitQueryCommitsCriteria> <azure.devops.v5_0.git.models.GitQueryCommitsCriteria>` search_criteria:
:param str project: Project ID or project name
:param int skip:
:param int top:
:rtype: [GitCommitRef]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if repository_id is not None:
route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
query_parameters = {}
if search_criteria is not None:
if search_criteria.ids is not None:
query_parameters['searchCriteria.ids'] = search_criteria.ids
if search_criteria.from_date is not None:
query_parameters['searchCriteria.fromDate'] = search_criteria.from_date
if search_criteria.to_date is not None:
query_parameters['searchCriteria.toDate'] = search_criteria.to_date
if search_criteria.item_version is not None:
if search_criteria.item_version.version_type is not None:
query_parameters['searchCriteria.itemVersion.versionType'] = search_criteria.item_version.version_type
if search_criteria.item_version.version is not None:
query_parameters['searchCriteria.itemVersion.version'] = search_criteria.item_version.version
if search_criteria.item_version.version_options is not None:
query_parameters['searchCriteria.itemVersion.versionOptions'] = search_criteria.item_version.version_options
if search_criteria.compare_version is not None:
if search_criteria.compare_version.version_type is not None:
query_parameters['searchCriteria.compareVersion.versionType'] = search_criteria.compare_version.version_type
if search_criteria.compare_version.version is not None:
query_parameters['searchCriteria.compareVersion.version'] = search_criteria.compare_version.version
if search_criteria.compare_version.version_options is not None:
query_parameters['searchCriteria.compareVersion.versionOptions'] = search_criteria.compare_version.version_options
if search_criteria.from_commit_id is not None:
query_parameters['searchCriteria.fromCommitId'] = search_criteria.from_commit_id
if search_criteria.to_commit_id is not None:
query_parameters['searchCriteria.toCommitId'] = search_criteria.to_commit_id
if search_criteria.user is not None:
query_parameters['searchCriteria.user'] = search_criteria.user
if search_criteria.author is not None:
query_parameters['searchCriteria.author'] = search_criteria.author
if search_criteria.item_path is not None:
query_parameters['searchCriteria.itemPath'] = search_criteria.item_path
if search_criteria.exclude_deletes is not None:
query_parameters['searchCriteria.excludeDeletes'] = search_criteria.exclude_deletes
if search_criteria.skip is not None:
query_parameters['searchCriteria.$skip'] = search_criteria.skip
if search_criteria.top is not None:
query_parameters['searchCriteria.$top'] = search_criteria.top
if search_criteria.include_links is not None:
query_parameters['searchCriteria.includeLinks'] = search_criteria.include_links
if search_criteria.include_work_items is not None:
query_parameters['searchCriteria.includeWorkItems'] = search_criteria.include_work_items
if search_criteria.include_user_image_url is not None:
query_parameters['searchCriteria.includeUserImageUrl'] = search_criteria.include_user_image_url
if search_criteria.include_push_data is not None:
query_parameters['searchCriteria.includePushData'] = search_criteria.include_push_data
if search_criteria.history_mode is not None:
query_parameters['searchCriteria.historyMode'] = search_criteria.history_mode
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
response = self._send(http_method='GET',
location_id='c2570c3b-5b3f-41b8-98bf-5407bfde8d58',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[GitCommitRef]', self._unwrap_collection(response)) |
def input(self, _in, out, **kw):
"""
Currently Elm does not write to stdout
(https://github.com/elm-lang/elm-make/issues/39), so we need to write
the compiled contents to a temporary file and then read it in order to
output to stdout.
"""
# create a temp file
tmp = NamedTemporaryFile(suffix='.js', delete=False)
tmp.close() # close it so windows can read it
# write to a temp file
elm_make = self.binary or 'elm-make'
change_directory = bool(self.change_directory or False)
source = kw['source_path']
source_dir = os.path.dirname(source)
exec_dir = source_dir if change_directory else os.getcwd()
write_args = [elm_make, source, '--output', tmp.name, '--yes']
with excursion(exec_dir), TemporaryFile(mode='w') as fake_write_obj:
self.subprocess(write_args, fake_write_obj)
# read the temp file
cat_or_type = 'type' if platform == 'win32' else 'cat'
read_args = [cat_or_type, tmp.name]
self.subprocess(read_args, out)
os.remove(tmp.name) | Currently Elm does not write to stdout
(https://github.com/elm-lang/elm-make/issues/39), so we need to write
the compiled contents to a temporary file and then read it in order to
output to stdout. | Below is the the instruction that describes the task:
### Input:
Currently Elm does not write to stdout
(https://github.com/elm-lang/elm-make/issues/39), so we need to write
the compiled contents to a temporary file and then read it in order to
output to stdout.
### Response:
def input(self, _in, out, **kw):
"""
Currently Elm does not write to stdout
(https://github.com/elm-lang/elm-make/issues/39), so we need to write
the compiled contents to a temporary file and then read it in order to
output to stdout.
"""
# create a temp file
tmp = NamedTemporaryFile(suffix='.js', delete=False)
tmp.close() # close it so windows can read it
# write to a temp file
elm_make = self.binary or 'elm-make'
change_directory = bool(self.change_directory or False)
source = kw['source_path']
source_dir = os.path.dirname(source)
exec_dir = source_dir if change_directory else os.getcwd()
write_args = [elm_make, source, '--output', tmp.name, '--yes']
with excursion(exec_dir), TemporaryFile(mode='w') as fake_write_obj:
self.subprocess(write_args, fake_write_obj)
# read the temp file
cat_or_type = 'type' if platform == 'win32' else 'cat'
read_args = [cat_or_type, tmp.name]
self.subprocess(read_args, out)
os.remove(tmp.name) |
def spent_outputs(self):
"""Tuple of :obj:`dict`: Inputs of this transaction. Each input
is represented as a dictionary containing a transaction id and
output index.
"""
return (
input_.fulfills.to_dict()
for input_ in self.inputs if input_.fulfills
) | Tuple of :obj:`dict`: Inputs of this transaction. Each input
is represented as a dictionary containing a transaction id and
output index. | Below is the the instruction that describes the task:
### Input:
Tuple of :obj:`dict`: Inputs of this transaction. Each input
is represented as a dictionary containing a transaction id and
output index.
### Response:
def spent_outputs(self):
"""Tuple of :obj:`dict`: Inputs of this transaction. Each input
is represented as a dictionary containing a transaction id and
output index.
"""
return (
input_.fulfills.to_dict()
for input_ in self.inputs if input_.fulfills
) |
def find_by_name(self, term: str, include_placeholders: bool = False) -> List[Account]:
""" Search for account by part of the name """
query = (
self.query
.filter(Account.name.like('%' + term + '%'))
.order_by(Account.name)
)
# Exclude placeholder accounts?
if not include_placeholders:
query = query.filter(Account.placeholder == 0)
# print(generic.get_sql(query))
return query.all() | Search for account by part of the name | Below is the the instruction that describes the task:
### Input:
Search for account by part of the name
### Response:
def find_by_name(self, term: str, include_placeholders: bool = False) -> List[Account]:
""" Search for account by part of the name """
query = (
self.query
.filter(Account.name.like('%' + term + '%'))
.order_by(Account.name)
)
# Exclude placeholder accounts?
if not include_placeholders:
query = query.filter(Account.placeholder == 0)
# print(generic.get_sql(query))
return query.all() |
def last_blank(src):
"""Determine if the input source ends in a blank.
A blank is either a newline or a line consisting of whitespace.
Parameters
----------
src : string
A single or multiline string.
"""
if not src: return False
ll = src.splitlines()[-1]
return (ll == '') or ll.isspace() | Determine if the input source ends in a blank.
A blank is either a newline or a line consisting of whitespace.
Parameters
----------
src : string
A single or multiline string. | Below is the the instruction that describes the task:
### Input:
Determine if the input source ends in a blank.
A blank is either a newline or a line consisting of whitespace.
Parameters
----------
src : string
A single or multiline string.
### Response:
def last_blank(src):
"""Determine if the input source ends in a blank.
A blank is either a newline or a line consisting of whitespace.
Parameters
----------
src : string
A single or multiline string.
"""
if not src: return False
ll = src.splitlines()[-1]
return (ll == '') or ll.isspace() |
def select(self, nowait=True, cb=None):
'''
Set this channel to use publisher confirmations.
'''
nowait = nowait and self.allow_nowait() and not cb
if not self._enabled:
self._enabled = True
self.channel.basic._msg_id = 0
self.channel.basic._last_ack_id = 0
args = Writer()
args.write_bit(nowait)
self.send_frame(MethodFrame(self.channel_id, 85, 10, args))
if not nowait:
self._select_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_select_ok) | Set this channel to use publisher confirmations. | Below is the the instruction that describes the task:
### Input:
Set this channel to use publisher confirmations.
### Response:
def select(self, nowait=True, cb=None):
'''
Set this channel to use publisher confirmations.
'''
nowait = nowait and self.allow_nowait() and not cb
if not self._enabled:
self._enabled = True
self.channel.basic._msg_id = 0
self.channel.basic._last_ack_id = 0
args = Writer()
args.write_bit(nowait)
self.send_frame(MethodFrame(self.channel_id, 85, 10, args))
if not nowait:
self._select_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_select_ok) |
def write_memory_dump():
"""Dump memory to a temporary filename with the meliae package.
@return: JSON filename where memory dump has been written to
@rtype: string
"""
# first do a full garbage collection run
gc.collect()
if gc.garbage:
log.warn(LOG_CHECK, "Unreachabe objects: %s", pprint.pformat(gc.garbage))
from meliae import scanner
fo, filename = get_temp_file(mode='wb', suffix='.json', prefix='lcdump_')
try:
scanner.dump_all_objects(fo)
finally:
fo.close()
return filename | Dump memory to a temporary filename with the meliae package.
@return: JSON filename where memory dump has been written to
@rtype: string | Below is the the instruction that describes the task:
### Input:
Dump memory to a temporary filename with the meliae package.
@return: JSON filename where memory dump has been written to
@rtype: string
### Response:
def write_memory_dump():
"""Dump memory to a temporary filename with the meliae package.
@return: JSON filename where memory dump has been written to
@rtype: string
"""
# first do a full garbage collection run
gc.collect()
if gc.garbage:
log.warn(LOG_CHECK, "Unreachabe objects: %s", pprint.pformat(gc.garbage))
from meliae import scanner
fo, filename = get_temp_file(mode='wb', suffix='.json', prefix='lcdump_')
try:
scanner.dump_all_objects(fo)
finally:
fo.close()
return filename |
def show_editor_buffer(self, editor_buffer):
"""
Open this `EditorBuffer` in the active window.
"""
assert isinstance(editor_buffer, EditorBuffer)
self.active_window.editor_buffer = editor_buffer | Open this `EditorBuffer` in the active window. | Below is the the instruction that describes the task:
### Input:
Open this `EditorBuffer` in the active window.
### Response:
def show_editor_buffer(self, editor_buffer):
"""
Open this `EditorBuffer` in the active window.
"""
assert isinstance(editor_buffer, EditorBuffer)
self.active_window.editor_buffer = editor_buffer |
def run(items):
"""Run MetaSV if we have enough supported callers, adding output to the set of calls.
"""
assert len(items) == 1, "Expect one input to MetaSV ensemble calling"
data = items[0]
work_dir = _sv_workdir(data)
out_file = os.path.join(work_dir, "variants.vcf.gz")
cmd = _get_cmd() + ["--sample", dd.get_sample_name(data), "--reference", dd.get_ref_file(data),
"--bam", dd.get_align_bam(data), "--outdir", work_dir]
methods = []
for call in data.get("sv", []):
vcf_file = call.get("vcf_file", call.get("vrn_file", None))
if call["variantcaller"] in SUPPORTED and call["variantcaller"] not in methods and vcf_file is not None:
methods.append(call["variantcaller"])
cmd += ["--%s_vcf" % call["variantcaller"], vcf_file]
if len(methods) >= MIN_CALLERS:
if not utils.file_exists(out_file):
tx_work_dir = utils.safe_makedir(os.path.join(work_dir, "raw"))
ins_stats = shared.calc_paired_insert_stats_save(dd.get_align_bam(data),
os.path.join(tx_work_dir, "insert-stats.yaml"))
cmd += ["--workdir", tx_work_dir, "--num_threads", str(dd.get_num_cores(data))]
cmd += ["--spades", utils.which("spades.py"), "--age", utils.which("age_align")]
cmd += ["--assembly_max_tools=1", "--assembly_pad=500"]
cmd += ["--boost_sc", "--isize_mean", ins_stats["mean"], "--isize_sd", ins_stats["std"]]
do.run(cmd, "Combine variant calls with MetaSV")
filters = ("(NUM_SVTOOLS = 1 && ABS(SVLEN)>50000) || "
"(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_FLANK_PERCENT>80) || "
"(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_NUM_GOOD_REC=0) || "
"(ABS(SVLEN)<4000 && BA_NUM_GOOD_REC>2)")
filter_file = vfilter.cutoff_w_expression(out_file, filters,
data, name="ReassemblyStats", limit_regions=None)
effects_vcf, _ = effects.add_to_vcf(filter_file, data, "snpeff")
data["sv"].append({"variantcaller": "metasv",
"vrn_file": effects_vcf or filter_file})
return [data] | Run MetaSV if we have enough supported callers, adding output to the set of calls. | Below is the the instruction that describes the task:
### Input:
Run MetaSV if we have enough supported callers, adding output to the set of calls.
### Response:
def run(items):
"""Run MetaSV if we have enough supported callers, adding output to the set of calls.
"""
assert len(items) == 1, "Expect one input to MetaSV ensemble calling"
data = items[0]
work_dir = _sv_workdir(data)
out_file = os.path.join(work_dir, "variants.vcf.gz")
cmd = _get_cmd() + ["--sample", dd.get_sample_name(data), "--reference", dd.get_ref_file(data),
"--bam", dd.get_align_bam(data), "--outdir", work_dir]
methods = []
for call in data.get("sv", []):
vcf_file = call.get("vcf_file", call.get("vrn_file", None))
if call["variantcaller"] in SUPPORTED and call["variantcaller"] not in methods and vcf_file is not None:
methods.append(call["variantcaller"])
cmd += ["--%s_vcf" % call["variantcaller"], vcf_file]
if len(methods) >= MIN_CALLERS:
if not utils.file_exists(out_file):
tx_work_dir = utils.safe_makedir(os.path.join(work_dir, "raw"))
ins_stats = shared.calc_paired_insert_stats_save(dd.get_align_bam(data),
os.path.join(tx_work_dir, "insert-stats.yaml"))
cmd += ["--workdir", tx_work_dir, "--num_threads", str(dd.get_num_cores(data))]
cmd += ["--spades", utils.which("spades.py"), "--age", utils.which("age_align")]
cmd += ["--assembly_max_tools=1", "--assembly_pad=500"]
cmd += ["--boost_sc", "--isize_mean", ins_stats["mean"], "--isize_sd", ins_stats["std"]]
do.run(cmd, "Combine variant calls with MetaSV")
filters = ("(NUM_SVTOOLS = 1 && ABS(SVLEN)>50000) || "
"(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_FLANK_PERCENT>80) || "
"(NUM_SVTOOLS = 1 && ABS(SVLEN)<4000 && BA_NUM_GOOD_REC=0) || "
"(ABS(SVLEN)<4000 && BA_NUM_GOOD_REC>2)")
filter_file = vfilter.cutoff_w_expression(out_file, filters,
data, name="ReassemblyStats", limit_regions=None)
effects_vcf, _ = effects.add_to_vcf(filter_file, data, "snpeff")
data["sv"].append({"variantcaller": "metasv",
"vrn_file": effects_vcf or filter_file})
return [data] |
def num_available_breakpoints(self, arm=False, thumb=False, ram=False, flash=False, hw=False):
"""Returns the number of available breakpoints of the specified type.
If ``arm`` is set, gets the number of available ARM breakpoint units.
If ``thumb`` is set, gets the number of available THUMB breakpoint
units. If ``ram`` is set, gets the number of available software RAM
breakpoint units. If ``flash`` is set, gets the number of available
software flash breakpoint units. If ``hw`` is set, gets the number of
available hardware breakpoint units.
If a combination of the flags is given, then
``num_available_breakpoints()`` returns the number of breakpoints
specified by the given flags. If no flags are specified, then the
count of available breakpoint units is returned.
Args:
self (JLink): the ``JLink`` instance
arm (bool): Boolean indicating to get number of ARM breakpoints.
thumb (bool): Boolean indicating to get number of THUMB breakpoints.
ram (bool): Boolean indicating to get number of SW RAM breakpoints.
flash (bool): Boolean indicating to get number of Flash breakpoints.
hw (bool): Boolean indicating to get number of Hardware breakpoints.
Returns:
The number of available breakpoint units of the specified type.
"""
flags = [
enums.JLinkBreakpoint.ARM,
enums.JLinkBreakpoint.THUMB,
enums.JLinkBreakpoint.SW_RAM,
enums.JLinkBreakpoint.SW_FLASH,
enums.JLinkBreakpoint.HW
]
set_flags = [
arm,
thumb,
ram,
flash,
hw
]
if not any(set_flags):
flags = enums.JLinkBreakpoint.ANY
else:
flags = list(f for i, f in enumerate(flags) if set_flags[i])
flags = functools.reduce(operator.__or__, flags, 0)
return self._dll.JLINKARM_GetNumBPUnits(flags) | Returns the number of available breakpoints of the specified type.
If ``arm`` is set, gets the number of available ARM breakpoint units.
If ``thumb`` is set, gets the number of available THUMB breakpoint
units. If ``ram`` is set, gets the number of available software RAM
breakpoint units. If ``flash`` is set, gets the number of available
software flash breakpoint units. If ``hw`` is set, gets the number of
available hardware breakpoint units.
If a combination of the flags is given, then
``num_available_breakpoints()`` returns the number of breakpoints
specified by the given flags. If no flags are specified, then the
count of available breakpoint units is returned.
Args:
self (JLink): the ``JLink`` instance
arm (bool): Boolean indicating to get number of ARM breakpoints.
thumb (bool): Boolean indicating to get number of THUMB breakpoints.
ram (bool): Boolean indicating to get number of SW RAM breakpoints.
flash (bool): Boolean indicating to get number of Flash breakpoints.
hw (bool): Boolean indicating to get number of Hardware breakpoints.
Returns:
The number of available breakpoint units of the specified type. | Below is the the instruction that describes the task:
### Input:
Returns the number of available breakpoints of the specified type.
If ``arm`` is set, gets the number of available ARM breakpoint units.
If ``thumb`` is set, gets the number of available THUMB breakpoint
units. If ``ram`` is set, gets the number of available software RAM
breakpoint units. If ``flash`` is set, gets the number of available
software flash breakpoint units. If ``hw`` is set, gets the number of
available hardware breakpoint units.
If a combination of the flags is given, then
``num_available_breakpoints()`` returns the number of breakpoints
specified by the given flags. If no flags are specified, then the
count of available breakpoint units is returned.
Args:
self (JLink): the ``JLink`` instance
arm (bool): Boolean indicating to get number of ARM breakpoints.
thumb (bool): Boolean indicating to get number of THUMB breakpoints.
ram (bool): Boolean indicating to get number of SW RAM breakpoints.
flash (bool): Boolean indicating to get number of Flash breakpoints.
hw (bool): Boolean indicating to get number of Hardware breakpoints.
Returns:
The number of available breakpoint units of the specified type.
### Response:
def num_available_breakpoints(self, arm=False, thumb=False, ram=False, flash=False, hw=False):
"""Returns the number of available breakpoints of the specified type.
If ``arm`` is set, gets the number of available ARM breakpoint units.
If ``thumb`` is set, gets the number of available THUMB breakpoint
units. If ``ram`` is set, gets the number of available software RAM
breakpoint units. If ``flash`` is set, gets the number of available
software flash breakpoint units. If ``hw`` is set, gets the number of
available hardware breakpoint units.
If a combination of the flags is given, then
``num_available_breakpoints()`` returns the number of breakpoints
specified by the given flags. If no flags are specified, then the
count of available breakpoint units is returned.
Args:
self (JLink): the ``JLink`` instance
arm (bool): Boolean indicating to get number of ARM breakpoints.
thumb (bool): Boolean indicating to get number of THUMB breakpoints.
ram (bool): Boolean indicating to get number of SW RAM breakpoints.
flash (bool): Boolean indicating to get number of Flash breakpoints.
hw (bool): Boolean indicating to get number of Hardware breakpoints.
Returns:
The number of available breakpoint units of the specified type.
"""
flags = [
enums.JLinkBreakpoint.ARM,
enums.JLinkBreakpoint.THUMB,
enums.JLinkBreakpoint.SW_RAM,
enums.JLinkBreakpoint.SW_FLASH,
enums.JLinkBreakpoint.HW
]
set_flags = [
arm,
thumb,
ram,
flash,
hw
]
if not any(set_flags):
flags = enums.JLinkBreakpoint.ANY
else:
flags = list(f for i, f in enumerate(flags) if set_flags[i])
flags = functools.reduce(operator.__or__, flags, 0)
return self._dll.JLINKARM_GetNumBPUnits(flags) |
def get_monitoring_problems(self):
"""Get the current scheduler livesynthesis
:return: live synthesis and problems dictionary
:rtype: dict
"""
res = {}
if not self.sched:
return res
# Get statistics from the scheduler
scheduler_stats = self.sched.get_scheduler_stats(details=True)
if 'livesynthesis' in scheduler_stats:
res['livesynthesis'] = scheduler_stats['livesynthesis']
if 'problems' in scheduler_stats:
res['problems'] = scheduler_stats['problems']
return res | Get the current scheduler livesynthesis
:return: live synthesis and problems dictionary
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Get the current scheduler livesynthesis
:return: live synthesis and problems dictionary
:rtype: dict
### Response:
def get_monitoring_problems(self):
"""Get the current scheduler livesynthesis
:return: live synthesis and problems dictionary
:rtype: dict
"""
res = {}
if not self.sched:
return res
# Get statistics from the scheduler
scheduler_stats = self.sched.get_scheduler_stats(details=True)
if 'livesynthesis' in scheduler_stats:
res['livesynthesis'] = scheduler_stats['livesynthesis']
if 'problems' in scheduler_stats:
res['problems'] = scheduler_stats['problems']
return res |
def get_object_or_none(klass, *args, **kwargs):
""" Calls get() on a given model manager, but it returns None instead of the model’s
DoesNotExist exception.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except: # noqa: E722
return None | Calls get() on a given model manager, but it returns None instead of the model’s
DoesNotExist exception. | Below is the the instruction that describes the task:
### Input:
Calls get() on a given model manager, but it returns None instead of the model’s
DoesNotExist exception.
### Response:
def get_object_or_none(klass, *args, **kwargs):
""" Calls get() on a given model manager, but it returns None instead of the model’s
DoesNotExist exception.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except: # noqa: E722
return None |
def nested_insert(self, item_list):
""" Create a series of nested LIVVDicts given a list """
if len(item_list) == 1:
self[item_list[0]] = LIVVDict()
elif len(item_list) > 1:
if item_list[0] not in self:
self[item_list[0]] = LIVVDict()
        self[item_list[0]].nested_insert(item_list[1:]) | Create a series of nested LIVVDicts given a list | Below is the instruction that describes the task:
### Input:
Create a series of nested LIVVDicts given a list
### Response:
def nested_insert(self, item_list):
""" Create a series of nested LIVVDicts given a list """
if len(item_list) == 1:
self[item_list[0]] = LIVVDict()
elif len(item_list) > 1:
if item_list[0] not in self:
self[item_list[0]] = LIVVDict()
self[item_list[0]].nested_insert(item_list[1:]) |
def get_files(dirname):
'''recursion rockz'''
all_files = []
os.chdir(dirname)
for fn in os.listdir(os.path.abspath(dirname)):
fn = os.path.abspath(fn)
if os.path.isdir(fn):
all_files += get_files(fn)
else:
all_files.append(fn)
os.chdir('../')
    return all_files | recursion rockz | Below is the instruction that describes the task:
### Input:
recursion rockz
### Response:
def get_files(dirname):
'''recursion rockz'''
all_files = []
os.chdir(dirname)
for fn in os.listdir(os.path.abspath(dirname)):
fn = os.path.abspath(fn)
if os.path.isdir(fn):
all_files += get_files(fn)
else:
all_files.append(fn)
os.chdir('../')
return all_files |
def _fetch_pageviews(self, storage, year, week, ip_users=False):
"""
Fetch PageViews from Elasticsearch.
    :param time_from: Starting at timestamp.
:param time_to: To timestamp
"""
prefix = 'Pageviews'
if ip_users:
query_add = "AND !(bot:True) AND (id_user:0)"
prefix += '_IP'
else:
query_add = "AND !(bot:True) AND !(id_user:0)"
store = self.storage.get(prefix, year, week)
if not self.config['overwrite_files'] and store.does_file_exist():
logger.debug("File already exist, skip: {}-{}".format(year, week))
return
store.open('overwrite')
time_from, time_to = get_week_dates(year, week, as_timestamp=True)
es_type = "events.pageviews"
es_query = self.ES_QUERY % {'timestamp_start': time_from * 1000,
'timestamp_end': time_to * 1000,
'event_name': es_type,
'query_add': query_add}
logger.info("{}: {} - {}".format(es_type, time_from, time_to))
for hit in self._fetch_elasticsearch(es_query):
item = {}
try:
item['user'] = hit['_source'].get('id_user')
if ip_users:
assert 0 == item['user']
else:
assert 0 != item['user']
assert es_type == hit['_type']
item['timestamp'] = float(hit['_source']['@timestamp']) / 1000
if ip_users:
item['ip'] = str(hit['_source'].get('client_host'))
user_agent = str(hit['_source'].get('user_agent'))
if user_agent is None or user_agent == 'None':
continue
elif _is_bot(user_agent):
continue
item['user_agent'] = user_agent
item['recid'] = int(hit['_source'].get('id_bibrec'))
except UnicodeEncodeError as e:
# TODO: Error logging.
# print(e)
continue
# Save entry
store.add_hit(item)
store.close()
# Delete File if no hits were added.
if store.number_of_hits == 0:
store.delete() | Fetch PageViews from Elasticsearch.
:param time_from: Staring at timestamp.
:param time_to: To timestamp | Below is the the instruction that describes the task:
### Input:
Fetch PageViews from Elasticsearch.
:param time_from: Staring at timestamp.
:param time_to: To timestamp
### Response:
def _fetch_pageviews(self, storage, year, week, ip_users=False):
"""
Fetch PageViews from Elasticsearch.
    :param time_from: Starting at timestamp.
:param time_to: To timestamp
"""
prefix = 'Pageviews'
if ip_users:
query_add = "AND !(bot:True) AND (id_user:0)"
prefix += '_IP'
else:
query_add = "AND !(bot:True) AND !(id_user:0)"
store = self.storage.get(prefix, year, week)
if not self.config['overwrite_files'] and store.does_file_exist():
logger.debug("File already exist, skip: {}-{}".format(year, week))
return
store.open('overwrite')
time_from, time_to = get_week_dates(year, week, as_timestamp=True)
es_type = "events.pageviews"
es_query = self.ES_QUERY % {'timestamp_start': time_from * 1000,
'timestamp_end': time_to * 1000,
'event_name': es_type,
'query_add': query_add}
logger.info("{}: {} - {}".format(es_type, time_from, time_to))
for hit in self._fetch_elasticsearch(es_query):
item = {}
try:
item['user'] = hit['_source'].get('id_user')
if ip_users:
assert 0 == item['user']
else:
assert 0 != item['user']
assert es_type == hit['_type']
item['timestamp'] = float(hit['_source']['@timestamp']) / 1000
if ip_users:
item['ip'] = str(hit['_source'].get('client_host'))
user_agent = str(hit['_source'].get('user_agent'))
if user_agent is None or user_agent == 'None':
continue
elif _is_bot(user_agent):
continue
item['user_agent'] = user_agent
item['recid'] = int(hit['_source'].get('id_bibrec'))
except UnicodeEncodeError as e:
# TODO: Error logging.
# print(e)
continue
# Save entry
store.add_hit(item)
store.close()
# Delete File if no hits were added.
if store.number_of_hits == 0:
store.delete() |
def _write_html_file(word, translations, data_dir, native=False):
"""Create html file of word translations.
Parameters
----------
word : str
Word that was translated.
    translations : dict
Dictionary of word translations.
data_dir : pathlib.Path
Location where html files are saved.
"""
content_str = _create_html_file_content(translations)
html_string = HTML_TEMPLATE.replace("{% word %}", word)
html_string = html_string.replace("{% content %}", content_str)
trans_dir = "translations"
if native:
trans_dir += "_native"
translations_dir = data_dir.joinpath(trans_dir)
fname = translations_dir.joinpath("{word}.html".format(word=word))
save_file(fname, html_string, mk_parents=True) | Create html file of word translations.
Parameters
----------
word : str
Word that was translated.
tralnslations : dict
Dictionary of word translations.
data_dir : pathlib.Path
Location where html files are saved. | Below is the the instruction that describes the task:
### Input:
Create html file of word translations.
Parameters
----------
word : str
Word that was translated.
tralnslations : dict
Dictionary of word translations.
data_dir : pathlib.Path
Location where html files are saved.
### Response:
def _write_html_file(word, translations, data_dir, native=False):
"""Create html file of word translations.
Parameters
----------
word : str
Word that was translated.
    translations : dict
Dictionary of word translations.
data_dir : pathlib.Path
Location where html files are saved.
"""
content_str = _create_html_file_content(translations)
html_string = HTML_TEMPLATE.replace("{% word %}", word)
html_string = html_string.replace("{% content %}", content_str)
trans_dir = "translations"
if native:
trans_dir += "_native"
translations_dir = data_dir.joinpath(trans_dir)
fname = translations_dir.joinpath("{word}.html".format(word=word))
save_file(fname, html_string, mk_parents=True) |
def get_possible_importers(file_uris, current_doc=None):
"""
Return all the importer objects that can handle the specified files.
Possible imports may vary depending on the currently active document
"""
importers = []
for importer in IMPORTERS:
if importer.can_import(file_uris, current_doc):
importers.append(importer)
return importers | Return all the importer objects that can handle the specified files.
Possible imports may vary depending on the currently active document | Below is the the instruction that describes the task:
### Input:
Return all the importer objects that can handle the specified files.
Possible imports may vary depending on the currently active document
### Response:
def get_possible_importers(file_uris, current_doc=None):
"""
Return all the importer objects that can handle the specified files.
Possible imports may vary depending on the currently active document
"""
importers = []
for importer in IMPORTERS:
if importer.can_import(file_uris, current_doc):
importers.append(importer)
return importers |
def update_dependency(self, tile, depinfo, destdir=None):
"""Attempt to install or update a dependency to the latest version.
Args:
tile (IOTile): An IOTile object describing the tile that has the dependency
depinfo (dict): a dictionary from tile.dependencies specifying the dependency
destdir (string): An optional folder into which to unpack the dependency
Returns:
string: a string indicating the outcome. Possible values are:
"already installed"
"installed"
"updated"
"not found"
"""
if destdir is None:
destdir = os.path.join(tile.folder, 'build', 'deps', depinfo['unique_id'])
has_version = False
had_version = False
if os.path.exists(destdir):
has_version = True
had_version = True
for priority, rule in self.rules:
if not self._check_rule(rule, depinfo):
continue
resolver = self._find_resolver(rule)
if has_version:
deptile = IOTile(destdir)
# If the dependency is not up to date, don't do anything
depstatus = self._check_dep(depinfo, deptile, resolver)
if depstatus is False:
shutil.rmtree(destdir)
has_version = False
else:
continue
# Now try to resolve this dependency with the latest version
result = resolver.resolve(depinfo, destdir)
if not result['found'] and result.get('stop', False):
return 'not found'
if not result['found']:
continue
settings = {
'resolver': resolver.__class__.__name__,
'factory_args': rule[2]
}
if 'settings' in result:
settings['settings'] = result['settings']
self._save_depsettings(destdir, settings)
if had_version:
return "updated"
return "installed"
if has_version:
return "already installed"
return "not found" | Attempt to install or update a dependency to the latest version.
Args:
tile (IOTile): An IOTile object describing the tile that has the dependency
depinfo (dict): a dictionary from tile.dependencies specifying the dependency
destdir (string): An optional folder into which to unpack the dependency
Returns:
string: a string indicating the outcome. Possible values are:
"already installed"
"installed"
"updated"
"not found" | Below is the the instruction that describes the task:
### Input:
Attempt to install or update a dependency to the latest version.
Args:
tile (IOTile): An IOTile object describing the tile that has the dependency
depinfo (dict): a dictionary from tile.dependencies specifying the dependency
destdir (string): An optional folder into which to unpack the dependency
Returns:
string: a string indicating the outcome. Possible values are:
"already installed"
"installed"
"updated"
"not found"
### Response:
def update_dependency(self, tile, depinfo, destdir=None):
"""Attempt to install or update a dependency to the latest version.
Args:
tile (IOTile): An IOTile object describing the tile that has the dependency
depinfo (dict): a dictionary from tile.dependencies specifying the dependency
destdir (string): An optional folder into which to unpack the dependency
Returns:
string: a string indicating the outcome. Possible values are:
"already installed"
"installed"
"updated"
"not found"
"""
if destdir is None:
destdir = os.path.join(tile.folder, 'build', 'deps', depinfo['unique_id'])
has_version = False
had_version = False
if os.path.exists(destdir):
has_version = True
had_version = True
for priority, rule in self.rules:
if not self._check_rule(rule, depinfo):
continue
resolver = self._find_resolver(rule)
if has_version:
deptile = IOTile(destdir)
# If the dependency is not up to date, don't do anything
depstatus = self._check_dep(depinfo, deptile, resolver)
if depstatus is False:
shutil.rmtree(destdir)
has_version = False
else:
continue
# Now try to resolve this dependency with the latest version
result = resolver.resolve(depinfo, destdir)
if not result['found'] and result.get('stop', False):
return 'not found'
if not result['found']:
continue
settings = {
'resolver': resolver.__class__.__name__,
'factory_args': rule[2]
}
if 'settings' in result:
settings['settings'] = result['settings']
self._save_depsettings(destdir, settings)
if had_version:
return "updated"
return "installed"
if has_version:
return "already installed"
return "not found" |
def getall(self, key):
"""
Return a list of all values matching the key (may be an empty list)
"""
result = []
for k, v in self._items:
if key == k:
result.append(v)
    return result | Return a list of all values matching the key (may be an empty list) | Below is the instruction that describes the task:
### Input:
Return a list of all values matching the key (may be an empty list)
### Response:
def getall(self, key):
"""
Return a list of all values matching the key (may be an empty list)
"""
result = []
for k, v in self._items:
if key == k:
result.append(v)
return result |
def remove_post_process(self, name):
"""remove a post-process
Parameters
----------
name : str
name of the post-process to remove.
"""
self._pprocesses = [post_process
for post_process in self._pprocesses
if post_process.name != name] | remove a post-process
Parameters
----------
name : str
name of the post-process to remove. | Below is the the instruction that describes the task:
### Input:
remove a post-process
Parameters
----------
name : str
name of the post-process to remove.
### Response:
def remove_post_process(self, name):
"""remove a post-process
Parameters
----------
name : str
name of the post-process to remove.
"""
self._pprocesses = [post_process
for post_process in self._pprocesses
if post_process.name != name] |
def configure_stream_logger(logger='', level=None, formatter='%(levelname)-8s %(message)s'):
"""
Configure the default stream handler for logging messages to the console,
remove other logging handlers, and enable capturing warnings.
.. versionadded:: 1.3.0
:param str logger: The logger to add the stream handler for.
:param level: The level to set the logger to, will default to WARNING if no level is specified.
:type level: None, int, str
:param formatter: The format to use for logging messages to the console.
:type formatter: str, :py:class:`logging.Formatter`
:return: The new configured stream handler.
:rtype: :py:class:`logging.StreamHandler`
"""
level = level or logging.WARNING
if isinstance(level, str):
level = getattr(logging, level, None)
if level is None:
raise ValueError('invalid log level: ' + level)
root_logger = logging.getLogger('')
for handler in root_logger.handlers:
root_logger.removeHandler(handler)
logging.getLogger(logger).setLevel(logging.DEBUG)
console_log_handler = logging.StreamHandler()
console_log_handler.setLevel(level)
if isinstance(formatter, str):
formatter = logging.Formatter(formatter)
elif not isinstance(formatter, logging.Formatter):
raise TypeError('formatter must be an instance of logging.Formatter')
console_log_handler.setFormatter(formatter)
logging.getLogger(logger).addHandler(console_log_handler)
logging.captureWarnings(True)
return console_log_handler | Configure the default stream handler for logging messages to the console,
remove other logging handlers, and enable capturing warnings.
.. versionadded:: 1.3.0
:param str logger: The logger to add the stream handler for.
:param level: The level to set the logger to, will default to WARNING if no level is specified.
:type level: None, int, str
:param formatter: The format to use for logging messages to the console.
:type formatter: str, :py:class:`logging.Formatter`
:return: The new configured stream handler.
:rtype: :py:class:`logging.StreamHandler` | Below is the the instruction that describes the task:
### Input:
Configure the default stream handler for logging messages to the console,
remove other logging handlers, and enable capturing warnings.
.. versionadded:: 1.3.0
:param str logger: The logger to add the stream handler for.
:param level: The level to set the logger to, will default to WARNING if no level is specified.
:type level: None, int, str
:param formatter: The format to use for logging messages to the console.
:type formatter: str, :py:class:`logging.Formatter`
:return: The new configured stream handler.
:rtype: :py:class:`logging.StreamHandler`
### Response:
def configure_stream_logger(logger='', level=None, formatter='%(levelname)-8s %(message)s'):
"""
Configure the default stream handler for logging messages to the console,
remove other logging handlers, and enable capturing warnings.
.. versionadded:: 1.3.0
:param str logger: The logger to add the stream handler for.
:param level: The level to set the logger to, will default to WARNING if no level is specified.
:type level: None, int, str
:param formatter: The format to use for logging messages to the console.
:type formatter: str, :py:class:`logging.Formatter`
:return: The new configured stream handler.
:rtype: :py:class:`logging.StreamHandler`
"""
level = level or logging.WARNING
if isinstance(level, str):
level = getattr(logging, level, None)
if level is None:
raise ValueError('invalid log level: ' + level)
root_logger = logging.getLogger('')
for handler in root_logger.handlers:
root_logger.removeHandler(handler)
logging.getLogger(logger).setLevel(logging.DEBUG)
console_log_handler = logging.StreamHandler()
console_log_handler.setLevel(level)
if isinstance(formatter, str):
formatter = logging.Formatter(formatter)
elif not isinstance(formatter, logging.Formatter):
raise TypeError('formatter must be an instance of logging.Formatter')
console_log_handler.setFormatter(formatter)
logging.getLogger(logger).addHandler(console_log_handler)
logging.captureWarnings(True)
return console_log_handler |
def update(self, collection_id, title=None, description=None, private=False):
"""
Update an existing collection belonging to the logged-in user.
This requires the 'write_collections' scope.
:param collection_id [string]: The collection’s ID. Required.
:param title [string]: The title of the collection. (Required.)
:param description [string]: The collection’s description. (Optional.)
:param private [boolean]: Whether to make this collection private. (Optional; default false).
:return: [Collection]: The Unsplash Collection.
"""
url = "/collections/%s" % collection_id
data = {
"title": title,
"description": description,
"private": private
}
result = self._put(url, data=data)
return CollectionModel.parse(result) | Update an existing collection belonging to the logged-in user.
This requires the 'write_collections' scope.
:param collection_id [string]: The collection’s ID. Required.
:param title [string]: The title of the collection. (Required.)
:param description [string]: The collection’s description. (Optional.)
:param private [boolean]: Whether to make this collection private. (Optional; default false).
:return: [Collection]: The Unsplash Collection. | Below is the the instruction that describes the task:
### Input:
Update an existing collection belonging to the logged-in user.
This requires the 'write_collections' scope.
:param collection_id [string]: The collection’s ID. Required.
:param title [string]: The title of the collection. (Required.)
:param description [string]: The collection’s description. (Optional.)
:param private [boolean]: Whether to make this collection private. (Optional; default false).
:return: [Collection]: The Unsplash Collection.
### Response:
def update(self, collection_id, title=None, description=None, private=False):
"""
Update an existing collection belonging to the logged-in user.
This requires the 'write_collections' scope.
:param collection_id [string]: The collection’s ID. Required.
:param title [string]: The title of the collection. (Required.)
:param description [string]: The collection’s description. (Optional.)
:param private [boolean]: Whether to make this collection private. (Optional; default false).
:return: [Collection]: The Unsplash Collection.
"""
url = "/collections/%s" % collection_id
data = {
"title": title,
"description": description,
"private": private
}
result = self._put(url, data=data)
return CollectionModel.parse(result) |
def _make_json_response(self, data, headers=None, status_code=200):
"""Return a response object from the given JSON data.
:param data: Data to JSON-encode.
:type data: mixed
:param headers: Dict of headers to include in the requests.
:type headers: dict
:param status_code: HTTP status code.
:type status_code: int
:rtype: requests.Response
"""
response_headers = {}
if headers is not None:
response_headers.update(headers)
response_headers['Content-Type'] = 'application/json;charset=UTF-8'
response_headers['Cache-Control'] = 'no-store'
response_headers['Pragma'] = 'no-cache'
return self._make_response(json.dumps(data),
response_headers,
status_code) | Return a response object from the given JSON data.
:param data: Data to JSON-encode.
:type data: mixed
:param headers: Dict of headers to include in the requests.
:type headers: dict
:param status_code: HTTP status code.
:type status_code: int
:rtype: requests.Response | Below is the the instruction that describes the task:
### Input:
Return a response object from the given JSON data.
:param data: Data to JSON-encode.
:type data: mixed
:param headers: Dict of headers to include in the requests.
:type headers: dict
:param status_code: HTTP status code.
:type status_code: int
:rtype: requests.Response
### Response:
def _make_json_response(self, data, headers=None, status_code=200):
"""Return a response object from the given JSON data.
:param data: Data to JSON-encode.
:type data: mixed
:param headers: Dict of headers to include in the requests.
:type headers: dict
:param status_code: HTTP status code.
:type status_code: int
:rtype: requests.Response
"""
response_headers = {}
if headers is not None:
response_headers.update(headers)
response_headers['Content-Type'] = 'application/json;charset=UTF-8'
response_headers['Cache-Control'] = 'no-store'
response_headers['Pragma'] = 'no-cache'
return self._make_response(json.dumps(data),
response_headers,
status_code) |
def _clone_and_score_clusterer(clf, X, n_clusters):
"""Clones and scores clusterer instance.
Args:
clf: Clusterer instance that implements ``fit``,``fit_predict``, and
``score`` methods, and an ``n_clusters`` hyperparameter.
e.g. :class:`sklearn.cluster.KMeans` instance
X (array-like, shape (n_samples, n_features)):
Data to cluster, where n_samples is the number of samples and
n_features is the number of features.
n_clusters (int): Number of clusters
Returns:
score: Score of clusters
time: Number of seconds it took to fit cluster
"""
start = time.time()
clf = clone(clf)
setattr(clf, 'n_clusters', n_clusters)
return clf.fit(X).score(X), time.time() - start | Clones and scores clusterer instance.
Args:
clf: Clusterer instance that implements ``fit``,``fit_predict``, and
``score`` methods, and an ``n_clusters`` hyperparameter.
e.g. :class:`sklearn.cluster.KMeans` instance
X (array-like, shape (n_samples, n_features)):
Data to cluster, where n_samples is the number of samples and
n_features is the number of features.
n_clusters (int): Number of clusters
Returns:
score: Score of clusters
time: Number of seconds it took to fit cluster | Below is the the instruction that describes the task:
### Input:
Clones and scores clusterer instance.
Args:
clf: Clusterer instance that implements ``fit``,``fit_predict``, and
``score`` methods, and an ``n_clusters`` hyperparameter.
e.g. :class:`sklearn.cluster.KMeans` instance
X (array-like, shape (n_samples, n_features)):
Data to cluster, where n_samples is the number of samples and
n_features is the number of features.
n_clusters (int): Number of clusters
Returns:
score: Score of clusters
time: Number of seconds it took to fit cluster
### Response:
def _clone_and_score_clusterer(clf, X, n_clusters):
"""Clones and scores clusterer instance.
Args:
clf: Clusterer instance that implements ``fit``,``fit_predict``, and
``score`` methods, and an ``n_clusters`` hyperparameter.
e.g. :class:`sklearn.cluster.KMeans` instance
X (array-like, shape (n_samples, n_features)):
Data to cluster, where n_samples is the number of samples and
n_features is the number of features.
n_clusters (int): Number of clusters
Returns:
score: Score of clusters
time: Number of seconds it took to fit cluster
"""
start = time.time()
clf = clone(clf)
setattr(clf, 'n_clusters', n_clusters)
return clf.fit(X).score(X), time.time() - start |
def result_tree_list(cl):
    """
    Build the change-list tree context: the column headers and the row
    data together.

    Args:
        cl: The Django admin ``ChangeList`` instance being rendered.

    Returns:
        dict: ``cl`` itself plus ``result_headers`` and ``results``
        lists; on Django 1.3+ also ``result_hidden_fields``.
    """
    import django

    result = {
        'cl': cl,
        'result_headers': list(result_headers(cl)),
        'results': list(tree_results(cl)),
    }
    # The old check (VERSION[0] == 1 and VERSION[1] > 2) wrongly excluded
    # Django 2.0+, where result_hidden_fields still exists; compare the
    # version tuple directly instead.
    if django.VERSION >= (1, 3):
        from django.contrib.admin.templatetags.admin_list import result_hidden_fields
        result['result_hidden_fields'] = list(result_hidden_fields(cl))
    return result
### Input:
Displays the headers and data list together
### Response:
def result_tree_list(cl):
"""
Displays the headers and data list together
"""
import django
result = {
'cl': cl,
'result_headers': list(result_headers(cl)),
'results': list(tree_results(cl))
}
if django.VERSION[0] == 1 and django.VERSION[1] > 2:
from django.contrib.admin.templatetags.admin_list import result_hidden_fields
result['result_hidden_fields'] = list(result_hidden_fields(cl))
return result |
def _handle_metadata(self, node, scope, ctxt, stream):
    """Collect the handler results for the metadata attached to *node*.

    Watch/update metadata and packtype/packer metadata are each delegated
    to their dedicated handler; the results are returned as a list.
    """
    self._dlog("handling node metadata {}".format(node.metadata.keyvals))
    keyvals = node.metadata.keyvals

    collected = []
    if "watch" in keyvals or "update" in keyvals:
        collected.append(
            self._handle_watch_metadata(node, scope, ctxt, stream)
        )
    if "packtype" in keyvals or "packer" in keyvals:
        collected.append(
            self._handle_packed_metadata(node, scope, ctxt, stream)
        )
    return collected
### Input:
Handle metadata for the node
### Response:
def _handle_metadata(self, node, scope, ctxt, stream):
"""Handle metadata for the node
"""
self._dlog("handling node metadata {}".format(node.metadata.keyvals))
keyvals = node.metadata.keyvals
metadata_info = []
if "watch" in node.metadata.keyvals or "update" in keyvals:
metadata_info.append(
self._handle_watch_metadata(node, scope, ctxt, stream)
)
if "packtype" in node.metadata.keyvals or "packer" in keyvals:
metadata_info.append(
self._handle_packed_metadata(node, scope, ctxt, stream)
)
return metadata_info |
def forward(self,  # pylint: disable=arguments-differ
            inputs: torch.Tensor,
            word_inputs: torch.Tensor = None) -> Dict[str, Union[torch.Tensor, List[torch.Tensor]]]:
    """
    Parameters
    ----------
    inputs: ``torch.Tensor``, required.
        Shape ``(batch_size, timesteps, 50)`` of character ids representing the current batch.
    word_inputs : ``torch.Tensor``, required.
        If you passed a cached vocab, you can in addition pass a tensor of shape ``(batch_size, timesteps)``,
        which represent word ids which have been pre-cached.

    Returns
    -------
    Dict with keys:

    ``'activations'``: ``List[torch.Tensor]``
        A list of activations at each layer of the network, each of shape
        ``(batch_size, timesteps + 2, embedding_dim)``
    ``'mask'``: ``torch.Tensor``
        Shape ``(batch_size, timesteps + 2)`` long tensor with sequence mask.

    Note that the output tensors all include additional special begin and end of sequence
    markers.
    """
    if self._word_embedding is not None and word_inputs is not None:
        try:
            # NOTE(review): word id 0 is treated as padding here — confirm
            # against the vocabulary cache's padding convention.
            mask_without_bos_eos = (word_inputs > 0).long()
            # The character cnn part is cached - just look it up.
            embedded_inputs = self._word_embedding(word_inputs)  # type: ignore
            # shape (batch_size, timesteps + 2, embedding_dim)
            type_representation, mask = add_sentence_boundary_token_ids(
                    embedded_inputs,
                    mask_without_bos_eos,
                    self._bos_embedding,
                    self._eos_embedding
            )
        except RuntimeError:
            # Back off to running the character convolutions,
            # as we might not have the words in the cache.
            token_embedding = self._token_embedder(inputs)
            mask = token_embedding['mask']
            type_representation = token_embedding['token_embedding']
    else:
        token_embedding = self._token_embedder(inputs)
        mask = token_embedding['mask']
        type_representation = token_embedding['token_embedding']
    lstm_outputs = self._elmo_lstm(type_representation, mask)

    # Prepare the output.  The first layer is duplicated.
    # Because of minor differences in how masking is applied depending
    # on whether the char cnn layers are cached, we'll be defensive and
    # multiply by the mask here. It's not strictly necessary, as the
    # mask passed on is correct, but the values in the padded areas
    # of the char cnn representations can change.
    output_tensors = [
            torch.cat([type_representation, type_representation], dim=-1) * mask.float().unsqueeze(-1)
    ]
    # Split the stacked LSTM output (one slice per layer) into the list
    # of per-layer activations expected by callers.
    for layer_activations in torch.chunk(lstm_outputs, lstm_outputs.size(0), dim=0):
        output_tensors.append(layer_activations.squeeze(0))

    return {
            'activations': output_tensors,
            'mask': mask,
    }
----------
inputs: ``torch.Tensor``, required.
Shape ``(batch_size, timesteps, 50)`` of character ids representing the current batch.
word_inputs : ``torch.Tensor``, required.
If you passed a cached vocab, you can in addition pass a tensor of shape ``(batch_size, timesteps)``,
which represent word ids which have been pre-cached.
Returns
-------
Dict with keys:
``'activations'``: ``List[torch.Tensor]``
A list of activations at each layer of the network, each of shape
``(batch_size, timesteps + 2, embedding_dim)``
``'mask'``: ``torch.Tensor``
Shape ``(batch_size, timesteps + 2)`` long tensor with sequence mask.
Note that the output tensors all include additional special begin and end of sequence
markers. | Below is the the instruction that describes the task:
### Input:
Parameters
----------
inputs: ``torch.Tensor``, required.
Shape ``(batch_size, timesteps, 50)`` of character ids representing the current batch.
word_inputs : ``torch.Tensor``, required.
If you passed a cached vocab, you can in addition pass a tensor of shape ``(batch_size, timesteps)``,
which represent word ids which have been pre-cached.
Returns
-------
Dict with keys:
``'activations'``: ``List[torch.Tensor]``
A list of activations at each layer of the network, each of shape
``(batch_size, timesteps + 2, embedding_dim)``
``'mask'``: ``torch.Tensor``
Shape ``(batch_size, timesteps + 2)`` long tensor with sequence mask.
Note that the output tensors all include additional special begin and end of sequence
markers.
### Response:
def forward(self, # pylint: disable=arguments-differ
inputs: torch.Tensor,
word_inputs: torch.Tensor = None) -> Dict[str, Union[torch.Tensor, List[torch.Tensor]]]:
"""
Parameters
----------
inputs: ``torch.Tensor``, required.
Shape ``(batch_size, timesteps, 50)`` of character ids representing the current batch.
word_inputs : ``torch.Tensor``, required.
If you passed a cached vocab, you can in addition pass a tensor of shape ``(batch_size, timesteps)``,
which represent word ids which have been pre-cached.
Returns
-------
Dict with keys:
``'activations'``: ``List[torch.Tensor]``
A list of activations at each layer of the network, each of shape
``(batch_size, timesteps + 2, embedding_dim)``
``'mask'``: ``torch.Tensor``
Shape ``(batch_size, timesteps + 2)`` long tensor with sequence mask.
Note that the output tensors all include additional special begin and end of sequence
markers.
"""
if self._word_embedding is not None and word_inputs is not None:
try:
mask_without_bos_eos = (word_inputs > 0).long()
# The character cnn part is cached - just look it up.
embedded_inputs = self._word_embedding(word_inputs) # type: ignore
# shape (batch_size, timesteps + 2, embedding_dim)
type_representation, mask = add_sentence_boundary_token_ids(
embedded_inputs,
mask_without_bos_eos,
self._bos_embedding,
self._eos_embedding
)
except RuntimeError:
# Back off to running the character convolutions,
# as we might not have the words in the cache.
token_embedding = self._token_embedder(inputs)
mask = token_embedding['mask']
type_representation = token_embedding['token_embedding']
else:
token_embedding = self._token_embedder(inputs)
mask = token_embedding['mask']
type_representation = token_embedding['token_embedding']
lstm_outputs = self._elmo_lstm(type_representation, mask)
# Prepare the output. The first layer is duplicated.
# Because of minor differences in how masking is applied depending
# on whether the char cnn layers are cached, we'll be defensive and
# multiply by the mask here. It's not strictly necessary, as the
# mask passed on is correct, but the values in the padded areas
# of the char cnn representations can change.
output_tensors = [
torch.cat([type_representation, type_representation], dim=-1) * mask.float().unsqueeze(-1)
]
for layer_activations in torch.chunk(lstm_outputs, lstm_outputs.size(0), dim=0):
output_tensors.append(layer_activations.squeeze(0))
return {
'activations': output_tensors,
'mask': mask,
} |
def calculate(self, **state):
    """
    Evaluate the polynomial property model at the specified temperature,
    in the units given by this object's 'property_units' property.

    :param T: [K] temperature
    :returns: physical property value
    """
    super().calculate(**state)
    temperature = state['T']
    return np.polyval(self._coeffs, temperature)
in the units specified by the object's 'property_units' property.
:param T: [K] temperature
:returns: physical property value | Below is the the instruction that describes the task:
### Input:
Calculate the material physical property at the specified temperature
in the units specified by the object's 'property_units' property.
:param T: [K] temperature
:returns: physical property value
### Response:
def calculate(self, **state):
"""
Calculate the material physical property at the specified temperature
in the units specified by the object's 'property_units' property.
:param T: [K] temperature
:returns: physical property value
"""
super().calculate(**state)
return np.polyval(self._coeffs, state['T']) |
def blog_post_feed(request, format, **kwargs):
    """
    Blog post feeds - dispatch ``format`` ("rss" or "atom") to the
    matching feed view, raising 404 for anything else.
    """
    feed_views = {"rss": PostsRSS, "atom": PostsAtom}
    try:
        return feed_views[format](**kwargs)(request)
    except KeyError:
        raise Http404()
### Input:
Blog posts feeds - maps format to the correct feed view.
### Response:
def blog_post_feed(request, format, **kwargs):
"""
Blog posts feeds - maps format to the correct feed view.
"""
try:
return {"rss": PostsRSS, "atom": PostsAtom}[format](**kwargs)(request)
except KeyError:
raise Http404() |
def set_loglevel(self, level):
    """
    Set the minimum log level for every component.

    Args:
        level (int): e.g. ``logging.DEBUG`` or ``logging.ERROR``; see
            https://docs.python.org/2/library/logging.html#logging-levels
    """
    # Remember the level, then reconfigure the stdio log handlers to use it.
    self.log_level = level
    log_manager.config_stdio(default_level=level)
log_manager.config_stdio(default_level=level) | Set the minimum loglevel for all components
Args:
level (int): eg. logging.DEBUG or logging.ERROR. See also https://docs.python.org/2/library/logging.html#logging-levels | Below is the the instruction that describes the task:
### Input:
Set the minimum loglevel for all components
Args:
level (int): eg. logging.DEBUG or logging.ERROR. See also https://docs.python.org/2/library/logging.html#logging-levels
### Response:
def set_loglevel(self, level):
"""
Set the minimum loglevel for all components
Args:
level (int): eg. logging.DEBUG or logging.ERROR. See also https://docs.python.org/2/library/logging.html#logging-levels
"""
self.log_level = level
log_manager.config_stdio(default_level=level) |
def checkout(cwd,
             remote,
             target=None,
             user=None,
             username=None,
             password=None,
             *opts):
    '''
    Check out a working copy of a remote Subversion repository
    directory or file.

    cwd
        The path to the Subversion repository

    remote : None
        URL to checkout

    target : None
        The name to give the file or directory working copy
        Default: svn uses the remote basename

    user : None
        Run svn as a user other than what the minion runs as

    username : None
        Connect to the Subversion server as another user

    password : None
        Connect to the Subversion server with this password

        .. versionadded:: 0.17.0

    CLI Example:

    .. code-block:: bash

        salt '*' svn.checkout /path/to/repo svn://remote/repo
    '''
    # Extra CLI options come first, then the URL, then (optionally) the
    # local working-copy name.
    svn_args = opts + (remote,)
    if target:
        svn_args += (target,)
    return _run_svn('checkout', cwd, user, username, password, svn_args)
directory or file
cwd
The path to the Subversion repository
remote : None
URL to checkout
target : None
The name to give the file or directory working copy
Default: svn uses the remote basename
user : None
Run svn as a user other than what the minion runs as
username : None
Connect to the Subversion server as another user
password : None
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
CLI Example:
.. code-block:: bash
salt '*' svn.checkout /path/to/repo svn://remote/repo | Below is the the instruction that describes the task:
### Input:
Download a working copy of the remote Subversion repository
directory or file
cwd
The path to the Subversion repository
remote : None
URL to checkout
target : None
The name to give the file or directory working copy
Default: svn uses the remote basename
user : None
Run svn as a user other than what the minion runs as
username : None
Connect to the Subversion server as another user
password : None
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
CLI Example:
.. code-block:: bash
salt '*' svn.checkout /path/to/repo svn://remote/repo
### Response:
def checkout(cwd,
remote,
target=None,
user=None,
username=None,
password=None,
*opts):
'''
Download a working copy of the remote Subversion repository
directory or file
cwd
The path to the Subversion repository
remote : None
URL to checkout
target : None
The name to give the file or directory working copy
Default: svn uses the remote basename
user : None
Run svn as a user other than what the minion runs as
username : None
Connect to the Subversion server as another user
password : None
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
CLI Example:
.. code-block:: bash
salt '*' svn.checkout /path/to/repo svn://remote/repo
'''
opts += (remote,)
if target:
opts += (target,)
return _run_svn('checkout', cwd, user, username, password, opts) |
def ansi(string, *args):
    """Prefix *string* with the given ANSI codes and append a style reset.

    Each extra argument must itself be an ANSI escape string (validated
    against ``ANSI_PATTERN``); otherwise a ``ValueError`` is raised.
    """
    codes = []
    for arg in args:
        arg = str(arg)
        if not re.match(ANSI_PATTERN, arg):
            raise ValueError('Additional arguments must be ansi strings')
        codes.append(arg)
    return ''.join(codes) + string + colorama.Style.RESET_ALL
### Input:
Convenience function to chain multiple ColorWrappers to a string
### Response:
def ansi(string, *args):
""" Convenience function to chain multiple ColorWrappers to a string """
ansi = ''
for arg in args:
arg = str(arg)
if not re.match(ANSI_PATTERN, arg):
raise ValueError('Additional arguments must be ansi strings')
ansi += arg
return ansi + string + colorama.Style.RESET_ALL |
def center_visible_line(self, before_scroll_offset=False,
                        after_scroll_offset=False):
    """
    Like `first_visible_line`, but for the center visible line.
    """
    first = self.first_visible_line(after_scroll_offset)
    last = self.last_visible_line(before_scroll_offset)
    return first + (last - first) // 2
### Input:
Like `first_visible_line`, but for the center visible line.
### Response:
def center_visible_line(self, before_scroll_offset=False,
after_scroll_offset=False):
"""
Like `first_visible_line`, but for the center visible line.
"""
return (self.first_visible_line(after_scroll_offset) +
(self.last_visible_line(before_scroll_offset) -
self.first_visible_line(after_scroll_offset)) // 2
) |
def group_delete(self, id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/groups#delete-group"
    # DELETE the group resource identified by ``id``.
    endpoint = "/api/v2/groups/{id}.json".format(id=id)
    return self.call(endpoint, method="DELETE", **kwargs)
### Input:
https://developer.zendesk.com/rest_api/docs/core/groups#delete-group
### Response:
def group_delete(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/groups#delete-group"
api_path = "/api/v2/groups/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, method="DELETE", **kwargs) |
def from_bytes(cls, image_bytes, affine, crs, band_names=None):
    """Create a GeoRaster2 from the bytes of an encoded image.

    :param image_bytes: io.BytesIO object
    :param affine: raster's affine
    :param crs: raster's crs
    :param band_names: e.g. ['red', 'blue'] or 'red'
    """
    decoded = imageio.imread(io.BytesIO(image_bytes))
    # Move the channel axis first: (h, w, c) -> (c, h, w).
    bands_first = np.rollaxis(decoded, 2)
    if isinstance(band_names, str):
        band_names = [band_names]
    elif band_names is None:
        band_names = [0, 1, 2]
    # Only the first three bands are kept (e.g. RGB without alpha).
    return GeoRaster2(image=bands_first[:3, :, :], affine=affine, crs=crs, band_names=band_names)
:param image_bytes: io.BytesIO object
:param affine: rasters affine
:param crs: rasters crs
:param band_names: e.g. ['red', 'blue'] or 'red' | Below is the the instruction that describes the task:
### Input:
Create GeoRaster from image BytesIo object.
:param image_bytes: io.BytesIO object
:param affine: rasters affine
:param crs: rasters crs
:param band_names: e.g. ['red', 'blue'] or 'red'
### Response:
def from_bytes(cls, image_bytes, affine, crs, band_names=None):
"""Create GeoRaster from image BytesIo object.
:param image_bytes: io.BytesIO object
:param affine: rasters affine
:param crs: rasters crs
:param band_names: e.g. ['red', 'blue'] or 'red'
"""
b = io.BytesIO(image_bytes)
image = imageio.imread(b)
roll = np.rollaxis(image, 2)
if band_names is None:
band_names = [0, 1, 2]
elif isinstance(band_names, str):
band_names = [band_names]
return GeoRaster2(image=roll[:3, :, :], affine=affine, crs=crs, band_names=band_names) |
def guid2bytes(s):
    """Serialize a textual GUID into its 16-byte binary representation.

    The first three fields are little-endian, the remaining two are
    big-endian (the on-disk layout used by e.g. ASF files).
    """
    assert isinstance(s, str)
    assert len(s) == 36
    data1 = int(s[0:8], 16).to_bytes(4, "little")
    data2 = int(s[9:13], 16).to_bytes(2, "little")
    data3 = int(s[14:18], 16).to_bytes(2, "little")
    data4 = int(s[19:23], 16).to_bytes(2, "big")
    data5 = int(s[24:], 16).to_bytes(6, "big")
    return data1 + data2 + data3 + data4 + data5
### Input:
Converts a GUID to the serialized bytes representation
### Response:
def guid2bytes(s):
"""Converts a GUID to the serialized bytes representation"""
assert isinstance(s, str)
assert len(s) == 36
p = struct.pack
return b"".join([
p("<IHH", int(s[:8], 16), int(s[9:13], 16), int(s[14:18], 16)),
p(">H", int(s[19:23], 16)),
p(">Q", int(s[24:], 16))[2:],
]) |
def has_style(node):
    """Tells us if node element has defined styling.

    :Args:
      - node (:class:`ooxml.doc.Element`): Element

    :Returns:
      True or False
    """
    styling_keys = ('b', 'i', 'u', 'strike', 'color', 'jc', 'sz', 'ind',
                    'superscript', 'subscript', 'small_caps')
    # Generator form instead of the old `any([True for ... if ...])`,
    # which built a throwaway list and could not short-circuit.
    return any(key in node.rpr for key in styling_keys)
return any([True for elem in elements if elem in node.rpr]) | Tells us if node element has defined styling.
:Args:
- node (:class:`ooxml.doc.Element`): Element
:Returns:
True or False | Below is the the instruction that describes the task:
### Input:
Tells us if node element has defined styling.
:Args:
- node (:class:`ooxml.doc.Element`): Element
:Returns:
True or False
### Response:
def has_style(node):
"""Tells us if node element has defined styling.
:Args:
- node (:class:`ooxml.doc.Element`): Element
:Returns:
True or False
"""
elements = ['b', 'i', 'u', 'strike', 'color', 'jc', 'sz', 'ind', 'superscript', 'subscript', 'small_caps']
return any([True for elem in elements if elem in node.rpr]) |
def predict(self, x):
    """
    Forward pass through the MLP without any weight update.

    **Args:**

    * `x` : input vector (1-dimensional array)

    **Returns:**

    * `y` : MLP output (float or 1-dimensional array, depending on the
      number of outputs).
    """
    # Propagate the input through every hidden layer in turn.
    for layer in self.layers:
        x = layer.predict(x)
    # Store the final hidden activations; index 0 keeps the bias term.
    self.x[1:] = x
    # Output layer: dot product for a single output, row-wise sum otherwise.
    if self.outputs == 1:
        self.y = np.dot(self.w, self.x)
    else:
        self.y = np.sum(self.w * self.x, axis=1)
    return self.y
return self.y | This function make forward pass through MLP (no update).
**Args:**
* `x` : input vector (1-dimensional array)
**Returns:**
* `y` : output of MLP (float or 1-diemnsional array).
Size depends on number of MLP outputs. | Below is the the instruction that describes the task:
### Input:
This function make forward pass through MLP (no update).
**Args:**
* `x` : input vector (1-dimensional array)
**Returns:**
* `y` : output of MLP (float or 1-diemnsional array).
Size depends on number of MLP outputs.
### Response:
def predict(self, x):
"""
This function make forward pass through MLP (no update).
**Args:**
* `x` : input vector (1-dimensional array)
**Returns:**
* `y` : output of MLP (float or 1-diemnsional array).
Size depends on number of MLP outputs.
"""
# forward pass to hidden layers
for l in self.layers:
x = l.predict(x)
self.x[1:] = x
# forward pass to output layer
if self.outputs == 1:
self.y = np.dot(self.w, self.x)
else:
self.y = np.sum(self.w*self.x, axis=1)
return self.y |
def _get_pdi(cls, df, windows):
""" +DI, positive directional moving index
:param df: data
:param windows: range
:return:
"""
window = cls.get_only_one_positive_int(windows)
pdm_column = 'pdm_{}'.format(window)
tr_column = 'atr_{}'.format(window)
pdi_column = 'pdi_{}'.format(window)
df[pdi_column] = df[pdm_column] / df[tr_column] * 100
return df[pdi_column] | +DI, positive directional moving index
:param df: data
:param windows: range
:return: | Below is the the instruction that describes the task:
### Input:
+DI, positive directional moving index
:param df: data
:param windows: range
:return:
### Response:
def _get_pdi(cls, df, windows):
""" +DI, positive directional moving index
:param df: data
:param windows: range
:return:
"""
window = cls.get_only_one_positive_int(windows)
pdm_column = 'pdm_{}'.format(window)
tr_column = 'atr_{}'.format(window)
pdi_column = 'pdi_{}'.format(window)
df[pdi_column] = df[pdm_column] / df[tr_column] * 100
return df[pdi_column] |
def gen(function):
    """
    Allows using a generator to chain together reversible actions.

    This decorator may be added to a generator that yields reversible actions
    (any object with a ``.forwards()`` and ``.backwards()`` method). These may
    be constructed manually or via :py:func:`reversible.action`. The decorated
    function, when called, will return another reversible action that runs all
    yielded actions and if any of them fails, rolls back all actions that had
    been executed *in the reverse order*.

    Values can be returned by raising the :py:class:`reversible.Return`
    exception, or if using Python 3.3 or newer, by simply using the ``return``
    statement.

    For example,

    .. code-block:: python

        @reversible.gen
        def submit_order(order):
            # CreateOrder is a class that declares a forwards() and
            # backwards() method. The forwards() method returns the
            # order_id. It is propagated back to the yield point.
            order_id = yield CreateOrder(order.cart)

            # If get_payment_info throws an exception, the order will
            # be deleted and the exception will be re-raised to the
            # caller.
            payment_info = PaymentStore.get_payment_info(order.user_id)

            try:
                # charge_order is a function that returns an action.
                # It is easy to create such a function by using
                # reversible.action as a decorator.
                total = yield charge_order(payment_info, order_id)
            except InsufficientFundsException:
                # Exceptions thrown by a dependency's forwards()
                # method are propagated at the yield point. It's
                # possible to handle them and prevent rollback for
                # everything else.
                send_insufficient_funds_email(order_id, order.user_id)
            else:
                yield update_receipt(order_id, total)

            send_receipt(order_id)

            # The order ID is the result of this action.
            raise reversible.Return(order_id)

        order_id = reversible.execute(submit_order(order))

        # If another action based on reversible.gen calls
        # submit_order, it can simply do:
        #
        #   order_id = yield submit_order(order_details)

    When an action fails, its ``backwards`` method and the ``backwards``
    methods of all actions executed so far will be called in reverse of the
    order in which the ``forwards`` methods were called.

    If any of the ``backwards`` methods fail, rollback will be aborted.

    :param function:
        The generator function. This generator must yield action objects.
    :returns:
        A function that, when called, produces an action object that executes
        actions and functions as yielded by the generator.
    """
    @functools.wraps(function)  # TODO: use wrapt instead?
    def new_function(*args, **kwargs):
        try:
            value = function(*args, **kwargs)
        except Return as result:
            # The function raised Return before yielding anything: the
            # Return *exception instance* itself is used as the action
            # context, and the forwards lambda unwraps ctx.value.
            return SimpleAction(
                lambda ctx: ctx.value,
                lambda _: None,
                result,
            )
        else:
            if isinstance(value, types.GeneratorType):
                # A real generator: wrap it so yielded actions run with
                # rollback-in-reverse-order semantics.
                return _GeneratorAction(value)
            else:
                # The function returned a plain value without yielding:
                # wrap it in a no-op reversible action whose result is
                # that value and whose rollback does nothing.
                return SimpleAction(
                    lambda _: value,
                    lambda _: None,
                    None,
                )

    return new_function
This decorator may be added to a generator that yields reversible actions
(any object with a ``.forwards()`` and ``.backwards()`` method). These may
be constructed manually or via :py:func:`reversible.action`. The decorated
function, when called, will return another reversible action that runs all
yielded actions and if any of them fails, rolls back all actions that had
been executed *in the reverse order*.
Values can be returned by raising the :py:class:`reversible.Return`
exception, or if using Python 3.3 or newer, by simply using the ``return``
statement.
For example,
.. code-block:: python
@reversible.gen
def submit_order(order):
# CreateOrder is a class that declares a forwards() and
# backwards() method. The forwards() method returns the
# order_id. It is propagated back to the yield point.
order_id = yield CreateOrder(order.cart)
# If get_payment_info throws an exception, the order will
# be deleted and the exeception will be re-raised to the
# caller.
payment_info = PaymentStore.get_payment_info(order.user_id)
try:
# charge_order is a function that returns an action.
# It is easy to create such a function by using
# reversible.action as a decorator.
total = yield charge_order(payment_info, order_id)
except InsufficientFundsException:
# Exceptions thrown by a dependency's forwards()
# method are propagated at the yield point. It's
# possible to handle them and prevent rollback for
# everything else.
send_insufficient_funds_email(order_id, order.user_id)
else:
yield update_receipt(order_id, total)
send_receipt(order_id)
# The order ID is the result of this action.
raise reversible.Return(order_id)
order_id = reversible.execute(submit_order(order))
# If another action based on reversible.gen calls
# submit_order, it can simply do:
#
# order_id = yield submit_order(order_details)
When an action fails, its ``backwards`` method and the ``backwards``
methods of all actions executed so far will be called in reverse of the
order in which the ``forwards`` methods were called.
If any of the ``backwards`` methods fail, rollback will be aborted.
:param function:
The generator function. This generator must yield action objects.
:returns:
A function that, when called, produces an action object that executes
actions and functions as yielded by the generator. | Below is the the instruction that describes the task:
### Input:
Allows using a generator to chain together reversible actions.
This decorator may be added to a generator that yields reversible actions
(any object with a ``.forwards()`` and ``.backwards()`` method). These may
be constructed manually or via :py:func:`reversible.action`. The decorated
function, when called, will return another reversible action that runs all
yielded actions and if any of them fails, rolls back all actions that had
been executed *in the reverse order*.
Values can be returned by raising the :py:class:`reversible.Return`
exception, or if using Python 3.3 or newer, by simply using the ``return``
statement.
For example,
.. code-block:: python
@reversible.gen
def submit_order(order):
# CreateOrder is a class that declares a forwards() and
# backwards() method. The forwards() method returns the
# order_id. It is propagated back to the yield point.
order_id = yield CreateOrder(order.cart)
# If get_payment_info throws an exception, the order will
# be deleted and the exeception will be re-raised to the
# caller.
payment_info = PaymentStore.get_payment_info(order.user_id)
try:
# charge_order is a function that returns an action.
# It is easy to create such a function by using
# reversible.action as a decorator.
total = yield charge_order(payment_info, order_id)
except InsufficientFundsException:
# Exceptions thrown by a dependency's forwards()
# method are propagated at the yield point. It's
# possible to handle them and prevent rollback for
# everything else.
send_insufficient_funds_email(order_id, order.user_id)
else:
yield update_receipt(order_id, total)
send_receipt(order_id)
# The order ID is the result of this action.
raise reversible.Return(order_id)
order_id = reversible.execute(submit_order(order))
# If another action based on reversible.gen calls
# submit_order, it can simply do:
#
# order_id = yield submit_order(order_details)
When an action fails, its ``backwards`` method and the ``backwards``
methods of all actions executed so far will be called in reverse of the
order in which the ``forwards`` methods were called.
If any of the ``backwards`` methods fail, rollback will be aborted.
:param function:
The generator function. This generator must yield action objects.
:returns:
A function that, when called, produces an action object that executes
actions and functions as yielded by the generator.
### Response:
def gen(function):
"""
Allows using a generator to chain together reversible actions.
This decorator may be added to a generator that yields reversible actions
(any object with a ``.forwards()`` and ``.backwards()`` method). These may
be constructed manually or via :py:func:`reversible.action`. The decorated
function, when called, will return another reversible action that runs all
yielded actions and if any of them fails, rolls back all actions that had
been executed *in the reverse order*.
Values can be returned by raising the :py:class:`reversible.Return`
exception, or if using Python 3.3 or newer, by simply using the ``return``
statement.
For example,
.. code-block:: python
@reversible.gen
def submit_order(order):
# CreateOrder is a class that declares a forwards() and
# backwards() method. The forwards() method returns the
# order_id. It is propagated back to the yield point.
order_id = yield CreateOrder(order.cart)
# If get_payment_info throws an exception, the order will
# be deleted and the exeception will be re-raised to the
# caller.
payment_info = PaymentStore.get_payment_info(order.user_id)
try:
# charge_order is a function that returns an action.
# It is easy to create such a function by using
# reversible.action as a decorator.
total = yield charge_order(payment_info, order_id)
except InsufficientFundsException:
# Exceptions thrown by a dependency's forwards()
# method are propagated at the yield point. It's
# possible to handle them and prevent rollback for
# everything else.
send_insufficient_funds_email(order_id, order.user_id)
else:
yield update_receipt(order_id, total)
send_receipt(order_id)
# The order ID is the result of this action.
raise reversible.Return(order_id)
order_id = reversible.execute(submit_order(order))
# If another action based on reversible.gen calls
# submit_order, it can simply do:
#
# order_id = yield submit_order(order_details)
When an action fails, its ``backwards`` method and the ``backwards``
methods of all actions executed so far will be called in reverse of the
order in which the ``forwards`` methods were called.
If any of the ``backwards`` methods fail, rollback will be aborted.
:param function:
The generator function. This generator must yield action objects.
:returns:
A function that, when called, produces an action object that executes
actions and functions as yielded by the generator.
"""
@functools.wraps(function) # TODO: use wrapt instead?
def new_function(*args, **kwargs):
try:
value = function(*args, **kwargs)
except Return as result:
return SimpleAction(
lambda ctx: ctx.value,
lambda _: None,
result,
)
else:
if isinstance(value, types.GeneratorType):
return _GeneratorAction(value)
else:
return SimpleAction(
lambda _: value,
lambda _: None,
None,
)
return new_function |
def add_group(network_id, group,**kwargs):
"""
Add a resourcegroup to a network
"""
user_id = kwargs.get('user_id')
try:
net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
net_i.check_write_permission(user_id=user_id)
except NoResultFound:
raise ResourceNotFoundError("Network %s not found"%(network_id))
res_grp_i = net_i.add_group(group.name, group.description, group.status)
hdb.add_resource_attributes(res_grp_i, group.attributes)
db.DBSession.flush()
if group.types is not None and len(group.types) > 0:
res_types = []
res_attrs = []
res_scenarios = {}
for typesummary in group.types:
ra, rt, rs = template.set_resource_type(res_grp_i,
typesummary.id,
**kwargs)
res_types.append(rt)
res_attrs.extend(ra)
res_scenarios.update(rs)#rs is a dict
if len(res_types) > 0:
db.DBSession.bulk_insert_mappings(ResourceType, res_types)
if len(res_attrs) > 0:
db.DBSession.bulk_insert_mappings(ResourceAttr, res_attrs)
new_res_attrs = db.DBSession.query(ResourceAttr).order_by(ResourceAttr.id.desc()).limit(len(res_attrs)).all()
all_rs = []
for ra in new_res_attrs:
ra_id = ra.id
if ra.attr_id in res_scenarios:
rs_list = res_scenarios[ra.attr_id]
for rs in rs_list:
rs_list[rs]['resource_attr_id'] = ra_id
all_rs.append(rs_list[rs])
if len(all_rs) > 0:
db.DBSession.bulk_insert_mappings(ResourceScenario, all_rs)
db.DBSession.refresh(res_grp_i)
return res_grp_i | Add a resourcegroup to a network | Below is the the instruction that describes the task:
### Input:
Add a resourcegroup to a network
### Response:
def add_group(network_id, group,**kwargs):
"""
Add a resourcegroup to a network
"""
user_id = kwargs.get('user_id')
try:
net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
net_i.check_write_permission(user_id=user_id)
except NoResultFound:
raise ResourceNotFoundError("Network %s not found"%(network_id))
res_grp_i = net_i.add_group(group.name, group.description, group.status)
hdb.add_resource_attributes(res_grp_i, group.attributes)
db.DBSession.flush()
if group.types is not None and len(group.types) > 0:
res_types = []
res_attrs = []
res_scenarios = {}
for typesummary in group.types:
ra, rt, rs = template.set_resource_type(res_grp_i,
typesummary.id,
**kwargs)
res_types.append(rt)
res_attrs.extend(ra)
res_scenarios.update(rs)#rs is a dict
if len(res_types) > 0:
db.DBSession.bulk_insert_mappings(ResourceType, res_types)
if len(res_attrs) > 0:
db.DBSession.bulk_insert_mappings(ResourceAttr, res_attrs)
new_res_attrs = db.DBSession.query(ResourceAttr).order_by(ResourceAttr.id.desc()).limit(len(res_attrs)).all()
all_rs = []
for ra in new_res_attrs:
ra_id = ra.id
if ra.attr_id in res_scenarios:
rs_list = res_scenarios[ra.attr_id]
for rs in rs_list:
rs_list[rs]['resource_attr_id'] = ra_id
all_rs.append(rs_list[rs])
if len(all_rs) > 0:
db.DBSession.bulk_insert_mappings(ResourceScenario, all_rs)
db.DBSession.refresh(res_grp_i)
return res_grp_i |
def get_db_attribute(self, table, record, column, key=None):
"""
Gets values of 'column' in 'record' in 'table'.
This method is corresponding to the following ovs-vsctl command::
$ ovs-vsctl get TBL REC COL[:KEY]
"""
if key is not None:
column = '%s:%s' % (column, key)
command = ovs_vsctl.VSCtlCommand(
'get', (table, record, column))
self.run_command([command])
if command.result:
return command.result[0]
return None | Gets values of 'column' in 'record' in 'table'.
This method is corresponding to the following ovs-vsctl command::
$ ovs-vsctl get TBL REC COL[:KEY] | Below is the the instruction that describes the task:
### Input:
Gets values of 'column' in 'record' in 'table'.
This method is corresponding to the following ovs-vsctl command::
$ ovs-vsctl get TBL REC COL[:KEY]
### Response:
def get_db_attribute(self, table, record, column, key=None):
"""
Gets values of 'column' in 'record' in 'table'.
This method is corresponding to the following ovs-vsctl command::
$ ovs-vsctl get TBL REC COL[:KEY]
"""
if key is not None:
column = '%s:%s' % (column, key)
command = ovs_vsctl.VSCtlCommand(
'get', (table, record, column))
self.run_command([command])
if command.result:
return command.result[0]
return None |
def to_css(self):
''' Generate the CSS representation of this RGB color.
Returns:
str, ``"rgb(...)"`` or ``"rgba(...)"``
'''
if self.a == 1.0:
return "rgb(%d, %d, %d)" % (self.r, self.g, self.b)
else:
return "rgba(%d, %d, %d, %s)" % (self.r, self.g, self.b, self.a) | Generate the CSS representation of this RGB color.
Returns:
str, ``"rgb(...)"`` or ``"rgba(...)"`` | Below is the the instruction that describes the task:
### Input:
Generate the CSS representation of this RGB color.
Returns:
str, ``"rgb(...)"`` or ``"rgba(...)"``
### Response:
def to_css(self):
''' Generate the CSS representation of this RGB color.
Returns:
str, ``"rgb(...)"`` or ``"rgba(...)"``
'''
if self.a == 1.0:
return "rgb(%d, %d, %d)" % (self.r, self.g, self.b)
else:
return "rgba(%d, %d, %d, %s)" % (self.r, self.g, self.b, self.a) |
def get_neuroglancer_link(self, resource, resolution, x_range, y_range, z_range, **kwargs):
"""
Get a neuroglancer link of the cutout specified from the host specified in the remote configuration step.
Args:
resource (intern.resource.Resource): Resource compatible with cutout operations.
resolution (int): 0 indicates native resolution.
x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
Returns:
(string): Return neuroglancer link.
Raises:
RuntimeError when given invalid resource.
Other exceptions may be raised depending on the volume service's implementation.
"""
return self._volume.get_neuroglancer_link(resource, resolution, x_range, y_range, z_range, **kwargs) | Get a neuroglancer link of the cutout specified from the host specified in the remote configuration step.
Args:
resource (intern.resource.Resource): Resource compatible with cutout operations.
resolution (int): 0 indicates native resolution.
x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
Returns:
(string): Return neuroglancer link.
Raises:
RuntimeError when given invalid resource.
Other exceptions may be raised depending on the volume service's implementation. | Below is the the instruction that describes the task:
### Input:
Get a neuroglancer link of the cutout specified from the host specified in the remote configuration step.
Args:
resource (intern.resource.Resource): Resource compatible with cutout operations.
resolution (int): 0 indicates native resolution.
x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
Returns:
(string): Return neuroglancer link.
Raises:
RuntimeError when given invalid resource.
Other exceptions may be raised depending on the volume service's implementation.
### Response:
def get_neuroglancer_link(self, resource, resolution, x_range, y_range, z_range, **kwargs):
"""
Get a neuroglancer link of the cutout specified from the host specified in the remote configuration step.
Args:
resource (intern.resource.Resource): Resource compatible with cutout operations.
resolution (int): 0 indicates native resolution.
x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
Returns:
(string): Return neuroglancer link.
Raises:
RuntimeError when given invalid resource.
Other exceptions may be raised depending on the volume service's implementation.
"""
return self._volume.get_neuroglancer_link(resource, resolution, x_range, y_range, z_range, **kwargs) |
def accept(self, title, titleAlignments):
"""
Return C{True} if the read id set in C{titleAlignments} is sufficiently
different from all previously seen read sets.
@param title: A C{str} sequence title.
@param titleAlignments: An instance of L{TitleAlignment}.
@return: A C{bool} indicating whether a title has an acceptably novel
read set or not.
"""
# Sanity check: titles can only be passed once.
assert title not in self._titles, (
'Title %r seen multiple times.' % title)
readIds = titleAlignments.readIds()
newReadsRequired = ceil(self._minNew * len(readIds))
for readSet, invalidatedTitles in self._titles.values():
if len(readIds - readSet) < newReadsRequired:
# Add this title to the set of titles invalidated by this
# previously seen read set.
invalidatedTitles.append(title)
return False
# Remember the new read set and an empty list of invalidated titles.
self._titles[title] = (readIds, [])
return True | Return C{True} if the read id set in C{titleAlignments} is sufficiently
different from all previously seen read sets.
@param title: A C{str} sequence title.
@param titleAlignments: An instance of L{TitleAlignment}.
@return: A C{bool} indicating whether a title has an acceptably novel
read set or not. | Below is the the instruction that describes the task:
### Input:
Return C{True} if the read id set in C{titleAlignments} is sufficiently
different from all previously seen read sets.
@param title: A C{str} sequence title.
@param titleAlignments: An instance of L{TitleAlignment}.
@return: A C{bool} indicating whether a title has an acceptably novel
read set or not.
### Response:
def accept(self, title, titleAlignments):
"""
Return C{True} if the read id set in C{titleAlignments} is sufficiently
different from all previously seen read sets.
@param title: A C{str} sequence title.
@param titleAlignments: An instance of L{TitleAlignment}.
@return: A C{bool} indicating whether a title has an acceptably novel
read set or not.
"""
# Sanity check: titles can only be passed once.
assert title not in self._titles, (
'Title %r seen multiple times.' % title)
readIds = titleAlignments.readIds()
newReadsRequired = ceil(self._minNew * len(readIds))
for readSet, invalidatedTitles in self._titles.values():
if len(readIds - readSet) < newReadsRequired:
# Add this title to the set of titles invalidated by this
# previously seen read set.
invalidatedTitles.append(title)
return False
# Remember the new read set and an empty list of invalidated titles.
self._titles[title] = (readIds, [])
return True |
def tiles_from_bounds(self, bounds, zoom):
"""
Return all tiles intersecting with bounds.
Bounds values will be cleaned if they cross the antimeridian or are
outside of the Northern or Southern tile pyramid bounds.
- bounds: tuple of (left, bottom, right, top) bounding values in tile
pyramid CRS
- zoom: zoom level
"""
validate_zoom(zoom)
if not isinstance(bounds, tuple) or len(bounds) != 4:
raise ValueError("bounds must be a tuple of left, bottom, right, top values")
if not isinstance(bounds, Bounds):
bounds = Bounds(*bounds)
if self.is_global:
for tile in _global_tiles_from_bounds(self, bounds, zoom):
yield tile
else:
for tile in _tiles_from_cleaned_bounds(self, bounds, zoom):
yield tile | Return all tiles intersecting with bounds.
Bounds values will be cleaned if they cross the antimeridian or are
outside of the Northern or Southern tile pyramid bounds.
- bounds: tuple of (left, bottom, right, top) bounding values in tile
pyramid CRS
- zoom: zoom level | Below is the the instruction that describes the task:
### Input:
Return all tiles intersecting with bounds.
Bounds values will be cleaned if they cross the antimeridian or are
outside of the Northern or Southern tile pyramid bounds.
- bounds: tuple of (left, bottom, right, top) bounding values in tile
pyramid CRS
- zoom: zoom level
### Response:
def tiles_from_bounds(self, bounds, zoom):
"""
Return all tiles intersecting with bounds.
Bounds values will be cleaned if they cross the antimeridian or are
outside of the Northern or Southern tile pyramid bounds.
- bounds: tuple of (left, bottom, right, top) bounding values in tile
pyramid CRS
- zoom: zoom level
"""
validate_zoom(zoom)
if not isinstance(bounds, tuple) or len(bounds) != 4:
raise ValueError("bounds must be a tuple of left, bottom, right, top values")
if not isinstance(bounds, Bounds):
bounds = Bounds(*bounds)
if self.is_global:
for tile in _global_tiles_from_bounds(self, bounds, zoom):
yield tile
else:
for tile in _tiles_from_cleaned_bounds(self, bounds, zoom):
yield tile |
def Hypergeometric(N, n, K, tag=None):
"""
A Hypergeometric random variate
Parameters
----------
N : int
The total population size
n : int
The number of individuals of interest in the population
K : int
The number of individuals that will be chosen from the population
Example
-------
(Taken from the wikipedia page) Assume we have an urn with two types of
marbles, 45 black ones and 5 white ones. Standing next to the urn, you
close your eyes and draw 10 marbles without replacement. What is the
probability that exactly 4 of the 10 are white?
::
>>> black = 45
>>> white = 5
>>> draw = 10
# Now we create the distribution
>>> h = H(black + white, white, draw)
# To check the probability, in this case, we can use the underlying
# scipy.stats object
>>> h.rv.pmf(4) # What is the probability that white count = 4?
0.0039645830580151975
"""
assert (
int(N) == N and N > 0
), 'Hypergeometric total population size "N" must be an integer greater than zero.'
assert (
int(n) == n and 0 < n <= N
), 'Hypergeometric interest population size "n" must be an integer greater than zero and no more than the total population size.'
assert (
int(K) == K and 0 < K <= N
), 'Hypergeometric chosen population size "K" must be an integer greater than zero and no more than the total population size.'
return uv(ss.hypergeom(N, n, K), tag=tag) | A Hypergeometric random variate
Parameters
----------
N : int
The total population size
n : int
The number of individuals of interest in the population
K : int
The number of individuals that will be chosen from the population
Example
-------
(Taken from the wikipedia page) Assume we have an urn with two types of
marbles, 45 black ones and 5 white ones. Standing next to the urn, you
close your eyes and draw 10 marbles without replacement. What is the
probability that exactly 4 of the 10 are white?
::
>>> black = 45
>>> white = 5
>>> draw = 10
# Now we create the distribution
>>> h = H(black + white, white, draw)
# To check the probability, in this case, we can use the underlying
# scipy.stats object
>>> h.rv.pmf(4) # What is the probability that white count = 4?
0.0039645830580151975 | Below is the the instruction that describes the task:
### Input:
A Hypergeometric random variate
Parameters
----------
N : int
The total population size
n : int
The number of individuals of interest in the population
K : int
The number of individuals that will be chosen from the population
Example
-------
(Taken from the wikipedia page) Assume we have an urn with two types of
marbles, 45 black ones and 5 white ones. Standing next to the urn, you
close your eyes and draw 10 marbles without replacement. What is the
probability that exactly 4 of the 10 are white?
::
>>> black = 45
>>> white = 5
>>> draw = 10
# Now we create the distribution
>>> h = H(black + white, white, draw)
# To check the probability, in this case, we can use the underlying
# scipy.stats object
>>> h.rv.pmf(4) # What is the probability that white count = 4?
0.0039645830580151975
### Response:
def Hypergeometric(N, n, K, tag=None):
"""
A Hypergeometric random variate
Parameters
----------
N : int
The total population size
n : int
The number of individuals of interest in the population
K : int
The number of individuals that will be chosen from the population
Example
-------
(Taken from the wikipedia page) Assume we have an urn with two types of
marbles, 45 black ones and 5 white ones. Standing next to the urn, you
close your eyes and draw 10 marbles without replacement. What is the
probability that exactly 4 of the 10 are white?
::
>>> black = 45
>>> white = 5
>>> draw = 10
# Now we create the distribution
>>> h = H(black + white, white, draw)
# To check the probability, in this case, we can use the underlying
# scipy.stats object
>>> h.rv.pmf(4) # What is the probability that white count = 4?
0.0039645830580151975
"""
assert (
int(N) == N and N > 0
), 'Hypergeometric total population size "N" must be an integer greater than zero.'
assert (
int(n) == n and 0 < n <= N
), 'Hypergeometric interest population size "n" must be an integer greater than zero and no more than the total population size.'
assert (
int(K) == K and 0 < K <= N
), 'Hypergeometric chosen population size "K" must be an integer greater than zero and no more than the total population size.'
return uv(ss.hypergeom(N, n, K), tag=tag) |
def normalize_etpinard_df(df='https://plot.ly/~etpinard/191.csv', columns='x y size text'.split(),
category_col='category', possible_categories=['Africa', 'Americas', 'Asia', 'Europe', 'Oceania']):
"""Reformat a dataframe in etpinard's format for use in plot functions and sklearn models"""
possible_categories = ['Africa', 'Americas', 'Asia', 'Europe',
'Oceania'] if possible_categories is None else possible_categories
df.columns = clean_columns(df.columns)
df = pd.read_csv(df) if isinstance(df, str) else df
columns = clean_columns(list(columns))
df2 = pd.DataFrame(columns=columns)
df2[category_col] = np.concatenate([np.array([categ] * len(df)) for categ in possible_categories])
columns = zip(columns, [[clean_columns(categ + ', ' + column) for categ in possible_categories] for column in columns])
for col, category_cols in columns:
df2[col] = np.concatenate([df[label].values for label in category_cols])
return df2 | Reformat a dataframe in etpinard's format for use in plot functions and sklearn models | Below is the the instruction that describes the task:
### Input:
Reformat a dataframe in etpinard's format for use in plot functions and sklearn models
### Response:
def normalize_etpinard_df(df='https://plot.ly/~etpinard/191.csv', columns='x y size text'.split(),
category_col='category', possible_categories=['Africa', 'Americas', 'Asia', 'Europe', 'Oceania']):
"""Reformat a dataframe in etpinard's format for use in plot functions and sklearn models"""
possible_categories = ['Africa', 'Americas', 'Asia', 'Europe',
'Oceania'] if possible_categories is None else possible_categories
df.columns = clean_columns(df.columns)
df = pd.read_csv(df) if isinstance(df, str) else df
columns = clean_columns(list(columns))
df2 = pd.DataFrame(columns=columns)
df2[category_col] = np.concatenate([np.array([categ] * len(df)) for categ in possible_categories])
columns = zip(columns, [[clean_columns(categ + ', ' + column) for categ in possible_categories] for column in columns])
for col, category_cols in columns:
df2[col] = np.concatenate([df[label].values for label in category_cols])
return df2 |
def write(self, iterable):
"""Writes values from iterable into CSV file"""
io_error_text = _("Error writing to file {filepath}.")
io_error_text = io_error_text.format(filepath=self.path)
try:
with open(self.path, "wb") as csvfile:
csv_writer = csv.writer(csvfile, self.dialect)
for line in iterable:
csv_writer.writerow(
list(encode_gen(line, encoding=self.encoding)))
except IOError:
txt = \
_("Error opening file {filepath}.").format(filepath=self.path)
try:
post_command_event(self.main_window, self.StatusBarMsg,
text=txt)
except TypeError:
# The main window does not exist any more
pass
return False | Writes values from iterable into CSV file | Below is the the instruction that describes the task:
### Input:
Writes values from iterable into CSV file
### Response:
def write(self, iterable):
"""Writes values from iterable into CSV file"""
io_error_text = _("Error writing to file {filepath}.")
io_error_text = io_error_text.format(filepath=self.path)
try:
with open(self.path, "wb") as csvfile:
csv_writer = csv.writer(csvfile, self.dialect)
for line in iterable:
csv_writer.writerow(
list(encode_gen(line, encoding=self.encoding)))
except IOError:
txt = \
_("Error opening file {filepath}.").format(filepath=self.path)
try:
post_command_event(self.main_window, self.StatusBarMsg,
text=txt)
except TypeError:
# The main window does not exist any more
pass
return False |
def loads(cls, value):
'''Returns mapping type deserialized `value`.'''
if len(value) == 1 and cls.sentinel in value:
value = value[cls.sentinel]
return value | Returns mapping type deserialized `value`. | Below is the the instruction that describes the task:
### Input:
Returns mapping type deserialized `value`.
### Response:
def loads(cls, value):
'''Returns mapping type deserialized `value`.'''
if len(value) == 1 and cls.sentinel in value:
value = value[cls.sentinel]
return value |
def _get_splunk_search_props(search):
'''
Get splunk search properties from an object
'''
props = search.content
props["app"] = search.access.app
props["sharing"] = search.access.sharing
return props | Get splunk search properties from an object | Below is the the instruction that describes the task:
### Input:
Get splunk search properties from an object
### Response:
def _get_splunk_search_props(search):
'''
Get splunk search properties from an object
'''
props = search.content
props["app"] = search.access.app
props["sharing"] = search.access.sharing
return props |
def to_dotfile(self, filename):
"""
Write graph to `filename`.
>>> from anytree import Node
>>> root = Node("root")
>>> s0 = Node("sub0", parent=root)
>>> s0b = Node("sub0B", parent=s0)
>>> s0a = Node("sub0A", parent=s0)
>>> s1 = Node("sub1", parent=root)
>>> s1a = Node("sub1A", parent=s1)
>>> s1b = Node("sub1B", parent=s1)
>>> s1c = Node("sub1C", parent=s1)
>>> s1ca = Node("sub1Ca", parent=s1c)
>>> from anytree.exporter import DotExporter
>>> DotExporter(root).to_dotfile("tree.dot")
The generated file should be handed over to the `dot` tool from the
http://www.graphviz.org/ package::
$ dot tree.dot -T png -o tree.png
"""
with codecs.open(filename, "w", "utf-8") as file:
for line in self:
file.write("%s\n" % line) | Write graph to `filename`.
>>> from anytree import Node
>>> root = Node("root")
>>> s0 = Node("sub0", parent=root)
>>> s0b = Node("sub0B", parent=s0)
>>> s0a = Node("sub0A", parent=s0)
>>> s1 = Node("sub1", parent=root)
>>> s1a = Node("sub1A", parent=s1)
>>> s1b = Node("sub1B", parent=s1)
>>> s1c = Node("sub1C", parent=s1)
>>> s1ca = Node("sub1Ca", parent=s1c)
>>> from anytree.exporter import DotExporter
>>> DotExporter(root).to_dotfile("tree.dot")
The generated file should be handed over to the `dot` tool from the
http://www.graphviz.org/ package::
$ dot tree.dot -T png -o tree.png | Below is the the instruction that describes the task:
### Input:
Write graph to `filename`.
>>> from anytree import Node
>>> root = Node("root")
>>> s0 = Node("sub0", parent=root)
>>> s0b = Node("sub0B", parent=s0)
>>> s0a = Node("sub0A", parent=s0)
>>> s1 = Node("sub1", parent=root)
>>> s1a = Node("sub1A", parent=s1)
>>> s1b = Node("sub1B", parent=s1)
>>> s1c = Node("sub1C", parent=s1)
>>> s1ca = Node("sub1Ca", parent=s1c)
>>> from anytree.exporter import DotExporter
>>> DotExporter(root).to_dotfile("tree.dot")
The generated file should be handed over to the `dot` tool from the
http://www.graphviz.org/ package::
$ dot tree.dot -T png -o tree.png
### Response:
def to_dotfile(self, filename):
"""
Write graph to `filename`.
>>> from anytree import Node
>>> root = Node("root")
>>> s0 = Node("sub0", parent=root)
>>> s0b = Node("sub0B", parent=s0)
>>> s0a = Node("sub0A", parent=s0)
>>> s1 = Node("sub1", parent=root)
>>> s1a = Node("sub1A", parent=s1)
>>> s1b = Node("sub1B", parent=s1)
>>> s1c = Node("sub1C", parent=s1)
>>> s1ca = Node("sub1Ca", parent=s1c)
>>> from anytree.exporter import DotExporter
>>> DotExporter(root).to_dotfile("tree.dot")
The generated file should be handed over to the `dot` tool from the
http://www.graphviz.org/ package::
$ dot tree.dot -T png -o tree.png
"""
with codecs.open(filename, "w", "utf-8") as file:
for line in self:
file.write("%s\n" % line) |
def p_state_invariant_section(self, p):
'''state_invariant_section : STATE_INVARIANTS LCURLY state_invariant_list RCURLY SEMI
| STATE_INVARIANTS LCURLY RCURLY SEMI'''
if len(p) == 6:
p[0] = ('invariants', p[3])
elif len(p) == 5:
p[0] = ('invariants', [])
self._print_verbose('invariants') | state_invariant_section : STATE_INVARIANTS LCURLY state_invariant_list RCURLY SEMI
| STATE_INVARIANTS LCURLY RCURLY SEMI | Below is the the instruction that describes the task:
### Input:
state_invariant_section : STATE_INVARIANTS LCURLY state_invariant_list RCURLY SEMI
| STATE_INVARIANTS LCURLY RCURLY SEMI
### Response:
def p_state_invariant_section(self, p):
'''state_invariant_section : STATE_INVARIANTS LCURLY state_invariant_list RCURLY SEMI
| STATE_INVARIANTS LCURLY RCURLY SEMI'''
if len(p) == 6:
p[0] = ('invariants', p[3])
elif len(p) == 5:
p[0] = ('invariants', [])
self._print_verbose('invariants') |
def get_fast_scanner(self):
"""
Return :class:`.FastScanner` for association scan.
Returns
-------
:class:`.FastScanner`
Instance of a class designed to perform very fast association scan.
"""
terms = self._terms
return KronFastScanner(self._Y, self._mean.A, self._mean.X, self._cov.Ge, terms) | Return :class:`.FastScanner` for association scan.
Returns
-------
:class:`.FastScanner`
Instance of a class designed to perform very fast association scan. | Below is the the instruction that describes the task:
### Input:
Return :class:`.FastScanner` for association scan.
Returns
-------
:class:`.FastScanner`
Instance of a class designed to perform very fast association scan.
### Response:
def get_fast_scanner(self):
"""
Return :class:`.FastScanner` for association scan.
Returns
-------
:class:`.FastScanner`
Instance of a class designed to perform very fast association scan.
"""
terms = self._terms
return KronFastScanner(self._Y, self._mean.A, self._mean.X, self._cov.Ge, terms) |
def memoized_ignoreargs(func):
"""
A decorator. It performs memoization ignoring the arguments used to call
the function.
"""
def wrapper(*args, **kwargs):
if func not in _MEMOIZED_NOARGS:
res = func(*args, **kwargs)
_MEMOIZED_NOARGS[func] = res
return res
return _MEMOIZED_NOARGS[func]
return wrapper | A decorator. It performs memoization ignoring the arguments used to call
the function. | Below is the the instruction that describes the task:
### Input:
A decorator. It performs memoization ignoring the arguments used to call
the function.
### Response:
def memoized_ignoreargs(func):
"""
A decorator. It performs memoization ignoring the arguments used to call
the function.
"""
def wrapper(*args, **kwargs):
if func not in _MEMOIZED_NOARGS:
res = func(*args, **kwargs)
_MEMOIZED_NOARGS[func] = res
return res
return _MEMOIZED_NOARGS[func]
return wrapper |
def read_histogram(self):
"""Read and reset the histogram. The expected return is a dictionary
containing the counts per bin, MToF for bins 1, 3, 5, and 7, temperature,
pressure, the sampling period, the checksum, PM1, PM2.5, and PM10.
**NOTE:** The sampling period for the OPCN1 seems to be incorrect.
:returns: dictionary
"""
resp = []
data = {}
# command byte
command = 0x30
# Send the command byte
self.cnxn.xfer([command])
# Wait 10 ms
sleep(10e-3)
# read the histogram
for i in range(62):
r = self.cnxn.xfer([0x00])[0]
resp.append(r)
# convert to real things and store in dictionary!
data['Bin 0'] = self._16bit_unsigned(resp[0], resp[1])
data['Bin 1'] = self._16bit_unsigned(resp[2], resp[3])
data['Bin 2'] = self._16bit_unsigned(resp[4], resp[5])
data['Bin 3'] = self._16bit_unsigned(resp[6], resp[7])
data['Bin 4'] = self._16bit_unsigned(resp[8], resp[9])
data['Bin 5'] = self._16bit_unsigned(resp[10], resp[11])
data['Bin 6'] = self._16bit_unsigned(resp[12], resp[13])
data['Bin 7'] = self._16bit_unsigned(resp[14], resp[15])
data['Bin 8'] = self._16bit_unsigned(resp[16], resp[17])
data['Bin 9'] = self._16bit_unsigned(resp[18], resp[19])
data['Bin 10'] = self._16bit_unsigned(resp[20], resp[21])
data['Bin 11'] = self._16bit_unsigned(resp[22], resp[23])
data['Bin 12'] = self._16bit_unsigned(resp[24], resp[25])
data['Bin 13'] = self._16bit_unsigned(resp[26], resp[27])
data['Bin 14'] = self._16bit_unsigned(resp[28], resp[29])
data['Bin 15'] = self._16bit_unsigned(resp[30], resp[31])
data['Bin1 MToF'] = self._calculate_mtof(resp[32])
data['Bin3 MToF'] = self._calculate_mtof(resp[33])
data['Bin5 MToF'] = self._calculate_mtof(resp[34])
data['Bin7 MToF'] = self._calculate_mtof(resp[35])
data['Temperature'] = self._calculate_temp(resp[36:40])
data['Pressure'] = self._calculate_pressure(resp[40:44])
data['Sampling Period'] = self._calculate_period(resp[44:48])
data['Checksum'] = self._16bit_unsigned(resp[48], resp[49])
data['PM1'] = self._calculate_float(resp[50:54])
data['PM2.5'] = self._calculate_float(resp[54:58])
data['PM10'] = self._calculate_float(resp[58:])
# Calculate the sum of the histogram bins
histogram_sum = data['Bin 0'] + data['Bin 1'] + data['Bin 2'] + \
data['Bin 3'] + data['Bin 4'] + data['Bin 5'] + data['Bin 6'] + \
data['Bin 7'] + data['Bin 8'] + data['Bin 9'] + data['Bin 10'] + \
data['Bin 11'] + data['Bin 12'] + data['Bin 13'] + data['Bin 14'] + \
data['Bin 15']
return data | Read and reset the histogram. The expected return is a dictionary
containing the counts per bin, MToF for bins 1, 3, 5, and 7, temperature,
pressure, the sampling period, the checksum, PM1, PM2.5, and PM10.
**NOTE:** The sampling period for the OPCN1 seems to be incorrect.
:returns: dictionary | Below is the the instruction that describes the task:
### Input:
Read and reset the histogram. The expected return is a dictionary
containing the counts per bin, MToF for bins 1, 3, 5, and 7, temperature,
pressure, the sampling period, the checksum, PM1, PM2.5, and PM10.
**NOTE:** The sampling period for the OPCN1 seems to be incorrect.
:returns: dictionary
### Response:
def read_histogram(self):
"""Read and reset the histogram. The expected return is a dictionary
containing the counts per bin, MToF for bins 1, 3, 5, and 7, temperature,
pressure, the sampling period, the checksum, PM1, PM2.5, and PM10.
**NOTE:** The sampling period for the OPCN1 seems to be incorrect.
:returns: dictionary
"""
resp = []
data = {}
# command byte
command = 0x30
# Send the command byte
self.cnxn.xfer([command])
# Wait 10 ms
sleep(10e-3)
# read the histogram
for i in range(62):
r = self.cnxn.xfer([0x00])[0]
resp.append(r)
# convert to real things and store in dictionary!
data['Bin 0'] = self._16bit_unsigned(resp[0], resp[1])
data['Bin 1'] = self._16bit_unsigned(resp[2], resp[3])
data['Bin 2'] = self._16bit_unsigned(resp[4], resp[5])
data['Bin 3'] = self._16bit_unsigned(resp[6], resp[7])
data['Bin 4'] = self._16bit_unsigned(resp[8], resp[9])
data['Bin 5'] = self._16bit_unsigned(resp[10], resp[11])
data['Bin 6'] = self._16bit_unsigned(resp[12], resp[13])
data['Bin 7'] = self._16bit_unsigned(resp[14], resp[15])
data['Bin 8'] = self._16bit_unsigned(resp[16], resp[17])
data['Bin 9'] = self._16bit_unsigned(resp[18], resp[19])
data['Bin 10'] = self._16bit_unsigned(resp[20], resp[21])
data['Bin 11'] = self._16bit_unsigned(resp[22], resp[23])
data['Bin 12'] = self._16bit_unsigned(resp[24], resp[25])
data['Bin 13'] = self._16bit_unsigned(resp[26], resp[27])
data['Bin 14'] = self._16bit_unsigned(resp[28], resp[29])
data['Bin 15'] = self._16bit_unsigned(resp[30], resp[31])
data['Bin1 MToF'] = self._calculate_mtof(resp[32])
data['Bin3 MToF'] = self._calculate_mtof(resp[33])
data['Bin5 MToF'] = self._calculate_mtof(resp[34])
data['Bin7 MToF'] = self._calculate_mtof(resp[35])
data['Temperature'] = self._calculate_temp(resp[36:40])
data['Pressure'] = self._calculate_pressure(resp[40:44])
data['Sampling Period'] = self._calculate_period(resp[44:48])
data['Checksum'] = self._16bit_unsigned(resp[48], resp[49])
data['PM1'] = self._calculate_float(resp[50:54])
data['PM2.5'] = self._calculate_float(resp[54:58])
data['PM10'] = self._calculate_float(resp[58:])
# Calculate the sum of the histogram bins
histogram_sum = data['Bin 0'] + data['Bin 1'] + data['Bin 2'] + \
data['Bin 3'] + data['Bin 4'] + data['Bin 5'] + data['Bin 6'] + \
data['Bin 7'] + data['Bin 8'] + data['Bin 9'] + data['Bin 10'] + \
data['Bin 11'] + data['Bin 12'] + data['Bin 13'] + data['Bin 14'] + \
data['Bin 15']
return data |
def _check_collections(self):
"""Checks node local collection storage sizes"""
self.collection_sizes = {}
self.collection_total = 0
for col in self.db.collection_names(include_system_collections=False):
self.collection_sizes[col] = self.db.command('collstats', col).get(
'storageSize', 0)
self.collection_total += self.collection_sizes[col]
sorted_x = sorted(self.collection_sizes.items(),
key=operator.itemgetter(1))
for item in sorted_x:
self.log("Collection size (%s): %.2f MB" % (
item[0], item[1] / 1024.0 / 1024),
lvl=verbose)
self.log("Total collection sizes: %.2f MB" % (self.collection_total /
1024.0 / 1024)) | Checks node local collection storage sizes | Below is the the instruction that describes the task:
### Input:
Checks node local collection storage sizes
### Response:
def _check_collections(self):
"""Checks node local collection storage sizes"""
self.collection_sizes = {}
self.collection_total = 0
for col in self.db.collection_names(include_system_collections=False):
self.collection_sizes[col] = self.db.command('collstats', col).get(
'storageSize', 0)
self.collection_total += self.collection_sizes[col]
sorted_x = sorted(self.collection_sizes.items(),
key=operator.itemgetter(1))
for item in sorted_x:
self.log("Collection size (%s): %.2f MB" % (
item[0], item[1] / 1024.0 / 1024),
lvl=verbose)
self.log("Total collection sizes: %.2f MB" % (self.collection_total /
1024.0 / 1024)) |
def update(gandi, resource, memory, cores, console, password, background,
reboot):
"""Update a virtual machine.
Resource can be a Hostname or an ID
"""
pwd = None
if password:
pwd = click.prompt('password', hide_input=True,
confirmation_prompt=True)
max_memory = None
if memory:
max_memory = gandi.iaas.required_max_memory(resource, memory)
if max_memory and not reboot:
gandi.echo('memory update must be done offline.')
if not click.confirm("reboot machine %s?" % resource):
return
result = gandi.iaas.update(resource, memory, cores, console, pwd,
background, max_memory)
if background:
gandi.pretty_echo(result)
return result | Update a virtual machine.
Resource can be a Hostname or an ID | Below is the the instruction that describes the task:
### Input:
Update a virtual machine.
Resource can be a Hostname or an ID
### Response:
def update(gandi, resource, memory, cores, console, password, background,
reboot):
"""Update a virtual machine.
Resource can be a Hostname or an ID
"""
pwd = None
if password:
pwd = click.prompt('password', hide_input=True,
confirmation_prompt=True)
max_memory = None
if memory:
max_memory = gandi.iaas.required_max_memory(resource, memory)
if max_memory and not reboot:
gandi.echo('memory update must be done offline.')
if not click.confirm("reboot machine %s?" % resource):
return
result = gandi.iaas.update(resource, memory, cores, console, pwd,
background, max_memory)
if background:
gandi.pretty_echo(result)
return result |
def add_population(self,pop):
"""Adds population to PopulationSet
"""
if pop.model in self.modelnames:
raise ValueError('%s model already in PopulationSet.' % pop.model)
self.modelnames.append(pop.model)
self.shortmodelnames.append(pop.modelshort)
self.poplist.append(pop) | Adds population to PopulationSet | Below is the the instruction that describes the task:
### Input:
Adds population to PopulationSet
### Response:
def add_population(self,pop):
"""Adds population to PopulationSet
"""
if pop.model in self.modelnames:
raise ValueError('%s model already in PopulationSet.' % pop.model)
self.modelnames.append(pop.model)
self.shortmodelnames.append(pop.modelshort)
self.poplist.append(pop) |
def option_attrname(self, opt, optdict=None):
"""get the config attribute corresponding to opt"""
if optdict is None:
optdict = self.get_option_def(opt)
return optdict.get("dest", opt.replace("-", "_")) | get the config attribute corresponding to opt | Below is the the instruction that describes the task:
### Input:
get the config attribute corresponding to opt
### Response:
def option_attrname(self, opt, optdict=None):
"""get the config attribute corresponding to opt"""
if optdict is None:
optdict = self.get_option_def(opt)
return optdict.get("dest", opt.replace("-", "_")) |
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
text = self._munge_whitespace(text)
chunks = self._split(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks) | wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space. | Below is the the instruction that describes the task:
### Input:
wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
### Response:
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
text = self._munge_whitespace(text)
chunks = self._split(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks) |
def serve(context, config, host, port, debug, livereload):
"""Start the web server."""
pymongo_config = dict(
MONGO_HOST=context.obj['host'],
MONGO_PORT=context.obj['port'],
MONGO_DBNAME=context.obj['mongodb'],
MONGO_USERNAME=context.obj['username'],
MONGO_PASSWORD=context.obj['password'],
)
valid_connection = check_connection(
host=pymongo_config['MONGO_HOST'],
port=pymongo_config['MONGO_PORT'],
username=pymongo_config['MONGO_USERNAME'],
password=pymongo_config['MONGO_PASSWORD'],
authdb=context.obj['authdb'],
)
log.info("Test if mongod is running")
if not valid_connection:
log.warning("Connection could not be established")
log.info("Is mongod running?")
context.abort()
config = os.path.abspath(config) if config else None
app = create_app(config=pymongo_config, config_file=config)
if livereload:
server = Server(app.wsgi_app)
server.serve(host=host, port=port, debug=debug)
else:
app.run(host=host, port=port, debug=debug) | Start the web server. | Below is the the instruction that describes the task:
### Input:
Start the web server.
### Response:
def serve(context, config, host, port, debug, livereload):
"""Start the web server."""
pymongo_config = dict(
MONGO_HOST=context.obj['host'],
MONGO_PORT=context.obj['port'],
MONGO_DBNAME=context.obj['mongodb'],
MONGO_USERNAME=context.obj['username'],
MONGO_PASSWORD=context.obj['password'],
)
valid_connection = check_connection(
host=pymongo_config['MONGO_HOST'],
port=pymongo_config['MONGO_PORT'],
username=pymongo_config['MONGO_USERNAME'],
password=pymongo_config['MONGO_PASSWORD'],
authdb=context.obj['authdb'],
)
log.info("Test if mongod is running")
if not valid_connection:
log.warning("Connection could not be established")
log.info("Is mongod running?")
context.abort()
config = os.path.abspath(config) if config else None
app = create_app(config=pymongo_config, config_file=config)
if livereload:
server = Server(app.wsgi_app)
server.serve(host=host, port=port, debug=debug)
else:
app.run(host=host, port=port, debug=debug) |
def is_deterministic(self):
"""Tests whether machine is deterministic."""
# naive quadratic algorithm
patterns = [t.lhs for t in self.transitions] + list(self.accept_configs)
for i, t1 in enumerate(patterns):
for t2 in patterns[:i]:
match = True
for in1, in2 in zip(t1, t2):
i = max(-in1.position, -in2.position)
while i+in1.position < len(in1) and i+in2.position < len(in2):
x1 = in1.values[i+in1.position]
x2 = in2.values[i+in2.position]
if x1 != x2:
match = False
i += 1
if match:
return False
return True | Tests whether machine is deterministic. | Below is the the instruction that describes the task:
### Input:
Tests whether machine is deterministic.
### Response:
def is_deterministic(self):
"""Tests whether machine is deterministic."""
# naive quadratic algorithm
patterns = [t.lhs for t in self.transitions] + list(self.accept_configs)
for i, t1 in enumerate(patterns):
for t2 in patterns[:i]:
match = True
for in1, in2 in zip(t1, t2):
i = max(-in1.position, -in2.position)
while i+in1.position < len(in1) and i+in2.position < len(in2):
x1 = in1.values[i+in1.position]
x2 = in2.values[i+in2.position]
if x1 != x2:
match = False
i += 1
if match:
return False
return True |
def _create_node(self, index: int, name: str, external_id: Optional[str] = None) -> SpotifyArtistNode:
"""
Returns a new `SpotifyArtistNode` instance with the given index and name.
Arguments:
index (int): The index of the node to create.
name (str): The name of the node to create.
external_id (Optional[str]): The external ID of the node.
"""
if external_id is None:
graph: SpotifyArtistGraph = self._graph
items: List[NameExternalIDPair] = graph.client.search_artists_by_name(name)
for item in items:
if item.name == name:
external_id = item.external_id
break
return SpotifyArtistNode(graph=self._graph, index=index, name=name, external_id=external_id) | Returns a new `SpotifyArtistNode` instance with the given index and name.
Arguments:
index (int): The index of the node to create.
name (str): The name of the node to create.
external_id (Optional[str]): The external ID of the node. | Below is the the instruction that describes the task:
### Input:
Returns a new `SpotifyArtistNode` instance with the given index and name.
Arguments:
index (int): The index of the node to create.
name (str): The name of the node to create.
external_id (Optional[str]): The external ID of the node.
### Response:
def _create_node(self, index: int, name: str, external_id: Optional[str] = None) -> SpotifyArtistNode:
"""
Returns a new `SpotifyArtistNode` instance with the given index and name.
Arguments:
index (int): The index of the node to create.
name (str): The name of the node to create.
external_id (Optional[str]): The external ID of the node.
"""
if external_id is None:
graph: SpotifyArtistGraph = self._graph
items: List[NameExternalIDPair] = graph.client.search_artists_by_name(name)
for item in items:
if item.name == name:
external_id = item.external_id
break
return SpotifyArtistNode(graph=self._graph, index=index, name=name, external_id=external_id) |
def punchcard(self, branch='master', limit=None, days=None, by=None, normalize=None, ignore_globs=None,
include_globs=None):
"""
Returns a pandas DataFrame containing all of the data for a punchcard.
* day_of_week
* hour_of_day
* author / committer
* lines
* insertions
* deletions
* net
:param branch: the branch to return commits for
:param limit: (optional, default=None) a maximum number of commits to return, None for no limit
:param days: (optional, default=None) number of days to return, if limit is None
:param by: (optional, default=None) agg by options, None for no aggregation (just a high level punchcard), or 'committer', 'author'
:param normalize: (optional, default=None) if an integer, returns the data normalized to max value of that (for plotting)
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame
"""
ch = self.commit_history(
branch=branch,
limit=limit,
days=days,
ignore_globs=ignore_globs,
include_globs=include_globs
)
# add in the date fields
ch['day_of_week'] = ch.index.map(lambda x: x.weekday())
ch['hour_of_day'] = ch.index.map(lambda x: x.hour)
aggs = ['hour_of_day', 'day_of_week']
if by is not None:
aggs.append(by)
punch_card = ch.groupby(aggs).agg({
'lines': np.sum,
'insertions': np.sum,
'deletions': np.sum,
'net': np.sum
})
punch_card.reset_index(inplace=True)
# normalize all cols
if normalize is not None:
for col in ['lines', 'insertions', 'deletions', 'net']:
punch_card[col] = (punch_card[col] / punch_card[col].sum()) * normalize
return punch_card | Returns a pandas DataFrame containing all of the data for a punchcard.
* day_of_week
* hour_of_day
* author / committer
* lines
* insertions
* deletions
* net
:param branch: the branch to return commits for
:param limit: (optional, default=None) a maximum number of commits to return, None for no limit
:param days: (optional, default=None) number of days to return, if limit is None
:param by: (optional, default=None) agg by options, None for no aggregation (just a high level punchcard), or 'committer', 'author'
:param normalize: (optional, default=None) if an integer, returns the data normalized to max value of that (for plotting)
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame | Below is the the instruction that describes the task:
### Input:
Returns a pandas DataFrame containing all of the data for a punchcard.
* day_of_week
* hour_of_day
* author / committer
* lines
* insertions
* deletions
* net
:param branch: the branch to return commits for
:param limit: (optional, default=None) a maximum number of commits to return, None for no limit
:param days: (optional, default=None) number of days to return, if limit is None
:param by: (optional, default=None) agg by options, None for no aggregation (just a high level punchcard), or 'committer', 'author'
:param normalize: (optional, default=None) if an integer, returns the data normalized to max value of that (for plotting)
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame
### Response:
def punchcard(self, branch='master', limit=None, days=None, by=None, normalize=None, ignore_globs=None,
include_globs=None):
"""
Returns a pandas DataFrame containing all of the data for a punchcard.
* day_of_week
* hour_of_day
* author / committer
* lines
* insertions
* deletions
* net
:param branch: the branch to return commits for
:param limit: (optional, default=None) a maximum number of commits to return, None for no limit
:param days: (optional, default=None) number of days to return, if limit is None
:param by: (optional, default=None) agg by options, None for no aggregation (just a high level punchcard), or 'committer', 'author'
:param normalize: (optional, default=None) if an integer, returns the data normalized to max value of that (for plotting)
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame
"""
ch = self.commit_history(
branch=branch,
limit=limit,
days=days,
ignore_globs=ignore_globs,
include_globs=include_globs
)
# add in the date fields
ch['day_of_week'] = ch.index.map(lambda x: x.weekday())
ch['hour_of_day'] = ch.index.map(lambda x: x.hour)
aggs = ['hour_of_day', 'day_of_week']
if by is not None:
aggs.append(by)
punch_card = ch.groupby(aggs).agg({
'lines': np.sum,
'insertions': np.sum,
'deletions': np.sum,
'net': np.sum
})
punch_card.reset_index(inplace=True)
# normalize all cols
if normalize is not None:
for col in ['lines', 'insertions', 'deletions', 'net']:
punch_card[col] = (punch_card[col] / punch_card[col].sum()) * normalize
return punch_card |
def tabbed_parsing_token_generator(data_dir, tmp_dir, train, prefix,
source_vocab_size, target_vocab_size):
"""Generate source and target data from a single file."""
filename = "parsing_{0}.pairs".format("train" if train else "dev")
source_vocab = generator_utils.get_or_generate_tabbed_vocab(
data_dir, tmp_dir, filename, 0,
prefix + "_source.tokens.vocab.%d" % source_vocab_size, source_vocab_size)
target_vocab = generator_utils.get_or_generate_tabbed_vocab(
data_dir, tmp_dir, filename, 1,
prefix + "_target.tokens.vocab.%d" % target_vocab_size, target_vocab_size)
pair_filepath = os.path.join(tmp_dir, filename)
return text_problems.text2text_generate_encoded(
text_problems.text2text_txt_tab_iterator(pair_filepath), source_vocab,
target_vocab) | Generate source and target data from a single file. | Below is the the instruction that describes the task:
### Input:
Generate source and target data from a single file.
### Response:
def tabbed_parsing_token_generator(data_dir, tmp_dir, train, prefix,
source_vocab_size, target_vocab_size):
"""Generate source and target data from a single file."""
filename = "parsing_{0}.pairs".format("train" if train else "dev")
source_vocab = generator_utils.get_or_generate_tabbed_vocab(
data_dir, tmp_dir, filename, 0,
prefix + "_source.tokens.vocab.%d" % source_vocab_size, source_vocab_size)
target_vocab = generator_utils.get_or_generate_tabbed_vocab(
data_dir, tmp_dir, filename, 1,
prefix + "_target.tokens.vocab.%d" % target_vocab_size, target_vocab_size)
pair_filepath = os.path.join(tmp_dir, filename)
return text_problems.text2text_generate_encoded(
text_problems.text2text_txt_tab_iterator(pair_filepath), source_vocab,
target_vocab) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.