code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def control_message_target(self, slack_user_name, text, loaded_groups, trigger_string):
'''Returns a tuple of (target, cmdline,) for the response
Raises IndexError if a user can't be looked up from all_slack_users
Returns (False, False) if the user doesn't have permission
These are returned together because the commandline and the targeting
interact with the group config (specifically aliases and targeting configuration)
so taking care of them together works out.
The cmdline that is returned is the actual list that should be
processed by salt, and not the alias.
'''
# Trim the trigger string from the front
# cmdline = _text[1:].split(' ', 1)
cmdline = self.commandline_to_list(text, trigger_string)
permitted_group = self.can_user_run(slack_user_name, cmdline[0], loaded_groups)
log.debug('slack_user_name is %s and the permitted group is %s', slack_user_name, permitted_group)
if not permitted_group:
return (False, None, cmdline[0])
if not slack_user_name:
return (False, None, cmdline[0])
# maybe there are aliases, so check on that
if cmdline[0] in permitted_group[1].get('aliases', {}).keys():
use_cmdline = self.commandline_to_list(permitted_group[1]['aliases'][cmdline[0]].get('cmd', ''), '')
# Include any additional elements from cmdline
use_cmdline.extend(cmdline[1:])
else:
use_cmdline = cmdline
target = self.get_target(permitted_group, cmdline, use_cmdline)
# Remove target and tgt_type from commandline
# that is sent along to Salt
use_cmdline = [item for item
in use_cmdline
if all(not item.startswith(x) for x in ('target', 'tgt_type'))]
return (True, target, use_cmdline) | Returns a tuple of (target, cmdline,) for the response
Raises IndexError if a user can't be looked up from all_slack_users
Returns (False, False) if the user doesn't have permission
These are returned together because the commandline and the targeting
interact with the group config (specifically aliases and targeting configuration)
so taking care of them together works out.
The cmdline that is returned is the actual list that should be
processed by salt, and not the alias. | Below is the instruction that describes the task:
### Input:
Returns a tuple of (target, cmdline,) for the response
Raises IndexError if a user can't be looked up from all_slack_users
Returns (False, False) if the user doesn't have permission
These are returned together because the commandline and the targeting
interact with the group config (specifically aliases and targeting configuration)
so taking care of them together works out.
The cmdline that is returned is the actual list that should be
processed by salt, and not the alias.
### Response:
def control_message_target(self, slack_user_name, text, loaded_groups, trigger_string):
'''Returns a tuple of (target, cmdline,) for the response
Raises IndexError if a user can't be looked up from all_slack_users
Returns (False, False) if the user doesn't have permission
These are returned together because the commandline and the targeting
interact with the group config (specifically aliases and targeting configuration)
so taking care of them together works out.
The cmdline that is returned is the actual list that should be
processed by salt, and not the alias.
'''
# Trim the trigger string from the front
# cmdline = _text[1:].split(' ', 1)
cmdline = self.commandline_to_list(text, trigger_string)
permitted_group = self.can_user_run(slack_user_name, cmdline[0], loaded_groups)
log.debug('slack_user_name is %s and the permitted group is %s', slack_user_name, permitted_group)
if not permitted_group:
return (False, None, cmdline[0])
if not slack_user_name:
return (False, None, cmdline[0])
# maybe there are aliases, so check on that
if cmdline[0] in permitted_group[1].get('aliases', {}).keys():
use_cmdline = self.commandline_to_list(permitted_group[1]['aliases'][cmdline[0]].get('cmd', ''), '')
# Include any additional elements from cmdline
use_cmdline.extend(cmdline[1:])
else:
use_cmdline = cmdline
target = self.get_target(permitted_group, cmdline, use_cmdline)
# Remove target and tgt_type from commandline
# that is sent along to Salt
use_cmdline = [item for item
in use_cmdline
if all(not item.startswith(x) for x in ('target', 'tgt_type'))]
return (True, target, use_cmdline) |
def read_gbq(
query,
project_id=None,
index_col=None,
col_order=None,
reauth=False,
auth_local_webserver=False,
dialect=None,
location=None,
configuration=None,
credentials=None,
use_bqstorage_api=False,
verbose=None,
private_key=None,
):
r"""Load data from Google BigQuery using google-cloud-python
The main method a user calls to execute a Query in Google BigQuery
and read results into a pandas DataFrame.
This method uses the Google Cloud client library to make requests to
Google BigQuery, documented `here
<https://google-cloud-python.readthedocs.io/en/latest/bigquery/usage.html>`__.
See the :ref:`How to authenticate with Google BigQuery <authentication>`
guide for authentication instructions.
Parameters
----------
query : str
SQL-Like Query to return data values.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
index_col : str, optional
Name of result column to use for index in results DataFrame.
col_order : list(str), optional
List of BigQuery column names in the desired order for results
DataFrame.
reauth : boolean, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
auth_local_webserver : boolean, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
.. versionadded:: 0.2.0
dialect : str, default 'standard'
Note: The default value changed to 'standard' in version 0.10.0.
SQL syntax dialect to use. Value can be one of:
``'legacy'``
Use BigQuery's legacy SQL dialect. For more information see
`BigQuery Legacy SQL Reference
<https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.
``'standard'``
Use BigQuery's standard SQL, which is
compliant with the SQL 2011 standard. For more information
see `BigQuery Standard SQL Reference
<https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.
location : str, optional
Location where the query job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of any
datasets used in the query.
.. versionadded:: 0.5.0
configuration : dict, optional
Query config parameters for job processing.
For example:
configuration = {'query': {'useQueryCache': False}}
For more information see `BigQuery REST API Reference
<https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to override
default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service Account
:class:`google.oauth2.service_account.Credentials` directly.
.. versionadded:: 0.8.0
use_bqstorage_api : bool, default False
Use the `BigQuery Storage API
<https://cloud.google.com/bigquery/docs/reference/storage/>`__ to
download query results quickly, but at an increased cost. To use this
API, first `enable it in the Cloud Console
<https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__.
You must also have the `bigquery.readsessions.create
<https://cloud.google.com/bigquery/docs/access-control#roles>`__
permission on the project you are billing queries to.
**Note:** Due to a `known issue in the ``google-cloud-bigquery``
package
<https://github.com/googleapis/google-cloud-python/pull/7633>`__
(fixed in version 1.11.0), you must write your query results to a
destination table. To do this with ``read_gbq``, supply a
``configuration`` dictionary.
This feature requires the ``google-cloud-bigquery-storage`` and
``fastavro`` packages.
.. versionadded:: 0.10.0
verbose : None, deprecated
Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
to adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. Jupyter/IPython notebook on remote host).
Returns
-------
df: DataFrame
DataFrame representing results of query.
"""
global context
if dialect is None:
dialect = context.dialect
if dialect is None:
dialect = "standard"
_test_google_api_imports()
if verbose is not None and SHOW_VERBOSE_DEPRECATION:
warnings.warn(
"verbose is deprecated and will be removed in "
"a future version. Set logging level in order to vary "
"verbosity",
FutureWarning,
stacklevel=2,
)
if private_key is not None and SHOW_PRIVATE_KEY_DEPRECATION:
warnings.warn(
PRIVATE_KEY_DEPRECATION_MESSAGE, FutureWarning, stacklevel=2
)
if dialect not in ("legacy", "standard"):
raise ValueError("'{0}' is not valid for dialect".format(dialect))
connector = GbqConnector(
project_id,
reauth=reauth,
dialect=dialect,
auth_local_webserver=auth_local_webserver,
location=location,
credentials=credentials,
private_key=private_key,
use_bqstorage_api=use_bqstorage_api,
)
final_df = connector.run_query(query, configuration=configuration)
# Reindex the DataFrame on the provided column
if index_col is not None:
if index_col in final_df.columns:
final_df.set_index(index_col, inplace=True)
else:
raise InvalidIndexColumn(
'Index column "{0}" does not exist in DataFrame.'.format(
index_col
)
)
# Change the order of columns in the DataFrame based on provided list
if col_order is not None:
if sorted(col_order) == sorted(final_df.columns):
final_df = final_df[col_order]
else:
raise InvalidColumnOrder(
"Column order does not match this DataFrame."
)
connector.log_elapsed_seconds(
"Total time taken",
datetime.now().strftime("s.\nFinished at %Y-%m-%d %H:%M:%S."),
)
return final_df | r"""Load data from Google BigQuery using google-cloud-python
The main method a user calls to execute a Query in Google BigQuery
and read results into a pandas DataFrame.
This method uses the Google Cloud client library to make requests to
Google BigQuery, documented `here
<https://google-cloud-python.readthedocs.io/en/latest/bigquery/usage.html>`__.
See the :ref:`How to authenticate with Google BigQuery <authentication>`
guide for authentication instructions.
Parameters
----------
query : str
SQL-Like Query to return data values.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
index_col : str, optional
Name of result column to use for index in results DataFrame.
col_order : list(str), optional
List of BigQuery column names in the desired order for results
DataFrame.
reauth : boolean, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
auth_local_webserver : boolean, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
.. versionadded:: 0.2.0
dialect : str, default 'standard'
Note: The default value changed to 'standard' in version 0.10.0.
SQL syntax dialect to use. Value can be one of:
``'legacy'``
Use BigQuery's legacy SQL dialect. For more information see
`BigQuery Legacy SQL Reference
<https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.
``'standard'``
Use BigQuery's standard SQL, which is
compliant with the SQL 2011 standard. For more information
see `BigQuery Standard SQL Reference
<https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.
location : str, optional
Location where the query job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of any
datasets used in the query.
.. versionadded:: 0.5.0
configuration : dict, optional
Query config parameters for job processing.
For example:
configuration = {'query': {'useQueryCache': False}}
For more information see `BigQuery REST API Reference
<https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to override
default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service Account
:class:`google.oauth2.service_account.Credentials` directly.
.. versionadded:: 0.8.0
use_bqstorage_api : bool, default False
Use the `BigQuery Storage API
<https://cloud.google.com/bigquery/docs/reference/storage/>`__ to
download query results quickly, but at an increased cost. To use this
API, first `enable it in the Cloud Console
<https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__.
You must also have the `bigquery.readsessions.create
<https://cloud.google.com/bigquery/docs/access-control#roles>`__
permission on the project you are billing queries to.
**Note:** Due to a `known issue in the ``google-cloud-bigquery``
package
<https://github.com/googleapis/google-cloud-python/pull/7633>`__
(fixed in version 1.11.0), you must write your query results to a
destination table. To do this with ``read_gbq``, supply a
``configuration`` dictionary.
This feature requires the ``google-cloud-bigquery-storage`` and
``fastavro`` packages.
.. versionadded:: 0.10.0
verbose : None, deprecated
Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
to adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. Jupyter/IPython notebook on remote host).
Returns
-------
df: DataFrame
DataFrame representing results of query. | Below is the instruction that describes the task:
### Input:
r"""Load data from Google BigQuery using google-cloud-python
The main method a user calls to execute a Query in Google BigQuery
and read results into a pandas DataFrame.
This method uses the Google Cloud client library to make requests to
Google BigQuery, documented `here
<https://google-cloud-python.readthedocs.io/en/latest/bigquery/usage.html>`__.
See the :ref:`How to authenticate with Google BigQuery <authentication>`
guide for authentication instructions.
Parameters
----------
query : str
SQL-Like Query to return data values.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
index_col : str, optional
Name of result column to use for index in results DataFrame.
col_order : list(str), optional
List of BigQuery column names in the desired order for results
DataFrame.
reauth : boolean, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
auth_local_webserver : boolean, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
.. versionadded:: 0.2.0
dialect : str, default 'standard'
Note: The default value changed to 'standard' in version 0.10.0.
SQL syntax dialect to use. Value can be one of:
``'legacy'``
Use BigQuery's legacy SQL dialect. For more information see
`BigQuery Legacy SQL Reference
<https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.
``'standard'``
Use BigQuery's standard SQL, which is
compliant with the SQL 2011 standard. For more information
see `BigQuery Standard SQL Reference
<https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.
location : str, optional
Location where the query job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of any
datasets used in the query.
.. versionadded:: 0.5.0
configuration : dict, optional
Query config parameters for job processing.
For example:
configuration = {'query': {'useQueryCache': False}}
For more information see `BigQuery REST API Reference
<https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to override
default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service Account
:class:`google.oauth2.service_account.Credentials` directly.
.. versionadded:: 0.8.0
use_bqstorage_api : bool, default False
Use the `BigQuery Storage API
<https://cloud.google.com/bigquery/docs/reference/storage/>`__ to
download query results quickly, but at an increased cost. To use this
API, first `enable it in the Cloud Console
<https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__.
You must also have the `bigquery.readsessions.create
<https://cloud.google.com/bigquery/docs/access-control#roles>`__
permission on the project you are billing queries to.
**Note:** Due to a `known issue in the ``google-cloud-bigquery``
package
<https://github.com/googleapis/google-cloud-python/pull/7633>`__
(fixed in version 1.11.0), you must write your query results to a
destination table. To do this with ``read_gbq``, supply a
``configuration`` dictionary.
This feature requires the ``google-cloud-bigquery-storage`` and
``fastavro`` packages.
.. versionadded:: 0.10.0
verbose : None, deprecated
Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
to adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. Jupyter/IPython notebook on remote host).
Returns
-------
df: DataFrame
DataFrame representing results of query.
### Response:
def read_gbq(
query,
project_id=None,
index_col=None,
col_order=None,
reauth=False,
auth_local_webserver=False,
dialect=None,
location=None,
configuration=None,
credentials=None,
use_bqstorage_api=False,
verbose=None,
private_key=None,
):
r"""Load data from Google BigQuery using google-cloud-python
The main method a user calls to execute a Query in Google BigQuery
and read results into a pandas DataFrame.
This method uses the Google Cloud client library to make requests to
Google BigQuery, documented `here
<https://google-cloud-python.readthedocs.io/en/latest/bigquery/usage.html>`__.
See the :ref:`How to authenticate with Google BigQuery <authentication>`
guide for authentication instructions.
Parameters
----------
query : str
SQL-Like Query to return data values.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
index_col : str, optional
Name of result column to use for index in results DataFrame.
col_order : list(str), optional
List of BigQuery column names in the desired order for results
DataFrame.
reauth : boolean, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
auth_local_webserver : boolean, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
.. versionadded:: 0.2.0
dialect : str, default 'standard'
Note: The default value changed to 'standard' in version 0.10.0.
SQL syntax dialect to use. Value can be one of:
``'legacy'``
Use BigQuery's legacy SQL dialect. For more information see
`BigQuery Legacy SQL Reference
<https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.
``'standard'``
Use BigQuery's standard SQL, which is
compliant with the SQL 2011 standard. For more information
see `BigQuery Standard SQL Reference
<https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.
location : str, optional
Location where the query job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of any
datasets used in the query.
.. versionadded:: 0.5.0
configuration : dict, optional
Query config parameters for job processing.
For example:
configuration = {'query': {'useQueryCache': False}}
For more information see `BigQuery REST API Reference
<https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to override
default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service Account
:class:`google.oauth2.service_account.Credentials` directly.
.. versionadded:: 0.8.0
use_bqstorage_api : bool, default False
Use the `BigQuery Storage API
<https://cloud.google.com/bigquery/docs/reference/storage/>`__ to
download query results quickly, but at an increased cost. To use this
API, first `enable it in the Cloud Console
<https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__.
You must also have the `bigquery.readsessions.create
<https://cloud.google.com/bigquery/docs/access-control#roles>`__
permission on the project you are billing queries to.
**Note:** Due to a `known issue in the ``google-cloud-bigquery``
package
<https://github.com/googleapis/google-cloud-python/pull/7633>`__
(fixed in version 1.11.0), you must write your query results to a
destination table. To do this with ``read_gbq``, supply a
``configuration`` dictionary.
This feature requires the ``google-cloud-bigquery-storage`` and
``fastavro`` packages.
.. versionadded:: 0.10.0
verbose : None, deprecated
Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
to adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. Jupyter/IPython notebook on remote host).
Returns
-------
df: DataFrame
DataFrame representing results of query.
"""
global context
if dialect is None:
dialect = context.dialect
if dialect is None:
dialect = "standard"
_test_google_api_imports()
if verbose is not None and SHOW_VERBOSE_DEPRECATION:
warnings.warn(
"verbose is deprecated and will be removed in "
"a future version. Set logging level in order to vary "
"verbosity",
FutureWarning,
stacklevel=2,
)
if private_key is not None and SHOW_PRIVATE_KEY_DEPRECATION:
warnings.warn(
PRIVATE_KEY_DEPRECATION_MESSAGE, FutureWarning, stacklevel=2
)
if dialect not in ("legacy", "standard"):
raise ValueError("'{0}' is not valid for dialect".format(dialect))
connector = GbqConnector(
project_id,
reauth=reauth,
dialect=dialect,
auth_local_webserver=auth_local_webserver,
location=location,
credentials=credentials,
private_key=private_key,
use_bqstorage_api=use_bqstorage_api,
)
final_df = connector.run_query(query, configuration=configuration)
# Reindex the DataFrame on the provided column
if index_col is not None:
if index_col in final_df.columns:
final_df.set_index(index_col, inplace=True)
else:
raise InvalidIndexColumn(
'Index column "{0}" does not exist in DataFrame.'.format(
index_col
)
)
# Change the order of columns in the DataFrame based on provided list
if col_order is not None:
if sorted(col_order) == sorted(final_df.columns):
final_df = final_df[col_order]
else:
raise InvalidColumnOrder(
"Column order does not match this DataFrame."
)
connector.log_elapsed_seconds(
"Total time taken",
datetime.now().strftime("s.\nFinished at %Y-%m-%d %H:%M:%S."),
)
return final_df |
def uuid5(namespace, name):
"""Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
import sha
hash = sha.sha(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=5) | Generate a UUID from the SHA-1 hash of a namespace UUID and a name. | Below is the instruction that describes the task:
### Input:
Generate a UUID from the SHA-1 hash of a namespace UUID and a name.
### Response:
def uuid5(namespace, name):
"""Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
import sha
hash = sha.sha(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=5) |
def set_dotted(self, key, value):
"""
qs.set_dotted('foo.bar', 'baz')
is equivalent to:
qs.foo.bar = baz
"""
parts = key.split('.')
cobj = self
if len(parts) > 1:
key = parts.pop()
for name in parts:
cobj = getattr(cobj, name)
setattr(cobj, key, value) | qs.set_dotted('foo.bar', 'baz')
is equivalent to:
qs.foo.bar = baz | Below is the instruction that describes the task:
### Input:
qs.set_dotted('foo.bar', 'baz')
is equivalent to:
qs.foo.bar = baz
### Response:
def set_dotted(self, key, value):
"""
qs.set_dotted('foo.bar', 'baz')
is equivalent to:
qs.foo.bar = baz
"""
parts = key.split('.')
cobj = self
if len(parts) > 1:
key = parts.pop()
for name in parts:
cobj = getattr(cobj, name)
setattr(cobj, key, value) |
def create_app():
"""create_app."""
app = Flask(__name__)
app.logger.disabled = True
for h in app.logger.handlers[:]:
app.logger.removeHandler(h)
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
def dated_url_for(endpoint, **values):
"""dated_url_for."""
if endpoint == 'static':
filename = values.get('filename', None)
if filename:
file_path = os.path.join(app.root_path, endpoint, filename)
values['_'] = int(os.stat(file_path).st_mtime)
return url_for(endpoint, **values)
@app.context_processor
def override_url_for():
"""override_url_for."""
return dict(url_for=dated_url_for)
@app.teardown_appcontext
def shutdown_session(exception=None):
db.session.remove()
@app.route('/')
@app.route('/projects/<int:project_id>')
@app.route('/projects/<int:project_id>/results/<int:result_id>')
@app.route('/projects/<int:project_id>/results/<int:result_id>/assets')
def index(**kwargs):
"""render react app."""
return render_template('index.html')
@app.route('/favicon.ico')
def favicon():
return send_from_directory(
os.path.join(app.root_path, 'static', 'dist'),
'favicon.ico', mimetype='image/vnd.microsoft.icon')
# error handling
@app.errorhandler(OperationalError)
def handle_invalid_usage(error):
"""handle errors caused by db query."""
logger.error('caught exception from db: %s' % error.args)
response = jsonify({
'error': {
'type': 'DBOperationalError',
'message': 'Failed to send request to the database.'
}
})
response.status_code = 400 # Bad Request
return response
@app.before_request
def add_timestamp():
request._comming_at = datetime.datetime.now()
@app.after_request
def output_log(response):
now = datetime.datetime.now()
log_msg = '%s - - [%s] "%s %s %s" %d' % (
request.remote_addr, now.replace(microsecond=0),
request.method, request.full_path,
request.environ.get('SERVER_PROTOCOL'), response.status_code)
if response.content_length is not None:
log_msg += ' %d' % response.content_length
if request._comming_at is not None:
delta = (now - request._comming_at).total_seconds()
log_msg += ' %.6f' % delta
logger.info(log_msg)
return response
from chainerui.views.argument import ArgumentAPI
from chainerui.views.log import LogAPI
from chainerui.views.project import ProjectAPI
from chainerui.views.result import ResultAPI
from chainerui.views.result_asset import ResultAssetAPI
from chainerui.views.result_command import ResultCommandAPI
project_resource = ProjectAPI.as_view('project_resource')
result_resource = ResultAPI.as_view('result_resource')
log_resource = LogAPI.as_view('log_resource')
arg_resource = ArgumentAPI.as_view('arg_resource')
result_command_resource = ResultCommandAPI.as_view(
'result_command_resource')
result_assets_resource = ResultAssetAPI.as_view('result_assets_resource')
# project API
app.add_url_rule(
'/api/v1/projects',
defaults={'id': None}, view_func=project_resource, methods=['GET'])
app.add_url_rule(
'/api/v1/projects', view_func=project_resource, methods=['POST'])
app.add_url_rule(
'/api/v1/projects/<int:id>',
view_func=project_resource, methods=['GET', 'PUT', 'DELETE'])
# result API
app.add_url_rule(
'/api/v1/projects/<int:project_id>/results',
defaults={'id': None}, view_func=result_resource, methods=['GET'])
app.add_url_rule(
'/api/v1/projects/<int:project_id>/results',
view_func=result_resource, methods=['POST'])
app.add_url_rule(
'/api/v1/projects/<int:project_id>/results/<int:id>',
view_func=result_resource, methods=['GET', 'PUT', 'DELETE'])
# result log API
app.add_url_rule(
'/api/v1/projects/<int:project_id>/results/<int:result_id>/logs',
view_func=log_resource, methods=['POST'])
# result argument API
app.add_url_rule(
'/api/v1/projects/<int:project_id>/results/<int:result_id>/args',
view_func=arg_resource, methods=['POST'])
# result command API
app.add_url_rule(
'/api/v1/projects/<int:project_id>/results/<int:result_id>/commands',
view_func=result_command_resource, methods=['POST'])
# result image API
app.add_url_rule(
'/api/v1/projects/<int:project_id>/results/<int:result_id>/assets',
view_func=result_assets_resource, methods=['GET'])
app.add_url_rule(
'/api/v1/projects/<int:project_id>/results/<int:result_id>/assets/<int:content_id>', # NOQA
view_func=result_assets_resource, methods=['GET'])
return app | create_app. | Below is the instruction that describes the task:
### Input:
create_app.
### Response:
def create_app():
    """Create and configure the chainerui Flask application.

    Sets up logging suppression, cache-busting static URLs, DB session
    teardown, the React UI routes, DB error handling, manual access
    logging, and the REST API URL rules.

    :return: a configured :class:`flask.Flask` instance.
    """
    app = Flask(__name__)
    # Silence Flask's own logger; access logging is done manually in
    # output_log() below.
    app.logger.disabled = True
    for h in app.logger.handlers[:]:
        app.logger.removeHandler(h)
    app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
    def dated_url_for(endpoint, **values):
        """url_for wrapper that appends the static file's mtime as a
        cache-busting '_' query parameter."""
        if endpoint == 'static':
            filename = values.get('filename', None)
            if filename:
                file_path = os.path.join(app.root_path, endpoint, filename)
                values['_'] = int(os.stat(file_path).st_mtime)
        return url_for(endpoint, **values)
    @app.context_processor
    def override_url_for():
        """Expose the cache-busting url_for to all templates."""
        return dict(url_for=dated_url_for)
    @app.teardown_appcontext
    def shutdown_session(exception=None):
        # Remove the scoped DB session at the end of each app context.
        db.session.remove()
    @app.route('/')
    @app.route('/projects/<int:project_id>')
    @app.route('/projects/<int:project_id>/results/<int:result_id>')
    @app.route('/projects/<int:project_id>/results/<int:result_id>/assets')
    def index(**kwargs):
        """render react app."""
        return render_template('index.html')
    @app.route('/favicon.ico')
    def favicon():
        # Serve the favicon from the built static assets.
        return send_from_directory(
            os.path.join(app.root_path, 'static', 'dist'),
            'favicon.ico', mimetype='image/vnd.microsoft.icon')
    # error handling
    @app.errorhandler(OperationalError)
    def handle_invalid_usage(error):
        """handle errors caused by db query."""
        logger.error('caught exception from db: %s' % error.args)
        response = jsonify({
            'error': {
                'type': 'DBOperationalError',
                'message': 'Failed to send request to the database.'
            }
        })
        response.status_code = 400  # Bad Request
        return response
    @app.before_request
    def add_timestamp():
        # Stamp the request so output_log() can compute the duration.
        # NOTE(review): attribute name is misspelled ('_comming_at');
        # kept as-is in case code outside this file reads it -- confirm.
        request._comming_at = datetime.datetime.now()
    @app.after_request
    def output_log(response):
        # Apache-style access log line, plus content length and request
        # duration when available.
        now = datetime.datetime.now()
        log_msg = '%s - - [%s] "%s %s %s" %d' % (
            request.remote_addr, now.replace(microsecond=0),
            request.method, request.full_path,
            request.environ.get('SERVER_PROTOCOL'), response.status_code)
        if response.content_length is not None:
            log_msg += ' %d' % response.content_length
        if request._comming_at is not None:
            delta = (now - request._comming_at).total_seconds()
            log_msg += ' %.6f' % delta
        logger.info(log_msg)
        return response
    # View imports are local, presumably to avoid circular imports at
    # module load time -- TODO confirm.
    from chainerui.views.argument import ArgumentAPI
    from chainerui.views.log import LogAPI
    from chainerui.views.project import ProjectAPI
    from chainerui.views.result import ResultAPI
    from chainerui.views.result_asset import ResultAssetAPI
    from chainerui.views.result_command import ResultCommandAPI
    project_resource = ProjectAPI.as_view('project_resource')
    result_resource = ResultAPI.as_view('result_resource')
    log_resource = LogAPI.as_view('log_resource')
    arg_resource = ArgumentAPI.as_view('arg_resource')
    result_command_resource = ResultCommandAPI.as_view(
        'result_command_resource')
    result_assets_resource = ResultAssetAPI.as_view('result_assets_resource')
    # project API
    app.add_url_rule(
        '/api/v1/projects',
        defaults={'id': None}, view_func=project_resource, methods=['GET'])
    app.add_url_rule(
        '/api/v1/projects', view_func=project_resource, methods=['POST'])
    app.add_url_rule(
        '/api/v1/projects/<int:id>',
        view_func=project_resource, methods=['GET', 'PUT', 'DELETE'])
    # result API
    app.add_url_rule(
        '/api/v1/projects/<int:project_id>/results',
        defaults={'id': None}, view_func=result_resource, methods=['GET'])
    app.add_url_rule(
        '/api/v1/projects/<int:project_id>/results',
        view_func=result_resource, methods=['POST'])
    app.add_url_rule(
        '/api/v1/projects/<int:project_id>/results/<int:id>',
        view_func=result_resource, methods=['GET', 'PUT', 'DELETE'])
    # result log API
    app.add_url_rule(
        '/api/v1/projects/<int:project_id>/results/<int:result_id>/logs',
        view_func=log_resource, methods=['POST'])
    # result argument API
    app.add_url_rule(
        '/api/v1/projects/<int:project_id>/results/<int:result_id>/args',
        view_func=arg_resource, methods=['POST'])
    # result command API
    app.add_url_rule(
        '/api/v1/projects/<int:project_id>/results/<int:result_id>/commands',
        view_func=result_command_resource, methods=['POST'])
    # result image API
    app.add_url_rule(
        '/api/v1/projects/<int:project_id>/results/<int:result_id>/assets',
        view_func=result_assets_resource, methods=['GET'])
    app.add_url_rule(
        '/api/v1/projects/<int:project_id>/results/<int:result_id>/assets/<int:content_id>',  # NOQA
        view_func=result_assets_resource, methods=['GET'])
    return app
def rerecord(ctx, rest):
"""Rerecord tests."""
run('tox -e py27 -- --cassette-mode all --record --credentials {0} -s'
.format(rest), pty=True)
run('tox -e py27 -- --resave --scrub --credentials test_credentials {0} -s'
.format(rest), pty=True) | Rerecord tests. | Below is the instruction that describes the task:
### Input:
Rerecord tests.
### Response:
def rerecord(ctx, rest):
    """Rerecord the test cassettes, then re-save them scrubbed."""
    commands = (
        'tox -e py27 -- --cassette-mode all --record --credentials {0} -s',
        'tox -e py27 -- --resave --scrub --credentials test_credentials {0} -s',
    )
    # Run the record pass first, then the resave/scrub pass.
    for command in commands:
        run(command.format(rest), pty=True)
def CALLDATALOAD(self, offset):
"""Get input data of current environment"""
if issymbolic(offset):
if solver.can_be_true(self._constraints, offset == self._used_calldata_size):
self.constraints.add(offset == self._used_calldata_size)
raise ConcretizeArgument(1, policy='SAMPLED')
self._use_calldata(offset, 32)
data_length = len(self.data)
bytes = []
for i in range(32):
try:
c = Operators.ITEBV(8, offset + i < data_length, self.data[offset + i], 0)
except IndexError:
# offset + i is concrete and outside data
c = 0
bytes.append(c)
return Operators.CONCAT(256, *bytes) | Get input data of current environment | Below is the instruction that describes the task:
### Input:
Get input data of current environment
### Response:
def CALLDATALOAD(self, offset):
    """Get input data of current environment.

    Reads a 32-byte word of transaction calldata starting at ``offset``;
    bytes past the end of the calldata read as zero.

    :param offset: byte offset into calldata (may be symbolic).
    :return: a 256-bit value assembled from 32 calldata bytes.
    :raises ConcretizeArgument: when ``offset`` is symbolic, to have the
        executor concretize it (policy 'SAMPLED').
    """
    if issymbolic(offset):
        # If the symbolic offset can equal the furthest calldata byte
        # used so far, pin it there before asking for concretization.
        if solver.can_be_true(self._constraints, offset == self._used_calldata_size):
            self.constraints.add(offset == self._used_calldata_size)
        raise ConcretizeArgument(1, policy='SAMPLED')
    # Record that these 32 bytes of calldata were touched.
    self._use_calldata(offset, 32)
    data_length = len(self.data)
    bytes = []  # NOTE: shadows the builtin 'bytes'; kept unchanged here
    for i in range(32):
        try:
            # Symbolic-friendly bounds check: reads past the end yield 0.
            c = Operators.ITEBV(8, offset + i < data_length, self.data[offset + i], 0)
        except IndexError:
            # offset + i is concrete and outside data
            c = 0
        bytes.append(c)
    return Operators.CONCAT(256, *bytes)
def GET_save_timegrid(self) -> None:
"""Save the current simulation period."""
state.timegrids[self._id] = copy.deepcopy(hydpy.pub.timegrids.sim) | Save the current simulation period. | Below is the instruction that describes the task:
### Input:
Save the current simulation period.
### Response:
def GET_save_timegrid(self) -> None:
    """Remember the current simulation period for this session."""
    # Deep-copy so later changes to the live timegrid do not leak into
    # the stored snapshot.
    simulation_period = hydpy.pub.timegrids.sim
    state.timegrids[self._id] = copy.deepcopy(simulation_period)
def busy(self):
"""Return if the connection is currently executing a query or is locked
by a session that still exists.
:rtype: bool
"""
if self.handle.isexecuting():
return True
elif self.used_by is None:
return False
return not self.used_by() is None | Return if the connection is currently executing a query or is locked
by a session that still exists.
:rtype: bool | Below is the instruction that describes the task:
### Input:
Return if the connection is currently executing a query or is locked
by a session that still exists.
:rtype: bool
### Response:
def busy(self):
    """Return whether the connection is executing a query or is still
    locked by a session that exists.

    :rtype: bool
    """
    # An in-flight query always counts as busy.
    if self.handle.isexecuting():
        return True
    # No owning session recorded at all: free.
    if self.used_by is None:
        return False
    # used_by is callable (a weak-reference style handle); a non-None
    # referent means the owning session is still alive.
    return self.used_by() is not None
def _create_safe_task(self, coroutine):
""" Calls self._loop.create_task with a safe (== with logged exception) coroutine. When run() ends, these tasks
are automatically cancelled"""
task = self._loop.create_task(coroutine)
self.__asyncio_tasks_running.add(task)
task.add_done_callback(self.__remove_safe_task) | Calls self._loop.create_task with a safe (== with logged exception) coroutine. When run() ends, these tasks
are automatically cancelled | Below is the instruction that describes the task:
### Input:
Calls self._loop.create_task with a safe (== with logged exception) coroutine. When run() ends, these tasks
are automatically cancelled
### Response:
def _create_safe_task(self, coroutine):
    """Calls self._loop.create_task with a safe (== with logged exception)
    coroutine. When run() ends, these tasks are automatically cancelled.

    :param coroutine: coroutine object to schedule on the event loop.
    """
    task = self._loop.create_task(coroutine)
    # Track the task so it can be cancelled in bulk when run() ends.
    self.__asyncio_tasks_running.add(task)
    # Drop the task from the tracking set as soon as it completes.
    task.add_done_callback(self.__remove_safe_task)
def dedent(content):
"""
Remove leading indent from a block of text.
Used when generating descriptions from docstrings.
Note that python's `textwrap.dedent` doesn't quite cut it,
as it fails to dedent multiline docstrings that include
unindented text on the initial line.
"""
content = force_text(content)
whitespace_counts = [len(line) - len(line.lstrip(' '))
for line in content.splitlines()[1:] if line.lstrip()]
# unindent the content if needed
if whitespace_counts:
whitespace_pattern = '^' + (' ' * min(whitespace_counts))
content = re.sub(re.compile(whitespace_pattern, re.MULTILINE), '', content)
return content.strip() | Remove leading indent from a block of text.
Used when generating descriptions from docstrings.
Note that python's `textwrap.dedent` doesn't quite cut it,
as it fails to dedent multiline docstrings that include
unindented text on the initial line. | Below is the instruction that describes the task:
### Input:
Remove leading indent from a block of text.
Used when generating descriptions from docstrings.
Note that python's `textwrap.dedent` doesn't quite cut it,
as it fails to dedent multiline docstrings that include
unindented text on the initial line.
### Response:
def dedent(content):
    """
    Strip the common leading indentation from a block of text.

    Used when generating descriptions from docstrings. Unlike
    ``textwrap.dedent``, this handles docstrings whose first line is
    unindented: indentation is measured from the second line onward.
    """
    content = force_text(content)
    indents = [
        len(line) - len(line.lstrip(' '))
        for line in content.splitlines()[1:]
        if line.lstrip()
    ]
    # Remove the smallest shared indent from every line, if any.
    if indents:
        pattern = re.compile('^' + (' ' * min(indents)), re.MULTILINE)
        content = pattern.sub('', content)
    return content.strip()
def get_collection_by_id(cls, collection_id, **kwargs):
"""Find Collection
Return single instance of Collection by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_collection_by_id(collection_id, async=True)
>>> result = thread.get()
:param async bool
:param str collection_id: ID of collection to return (required)
:return: Collection
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_collection_by_id_with_http_info(collection_id, **kwargs)
else:
(data) = cls._get_collection_by_id_with_http_info(collection_id, **kwargs)
return data | Find Collection
Return single instance of Collection by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_collection_by_id(collection_id, async=True)
>>> result = thread.get()
:param async bool
:param str collection_id: ID of collection to return (required)
:return: Collection
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Find Collection
Return single instance of Collection by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_collection_by_id(collection_id, async=True)
>>> result = thread.get()
:param async bool
:param str collection_id: ID of collection to return (required)
:return: Collection
If the method is called asynchronously,
returns the request thread.
### Response:
def get_collection_by_id(cls, collection_id, **kwargs):
    """Find a Collection by its ID.

    Synchronous by default; pass ``async=True`` (as a keyword) to get the
    request thread instead of the result.

    >>> thread = api.get_collection_by_id(collection_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str collection_id: ID of collection to return (required)
    :return: Collection, or the request thread when called asynchronously.
    """
    # Only the payload is wanted, not the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    fetch = cls._get_collection_by_id_with_http_info
    if kwargs.get('async'):
        # Asynchronous call: hand the request thread back to the caller.
        return fetch(collection_id, **kwargs)
    return fetch(collection_id, **kwargs)
def split_pow_tgh(self, text):
"""Split a power/toughness string on the correct slash.
Correctly accounts for curly braces to denote fractions.
E.g., '2/2' --> ['2', '2']
'3{1/2}/3{1/2}' --> ['3{1/2}', '3{1/2}']
"""
return [n for n in re.split(r"/(?=([^{}]*{[^{}]*})*[^{}]*$)", text)
if n is not None][:2] | Split a power/toughness string on the correct slash.
Correctly accounts for curly braces to denote fractions.
E.g., '2/2' --> ['2', '2']
'3{1/2}/3{1/2}' --> ['3{1/2}', '3{1/2}'] | Below is the instruction that describes the task:
### Input:
Split a power/toughness string on the correct slash.
Correctly accounts for curly braces to denote fractions.
E.g., '2/2' --> ['2', '2']
'3{1/2}/3{1/2}' --> ['3{1/2}', '3{1/2}']
### Response:
def split_pow_tgh(self, text):
    """Split a power/toughness string on the slash separating its halves,
    ignoring slashes inside curly-brace fractions.

    E.g., '2/2' --> ['2', '2']
    '3{1/2}/3{1/2}' --> ['3{1/2}', '3{1/2}']
    """
    # Only split on '/' whose remainder parses as balanced {...} groups;
    # a slash inside braces fails the lookahead and is left alone.
    outside_braces = r"/(?=([^{}]*{[^{}]*})*[^{}]*$)"
    pieces = re.split(outside_braces, text)
    # re.split interleaves the lookahead group's captures (may be None).
    return [piece for piece in pieces if piece is not None][:2]
def make_input_from_factored_string(sentence_id: SentenceId,
factored_string: str,
translator: 'Translator',
delimiter: str = C.DEFAULT_FACTOR_DELIMITER) -> TranslatorInput:
"""
Returns a TranslatorInput object from a string with factor annotations on a token level, separated by delimiter.
If translator does not require any source factors, the string is parsed as a plain token string.
:param sentence_id: Sentence id.
:param factored_string: An input string with additional factors per token, separated by delimiter.
:param translator: A translator object.
:param delimiter: A factor delimiter. Default: '|'.
:return: A TranslatorInput.
"""
utils.check_condition(bool(delimiter) and not delimiter.isspace(),
"Factor delimiter can not be whitespace or empty.")
model_num_source_factors = translator.num_source_factors
if model_num_source_factors == 1:
return make_input_from_plain_string(sentence_id=sentence_id, string=factored_string)
tokens = [] # type: Tokens
factors = [[] for _ in range(model_num_source_factors - 1)] # type: List[Tokens]
for token_id, token in enumerate(data_io.get_tokens(factored_string)):
pieces = token.split(delimiter)
if not all(pieces) or len(pieces) != model_num_source_factors:
logger.error("Failed to parse %d factors at position %d ('%s') in '%s'" % (model_num_source_factors,
token_id, token,
factored_string.strip()))
return _bad_input(sentence_id, reason=factored_string)
tokens.append(pieces[0])
for i, factor in enumerate(factors):
factors[i].append(pieces[i + 1])
return TranslatorInput(sentence_id=sentence_id, tokens=tokens, factors=factors) | Returns a TranslatorInput object from a string with factor annotations on a token level, separated by delimiter.
If translator does not require any source factors, the string is parsed as a plain token string.
:param sentence_id: Sentence id.
:param factored_string: An input string with additional factors per token, separated by delimiter.
:param translator: A translator object.
:param delimiter: A factor delimiter. Default: '|'.
:return: A TranslatorInput. | Below is the instruction that describes the task:
### Input:
Returns a TranslatorInput object from a string with factor annotations on a token level, separated by delimiter.
If translator does not require any source factors, the string is parsed as a plain token string.
:param sentence_id: Sentence id.
:param factored_string: An input string with additional factors per token, separated by delimiter.
:param translator: A translator object.
:param delimiter: A factor delimiter. Default: '|'.
:return: A TranslatorInput.
### Response:
def make_input_from_factored_string(sentence_id: SentenceId,
                                    factored_string: str,
                                    translator: 'Translator',
                                    delimiter: str = C.DEFAULT_FACTOR_DELIMITER) -> TranslatorInput:
    """
    Returns a TranslatorInput object from a string with factor annotations on a token level, separated by delimiter.
    If translator does not require any source factors, the string is parsed as a plain token string.

    :param sentence_id: Sentence id.
    :param factored_string: An input string with additional factors per token, separated by delimiter.
    :param translator: A translator object.
    :param delimiter: A factor delimiter. Default: '|'.
    :return: A TranslatorInput.
    """
    utils.check_condition(bool(delimiter) and not delimiter.isspace(),
                          "Factor delimiter can not be whitespace or empty.")
    model_num_source_factors = translator.num_source_factors
    # With only the surface factor, no per-token parsing is needed.
    if model_num_source_factors == 1:
        return make_input_from_plain_string(sentence_id=sentence_id, string=factored_string)
    tokens = []  # type: Tokens
    # One value list per additional (non-surface) factor.
    factors = [[] for _ in range(model_num_source_factors - 1)]  # type: List[Tokens]
    for token_id, token in enumerate(data_io.get_tokens(factored_string)):
        pieces = token.split(delimiter)
        # Each token must carry exactly one non-empty value per model
        # factor; otherwise the whole input is rejected as bad input.
        if not all(pieces) or len(pieces) != model_num_source_factors:
            logger.error("Failed to parse %d factors at position %d ('%s') in '%s'" % (model_num_source_factors,
                                                                                       token_id, token,
                                                                                       factored_string.strip()))
            return _bad_input(sentence_id, reason=factored_string)
        tokens.append(pieces[0])
        for i, factor in enumerate(factors):
            factors[i].append(pieces[i + 1])
    return TranslatorInput(sentence_id=sentence_id, tokens=tokens, factors=factors)
def get_image_set(self):
"""
Obtain existing ImageSet if `pk` is specified, otherwise
create a new ImageSet for the user.
"""
image_set_pk = self.kwargs.get("pk", None)
if image_set_pk is None:
return self.request.user.image_sets.create()
return get_object_or_404(self.get_queryset(), pk=image_set_pk) | Obtain existing ImageSet if `pk` is specified, otherwise
create a new ImageSet for the user. | Below is the the instruction that describes the task:
### Input:
Obtain existing ImageSet if `pk` is specified, otherwise
create a new ImageSet for the user.
### Response:
def get_image_set(self):
    """
    Return the ImageSet addressed by the ``pk`` URL kwarg; when no
    ``pk`` was supplied, create a brand-new ImageSet for the user.
    """
    image_set_pk = self.kwargs.get("pk")
    if image_set_pk is None:
        # No pk in the URL: start a fresh set owned by the requester.
        return self.request.user.image_sets.create()
    return get_object_or_404(self.get_queryset(), pk=image_set_pk)
def set_feedback_from_tpl(tpl_name, parameters, problem_id=None, append=False):
""" Parse a template, using the given parameters, and set it as the feedback message.
tpl_name must indicate a file. Given that XX_XX is the lang code of the current user ('en_US' or 'fr_FR', for example),
this function will search template file in different locations, in the following order:
- [current_dir]/tpl_name.XX_XX.tpl
- [task_dir]/lang/XX_XX/tpl_name.tpl (this is the preferred way, as it contributes to store all translations in the same folder)
- [current_dir]/tpl_name.tpl
Note that you can indicate "../otherdir/mytpl" to force the function to search in the "../otherdir" directory. Simply omit the final ".tpl".
If no file is found or a parsing exception occured, an error is displayed as feedback message, and False is returned.
If everything went well, True is returned.
The parsing uses Jinja2.
Parameters is a dictionnary that will be given to the Jinja template.
"""
inginious.lang.init()
lang = get_lang()
tpl_location = None
possible_locations = [".".join([tpl_name, lang, "tpl"]),
os.path.join(inginious.lang.get_lang_dir_path(), lang, tpl_name) + ".tpl",
".".join([tpl_name, "tpl"])]
for path in possible_locations:
if os.path.exists(path):
tpl_location = path
break
if tpl_location is None:
output = """
.. error::
Unable to find template named %s. Please contact your administrator.
""" % tpl_name
if problem_id is None:
set_global_feedback(output, append)
else:
set_problem_feedback(output, problem_id, append)
return False
try:
template = Template(open(tpl_location, 'r').read())
parameters.update({"_": _})
output = template.render(parameters)
valid = True
except Exception:
output = """
.. error::
An error occured while parsing the feedback template. Here is the full error:
::
"""
output += "\n".join(["\t\t"+line for line in traceback.format_exc().split("\n")])
output += "\n\tPlease contact your administrator.\n"
valid = False
if problem_id is None:
set_global_feedback(output, append)
else:
set_problem_feedback(output, problem_id, append)
return valid | Parse a template, using the given parameters, and set it as the feedback message.
tpl_name must indicate a file. Given that XX_XX is the lang code of the current user ('en_US' or 'fr_FR', for example),
this function will search template file in different locations, in the following order:
- [current_dir]/tpl_name.XX_XX.tpl
- [task_dir]/lang/XX_XX/tpl_name.tpl (this is the preferred way, as it contributes to store all translations in the same folder)
- [current_dir]/tpl_name.tpl
Note that you can indicate "../otherdir/mytpl" to force the function to search in the "../otherdir" directory. Simply omit the final ".tpl".
If no file is found or a parsing exception occured, an error is displayed as feedback message, and False is returned.
If everything went well, True is returned.
The parsing uses Jinja2.
Parameters is a dictionary that will be given to the Jinja template. | Below is the instruction that describes the task:
### Input:
Parse a template, using the given parameters, and set it as the feedback message.
tpl_name must indicate a file. Given that XX_XX is the lang code of the current user ('en_US' or 'fr_FR', for example),
this function will search template file in different locations, in the following order:
- [current_dir]/tpl_name.XX_XX.tpl
- [task_dir]/lang/XX_XX/tpl_name.tpl (this is the preferred way, as it contributes to store all translations in the same folder)
- [current_dir]/tpl_name.tpl
Note that you can indicate "../otherdir/mytpl" to force the function to search in the "../otherdir" directory. Simply omit the final ".tpl".
If no file is found or a parsing exception occured, an error is displayed as feedback message, and False is returned.
If everything went well, True is returned.
The parsing uses Jinja2.
Parameters is a dictionnary that will be given to the Jinja template.
### Response:
def set_feedback_from_tpl(tpl_name, parameters, problem_id=None, append=False):
    """ Parse a template, using the given parameters, and set it as the feedback message.

    tpl_name must indicate a file. Given that XX_XX is the lang code of the current user ('en_US' or 'fr_FR', for example),
    this function will search template file in different locations, in the following order:

    - [current_dir]/tpl_name.XX_XX.tpl
    - [task_dir]/lang/XX_XX/tpl_name.tpl (this is the preferred way, as it contributes to store all translations in the same folder)
    - [current_dir]/tpl_name.tpl

    Note that you can indicate "../otherdir/mytpl" to force the function to search in the "../otherdir" directory. Simply omit the final ".tpl".
    If no file is found or a parsing exception occured, an error is displayed as feedback message, and False is returned.
    If everything went well, True is returned.

    The parsing uses Jinja2. Parameters is a dictionary that will be given to the Jinja template.
    """
    inginious.lang.init()
    lang = get_lang()
    tpl_location = None
    # Search order: localized caller-relative file, shared lang folder,
    # then the non-localized caller-relative file.
    possible_locations = [".".join([tpl_name, lang, "tpl"]),
                          os.path.join(inginious.lang.get_lang_dir_path(), lang, tpl_name) + ".tpl",
                          ".".join([tpl_name, "tpl"])]
    for path in possible_locations:
        if os.path.exists(path):
            tpl_location = path
            break
    if tpl_location is None:
        # No template found anywhere: surface an RST error block as the
        # feedback instead of failing silently.
        output = """
.. error::
Unable to find template named %s. Please contact your administrator.
""" % tpl_name
        if problem_id is None:
            set_global_feedback(output, append)
        else:
            set_problem_feedback(output, problem_id, append)
        return False
    try:
        # NOTE(review): the file handle is never closed explicitly;
        # consider a with-statement -- confirm before changing.
        template = Template(open(tpl_location, 'r').read())
        # Expose the gettext translation function to the template.
        parameters.update({"_": _})
        output = template.render(parameters)
        valid = True
    except Exception:
        # Render failed: report the full traceback in the feedback,
        # indented so it stays inside the RST literal block.
        output = """
.. error::
An error occured while parsing the feedback template. Here is the full error:
::
"""
        output += "\n".join(["\t\t"+line for line in traceback.format_exc().split("\n")])
        output += "\n\tPlease contact your administrator.\n"
        valid = False
    if problem_id is None:
        set_global_feedback(output, append)
    else:
        set_problem_feedback(output, problem_id, append)
    return valid
def run_create_sm(self, tenant_id, fw_dict, is_fw_virt):
"""Runs the create State Machine.
Goes through every state function until the end or when one state
returns failure.
"""
ret = True
serv_obj = self.get_service_obj(tenant_id)
state = serv_obj.get_state()
# Preserve the ordering of the next lines till while
new_state = serv_obj.fixup_state(fw_const.FW_CR_OP, state)
serv_obj.store_local_final_result(fw_const.RESULT_FW_CREATE_INIT)
if state != new_state:
state = new_state
serv_obj.store_state(state)
while ret:
try:
ret = self.fabric_fsm[state][0](tenant_id, fw_dict,
is_fw_virt=is_fw_virt)
except Exception as exc:
LOG.error("Exception %(exc)s for state %(state)s",
{'exc': str(exc), 'state':
fw_const.fw_state_fn_dict.get(state)})
ret = False
if ret:
LOG.info("State %s return successfully",
fw_const.fw_state_fn_dict.get(state))
state = self.get_next_state(state, ret, fw_const.FW_CR_OP)
serv_obj.store_state(state)
if state == fw_const.FABRIC_PREPARE_DONE_STATE:
break
if ret:
serv_obj.store_local_final_result(fw_const.RESULT_FW_CREATE_DONE)
return ret | Runs the create State Machine.
Goes through every state function until the end or when one state
returns failure. | Below is the instruction that describes the task:
### Input:
Runs the create State Machine.
Goes through every state function until the end or when one state
returns failure.
### Response:
def run_create_sm(self, tenant_id, fw_dict, is_fw_virt):
    """Runs the create State Machine.

    Goes through every state function until the end or when one state
    returns failure.

    :param tenant_id: tenant whose firewall service is being created.
    :param fw_dict: firewall configuration passed to each state handler.
    :param is_fw_virt: whether the firewall is virtual.
    :return: True if every state succeeded, False otherwise.
    """
    ret = True
    serv_obj = self.get_service_obj(tenant_id)
    state = serv_obj.get_state()
    # Preserve the ordering of the next lines till while
    new_state = serv_obj.fixup_state(fw_const.FW_CR_OP, state)
    serv_obj.store_local_final_result(fw_const.RESULT_FW_CREATE_INIT)
    if state != new_state:
        state = new_state
        serv_obj.store_state(state)
    while ret:
        try:
            # Dispatch to the handler registered for the current state.
            ret = self.fabric_fsm[state][0](tenant_id, fw_dict,
                                            is_fw_virt=is_fw_virt)
        except Exception as exc:
            # A state handler raised: log it and treat as failure.
            LOG.error("Exception %(exc)s for state %(state)s",
                      {'exc': str(exc), 'state':
                       fw_const.fw_state_fn_dict.get(state)})
            ret = False
        if ret:
            LOG.info("State %s return successfully",
                     fw_const.fw_state_fn_dict.get(state))
        # get_next_state is consulted even on failure; the resulting
        # state is persisted before the loop exits or continues.
        state = self.get_next_state(state, ret, fw_const.FW_CR_OP)
        serv_obj.store_state(state)
        if state == fw_const.FABRIC_PREPARE_DONE_STATE:
            break
    if ret:
        serv_obj.store_local_final_result(fw_const.RESULT_FW_CREATE_DONE)
    return ret
def pin_to_ipfs(
manifest: Manifest, *, backend: BaseIPFSBackend, prettify: Optional[bool] = False
) -> List[Dict[str, str]]:
"""
Returns the IPFS pin data after pinning the manifest to the provided IPFS Backend.
`pin_to_ipfs()` Should *always* be the last argument in a builder, as it will return the pin
data and not the manifest.
"""
contents = format_manifest(manifest, prettify=prettify)
with tempfile.NamedTemporaryFile() as temp:
temp.write(to_bytes(text=contents))
temp.seek(0)
return backend.pin_assets(Path(temp.name)) | Returns the IPFS pin data after pinning the manifest to the provided IPFS Backend.
`pin_to_ipfs()` Should *always* be the last argument in a builder, as it will return the pin
data and not the manifest. | Below is the instruction that describes the task:
### Input:
Returns the IPFS pin data after pinning the manifest to the provided IPFS Backend.
`pin_to_ipfs()` Should *always* be the last argument in a builder, as it will return the pin
data and not the manifest.
### Response:
def pin_to_ipfs(
    manifest: Manifest, *, backend: BaseIPFSBackend, prettify: Optional[bool] = False
) -> List[Dict[str, str]]:
    """
    Pin the manifest to ``backend`` and return the resulting IPFS pin data.

    Because it returns pin data rather than a manifest, ``pin_to_ipfs()``
    should always be the final stage of a builder pipeline.
    """
    serialized = format_manifest(manifest, prettify=prettify)
    # The backend pins from a file path, so round-trip the bytes
    # through a temporary file.
    with tempfile.NamedTemporaryFile() as tmp_file:
        tmp_file.write(to_bytes(text=serialized))
        tmp_file.seek(0)
        return backend.pin_assets(Path(tmp_file.name))
def connect_delete_namespaced_service_proxy(self, name, namespace, **kwargs): # noqa: E501
"""connect_delete_namespaced_service_proxy # noqa: E501
connect DELETE requests to proxy of Service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_delete_namespaced_service_proxy(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ServiceProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_delete_namespaced_service_proxy_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.connect_delete_namespaced_service_proxy_with_http_info(name, namespace, **kwargs) # noqa: E501
return data | connect_delete_namespaced_service_proxy # noqa: E501
connect DELETE requests to proxy of Service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_delete_namespaced_service_proxy(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ServiceProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
:return: str
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
connect_delete_namespaced_service_proxy # noqa: E501
connect DELETE requests to proxy of Service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_delete_namespaced_service_proxy(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ServiceProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
:return: str
If the method is called asynchronously,
returns the request thread.
### Response:
def connect_delete_namespaced_service_proxy(self, name, namespace, **kwargs):  # noqa: E501
        """connect_delete_namespaced_service_proxy  # noqa: E501
        connect DELETE requests to proxy of Service  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.connect_delete_namespaced_service_proxy(name, namespace, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str name: name of the ServiceProxyOptions (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
        :return: str
        If the method is called asynchronously,
        returns the request thread.
        """
        # Only the deserialized body is wanted here, not the full
        # (data, status, headers) tuple the *_with_http_info helper can return.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            # Async mode: the helper returns the request thread immediately.
            return self.connect_delete_namespaced_service_proxy_with_http_info(name, namespace, **kwargs)  # noqa: E501
        else:
            # Sync mode: block until the call completes and return the body.
            (data) = self.connect_delete_namespaced_service_proxy_with_http_info(name, namespace, **kwargs)  # noqa: E501
            return data |
def recurse(desc, pre='pre_recursion', post=None, python_path=None):
"""
Depth first recursion through a dictionary containing type constructors
The arguments pre, post and children are independently either:
* None, which means to do nothing
* a string, which means to use the static class method of that name on the
class being constructed, or
* a callable, to be called at each recursion
Arguments:
dictionary -- a project dictionary or one of its subdictionaries
pre -- called before children are visited node in the recursion
post -- called after children are visited in the recursion
python_path -- relative path to start resolving typenames
"""
def call(f, desc):
if isinstance(f, str):
# f is the name of a static class method on the datatype.
f = getattr(datatype, f, None)
return f and f(desc)
# Automatically load strings that look like JSON or Yaml filenames.
desc = load.load_if_filename(desc) or desc
desc = construct.to_type_constructor(desc, python_path)
datatype = desc.get('datatype')
desc = call(pre, desc) or desc
for child_name in getattr(datatype, 'CHILDREN', []):
child = desc.get(child_name)
if child:
is_plural = child_name.endswith('s')
remove_s = is_plural and child_name != 'drivers'
# This is because it's the "drivers" directory, whereas
# the others are animation, control, layout, project
# without the s. TODO: rename drivers/ to driver/ in v4
cname = child_name[:-1] if remove_s else child_name
new_path = python_path or ('bibliopixel.' + cname)
if is_plural:
if isinstance(child, (dict, str)):
child = [child]
for i, c in enumerate(child):
child[i] = recurse(c, pre, post, new_path)
desc[child_name] = child
else:
desc[child_name] = recurse(child, pre, post, new_path)
d = call(post, desc)
return desc if d is None else d | Depth first recursion through a dictionary containing type constructors
The arguments pre, post and children are independently either:
* None, which means to do nothing
* a string, which means to use the static class method of that name on the
class being constructed, or
* a callable, to be called at each recursion
Arguments:
dictionary -- a project dictionary or one of its subdictionaries
pre -- called before children are visited node in the recursion
post -- called after children are visited in the recursion
    python_path -- relative path to start resolving typenames | Below is the instruction that describes the task:
### Input:
Depth first recursion through a dictionary containing type constructors
The arguments pre, post and children are independently either:
* None, which means to do nothing
* a string, which means to use the static class method of that name on the
class being constructed, or
* a callable, to be called at each recursion
Arguments:
dictionary -- a project dictionary or one of its subdictionaries
pre -- called before children are visited node in the recursion
post -- called after children are visited in the recursion
python_path -- relative path to start resolving typenames
### Response:
def recurse(desc, pre='pre_recursion', post=None, python_path=None):
    """
    Depth first recursion through a dictionary containing type constructors
    The arguments pre, post and children are independently either:
    * None, which means to do nothing
    * a string, which means to use the static class method of that name on the
    class being constructed, or
    * a callable, to be called at each recursion
    Arguments:
    dictionary -- a project dictionary or one of its subdictionaries
    pre -- called before children are visited node in the recursion
    post -- called after children are visited in the recursion
    python_path -- relative path to start resolving typenames
    """
    def call(f, desc):
        # NOTE: `datatype` is read late from the enclosing scope, so `call`
        # must only run after `datatype` has been assigned below.
        if isinstance(f, str):
            # f is the name of a static class method on the datatype.
            f = getattr(datatype, f, None)
        return f and f(desc)
    # Automatically load strings that look like JSON or Yaml filenames.
    desc = load.load_if_filename(desc) or desc
    desc = construct.to_type_constructor(desc, python_path)
    datatype = desc.get('datatype')
    # `pre` may return a replacement description; fall back to the original.
    desc = call(pre, desc) or desc
    for child_name in getattr(datatype, 'CHILDREN', []):
        child = desc.get(child_name)
        if child:
            is_plural = child_name.endswith('s')
            remove_s = is_plural and child_name != 'drivers'
            # This is because it's the "drivers" directory, whereas
            # the others are animation, control, layout, project
            # without the s. TODO: rename drivers/ to driver/ in v4
            cname = child_name[:-1] if remove_s else child_name
            new_path = python_path or ('bibliopixel.' + cname)
            if is_plural:
                # A single dict or string child is promoted to a one-item list.
                if isinstance(child, (dict, str)):
                    child = [child]
                for i, c in enumerate(child):
                    child[i] = recurse(c, pre, post, new_path)
                desc[child_name] = child
            else:
                desc[child_name] = recurse(child, pre, post, new_path)
    d = call(post, desc)
    # `post` may replace the description; otherwise return it unchanged.
    return desc if d is None else d |
def expose_event(self, widget, event):
"""When an area of the window is exposed, we just copy out of the
server-side, off-screen surface to that area.
"""
x, y, width, height = event.area
self.logger.debug("surface is %s" % self.surface)
if self.surface is not None:
win = widget.get_window()
cr = win.cairo_create()
# set clip area for exposed region
cr.rectangle(x, y, width, height)
cr.clip()
# Paint from off-screen surface
cr.set_source_surface(self.surface, 0, 0)
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.paint()
return False | When an area of the window is exposed, we just copy out of the
    server-side, off-screen surface to that area. | Below is the instruction that describes the task:
### Input:
When an area of the window is exposed, we just copy out of the
server-side, off-screen surface to that area.
### Response:
def expose_event(self, widget, event):
        """When an area of the window is exposed, we just copy out of the
        server-side, off-screen surface to that area.
        """
        # Damaged rectangle, in widget coordinates.
        x, y, width, height = event.area
        self.logger.debug("surface is %s" % self.surface)
        if self.surface is not None:
            win = widget.get_window()
            cr = win.cairo_create()
            # set clip area for exposed region
            cr.rectangle(x, y, width, height)
            cr.clip()
            # Paint from off-screen surface
            cr.set_source_surface(self.surface, 0, 0)
            # OPERATOR_SOURCE copies pixels verbatim, ignoring prior contents.
            cr.set_operator(cairo.OPERATOR_SOURCE)
            cr.paint()
        # False lets the toolkit continue normal event propagation.
        return False |
async def create_revocation_state(blob_storage_reader_handle: int,
rev_reg_def_json: str,
rev_reg_delta_json: str,
timestamp: int,
cred_rev_id: str) -> str:
"""
Create revocation state for a credential in the particular time moment.
:param blob_storage_reader_handle: configuration of blob storage reader handle that will allow to read revocation tails
:param rev_reg_def_json: revocation registry definition json
:param rev_reg_delta_json: revocation registry definition delta json
:param timestamp: time represented as a total number of seconds from Unix Epoch
:param cred_rev_id: user credential revocation id in revocation registry
:return: revocation state json {
"rev_reg": <revocation registry>,
"witness": <witness>,
"timestamp" : integer
}
"""
logger = logging.getLogger(__name__)
logger.debug("create_revocation_info: >>> blob_storage_reader_handle: %r, rev_reg_def_json: %r,"
" rev_reg_delta_json: %r, timestamp: %r, cred_rev_id: %r",
blob_storage_reader_handle,
rev_reg_def_json,
rev_reg_delta_json,
timestamp,
cred_rev_id)
if not hasattr(create_revocation_state, "cb"):
logger.debug("create_revocation_state: Creating callback")
create_revocation_state.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_blob_storage_reader_handle = c_int32(blob_storage_reader_handle)
c_rev_reg_def_json = c_char_p(rev_reg_def_json.encode('utf-8'))
c_rev_reg_delta_json = c_char_p(rev_reg_delta_json.encode('utf-8'))
c_timestamp = c_uint64(timestamp)
c_cred_rev_id = c_char_p(cred_rev_id.encode('utf-8'))
rev_state_json = await do_call('indy_create_revocation_state',
c_blob_storage_reader_handle,
c_rev_reg_def_json,
c_rev_reg_delta_json,
c_timestamp,
c_cred_rev_id,
create_revocation_state.cb)
res = rev_state_json.decode()
logger.debug("create_revocation_state: <<< res: %r", res)
return res | Create revocation state for a credential in the particular time moment.
:param blob_storage_reader_handle: configuration of blob storage reader handle that will allow to read revocation tails
:param rev_reg_def_json: revocation registry definition json
:param rev_reg_delta_json: revocation registry definition delta json
:param timestamp: time represented as a total number of seconds from Unix Epoch
:param cred_rev_id: user credential revocation id in revocation registry
:return: revocation state json {
"rev_reg": <revocation registry>,
"witness": <witness>,
"timestamp" : integer
    } | Below is the instruction that describes the task:
### Input:
Create revocation state for a credential in the particular time moment.
:param blob_storage_reader_handle: configuration of blob storage reader handle that will allow to read revocation tails
:param rev_reg_def_json: revocation registry definition json
:param rev_reg_delta_json: revocation registry definition delta json
:param timestamp: time represented as a total number of seconds from Unix Epoch
:param cred_rev_id: user credential revocation id in revocation registry
:return: revocation state json {
"rev_reg": <revocation registry>,
"witness": <witness>,
"timestamp" : integer
}
### Response:
async def create_revocation_state(blob_storage_reader_handle: int,
                                  rev_reg_def_json: str,
                                  rev_reg_delta_json: str,
                                  timestamp: int,
                                  cred_rev_id: str) -> str:
    """
    Create revocation state for a credential in the particular time moment.
    :param blob_storage_reader_handle: configuration of blob storage reader handle that will allow to read revocation tails
    :param rev_reg_def_json: revocation registry definition json
    :param rev_reg_delta_json: revocation registry definition delta json
    :param timestamp: time represented as a total number of seconds from Unix Epoch
    :param cred_rev_id: user credential revocation id in revocation registry
    :return: revocation state json {
         "rev_reg": <revocation registry>,
         "witness": <witness>,
         "timestamp" : integer
    }
    """
    logger = logging.getLogger(__name__)
    logger.debug("create_revocation_info: >>> blob_storage_reader_handle: %r, rev_reg_def_json: %r,"
                 " rev_reg_delta_json: %r, timestamp: %r, cred_rev_id: %r",
                 blob_storage_reader_handle,
                 rev_reg_def_json,
                 rev_reg_delta_json,
                 timestamp,
                 cred_rev_id)
    # Create the ctypes completion callback once and cache it on the function
    # object so every call reuses the same C-compatible callback.
    if not hasattr(create_revocation_state, "cb"):
        logger.debug("create_revocation_state: Creating callback")
        create_revocation_state.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
    # Marshal Python values into ctypes before crossing the FFI boundary.
    c_blob_storage_reader_handle = c_int32(blob_storage_reader_handle)
    c_rev_reg_def_json = c_char_p(rev_reg_def_json.encode('utf-8'))
    c_rev_reg_delta_json = c_char_p(rev_reg_delta_json.encode('utf-8'))
    c_timestamp = c_uint64(timestamp)
    c_cred_rev_id = c_char_p(cred_rev_id.encode('utf-8'))
    rev_state_json = await do_call('indy_create_revocation_state',
                                   c_blob_storage_reader_handle,
                                   c_rev_reg_def_json,
                                   c_rev_reg_delta_json,
                                   c_timestamp,
                                   c_cred_rev_id,
                                   create_revocation_state.cb)
    # The native layer returns bytes; decode to str for the caller.
    res = rev_state_json.decode()
    logger.debug("create_revocation_state: <<< res: %r", res)
    return res |
def to_numpy(self, dtype=None, copy=False):
"""
Convert the PandasArray to a :class:`numpy.ndarray`.
By default, this requires no coercion or copying of data.
Parameters
----------
dtype : numpy.dtype
The NumPy dtype to pass to :func:`numpy.asarray`.
copy : bool, default False
Whether to copy the underlying data.
Returns
-------
ndarray
"""
result = np.asarray(self._ndarray, dtype=dtype)
if copy and result is self._ndarray:
result = result.copy()
return result | Convert the PandasArray to a :class:`numpy.ndarray`.
By default, this requires no coercion or copying of data.
Parameters
----------
dtype : numpy.dtype
The NumPy dtype to pass to :func:`numpy.asarray`.
copy : bool, default False
Whether to copy the underlying data.
Returns
-------
    ndarray | Below is the instruction that describes the task:
### Input:
Convert the PandasArray to a :class:`numpy.ndarray`.
By default, this requires no coercion or copying of data.
Parameters
----------
dtype : numpy.dtype
The NumPy dtype to pass to :func:`numpy.asarray`.
copy : bool, default False
Whether to copy the underlying data.
Returns
-------
ndarray
### Response:
def to_numpy(self, dtype=None, copy=False):
        """
        Convert the PandasArray to a :class:`numpy.ndarray`.
        By default, this requires no coercion or copying of data.
        Parameters
        ----------
        dtype : numpy.dtype
            The NumPy dtype to pass to :func:`numpy.asarray`.
        copy : bool, default False
            Whether to copy the underlying data.
        Returns
        -------
        ndarray
        """
        # np.asarray returns the backing array itself when dtype already
        # matches, so no data is coerced or copied in the common case.
        result = np.asarray(self._ndarray, dtype=dtype)
        # Honor copy=True even when asarray did not allocate a new array.
        if copy and result is self._ndarray:
            result = result.copy()
        return result |
def random_by_edges(graph: BELGraph, percentage: Optional[float] = None) -> BELGraph:
"""Get a random graph by keeping a certain percentage of original edges.
:param graph: A BEL graph
    :param percentage: What percentage of edges to take
"""
percentage = percentage or 0.9
assert 0 < percentage <= 1
edges = graph.edges(keys=True)
n = int(graph.number_of_edges() * percentage)
subedges = random.sample(edges, n)
rv = graph.fresh_copy()
for u, v, k in subedges:
safe_add_edge(rv, u, v, k, graph[u][v][k])
update_node_helper(graph, rv)
return rv | Get a random graph by keeping a certain percentage of original edges.
:param graph: A BEL graph
    :param percentage: What percentage of edges to take | Below is the instruction that describes the task:
### Input:
Get a random graph by keeping a certain percentage of original edges.
:param graph: A BEL graph
    :param percentage: What percentage of edges to take
### Response:
def random_by_edges(graph: BELGraph, percentage: Optional[float] = None) -> BELGraph:
    """Get a random graph by keeping a certain percentage of original edges.
    :param graph: A BEL graph
    :param percentage: What percentage of edges to take (defaults to 0.9)
    """
    percentage = percentage or 0.9
    # NOTE(review): assert is stripped under -O; confirm whether an explicit
    # ValueError would be preferable for this range check.
    assert 0 < percentage <= 1
    edges = graph.edges(keys=True)
    n = int(graph.number_of_edges() * percentage)
    # NOTE(review): random.sample over an edge *view* requires a sequence in
    # newer Python versions -- confirm list(edges) is not needed here.
    subedges = random.sample(edges, n)
    rv = graph.fresh_copy()
    for u, v, k in subedges:
        safe_add_edge(rv, u, v, k, graph[u][v][k])
    # update_node_helper presumably syncs node data from graph into rv --
    # verify against its definition.
    update_node_helper(graph, rv)
    return rv |
def export(self, nidm_version, export_dir):
"""
Create prov entities and activities.
"""
atts = (
(PROV['type'], NIDM_ERROR_MODEL),
(NIDM_HAS_ERROR_DISTRIBUTION, self.error_distribution),
(NIDM_ERROR_VARIANCE_HOMOGENEOUS, self.variance_homo),
(NIDM_VARIANCE_SPATIAL_MODEL, self.variance_spatial),
(NIDM_HAS_ERROR_DEPENDENCE, self.dependance))
# If the error covariance is independent then there is no associated
# spatial model
if self.dependance_spatial is not None:
atts = atts + (
((NIDM_DEPENDENCE_SPATIAL_MODEL, self.dependance_spatial),))
# Create "Error Model" entity
        self.add_attributes(atts) | Create prov entities and activities. | Below is the instruction that describes the task:
### Input:
Create prov entities and activities.
### Response:
def export(self, nidm_version, export_dir):
        """
        Create prov entities and activities.
        """
        # nidm_version and export_dir are unused in this body; presumably kept
        # to match a common exporter interface -- confirm before removing.
        atts = (
            (PROV['type'], NIDM_ERROR_MODEL),
            (NIDM_HAS_ERROR_DISTRIBUTION, self.error_distribution),
            (NIDM_ERROR_VARIANCE_HOMOGENEOUS, self.variance_homo),
            (NIDM_VARIANCE_SPATIAL_MODEL, self.variance_spatial),
            (NIDM_HAS_ERROR_DEPENDENCE, self.dependance))
        # If the error covariance is independent then there is no associated
        # spatial model
        if self.dependance_spatial is not None:
            atts = atts + (
                ((NIDM_DEPENDENCE_SPATIAL_MODEL, self.dependance_spatial),))
        # Create "Error Model" entity
        self.add_attributes(atts) |
def does_match_definition(given, main, secondary):
"""implementation details"""
assert isinstance(secondary, tuple)
assert len(secondary) == 2 # general solution could be provided
types = decompose_type(given)
if isinstance(types[0], main):
return True
if len(types) >= 2:
cond1 = isinstance(types[0], main)
cond2 = isinstance(types[1], secondary)
cond3 = isinstance(types[1], main)
cond4 = isinstance(types[0], secondary)
if (cond1 and cond2) or (cond3 and cond4):
return True
if len(types) >= 3:
classes = set([tp.__class__ for tp in types[:3]])
desired = set([main] + list(secondary))
diff = classes.symmetric_difference(desired)
if not diff:
return True
if len(diff) == 2:
items = list(diff)
return (
issubclass(
items[0], items[1]) or issubclass(items[1], items[0]))
return False
else:
            return False | implementation details | Below is the instruction that describes the task:
### Input:
implementation details
### Response:
def does_match_definition(given, main, secondary):
    """implementation details"""
    assert isinstance(secondary, tuple)
    assert len(secondary) == 2  # general solution could be provided
    types = decompose_type(given)
    # Single-level match: the outermost type compound is `main` itself.
    if isinstance(types[0], main):
        return True
    if len(types) >= 2:
        # Two-level match: `main` plus a `secondary` wrapper, in either order.
        cond1 = isinstance(types[0], main)
        cond2 = isinstance(types[1], secondary)
        cond3 = isinstance(types[1], main)
        cond4 = isinstance(types[0], secondary)
        if (cond1 and cond2) or (cond3 and cond4):
            return True
        if len(types) >= 3:
            # Three-level match: the first three compounds must be exactly
            # {main} plus the two secondary classes, in any order.
            classes = set([tp.__class__ for tp in types[:3]])
            desired = set([main] + list(secondary))
            diff = classes.symmetric_difference(desired)
            if not diff:
                return True
            if len(diff) == 2:
                # Tolerate one subclass standing in for an expected class.
                items = list(diff)
                return (
                    issubclass(
                        items[0], items[1]) or issubclass(items[1], items[0]))
            return False
        else:
            return False |
def get_media_list_by_selector(
self, media_selector, media_attribute="src"
):
"""Return a list of media."""
page_url = urlparse.urlparse(self.uri)
return [
mediafile.get_instance(
urlparse.urljoin(
"%s://%s" % (
page_url.scheme,
page_url.netloc
),
urlparse.urlparse(
media.attrib[media_attribute],
scheme="http"
).geturl()
)
)
for media in self.parsedpage.get_nodes_by_selector(media_selector)
        ] | Return a list of media. | Below is the instruction that describes the task:
### Input:
Return a list of media.
### Response:
def get_media_list_by_selector(
        self, media_selector, media_attribute="src"
    ):
        """Return a list of media."""
        # Base URL of the current page, used to resolve relative media URLs.
        page_url = urlparse.urlparse(self.uri)
        # For every node matching the selector, resolve its media attribute
        # against the page's scheme/host and wrap it in a mediafile instance.
        return [
            mediafile.get_instance(
                urlparse.urljoin(
                    "%s://%s" % (
                        page_url.scheme,
                        page_url.netloc
                    ),
                    urlparse.urlparse(
                        media.attrib[media_attribute],
                        scheme="http"
                    ).geturl()
                )
            )
            for media in self.parsedpage.get_nodes_by_selector(media_selector)
        ] |
def next_power_of_2(x):
"""Finds the next power of 2 value
Args:
x: Input value
Returns:
power_of_2: Next power of 2 value
"""
power_of_2 = 1 if x == 0 else 2 ** np.ceil(np.log2(x))
return power_of_2 | Finds the next power of 2 value
Args:
x: Input value
Returns:
        power_of_2: Next power of 2 value | Below is the instruction that describes the task:
### Input:
Finds the next power of 2 value
Args:
x: Input value
Returns:
power_of_2: Next power of 2 value
### Response:
def next_power_of_2(x):
    """Finds the next power of 2 value
    Args:
        x: Input value
    Returns:
        power_of_2: Next power of 2 value
    """
    # For x == 0 this yields the int 1; otherwise 2 ** np.ceil(...) yields a
    # float (np.ceil returns a float), e.g. x=3 -> 4.0.
    power_of_2 = 1 if x == 0 else 2 ** np.ceil(np.log2(x))
    return power_of_2 |
def _check_errors(self):
"""Call this function after parsing the args to see if there are any
errors in the way things are input. Specifically for glotk-sweep, make
sure that all of the parameters for sweep have arguemnts if at least one
does."""
# use this to make sure that sweep, sstart, sstop, and sinterval
# all have values if one of them has values
# (P^Q^R^S) OR (-P and -Q and -R and -S)
# above is the minimal form for this logical statement in DNF
if not ((self._inn(self.args.sweep) and self._inn(self.args.sstart)
and self._inn(self.args.sstop) and self._inn(self.args.sinterval))
or
(not self._inn(self.args.sweep) and not self._inn(self.args.sstart) and not
self._inn(self.args.sstop) and not self._inn(self.args.sinterval))):
mutually_inclusive = ["sweep", "sstart", "sstop", "sinterval"]
print_str = ""
for each in mutually_inclusive:
print_str += " {0}: {1}\n".format(each, getattr(self.args, each))
raise AssertionError("""You specified one or more of the --sweep,
--sstart, --stop, or --sinterval arguments but did not specify all of
them. All of them are required when running the program in sweep mode.
Do not specify any of these arguments if not running the program in
sweep mode.\n{0}""".format(print_str)) | Call this function after parsing the args to see if there are any
errors in the way things are input. Specifically for glotk-sweep, make
sure that all of the parameters for sweep have arguemnts if at least one
    does. | Below is the instruction that describes the task:
### Input:
Call this function after parsing the args to see if there are any
errors in the way things are input. Specifically for glotk-sweep, make
sure that all of the parameters for sweep have arguemnts if at least one
does.
### Response:
def _check_errors(self):
        """Call this function after parsing the args to see if there are any
        errors in the way things are input. Specifically for glotk-sweep, make
        sure that all of the parameters for sweep have arguments if at least one
        does."""
        # use this to make sure that sweep, sstart, sstop, and sinterval
        # all have values if one of them has values
        # (P^Q^R^S) OR (-P and -Q and -R and -S)
        # above is the minimal form for this logical statement in DNF
        # i.e. the only valid states are "all four provided" (sweep mode) or
        # "none provided"; anything in between is a user error.
        if not ((self._inn(self.args.sweep) and self._inn(self.args.sstart)
                and self._inn(self.args.sstop) and self._inn(self.args.sinterval))
                or
                (not self._inn(self.args.sweep) and not self._inn(self.args.sstart) and not
                self._inn(self.args.sstop) and not self._inn(self.args.sinterval))):
            mutually_inclusive = ["sweep", "sstart", "sstop", "sinterval"]
            print_str = ""
            for each in mutually_inclusive:
                print_str += " {0}: {1}\n".format(each, getattr(self.args, each))
            raise AssertionError("""You specified one or more of the --sweep,
            --sstart, --stop, or --sinterval arguments but did not specify all of
            them. All of them are required when running the program in sweep mode.
            Do not specify any of these arguments if not running the program in
            sweep mode.\n{0}""".format(print_str)) |
def ISIs(self,time_dimension=0,units=None,min_t=None,max_t=None):
"""
returns the Inter Spike Intervals
`time_dimension`: which dimension contains the spike times (by default the first)
`units`,`min_t`,`max_t`: define the units of the output and the range of spikes that should be considered
"""
units = self._default_units(units)
converted_dimension,st = self.spike_times.get_converted(time_dimension,units)
if min_t is None:
min_t = converted_dimension.min
if max_t is None:
max_t = converted_dimension.max
return np.diff(sorted(st[(st>min_t) * (st <max_t)])) | returns the Inter Spike Intervals
`time_dimension`: which dimension contains the spike times (by default the first)
    `units`,`min_t`,`max_t`: define the units of the output and the range of spikes that should be considered | Below is the instruction that describes the task:
### Input:
returns the Inter Spike Intervals
`time_dimension`: which dimension contains the spike times (by default the first)
`units`,`min_t`,`max_t`: define the units of the output and the range of spikes that should be considered
### Response:
def ISIs(self,time_dimension=0,units=None,min_t=None,max_t=None):
    """
    returns the Inter Spike Intervals
    `time_dimension`: which dimension contains the spike times (by default the first)
    `units`,`min_t`,`max_t`: define the units of the output and the range of spikes that should be considered
    """
    units = self._default_units(units)
    converted_dimension,st = self.spike_times.get_converted(time_dimension,units)
    # Default the window to the full extent of the converted dimension.
    if min_t is None:
        min_t = converted_dimension.min
    if max_t is None:
        max_t = converted_dimension.max
    # Sort the in-window spike times and difference consecutive pairs.
    # NOTE: both bounds are exclusive (strict > and <).
    return np.diff(sorted(st[(st>min_t) * (st <max_t)])) |
def course(self, dept, course_number):
"""Return an object of semester-independent course info. All arguments
should be strings.
>>> cis120 = r.course('cis', '120')
"""
response = self._request(path.join(ENDPOINTS['CATALOG'], dept, course_number))
return response['result_data'][0] | Return an object of semester-independent course info. All arguments
should be strings.
    >>> cis120 = r.course('cis', '120') | Below is the instruction that describes the task:
### Input:
Return an object of semester-independent course info. All arguments
should be strings.
>>> cis120 = r.course('cis', '120')
### Response:
def course(self, dept, course_number):
        """Return an object of semester-independent course info. All arguments
        should be strings.
        >>> cis120 = r.course('cis', '120')
        """
        # Endpoint is <CATALOG>/<dept>/<course_number>; only the first result
        # of result_data is returned.
        response = self._request(path.join(ENDPOINTS['CATALOG'], dept, course_number))
        return response['result_data'][0] |
def validate(self, value, param_name, exc=None, logger=None):
"""
:param value: value to validate
:param param_name: name of the value (for logging purpose)
:param exc: exception to raise (default is "ValidatorError")
:param logger: logger to use (default will be "Validator.logger")
"""
if exc is not None:
self.exc = exc
if logger is not None:
self.logger = logger
if self.type is not None and not type(value) == self.type: # pylint: disable=unidiomatic-typecheck
self.error(
f'invalid type for parameter "{param_name}": {type(value)} (value: {value}) -- expected {self.type}'
)
if self.instance is not None and not isinstance(value, self.instance):
self.error(
f'invalid instance for parameter "{param_name}": {type(value)} (value: {value})'
f' -- expected {self.instance}'
)
if self.min is not None and value < self.min:
self.error(f'invalid value for parameter "{param_name}" (under minima of {self.min}): {value}')
if self.max is not None and value > self.max:
self.error(f'invalid value for parameter "{param_name}" (over maxima if {self.max}): {value}')
if self.regex is not None and not re_full_match(self.regex, value):
self.error(f'invalid value for parameter "{param_name}" (should match: "{self.regex}"): {value}')
if self.in_list is not None and value not in self.in_list:
self.error(f'invalid value for parameter "{param_name}"; "{value}" is not in list: {self.in_list}')
if self.path_exists and not exists(value):
self.error(f'"{param_name}" file does not exist: {value}')
return True | :param value: value to validate
:param param_name: name of the value (for logging purpose)
:param exc: exception to raise (default is "ValidatorError")
    :param logger: logger to use (default will be "Validator.logger") | Below is the instruction that describes the task:
### Input:
:param value: value to validate
:param param_name: name of the value (for logging purpose)
:param exc: exception to raise (default is "ValidatorError")
:param logger: logger to use (default will be "Validator.logger")
### Response:
def validate(self, value, param_name, exc=None, logger=None):
        """
        :param value: value to validate
        :param param_name: name of the value (for logging purpose)
        :param exc: exception to raise (default is "ValidatorError")
        :param logger: logger to use (default will be "Validator.logger")
        """
        # NOTE: passing exc/logger mutates this validator instance, so the
        # override persists for later validate() calls on the same object.
        if exc is not None:
            self.exc = exc
        if logger is not None:
            self.logger = logger
        # Exact type check on purpose (subclasses must not match).
        if self.type is not None and not type(value) == self.type: # pylint: disable=unidiomatic-typecheck
            self.error(
                f'invalid type for parameter "{param_name}": {type(value)} (value: {value}) -- expected {self.type}'
            )
        if self.instance is not None and not isinstance(value, self.instance):
            self.error(
                f'invalid instance for parameter "{param_name}": {type(value)} (value: {value})'
                f' -- expected {self.instance}'
            )
        if self.min is not None and value < self.min:
            self.error(f'invalid value for parameter "{param_name}" (under minima of {self.min}): {value}')
        if self.max is not None and value > self.max:
            self.error(f'invalid value for parameter "{param_name}" (over maxima if {self.max}): {value}')
        # re_full_match: the whole value must match, not just a prefix.
        if self.regex is not None and not re_full_match(self.regex, value):
            self.error(f'invalid value for parameter "{param_name}" (should match: "{self.regex}"): {value}')
        if self.in_list is not None and value not in self.in_list:
            self.error(f'invalid value for parameter "{param_name}"; "{value}" is not in list: {self.in_list}')
        if self.path_exists and not exists(value):
            self.error(f'"{param_name}" file does not exist: {value}')
        return True |
def p_gate_op_1(self, program):
"""
gate_op : CX id ',' id ';'
"""
program[0] = node.Cnot([program[2], program[4]])
self.verify_declared_bit(program[2])
self.verify_declared_bit(program[4])
        self.verify_distinct([program[2], program[4]]) | gate_op : CX id ',' id ';' | Below is the instruction that describes the task:
### Input:
gate_op : CX id ',' id ';'
### Response:
def p_gate_op_1(self, program):
        """
        gate_op : CX id ',' id ';'
        """
        # NOTE: the docstring above is the PLY/yacc grammar production for this
        # rule -- it is functional, not documentation; do not edit it.
        program[0] = node.Cnot([program[2], program[4]])
        # Both CX operands must be declared and must be distinct bits.
        self.verify_declared_bit(program[2])
        self.verify_declared_bit(program[4])
        self.verify_distinct([program[2], program[4]]) |
def query(self, string, repeat_n_times=None):
"""
This method performs the operations onto self.g
:param string: The list of operations to perform. The sequences of commands should be separated by a semicolon
An example might be
CREATE {'tag': 'PERSON', 'text': 'joseph'}(v1), {'relation': 'LIVES_AT'}(v1,v2),
{'tag': 'PLACE', 'text': 'London'}(v2)
MATCH {}(_a), {'relation': 'LIVES_AT'}(_a,_b), {}(_b)
WHERE (= (get _a "text") "joseph")
RETURN _a,_b;
:param repeat_n_times: The maximum number of times the graph is queried. It sets the maximum length of
the return list. If None then the value is set by the function
self.__determine_how_many_times_to_repeat_query(string)
:return: If the RETURN command is called with a list of variables names, a list of JSON with
the corresponding properties is returned. If the RETURN command is used alone, a list with the entire
graph is returned. Otherwise it returns an empty list
"""
if not repeat_n_times:
repeat_n_times = self.__determine_how_many_times_to_repeat_query(string)
lines = self.__get_command_lines(string)
return_list = []
for line in lines:
lst = self.__query_n_times(line, repeat_n_times)
if lst and lst[0]:
return_list = lst
return return_list | This method performs the operations onto self.g
:param string: The list of operations to perform. The sequences of commands should be separated by a semicolon
An example might be
CREATE {'tag': 'PERSON', 'text': 'joseph'}(v1), {'relation': 'LIVES_AT'}(v1,v2),
{'tag': 'PLACE', 'text': 'London'}(v2)
MATCH {}(_a), {'relation': 'LIVES_AT'}(_a,_b), {}(_b)
WHERE (= (get _a "text") "joseph")
RETURN _a,_b;
:param repeat_n_times: The maximum number of times the graph is queried. It sets the maximum length of
the return list. If None then the value is set by the function
self.__determine_how_many_times_to_repeat_query(string)
:return: If the RETURN command is called with a list of variables names, a list of JSON with
the corresponding properties is returned. If the RETURN command is used alone, a list with the entire
    graph is returned. Otherwise it returns an empty list | Below is the instruction that describes the task:
### Input:
This method performs the operations onto self.g
:param string: The list of operations to perform. The sequences of commands should be separated by a semicolon
An example might be
CREATE {'tag': 'PERSON', 'text': 'joseph'}(v1), {'relation': 'LIVES_AT'}(v1,v2),
{'tag': 'PLACE', 'text': 'London'}(v2)
MATCH {}(_a), {'relation': 'LIVES_AT'}(_a,_b), {}(_b)
WHERE (= (get _a "text") "joseph")
RETURN _a,_b;
:param repeat_n_times: The maximum number of times the graph is queried. It sets the maximum length of
the return list. If None then the value is set by the function
self.__determine_how_many_times_to_repeat_query(string)
:return: If the RETURN command is called with a list of variables names, a list of JSON with
the corresponding properties is returned. If the RETURN command is used alone, a list with the entire
graph is returned. Otherwise it returns an empty list
### Response:
def query(self, string, repeat_n_times=None):
"""
This method performs the operations onto self.g
:param string: The list of operations to perform. The sequences of commands should be separated by a semicolon
An example might be
CREATE {'tag': 'PERSON', 'text': 'joseph'}(v1), {'relation': 'LIVES_AT'}(v1,v2),
{'tag': 'PLACE', 'text': 'London'}(v2)
MATCH {}(_a), {'relation': 'LIVES_AT'}(_a,_b), {}(_b)
WHERE (= (get _a "text") "joseph")
RETURN _a,_b;
:param repeat_n_times: The maximum number of times the graph is queried. It sets the maximum length of
the return list. If None then the value is set by the function
self.__determine_how_many_times_to_repeat_query(string)
:return: If the RETURN command is called with a list of variables names, a list of JSON with
the corresponding properties is returned. If the RETURN command is used alone, a list with the entire
graph is returned. Otherwise it returns an empty list
"""
if not repeat_n_times:
repeat_n_times = self.__determine_how_many_times_to_repeat_query(string)
lines = self.__get_command_lines(string)
return_list = []
for line in lines:
lst = self.__query_n_times(line, repeat_n_times)
if lst and lst[0]:
return_list = lst
return return_list |
def _add_eutils_api_key(url):
"""Adds eutils api key to the query
:param url: eutils url with a query string
:return: url with api_key parameter set to the value of environment
variable 'NCBI_API_KEY' if available
"""
apikey = os.environ.get("NCBI_API_KEY")
if apikey:
url += "&api_key={apikey}".format(apikey=apikey)
return url | Adds eutils api key to the query
:param url: eutils url with a query string
:return: url with api_key parameter set to the value of environment
variable 'NCBI_API_KEY' if available | Below is the the instruction that describes the task:
### Input:
Adds eutils api key to the query
:param url: eutils url with a query string
:return: url with api_key parameter set to the value of environment
variable 'NCBI_API_KEY' if available
### Response:
def _add_eutils_api_key(url):
"""Adds eutils api key to the query
:param url: eutils url with a query string
:return: url with api_key parameter set to the value of environment
variable 'NCBI_API_KEY' if available
"""
apikey = os.environ.get("NCBI_API_KEY")
if apikey:
url += "&api_key={apikey}".format(apikey=apikey)
return url |
def focusOutEvent(self, event):
""" The default 'focusOutEvent' implementation.
"""
widget = self.widget
type(widget).focusOutEvent(widget, event)
self.declaration.focus_lost() | The default 'focusOutEvent' implementation. | Below is the the instruction that describes the task:
### Input:
The default 'focusOutEvent' implementation.
### Response:
def focusOutEvent(self, event):
""" The default 'focusOutEvent' implementation.
"""
widget = self.widget
type(widget).focusOutEvent(widget, event)
self.declaration.focus_lost() |
def get_rmsd(self, mol1, mol2):
"""
Get RMSD between two molecule with arbitrary atom order.
Returns:
RMSD if topology of the two molecules are the same
Infinite if the topology is different
"""
label1, label2 = self._mapper.uniform_labels(mol1, mol2)
if label1 is None or label2 is None:
return float("Inf")
return self._calc_rms(mol1, mol2, label1, label2) | Get RMSD between two molecule with arbitrary atom order.
Returns:
RMSD if topology of the two molecules are the same
Infinite if the topology is different | Below is the the instruction that describes the task:
### Input:
Get RMSD between two molecule with arbitrary atom order.
Returns:
RMSD if topology of the two molecules are the same
Infinite if the topology is different
### Response:
def get_rmsd(self, mol1, mol2):
"""
Get RMSD between two molecule with arbitrary atom order.
Returns:
RMSD if topology of the two molecules are the same
Infinite if the topology is different
"""
label1, label2 = self._mapper.uniform_labels(mol1, mol2)
if label1 is None or label2 is None:
return float("Inf")
return self._calc_rms(mol1, mol2, label1, label2) |
def _get_gos_upper(self, ntpltgo1, max_upper, go2parentids):
"""Plot a GO DAG for the upper portion of a single Group of user GOs."""
# Get GO IDs which are in the hdrgo path
goids_possible = ntpltgo1.gosubdag.go2obj.keys()
# Get upper GO IDs which have the most descendants
return self._get_gosrcs_upper(goids_possible, max_upper, go2parentids) | Plot a GO DAG for the upper portion of a single Group of user GOs. | Below is the the instruction that describes the task:
### Input:
Plot a GO DAG for the upper portion of a single Group of user GOs.
### Response:
def _get_gos_upper(self, ntpltgo1, max_upper, go2parentids):
"""Plot a GO DAG for the upper portion of a single Group of user GOs."""
# Get GO IDs which are in the hdrgo path
goids_possible = ntpltgo1.gosubdag.go2obj.keys()
# Get upper GO IDs which have the most descendants
return self._get_gosrcs_upper(goids_possible, max_upper, go2parentids) |
def run_line_magic(self, magic_name, line):
"""Execute the given line magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the input line as a single string.
"""
fn = self.find_line_magic(magic_name)
if fn is None:
cm = self.find_cell_magic(magic_name)
etpl = "Line magic function `%%%s` not found%s."
extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, '
'did you mean that instead?)' % magic_name )
error(etpl % (magic_name, extra))
else:
# Note: this is the distance in the stack to the user's frame.
# This will need to be updated if the internal calling logic gets
# refactored, or else we'll be expanding the wrong variables.
stack_depth = 2
magic_arg_s = self.var_expand(line, stack_depth)
# Put magic args in a list so we can call with f(*a) syntax
args = [magic_arg_s]
# Grab local namespace if we need it:
if getattr(fn, "needs_local_scope", False):
args.append(sys._getframe(stack_depth).f_locals)
with self.builtin_trap:
result = fn(*args)
return result | Execute the given line magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the input line as a single string. | Below is the the instruction that describes the task:
### Input:
Execute the given line magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the input line as a single string.
### Response:
def run_line_magic(self, magic_name, line):
"""Execute the given line magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the input line as a single string.
"""
fn = self.find_line_magic(magic_name)
if fn is None:
cm = self.find_cell_magic(magic_name)
etpl = "Line magic function `%%%s` not found%s."
extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, '
'did you mean that instead?)' % magic_name )
error(etpl % (magic_name, extra))
else:
# Note: this is the distance in the stack to the user's frame.
# This will need to be updated if the internal calling logic gets
# refactored, or else we'll be expanding the wrong variables.
stack_depth = 2
magic_arg_s = self.var_expand(line, stack_depth)
# Put magic args in a list so we can call with f(*a) syntax
args = [magic_arg_s]
# Grab local namespace if we need it:
if getattr(fn, "needs_local_scope", False):
args.append(sys._getframe(stack_depth).f_locals)
with self.builtin_trap:
result = fn(*args)
return result |
def _discover_uri_type(uri):
"""Given a ``uri``, determine if it is internal or external."""
parsed_uri = urlparse(uri)
if not parsed_uri.netloc:
if parsed_uri.scheme == 'data':
type_ = INLINE_REFERENCE_TYPE
else:
type_ = INTERNAL_REFERENCE_TYPE
else:
type_ = EXTERNAL_REFERENCE_TYPE
return type_ | Given a ``uri``, determine if it is internal or external. | Below is the the instruction that describes the task:
### Input:
Given a ``uri``, determine if it is internal or external.
### Response:
def _discover_uri_type(uri):
"""Given a ``uri``, determine if it is internal or external."""
parsed_uri = urlparse(uri)
if not parsed_uri.netloc:
if parsed_uri.scheme == 'data':
type_ = INLINE_REFERENCE_TYPE
else:
type_ = INTERNAL_REFERENCE_TYPE
else:
type_ = EXTERNAL_REFERENCE_TYPE
return type_ |
def get_session_id(self):
"""
get a unique id (shortish string) to allow simple aggregation
of log records from multiple sources. This id is used for the
life of the running program to allow extraction from all logs.
WARING - this can give duplicate sessions when 2 apps hit it
at the same time.
"""
max_session = '0'
try:
with open(self.log_folder + os.sep + '_sessions.txt', 'r') as f:
for _ in f:
txt = f.readline()
if txt.strip('\n') != '':
max_session = txt
except Exception:
max_session = '1'
this_session = str(int(max_session) + random.randint(9,100)).zfill(9) # not a great way to ensure uniqueness - TODO FIX
with open(self.log_folder + os.sep + '_sessions.txt', 'a') as f2:
f2.write(this_session + '\n')
return this_session | get a unique id (shortish string) to allow simple aggregation
of log records from multiple sources. This id is used for the
life of the running program to allow extraction from all logs.
WARING - this can give duplicate sessions when 2 apps hit it
at the same time. | Below is the the instruction that describes the task:
### Input:
get a unique id (shortish string) to allow simple aggregation
of log records from multiple sources. This id is used for the
life of the running program to allow extraction from all logs.
WARING - this can give duplicate sessions when 2 apps hit it
at the same time.
### Response:
def get_session_id(self):
"""
get a unique id (shortish string) to allow simple aggregation
of log records from multiple sources. This id is used for the
life of the running program to allow extraction from all logs.
WARING - this can give duplicate sessions when 2 apps hit it
at the same time.
"""
max_session = '0'
try:
with open(self.log_folder + os.sep + '_sessions.txt', 'r') as f:
for _ in f:
txt = f.readline()
if txt.strip('\n') != '':
max_session = txt
except Exception:
max_session = '1'
this_session = str(int(max_session) + random.randint(9,100)).zfill(9) # not a great way to ensure uniqueness - TODO FIX
with open(self.log_folder + os.sep + '_sessions.txt', 'a') as f2:
f2.write(this_session + '\n')
return this_session |
def lv_to_pypsa(network):
"""
Convert LV grid topology to PyPSA representation
Includes grid topology of all LV grids of :attr:`~.grid.grid.Grid.lv_grids`
Parameters
----------
network : Network
eDisGo grid container
Returns
-------
dict of :pandas:`pandas.DataFrame<dataframe>`
A DataFrame for each type of PyPSA components constituting the grid
topology. Keys included
* 'Generator'
* 'Load'
* 'Line'
* 'BranchTee'
* 'StorageUnit'
"""
generators = []
loads = []
branch_tees = []
lines = []
lv_stations = []
storages = []
for lv_grid in network.mv_grid.lv_grids:
generators.extend(lv_grid.graph.nodes_by_attribute('generator'))
loads.extend(lv_grid.graph.nodes_by_attribute('load'))
branch_tees.extend(lv_grid.graph.nodes_by_attribute('branch_tee'))
lines.extend(lv_grid.graph.lines())
lv_stations.extend(lv_grid.graph.nodes_by_attribute('lv_station'))
storages.extend(lv_grid.graph.nodes_by_attribute('storage'))
omega = 2 * pi * 50
generator = {'name': [],
'bus': [],
'control': [],
'p_nom': [],
'type': [],
'p_nom_extendable': [],
'p_nom_min': [],
'p_nom_max': [],
'capital_cost': []
}
bus = {'name': [], 'v_nom': [], 'x': [], 'y': []}
load = {'name': [], 'bus': []}
line = {'name': [],
'bus0': [],
'bus1': [],
'type': [],
'x': [],
'r': [],
's_nom': [],
's_nom_min': [],
's_max_pu': [],
's_nom_extendable': [],
'capital_cost': [],
'length': []}
storage = {
'name': [],
'bus': [],
'p_nom': [],
'p_nom_extendable': [],
'p_nom_min': [],
'p_nom_max': [],
'capital_cost': [],
'max_hours': []}
# create dictionary representing generators and associated buses
for gen in generators:
bus_name = '_'.join(['Bus', repr(gen)])
generator['name'].append(repr(gen))
generator['bus'].append(bus_name)
generator['control'].append('PQ')
generator['p_nom'].append(gen.nominal_capacity / 1e3)
generator['type'].append('_'.join([gen.type, gen.subtype]))
generator['p_nom_extendable'].append(False)
generator['p_nom_min'].append(0) # 0.3
generator['p_nom_max'].append(0)
generator['capital_cost'].append(0)
bus['name'].append(bus_name)
bus['v_nom'].append(gen.grid.voltage_nom)
bus['x'].append(None)
bus['y'].append(None)
# create dictionary representing branch tees
for bt in branch_tees:
bus['name'].append('_'.join(['Bus', repr(bt)]))
bus['v_nom'].append(bt.grid.voltage_nom)
bus['x'].append(None)
bus['y'].append(None)
# create dataframes representing loads and associated buses
for lo in loads:
bus_name = '_'.join(['Bus', repr(lo)])
load['name'].append(repr(lo))
load['bus'].append(bus_name)
bus['name'].append(bus_name)
bus['v_nom'].append(lo.grid.voltage_nom)
bus['x'].append(None)
bus['y'].append(None)
# create dataframe for lines
for l in lines:
line['name'].append(repr(l['line']))
if l['adj_nodes'][0] in lv_stations:
line['bus0'].append(
'_'.join(['Bus', l['adj_nodes'][0].__repr__(side='lv')]))
else:
line['bus0'].append('_'.join(['Bus', repr(l['adj_nodes'][0])]))
if l['adj_nodes'][1] in lv_stations:
line['bus1'].append(
'_'.join(['Bus', l['adj_nodes'][1].__repr__(side='lv')]))
else:
line['bus1'].append('_'.join(['Bus', repr(l['adj_nodes'][1])]))
line['type'].append("")
line['x'].append(
l['line'].type['L'] * omega / 1e3 * l['line'].length)
line['r'].append(l['line'].type['R'] * l['line'].length)
s_nom = sqrt(3) * l['line'].type['I_max_th'] * \
l['line'].type['U_n'] / 1e3
line['s_nom'].append(s_nom)
line['s_nom_min'].append(s_nom)
line['s_max_pu'].append(0.6)
line['s_nom_extendable'].append(True)
line['capital_cost'].append(100)
line['length'].append(l['line'].length)
lv_components = {
'Generator': pd.DataFrame(generator).set_index('name'),
'Bus': pd.DataFrame(bus).set_index('name'),
'Load': pd.DataFrame(load).set_index('name'),
'Line': pd.DataFrame(line).set_index('name'),
'StorageUnit': pd.DataFrame(storage).set_index('name')}
return lv_components | Convert LV grid topology to PyPSA representation
Includes grid topology of all LV grids of :attr:`~.grid.grid.Grid.lv_grids`
Parameters
----------
network : Network
eDisGo grid container
Returns
-------
dict of :pandas:`pandas.DataFrame<dataframe>`
A DataFrame for each type of PyPSA components constituting the grid
topology. Keys included
* 'Generator'
* 'Load'
* 'Line'
* 'BranchTee'
* 'StorageUnit' | Below is the the instruction that describes the task:
### Input:
Convert LV grid topology to PyPSA representation
Includes grid topology of all LV grids of :attr:`~.grid.grid.Grid.lv_grids`
Parameters
----------
network : Network
eDisGo grid container
Returns
-------
dict of :pandas:`pandas.DataFrame<dataframe>`
A DataFrame for each type of PyPSA components constituting the grid
topology. Keys included
* 'Generator'
* 'Load'
* 'Line'
* 'BranchTee'
* 'StorageUnit'
### Response:
def lv_to_pypsa(network):
"""
Convert LV grid topology to PyPSA representation
Includes grid topology of all LV grids of :attr:`~.grid.grid.Grid.lv_grids`
Parameters
----------
network : Network
eDisGo grid container
Returns
-------
dict of :pandas:`pandas.DataFrame<dataframe>`
A DataFrame for each type of PyPSA components constituting the grid
topology. Keys included
* 'Generator'
* 'Load'
* 'Line'
* 'BranchTee'
* 'StorageUnit'
"""
generators = []
loads = []
branch_tees = []
lines = []
lv_stations = []
storages = []
for lv_grid in network.mv_grid.lv_grids:
generators.extend(lv_grid.graph.nodes_by_attribute('generator'))
loads.extend(lv_grid.graph.nodes_by_attribute('load'))
branch_tees.extend(lv_grid.graph.nodes_by_attribute('branch_tee'))
lines.extend(lv_grid.graph.lines())
lv_stations.extend(lv_grid.graph.nodes_by_attribute('lv_station'))
storages.extend(lv_grid.graph.nodes_by_attribute('storage'))
omega = 2 * pi * 50
generator = {'name': [],
'bus': [],
'control': [],
'p_nom': [],
'type': [],
'p_nom_extendable': [],
'p_nom_min': [],
'p_nom_max': [],
'capital_cost': []
}
bus = {'name': [], 'v_nom': [], 'x': [], 'y': []}
load = {'name': [], 'bus': []}
line = {'name': [],
'bus0': [],
'bus1': [],
'type': [],
'x': [],
'r': [],
's_nom': [],
's_nom_min': [],
's_max_pu': [],
's_nom_extendable': [],
'capital_cost': [],
'length': []}
storage = {
'name': [],
'bus': [],
'p_nom': [],
'p_nom_extendable': [],
'p_nom_min': [],
'p_nom_max': [],
'capital_cost': [],
'max_hours': []}
# create dictionary representing generators and associated buses
for gen in generators:
bus_name = '_'.join(['Bus', repr(gen)])
generator['name'].append(repr(gen))
generator['bus'].append(bus_name)
generator['control'].append('PQ')
generator['p_nom'].append(gen.nominal_capacity / 1e3)
generator['type'].append('_'.join([gen.type, gen.subtype]))
generator['p_nom_extendable'].append(False)
generator['p_nom_min'].append(0) # 0.3
generator['p_nom_max'].append(0)
generator['capital_cost'].append(0)
bus['name'].append(bus_name)
bus['v_nom'].append(gen.grid.voltage_nom)
bus['x'].append(None)
bus['y'].append(None)
# create dictionary representing branch tees
for bt in branch_tees:
bus['name'].append('_'.join(['Bus', repr(bt)]))
bus['v_nom'].append(bt.grid.voltage_nom)
bus['x'].append(None)
bus['y'].append(None)
# create dataframes representing loads and associated buses
for lo in loads:
bus_name = '_'.join(['Bus', repr(lo)])
load['name'].append(repr(lo))
load['bus'].append(bus_name)
bus['name'].append(bus_name)
bus['v_nom'].append(lo.grid.voltage_nom)
bus['x'].append(None)
bus['y'].append(None)
# create dataframe for lines
for l in lines:
line['name'].append(repr(l['line']))
if l['adj_nodes'][0] in lv_stations:
line['bus0'].append(
'_'.join(['Bus', l['adj_nodes'][0].__repr__(side='lv')]))
else:
line['bus0'].append('_'.join(['Bus', repr(l['adj_nodes'][0])]))
if l['adj_nodes'][1] in lv_stations:
line['bus1'].append(
'_'.join(['Bus', l['adj_nodes'][1].__repr__(side='lv')]))
else:
line['bus1'].append('_'.join(['Bus', repr(l['adj_nodes'][1])]))
line['type'].append("")
line['x'].append(
l['line'].type['L'] * omega / 1e3 * l['line'].length)
line['r'].append(l['line'].type['R'] * l['line'].length)
s_nom = sqrt(3) * l['line'].type['I_max_th'] * \
l['line'].type['U_n'] / 1e3
line['s_nom'].append(s_nom)
line['s_nom_min'].append(s_nom)
line['s_max_pu'].append(0.6)
line['s_nom_extendable'].append(True)
line['capital_cost'].append(100)
line['length'].append(l['line'].length)
lv_components = {
'Generator': pd.DataFrame(generator).set_index('name'),
'Bus': pd.DataFrame(bus).set_index('name'),
'Load': pd.DataFrame(load).set_index('name'),
'Line': pd.DataFrame(line).set_index('name'),
'StorageUnit': pd.DataFrame(storage).set_index('name')}
return lv_components |
def get_column_header_for_number(self, column_var_names, header=False):
"""This function subtracts 1 from inputted column number to comply
with programmers counting (i.e. from 0, not from 1). For TSV data."""
if not header:
header = self.oldheader
for col in column_var_names:
value = getattr(self, col)
if not value or value is None:
continue
setattr(self, col, self.number_to_headerfield(value, header)) | This function subtracts 1 from inputted column number to comply
with programmers counting (i.e. from 0, not from 1). For TSV data. | Below is the the instruction that describes the task:
### Input:
This function subtracts 1 from inputted column number to comply
with programmers counting (i.e. from 0, not from 1). For TSV data.
### Response:
def get_column_header_for_number(self, column_var_names, header=False):
"""This function subtracts 1 from inputted column number to comply
with programmers counting (i.e. from 0, not from 1). For TSV data."""
if not header:
header = self.oldheader
for col in column_var_names:
value = getattr(self, col)
if not value or value is None:
continue
setattr(self, col, self.number_to_headerfield(value, header)) |
def run_mutation(config, filename, mutation_id):
"""
:type config: Config
:type filename: str
:type mutation_id: MutationID
:return: (computed or cached) status of the tested mutant
:rtype: str
"""
context = Context(
mutation_id=mutation_id,
filename=filename,
exclude=config.exclude_callback,
dict_synonyms=config.dict_synonyms,
config=config,
)
cached_status = cached_mutation_status(filename, mutation_id, config.hash_of_tests)
if cached_status == BAD_SURVIVED:
config.surviving_mutants += 1
elif cached_status == BAD_TIMEOUT:
config.surviving_mutants_timeout += 1
elif cached_status == OK_KILLED:
config.killed_mutants += 1
elif cached_status == OK_SUSPICIOUS:
config.suspicious_mutants += 1
else:
assert cached_status == UNTESTED, cached_status
config.print_progress()
if cached_status != UNTESTED:
return cached_status
if config.pre_mutation:
result = subprocess.check_output(config.pre_mutation, shell=True).decode().strip()
if result:
print(result)
try:
number_of_mutations_performed = mutate_file(
backup=True,
context=context
)
assert number_of_mutations_performed
start = time()
try:
survived = tests_pass(config)
except TimeoutError:
context.config.surviving_mutants_timeout += 1
return BAD_TIMEOUT
time_elapsed = time() - start
if time_elapsed > config.test_time_base + (config.baseline_time_elapsed * config.test_time_multipler):
config.suspicious_mutants += 1
return OK_SUSPICIOUS
if survived:
context.config.surviving_mutants += 1
return BAD_SURVIVED
else:
context.config.killed_mutants += 1
return OK_KILLED
finally:
move(filename + '.bak', filename)
if config.post_mutation:
result = subprocess.check_output(config.post_mutation, shell=True).decode().strip()
if result:
print(result) | :type config: Config
:type filename: str
:type mutation_id: MutationID
:return: (computed or cached) status of the tested mutant
:rtype: str | Below is the the instruction that describes the task:
### Input:
:type config: Config
:type filename: str
:type mutation_id: MutationID
:return: (computed or cached) status of the tested mutant
:rtype: str
### Response:
def run_mutation(config, filename, mutation_id):
"""
:type config: Config
:type filename: str
:type mutation_id: MutationID
:return: (computed or cached) status of the tested mutant
:rtype: str
"""
context = Context(
mutation_id=mutation_id,
filename=filename,
exclude=config.exclude_callback,
dict_synonyms=config.dict_synonyms,
config=config,
)
cached_status = cached_mutation_status(filename, mutation_id, config.hash_of_tests)
if cached_status == BAD_SURVIVED:
config.surviving_mutants += 1
elif cached_status == BAD_TIMEOUT:
config.surviving_mutants_timeout += 1
elif cached_status == OK_KILLED:
config.killed_mutants += 1
elif cached_status == OK_SUSPICIOUS:
config.suspicious_mutants += 1
else:
assert cached_status == UNTESTED, cached_status
config.print_progress()
if cached_status != UNTESTED:
return cached_status
if config.pre_mutation:
result = subprocess.check_output(config.pre_mutation, shell=True).decode().strip()
if result:
print(result)
try:
number_of_mutations_performed = mutate_file(
backup=True,
context=context
)
assert number_of_mutations_performed
start = time()
try:
survived = tests_pass(config)
except TimeoutError:
context.config.surviving_mutants_timeout += 1
return BAD_TIMEOUT
time_elapsed = time() - start
if time_elapsed > config.test_time_base + (config.baseline_time_elapsed * config.test_time_multipler):
config.suspicious_mutants += 1
return OK_SUSPICIOUS
if survived:
context.config.surviving_mutants += 1
return BAD_SURVIVED
else:
context.config.killed_mutants += 1
return OK_KILLED
finally:
move(filename + '.bak', filename)
if config.post_mutation:
result = subprocess.check_output(config.post_mutation, shell=True).decode().strip()
if result:
print(result) |
def _buildTraitCovar(self, trait_covar_type='freeform', rank=1, fixed_trait_covar=None, jitter=1e-4):
"""
Internal functions that builds the trait covariance matrix using the LIMIX framework
Args:
trait_covar_type: type of covaraince to use. Default 'freeform'. possible values are
rank: rank of a possible lowrank component (default 1)
fixed_trait_covar: PxP matrix for the (predefined) trait-to-trait covariance matrix if fixed type is used
jitter: diagonal contribution added to freeform covariance matrices for regularization
Returns:
LIMIX::Covariance for Trait covariance matrix
"""
assert trait_covar_type in ['freeform', 'diag', 'lowrank', 'lowrank_id', 'lowrank_diag', 'block', 'block_id', 'block_diag', 'fixed'], 'VarianceDecomposition:: trait_covar_type not valid'
if trait_covar_type=='freeform':
cov = FreeFormCov(self.P, jitter=jitter)
elif trait_covar_type=='fixed':
assert fixed_trait_covar is not None, 'VarianceDecomposition:: set fixed_trait_covar'
assert fixed_trait_covar.shape[0]==self.P, 'VarianceDecomposition:: Incompatible shape for fixed_trait_covar'
assert fixed_trait_covar.shape[1]==self.P, 'VarianceDecomposition:: Incompatible shape for fixed_trait_covar'
cov = FixedCov(fixed_trait_covar)
elif trait_covar_type=='diag':
cov = DiagonalCov(self.P)
elif trait_covar_type=='lowrank':
cov = LowRankCov(self.P, rank=rank)
elif trait_covar_type=='lowrank_id':
cov = SumCov(LowRankCov(self.P, rank=rank), FixedCov(sp.eye(self.P)))
elif trait_covar_type=='lowrank_diag':
cov = SumCov(LowRankCov(self.P, rank=rank), DiagonalCov(self.P))
elif trait_covar_type=='block':
cov = FixedCov(sp.ones([self.P, self.P]))
elif trait_covar_type=='block_id':
cov1 = FixedCov(sp.ones([self.P, self.P]))
cov2 = FixedCov(sp.eye(self.P))
cov = SumCov(cov1, cov2)
elif trait_covar_type=='block_diag':
cov1 = FixedCov(sp.ones([self.P, self.P]))
cov2 = FixedCov(sp.eye(self.P))
cov = SumCov(cov1, cov2)
return cov | Internal functions that builds the trait covariance matrix using the LIMIX framework
Args:
trait_covar_type: type of covaraince to use. Default 'freeform'. possible values are
rank: rank of a possible lowrank component (default 1)
fixed_trait_covar: PxP matrix for the (predefined) trait-to-trait covariance matrix if fixed type is used
jitter: diagonal contribution added to freeform covariance matrices for regularization
Returns:
LIMIX::Covariance for Trait covariance matrix | Below is the the instruction that describes the task:
### Input:
Internal functions that builds the trait covariance matrix using the LIMIX framework
Args:
trait_covar_type: type of covaraince to use. Default 'freeform'. possible values are
rank: rank of a possible lowrank component (default 1)
fixed_trait_covar: PxP matrix for the (predefined) trait-to-trait covariance matrix if fixed type is used
jitter: diagonal contribution added to freeform covariance matrices for regularization
Returns:
LIMIX::Covariance for Trait covariance matrix
### Response:
def _buildTraitCovar(self, trait_covar_type='freeform', rank=1, fixed_trait_covar=None, jitter=1e-4):
"""
Internal functions that builds the trait covariance matrix using the LIMIX framework
Args:
trait_covar_type: type of covaraince to use. Default 'freeform'. possible values are
rank: rank of a possible lowrank component (default 1)
fixed_trait_covar: PxP matrix for the (predefined) trait-to-trait covariance matrix if fixed type is used
jitter: diagonal contribution added to freeform covariance matrices for regularization
Returns:
LIMIX::Covariance for Trait covariance matrix
"""
assert trait_covar_type in ['freeform', 'diag', 'lowrank', 'lowrank_id', 'lowrank_diag', 'block', 'block_id', 'block_diag', 'fixed'], 'VarianceDecomposition:: trait_covar_type not valid'
if trait_covar_type=='freeform':
cov = FreeFormCov(self.P, jitter=jitter)
elif trait_covar_type=='fixed':
assert fixed_trait_covar is not None, 'VarianceDecomposition:: set fixed_trait_covar'
assert fixed_trait_covar.shape[0]==self.P, 'VarianceDecomposition:: Incompatible shape for fixed_trait_covar'
assert fixed_trait_covar.shape[1]==self.P, 'VarianceDecomposition:: Incompatible shape for fixed_trait_covar'
cov = FixedCov(fixed_trait_covar)
elif trait_covar_type=='diag':
cov = DiagonalCov(self.P)
elif trait_covar_type=='lowrank':
cov = LowRankCov(self.P, rank=rank)
elif trait_covar_type=='lowrank_id':
cov = SumCov(LowRankCov(self.P, rank=rank), FixedCov(sp.eye(self.P)))
elif trait_covar_type=='lowrank_diag':
cov = SumCov(LowRankCov(self.P, rank=rank), DiagonalCov(self.P))
elif trait_covar_type=='block':
cov = FixedCov(sp.ones([self.P, self.P]))
elif trait_covar_type=='block_id':
cov1 = FixedCov(sp.ones([self.P, self.P]))
cov2 = FixedCov(sp.eye(self.P))
cov = SumCov(cov1, cov2)
elif trait_covar_type=='block_diag':
cov1 = FixedCov(sp.ones([self.P, self.P]))
cov2 = FixedCov(sp.eye(self.P))
cov = SumCov(cov1, cov2)
return cov |
def fix_sign_with_K(dataframe):
"""Swap electrode denotations so that geometrical (K) factors become
positive. Also, swap signs of all parameters affected by this process.
Affected parameters, at the moment, are:
* K
* r
* Vmn
* Zt
* rho_a
* rpha
Parameters
----------
dataframe : pandas.DateFrame
dataframe holding the data
Returns
-------
dataframe : pandas.DateFrame
the fixed dataframe
"""
# check for required columns
if 'k' not in dataframe or 'r' not in dataframe:
raise Exception('k and r columns required!')
indices_negative = (dataframe['k'] < 0) & (dataframe['r'] < 0)
if np.where(indices_negative)[0].size == 0:
# nothing to do here
return dataframe
dataframe.ix[indices_negative, ['k', 'r']] *= -1
# switch potential electrodes
indices_switched_ab = indices_negative & (dataframe['a'] > dataframe['b'])
indices_switched_mn = indices_negative & (dataframe['a'] < dataframe['b'])
dataframe.ix[indices_switched_ab, ['a', 'b']] = dataframe.ix[
indices_switched_ab, ['b', 'a']
].values
dataframe.ix[indices_switched_mn, ['m', 'n']] = dataframe.ix[
indices_switched_mn, ['n', 'm']
].values
# switch sign of voltages
if 'Vmn' in dataframe:
dataframe.ix[indices_negative, 'Vmn'] *= -1
if 'Zt' in dataframe:
dataframe.ix[indices_negative, 'Zt'] *= -1
if 'rho_a' in dataframe:
dataframe['rho_a'] = dataframe['r'] * dataframe['k']
if 'Mx' in dataframe:
# for now we have to loop here because we store numpy arrays within
# each cell
for index in np.where(indices_negative)[0]:
# import IPython
# IPython.embed()
# exit()
dataframe.at[index, 'Mx'] *= -1
# recompute phase values
if 'rpha' in dataframe:
if 'Zt' in dataframe:
# recompute
dataframe['rpha'] = np.arctan2(
dataframe['Zt'].imag, dataframe['Zt'].real
) * 1e3
else:
raise Exception(
'Recomputation of phase without Zt not implemented yet. ' +
'See source code for more information'
)
"""
when the complex number is located in the fourth sector instead of
the first, this corresponds to a phase shift by pi. For all values
where magnitude < 0 and phase < 3000 mrad reverse this shift by pi
by multiplying the complex number by -1:
new_value = - 1 * (Magnitude * exp(i phi))
Test this function by setting one measurement to
-85.02069 -183.25 in radic column 6 and 7, should get -58 mrad when
converted
"""
# Make sure a, b, m, n stay integers.
for col in ('a', 'b', 'm', 'n'):
dataframe[col] = dataframe[col].astype(int)
return dataframe | Swap electrode denotations so that geometrical (K) factors become
positive. Also, swap signs of all parameters affected by this process.
Affected parameters, at the moment, are:
* K
* r
* Vmn
* Zt
* rho_a
* rpha
Parameters
----------
dataframe : pandas.DateFrame
dataframe holding the data
Returns
-------
dataframe : pandas.DateFrame
the fixed dataframe | Below is the the instruction that describes the task:
### Input:
Swap electrode denotations so that geometrical (K) factors become
positive. Also, swap signs of all parameters affected by this process.
Affected parameters, at the moment, are:
* K
* r
* Vmn
* Zt
* rho_a
* rpha
Parameters
----------
dataframe : pandas.DateFrame
dataframe holding the data
Returns
-------
dataframe : pandas.DateFrame
the fixed dataframe
### Response:
def fix_sign_with_K(dataframe):
"""Swap electrode denotations so that geometrical (K) factors become
positive. Also, swap signs of all parameters affected by this process.
Affected parameters, at the moment, are:
* K
* r
* Vmn
* Zt
* rho_a
* rpha
Parameters
----------
dataframe : pandas.DateFrame
dataframe holding the data
Returns
-------
dataframe : pandas.DateFrame
the fixed dataframe
"""
# check for required columns
if 'k' not in dataframe or 'r' not in dataframe:
raise Exception('k and r columns required!')
indices_negative = (dataframe['k'] < 0) & (dataframe['r'] < 0)
if np.where(indices_negative)[0].size == 0:
# nothing to do here
return dataframe
dataframe.ix[indices_negative, ['k', 'r']] *= -1
# switch potential electrodes
indices_switched_ab = indices_negative & (dataframe['a'] > dataframe['b'])
indices_switched_mn = indices_negative & (dataframe['a'] < dataframe['b'])
dataframe.ix[indices_switched_ab, ['a', 'b']] = dataframe.ix[
indices_switched_ab, ['b', 'a']
].values
dataframe.ix[indices_switched_mn, ['m', 'n']] = dataframe.ix[
indices_switched_mn, ['n', 'm']
].values
# switch sign of voltages
if 'Vmn' in dataframe:
dataframe.ix[indices_negative, 'Vmn'] *= -1
if 'Zt' in dataframe:
dataframe.ix[indices_negative, 'Zt'] *= -1
if 'rho_a' in dataframe:
dataframe['rho_a'] = dataframe['r'] * dataframe['k']
if 'Mx' in dataframe:
# for now we have to loop here because we store numpy arrays within
# each cell
for index in np.where(indices_negative)[0]:
# import IPython
# IPython.embed()
# exit()
dataframe.at[index, 'Mx'] *= -1
# recompute phase values
if 'rpha' in dataframe:
if 'Zt' in dataframe:
# recompute
dataframe['rpha'] = np.arctan2(
dataframe['Zt'].imag, dataframe['Zt'].real
) * 1e3
else:
raise Exception(
'Recomputation of phase without Zt not implemented yet. ' +
'See source code for more information'
)
"""
when the complex number is located in the fourth sector instead of
the first, this corresponds to a phase shift by pi. For all values
where magnitude < 0 and phase < 3000 mrad reverse this shift by pi
by multiplying the complex number by -1:
new_value = - 1 * (Magnitude * exp(i phi))
Test this function by setting one measurement to
-85.02069 -183.25 in radic column 6 and 7, should get -58 mrad when
converted
"""
# Make sure a, b, m, n stay integers.
for col in ('a', 'b', 'm', 'n'):
dataframe[col] = dataframe[col].astype(int)
return dataframe |
async def claim_work(context):
"""Find and claim the next pending task in the queue, if any.
Args:
context (scriptworker.context.Context): the scriptworker context.
Returns:
dict: a dict containing a list of the task definitions of the tasks claimed.
"""
log.debug("Calling claimWork...")
payload = {
'workerGroup': context.config['worker_group'],
'workerId': context.config['worker_id'],
# Hardcode one task at a time. Make this a pref if we allow for
# parallel tasks in multiple `work_dir`s.
'tasks': 1,
}
try:
return await context.queue.claimWork(
context.config['provisioner_id'],
context.config['worker_type'],
payload
)
except (taskcluster.exceptions.TaskclusterFailure, aiohttp.ClientError) as exc:
log.warning("{} {}".format(exc.__class__, exc)) | Find and claim the next pending task in the queue, if any.
Args:
context (scriptworker.context.Context): the scriptworker context.
Returns:
dict: a dict containing a list of the task definitions of the tasks claimed. | Below is the the instruction that describes the task:
### Input:
Find and claim the next pending task in the queue, if any.
Args:
context (scriptworker.context.Context): the scriptworker context.
Returns:
dict: a dict containing a list of the task definitions of the tasks claimed.
### Response:
async def claim_work(context):
"""Find and claim the next pending task in the queue, if any.
Args:
context (scriptworker.context.Context): the scriptworker context.
Returns:
dict: a dict containing a list of the task definitions of the tasks claimed.
"""
log.debug("Calling claimWork...")
payload = {
'workerGroup': context.config['worker_group'],
'workerId': context.config['worker_id'],
# Hardcode one task at a time. Make this a pref if we allow for
# parallel tasks in multiple `work_dir`s.
'tasks': 1,
}
try:
return await context.queue.claimWork(
context.config['provisioner_id'],
context.config['worker_type'],
payload
)
except (taskcluster.exceptions.TaskclusterFailure, aiohttp.ClientError) as exc:
log.warning("{} {}".format(exc.__class__, exc)) |
def set_state(self, state):
"""
:param state: a boolean of true (on) or false ('off')
:return: nothing
"""
values = {"desired_state": {"powered": state}}
response = self.api_interface.set_device_state(self, values)
self._update_state_from_response(response) | :param state: a boolean of true (on) or false ('off')
:return: nothing | Below is the the instruction that describes the task:
### Input:
:param state: a boolean of true (on) or false ('off')
:return: nothing
### Response:
def set_state(self, state):
"""
:param state: a boolean of true (on) or false ('off')
:return: nothing
"""
values = {"desired_state": {"powered": state}}
response = self.api_interface.set_device_state(self, values)
self._update_state_from_response(response) |
def db_chainstate_get_block(cls, cur, block_height):
"""
Get the list of virtualchain transactions accepted at a given block.
Returns the list of rows, where each row is a dict.
"""
query = 'SELECT * FROM chainstate WHERE block_id = ? ORDER BY vtxindex;'
args = (block_height,)
rows = cls.db_query_execute(cur, query, args, verbose=False)
ret = []
for r in rows:
rowdata = {
'txid': str(r['txid']),
'block_id': r['block_id'],
'txindex': r['txindex'],
'vtxindex': r['vtxindex'],
'opcode': str(r['opcode']),
'data_hex': str(r['data_hex']),
'senders': simplejson.loads(r['senders']),
'tx_hex': str(r['tx_hex']),
'tx_merkle_path': str(r['tx_merkle_path']),
'fee': r['fee']
}
ret.append(rowdata)
return ret | Get the list of virtualchain transactions accepted at a given block.
Returns the list of rows, where each row is a dict. | Below is the the instruction that describes the task:
### Input:
Get the list of virtualchain transactions accepted at a given block.
Returns the list of rows, where each row is a dict.
### Response:
def db_chainstate_get_block(cls, cur, block_height):
"""
Get the list of virtualchain transactions accepted at a given block.
Returns the list of rows, where each row is a dict.
"""
query = 'SELECT * FROM chainstate WHERE block_id = ? ORDER BY vtxindex;'
args = (block_height,)
rows = cls.db_query_execute(cur, query, args, verbose=False)
ret = []
for r in rows:
rowdata = {
'txid': str(r['txid']),
'block_id': r['block_id'],
'txindex': r['txindex'],
'vtxindex': r['vtxindex'],
'opcode': str(r['opcode']),
'data_hex': str(r['data_hex']),
'senders': simplejson.loads(r['senders']),
'tx_hex': str(r['tx_hex']),
'tx_merkle_path': str(r['tx_merkle_path']),
'fee': r['fee']
}
ret.append(rowdata)
return ret |
def create(self, **kwargs):
"""
Creates entity instance and related Attr instances.
Note that while entity instances may filter schemata by fields, that
filtering does not take place here. Attribute of any schema will be saved
successfully as long as such schema exists.
Note that we cannot create attribute with no pre-defined schema because
we must know attribute type in order to properly put value into the DB.
"""
fields = self.model._meta.get_all_field_names()
schemata = dict((s.name, s) for s in self.model.get_schemata_for_model())
# check if all attributes are known
possible_names = set(fields) | set(schemata.keys())
wrong_names = set(kwargs.keys()) - possible_names
if wrong_names:
raise NameError('Cannot create %s: unknown attribute(s) "%s". '
'Available fields: (%s). Available schemata: (%s).'
% (self.model._meta.object_name, '", "'.join(wrong_names),
', '.join(fields), ', '.join(schemata)))
# init entity with fields
instance = self.model(**dict((k,v) for k,v in kwargs.items() if k in fields))
# set attributes; instance will check schemata on save
for name, value in kwargs.items():
setattr(instance, name, value)
# save instance and EAV attributes
instance.save(force_insert=True)
return instance | Creates entity instance and related Attr instances.
Note that while entity instances may filter schemata by fields, that
filtering does not take place here. Attribute of any schema will be saved
successfully as long as such schema exists.
Note that we cannot create attribute with no pre-defined schema because
we must know attribute type in order to properly put value into the DB. | Below is the the instruction that describes the task:
### Input:
Creates entity instance and related Attr instances.
Note that while entity instances may filter schemata by fields, that
filtering does not take place here. Attribute of any schema will be saved
successfully as long as such schema exists.
Note that we cannot create attribute with no pre-defined schema because
we must know attribute type in order to properly put value into the DB.
### Response:
def create(self, **kwargs):
"""
Creates entity instance and related Attr instances.
Note that while entity instances may filter schemata by fields, that
filtering does not take place here. Attribute of any schema will be saved
successfully as long as such schema exists.
Note that we cannot create attribute with no pre-defined schema because
we must know attribute type in order to properly put value into the DB.
"""
fields = self.model._meta.get_all_field_names()
schemata = dict((s.name, s) for s in self.model.get_schemata_for_model())
# check if all attributes are known
possible_names = set(fields) | set(schemata.keys())
wrong_names = set(kwargs.keys()) - possible_names
if wrong_names:
raise NameError('Cannot create %s: unknown attribute(s) "%s". '
'Available fields: (%s). Available schemata: (%s).'
% (self.model._meta.object_name, '", "'.join(wrong_names),
', '.join(fields), ', '.join(schemata)))
# init entity with fields
instance = self.model(**dict((k,v) for k,v in kwargs.items() if k in fields))
# set attributes; instance will check schemata on save
for name, value in kwargs.items():
setattr(instance, name, value)
# save instance and EAV attributes
instance.save(force_insert=True)
return instance |
def get_last_commit(git_path=None):
"""
Get the HEAD commit SHA1 of repository in current dir.
"""
if git_path is None: git_path = GIT_PATH
line = get_last_commit_line(git_path)
revision_id = line.split()[1]
return revision_id | Get the HEAD commit SHA1 of repository in current dir. | Below is the the instruction that describes the task:
### Input:
Get the HEAD commit SHA1 of repository in current dir.
### Response:
def get_last_commit(git_path=None):
"""
Get the HEAD commit SHA1 of repository in current dir.
"""
if git_path is None: git_path = GIT_PATH
line = get_last_commit_line(git_path)
revision_id = line.split()[1]
return revision_id |
def load(obj, env=None, silent=True, key=None):
"""Loads envvars with prefixes:
`DYNACONF_` (default global) or `$(ENVVAR_PREFIX_FOR_DYNACONF)_`
"""
global_env = obj.get("ENVVAR_PREFIX_FOR_DYNACONF")
if global_env is False or global_env.upper() != "DYNACONF":
load_from_env(IDENTIFIER + "_global", key, "DYNACONF", obj, silent)
# Load the global env if exists and overwrite everything
load_from_env(IDENTIFIER + "_global", key, global_env, obj, silent) | Loads envvars with prefixes:
`DYNACONF_` (default global) or `$(ENVVAR_PREFIX_FOR_DYNACONF)_` | Below is the the instruction that describes the task:
### Input:
Loads envvars with prefixes:
`DYNACONF_` (default global) or `$(ENVVAR_PREFIX_FOR_DYNACONF)_`
### Response:
def load(obj, env=None, silent=True, key=None):
"""Loads envvars with prefixes:
`DYNACONF_` (default global) or `$(ENVVAR_PREFIX_FOR_DYNACONF)_`
"""
global_env = obj.get("ENVVAR_PREFIX_FOR_DYNACONF")
if global_env is False or global_env.upper() != "DYNACONF":
load_from_env(IDENTIFIER + "_global", key, "DYNACONF", obj, silent)
# Load the global env if exists and overwrite everything
load_from_env(IDENTIFIER + "_global", key, global_env, obj, silent) |
def get_private_xml(self, query_xso):
"""
Get the private XML data for the element `query_xso` from the
server.
:param query_xso: the object to retrieve.
:returns: the stored private XML data.
`query_xso` *must* serialize to an empty XML node of the
wanted namespace and type and *must* be registered as private
XML :class:`~private_xml_xso.Query` payload.
"""
iq = aioxmpp.IQ(
type_=aioxmpp.IQType.GET,
payload=private_xml_xso.Query(query_xso)
)
return (yield from self.client.send(iq)) | Get the private XML data for the element `query_xso` from the
server.
:param query_xso: the object to retrieve.
:returns: the stored private XML data.
`query_xso` *must* serialize to an empty XML node of the
wanted namespace and type and *must* be registered as private
XML :class:`~private_xml_xso.Query` payload. | Below is the the instruction that describes the task:
### Input:
Get the private XML data for the element `query_xso` from the
server.
:param query_xso: the object to retrieve.
:returns: the stored private XML data.
`query_xso` *must* serialize to an empty XML node of the
wanted namespace and type and *must* be registered as private
XML :class:`~private_xml_xso.Query` payload.
### Response:
def get_private_xml(self, query_xso):
"""
Get the private XML data for the element `query_xso` from the
server.
:param query_xso: the object to retrieve.
:returns: the stored private XML data.
`query_xso` *must* serialize to an empty XML node of the
wanted namespace and type and *must* be registered as private
XML :class:`~private_xml_xso.Query` payload.
"""
iq = aioxmpp.IQ(
type_=aioxmpp.IQType.GET,
payload=private_xml_xso.Query(query_xso)
)
return (yield from self.client.send(iq)) |
def values_above_sweep(abf,dataI,dataY,ylabel="",useFigure=None):
"""
To make plots like AP frequency over original trace.
dataI=[i] #the i of the sweep
dataY=[1.234] #something like inst freq
"""
xOffset = abf.currentSweep*abf.sweepInterval
if not useFigure: #just passing the figure makes it persistant!
pylab.figure(figsize=(8,6))
ax=pylab.subplot(221)
pylab.grid(alpha=.5)
if len(dataI):
pylab.plot(abf.dataX[dataI],dataY,'.',ms=10,alpha=.5,
color=abf.colormap[abf.currentSweep])
pylab.margins(0,.1)
pylab.ylabel(ylabel)
pylab.subplot(223,sharex=ax)
pylab.grid(alpha=.5)
pylab.plot(abf.dataX,abf.dataY,color=abf.colormap[abf.currentSweep],alpha=.5)
pylab.ylabel("raw data (%s)"%abf.units)
ax2=pylab.subplot(222)
pylab.grid(alpha=.5)
if len(dataI):
pylab.plot(abf.dataX[dataI]+xOffset,dataY,'.',ms=10,alpha=.5,
color=abf.colormap[abf.currentSweep])
pylab.margins(0,.1)
pylab.ylabel(ylabel)
pylab.subplot(224,sharex=ax2)
pylab.grid(alpha=.5)
pylab.plot(abf.dataX+xOffset,abf.dataY,color=abf.colormap[abf.currentSweep])
pylab.ylabel("raw data (%s)"%abf.units)
pylab.tight_layout() | To make plots like AP frequency over original trace.
dataI=[i] #the i of the sweep
dataY=[1.234] #something like inst freq | Below is the the instruction that describes the task:
### Input:
To make plots like AP frequency over original trace.
dataI=[i] #the i of the sweep
dataY=[1.234] #something like inst freq
### Response:
def values_above_sweep(abf,dataI,dataY,ylabel="",useFigure=None):
"""
To make plots like AP frequency over original trace.
dataI=[i] #the i of the sweep
dataY=[1.234] #something like inst freq
"""
xOffset = abf.currentSweep*abf.sweepInterval
if not useFigure: #just passing the figure makes it persistant!
pylab.figure(figsize=(8,6))
ax=pylab.subplot(221)
pylab.grid(alpha=.5)
if len(dataI):
pylab.plot(abf.dataX[dataI],dataY,'.',ms=10,alpha=.5,
color=abf.colormap[abf.currentSweep])
pylab.margins(0,.1)
pylab.ylabel(ylabel)
pylab.subplot(223,sharex=ax)
pylab.grid(alpha=.5)
pylab.plot(abf.dataX,abf.dataY,color=abf.colormap[abf.currentSweep],alpha=.5)
pylab.ylabel("raw data (%s)"%abf.units)
ax2=pylab.subplot(222)
pylab.grid(alpha=.5)
if len(dataI):
pylab.plot(abf.dataX[dataI]+xOffset,dataY,'.',ms=10,alpha=.5,
color=abf.colormap[abf.currentSweep])
pylab.margins(0,.1)
pylab.ylabel(ylabel)
pylab.subplot(224,sharex=ax2)
pylab.grid(alpha=.5)
pylab.plot(abf.dataX+xOffset,abf.dataY,color=abf.colormap[abf.currentSweep])
pylab.ylabel("raw data (%s)"%abf.units)
pylab.tight_layout() |
def wait(self, timeout=None, raise_error=True):
""" alias of get
Args:
timeout (float): timeout seconds
raise_error (bool): default true, whether to raise error if element not found
Raises:
WDAElementNotFoundError
"""
return self.get(timeout=timeout, raise_error=raise_error) | alias of get
Args:
timeout (float): timeout seconds
raise_error (bool): default true, whether to raise error if element not found
Raises:
WDAElementNotFoundError | Below is the the instruction that describes the task:
### Input:
alias of get
Args:
timeout (float): timeout seconds
raise_error (bool): default true, whether to raise error if element not found
Raises:
WDAElementNotFoundError
### Response:
def wait(self, timeout=None, raise_error=True):
""" alias of get
Args:
timeout (float): timeout seconds
raise_error (bool): default true, whether to raise error if element not found
Raises:
WDAElementNotFoundError
"""
return self.get(timeout=timeout, raise_error=raise_error) |
def remove_many(self, it):
"""Removes a collection of objects from the table."""
# find indicies of objects in iterable
to_be_deleted = list(it)
del_indices = []
for i, ob in enumerate(self.obs):
try:
tbd_index = to_be_deleted.index(ob)
except ValueError:
continue
else:
del_indices.append(i)
to_be_deleted.pop(tbd_index)
# quit early if we have found them all
if not to_be_deleted:
break
for i in sorted(del_indices, reverse=True):
self.pop(i)
return self | Removes a collection of objects from the table. | Below is the the instruction that describes the task:
### Input:
Removes a collection of objects from the table.
### Response:
def remove_many(self, it):
"""Removes a collection of objects from the table."""
# find indicies of objects in iterable
to_be_deleted = list(it)
del_indices = []
for i, ob in enumerate(self.obs):
try:
tbd_index = to_be_deleted.index(ob)
except ValueError:
continue
else:
del_indices.append(i)
to_be_deleted.pop(tbd_index)
# quit early if we have found them all
if not to_be_deleted:
break
for i in sorted(del_indices, reverse=True):
self.pop(i)
return self |
def getKeyType(self, account, pub):
""" Get key type
"""
for authority in ["owner", "active"]:
for key in account[authority]["key_auths"]:
if str(pub) == key[0]:
return authority
if str(pub) == account["options"]["memo_key"]:
return "memo"
return None | Get key type | Below is the the instruction that describes the task:
### Input:
Get key type
### Response:
def getKeyType(self, account, pub):
""" Get key type
"""
for authority in ["owner", "active"]:
for key in account[authority]["key_auths"]:
if str(pub) == key[0]:
return authority
if str(pub) == account["options"]["memo_key"]:
return "memo"
return None |
def peek(iterable):
"""
Get the next value from an iterable, but also return an iterable
that will subsequently return that value and the rest of the
original iterable.
>>> l = iter([1,2,3])
>>> val, l = peek(l)
>>> val
1
>>> list(l)
[1, 2, 3]
"""
peeker, original = itertools.tee(iterable)
return next(peeker), original | Get the next value from an iterable, but also return an iterable
that will subsequently return that value and the rest of the
original iterable.
>>> l = iter([1,2,3])
>>> val, l = peek(l)
>>> val
1
>>> list(l)
[1, 2, 3] | Below is the the instruction that describes the task:
### Input:
Get the next value from an iterable, but also return an iterable
that will subsequently return that value and the rest of the
original iterable.
>>> l = iter([1,2,3])
>>> val, l = peek(l)
>>> val
1
>>> list(l)
[1, 2, 3]
### Response:
def peek(iterable):
"""
Get the next value from an iterable, but also return an iterable
that will subsequently return that value and the rest of the
original iterable.
>>> l = iter([1,2,3])
>>> val, l = peek(l)
>>> val
1
>>> list(l)
[1, 2, 3]
"""
peeker, original = itertools.tee(iterable)
return next(peeker), original |
def absent(name,
user=None,
maintenance_db=None,
db_password=None,
db_host=None,
db_port=None,
db_user=None):
'''
Ensure that the named group is absent
name
The groupname of the group to remove
user
System user all operations should be performed on behalf of
.. versionadded:: 0.17.0
db_user
database username if different from config or defaul
db_password
user password if any password for a specified user
db_host
Database host if different from config or default
db_port
Database port if different from config or default
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
db_args = {
'maintenance_db': maintenance_db,
'runas': user,
'host': db_host,
'user': db_user,
'port': db_port,
'password': db_password,
}
# check if group exists and remove it
if __salt__['postgres.user_exists'](name, **db_args):
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Group {0} is set to be removed'.format(name)
return ret
if __salt__['postgres.group_remove'](name, **db_args):
ret['comment'] = 'Group {0} has been removed'.format(name)
ret['changes'][name] = 'Absent'
return ret
else:
ret['comment'] = 'Group {0} is not present, so it cannot ' \
'be removed'.format(name)
return ret | Ensure that the named group is absent
name
The groupname of the group to remove
user
System user all operations should be performed on behalf of
.. versionadded:: 0.17.0
db_user
database username if different from config or defaul
db_password
user password if any password for a specified user
db_host
Database host if different from config or default
db_port
Database port if different from config or default | Below is the the instruction that describes the task:
### Input:
Ensure that the named group is absent
name
The groupname of the group to remove
user
System user all operations should be performed on behalf of
.. versionadded:: 0.17.0
db_user
database username if different from config or defaul
db_password
user password if any password for a specified user
db_host
Database host if different from config or default
db_port
Database port if different from config or default
### Response:
def absent(name,
user=None,
maintenance_db=None,
db_password=None,
db_host=None,
db_port=None,
db_user=None):
'''
Ensure that the named group is absent
name
The groupname of the group to remove
user
System user all operations should be performed on behalf of
.. versionadded:: 0.17.0
db_user
database username if different from config or defaul
db_password
user password if any password for a specified user
db_host
Database host if different from config or default
db_port
Database port if different from config or default
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
db_args = {
'maintenance_db': maintenance_db,
'runas': user,
'host': db_host,
'user': db_user,
'port': db_port,
'password': db_password,
}
# check if group exists and remove it
if __salt__['postgres.user_exists'](name, **db_args):
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Group {0} is set to be removed'.format(name)
return ret
if __salt__['postgres.group_remove'](name, **db_args):
ret['comment'] = 'Group {0} has been removed'.format(name)
ret['changes'][name] = 'Absent'
return ret
else:
ret['comment'] = 'Group {0} is not present, so it cannot ' \
'be removed'.format(name)
return ret |
def attachable(name, path=None):
'''
Return True if the named container can be attached to via the lxc-attach
command
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt 'minion' lxc.attachable ubuntu
'''
cachekey = 'lxc.attachable{0}{1}'.format(name, path)
try:
return __context__[cachekey]
except KeyError:
_ensure_exists(name, path=path)
# Can't use run() here because it uses attachable() and would
# endlessly recurse, resulting in a traceback
log.debug('Checking if LXC container %s is attachable', name)
cmd = 'lxc-attach'
if path:
cmd += ' -P {0}'.format(pipes.quote(path))
cmd += ' --clear-env -n {0} -- /usr/bin/env'.format(name)
result = __salt__['cmd.retcode'](cmd,
python_shell=False,
output_loglevel='quiet',
ignore_retcode=True) == 0
__context__[cachekey] = result
return __context__[cachekey] | Return True if the named container can be attached to via the lxc-attach
command
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt 'minion' lxc.attachable ubuntu | Below is the the instruction that describes the task:
### Input:
Return True if the named container can be attached to via the lxc-attach
command
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt 'minion' lxc.attachable ubuntu
### Response:
def attachable(name, path=None):
'''
Return True if the named container can be attached to via the lxc-attach
command
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt 'minion' lxc.attachable ubuntu
'''
cachekey = 'lxc.attachable{0}{1}'.format(name, path)
try:
return __context__[cachekey]
except KeyError:
_ensure_exists(name, path=path)
# Can't use run() here because it uses attachable() and would
# endlessly recurse, resulting in a traceback
log.debug('Checking if LXC container %s is attachable', name)
cmd = 'lxc-attach'
if path:
cmd += ' -P {0}'.format(pipes.quote(path))
cmd += ' --clear-env -n {0} -- /usr/bin/env'.format(name)
result = __salt__['cmd.retcode'](cmd,
python_shell=False,
output_loglevel='quiet',
ignore_retcode=True) == 0
__context__[cachekey] = result
return __context__[cachekey] |
def calc2dcoords(mol):
""" Calculate optimal 2D coordinates of chemical structure
"""
topology.recognize(mol)
g = set(i for i, _ in mol.atoms_iter())
# 1: get nodes in scaffolds
scaffolds = []
belongs = {}
for i, rkeys in enumerate(sorted(mol.scaffolds, key=len)):
scf = []
for rkey in rkeys:
ring = mol.rings[rkey]
for r in ring:
belongs[r] = i
scf.append(ring)
g -= set(ring)
scaffolds.append(scf)
# 2: traverse nodes and scaffolds
# the node and scaffold graph should be a tree (no cycles)
f = True
# print(scaffolds)
coords = {}
while g:
if f and scaffolds: # largest scaffold is first
stack = [scaffolds[-1][0][0]]
f = False
else:
stack = [g.pop()]
pred = {}
branch = {}
while stack:
# print("stack: {}".format(stack))
tail = stack.pop()
# print("tail: {}".format(tail))
if tail in belongs: # scaffolds
scf = scaffold_coords(scaffolds[belongs[tail]])
# print(scf.keys())
# rotate and translate
if not coords:
coords = scf
else:
u = coords[pred[tail]]
v = scf[tail]
op = [u[0] + math.cos(u[2]), u[1] + math.sin(u[2])]
translate(scf, gm.vector(v[:2], op))
rotate(scf, op, gm.rad(u[2] + math.pi - v[2]))
coords.update(scf)
# stack nbrs of scaffold
for k in scf.keys():
pred[k] = None
for nbr in mol.neighbors(k):
if nbr not in scf.keys():
stack.append(nbr)
pred[nbr] = k
else: # append linker
if tail not in pred: # isolated
coords[tail] = [0, 0, 0, 1]
continue
p = pred[tail]
x, y, ang, d = coords[p]
# TODO: ring configuration
coords[tail] = [x + math.cos(ang), y + math.sin(ang),
ang + d * math.pi / 3, d * -1]
if p not in branch:
coords[p][2] = gm.rad(coords[p][2] + math.pi * 2 / 3 * d)
branch[p] = 1
elif branch[p] == 1:
coords[p][2] = gm.rad(coords[p][2] + math.pi * d)
branch[p] += 1
elif branch[p] == 2:
coords[p][2] = gm.rad(coords[p][2] + math.pi * 2 / 3 * d)
branch[p] += 1
for nbr in mol.neighbors(tail):
if nbr not in pred:
stack.append(nbr)
pred[nbr] = tail
g -= set(pred)
resolve_overlap(coords)
for i, a in mol.atoms_iter():
mol.atom(i).coords = coords[i][:2] | Calculate optimal 2D coordinates of chemical structure | Below is the the instruction that describes the task:
### Input:
Calculate optimal 2D coordinates of chemical structure
### Response:
def calc2dcoords(mol):
""" Calculate optimal 2D coordinates of chemical structure
"""
topology.recognize(mol)
g = set(i for i, _ in mol.atoms_iter())
# 1: get nodes in scaffolds
scaffolds = []
belongs = {}
for i, rkeys in enumerate(sorted(mol.scaffolds, key=len)):
scf = []
for rkey in rkeys:
ring = mol.rings[rkey]
for r in ring:
belongs[r] = i
scf.append(ring)
g -= set(ring)
scaffolds.append(scf)
# 2: traverse nodes and scaffolds
# the node and scaffold graph should be a tree (no cycles)
f = True
# print(scaffolds)
coords = {}
while g:
if f and scaffolds: # largest scaffold is first
stack = [scaffolds[-1][0][0]]
f = False
else:
stack = [g.pop()]
pred = {}
branch = {}
while stack:
# print("stack: {}".format(stack))
tail = stack.pop()
# print("tail: {}".format(tail))
if tail in belongs: # scaffolds
scf = scaffold_coords(scaffolds[belongs[tail]])
# print(scf.keys())
# rotate and translate
if not coords:
coords = scf
else:
u = coords[pred[tail]]
v = scf[tail]
op = [u[0] + math.cos(u[2]), u[1] + math.sin(u[2])]
translate(scf, gm.vector(v[:2], op))
rotate(scf, op, gm.rad(u[2] + math.pi - v[2]))
coords.update(scf)
# stack nbrs of scaffold
for k in scf.keys():
pred[k] = None
for nbr in mol.neighbors(k):
if nbr not in scf.keys():
stack.append(nbr)
pred[nbr] = k
else: # append linker
if tail not in pred: # isolated
coords[tail] = [0, 0, 0, 1]
continue
p = pred[tail]
x, y, ang, d = coords[p]
# TODO: ring configuration
coords[tail] = [x + math.cos(ang), y + math.sin(ang),
ang + d * math.pi / 3, d * -1]
if p not in branch:
coords[p][2] = gm.rad(coords[p][2] + math.pi * 2 / 3 * d)
branch[p] = 1
elif branch[p] == 1:
coords[p][2] = gm.rad(coords[p][2] + math.pi * d)
branch[p] += 1
elif branch[p] == 2:
coords[p][2] = gm.rad(coords[p][2] + math.pi * 2 / 3 * d)
branch[p] += 1
for nbr in mol.neighbors(tail):
if nbr not in pred:
stack.append(nbr)
pred[nbr] = tail
g -= set(pred)
resolve_overlap(coords)
for i, a in mol.atoms_iter():
mol.atom(i).coords = coords[i][:2] |
def generate_to(self, worksheet, row):
'''Generate row report.
Generates a row report of the item represented by this instance and
inserts it into a given worksheet at a specified row number.
:param worksheet: Reference to a worksheet in which to insert row
report.
:param row: Row number.
'''
super(ItemText, self).generate_to(worksheet, row)
fmt = self.generator.formats
worksheet.write(row, 3, "text", fmt['type_text'])
worksheet.write(row, 4, uni(self.data), fmt['default']) | Generate row report.
Generates a row report of the item represented by this instance and
inserts it into a given worksheet at a specified row number.
:param worksheet: Reference to a worksheet in which to insert row
report.
:param row: Row number. | Below is the the instruction that describes the task:
### Input:
Generate row report.
Generates a row report of the item represented by this instance and
inserts it into a given worksheet at a specified row number.
:param worksheet: Reference to a worksheet in which to insert row
report.
:param row: Row number.
### Response:
def generate_to(self, worksheet, row):
'''Generate row report.
Generates a row report of the item represented by this instance and
inserts it into a given worksheet at a specified row number.
:param worksheet: Reference to a worksheet in which to insert row
report.
:param row: Row number.
'''
super(ItemText, self).generate_to(worksheet, row)
fmt = self.generator.formats
worksheet.write(row, 3, "text", fmt['type_text'])
worksheet.write(row, 4, uni(self.data), fmt['default']) |
def isolated():
"""Returns a chroot for third_party isolated from the ``sys.path``.
PEX will typically be installed in site-packages flat alongside many other distributions; as such,
adding the location of the pex distribution to the ``sys.path`` will typically expose many other
distributions. An isolated chroot can be used as a ``sys.path`` entry to effect only the exposure
of pex.
:return: The path of the chroot.
:rtype: str
"""
global _ISOLATED
if _ISOLATED is None:
from pex import vendor
from pex.common import safe_mkdtemp, Chroot
chroot = Chroot(safe_mkdtemp())
with _tracer().timed('Isolating pex in {}'.format(chroot)):
pex_path = os.path.join(vendor.VendorSpec.ROOT, 'pex')
for root, _, files in os.walk(pex_path):
for f in files:
if not f.endswith('.pyc'):
abs_file_path = os.path.join(root, f)
relpath = os.path.relpath(abs_file_path, pex_path)
chroot.copy(abs_file_path, os.path.join('pex', relpath), label='pex')
_ISOLATED = chroot
return _ISOLATED.path() | Returns a chroot for third_party isolated from the ``sys.path``.
PEX will typically be installed in site-packages flat alongside many other distributions; as such,
adding the location of the pex distribution to the ``sys.path`` will typically expose many other
distributions. An isolated chroot can be used as a ``sys.path`` entry to effect only the exposure
of pex.
:return: The path of the chroot.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Returns a chroot for third_party isolated from the ``sys.path``.
PEX will typically be installed in site-packages flat alongside many other distributions; as such,
adding the location of the pex distribution to the ``sys.path`` will typically expose many other
distributions. An isolated chroot can be used as a ``sys.path`` entry to effect only the exposure
of pex.
:return: The path of the chroot.
:rtype: str
### Response:
def isolated():
"""Returns a chroot for third_party isolated from the ``sys.path``.
PEX will typically be installed in site-packages flat alongside many other distributions; as such,
adding the location of the pex distribution to the ``sys.path`` will typically expose many other
distributions. An isolated chroot can be used as a ``sys.path`` entry to effect only the exposure
of pex.
:return: The path of the chroot.
:rtype: str
"""
global _ISOLATED
if _ISOLATED is None:
from pex import vendor
from pex.common import safe_mkdtemp, Chroot
chroot = Chroot(safe_mkdtemp())
with _tracer().timed('Isolating pex in {}'.format(chroot)):
pex_path = os.path.join(vendor.VendorSpec.ROOT, 'pex')
for root, _, files in os.walk(pex_path):
for f in files:
if not f.endswith('.pyc'):
abs_file_path = os.path.join(root, f)
relpath = os.path.relpath(abs_file_path, pex_path)
chroot.copy(abs_file_path, os.path.join('pex', relpath), label='pex')
_ISOLATED = chroot
return _ISOLATED.path() |
def gps_status_encode(self, satellites_visible, satellite_prn, satellite_used, satellite_elevation, satellite_azimuth, satellite_snr):
'''
The positioning status, as reported by GPS. This message is intended
to display status information about each satellite
visible to the receiver. See message GLOBAL_POSITION
for the global position estimate. This message can
contain information for up to 20 satellites.
satellites_visible : Number of satellites visible (uint8_t)
satellite_prn : Global satellite ID (uint8_t)
satellite_used : 0: Satellite not used, 1: used for localization (uint8_t)
satellite_elevation : Elevation (0: right on top of receiver, 90: on the horizon) of satellite (uint8_t)
satellite_azimuth : Direction of satellite, 0: 0 deg, 255: 360 deg. (uint8_t)
satellite_snr : Signal to noise ratio of satellite (uint8_t)
'''
return MAVLink_gps_status_message(satellites_visible, satellite_prn, satellite_used, satellite_elevation, satellite_azimuth, satellite_snr) | The positioning status, as reported by GPS. This message is intended
to display status information about each satellite
visible to the receiver. See message GLOBAL_POSITION
for the global position estimate. This message can
contain information for up to 20 satellites.
satellites_visible : Number of satellites visible (uint8_t)
satellite_prn : Global satellite ID (uint8_t)
satellite_used : 0: Satellite not used, 1: used for localization (uint8_t)
satellite_elevation : Elevation (0: right on top of receiver, 90: on the horizon) of satellite (uint8_t)
satellite_azimuth : Direction of satellite, 0: 0 deg, 255: 360 deg. (uint8_t)
satellite_snr : Signal to noise ratio of satellite (uint8_t) | Below is the the instruction that describes the task:
### Input:
The positioning status, as reported by GPS. This message is intended
to display status information about each satellite
visible to the receiver. See message GLOBAL_POSITION
for the global position estimate. This message can
contain information for up to 20 satellites.
satellites_visible : Number of satellites visible (uint8_t)
satellite_prn : Global satellite ID (uint8_t)
satellite_used : 0: Satellite not used, 1: used for localization (uint8_t)
satellite_elevation : Elevation (0: right on top of receiver, 90: on the horizon) of satellite (uint8_t)
satellite_azimuth : Direction of satellite, 0: 0 deg, 255: 360 deg. (uint8_t)
satellite_snr : Signal to noise ratio of satellite (uint8_t)
### Response:
def gps_status_encode(self, satellites_visible, satellite_prn, satellite_used, satellite_elevation, satellite_azimuth, satellite_snr):
'''
The positioning status, as reported by GPS. This message is intended
to display status information about each satellite
visible to the receiver. See message GLOBAL_POSITION
for the global position estimate. This message can
contain information for up to 20 satellites.
satellites_visible : Number of satellites visible (uint8_t)
satellite_prn : Global satellite ID (uint8_t)
satellite_used : 0: Satellite not used, 1: used for localization (uint8_t)
satellite_elevation : Elevation (0: right on top of receiver, 90: on the horizon) of satellite (uint8_t)
satellite_azimuth : Direction of satellite, 0: 0 deg, 255: 360 deg. (uint8_t)
satellite_snr : Signal to noise ratio of satellite (uint8_t)
'''
return MAVLink_gps_status_message(satellites_visible, satellite_prn, satellite_used, satellite_elevation, satellite_azimuth, satellite_snr) |
def determine_timestamp_format(datetime_str, warn=True):
r"""
Args:
datetime_str (str):
Returns:
str:
References:
https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior
CommandLine:
python -m utool.util_time --exec-determine_timestamp_format
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_time import * # NOQA
>>> import utool as ut
>>> datetime_str_list = [
>>> '0000:00:00 00:00:00',
>>> ' : : : : ',
>>> '2015:04:01 00:00:00',
>>> '2080/04/01 00:00:00',
>>> '2005-10-27T14:35:20+02:00',
>>> '6:35:01\x002006:03:19 1',
>>> '2016/05/03 16:34:57 EST'
>>> ]
>>> result = ut.repr4([determine_timestamp_format(datetime_str)
>>> for datetime_str in datetime_str_list])
>>> print(result)
"""
import re
# try to determine the format
clean_datetime_str = datetime_str.replace('\x00', ' ').strip(';').strip()
if len(clean_datetime_str) == 25 and 'T' in clean_datetime_str:
# Delete last colon from ISO 8601 format
# clean_datetime_str = clean_datetime_str[:-3] + clean_datetime_str[-2:]
if True or six.PY2:
if warn:
print('WARNING: Python 2.7 does not support %z directive '
'in strptime, ignoring timezone in parsing: ' +
clean_datetime_str)
clean_datetime_str = clean_datetime_str[:-6]
year_regex = r'(\d\d)?\d\d'
month_regex = '[0-1]?[0-9]'
day_regex = '[0-3]?[0-9]'
time_regex = r'[0-6]?[0-9]:[0-6]?[0-9]:[0-6]?[0-9]'
#odd_time_regex = r'[0-6]?[0-9]:[0-6]?[0-9]:[0-6 ]?[0-9]'
date_regex1 = '/'.join([year_regex, month_regex, day_regex])
date_regex2 = ':'.join([year_regex, month_regex, day_regex])
date_regex3 = '-'.join([year_regex, month_regex, day_regex])
datetime_regex1 = date_regex1 + ' ' + time_regex
datetime_regex2 = date_regex2 + ' ' + time_regex
datetime_regex3 = date_regex3 + 'T' + time_regex # + r'\+[0-2]?[0-9]?[0-6]?[0-9]'
datetime_regex4 = time_regex + ' ' + date_regex2 + ' 1'
timefmt = None
if re.match(datetime_regex1, clean_datetime_str):
timefmt = '%Y/%m/%d %H:%M:%S'
elif re.match(datetime_regex2, clean_datetime_str):
timefmt = '%Y:%m:%d %H:%M:%S'
elif re.match(datetime_regex3, clean_datetime_str):
# timefmt = '%Y-%m-%dT%H:%M:%S%z'
timefmt = '%Y-%m-%dT%H:%M:%S'
elif re.match(datetime_regex4, clean_datetime_str):
# timefmt = '%Y-%m-%dT%H:%M:%S%z'
timefmt = '%H:%M:%S %Y:%m:%d 1'
# Just dont accept this bad format
#elif re.match(datetime_regex3, clean_datetime_str):
# timefmt = '%Y:%m:%d %H:%M: %S'
else:
if isinstance(clean_datetime_str, six.string_types):
if len(clean_datetime_str.strip()) == 0:
return None
elif len(clean_datetime_str.strip(':/ ')) == 0:
return None
elif clean_datetime_str.find('No EXIF Data') == 0:
return None
elif clean_datetime_str.find('Invalid') == 0:
return None
elif clean_datetime_str == '0000:00:00 00:00:00':
return None
elif [ ord(_) >= 128 for _ in clean_datetime_str ].count(True) > 1:
return None
#return -1
#import utool as ut
#ut.embed()
msg = 'Unknown format: datetime_str=%r' % (datetime_str,)
print(msg)
return None
#raise NotImplementedError(msg)
return timefmt | r"""
Args:
datetime_str (str):
Returns:
str:
References:
https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior
CommandLine:
python -m utool.util_time --exec-determine_timestamp_format
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_time import * # NOQA
>>> import utool as ut
>>> datetime_str_list = [
>>> '0000:00:00 00:00:00',
>>> ' : : : : ',
>>> '2015:04:01 00:00:00',
>>> '2080/04/01 00:00:00',
>>> '2005-10-27T14:35:20+02:00',
>>> '6:35:01\x002006:03:19 1',
>>> '2016/05/03 16:34:57 EST'
>>> ]
>>> result = ut.repr4([determine_timestamp_format(datetime_str)
>>> for datetime_str in datetime_str_list])
>>> print(result) | Below is the the instruction that describes the task:
### Input:
r"""
Args:
datetime_str (str):
Returns:
str:
References:
https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior
CommandLine:
python -m utool.util_time --exec-determine_timestamp_format
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_time import * # NOQA
>>> import utool as ut
>>> datetime_str_list = [
>>> '0000:00:00 00:00:00',
>>> ' : : : : ',
>>> '2015:04:01 00:00:00',
>>> '2080/04/01 00:00:00',
>>> '2005-10-27T14:35:20+02:00',
>>> '6:35:01\x002006:03:19 1',
>>> '2016/05/03 16:34:57 EST'
>>> ]
>>> result = ut.repr4([determine_timestamp_format(datetime_str)
>>> for datetime_str in datetime_str_list])
>>> print(result)
### Response:
def determine_timestamp_format(datetime_str, warn=True):
r"""
Args:
datetime_str (str):
Returns:
str:
References:
https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior
CommandLine:
python -m utool.util_time --exec-determine_timestamp_format
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_time import * # NOQA
>>> import utool as ut
>>> datetime_str_list = [
>>> '0000:00:00 00:00:00',
>>> ' : : : : ',
>>> '2015:04:01 00:00:00',
>>> '2080/04/01 00:00:00',
>>> '2005-10-27T14:35:20+02:00',
>>> '6:35:01\x002006:03:19 1',
>>> '2016/05/03 16:34:57 EST'
>>> ]
>>> result = ut.repr4([determine_timestamp_format(datetime_str)
>>> for datetime_str in datetime_str_list])
>>> print(result)
"""
import re
# try to determine the format
clean_datetime_str = datetime_str.replace('\x00', ' ').strip(';').strip()
if len(clean_datetime_str) == 25 and 'T' in clean_datetime_str:
# Delete last colon from ISO 8601 format
# clean_datetime_str = clean_datetime_str[:-3] + clean_datetime_str[-2:]
if True or six.PY2:
if warn:
print('WARNING: Python 2.7 does not support %z directive '
'in strptime, ignoring timezone in parsing: ' +
clean_datetime_str)
clean_datetime_str = clean_datetime_str[:-6]
year_regex = r'(\d\d)?\d\d'
month_regex = '[0-1]?[0-9]'
day_regex = '[0-3]?[0-9]'
time_regex = r'[0-6]?[0-9]:[0-6]?[0-9]:[0-6]?[0-9]'
#odd_time_regex = r'[0-6]?[0-9]:[0-6]?[0-9]:[0-6 ]?[0-9]'
date_regex1 = '/'.join([year_regex, month_regex, day_regex])
date_regex2 = ':'.join([year_regex, month_regex, day_regex])
date_regex3 = '-'.join([year_regex, month_regex, day_regex])
datetime_regex1 = date_regex1 + ' ' + time_regex
datetime_regex2 = date_regex2 + ' ' + time_regex
datetime_regex3 = date_regex3 + 'T' + time_regex # + r'\+[0-2]?[0-9]?[0-6]?[0-9]'
datetime_regex4 = time_regex + ' ' + date_regex2 + ' 1'
timefmt = None
if re.match(datetime_regex1, clean_datetime_str):
timefmt = '%Y/%m/%d %H:%M:%S'
elif re.match(datetime_regex2, clean_datetime_str):
timefmt = '%Y:%m:%d %H:%M:%S'
elif re.match(datetime_regex3, clean_datetime_str):
# timefmt = '%Y-%m-%dT%H:%M:%S%z'
timefmt = '%Y-%m-%dT%H:%M:%S'
elif re.match(datetime_regex4, clean_datetime_str):
# timefmt = '%Y-%m-%dT%H:%M:%S%z'
timefmt = '%H:%M:%S %Y:%m:%d 1'
# Just dont accept this bad format
#elif re.match(datetime_regex3, clean_datetime_str):
# timefmt = '%Y:%m:%d %H:%M: %S'
else:
if isinstance(clean_datetime_str, six.string_types):
if len(clean_datetime_str.strip()) == 0:
return None
elif len(clean_datetime_str.strip(':/ ')) == 0:
return None
elif clean_datetime_str.find('No EXIF Data') == 0:
return None
elif clean_datetime_str.find('Invalid') == 0:
return None
elif clean_datetime_str == '0000:00:00 00:00:00':
return None
elif [ ord(_) >= 128 for _ in clean_datetime_str ].count(True) > 1:
return None
#return -1
#import utool as ut
#ut.embed()
msg = 'Unknown format: datetime_str=%r' % (datetime_str,)
print(msg)
return None
#raise NotImplementedError(msg)
return timefmt |
def yellow(cls, string, auto=False):
"""Color-code entire string.
:param str string: String to colorize.
:param bool auto: Enable auto-color (dark/light terminal).
:return: Class instance for colorized string.
:rtype: Color
"""
return cls.colorize('yellow', string, auto=auto) | Color-code entire string.
:param str string: String to colorize.
:param bool auto: Enable auto-color (dark/light terminal).
:return: Class instance for colorized string.
:rtype: Color | Below is the the instruction that describes the task:
### Input:
Color-code entire string.
:param str string: String to colorize.
:param bool auto: Enable auto-color (dark/light terminal).
:return: Class instance for colorized string.
:rtype: Color
### Response:
def yellow(cls, string, auto=False):
"""Color-code entire string.
:param str string: String to colorize.
:param bool auto: Enable auto-color (dark/light terminal).
:return: Class instance for colorized string.
:rtype: Color
"""
return cls.colorize('yellow', string, auto=auto) |
def snapped_speed_limits(client, path):
"""Returns the posted speed limit (in km/h) for given road segments.
The provided points will first be snapped to the most likely roads the
vehicle was traveling along.
:param path: The path of points to be snapped.
:type path: a single location, or a list of locations, where a
location is a string, dict, list, or tuple
:rtype: dict with a list of speed limits and a list of the snapped points.
"""
params = {"path": convert.location_list(path)}
return client._request("/v1/speedLimits", params,
base_url=_ROADS_BASE_URL,
accepts_clientid=False,
extract_body=_roads_extract) | Returns the posted speed limit (in km/h) for given road segments.
The provided points will first be snapped to the most likely roads the
vehicle was traveling along.
:param path: The path of points to be snapped.
:type path: a single location, or a list of locations, where a
location is a string, dict, list, or tuple
:rtype: dict with a list of speed limits and a list of the snapped points. | Below is the the instruction that describes the task:
### Input:
Returns the posted speed limit (in km/h) for given road segments.
The provided points will first be snapped to the most likely roads the
vehicle was traveling along.
:param path: The path of points to be snapped.
:type path: a single location, or a list of locations, where a
location is a string, dict, list, or tuple
:rtype: dict with a list of speed limits and a list of the snapped points.
### Response:
def snapped_speed_limits(client, path):
"""Returns the posted speed limit (in km/h) for given road segments.
The provided points will first be snapped to the most likely roads the
vehicle was traveling along.
:param path: The path of points to be snapped.
:type path: a single location, or a list of locations, where a
location is a string, dict, list, or tuple
:rtype: dict with a list of speed limits and a list of the snapped points.
"""
params = {"path": convert.location_list(path)}
return client._request("/v1/speedLimits", params,
base_url=_ROADS_BASE_URL,
accepts_clientid=False,
extract_body=_roads_extract) |
def cost_loss(y_true, y_pred, cost_mat):
#TODO: update description
"""Cost classification loss.
This function calculates the cost of using y_pred on y_true with
cost-matrix cost-mat. It differ from traditional classification evaluation
measures since measures such as accuracy asing the same cost to different
errors, but that is not the real case in several real-world classification
problems as they are example-dependent cost-sensitive in nature, where the
costs due to misclassification vary between examples.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels.
y_pred : array-like or label indicator matrix
Predicted labels, as returned by a classifier.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example.
Returns
-------
loss : float
Cost of a using y_pred on y_true with cost-matrix cost-mat
References
----------
.. [1] C. Elkan, "The foundations of Cost-Sensitive Learning",
in Seventeenth International Joint Conference on Artificial Intelligence,
973-978, 2001.
.. [2] A. Correa Bahnsen, A. Stojanovic, D.Aouada, B, Ottersten,
`"Improving Credit Card Fraud Detection with Calibrated Probabilities" <http://albahnsen.com/files/%20Improving%20Credit%20Card%20Fraud%20Detection%20by%20using%20Calibrated%20Probabilities%20-%20Publish.pdf>`__, in Proceedings of the fourteenth SIAM International Conference on Data Mining,
677-685, 2014.
See also
--------
savings_score
Examples
--------
>>> import numpy as np
>>> from costcla.metrics import cost_loss
>>> y_pred = [0, 1, 0, 0]
>>> y_true = [0, 1, 1, 0]
>>> cost_mat = np.array([[4, 1, 0, 0], [1, 3, 0, 0], [2, 3, 0, 0], [2, 1, 0, 0]])
>>> cost_loss(y_true, y_pred, cost_mat)
3
"""
#TODO: Check consistency of cost_mat
y_true = column_or_1d(y_true)
y_true = (y_true == 1).astype(np.float)
y_pred = column_or_1d(y_pred)
y_pred = (y_pred == 1).astype(np.float)
cost = y_true * ((1 - y_pred) * cost_mat[:, 1] + y_pred * cost_mat[:, 2])
cost += (1 - y_true) * (y_pred * cost_mat[:, 0] + (1 - y_pred) * cost_mat[:, 3])
return np.sum(cost) | Cost classification loss.
This function calculates the cost of using y_pred on y_true with
cost-matrix cost-mat. It differ from traditional classification evaluation
measures since measures such as accuracy asing the same cost to different
errors, but that is not the real case in several real-world classification
problems as they are example-dependent cost-sensitive in nature, where the
costs due to misclassification vary between examples.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels.
y_pred : array-like or label indicator matrix
Predicted labels, as returned by a classifier.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example.
Returns
-------
loss : float
Cost of a using y_pred on y_true with cost-matrix cost-mat
References
----------
.. [1] C. Elkan, "The foundations of Cost-Sensitive Learning",
in Seventeenth International Joint Conference on Artificial Intelligence,
973-978, 2001.
.. [2] A. Correa Bahnsen, A. Stojanovic, D.Aouada, B, Ottersten,
`"Improving Credit Card Fraud Detection with Calibrated Probabilities" <http://albahnsen.com/files/%20Improving%20Credit%20Card%20Fraud%20Detection%20by%20using%20Calibrated%20Probabilities%20-%20Publish.pdf>`__, in Proceedings of the fourteenth SIAM International Conference on Data Mining,
677-685, 2014.
See also
--------
savings_score
Examples
--------
>>> import numpy as np
>>> from costcla.metrics import cost_loss
>>> y_pred = [0, 1, 0, 0]
>>> y_true = [0, 1, 1, 0]
>>> cost_mat = np.array([[4, 1, 0, 0], [1, 3, 0, 0], [2, 3, 0, 0], [2, 1, 0, 0]])
>>> cost_loss(y_true, y_pred, cost_mat)
3 | Below is the the instruction that describes the task:
### Input:
Cost classification loss.
This function calculates the cost of using y_pred on y_true with
cost-matrix cost-mat. It differ from traditional classification evaluation
measures since measures such as accuracy asing the same cost to different
errors, but that is not the real case in several real-world classification
problems as they are example-dependent cost-sensitive in nature, where the
costs due to misclassification vary between examples.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels.
y_pred : array-like or label indicator matrix
Predicted labels, as returned by a classifier.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example.
Returns
-------
loss : float
Cost of a using y_pred on y_true with cost-matrix cost-mat
References
----------
.. [1] C. Elkan, "The foundations of Cost-Sensitive Learning",
in Seventeenth International Joint Conference on Artificial Intelligence,
973-978, 2001.
.. [2] A. Correa Bahnsen, A. Stojanovic, D.Aouada, B, Ottersten,
`"Improving Credit Card Fraud Detection with Calibrated Probabilities" <http://albahnsen.com/files/%20Improving%20Credit%20Card%20Fraud%20Detection%20by%20using%20Calibrated%20Probabilities%20-%20Publish.pdf>`__, in Proceedings of the fourteenth SIAM International Conference on Data Mining,
677-685, 2014.
See also
--------
savings_score
Examples
--------
>>> import numpy as np
>>> from costcla.metrics import cost_loss
>>> y_pred = [0, 1, 0, 0]
>>> y_true = [0, 1, 1, 0]
>>> cost_mat = np.array([[4, 1, 0, 0], [1, 3, 0, 0], [2, 3, 0, 0], [2, 1, 0, 0]])
>>> cost_loss(y_true, y_pred, cost_mat)
3
### Response:
def cost_loss(y_true, y_pred, cost_mat):
#TODO: update description
"""Cost classification loss.
This function calculates the cost of using y_pred on y_true with
cost-matrix cost-mat. It differ from traditional classification evaluation
measures since measures such as accuracy asing the same cost to different
errors, but that is not the real case in several real-world classification
problems as they are example-dependent cost-sensitive in nature, where the
costs due to misclassification vary between examples.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels.
y_pred : array-like or label indicator matrix
Predicted labels, as returned by a classifier.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example.
Returns
-------
loss : float
Cost of a using y_pred on y_true with cost-matrix cost-mat
References
----------
.. [1] C. Elkan, "The foundations of Cost-Sensitive Learning",
in Seventeenth International Joint Conference on Artificial Intelligence,
973-978, 2001.
.. [2] A. Correa Bahnsen, A. Stojanovic, D.Aouada, B, Ottersten,
`"Improving Credit Card Fraud Detection with Calibrated Probabilities" <http://albahnsen.com/files/%20Improving%20Credit%20Card%20Fraud%20Detection%20by%20using%20Calibrated%20Probabilities%20-%20Publish.pdf>`__, in Proceedings of the fourteenth SIAM International Conference on Data Mining,
677-685, 2014.
See also
--------
savings_score
Examples
--------
>>> import numpy as np
>>> from costcla.metrics import cost_loss
>>> y_pred = [0, 1, 0, 0]
>>> y_true = [0, 1, 1, 0]
>>> cost_mat = np.array([[4, 1, 0, 0], [1, 3, 0, 0], [2, 3, 0, 0], [2, 1, 0, 0]])
>>> cost_loss(y_true, y_pred, cost_mat)
3
"""
#TODO: Check consistency of cost_mat
y_true = column_or_1d(y_true)
y_true = (y_true == 1).astype(np.float)
y_pred = column_or_1d(y_pred)
y_pred = (y_pred == 1).astype(np.float)
cost = y_true * ((1 - y_pred) * cost_mat[:, 1] + y_pred * cost_mat[:, 2])
cost += (1 - y_true) * (y_pred * cost_mat[:, 0] + (1 - y_pred) * cost_mat[:, 3])
return np.sum(cost) |
def get_analysis_question(hazard, exposure):
"""Construct analysis question based on hazard and exposure.
:param hazard: A hazard definition.
:type hazard: dict
:param exposure: An exposure definition.
:type exposure: dict
:returns: Analysis question based on reporting standards.
:rtype: str
"""
# First we look for a translated hardcoded question.
question = specific_analysis_question(hazard, exposure)
if question:
return question
if hazard == hazard_generic:
# Secondly, if the hazard is generic, we don't need the hazard.
question = tr(
'In each of the hazard zones {exposure_measure} {exposure_name} '
'might be affected?').format(
exposure_measure=exposure['measure_question'],
exposure_name=exposure['name'])
return question
# Then, we fallback on a generated string on the fly.
question = tr(
'In the event of a {hazard_name}, {exposure_measure} {exposure_name} '
'might be affected?').format(
hazard_name=hazard['name'],
exposure_measure=exposure['measure_question'],
exposure_name=exposure['name'])
return question | Construct analysis question based on hazard and exposure.
:param hazard: A hazard definition.
:type hazard: dict
:param exposure: An exposure definition.
:type exposure: dict
:returns: Analysis question based on reporting standards.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Construct analysis question based on hazard and exposure.
:param hazard: A hazard definition.
:type hazard: dict
:param exposure: An exposure definition.
:type exposure: dict
:returns: Analysis question based on reporting standards.
:rtype: str
### Response:
def get_analysis_question(hazard, exposure):
"""Construct analysis question based on hazard and exposure.
:param hazard: A hazard definition.
:type hazard: dict
:param exposure: An exposure definition.
:type exposure: dict
:returns: Analysis question based on reporting standards.
:rtype: str
"""
# First we look for a translated hardcoded question.
question = specific_analysis_question(hazard, exposure)
if question:
return question
if hazard == hazard_generic:
# Secondly, if the hazard is generic, we don't need the hazard.
question = tr(
'In each of the hazard zones {exposure_measure} {exposure_name} '
'might be affected?').format(
exposure_measure=exposure['measure_question'],
exposure_name=exposure['name'])
return question
# Then, we fallback on a generated string on the fly.
question = tr(
'In the event of a {hazard_name}, {exposure_measure} {exposure_name} '
'might be affected?').format(
hazard_name=hazard['name'],
exposure_measure=exposure['measure_question'],
exposure_name=exposure['name'])
return question |
def base(ctx, verbose, config):
"""Puzzle: manage DNA variant resources."""
# configure root logger to print to STDERR
loglevel = LEVELS.get(min(verbose, 3))
configure_stream(level=loglevel)
ctx.obj = {}
if config and os.path.exists(config):
ctx.obj = yaml.load(open(config, 'r')) or {}
ctx.obj['config_path'] = config
# launch the command line interface
logger.debug('Booting up command line interface') | Puzzle: manage DNA variant resources. | Below is the the instruction that describes the task:
### Input:
Puzzle: manage DNA variant resources.
### Response:
def base(ctx, verbose, config):
"""Puzzle: manage DNA variant resources."""
# configure root logger to print to STDERR
loglevel = LEVELS.get(min(verbose, 3))
configure_stream(level=loglevel)
ctx.obj = {}
if config and os.path.exists(config):
ctx.obj = yaml.load(open(config, 'r')) or {}
ctx.obj['config_path'] = config
# launch the command line interface
logger.debug('Booting up command line interface') |
def list(self, resource=None, sort=None):
"""Get a list of tags.
:param resource: (optional) Restrict to given resource type as string.
:param sort: (optional) Sort fields to apply as string list.
:return: :class:`tags.Tag <tags.Tag>` list
"""
schema = TagSchema()
resp = self.service.get(self.base, params={'resource': resource, 'sort': sort})
return self.service.decode(schema, resp, many=True) | Get a list of tags.
:param resource: (optional) Restrict to given resource type as string.
:param sort: (optional) Sort fields to apply as string list.
:return: :class:`tags.Tag <tags.Tag>` list | Below is the the instruction that describes the task:
### Input:
Get a list of tags.
:param resource: (optional) Restrict to given resource type as string.
:param sort: (optional) Sort fields to apply as string list.
:return: :class:`tags.Tag <tags.Tag>` list
### Response:
def list(self, resource=None, sort=None):
"""Get a list of tags.
:param resource: (optional) Restrict to given resource type as string.
:param sort: (optional) Sort fields to apply as string list.
:return: :class:`tags.Tag <tags.Tag>` list
"""
schema = TagSchema()
resp = self.service.get(self.base, params={'resource': resource, 'sort': sort})
return self.service.decode(schema, resp, many=True) |
def writes(self, nb, metadata=None, **kwargs):
"""Return the text representation of the notebook"""
if self.fmt.get('format_name') == 'pandoc':
metadata = insert_jupytext_info_and_filter_metadata(metadata, self.ext, self.implementation)
cells = []
for cell in nb.cells:
cell_metadata = filter_metadata(copy(cell.metadata),
self.fmt.get('cell_metadata_filter'),
_IGNORE_CELL_METADATA)
if cell.cell_type == 'code':
cells.append(new_code_cell(source=cell.source, metadata=cell_metadata))
else:
cells.append(NotebookNode(source=cell.source, metadata=cell_metadata, cell_type=cell.cell_type))
return notebook_to_md(new_notebook(metadata=metadata, cells=cells))
# Copy the notebook, in order to be sure we do not modify the original notebook
nb = new_notebook(cells=nb.cells, metadata=deepcopy(metadata or nb.metadata))
metadata = nb.metadata
default_language = default_language_from_metadata_and_ext(metadata, self.implementation.extension)
self.update_fmt_with_notebook_options(nb.metadata)
if 'main_language' in metadata.get('jupytext', {}):
del metadata['jupytext']['main_language']
header = encoding_and_executable(nb, metadata, self.ext)
header_content, header_lines_to_next_cell = metadata_and_cell_to_header(nb, metadata,
self.implementation, self.ext)
header.extend(header_content)
cell_exporters = []
looking_for_first_markdown_cell = (self.implementation.format_name and
self.implementation.format_name.startswith('sphinx'))
split_at_heading = self.fmt.get('split_at_heading', False)
for cell in nb.cells:
if looking_for_first_markdown_cell and cell.cell_type == 'markdown':
cell.metadata.setdefault('cell_marker', '"""')
looking_for_first_markdown_cell = False
cell_exporters.append(self.implementation.cell_exporter_class(cell, default_language, self.fmt))
texts = [cell.cell_to_text() for cell in cell_exporters]
lines = []
# concatenate cells in reverse order to determine how many blank lines (pep8)
for i, cell in reversed(list(enumerate(cell_exporters))):
text = cell.remove_eoc_marker(texts[i], lines)
if i == 0 and self.implementation.format_name and \
self.implementation.format_name.startswith('sphinx') and \
(text in [['%matplotlib inline'], ['# %matplotlib inline']]):
continue
lines_to_next_cell = cell.lines_to_next_cell
if lines_to_next_cell is None:
lines_to_next_cell = pep8_lines_between_cells(text, lines, self.implementation.extension)
text.extend([''] * lines_to_next_cell)
# two blank lines between markdown cells in Rmd when those do not have explicit region markers
if self.ext in ['.Rmd', '.md'] and not cell.is_code():
if (i + 1 < len(cell_exporters) and not cell_exporters[i + 1].is_code() and
not texts[i][0].startswith('<!-- #region') and
not texts[i + 1][0].startswith('<!-- #region') and
(not split_at_heading or not (texts[i + 1] and texts[i + 1][0].startswith('#')))):
text.append('')
# "" between two consecutive code cells in sphinx
if self.implementation.format_name.startswith('sphinx') and cell.is_code():
if i + 1 < len(cell_exporters) and cell_exporters[i + 1].is_code():
text.append('""')
if i + 1 < len(cell_exporters):
lines = cell_exporters[i + 1].simplify_soc_marker(lines, text)
lines = text + lines
if header_lines_to_next_cell is None:
header_lines_to_next_cell = pep8_lines_between_cells(header_content, lines, self.implementation.extension)
header.extend([''] * header_lines_to_next_cell)
if cell_exporters:
lines = cell_exporters[0].simplify_soc_marker(lines, header)
return '\n'.join(header + lines) | Return the text representation of the notebook | Below is the the instruction that describes the task:
### Input:
Return the text representation of the notebook
### Response:
def writes(self, nb, metadata=None, **kwargs):
"""Return the text representation of the notebook"""
if self.fmt.get('format_name') == 'pandoc':
metadata = insert_jupytext_info_and_filter_metadata(metadata, self.ext, self.implementation)
cells = []
for cell in nb.cells:
cell_metadata = filter_metadata(copy(cell.metadata),
self.fmt.get('cell_metadata_filter'),
_IGNORE_CELL_METADATA)
if cell.cell_type == 'code':
cells.append(new_code_cell(source=cell.source, metadata=cell_metadata))
else:
cells.append(NotebookNode(source=cell.source, metadata=cell_metadata, cell_type=cell.cell_type))
return notebook_to_md(new_notebook(metadata=metadata, cells=cells))
# Copy the notebook, in order to be sure we do not modify the original notebook
nb = new_notebook(cells=nb.cells, metadata=deepcopy(metadata or nb.metadata))
metadata = nb.metadata
default_language = default_language_from_metadata_and_ext(metadata, self.implementation.extension)
self.update_fmt_with_notebook_options(nb.metadata)
if 'main_language' in metadata.get('jupytext', {}):
del metadata['jupytext']['main_language']
header = encoding_and_executable(nb, metadata, self.ext)
header_content, header_lines_to_next_cell = metadata_and_cell_to_header(nb, metadata,
self.implementation, self.ext)
header.extend(header_content)
cell_exporters = []
looking_for_first_markdown_cell = (self.implementation.format_name and
self.implementation.format_name.startswith('sphinx'))
split_at_heading = self.fmt.get('split_at_heading', False)
for cell in nb.cells:
if looking_for_first_markdown_cell and cell.cell_type == 'markdown':
cell.metadata.setdefault('cell_marker', '"""')
looking_for_first_markdown_cell = False
cell_exporters.append(self.implementation.cell_exporter_class(cell, default_language, self.fmt))
texts = [cell.cell_to_text() for cell in cell_exporters]
lines = []
# concatenate cells in reverse order to determine how many blank lines (pep8)
for i, cell in reversed(list(enumerate(cell_exporters))):
text = cell.remove_eoc_marker(texts[i], lines)
if i == 0 and self.implementation.format_name and \
self.implementation.format_name.startswith('sphinx') and \
(text in [['%matplotlib inline'], ['# %matplotlib inline']]):
continue
lines_to_next_cell = cell.lines_to_next_cell
if lines_to_next_cell is None:
lines_to_next_cell = pep8_lines_between_cells(text, lines, self.implementation.extension)
text.extend([''] * lines_to_next_cell)
# two blank lines between markdown cells in Rmd when those do not have explicit region markers
if self.ext in ['.Rmd', '.md'] and not cell.is_code():
if (i + 1 < len(cell_exporters) and not cell_exporters[i + 1].is_code() and
not texts[i][0].startswith('<!-- #region') and
not texts[i + 1][0].startswith('<!-- #region') and
(not split_at_heading or not (texts[i + 1] and texts[i + 1][0].startswith('#')))):
text.append('')
# "" between two consecutive code cells in sphinx
if self.implementation.format_name.startswith('sphinx') and cell.is_code():
if i + 1 < len(cell_exporters) and cell_exporters[i + 1].is_code():
text.append('""')
if i + 1 < len(cell_exporters):
lines = cell_exporters[i + 1].simplify_soc_marker(lines, text)
lines = text + lines
if header_lines_to_next_cell is None:
header_lines_to_next_cell = pep8_lines_between_cells(header_content, lines, self.implementation.extension)
header.extend([''] * header_lines_to_next_cell)
if cell_exporters:
lines = cell_exporters[0].simplify_soc_marker(lines, header)
return '\n'.join(header + lines) |
def taper(self):
"""Taper the spectrum by adding zero flux to each end.
This is similar to :meth:`SpectralElement.taper`.
There is no check to see if the spectrum is already tapered.
Hence, calling this on a tapered spectrum will result in
multiple zero-flux entries at both ends.
The wavelengths to use for the new first and last points are
calculated by using the same ratio as for the two interior points
used at each end.
Returns
-------
OutSpec : `TabularSourceSpectrum`
Tapered spectrum.
"""
OutSpec = TabularSourceSpectrum()
wcopy = N.zeros(self._wavetable.size+2, dtype=N.float64)
fcopy = N.zeros(self._fluxtable.size+2, dtype=N.float64)
wcopy[1:-1] = self._wavetable
fcopy[1:-1] = self._fluxtable
fcopy[0] = 0.0
fcopy[-1] = 0.0
# The wavelengths to use for the first and last points are
# calculated by using the same ratio as for the 2 interior points
wcopy[0] = wcopy[1]*wcopy[1]/wcopy[2]
wcopy[-1] = wcopy[-2]*wcopy[-2]/wcopy[-3]
OutSpec._wavetable = wcopy
OutSpec._fluxtable = fcopy
OutSpec.waveunits = units.Units(str(self.waveunits))
OutSpec.fluxunits = units.Units(str(self.fluxunits))
return OutSpec | Taper the spectrum by adding zero flux to each end.
This is similar to :meth:`SpectralElement.taper`.
There is no check to see if the spectrum is already tapered.
Hence, calling this on a tapered spectrum will result in
multiple zero-flux entries at both ends.
The wavelengths to use for the new first and last points are
calculated by using the same ratio as for the two interior points
used at each end.
Returns
-------
OutSpec : `TabularSourceSpectrum`
Tapered spectrum. | Below is the instruction that describes the task:
### Input:
Taper the spectrum by adding zero flux to each end.
This is similar to :meth:`SpectralElement.taper`.
There is no check to see if the spectrum is already tapered.
Hence, calling this on a tapered spectrum will result in
multiple zero-flux entries at both ends.
The wavelengths to use for the new first and last points are
calculated by using the same ratio as for the two interior points
used at each end.
Returns
-------
OutSpec : `TabularSourceSpectrum`
Tapered spectrum.
### Response:
def taper(self):
"""Taper the spectrum by adding zero flux to each end.
This is similar to :meth:`SpectralElement.taper`.
There is no check to see if the spectrum is already tapered.
Hence, calling this on a tapered spectrum will result in
multiple zero-flux entries at both ends.
The wavelengths to use for the new first and last points are
calculated by using the same ratio as for the two interior points
used at each end.
Returns
-------
OutSpec : `TabularSourceSpectrum`
Tapered spectrum.
"""
OutSpec = TabularSourceSpectrum()
wcopy = N.zeros(self._wavetable.size+2, dtype=N.float64)
fcopy = N.zeros(self._fluxtable.size+2, dtype=N.float64)
wcopy[1:-1] = self._wavetable
fcopy[1:-1] = self._fluxtable
fcopy[0] = 0.0
fcopy[-1] = 0.0
# The wavelengths to use for the first and last points are
# calculated by using the same ratio as for the 2 interior points
wcopy[0] = wcopy[1]*wcopy[1]/wcopy[2]
wcopy[-1] = wcopy[-2]*wcopy[-2]/wcopy[-3]
OutSpec._wavetable = wcopy
OutSpec._fluxtable = fcopy
OutSpec.waveunits = units.Units(str(self.waveunits))
OutSpec.fluxunits = units.Units(str(self.fluxunits))
return OutSpec |
def update(self):
"""Gets the latest version of your metadata from the infrastructure and updates your local copy
Returns `True` if successful, `False` otherwise - OR -
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
"""
graph = Graph()
graph.parse(data=self.__parent.get_meta_rdf(fmt=self.__fmt), format=self.__fmt)
self._graph = graph | Gets the latest version of your metadata from the infrastructure and updates your local copy
Returns `True` if successful, `False` otherwise - OR -
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure | Below is the instruction that describes the task:
### Input:
Gets the latest version of your metadata from the infrastructure and updates your local copy
Returns `True` if successful, `False` otherwise - OR -
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
### Response:
def update(self):
"""Gets the latest version of your metadata from the infrastructure and updates your local copy
Returns `True` if successful, `False` otherwise - OR -
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
"""
graph = Graph()
graph.parse(data=self.__parent.get_meta_rdf(fmt=self.__fmt), format=self.__fmt)
self._graph = graph |
def memoize(fn):
"""
Simple memoization decorator for functions and methods,
assumes that all arguments to the function can be hashed and
compared.
"""
memoized_values = {}
@wraps(fn)
def wrapped_fn(*args, **kwargs):
key = (args, tuple(sorted(kwargs.items())))
try:
return memoized_values[key]
except KeyError:
memoized_values[key] = fn(*args, **kwargs)
return memoized_values[key]
return wrapped_fn | Simple memoization decorator for functions and methods,
assumes that all arguments to the function can be hashed and
compared. | Below is the instruction that describes the task:
### Input:
Simple memoization decorator for functions and methods,
assumes that all arguments to the function can be hashed and
compared.
### Response:
def memoize(fn):
"""
Simple memoization decorator for functions and methods,
assumes that all arguments to the function can be hashed and
compared.
"""
memoized_values = {}
@wraps(fn)
def wrapped_fn(*args, **kwargs):
key = (args, tuple(sorted(kwargs.items())))
try:
return memoized_values[key]
except KeyError:
memoized_values[key] = fn(*args, **kwargs)
return memoized_values[key]
return wrapped_fn |
def discover(url, options={}):
"""
Retrieve the API definition from the given URL and construct
a Patchboard to interface with it.
"""
try:
resp = requests.get(url, headers=Patchboard.default_headers)
except Exception as e:
raise PatchboardError("Problem discovering API: {0}".format(e))
# Parse as JSON (Requests uses json.loads())
try:
api_spec = resp.json()
except ValueError as e:
raise PatchboardError("Unparseable API description: {0}".format(e))
# Return core handle object
return Patchboard(api_spec, options) | Retrieve the API definition from the given URL and construct
a Patchboard to interface with it. | Below is the instruction that describes the task:
### Input:
Retrieve the API definition from the given URL and construct
a Patchboard to interface with it.
### Response:
def discover(url, options={}):
"""
Retrieve the API definition from the given URL and construct
a Patchboard to interface with it.
"""
try:
resp = requests.get(url, headers=Patchboard.default_headers)
except Exception as e:
raise PatchboardError("Problem discovering API: {0}".format(e))
# Parse as JSON (Requests uses json.loads())
try:
api_spec = resp.json()
except ValueError as e:
raise PatchboardError("Unparseable API description: {0}".format(e))
# Return core handle object
return Patchboard(api_spec, options) |
def fix_tour(self, tour):
"""
Test each scaffold if dropping does not decrease LMS.
"""
scaffolds, oos = zip(*tour)
keep = set()
for mlg in self.linkage_groups:
lg = mlg.lg
for s, o in tour:
i = scaffolds.index(s)
L = [self.get_series(lg, x, xo) for x, xo in tour[:i]]
U = [self.get_series(lg, x, xo) for x, xo in tour[i + 1:]]
L, U = list(flatten(L)), list(flatten(U))
M = self.get_series(lg, s, o)
score_with = lms(L + M + U)[0]
score_without = lms(L + U)[0]
assert score_with >= score_without
if score_with > score_without:
keep.add(s)
dropped = len(tour) - len(keep)
logging.debug("Dropped {0} minor scaffolds".format(dropped))
return [(s, o) for (s, o) in tour if s in keep] | Test each scaffold if dropping does not decrease LMS. | Below is the instruction that describes the task:
### Input:
Test each scaffold if dropping does not decrease LMS.
### Response:
def fix_tour(self, tour):
"""
Test each scaffold if dropping does not decrease LMS.
"""
scaffolds, oos = zip(*tour)
keep = set()
for mlg in self.linkage_groups:
lg = mlg.lg
for s, o in tour:
i = scaffolds.index(s)
L = [self.get_series(lg, x, xo) for x, xo in tour[:i]]
U = [self.get_series(lg, x, xo) for x, xo in tour[i + 1:]]
L, U = list(flatten(L)), list(flatten(U))
M = self.get_series(lg, s, o)
score_with = lms(L + M + U)[0]
score_without = lms(L + U)[0]
assert score_with >= score_without
if score_with > score_without:
keep.add(s)
dropped = len(tour) - len(keep)
logging.debug("Dropped {0} minor scaffolds".format(dropped))
return [(s, o) for (s, o) in tour if s in keep] |
def validate(self):
"""Validate that the FoldCountContextField is correctly representable."""
if not isinstance(self.fold_scope_location, FoldScopeLocation):
raise TypeError(u'Expected FoldScopeLocation fold_scope_location, got: {} {}'.format(
type(self.fold_scope_location), self.fold_scope_location))
if self.fold_scope_location.field != COUNT_META_FIELD_NAME:
raise AssertionError(u'Unexpected field in the FoldScopeLocation of this '
u'FoldCountContextField object: {} {}'
.format(self.fold_scope_location, self)) | Validate that the FoldCountContextField is correctly representable. | Below is the instruction that describes the task:
### Input:
Validate that the FoldCountContextField is correctly representable.
### Response:
def validate(self):
"""Validate that the FoldCountContextField is correctly representable."""
if not isinstance(self.fold_scope_location, FoldScopeLocation):
raise TypeError(u'Expected FoldScopeLocation fold_scope_location, got: {} {}'.format(
type(self.fold_scope_location), self.fold_scope_location))
if self.fold_scope_location.field != COUNT_META_FIELD_NAME:
raise AssertionError(u'Unexpected field in the FoldScopeLocation of this '
u'FoldCountContextField object: {} {}'
.format(self.fold_scope_location, self)) |
def handle_execution_mode(self, container_state, next_child_state_to_execute=None):
"""Checks the current execution status and returns it.
Depending on the execution state, the calling thread (currently only hierarchy states) waits for the
execution to continue.
If the execution mode is any of the step modes, a condition variable stops the current execution,
until it gets notified by the step_*() or backward_step() functions.
:param container_state: the container_state, for which the execution mode is handled
:param next_child_state_to_execute: is the next child state of :param state to be executed
:return: the current state machine execution status
"""
self.state_counter_lock.acquire()
self.state_counter += 1
# logger.verbose("Increase state_counter!" + str(self.state_counter))
self.state_counter_lock.release()
woke_up_from_pause_or_step_mode = False
if (self._status.execution_mode is StateMachineExecutionStatus.PAUSED) \
or (self._status.execution_mode is StateMachineExecutionStatus.STEP_MODE):
self._wait_while_in_pause_or_in_step_mode()
# new command was triggered => execution command has to handled
container_state.execution_history.new_execution_command_handled = False
woke_up_from_pause_or_step_mode = True
# no elif here: if the execution woke up from e.g. paused mode, it has to check the current execution mode
if self._status.execution_mode is StateMachineExecutionStatus.STARTED:
# logger.debug("Execution engine started!")
pass
elif self._status.execution_mode is StateMachineExecutionStatus.STOPPED:
logger.debug("Execution engine stopped. State '{0}' is going to quit in the case of "
"no preemption handling has to be done!".format(container_state.name))
elif self._status.execution_mode is StateMachineExecutionStatus.FINISHED:
# this must never happen during execution of the execution engine
raise Exception
else: # all other step modes
logger.verbose("before wait")
self._wait_if_required(container_state, next_child_state_to_execute, woke_up_from_pause_or_step_mode)
logger.verbose("after wait")
# calculate states to which should be run
if self._status.execution_mode is StateMachineExecutionStatus.BACKWARD:
pass
elif self._status.execution_mode is StateMachineExecutionStatus.FORWARD_INTO:
pass
elif self._status.execution_mode is StateMachineExecutionStatus.FORWARD_OVER:
if not container_state.execution_history.new_execution_command_handled:
# the state that called this method is a hierarchy state => thus we save this state and wait until
# thise very state will execute its next state; only then we will wait on the condition variable
self.run_to_states.append(container_state.get_path())
else:
pass
elif self._status.execution_mode is StateMachineExecutionStatus.FORWARD_OUT:
from rafcon.core.states.state import State
if isinstance(container_state.parent, State):
if not container_state.execution_history.new_execution_command_handled:
from rafcon.core.states.library_state import LibraryState
if isinstance(container_state.parent, LibraryState):
parent_path = container_state.parent.parent.get_path()
else:
parent_path = container_state.parent.get_path()
self.run_to_states.append(parent_path)
else:
pass
else:
# if step_out is called from the highest level just run the state machine to the end
self.run_to_states = []
self.set_execution_mode(StateMachineExecutionStatus.STARTED)
elif self._status.execution_mode is StateMachineExecutionStatus.RUN_TO_SELECTED_STATE:
# "run_to_states" were already updated thus doing nothing
pass
container_state.execution_history.new_execution_command_handled = True
# in the case that the stop method wakes up the paused or step mode a StateMachineExecutionStatus.STOPPED
# will be returned
return_value = self._status.execution_mode
return return_value | Checks the current execution status and returns it.
Depending on the execution state, the calling thread (currently only hierarchy states) waits for the
execution to continue.
If the execution mode is any of the step modes, a condition variable stops the current execution,
until it gets notified by the step_*() or backward_step() functions.
:param container_state: the container_state, for which the execution mode is handled
:param next_child_state_to_execute: is the next child state of :param state to be executed
:return: the current state machine execution status | Below is the instruction that describes the task:
### Input:
Checks the current execution status and returns it.
Depending on the execution state, the calling thread (currently only hierarchy states) waits for the
execution to continue.
If the execution mode is any of the step modes, a condition variable stops the current execution,
until it gets notified by the step_*() or backward_step() functions.
:param container_state: the container_state, for which the execution mode is handled
:param next_child_state_to_execute: is the next child state of :param state to be executed
:return: the current state machine execution status
### Response:
def handle_execution_mode(self, container_state, next_child_state_to_execute=None):
"""Checks the current execution status and returns it.
Depending on the execution state, the calling thread (currently only hierarchy states) waits for the
execution to continue.
If the execution mode is any of the step modes, a condition variable stops the current execution,
until it gets notified by the step_*() or backward_step() functions.
:param container_state: the container_state, for which the execution mode is handled
:param next_child_state_to_execute: is the next child state of :param state to be executed
:return: the current state machine execution status
"""
self.state_counter_lock.acquire()
self.state_counter += 1
# logger.verbose("Increase state_counter!" + str(self.state_counter))
self.state_counter_lock.release()
woke_up_from_pause_or_step_mode = False
if (self._status.execution_mode is StateMachineExecutionStatus.PAUSED) \
or (self._status.execution_mode is StateMachineExecutionStatus.STEP_MODE):
self._wait_while_in_pause_or_in_step_mode()
# new command was triggered => execution command has to handled
container_state.execution_history.new_execution_command_handled = False
woke_up_from_pause_or_step_mode = True
# no elif here: if the execution woke up from e.g. paused mode, it has to check the current execution mode
if self._status.execution_mode is StateMachineExecutionStatus.STARTED:
# logger.debug("Execution engine started!")
pass
elif self._status.execution_mode is StateMachineExecutionStatus.STOPPED:
logger.debug("Execution engine stopped. State '{0}' is going to quit in the case of "
"no preemption handling has to be done!".format(container_state.name))
elif self._status.execution_mode is StateMachineExecutionStatus.FINISHED:
# this must never happen during execution of the execution engine
raise Exception
else: # all other step modes
logger.verbose("before wait")
self._wait_if_required(container_state, next_child_state_to_execute, woke_up_from_pause_or_step_mode)
logger.verbose("after wait")
# calculate states to which should be run
if self._status.execution_mode is StateMachineExecutionStatus.BACKWARD:
pass
elif self._status.execution_mode is StateMachineExecutionStatus.FORWARD_INTO:
pass
elif self._status.execution_mode is StateMachineExecutionStatus.FORWARD_OVER:
if not container_state.execution_history.new_execution_command_handled:
# the state that called this method is a hierarchy state => thus we save this state and wait until
# thise very state will execute its next state; only then we will wait on the condition variable
self.run_to_states.append(container_state.get_path())
else:
pass
elif self._status.execution_mode is StateMachineExecutionStatus.FORWARD_OUT:
from rafcon.core.states.state import State
if isinstance(container_state.parent, State):
if not container_state.execution_history.new_execution_command_handled:
from rafcon.core.states.library_state import LibraryState
if isinstance(container_state.parent, LibraryState):
parent_path = container_state.parent.parent.get_path()
else:
parent_path = container_state.parent.get_path()
self.run_to_states.append(parent_path)
else:
pass
else:
# if step_out is called from the highest level just run the state machine to the end
self.run_to_states = []
self.set_execution_mode(StateMachineExecutionStatus.STARTED)
elif self._status.execution_mode is StateMachineExecutionStatus.RUN_TO_SELECTED_STATE:
# "run_to_states" were already updated thus doing nothing
pass
container_state.execution_history.new_execution_command_handled = True
# in the case that the stop method wakes up the paused or step mode a StateMachineExecutionStatus.STOPPED
# will be returned
return_value = self._status.execution_mode
return return_value |
def define_density_matrix(Ne, explicitly_hermitian=False, normalized=False,
variables=None):
r"""Return a symbolic density matrix.
The arguments are
Ne (integer):
The number of atomic states.
explicitly_hermitian (boolean):
Whether to make $\rho_{ij}=\bar{\rho}_{ij}$ for $i<j$
normalized (boolean):
Whether to make $\rho_{11}=1-\sum_{i>1} \rho_{ii}$
A very simple example:
>>> define_density_matrix(2)
Matrix([
[rho11, rho12],
[rho21, rho22]])
The density matrix can be made explicitly hermitian
>>> define_density_matrix(2, explicitly_hermitian=True)
Matrix([
[rho11, conjugate(rho21)],
[rho21, rho22]])
or normalized
>>> define_density_matrix(2, normalized=True)
Matrix([
[-rho22 + 1, rho12],
[ rho21, rho22]])
or it can be made an explicit function of given variables
>>> from sympy import symbols
>>> t, z = symbols("t, z", positive=True)
>>> define_density_matrix(2, variables=[t, z])
Matrix([
[rho11(t, z), rho12(t, z)],
[rho21(t, z), rho22(t, z)]])
"""
if Ne > 9:
comma = ","
name = r"\rho"
open_brace = "_{"
close_brace = "}"
else:
comma = ""
name = "rho"
open_brace = ""
close_brace = ""
rho = []
for i in range(Ne):
row_rho = []
for j in range(Ne):
if i == j:
row_rho += [define_symbol(name, open_brace, comma, i, j,
close_brace, variables,
positive=True)]
elif i > j:
row_rho += [define_symbol(name, open_brace, comma, i, j,
close_brace, variables)]
else:
if explicitly_hermitian:
row_rho += [conjugate(define_symbol(name, open_brace,
comma, j, i,
close_brace,
variables))]
else:
row_rho += [define_symbol(name, open_brace, comma, i, j,
close_brace, variables)]
rho += [row_rho]
if normalized:
rho11 = 1-sum([rho[i][i] for i in range(1, Ne)])
rho[0][0] = rho11
rho = Matrix(rho)
return rho | r"""Return a symbolic density matrix.
The arguments are
Ne (integer):
The number of atomic states.
explicitly_hermitian (boolean):
Whether to make $\rho_{ij}=\bar{\rho}_{ij}$ for $i<j$
normalized (boolean):
Whether to make $\rho_{11}=1-\sum_{i>1} \rho_{ii}$
A very simple example:
>>> define_density_matrix(2)
Matrix([
[rho11, rho12],
[rho21, rho22]])
The density matrix can be made explicitly hermitian
>>> define_density_matrix(2, explicitly_hermitian=True)
Matrix([
[rho11, conjugate(rho21)],
[rho21, rho22]])
or normalized
>>> define_density_matrix(2, normalized=True)
Matrix([
[-rho22 + 1, rho12],
[ rho21, rho22]])
or it can be made an explicit function of given variables
>>> from sympy import symbols
>>> t, z = symbols("t, z", positive=True)
>>> define_density_matrix(2, variables=[t, z])
Matrix([
[rho11(t, z), rho12(t, z)],
[rho21(t, z), rho22(t, z)]]) | Below is the instruction that describes the task:
### Input:
r"""Return a symbolic density matrix.
The arguments are
Ne (integer):
The number of atomic states.
explicitly_hermitian (boolean):
Whether to make $\rho_{ij}=\bar{\rho}_{ij}$ for $i<j$
normalized (boolean):
Whether to make $\rho_{11}=1-\sum_{i>1} \rho_{ii}$
A very simple example:
>>> define_density_matrix(2)
Matrix([
[rho11, rho12],
[rho21, rho22]])
The density matrix can be made explicitly hermitian
>>> define_density_matrix(2, explicitly_hermitian=True)
Matrix([
[rho11, conjugate(rho21)],
[rho21, rho22]])
or normalized
>>> define_density_matrix(2, normalized=True)
Matrix([
[-rho22 + 1, rho12],
[ rho21, rho22]])
or it can be made an explicit function of given variables
>>> from sympy import symbols
>>> t, z = symbols("t, z", positive=True)
>>> define_density_matrix(2, variables=[t, z])
Matrix([
[rho11(t, z), rho12(t, z)],
[rho21(t, z), rho22(t, z)]])
### Response:
def define_density_matrix(Ne, explicitly_hermitian=False, normalized=False,
variables=None):
r"""Return a symbolic density matrix.
The arguments are
Ne (integer):
The number of atomic states.
explicitly_hermitian (boolean):
Whether to make $\rho_{ij}=\bar{\rho}_{ij}$ for $i<j$
normalized (boolean):
Whether to make $\rho_{11}=1-\sum_{i>1} \rho_{ii}$
A very simple example:
>>> define_density_matrix(2)
Matrix([
[rho11, rho12],
[rho21, rho22]])
The density matrix can be made explicitly hermitian
>>> define_density_matrix(2, explicitly_hermitian=True)
Matrix([
[rho11, conjugate(rho21)],
[rho21, rho22]])
or normalized
>>> define_density_matrix(2, normalized=True)
Matrix([
[-rho22 + 1, rho12],
[ rho21, rho22]])
or it can be made an explicit function of given variables
>>> from sympy import symbols
>>> t, z = symbols("t, z", positive=True)
>>> define_density_matrix(2, variables=[t, z])
Matrix([
[rho11(t, z), rho12(t, z)],
[rho21(t, z), rho22(t, z)]])
"""
if Ne > 9:
comma = ","
name = r"\rho"
open_brace = "_{"
close_brace = "}"
else:
comma = ""
name = "rho"
open_brace = ""
close_brace = ""
rho = []
for i in range(Ne):
row_rho = []
for j in range(Ne):
if i == j:
row_rho += [define_symbol(name, open_brace, comma, i, j,
close_brace, variables,
positive=True)]
elif i > j:
row_rho += [define_symbol(name, open_brace, comma, i, j,
close_brace, variables)]
else:
if explicitly_hermitian:
row_rho += [conjugate(define_symbol(name, open_brace,
comma, j, i,
close_brace,
variables))]
else:
row_rho += [define_symbol(name, open_brace, comma, i, j,
close_brace, variables)]
rho += [row_rho]
if normalized:
rho11 = 1-sum([rho[i][i] for i in range(1, Ne)])
rho[0][0] = rho11
rho = Matrix(rho)
return rho |
def remove_annotations(self, remove_sequence):
"""
Removes several annotations from this AST.
:param remove_sequence: a sequence/set of the annotations to remove
:returns: a new AST, with the annotations removed
"""
return self._apply_to_annotations(lambda alist: tuple(oa for oa in alist if oa not in remove_sequence)) | Removes several annotations from this AST.
:param remove_sequence: a sequence/set of the annotations to remove
:returns: a new AST, with the annotations removed | Below is the instruction that describes the task:
### Input:
Removes several annotations from this AST.
:param remove_sequence: a sequence/set of the annotations to remove
:returns: a new AST, with the annotations removed
### Response:
def remove_annotations(self, remove_sequence):
"""
Removes several annotations from this AST.
:param remove_sequence: a sequence/set of the annotations to remove
:returns: a new AST, with the annotations removed
"""
return self._apply_to_annotations(lambda alist: tuple(oa for oa in alist if oa not in remove_sequence)) |
def UpdateResourcesFromDict(dstpath, res, types=None, names=None,
languages=None):
"""
Update or add resources from resource dict in dll/exe file dstpath.
types = a list of resource types to update (None = all)
names = a list of resource names to update (None = all)
languages = a list of resource languages to update (None = all)
"""
if types: types = set(types)
if names: names = set(names)
if langauges: languages = set(languages)
for type_ in res:
if not types or type_ in types:
for name in res[type_]:
if not names or name in names:
for language in res[type_][name]:
if not languages or language in languages:
UpdateResources(dstpath,
res[type_][name][language],
[type_], [name], [language]) | Update or add resources from resource dict in dll/exe file dstpath.
types = a list of resource types to update (None = all)
names = a list of resource names to update (None = all)
languages = a list of resource languages to update (None = all) | Below is the instruction that describes the task:
### Input:
Update or add resources from resource dict in dll/exe file dstpath.
types = a list of resource types to update (None = all)
names = a list of resource names to update (None = all)
languages = a list of resource languages to update (None = all)
### Response:
def UpdateResourcesFromDict(dstpath, res, types=None, names=None,
languages=None):
"""
Update or add resources from resource dict in dll/exe file dstpath.
types = a list of resource types to update (None = all)
names = a list of resource names to update (None = all)
languages = a list of resource languages to update (None = all)
"""
if types: types = set(types)
if names: names = set(names)
if langauges: languages = set(languages)
for type_ in res:
if not types or type_ in types:
for name in res[type_]:
if not names or name in names:
for language in res[type_][name]:
if not languages or language in languages:
UpdateResources(dstpath,
res[type_][name][language],
[type_], [name], [language]) |
def pixbuf_to_cairo_png(pixbuf):
"""Convert from PixBuf to ImageSurface, by going through the PNG format.
This method is 10~30x slower than GDK but always works.
"""
buffer_pointer = ffi.new('gchar **')
buffer_size = ffi.new('gsize *')
error = ffi.new('GError **')
handle_g_error(error, pixbuf.save_to_buffer(
buffer_pointer, buffer_size, ffi.new('char[]', b'png'), error,
ffi.new('char[]', b'compression'), ffi.new('char[]', b'0'),
ffi.NULL))
png_bytes = ffi.buffer(buffer_pointer[0], buffer_size[0])
return ImageSurface.create_from_png(BytesIO(png_bytes)) | Convert from PixBuf to ImageSurface, by going through the PNG format.
This method is 10~30x slower than GDK but always works. | Below is the the instruction that describes the task:
### Input:
Convert from PixBuf to ImageSurface, by going through the PNG format.
This method is 10~30x slower than GDK but always works. | Below is the instruction that describes the task:
### Response:
def pixbuf_to_cairo_png(pixbuf):
"""Convert from PixBuf to ImageSurface, by going through the PNG format.
This method is 10~30x slower than GDK but always works.
"""
buffer_pointer = ffi.new('gchar **')
buffer_size = ffi.new('gsize *')
error = ffi.new('GError **')
handle_g_error(error, pixbuf.save_to_buffer(
buffer_pointer, buffer_size, ffi.new('char[]', b'png'), error,
ffi.new('char[]', b'compression'), ffi.new('char[]', b'0'),
ffi.NULL))
png_bytes = ffi.buffer(buffer_pointer[0], buffer_size[0])
return ImageSurface.create_from_png(BytesIO(png_bytes)) |
def select(self,sql):
"""
Execute an SQL select statement and stuff the results into a
dictionary.
sql = the (case sensitve) SQL statment to execute
"""
if not self.curs:
raise LIGOLwDBError, "Database connection not initalized"
if len(self.table) != 0:
raise LIGOLwDBError, 'attempt to fill non-empty table from database'
ligolw = ''
self.table = {}
sqltypes = {
-2 : 'ilwd:char_u',
1 : 'lstring',
3 : 'real_8',
4 : 'int_4s',
5 : 'int_2s',
7 : 'real_4',
8 : 'real_8',
12 : 'lstring',
93 : 'lstring',
}
try:
tab = re.compile(r'[Ff][Rr][Oo][Mm]\s+([A-Za-z0-0_]+)([,\s]+|$)').search(sql).group(1)
except AttributeError:
raise LIGOLwDBError, 'could not find table name in query ' + str(sql)
self.table[tab] = {
'pos' : 0,
'column' : {},
'stream' : (),
'query' : sql
}
try:
self.curs.execute(sql)
except DB2.Error, e:
raise LIGOLwDBError, e[2]
desc = self.curs.description
for col,typ,disp,intsz,prec,sca,nul in desc:
try:
self.table[tab]['column'][col] = sqltypes[typ]
except KeyError:
raise LIGOLwDBError, 'unknown type returned by database ' + str(typ)
self.table[tab].setdefault('orderedcol',[]).append(col)
try:
self.table[tab]['stream'] = self.curs.fetchall()
except DB2.Error, e:
raise LIGOLwDBError, e[2]
return len(self.table[tab]['stream']) | Execute an SQL select statement and stuff the results into a
dictionary.
sql = the (case sensitve) SQL statment to execute | Below is the instruction that describes the task:
### Input:
Execute an SQL select statement and stuff the results into a
dictionary.
sql = the (case sensitve) SQL statment to execute
### Response:
def select(self,sql):
"""
Execute an SQL select statement and stuff the results into a
dictionary.
sql = the (case sensitve) SQL statment to execute
"""
if not self.curs:
raise LIGOLwDBError, "Database connection not initalized"
if len(self.table) != 0:
raise LIGOLwDBError, 'attempt to fill non-empty table from database'
ligolw = ''
self.table = {}
sqltypes = {
-2 : 'ilwd:char_u',
1 : 'lstring',
3 : 'real_8',
4 : 'int_4s',
5 : 'int_2s',
7 : 'real_4',
8 : 'real_8',
12 : 'lstring',
93 : 'lstring',
}
try:
tab = re.compile(r'[Ff][Rr][Oo][Mm]\s+([A-Za-z0-0_]+)([,\s]+|$)').search(sql).group(1)
except AttributeError:
raise LIGOLwDBError, 'could not find table name in query ' + str(sql)
self.table[tab] = {
'pos' : 0,
'column' : {},
'stream' : (),
'query' : sql
}
try:
self.curs.execute(sql)
except DB2.Error, e:
raise LIGOLwDBError, e[2]
desc = self.curs.description
for col,typ,disp,intsz,prec,sca,nul in desc:
try:
self.table[tab]['column'][col] = sqltypes[typ]
except KeyError:
raise LIGOLwDBError, 'unknown type returned by database ' + str(typ)
self.table[tab].setdefault('orderedcol',[]).append(col)
try:
self.table[tab]['stream'] = self.curs.fetchall()
except DB2.Error, e:
raise LIGOLwDBError, e[2]
return len(self.table[tab]['stream']) |
def _process_redirect(self):
'''Update the Redirect Tracker.'''
_logger.debug('Handling redirect.')
if self._redirect_tracker.exceeded():
raise ProtocolError('Too many redirects.')
try:
url = self._redirect_tracker.next_location()
if not url:
raise ProtocolError('Redirect location missing.')
if self._redirect_tracker.is_repeat():
_logger.debug('Got redirect is repeat.')
request = self._original_request.copy()
request.url = url
else:
request = self._request_factory(url)
request.prepare_for_send()
except ValueError as error:
raise ProtocolError('Invalid redirect location.') from error
self._next_request = request
_logger.debug('Updated next redirect request to {0}.'.format(request)) | Update the Redirect Tracker. | Below is the instruction that describes the task:
### Input:
Update the Redirect Tracker.
### Response:
def _process_redirect(self):
'''Update the Redirect Tracker.'''
_logger.debug('Handling redirect.')
if self._redirect_tracker.exceeded():
raise ProtocolError('Too many redirects.')
try:
url = self._redirect_tracker.next_location()
if not url:
raise ProtocolError('Redirect location missing.')
if self._redirect_tracker.is_repeat():
_logger.debug('Got redirect is repeat.')
request = self._original_request.copy()
request.url = url
else:
request = self._request_factory(url)
request.prepare_for_send()
except ValueError as error:
raise ProtocolError('Invalid redirect location.') from error
self._next_request = request
_logger.debug('Updated next redirect request to {0}.'.format(request)) |
def hmset(self, name, mapping):
"""
Sets or updates the fields with their corresponding values.
:param name: str the name of the redis key
:param mapping: a dict with keys and values
:return: Future()
"""
with self.pipe as pipe:
m_encode = self.memberparse.encode
mapping = {m_encode(k): self._value_encode(k, v)
for k, v in mapping.items()}
return pipe.hmset(self.redis_key(name), mapping) | Sets or updates the fields with their corresponding values.
:param name: str the name of the redis key
:param mapping: a dict with keys and values
:return: Future() | Below is the instruction that describes the task:
### Input:
Sets or updates the fields with their corresponding values.
:param name: str the name of the redis key
:param mapping: a dict with keys and values
:return: Future()
### Response:
def hmset(self, name, mapping):
"""
Sets or updates the fields with their corresponding values.
:param name: str the name of the redis key
:param mapping: a dict with keys and values
:return: Future()
"""
with self.pipe as pipe:
m_encode = self.memberparse.encode
mapping = {m_encode(k): self._value_encode(k, v)
for k, v in mapping.items()}
return pipe.hmset(self.redis_key(name), mapping) |
def _delta_sigma(**cosmo):
""" Perturb best-fit constant of proportionality Ascaling for
rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c)
Parameters
----------
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
float
The perturbed 'A' relation between rho_2 and rho_crit for the cosmology
Raises
------
"""
M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo)
perturbed_A = (0.796/cosmo['sigma_8']) * \
(M8_cosmo/2.5e14)**((cosmo['n']-0.963)/6)
return(perturbed_A) | Perturb best-fit constant of proportionality Ascaling for
rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c)
Parameters
----------
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
float
The perturbed 'A' relation between rho_2 and rho_crit for the cosmology
Raises
------ | Below is the instruction that describes the task:
### Input:
Perturb best-fit constant of proportionality Ascaling for
rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c)
Parameters
----------
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
float
The perturbed 'A' relation between rho_2 and rho_crit for the cosmology
Raises
------
### Response:
def _delta_sigma(**cosmo):
""" Perturb best-fit constant of proportionality Ascaling for
rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c)
Parameters
----------
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
float
The perturbed 'A' relation between rho_2 and rho_crit for the cosmology
Raises
------
"""
M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo)
perturbed_A = (0.796/cosmo['sigma_8']) * \
(M8_cosmo/2.5e14)**((cosmo['n']-0.963)/6)
return(perturbed_A) |
def _rewrite_tag_attrs(self, tag, tag_attrs, set_parsing_context=True):
"""Rewrite a tags attributes.
If set_parsing_context is false then the parsing context will not set.
If the head insert has not been added to the HTML being rewritten, there
is no parsing context and the tag is not in BEFORE_HEAD_TAGS then the
head_insert will be "inserted" and set to None
:param str tag: The name of the tag to be rewritten
:param list[tuple[str, str]] tag_attrs: A list of tuples representing
the tags attributes
:param bool set_parsing_context: Boolean indicating if the parsing
context should be set
:return: True
:rtype: bool
"""
# special case: head insertion, before-head tags
if (self.head_insert and
not self._wb_parse_context
and (tag not in self.BEFORE_HEAD_TAGS)):
self.out.write(self.head_insert)
self.head_insert = None
if set_parsing_context:
self._set_parse_context(tag, tag_attrs)
# attr rewriting
handler = self.rewrite_tags.get(tag)
if not handler:
handler = {}
self.out.write('<' + tag)
for attr_name, attr_value in tag_attrs:
empty_attr = False
if attr_value is None:
attr_value = ''
empty_attr = True
# special case: inline JS/event handler
if ((attr_value and attr_value.startswith('javascript:'))
or attr_name.startswith('on') and attr_name[2:3] != '-'):
attr_value = self._rewrite_script(attr_value, True)
# special case: inline CSS/style attribute
elif attr_name == 'style':
attr_value = self._rewrite_css(attr_value)
# special case: deprecated background attribute
elif attr_name == 'background':
rw_mod = 'im_'
attr_value = self._rewrite_url(attr_value, rw_mod)
# special case: srcset list
elif attr_name == 'srcset':
rw_mod = handler.get(attr_name, '')
attr_value = self._rewrite_srcset(attr_value, rw_mod)
# special case: disable crossorigin and integrity attr
# as they may interfere with rewriting semantics
elif attr_name in ('crossorigin', 'integrity'):
attr_name = '_' + attr_name
# special case: if rewrite_canon not set,
# don't rewrite rel=canonical
elif tag == 'link' and attr_name == 'href':
rw_mod = handler.get(attr_name)
attr_value = self._rewrite_link_href(attr_value, tag_attrs, rw_mod)
# special case: meta tag
elif (tag == 'meta') and (attr_name == 'content'):
if self.has_attr(tag_attrs, ('http-equiv', 'refresh')):
attr_value = self._rewrite_meta_refresh(attr_value)
elif self.has_attr(tag_attrs, ('http-equiv', 'content-security-policy')):
attr_name = '_' + attr_name
elif self.has_attr(tag_attrs, ('name', 'referrer')):
attr_value = 'no-referrer-when-downgrade'
elif attr_value.startswith(self.DATA_RW_PROTOCOLS):
rw_mod = handler.get(attr_name)
attr_value = self._rewrite_url(attr_value, rw_mod)
# special case: param value, conditional rewrite
elif (tag == 'param'):
if attr_value.startswith(self.DATA_RW_PROTOCOLS):
rw_mod = handler.get(attr_name)
attr_value = self._rewrite_url(attr_value, rw_mod)
# special case: data- attrs, conditional rewrite
elif attr_name and attr_value and attr_name.startswith('data-'):
if attr_value.startswith(self.DATA_RW_PROTOCOLS):
rw_mod = 'oe_'
attr_value = self._rewrite_url(attr_value, rw_mod)
# special case: base tag
elif (tag == 'base') and (attr_name == 'href') and attr_value:
rw_mod = handler.get(attr_name)
attr_value = self._rewrite_base(attr_value, rw_mod)
elif attr_name == 'href':
rw_mod = self.defmod
attr_value = self._rewrite_url(attr_value, rw_mod)
elif tag == 'script' and attr_name == 'src':
rw_mod = handler.get(attr_name)
ov = attr_value
attr_value = self._rewrite_url(attr_value, rw_mod)
if attr_value == ov and not ov.startswith(self.url_rewriter.NO_REWRITE_URI_PREFIX):
# URL not skipped, likely src='js/....', forcing abs to make sure, cause PHP MIME(JS) === HTML
attr_value = self._rewrite_url(attr_value, rw_mod, True)
self._write_attr('__wb_orig_src', ov, empty_attr=None)
else:
# rewrite url using tag handler
rw_mod = handler.get(attr_name)
if rw_mod is not None:
attr_value = self._rewrite_url(attr_value, rw_mod)
# write the attr!
self._write_attr(attr_name, attr_value, empty_attr)
return True | Rewrite a tags attributes.
If set_parsing_context is false then the parsing context will not set.
If the head insert has not been added to the HTML being rewritten, there
is no parsing context and the tag is not in BEFORE_HEAD_TAGS then the
head_insert will be "inserted" and set to None
:param str tag: The name of the tag to be rewritten
:param list[tuple[str, str]] tag_attrs: A list of tuples representing
the tags attributes
:param bool set_parsing_context: Boolean indicating if the parsing
context should be set
:return: True
:rtype: bool | Below is the instruction that describes the task:
### Input:
Rewrite a tags attributes.
If set_parsing_context is false then the parsing context will not set.
If the head insert has not been added to the HTML being rewritten, there
is no parsing context and the tag is not in BEFORE_HEAD_TAGS then the
head_insert will be "inserted" and set to None
:param str tag: The name of the tag to be rewritten
:param list[tuple[str, str]] tag_attrs: A list of tuples representing
the tags attributes
:param bool set_parsing_context: Boolean indicating if the parsing
context should be set
:return: True
:rtype: bool
### Response:
def _rewrite_tag_attrs(self, tag, tag_attrs, set_parsing_context=True):
"""Rewrite a tags attributes.
If set_parsing_context is false then the parsing context will not set.
If the head insert has not been added to the HTML being rewritten, there
is no parsing context and the tag is not in BEFORE_HEAD_TAGS then the
head_insert will be "inserted" and set to None
:param str tag: The name of the tag to be rewritten
:param list[tuple[str, str]] tag_attrs: A list of tuples representing
the tags attributes
:param bool set_parsing_context: Boolean indicating if the parsing
context should be set
:return: True
:rtype: bool
"""
# special case: head insertion, before-head tags
if (self.head_insert and
not self._wb_parse_context
and (tag not in self.BEFORE_HEAD_TAGS)):
self.out.write(self.head_insert)
self.head_insert = None
if set_parsing_context:
self._set_parse_context(tag, tag_attrs)
# attr rewriting
handler = self.rewrite_tags.get(tag)
if not handler:
handler = {}
self.out.write('<' + tag)
for attr_name, attr_value in tag_attrs:
empty_attr = False
if attr_value is None:
attr_value = ''
empty_attr = True
# special case: inline JS/event handler
if ((attr_value and attr_value.startswith('javascript:'))
or attr_name.startswith('on') and attr_name[2:3] != '-'):
attr_value = self._rewrite_script(attr_value, True)
# special case: inline CSS/style attribute
elif attr_name == 'style':
attr_value = self._rewrite_css(attr_value)
# special case: deprecated background attribute
elif attr_name == 'background':
rw_mod = 'im_'
attr_value = self._rewrite_url(attr_value, rw_mod)
# special case: srcset list
elif attr_name == 'srcset':
rw_mod = handler.get(attr_name, '')
attr_value = self._rewrite_srcset(attr_value, rw_mod)
# special case: disable crossorigin and integrity attr
# as they may interfere with rewriting semantics
elif attr_name in ('crossorigin', 'integrity'):
attr_name = '_' + attr_name
# special case: if rewrite_canon not set,
# don't rewrite rel=canonical
elif tag == 'link' and attr_name == 'href':
rw_mod = handler.get(attr_name)
attr_value = self._rewrite_link_href(attr_value, tag_attrs, rw_mod)
# special case: meta tag
elif (tag == 'meta') and (attr_name == 'content'):
if self.has_attr(tag_attrs, ('http-equiv', 'refresh')):
attr_value = self._rewrite_meta_refresh(attr_value)
elif self.has_attr(tag_attrs, ('http-equiv', 'content-security-policy')):
attr_name = '_' + attr_name
elif self.has_attr(tag_attrs, ('name', 'referrer')):
attr_value = 'no-referrer-when-downgrade'
elif attr_value.startswith(self.DATA_RW_PROTOCOLS):
rw_mod = handler.get(attr_name)
attr_value = self._rewrite_url(attr_value, rw_mod)
# special case: param value, conditional rewrite
elif (tag == 'param'):
if attr_value.startswith(self.DATA_RW_PROTOCOLS):
rw_mod = handler.get(attr_name)
attr_value = self._rewrite_url(attr_value, rw_mod)
# special case: data- attrs, conditional rewrite
elif attr_name and attr_value and attr_name.startswith('data-'):
if attr_value.startswith(self.DATA_RW_PROTOCOLS):
rw_mod = 'oe_'
attr_value = self._rewrite_url(attr_value, rw_mod)
# special case: base tag
elif (tag == 'base') and (attr_name == 'href') and attr_value:
rw_mod = handler.get(attr_name)
attr_value = self._rewrite_base(attr_value, rw_mod)
elif attr_name == 'href':
rw_mod = self.defmod
attr_value = self._rewrite_url(attr_value, rw_mod)
elif tag == 'script' and attr_name == 'src':
rw_mod = handler.get(attr_name)
ov = attr_value
attr_value = self._rewrite_url(attr_value, rw_mod)
if attr_value == ov and not ov.startswith(self.url_rewriter.NO_REWRITE_URI_PREFIX):
# URL not skipped, likely src='js/....', forcing abs to make sure, cause PHP MIME(JS) === HTML
attr_value = self._rewrite_url(attr_value, rw_mod, True)
self._write_attr('__wb_orig_src', ov, empty_attr=None)
else:
# rewrite url using tag handler
rw_mod = handler.get(attr_name)
if rw_mod is not None:
attr_value = self._rewrite_url(attr_value, rw_mod)
# write the attr!
self._write_attr(attr_name, attr_value, empty_attr)
return True |
def phonon_flow(workdir, scf_input, ph_inputs, with_nscf=False, with_ddk=False, with_dde=False,
manager=None, flow_class=PhononFlow, allocate=True):
"""
Build a :class:`PhononFlow` for phonon calculations.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
ph_inputs: List of Inputs for the phonon runs.
with_nscf: add an nscf task in front of al phonon tasks to make sure the q point is covered
with_ddk: add the ddk step
with_dde: add the dde step it the dde is set ddk is switched on automatically
manager: :class:`TaskManager` used to submit the jobs
Initialized from manager.yml if manager is None.
flow_class: Flow class
Returns:
:class:`Flow` object
"""
logger.critical("phonon_flow is deprecated and could give wrong results")
if with_dde:
with_ddk = True
natom = len(scf_input.structure)
# Create the container that will manage the different works.
flow = flow_class(workdir, manager=manager)
# Register the first work (GS calculation)
# register_task creates a work for the task, registers it to the flow and returns the work
# the 0the element of the work is the task
scf_task = flow.register_task(scf_input, task_class=ScfTask)[0]
# Build a temporary work with a shell manager just to run
# ABINIT to get the list of irreducible pertubations for this q-point.
shell_manager = flow.manager.to_shell_manager(mpi_procs=1)
if with_ddk:
logger.info('add ddk')
# TODO
# MG Warning: be careful here because one should use tolde or tolwfr (tolvrs shall not be used!)
ddk_input = ph_inputs[0].deepcopy()
ddk_input.set_vars(qpt=[0, 0, 0], rfddk=1, rfelfd=2, rfdir=[1, 1, 1])
ddk_task = flow.register_task(ddk_input, deps={scf_task: 'WFK'}, task_class=DdkTask)[0]
if with_dde:
logger.info('add dde')
dde_input = ph_inputs[0].deepcopy()
dde_input.set_vars(qpt=[0, 0, 0], rfddk=1, rfelfd=2)
dde_input_idir = dde_input.deepcopy()
dde_input_idir.set_vars(rfdir=[1, 1, 1])
dde_task = flow.register_task(dde_input, deps={scf_task: 'WFK', ddk_task: 'DDK'}, task_class=DdeTask)[0]
if not isinstance(ph_inputs, (list, tuple)):
ph_inputs = [ph_inputs]
for i, ph_input in enumerate(ph_inputs):
fake_input = ph_input.deepcopy()
# Run abinit on the front-end to get the list of irreducible pertubations.
tmp_dir = os.path.join(workdir, "__ph_run" + str(i) + "__")
w = PhononWork(workdir=tmp_dir, manager=shell_manager)
fake_task = w.register(fake_input)
# Use the magic value paral_rf = -1 to get the list of irreducible perturbations for this q-point.
abivars = dict(
paral_rf=-1,
rfatpol=[1, natom], # Set of atoms to displace.
rfdir=[1, 1, 1], # Along this set of reduced coordinate axis.
)
fake_task.set_vars(abivars)
w.allocate()
w.start(wait=True)
# Parse the file to get the perturbations.
try:
irred_perts = yaml_read_irred_perts(fake_task.log_file.path)
except:
print("Error in %s" % fake_task.log_file.path)
raise
logger.info(irred_perts)
w.rmtree()
# Now we can build the final list of works:
# One work per q-point, each work computes all
# the irreducible perturbations for a singe q-point.
work_qpt = PhononWork()
if with_nscf:
# MG: Warning this code assume 0 is Gamma!
nscf_input = copy.deepcopy(scf_input)
nscf_input.set_vars(kptopt=3, iscf=-3, qpt=irred_perts[0]['qpt'], nqpt=1)
nscf_task = work_qpt.register_nscf_task(nscf_input, deps={scf_task: "DEN"})
deps = {nscf_task: "WFQ", scf_task: "WFK"}
else:
deps = {scf_task: "WFK"}
if with_ddk:
deps[ddk_task] = 'DDK'
logger.info(irred_perts[0]['qpt'])
for irred_pert in irred_perts:
#print(irred_pert)
new_input = ph_input.deepcopy()
#rfatpol 1 1 # Only the first atom is displaced
#rfdir 1 0 0 # Along the first reduced coordinate axis
qpt = irred_pert["qpt"]
idir = irred_pert["idir"]
ipert = irred_pert["ipert"]
# TODO this will work for phonons, but not for the other types of perturbations.
rfdir = 3 * [0]
rfdir[idir -1] = 1
rfatpol = [ipert, ipert]
new_input.set_vars(
#rfpert=1,
qpt=qpt,
rfdir=rfdir,
rfatpol=rfatpol,
)
if with_ddk:
new_input.set_vars(rfelfd=3)
work_qpt.register_phonon_task(new_input, deps=deps)
flow.register_work(work_qpt)
if allocate: flow.allocate()
return flow | Build a :class:`PhononFlow` for phonon calculations.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
ph_inputs: List of Inputs for the phonon runs.
with_nscf: add an nscf task in front of al phonon tasks to make sure the q point is covered
with_ddk: add the ddk step
with_dde: add the dde step it the dde is set ddk is switched on automatically
manager: :class:`TaskManager` used to submit the jobs
Initialized from manager.yml if manager is None.
flow_class: Flow class
Returns:
:class:`Flow` object | Below is the instruction that describes the task:
### Input:
Build a :class:`PhononFlow` for phonon calculations.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
ph_inputs: List of Inputs for the phonon runs.
with_nscf: add an nscf task in front of al phonon tasks to make sure the q point is covered
with_ddk: add the ddk step
with_dde: add the dde step it the dde is set ddk is switched on automatically
manager: :class:`TaskManager` used to submit the jobs
Initialized from manager.yml if manager is None.
flow_class: Flow class
Returns:
:class:`Flow` object
### Response:
def phonon_flow(workdir, scf_input, ph_inputs, with_nscf=False, with_ddk=False, with_dde=False,
manager=None, flow_class=PhononFlow, allocate=True):
"""
Build a :class:`PhononFlow` for phonon calculations.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
ph_inputs: List of Inputs for the phonon runs.
with_nscf: add an nscf task in front of al phonon tasks to make sure the q point is covered
with_ddk: add the ddk step
with_dde: add the dde step it the dde is set ddk is switched on automatically
manager: :class:`TaskManager` used to submit the jobs
Initialized from manager.yml if manager is None.
flow_class: Flow class
Returns:
:class:`Flow` object
"""
logger.critical("phonon_flow is deprecated and could give wrong results")
if with_dde:
with_ddk = True
natom = len(scf_input.structure)
# Create the container that will manage the different works.
flow = flow_class(workdir, manager=manager)
# Register the first work (GS calculation)
# register_task creates a work for the task, registers it to the flow and returns the work
# the 0the element of the work is the task
scf_task = flow.register_task(scf_input, task_class=ScfTask)[0]
# Build a temporary work with a shell manager just to run
# ABINIT to get the list of irreducible pertubations for this q-point.
shell_manager = flow.manager.to_shell_manager(mpi_procs=1)
if with_ddk:
logger.info('add ddk')
# TODO
# MG Warning: be careful here because one should use tolde or tolwfr (tolvrs shall not be used!)
ddk_input = ph_inputs[0].deepcopy()
ddk_input.set_vars(qpt=[0, 0, 0], rfddk=1, rfelfd=2, rfdir=[1, 1, 1])
ddk_task = flow.register_task(ddk_input, deps={scf_task: 'WFK'}, task_class=DdkTask)[0]
if with_dde:
logger.info('add dde')
dde_input = ph_inputs[0].deepcopy()
dde_input.set_vars(qpt=[0, 0, 0], rfddk=1, rfelfd=2)
dde_input_idir = dde_input.deepcopy()
dde_input_idir.set_vars(rfdir=[1, 1, 1])
dde_task = flow.register_task(dde_input, deps={scf_task: 'WFK', ddk_task: 'DDK'}, task_class=DdeTask)[0]
if not isinstance(ph_inputs, (list, tuple)):
ph_inputs = [ph_inputs]
for i, ph_input in enumerate(ph_inputs):
fake_input = ph_input.deepcopy()
# Run abinit on the front-end to get the list of irreducible pertubations.
tmp_dir = os.path.join(workdir, "__ph_run" + str(i) + "__")
w = PhononWork(workdir=tmp_dir, manager=shell_manager)
fake_task = w.register(fake_input)
# Use the magic value paral_rf = -1 to get the list of irreducible perturbations for this q-point.
abivars = dict(
paral_rf=-1,
rfatpol=[1, natom], # Set of atoms to displace.
rfdir=[1, 1, 1], # Along this set of reduced coordinate axis.
)
fake_task.set_vars(abivars)
w.allocate()
w.start(wait=True)
# Parse the file to get the perturbations.
try:
irred_perts = yaml_read_irred_perts(fake_task.log_file.path)
except:
print("Error in %s" % fake_task.log_file.path)
raise
logger.info(irred_perts)
w.rmtree()
# Now we can build the final list of works:
# One work per q-point, each work computes all
# the irreducible perturbations for a singe q-point.
work_qpt = PhononWork()
if with_nscf:
# MG: Warning this code assume 0 is Gamma!
nscf_input = copy.deepcopy(scf_input)
nscf_input.set_vars(kptopt=3, iscf=-3, qpt=irred_perts[0]['qpt'], nqpt=1)
nscf_task = work_qpt.register_nscf_task(nscf_input, deps={scf_task: "DEN"})
deps = {nscf_task: "WFQ", scf_task: "WFK"}
else:
deps = {scf_task: "WFK"}
if with_ddk:
deps[ddk_task] = 'DDK'
logger.info(irred_perts[0]['qpt'])
for irred_pert in irred_perts:
#print(irred_pert)
new_input = ph_input.deepcopy()
#rfatpol 1 1 # Only the first atom is displaced
#rfdir 1 0 0 # Along the first reduced coordinate axis
qpt = irred_pert["qpt"]
idir = irred_pert["idir"]
ipert = irred_pert["ipert"]
# TODO this will work for phonons, but not for the other types of perturbations.
rfdir = 3 * [0]
rfdir[idir -1] = 1
rfatpol = [ipert, ipert]
new_input.set_vars(
#rfpert=1,
qpt=qpt,
rfdir=rfdir,
rfatpol=rfatpol,
)
if with_ddk:
new_input.set_vars(rfelfd=3)
work_qpt.register_phonon_task(new_input, deps=deps)
flow.register_work(work_qpt)
if allocate: flow.allocate()
return flow |
def resetToDefault(self, resetChildren=True):
""" Resets the data to the default data. By default the children will be reset as well
"""
self.data = self.defaultData
if resetChildren:
for child in self.childItems:
child.resetToDefault(resetChildren=True) | Resets the data to the default data. By default the children will be reset as well | Below is the the instruction that describes the task:
### Input:
Resets the data to the default data. By default the children will be reset as well
### Response:
def resetToDefault(self, resetChildren=True):
""" Resets the data to the default data. By default the children will be reset as well
"""
self.data = self.defaultData
if resetChildren:
for child in self.childItems:
child.resetToDefault(resetChildren=True) |
def _getDirection(coord1, coord2):
"""
Return the direction the line formed by the (x, y)
points in `coord1` and `coord2`.
"""
x1, y1 = coord1
x2, y2 = coord2
if x1 == x2 and y1 == y2:
return None # two coordinates are the same.
elif x1 == x2 and y1 > y2:
return UP
elif x1 == x2 and y1 < y2:
return DOWN
elif x1 > x2 and y1 == y2:
return LEFT
elif x1 < x2 and y1 == y2:
return RIGHT
slope = float(y2 - y1) / float(x2 - x1)
# Figure out which quadrant the line is going in, and then
# determine the closest direction by calculating the slope
if x2 > x1 and y2 < y1: # up right quadrant
if slope > -0.4142:
return RIGHT # slope is between 0 and 22.5 degrees
elif slope < -2.4142:
return UP # slope is between 67.5 and 90 degrees
else:
return UPRIGHT # slope is between 22.5 and 67.5 degrees
elif x2 > x1 and y2 > y1: # down right quadrant
if slope > 2.4142:
return DOWN
elif slope < 0.4142:
return RIGHT
else:
return DOWNRIGHT
elif x2 < x1 and y2 < y1: # up left quadrant
if slope < 0.4142:
return LEFT
elif slope > 2.4142:
return UP
else:
return UPLEFT
elif x2 < x1 and y2 > y1: # down left quadrant
if slope < -2.4142:
return DOWN
elif slope > -0.4142:
return LEFT
else:
return DOWNLEFT | Return the direction the line formed by the (x, y)
points in `coord1` and `coord2`. | Below is the instruction that describes the task:
### Input:
Return the direction the line formed by the (x, y)
points in `coord1` and `coord2`.
### Response:
def _getDirection(coord1, coord2):
"""
Return the direction the line formed by the (x, y)
points in `coord1` and `coord2`.
"""
x1, y1 = coord1
x2, y2 = coord2
if x1 == x2 and y1 == y2:
return None # two coordinates are the same.
elif x1 == x2 and y1 > y2:
return UP
elif x1 == x2 and y1 < y2:
return DOWN
elif x1 > x2 and y1 == y2:
return LEFT
elif x1 < x2 and y1 == y2:
return RIGHT
slope = float(y2 - y1) / float(x2 - x1)
# Figure out which quadrant the line is going in, and then
# determine the closest direction by calculating the slope
if x2 > x1 and y2 < y1: # up right quadrant
if slope > -0.4142:
return RIGHT # slope is between 0 and 22.5 degrees
elif slope < -2.4142:
return UP # slope is between 67.5 and 90 degrees
else:
return UPRIGHT # slope is between 22.5 and 67.5 degrees
elif x2 > x1 and y2 > y1: # down right quadrant
if slope > 2.4142:
return DOWN
elif slope < 0.4142:
return RIGHT
else:
return DOWNRIGHT
elif x2 < x1 and y2 < y1: # up left quadrant
if slope < 0.4142:
return LEFT
elif slope > 2.4142:
return UP
else:
return UPLEFT
elif x2 < x1 and y2 > y1: # down left quadrant
if slope < -2.4142:
return DOWN
elif slope > -0.4142:
return LEFT
else:
return DOWNLEFT |
def indicator_constraints(self,x):
"""
Returns array of ones and zeros indicating if x is within the constraints
"""
x = np.atleast_2d(x)
I_x = np.ones((x.shape[0],1))
if self.constraints is not None:
for d in self.constraints:
try:
exec('constraint = lambda x:' + d['constraint'], globals())
ind_x = (constraint(x) <= 0) * 1
I_x *= ind_x.reshape(x.shape[0],1)
except:
print('Fail to compile the constraint: ' + str(d))
raise
return I_x | Returns array of ones and zeros indicating if x is within the constraints | Below is the the instruction that describes the task:
### Input:
Returns array of ones and zeros indicating if x is within the constraints
### Response:
def indicator_constraints(self,x):
"""
Returns array of ones and zeros indicating if x is within the constraints
"""
x = np.atleast_2d(x)
I_x = np.ones((x.shape[0],1))
if self.constraints is not None:
for d in self.constraints:
try:
exec('constraint = lambda x:' + d['constraint'], globals())
ind_x = (constraint(x) <= 0) * 1
I_x *= ind_x.reshape(x.shape[0],1)
except:
print('Fail to compile the constraint: ' + str(d))
raise
return I_x |
def touchstone_options(obj):
r"""
Validate if an object is an :ref:`TouchstoneOptions` pseudo-type object.
:param obj: Object
:type obj: any
:raises: RuntimeError (Argument \`*[argument_name]*\` is not valid). The
token \*[argument_name]\* is replaced by the name of the argument the
contract is attached to
:rtype: None
"""
if (not isinstance(obj, dict)) or (
isinstance(obj, dict)
and (sorted(obj.keys()) != sorted(["units", "ptype", "pformat", "z0"]))
):
raise ValueError(pexdoc.pcontracts.get_exdesc())
if not (
(obj["units"].lower() in ["ghz", "mhz", "khz", "hz"])
and (obj["ptype"].lower() in ["s", "y", "z", "h", "g"])
and (obj["pformat"].lower() in ["db", "ma", "ri"])
and isinstance(obj["z0"], float)
and (obj["z0"] >= 0)
):
raise ValueError(pexdoc.pcontracts.get_exdesc()) | r"""
Validate if an object is an :ref:`TouchstoneOptions` pseudo-type object.
:param obj: Object
:type obj: any
:raises: RuntimeError (Argument \`*[argument_name]*\` is not valid). The
token \*[argument_name]\* is replaced by the name of the argument the
contract is attached to
:rtype: None | Below is the the instruction that describes the task:
### Input:
r"""
Validate if an object is an :ref:`TouchstoneOptions` pseudo-type object.
:param obj: Object
:type obj: any
:raises: RuntimeError (Argument \`*[argument_name]*\` is not valid). The
token \*[argument_name]\* is replaced by the name of the argument the
contract is attached to
:rtype: None
### Response:
def touchstone_options(obj):
r"""
Validate if an object is an :ref:`TouchstoneOptions` pseudo-type object.
:param obj: Object
:type obj: any
:raises: RuntimeError (Argument \`*[argument_name]*\` is not valid). The
token \*[argument_name]\* is replaced by the name of the argument the
contract is attached to
:rtype: None
"""
if (not isinstance(obj, dict)) or (
isinstance(obj, dict)
and (sorted(obj.keys()) != sorted(["units", "ptype", "pformat", "z0"]))
):
raise ValueError(pexdoc.pcontracts.get_exdesc())
if not (
(obj["units"].lower() in ["ghz", "mhz", "khz", "hz"])
and (obj["ptype"].lower() in ["s", "y", "z", "h", "g"])
and (obj["pformat"].lower() in ["db", "ma", "ri"])
and isinstance(obj["z0"], float)
and (obj["z0"] >= 0)
):
raise ValueError(pexdoc.pcontracts.get_exdesc()) |
def get_mathitemsinsertion(cls, indent) -> str:
"""Return a string defining a model specific XML type extending
`ItemType`.
>>> from hydpy.auxs.xmltools import XSDWriter
>>> print(XSDWriter.get_mathitemsinsertion(1)) # doctest: +ELLIPSIS
<complexType name="arma_v1_mathitemType">
<complexContent>
<extension base="hpcb:setitemType">
<choice>
<element name="control.responses"/>
...
<element name="logs.logout"/>
</choice>
</extension>
</complexContent>
</complexType>
<BLANKLINE>
<complexType name="dam_v001_mathitemType">
...
"""
blanks = ' ' * (indent*4)
subs = []
for modelname in cls.get_modelnames():
model = importtools.prepare_model(modelname)
subs.extend([
f'{blanks}<complexType name="{modelname}_mathitemType">',
f'{blanks} <complexContent>',
f'{blanks} <extension base="hpcb:setitemType">',
f'{blanks} <choice>'])
for subvars in cls._get_subvars(model):
for var in subvars:
subs.append(
f'{blanks} '
f'<element name="{subvars.name}.{var.name}"/>')
subs.extend([
f'{blanks} </choice>',
f'{blanks} </extension>',
f'{blanks} </complexContent>',
f'{blanks}</complexType>',
f''])
return '\n'.join(subs) | Return a string defining a model specific XML type extending
`ItemType`.
>>> from hydpy.auxs.xmltools import XSDWriter
>>> print(XSDWriter.get_mathitemsinsertion(1)) # doctest: +ELLIPSIS
<complexType name="arma_v1_mathitemType">
<complexContent>
<extension base="hpcb:setitemType">
<choice>
<element name="control.responses"/>
...
<element name="logs.logout"/>
</choice>
</extension>
</complexContent>
</complexType>
<BLANKLINE>
<complexType name="dam_v001_mathitemType">
... | Below is the the instruction that describes the task:
### Input:
Return a string defining a model specific XML type extending
`ItemType`.
>>> from hydpy.auxs.xmltools import XSDWriter
>>> print(XSDWriter.get_mathitemsinsertion(1)) # doctest: +ELLIPSIS
<complexType name="arma_v1_mathitemType">
<complexContent>
<extension base="hpcb:setitemType">
<choice>
<element name="control.responses"/>
...
<element name="logs.logout"/>
</choice>
</extension>
</complexContent>
</complexType>
<BLANKLINE>
<complexType name="dam_v001_mathitemType">
...
### Response:
def get_mathitemsinsertion(cls, indent) -> str:
"""Return a string defining a model specific XML type extending
`ItemType`.
>>> from hydpy.auxs.xmltools import XSDWriter
>>> print(XSDWriter.get_mathitemsinsertion(1)) # doctest: +ELLIPSIS
<complexType name="arma_v1_mathitemType">
<complexContent>
<extension base="hpcb:setitemType">
<choice>
<element name="control.responses"/>
...
<element name="logs.logout"/>
</choice>
</extension>
</complexContent>
</complexType>
<BLANKLINE>
<complexType name="dam_v001_mathitemType">
...
"""
blanks = ' ' * (indent*4)
subs = []
for modelname in cls.get_modelnames():
model = importtools.prepare_model(modelname)
subs.extend([
f'{blanks}<complexType name="{modelname}_mathitemType">',
f'{blanks} <complexContent>',
f'{blanks} <extension base="hpcb:setitemType">',
f'{blanks} <choice>'])
for subvars in cls._get_subvars(model):
for var in subvars:
subs.append(
f'{blanks} '
f'<element name="{subvars.name}.{var.name}"/>')
subs.extend([
f'{blanks} </choice>',
f'{blanks} </extension>',
f'{blanks} </complexContent>',
f'{blanks}</complexType>',
f''])
return '\n'.join(subs) |
def WriteBlobs(self, blob_id_data_map):
"""Creates or overwrites blobs."""
urns = {self._BlobUrn(blob_id): blob_id for blob_id in blob_id_data_map}
mutation_pool = data_store.DB.GetMutationPool()
existing = aff4.FACTORY.MultiOpen(
urns, aff4_type=aff4.AFF4MemoryStreamBase, mode="r")
for blob_urn, blob_id in iteritems(urns):
if blob_urn in existing:
logging.debug("Blob %s already stored.", blob_id)
continue
with aff4.FACTORY.Create(
blob_urn,
aff4.AFF4UnversionedMemoryStream,
mode="w",
mutation_pool=mutation_pool) as fd:
content = blob_id_data_map[blob_id]
fd.Write(content)
logging.debug("Got blob %s (length %s)", blob_id.AsHexString(),
len(content))
mutation_pool.Flush() | Creates or overwrites blobs. | Below is the the instruction that describes the task:
### Input:
Creates or overwrites blobs.
### Response:
def WriteBlobs(self, blob_id_data_map):
"""Creates or overwrites blobs."""
urns = {self._BlobUrn(blob_id): blob_id for blob_id in blob_id_data_map}
mutation_pool = data_store.DB.GetMutationPool()
existing = aff4.FACTORY.MultiOpen(
urns, aff4_type=aff4.AFF4MemoryStreamBase, mode="r")
for blob_urn, blob_id in iteritems(urns):
if blob_urn in existing:
logging.debug("Blob %s already stored.", blob_id)
continue
with aff4.FACTORY.Create(
blob_urn,
aff4.AFF4UnversionedMemoryStream,
mode="w",
mutation_pool=mutation_pool) as fd:
content = blob_id_data_map[blob_id]
fd.Write(content)
logging.debug("Got blob %s (length %s)", blob_id.AsHexString(),
len(content))
mutation_pool.Flush() |
def source_set(method_name):
"""
Creates a setter that will call the source method with the context's
key as first parameter and the value as second parameter.
@param method_name: the name of a method belonging to the source.
@type method_name: str
"""
def source_set(value, context, **_params):
method = getattr(context["model"].source, method_name)
return _set(method, context["key"], value, (), {})
return source_set | Creates a setter that will call the source method with the context's
key as first parameter and the value as second parameter.
@param method_name: the name of a method belonging to the source.
@type method_name: str | Below is the the instruction that describes the task:
### Input:
Creates a setter that will call the source method with the context's
key as first parameter and the value as second parameter.
@param method_name: the name of a method belonging to the source.
@type method_name: str
### Response:
def source_set(method_name):
"""
Creates a setter that will call the source method with the context's
key as first parameter and the value as second parameter.
@param method_name: the name of a method belonging to the source.
@type method_name: str
"""
def source_set(value, context, **_params):
method = getattr(context["model"].source, method_name)
return _set(method, context["key"], value, (), {})
return source_set |
def GetMemOverheadMB(self):
'''Retrieves the amount of "overhead" memory associated with this virtual
machine that is currently consumed on the host system. Overhead
memory is additional memory that is reserved for data structures required
by the virtualization layer.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetMemOverheadMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value | Retrieves the amount of "overhead" memory associated with this virtual
machine that is currently consumed on the host system. Overhead
memory is additional memory that is reserved for data structures required
by the virtualization layer. | Below is the the instruction that describes the task:
### Input:
Retrieves the amount of "overhead" memory associated with this virtual
machine that is currently consumed on the host system. Overhead
memory is additional memory that is reserved for data structures required
by the virtualization layer.
### Response:
def GetMemOverheadMB(self):
'''Retrieves the amount of "overhead" memory associated with this virtual
machine that is currently consumed on the host system. Overhead
memory is additional memory that is reserved for data structures required
by the virtualization layer.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetMemOverheadMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value |
def coord_to_tile(self, lat, lon, zoom):
'''convert lat/lon/zoom to a TileInfo'''
world_tiles = 1<<zoom
x = world_tiles / 360.0 * (lon + 180.0)
tiles_pre_radian = world_tiles / (2 * math.pi)
e = math.sin(lat * (1/180.*math.pi))
y = world_tiles/2 + 0.5*math.log((1+e)/(1-e)) * (-tiles_pre_radian)
offsetx = int((x - int(x)) * TILES_WIDTH)
offsety = int((y - int(y)) * TILES_HEIGHT)
return TileInfo((int(x) % world_tiles, int(y) % world_tiles), zoom, self.service, offset=(offsetx, offsety)) | convert lat/lon/zoom to a TileInfo | Below is the the instruction that describes the task:
### Input:
convert lat/lon/zoom to a TileInfo
### Response:
def coord_to_tile(self, lat, lon, zoom):
'''convert lat/lon/zoom to a TileInfo'''
world_tiles = 1<<zoom
x = world_tiles / 360.0 * (lon + 180.0)
tiles_pre_radian = world_tiles / (2 * math.pi)
e = math.sin(lat * (1/180.*math.pi))
y = world_tiles/2 + 0.5*math.log((1+e)/(1-e)) * (-tiles_pre_radian)
offsetx = int((x - int(x)) * TILES_WIDTH)
offsety = int((y - int(y)) * TILES_HEIGHT)
return TileInfo((int(x) % world_tiles, int(y) % world_tiles), zoom, self.service, offset=(offsetx, offsety)) |
def forward(self, x, boxes):
"""
Arguments:
x (tuple[tensor, tensor]): x contains the class logits
and the box_regression from the model.
boxes (list[BoxList]): bounding boxes that are used as
reference, one for ech image
Returns:
results (list[BoxList]): one BoxList for each image, containing
the extra fields labels and scores
"""
class_logits, box_regression = x
class_prob = F.softmax(class_logits, -1)
# TODO think about a representation of batch of boxes
image_shapes = [box.size for box in boxes]
boxes_per_image = [len(box) for box in boxes]
concat_boxes = torch.cat([a.bbox for a in boxes], dim=0)
if self.cls_agnostic_bbox_reg:
box_regression = box_regression[:, -4:]
proposals = self.box_coder.decode(
box_regression.view(sum(boxes_per_image), -1), concat_boxes
)
if self.cls_agnostic_bbox_reg:
proposals = proposals.repeat(1, class_prob.shape[1])
num_classes = class_prob.shape[1]
proposals = proposals.split(boxes_per_image, dim=0)
class_prob = class_prob.split(boxes_per_image, dim=0)
results = []
for prob, boxes_per_img, image_shape in zip(
class_prob, proposals, image_shapes
):
boxlist = self.prepare_boxlist(boxes_per_img, prob, image_shape)
boxlist = boxlist.clip_to_image(remove_empty=False)
boxlist = self.filter_results(boxlist, num_classes)
results.append(boxlist)
return results | Arguments:
x (tuple[tensor, tensor]): x contains the class logits
and the box_regression from the model.
boxes (list[BoxList]): bounding boxes that are used as
reference, one for ech image
Returns:
results (list[BoxList]): one BoxList for each image, containing
the extra fields labels and scores | Below is the the instruction that describes the task:
### Input:
Arguments:
x (tuple[tensor, tensor]): x contains the class logits
and the box_regression from the model.
boxes (list[BoxList]): bounding boxes that are used as
reference, one for ech image
Returns:
results (list[BoxList]): one BoxList for each image, containing
the extra fields labels and scores
### Response:
def forward(self, x, boxes):
"""
Arguments:
x (tuple[tensor, tensor]): x contains the class logits
and the box_regression from the model.
boxes (list[BoxList]): bounding boxes that are used as
reference, one for ech image
Returns:
results (list[BoxList]): one BoxList for each image, containing
the extra fields labels and scores
"""
class_logits, box_regression = x
class_prob = F.softmax(class_logits, -1)
# TODO think about a representation of batch of boxes
image_shapes = [box.size for box in boxes]
boxes_per_image = [len(box) for box in boxes]
concat_boxes = torch.cat([a.bbox for a in boxes], dim=0)
if self.cls_agnostic_bbox_reg:
box_regression = box_regression[:, -4:]
proposals = self.box_coder.decode(
box_regression.view(sum(boxes_per_image), -1), concat_boxes
)
if self.cls_agnostic_bbox_reg:
proposals = proposals.repeat(1, class_prob.shape[1])
num_classes = class_prob.shape[1]
proposals = proposals.split(boxes_per_image, dim=0)
class_prob = class_prob.split(boxes_per_image, dim=0)
results = []
for prob, boxes_per_img, image_shape in zip(
class_prob, proposals, image_shapes
):
boxlist = self.prepare_boxlist(boxes_per_img, prob, image_shape)
boxlist = boxlist.clip_to_image(remove_empty=False)
boxlist = self.filter_results(boxlist, num_classes)
results.append(boxlist)
return results |
def add_downloads(self, filemap):
"""Add the dict of downloads. (Note the Winscp command line accepts wildcards)
Parameters
----------
filemap: dict, (remote_filename -> local_filename)
"""
[self.add_download(k, v) for k, v in filemap.iteritems()] | Add the dict of downloads. (Note the Winscp command line accepts wildcards)
Parameters
----------
filemap: dict, (remote_filename -> local_filename) | Below is the the instruction that describes the task:
### Input:
Add the dict of downloads. (Note the Winscp command line accepts wildcards)
Parameters
----------
filemap: dict, (remote_filename -> local_filename)
### Response:
def add_downloads(self, filemap):
"""Add the dict of downloads. (Note the Winscp command line accepts wildcards)
Parameters
----------
filemap: dict, (remote_filename -> local_filename)
"""
[self.add_download(k, v) for k, v in filemap.iteritems()] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.